seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
import random
import numpy as np
import pandas as pd
from keras.layers import Input, Dense
from keras.models import Model
import tensorflow as tf
from shared_module import *
def input_encoding_model(encoded):
    """Build the shared encoder: three sigmoid dense layers mapping an
    input of shape `encoded` down to a 128-dimensional embedding."""
    inputs = Input(encoded)
    # Funnel 1024 -> 512 -> 128 (the commented-out 2056 stage was removed upstream).
    hidden = Dense(1024, activation='sigmoid', name='fc1.0')(inputs)
    hidden = Dense(512, activation='sigmoid', name='fc2.0')(hidden)
    embedding = Dense(128, activation='sigmoid', name='fc3.0')(hidden)
    return Model(inputs=inputs, outputs=embedding, name='inputEncodingModel')
def input_training_model(a, p, n, encoding_model):
    """Wrap the shared encoder into a triplet network: the anchor, positive
    and negative inputs all pass through the same `encoding_model`."""
    anchor_in, positive_in, negative_in = Input(a), Input(p), Input(n)
    outputs = [encoding_model(x) for x in (anchor_in, positive_in, negative_in)]
    return Model(inputs=[anchor_in, positive_in, negative_in],
                 outputs=outputs,
                 name='inputTrainingModel')
def triplet_loss(y_true, y_pred, alpha=0.2):
    """
    Triplet loss as defined by FaceNet: max(||a-p||^2 - ||a-n||^2 + alpha, 0).

    y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)
    alpha -- margin enforced between the positive and the negative distance.
    Returns:
    loss -- real number, value of the loss
    """
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
    # Squared L2 distances between the anchor and the positive/negative.
    # NOTE(review): reduce_sum without an axis pools the whole batch into a
    # single scalar distance; a per-example loss would use axis=-1. Kept
    # as-is to preserve the existing training behaviour — confirm intent.
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)))
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)))
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Bug fix: the margin term must be clamped at zero (hinge), not squared.
    # Squaring penalised "easy" triplets (neg_dist >> pos_dist) as heavily
    # as hard ones, which inverts the training signal.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
    return loss
def generate_hard_triplet_dataframe(df_labels, id_column, type_column, n_anchors, model):
    # NOTE(review): `model` is accepted but never forwarded, so hard-triplet
    # mining is not actually implemented — this currently behaves exactly
    # like generate_random_triplet_dataframe. (Forwarding the model would
    # produce an empty result, because generate_triplet_dataframe only
    # appends triplets when model is None.) Confirm before relying on it.
    return generate_triplet_dataframe(df_labels, id_column, type_column, n_anchors)
def generate_random_triplet_dataframe(df_labels, id_column, type_column, n_anchors):
    """Build triplets with randomly selected positives (no mining model)."""
    return generate_triplet_dataframe(df_labels, id_column, type_column, n_anchors, model=None)
def generate_triplet_dataframe(df_labels, id_column, type_column, n_anchors, model=None):
    """Build (anchor, positive, negative) triplets from a labelled dataframe.

    For every class value in `type_column`, pick up to `n_anchors` anchor
    rows; pair each anchor with every anchor of a *different* class as a
    negative, and with one randomly chosen positive of the same class.

    Returns a tuple (all_anchor_rows, triplet_dataframe).
    NOTE(review): when `model` is not None, nothing is ever appended to
    `results` (hard mining is unimplemented) — confirm this is intended.
    """
    unique_types = df_labels[type_column].unique()
    df_anchors = find_all_anchors(df_labels)
    results = []
    random.seed()  # re-seed from system entropy for positive selection
    for anchor_type in unique_types:
        # All classes except the anchor's class act as negative sources.
        other_types = np.setdiff1d(unique_types, anchor_type)
        anchors = find_anchors(df_labels, anchor_type, type_column, n_anchors)
        for i, anchor in anchors.iterrows():
            positives = find_positives(df_labels, anchor, anchor_type, type_column)
            negatives = find_negatives(df_labels, other_types, type_column)
            for ni, n in negatives.iterrows():
                if model is None:
                    # Random selection path: one positive per negative.
                    selected_positives = select_positives(anchor, positives)
                    for p in selected_positives:
                        results.append(
                            [anchor[id_column], anchor[type_column], anchor[ENCODING_COL], p[id_column], p[type_column],
                             p[ENCODING_COL], n[id_column], n[type_column], n[ENCODING_COL]])
    return df_anchors, pd.DataFrame(results,
                                    columns=['anchor_id', 'anchor_type', 'anchor_encoding',
                                             'positive_id', 'positive_type', 'positive_encoding',
                                             'negative_id', 'negative_type', 'negative_encoding'])
def select_positives(anchor, positives):
    """Pick one positive row uniformly at random.

    `anchor` is unused but kept for signature symmetry with the other
    triplet helpers. Returns a single-element list of pandas rows.
    """
    chosen = random.randrange(positives.shape[0])
    return [positives.iloc[chosen]]
def find_all_anchors(df_labels):
    """Return every row flagged as an anchor ('A' in the ANP column)."""
    is_anchor = df_labels[ANP_COL] == 'A'
    return df_labels.loc[is_anchor]
def generate_triplet(df_labels, type_column, n_anchors):
    """Build triplets as an array of (anchor_dict, positive_dict, negative_dict).

    Dict-based variant of generate_triplet_dataframe: one random positive is
    drawn per (anchor, negative) pair.
    """
    unique_types = df_labels[type_column].unique()
    results = []
    for anchor_type in unique_types:
        # Every other class supplies negatives for this anchor class.
        other_types = np.setdiff1d(unique_types, anchor_type)
        anchors = find_anchors(df_labels, anchor_type, type_column, n_anchors)
        for i, anchor in anchors.iterrows():
            random.seed()  # re-seed from system entropy per anchor
            positives = find_positives(df_labels, anchor, anchor_type, type_column)
            negatives = find_negatives(df_labels, other_types, type_column)
            anchor_dict = {'id': anchor['id'], type_column: anchor[type_column], ENCODING_COL: anchor[ENCODING_COL]}
            for ni, n in negatives.iterrows():
                neg_dict = {'id': n['id'], type_column: n[type_column], ENCODING_COL: n[ENCODING_COL]}
                # One random positive per negative pairing.
                p1 = positives.iloc[random.randint(0, positives.shape[0] - 1)]
                pos_dict = {'id': p1['id'], type_column: p1[type_column], ENCODING_COL: p1[ENCODING_COL]}
                results.append((anchor_dict, pos_dict, neg_dict))
    return np.array(results)
def find_anchors(df_labels, anchor_type, type_column, n_anchors):
    """Return up to `n_anchors` anchor rows of `anchor_type`.

    Prints a warning (but still returns what exists) when fewer anchors
    than requested are available.
    """
    mask = (df_labels[type_column] == anchor_type) & (df_labels[ANP_COL] == 'A')
    anchors = df_labels.loc[mask, :]
    available = anchors.shape[0]
    if available < n_anchors:
        print('Asking for n_achors {n_anchors} but only have {actual} from dataframe for type {anchor_type}'.format(
            n_anchors=n_anchors, actual=available, anchor_type=anchor_type))
    return anchors.head(n_anchors)
def find_negatives(df_labels, other_types, type_column):
    """Return anchor rows whose type is one of `other_types`."""
    is_other_type = df_labels[type_column].isin(other_types)
    is_anchor = df_labels[ANP_COL] == 'A'
    return df_labels.loc[is_other_type & is_anchor]
def find_positives(df_labels, anchor, anchor_type, type_column):
    """Return positive ('P') rows of the anchor's type, excluding rows that
    share the anchor's id or cluster."""
    mask = (
        (df_labels[type_column] == anchor_type)
        & (df_labels[ANP_COL] == 'P')
        & (df_labels['id'] != anchor['id'])
        & (df_labels['cluster'] != anchor['cluster'])
    )
    return df_labels.loc[mask, :]
def distance(encoding1, encoding2):
    """Squared Euclidean distance between encodings along the last axis."""
    diff = np.asarray(encoding1) - np.asarray(encoding2)
    return np.sum(diff ** 2, axis=-1)
def softmax(X):
    """Return the softmax of X.

    Improvement: shift by max(X) before exponentiating. This is
    mathematically identical (the shift cancels in the ratio) but avoids
    overflow to inf/nan for large logits, which the original suffered from.
    """
    shifted = np.exp(X - np.max(X))
    return shifted / np.sum(shifted)
def get_class(result, identities):
    """Return (breed, id, encoding) of the identity with the highest score."""
    best = identities.iloc[int(np.argmax(result))]
    return best['breed'], best['id'], best['encoding']
def model_encode(model, encoding_size):
    """Return a closure that wraps a raw image encoding into a
    (1, 1, encoding_size) batch and runs it through `model`."""
    def encode(image_encoding):
        batch = np.zeros((1, 1, encoding_size))
        batch[0] = image_encoding
        return model.predict_on_batch(batch)

    return encode
def get_identities_encoding(df_train, encoding_function):
    """Load each anchor's stored encoding from disk and encode it.

    Returns (anchor_rows_dataframe, encoded_vectors) in matching order.
    """
    identities = find_all_anchors(df_train)
    # ENCODING_COL holds file paths; np.loadtxt reads the stored vectors.
    identities_encoding = list(map(lambda x: np.loadtxt(x), identities[ENCODING_COL].values.tolist()))
    identities_encoded = list(map(encoding_function, identities_encoding))
    return identities, identities_encoded
def get_identities_encoding_map(df_train, encoding_function):
    """Return a dict mapping breed -> list of encoded anchor encodings.

    Encodings are loaded from the file paths stored in ENCODING_COL and
    passed through `encoding_function`.
    """
    identities = find_all_anchors(df_train)
    identity_map = {}
    for _, row in identities.iterrows():
        encodings = identity_map.setdefault(row['breed'], [])
        encodings.append(encoding_function(np.loadtxt(row[ENCODING_COL])))
    return identity_map
def predict_on_model(df_labels, encoding_function):
    """Classify every row by nearest-anchor identity and score accuracy.

    Each row's encoding is compared to every anchor encoding; the softmax
    of distances is inverted (1 - softmax) so the closest anchor scores
    highest. Mutates `df_labels` in place by adding prediction columns.

    Returns (df_labels, total_rows, bad_prediction_count, accuracy).
    """
    identities, identities_encoded = get_identities_encoding(df_labels, encoding_function)
    prediction_error_count = 0
    prediction = []
    prediction_ids = []
    prediction_encodings = []
    for i, row in df_labels.iterrows():
        row_encoding = encoding_function(np.loadtxt(row.encoding))
        result = []
        for id_encoding in identities_encoded:
            dist = distance(id_encoding, row_encoding)
            result.append(dist.item(0))
        # Smaller distance => larger score after 1 - softmax.
        result = softmax(np.array(result))
        result = 1 - result
        predicted_breed, predicted_id, predicted_encoding = get_class(result, identities)
        prediction.append(predicted_breed)
        prediction_ids.append(predicted_id)
        prediction_encodings.append(predicted_encoding)
        if row['breed'] != predicted_breed:
            prediction_error_count += 1
    df_labels['prediction_breed'] = prediction
    df_labels['prediction'] = (df_labels['prediction_breed'] == df_labels['breed'])
    df_labels['prediction_id'] = prediction_ids
    df_labels['prediction_encoding'] = prediction_encodings
    total = df_labels.shape[0]
    bad_predictions = df_labels.loc[df_labels['prediction'] == False].shape[0]
    accuracy = (total - bad_predictions) / total
    return df_labels, total, bad_predictions, accuracy
def save_best_distance(breed_dict, identity, distance):
    """Record `distance` for `identity` only if it improves (is smaller
    than) the currently stored best.

    Bug fix: the original's else-branch unconditionally overwrote an
    existing entry, so a *worse* (larger) distance replaced a better one.
    """
    if identity not in breed_dict or distance < breed_dict[identity]:
        breed_dict[identity] = distance
def predict_on_test_model(df_labels, df_test, breeds, encoding_function):
    """For each test row, compute its distance to the nearest anchor of
    every breed.

    Returns a list of rows [test_id, dist_breed_0, dist_breed_1, ...]
    following the order of `breeds`.

    Bug fix: nearest-anchor distance requires np.min, not np.max — the
    variable was already named min_dist and the accompanying
    save_best_distance helper keeps minima, confirming the intent.
    Also removed the unused counter `i` and the shadowing of builtin `id`.
    """
    identity_encoding_map = get_identities_encoding_map(df_labels, encoding_function)
    predictions = []
    for _, row in df_test.iterrows():
        test_encoding = encoding_function(np.loadtxt(row.encoding))
        result = [row['id']]
        for breed in breeds:
            min_dist = np.min(list(map(lambda x: distance(test_encoding, x), identity_encoding_map[breed])))
            result.append(min_dist)
        predictions.append(result)
    return predictions
| alechfho/dog_breed | triplet_encoding.py | triplet_encoding.py | py | 9,622 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
# Write : lgy
# Data : 2017-09-24
# function: Algorithm class NetWork for bp network
from Connections import Connections
from Layer import Layer
from Connection import Connection
class NetWork(object):
    """A fully connected feed-forward network trained with backpropagation."""

    def __init__(self, layers):
        """
        Initialize a fully connected neural network.
        :param layers: list of ints, the number of nodes in each layer
        """
        self.connections = Connections()
        self.layers = []
        layers_count = len(layers)
        # Bug fix: build one Layer per entry. The original used
        # range(layers_count - 1), so the connection loop below crashed
        # with an IndexError on self.layers[layer + 1].
        for i in range(layers_count):
            self.layers.append(Layer(i, layers[i]))
        for layer in range(layers_count - 1):
            # The last node of each layer is the bias node; it receives no
            # upstream connections, hence nodes[:-1] on the downstream side.
            connections = [Connection(upstream_node, downstream_node)
                           for upstream_node in self.layers[layer].nodes
                           for downstream_node in self.layers[layer + 1].nodes[:-1]]
            for conn in connections:
                self.connections.add_connection(conn)
                conn.downstream_node.append_upstream_connection(conn)
                conn.upstream_node.append_downstream_connection(conn)

    def train(self, data_set, labels, rate, iteration):
        """
        Train the network.
        :param data_set: 2-D array, one feature vector per sample
        :param labels: array, one label vector per sample
        :param rate: learning rate
        :param iteration: number of passes over the data set
        :return:
        """
        for i in range(iteration):
            for d in range(len(data_set)):
                self.train_one_sample(data_set[d], labels[d], rate)

    def train_one_sample(self, sample, label, rate):
        """
        Internal: run one forward/backward pass for a single sample and
        update the weights.
        """
        self.predict(sample)
        self.calc_delta(label)
        self.update_weight(rate)

    def calc_delta(self, label):
        """
        Internal: compute the delta (error term) of every node, output
        layer first, then hidden layers back to front.
        """
        output_nodes = self.layers[-1].nodes
        for i in range(len(label)):
            output_nodes[i].calc_output_layer_delta(label[i])
        for layer in self.layers[-2::-1]:
            # Bug fix: Layer objects are not iterable; iterate their nodes.
            for node in layer.nodes:
                node.calc_hidden_layer_delta()

    def update_weight(self, rate):
        """
        Internal: update the weight of every connection.
        """
        for layer in self.layers[:-1]:
            for node in layer.nodes:
                for conn in node.downstream:
                    conn.update_weight(rate)

    def calc_gradient(self):
        """
        Internal: compute the gradient of every connection.
        """
        for layer in self.layers[:-1]:
            for node in layer.nodes:
                for conn in node.downstream:
                    conn.calc_gradient()

    def get_gradient(self, sample, label):
        """
        Compute per-connection gradients for one sample (used for gradient
        checking).
        :param sample: network input
        :param label: sample label
        """
        self.predict(sample)
        self.calc_delta(label)
        self.calc_gradient()

    def predict(self, sample):
        """
        Predict the output for an input sample.
        :param sample: array, feature vector fed to the input layer
        :return: iterator over output-layer activations (bias node excluded)
        """
        self.layers[0].set_output(sample)
        for i in range(1, len(self.layers)):
            self.layers[i].calc_output()
        # Bug fix: 'modes' was a typo for 'nodes' and raised AttributeError.
        return map(lambda node: node.output, self.layers[-1].nodes[:-1])

    def dump(self):
        """
        Print the network structure for debugging.
        """
        for layer in self.layers:
            layer.dump()
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Dense, LSTM, SimpleRNN, Dropout, Flatten
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.models import load_model
from keras.optimizers import SGD
import math
class Finance_Neural_Nets:
    """Download price data, train RNN/LSTM regressors on a sliding window of
    past prices, and predict future close prices."""

    # Configure these on the instance/class before calling prepare_data()
    # and train_neural_net():
    data_column = ''   # price column to model, e.g. 'Close' or 'Adj Close'
    window = 0         # look-back window length in trading days
    batch_size = 0     # mini-batch size for model.fit
    epoch = 0          # number of training epochs

    def __init__(self, asset_name, source, start_date, end_date):
        """Store the data-source parameters used by every download."""
        self.asset_name = asset_name
        self.source = source
        self.start_date = start_date
        self.end_date = end_date

    def plot_finance_data(self):
        """Download the asset history and plot its Close price."""
        asset = wb.DataReader(self.asset_name, data_source=self.source, start=self.start_date, end=self.end_date)
        plt.figure(figsize=(16, 8))
        plt.title('Close Price History')
        plt.plot(asset['Close'])
        plt.xlabel('Date', fontsize=18)
        plt.ylabel('Close Price', fontsize=18)
        plt.show()

    def prepare_data(self):
        """Download prices and build the scaled, windowed training set.

        Returns (x_train, y_train, x_train.shape, training_data_len,
        scaler, scaled_data, data, dataset); the first 80% of rows form
        the training split.
        """
        asset = wb.DataReader(self.asset_name, data_source=self.source, start=self.start_date, end=self.end_date)
        data = asset.filter([self.data_column])
        # Convert the dataframe to a numpy array
        dataset = data.values
        # Get the number of rows to train the model on (80% split)
        training_data_len = math.ceil(len(dataset) * .8)
        # Scale the data into [0, 1] for the recurrent layers
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = scaler.fit_transform(dataset)
        train_data = scaled_data[0:training_data_len, :]
        x_train = []
        y_train = []
        # Each sample is `window` consecutive prices; the target is the next price.
        for i in range(self.window, len(train_data)):
            x_train.append(train_data[i - self.window:i, 0])
            y_train.append(train_data[i, 0])
        x_train, y_train = np.array(x_train), np.array(y_train)
        print(x_train.shape, 'X train data shape')
        # Reshape to (samples, timesteps, features) for RNN/LSTM layers
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
        return x_train, y_train, x_train.shape, training_data_len, scaler, scaled_data, data, dataset

    def neural_net_RNN_model(self, x_train):
        """Build and compile a 3-layer SimpleRNN regressor."""
        model = Sequential()
        model.add(SimpleRNN(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
        model.add(keras.layers.Dropout(0.2))
        model.add(SimpleRNN(50, return_sequences=True))
        model.add(keras.layers.Dropout(0.5))
        model.add(SimpleRNN(50))
        model.add(Dense(1, activation='linear'))
        # Compile the model
        model.compile(optimizer='adam', loss='mean_squared_error')
        return model

    def neural_net_LSTM_model(self, x_train):
        """Build and compile a 2-layer LSTM regressor."""
        model = Sequential()
        model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
        model.add(keras.layers.Dropout(0.2))
        model.add(LSTM(50, return_sequences=False))
        model.add(keras.layers.Dropout(0.5))
        model.add(Dense(25))
        model.add(Dense(1, activation='linear'))
        # Compile the model
        model.compile(optimizer='adam', loss='mean_squared_error')
        return model

    def train_neural_net(self, model, x_train, y_train, training_data_len, scaler, scaled_data, data, dataset):
        """Fit the model, evaluate RMSE on the held-out 20%, and plot the
        loss curve and predictions against the actual prices.

        NOTE(review): the prediction plots reference the 'Adj Close'
        column, so they assume data_column == 'Adj Close' — confirm.
        """
        model_hist = model.fit(x_train, y_train, batch_size=self.batch_size, epochs=self.epoch, verbose=1)
        print(model.summary(), 'Model summery')
        # Build the test windows; include the last `window` training rows
        # so the first test sample has a full look-back.
        test_data = scaled_data[training_data_len - self.window:, :]
        x_test = []
        y_test = dataset[training_data_len:, :]
        for i in range(self.window, len(test_data)):
            x_test.append(test_data[i - self.window:i, 0])
        x_test = np.array(x_test)
        x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
        predictions = model.predict(x_test)
        predictions = scaler.inverse_transform(predictions)
        # Root mean squared error (two equivalent formulations)
        rms = np.sqrt(np.mean(np.power((predictions - y_test), 2)))
        rmse = np.sqrt(np.mean(((predictions - y_test) ** 2)))
        print(rmse, 'RMSE')
        print(rms, 'RMS')
        # Plot the training loss
        tra_loss = model_hist.history['loss']
        plt.plot(tra_loss)
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Loss Curve')
        plt.legend(['Training Loss'])
        # Plot predictions vs. actuals
        train = data[:training_data_len]
        valid = data[training_data_len:]
        # NOTE(review): assigning to a slice triggers pandas'
        # SettingWithCopyWarning; kept as-is to preserve behaviour.
        valid['Predictions'] = predictions
        plt.figure(figsize=(16, 8))
        plt.title('Model')
        plt.xlabel('Date', fontsize=18)
        plt.ylabel('Close Price', fontsize=18)
        plt.plot(train['Adj Close'])
        plt.plot(valid[['Adj Close', 'Predictions']])
        plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
        plt.show()
        print(valid.head(), ' valid dataset')

    def save_neural_net_model(self, model, model_name):
        """Persist the model to '<model_name>.h5'.

        Bug fix: the original concatenated 'h5' without the dot, producing
        misnamed files like 'mymodelh5'; load_neural_net_model below was
        changed in lockstep.
        """
        model.save(model_name + '.h5')

    def load_neural_net_model(self, model_name):
        """Load a model previously saved by save_neural_net_model."""
        model = load_model(model_name + '.h5')
        return model

    def test_neural_net_model(self, model, scaler):
        """Predict the next price from the most recent `window` days.

        NOTE(review): the ticker/source/date range are hard-coded to AAPL
        on Yahoo here rather than using self.asset_name — confirm intent.
        """
        apple_quote = wb.DataReader('AAPL', data_source='yahoo', start='2012-01-01', end='2019-12-17')
        new_df = apple_quote.filter([str(self.data_column)])
        last_window_days = new_df[-self.window:].values
        last_window_days_scaled = scaler.transform(last_window_days)
        X_test = []
        X_test.append(last_window_days_scaled)
        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        pred_price = model.predict(X_test)
        pred_price = scaler.inverse_transform(pred_price)
        print(pred_price)
        return pred_price
| aidingh/Project-Data-analytics-and-Finance | Project_Finance_and_Data_Analytics/Finance_Neural_Nets.py | Finance_Neural_Nets.py | py | 6,327 | python | en | code | 0 | github-code | 13 |
from flask import Flask, jsonify, make_response
from db import trade
app = Flask(__name__)
@app.route('/api/trade', methods=['GET'])
def get_trade():
    """Return all trade records as a JSON payload."""
    return jsonify({'result': trade.read()})
@app.errorhandler(404)
def not_found(error):
    """Render unknown routes as a JSON 404 instead of Flask's HTML page."""
    body = jsonify({'result': False})
    return make_response(body, 404)
if __name__ == '__main__':
app.run(debug=True)
| smartchris84/bitmexbot | rest/app.py | app.py | py | 384 | python | en | code | 0 | github-code | 13 |
import openpyxl
def writeExcel(file_name, file_name_database):
    """Append rows from `file_name`'s Sheet1 into the database workbook,
    skipping rows that are already present.

    Bug fixes versus the original:
    - the loop variable `i` was reused for both workbooks (shadowing), and
    - a new row was only appended when the inner scan happened to end on
      the *stale* last-row index, so at most one row per call was ever
      added and nothing was ever added to an empty database sheet.
    """
    import_wb = openpyxl.load_workbook(file_name)
    import_sheet = import_wb['Sheet1']
    wb = openpyxl.load_workbook(file_name_database)
    sheet = wb['Первый лист']
    # Snapshot existing rows as lists of their non-empty cell values.
    existing = []
    for r in range(2, sheet.max_row + 1):
        existing.append([sheet.cell(row=r, column=c).value
                         for c in range(1, sheet.max_column + 1)
                         if sheet.cell(row=r, column=c).value])
    for r in range(2, import_sheet.max_row + 1):
        incoming = [import_sheet.cell(row=r, column=c).value
                    for c in range(1, import_sheet.max_column + 1)
                    if import_sheet.cell(row=r, column=c).value]
        if incoming in existing:
            continue  # duplicate of an existing (or just-added) row
        target_row = sheet.max_row + 1
        for c, value in enumerate(incoming, start=1):
            sheet.cell(row=target_row, column=c, value=value)
        existing.append(incoming)
    wb.save(file_name_database)
    wb.close()
    import_wb.close()
def excelFormat(file_name_export, file_name_database):
    """Copy the database sheet cell-by-cell into a fresh workbook saved as
    '<file_name_export>.xlsx'."""
    source_wb = openpyxl.load_workbook(file_name_database)
    source_sheet = source_wb['Первый лист']
    export_wb = openpyxl.Workbook()
    export_sheet = export_wb.active
    for row in range(1, source_sheet.max_row + 1):
        for col in range(1, source_sheet.max_column + 1):
            value = source_sheet.cell(row=row, column=col).value
            export_sheet.cell(row=row, column=col, value=value)
    export_wb.save(f"{file_name_export}.xlsx")
    export_wb.close()
    source_wb.close()
from datetime import date
from six import iteritems
from seatsio.util import parse_date
class Chart:
    """A seating chart deserialized from the seatsio API JSON."""

    def __init__(self, data):
        self.id = data.get("id")
        self.key = data.get("key")
        self.status = data.get("status")
        self.name = data.get("name")
        self.published_version_thumbnail_url = data.get("publishedVersionThumbnailUrl")
        self.draft_version_thumbnail_url = data.get("draftVersionThumbnailUrl")
        # Events using this chart; None when the payload omits them.
        self.events = Event.create_list(data.get("events"))
        self.tags = data.get("tags")
        self.archived = data.get("archived")
        # Raw validation payload (errors/warnings) kept as a plain dict.
        self.validation = data.get("validation")
class ChartValidation:
    """Validation outcome for a chart: lists of errors and warnings."""

    def __init__(self, data):
        self.errors = data.get("errors")
        self.warnings = data.get("warnings")
class Category:
    """A seating category: key, display label, color and accessibility flag."""

    def __init__(self, key, label, color, accessible=False):
        self.key = key
        self.label = label
        self.color = color
        self.accessible = accessible

    def _fields(self):
        return (self.key, self.label, self.color, self.accessible)

    def __eq__(self, other):
        return self._fields() == other._fields()

    def __hash__(self):
        return hash(self._fields())

    @classmethod
    def create(cls, data):
        """Build a Category from its JSON dict."""
        return cls(data.get("key"), data.get("label"), data.get("color"), data.get("accessible"))

    @classmethod
    def create_list(cls, lst):
        """Build a list of categories; returns None when the input is None."""
        if lst is not None:
            return [cls.create(item) for item in lst]
class Event:
    """An event deserialized from the seatsio API JSON."""

    def __init__(self, data):
        self.id = data.get("id")
        self.key = data.get("key")
        self.name = data.get("name")
        # 'date' arrives as an ISO 'YYYY-MM-DD' string when present.
        self.date = None if data.get("date") is None else date.fromisoformat(data.get("date"))
        self.chart_key = data.get("chartKey")
        self.table_booking_config = TableBookingConfig.create(data.get("tableBookingConfig"))
        self.supports_best_available = data.get("supportsBestAvailable")
        self.for_sale_config = ForSaleConfig.create(data.get("forSaleConfig"))
        self.created_on = parse_date(data.get("createdOn"))
        self.updated_on = parse_date(data.get("updatedOn"))
        self.channels = Channel.createList(data.get("channels"))
        self.is_top_level_season = data.get("isTopLevelSeason")
        self.is_partial_season = data.get("isPartialSeason")
        self.is_event_in_season = data.get("isEventInSeason")
        self.top_level_season_key = data.get("topLevelSeasonKey")
        self.object_categories = data.get("objectCategories")
        self.categories = Category.create_list(data.get("categories"))
        self.is_in_the_past = data.get("isInThePast")

    @classmethod
    def create_list(cls, lst):
        """Deserialize a list of event dicts (Season or Event per entry);
        returns None when the input is None."""
        if lst is None:
            return None
        else:
            result = []
            for e in lst:
                result.append(event_from_json(e))
            return result

    def is_season(self):
        # Overridden by Season to return True.
        return False
class Season(Event):
    """An Event subtype that groups events into a season."""

    def __init__(self, data):
        super(Season, self).__init__(data)
        self.partial_season_keys = data.get("partialSeasonKeys")
        self.events = Event.create_list(data.get("events"))

    def is_season(self):
        return True
def event_from_json(json):
    """Deserialize into a Season when flagged as one, else a plain Event."""
    target_cls = Season if json.get("isSeason") else Event
    return target_cls(json)
class ForSaleConfig:
    """Which objects, area places and categories of an event are for sale."""

    def __init__(self, data):
        self.for_sale = data.get("forSale")
        self.objects = data.get("objects")
        self.area_places = data.get("areaPlaces")
        self.categories = data.get("categories")

    def __eq__(self, other):
        return self.for_sale == other.for_sale and \
               self.objects == other.objects and \
               self.area_places == other.area_places and \
               self.categories == other.categories

    def __hash__(self):
        # Bug fix: these fields arrive as JSON lists, and hashing a tuple
        # that contains a list raises TypeError. Freeze lists to tuples
        # first. (Assumes list elements are themselves hashable, which
        # holds for the usual string/int payloads.)
        def _freeze(value):
            return tuple(value) if isinstance(value, list) else value
        return hash((_freeze(self.for_sale), _freeze(self.objects),
                     _freeze(self.area_places), _freeze(self.categories)))

    @classmethod
    def create(cls, param):
        """Build from a JSON dict; returns None for None input."""
        if param is not None:
            return cls(param)

    def to_json(self):
        """Serialize to a dict, omitting unset optional fields."""
        json = {"forSale": self.for_sale}
        if self.objects is not None:
            json["objects"] = self.objects
        if self.area_places is not None:
            json["areaPlaces"] = self.area_places
        if self.categories is not None:
            json["categories"] = self.categories
        return json

    @classmethod
    def create_new(cls, for_sale, objects=None, area_places=None, categories=None):
        """Construct directly from keyword values instead of raw JSON."""
        return cls({"forSale": for_sale, "objects": objects, "areaPlaces": area_places, "categories": categories})
class TableBookingConfig:
    """How tables are booked: inherited, whole-table, per-seat, or custom
    per-table modes."""

    def __init__(self, mode, tables=None):
        self.mode = mode
        self.tables = tables  # only used for CUSTOM mode

    def __eq__(self, other):
        return self.mode == other.mode and \
               self.tables == other.tables

    def __hash__(self):
        # Bug fix: `tables` may be a list, which is unhashable inside a
        # tuple; freeze it to a tuple first.
        tables = tuple(self.tables) if isinstance(self.tables, list) else self.tables
        return hash((self.mode, tables))

    def to_json(self):
        """Serialize, omitting 'tables' when unset."""
        json = {"mode": self.mode}
        if self.tables is not None:
            json["tables"] = self.tables
        return json

    @classmethod
    def inherit(cls):
        return cls('INHERIT')

    @classmethod
    def all_by_table(cls):
        return cls('ALL_BY_TABLE')

    @classmethod
    def all_by_seat(cls):
        return cls('ALL_BY_SEAT')

    @classmethod
    def custom(cls, tables):
        return cls('CUSTOM', tables)

    @classmethod
    def create(cls, data):
        return cls(data.get("mode"), data.get("tables"))
class Channel:
    """A sales channel with an optional set of objects reserved for it."""

    def __init__(self, name, color, index, key=None, objects=None):
        self.key = key
        self.name = name
        self.color = color
        self.index = index
        self.objects = objects

    def __eq__(self, other):
        return self.key == other.key and \
               self.name == other.name and \
               self.color == other.color and \
               self.index == other.index and \
               self.objects == other.objects

    def __hash__(self):
        # Bug fix: `objects` arrives as a JSON list, and hashing a tuple
        # that contains a list raises TypeError; freeze it first.
        objects = tuple(self.objects) if isinstance(self.objects, list) else self.objects
        return hash((self.key, self.name, self.color, self.index, objects))

    @classmethod
    def create(cls, param):
        """Build from a JSON dict; returns None for None input."""
        if param is not None:
            return cls(param.get('name'), param.get('color'), param.get('index'), param.get('key'),
                       param.get('objects'))

    @classmethod
    def createList(cls, param):
        """Build a list of channels; returns None for None input."""
        if param is not None:
            return [cls.create(p) for p in param]
class ChartReport:
    """A chart report: maps each report key to a list of ChartObjectInfo."""

    def __init__(self, response_body):
        self.items = {}
        for key, value in iteritems(response_body):
            self.items[key] = []
            for item in value:
                self.items[key].append(ChartObjectInfo(item))

    def get(self, key):
        """Return the list of object infos for `key`, or None if absent."""
        return self.items.get(key)
class ChartObjectInfo:
    """Static (chart-level) properties of a single bookable object."""

    def __init__(self, item_data):
        self.label = item_data.get("label")
        self.labels = item_data.get("labels")
        self.ids = item_data.get("ids")
        self.category_label = item_data.get("categoryLabel")
        self.category_key = item_data.get("categoryKey")
        self.section = item_data.get("section")
        self.entrance = item_data.get("entrance")
        self.capacity = item_data.get("capacity")
        self.book_as_a_whole = item_data.get("bookAsAWhole")
        # e.g. seat / table / booth / generalAdmission
        self.object_type = item_data.get("objectType")
        self.left_neighbour = item_data.get('leftNeighbour')
        self.right_neighbour = item_data.get('rightNeighbour')
        self.distance_to_focal_point = item_data.get('distanceToFocalPoint')
        self.num_seats = item_data.get('numSeats')
        self.is_accessible = item_data.get("isAccessible")
        self.is_companion_seat = item_data.get("isCompanionSeat")
        self.has_restricted_view = item_data.get("hasRestrictedView")
class EventReport:
    """An event report: maps each report key to a list of EventObjectInfo."""

    def __init__(self, response_body):
        self.items = {}
        for key, value in iteritems(response_body):
            self.items[key] = []
            for item in value:
                self.items[key].append(EventObjectInfo(item))

    def get(self, key):
        """Return the list of object infos for `key`, or None if absent."""
        return self.items.get(key)
class EventObjectInfo:
    """Per-event status and properties of a single bookable object."""

    # Common values of `status`:
    FREE = "free"
    BOOKED = "booked"
    HELD = "reservedByToken"

    def __init__(self, item_data):
        self.status = item_data.get("status")
        self.label = item_data.get("label")
        self.labels = item_data.get("labels")
        self.ids = item_data.get("ids")
        self.category_label = item_data.get("categoryLabel")
        self.category_key = item_data.get("categoryKey")
        self.ticket_type = item_data.get("ticketType")
        self.order_id = item_data.get("orderId")
        self.for_sale = item_data.get("forSale")
        # Token holding this object when status == HELD.
        self.hold_token = item_data.get("holdToken")
        self.section = item_data.get("section")
        self.entrance = item_data.get("entrance")
        # Occupancy counters (general admission / tables).
        self.num_booked = item_data.get("numBooked")
        self.num_free = item_data.get("numFree")
        self.num_held = item_data.get("numHeld")
        self.capacity = item_data.get("capacity")
        self.book_as_a_whole = item_data.get("bookAsAWhole")
        self.object_type = item_data.get("objectType")
        self.extra_data = item_data.get("extraData")
        self.is_accessible = item_data.get("isAccessible")
        self.is_companion_seat = item_data.get("isCompanionSeat")
        self.has_restricted_view = item_data.get("hasRestrictedView")
        self.displayed_object_type = item_data.get("displayedObjectType")
        self.left_neighbour = item_data.get('leftNeighbour')
        self.right_neighbour = item_data.get('rightNeighbour')
        self.is_available = item_data.get('isAvailable')
        self.channel = item_data.get('channel')
        self.distance_to_focal_point = item_data.get('distanceToFocalPoint')
        self.holds = item_data.get('holds')
        self.num_seats = item_data.get('numSeats')
        self.variable_occupancy = item_data.get('variableOccupancy')
        self.min_occupancy = item_data.get('minOccupancy')
        self.max_occupancy = item_data.get('maxOccupancy')
class UsageSummaryForAllMonths:
    """Account usage across all months, plus the billing cutoff date."""

    def __init__(self, json):
        self.usage = list(map(lambda x: UsageSummaryForMonth(x), json.get("usage")))
        self.usage_cutoff_date = parse_date(json.get("usageCutoffDate"))
class UsageSummaryForMonth(object):
    """Number of used objects for a single month."""

    def __init__(self, json):
        self.month = Month.from_json(json.get("month"))
        # NOTE(review): camelCase attribute kept as-is for API compatibility.
        self.numUsedObjects = json.get("numUsedObjects")
class Month(object):
    """A calendar (year, month) pair used in usage reporting."""

    def __init__(self, year, month):
        self.year = year
        self.month = month

    def serialize(self):
        """Format as 'YYYY-MM' with a zero-padded month."""
        return "{}-{:0>2}".format(self.year, self.month)

    @classmethod
    def from_json(cls, json):
        """Build from a {'year': ..., 'month': ...} dict."""
        return cls(json.get("year"), json.get("month"))
class UsageDetailsForMonth:
    """Deserializer for a month's usage details across workspaces."""

    @classmethod
    def from_json(cls, json):
        return [UsageDetails(item) for item in json]
class UsageDetails:
    """Usage within one workspace, broken down by chart."""

    def __init__(self, json):
        self.workspace = json.get("workspace")
        self.usage_by_chart = list(map(lambda x: UsageByChart(x), json.get("usageByChart")))
class UsageByChart:
    """Usage for one chart, broken down by event."""

    def __init__(self, json):
        chart_json = json.get("chart")
        # Bug fix: always define `chart`. It used to be left unset when the
        # JSON lacked a chart, so accessing it raised AttributeError.
        self.chart = UsageChart(chart_json) if chart_json is not None else None
        self.usage_by_event = list(map(lambda x: UsageByEvent(x), json.get("usageByEvent")))
class UsageChart:
    """Chart identity (name + key) inside a usage report."""

    def __init__(self, json):
        self.name = json.get("name")
        self.key = json.get("key")
class UsageByEvent:
    """Number of used objects for one event."""

    def __init__(self, json):
        self.event = UsageEvent(json.get("event"))
        self.num_used_objects = json.get("numUsedObjects")
class UsageEvent:
    """Event identity (id + key) inside a usage report."""

    def __init__(self, json):
        self.id = json.get("id")
        self.key = json.get("key")
class UsageDetailsForEventInMonth:
    """Deserializer that picks the v1 or v2 per-object usage schema based
    on the payload's shape (v2 items carry 'usageByReason')."""

    @classmethod
    def from_json(cls, json):
        is_v2 = len(json) > 0 and "usageByReason" in json[0]
        target_cls = UsageForObjectV2 if is_v2 else UsageForObjectV1
        return [target_cls(item) for item in json]
class UsageForObjectV1:
    """Legacy (v1) per-object usage: first-booking/selection counters."""

    def __init__(self, json):
        self.object = json.get("object")
        self.num_first_bookings = json.get("numFirstBookings")
        self.first_booking_date = parse_date(json.get("firstBookingDate"))
        self.num_first_selections = json.get("numFirstSelections")
        self.num_first_bookings_or_selections = json.get("numFirstBookingsOrSelections")
class UsageForObjectV2:
    """Current (v2) per-object usage, broken down by reason."""

    def __init__(self, json):
        self.object = json.get("object")
        # NOTE(review): mixed-case name kept as-is — renaming would break callers.
        self.num_usedObjects = json.get("numUsedObjects")
        self.usage_by_reason = json.get("usageByReason")
class Workspace:
    """A seatsio workspace with its API keys and state flags."""

    def __init__(self, data):
        get = data.get
        self.id = get("id")
        self.name = get("name")
        self.key = get("key")
        self.secret_key = get("secretKey")
        self.is_test = get("isTest")
        self.is_active = get("isActive")
        self.is_default = get("isDefault")

    @classmethod
    def create(cls, param):
        """Build from a JSON dict; returns None for None input."""
        if param is None:
            return None
        return cls(param)
class HoldToken:
    """A temporary token that holds objects, with its expiry details."""

    def __init__(self, data):
        self.hold_token = data.get("holdToken")
        self.expires_at = parse_date(data.get("expiresAt"))
        self.expires_in_seconds = data.get("expiresInSeconds")
        self.workspace_key = data.get("workspaceKey")
class StatusChange:
    """One entry in an event's object status-change history."""

    def __init__(self, data):
        self.id = data.get("id")
        self.status = data.get("status")
        self.date = parse_date(data.get("date"))
        self.object_label = data.get("objectLabel")
        self.event_id = data.get("eventId")
        self.extra_data = data.get("extraData")
        # NOTE(review): 'origin' is accessed with [] (required key), unlike
        # the .get() pattern used elsewhere — KeyError if absent; confirm.
        self.origin = StatusChangeOrigin(data['origin'])
        self.is_present_on_chart = data.get("isPresentOnChart")
        self.not_present_on_chart_reason = data.get("notPresentOnChartReason")
        self.hold_token = data.get("holdToken")
class StatusChangeOrigin:
    """Where a status change came from (origin type and client IP)."""

    def __init__(self, data):
        self.type = data.get("type")
        self.ip = data.get("ip")
class BestAvailableObjects:
    """Result of a best-available search: labels plus per-object details."""

    def __init__(self, data):
        self.next_to_each_other = data.get("nextToEachOther")
        self.objects = data.get("objects")
        # Map of object label -> EventObjectInfo.
        self.objectDetails = {}
        for key, value in iteritems(data.get("objectDetails")):
            self.objectDetails[key] = EventObjectInfo(value)
class ChangeObjectStatusResult:
    """Result of a status change: map of object label -> EventObjectInfo."""

    def __init__(self, data):
        self.objects = {}
        for key, value in iteritems(data.get("objects")):
            self.objects[key] = EventObjectInfo(value)
| seatsio/seatsio-python | seatsio/domain.py | domain.py | py | 14,699 | python | en | code | 8 | github-code | 13 |
from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
import math
from Preprocess_data import is_image_file, load_img, save_img, get_test_set, get_training_set
# Testing settings
# Splits a paired image dataset into <dataset>/{train,test}/{A,B} folders,
# saving the input image under A and the target image under B for each sample.
parser = argparse.ArgumentParser(description='RestoNet-PyTorch-implementation')
parser.add_argument('--dataset', default='facades_subdir', help='facades')
parser.add_argument('--model', type=str, default='3000', help='model file to use')
parser.add_argument('--cuda', default=True, action='store_true', help='use cuda')
parser.add_argument('--datasetPath', default='../dataset/Facade', help='facades')
opt = parser.parse_args()
print(opt)
# Create the output directory tree (exists-check then mkdir; not race-safe).
if not os.path.exists(opt.dataset):
    os.mkdir(opt.dataset)
if not os.path.exists(os.path.join("{}/train".format(opt.dataset))):
    os.mkdir(os.path.join("{}/train".format(opt.dataset)))
    os.mkdir(os.path.join("{}/train/A".format(opt.dataset)))
    os.mkdir(os.path.join("{}/train/B".format(opt.dataset)))
if not os.path.exists(os.path.join("{}/test".format(opt.dataset))):
    os.mkdir(os.path.join("{}/test".format(opt.dataset)))
    os.mkdir(os.path.join("{}/test/A".format(opt.dataset)))
    os.mkdir(os.path.join("{}/test/B".format(opt.dataset)))
# Data loaders come from the project-local Preprocess_data module; each batch
# is (input, target, input_masked) with batch_size=1.
train_set = get_training_set(opt.datasetPath)
train_data_loader = DataLoader(dataset=train_set, num_workers=0, batch_size=1, shuffle=True)
test_set = get_test_set(opt.datasetPath)
testing_data_loader = DataLoader(dataset=test_set, num_workers=0, batch_size=1, shuffle=False)
# NOTE(review): criterionMSE is created (and moved to CUDA, which requires a
# GPU) but never used in this script — confirm whether it can be removed.
criterionMSE = nn.MSELoss()
criterionMSE = criterionMSE.cuda()
i=0
'''
for x in range(10):
    for batch in train_data_loader:
        input, target, input_masked = Variable(batch[0], volatile=True), Variable(batch[1], volatile=True), Variable(
            batch[2], volatile=True)
        input = input.cpu()
        target = target.cpu()
        in_img = input.data[0]
        target = target.data[0]
        merged_result = torch.cat((in_img,target), 2)
        #save_img(merged_result,"{}/train/{}_{}.jpg".format(opt.dataset, opt.dataset, i))
        save_img(in_img, "{}/train/A/{}_{}.jpg".format(opt.dataset, opt.dataset, i))
        save_img(target, "{}/train/B/{}_{}.jpg".format(opt.dataset, opt.dataset, i))
        i=i+1
'''
i=0
# Export the test split: one A/B image pair per sample.
# NOTE(review): Variable(..., volatile=True) was removed in PyTorch 0.4;
# modern code should use `with torch.no_grad():`. Also `input` shadows the
# builtin of the same name.
for batch in testing_data_loader:
    input, target, input_masked = Variable(batch[0], volatile=True), Variable(batch[1], volatile=True), Variable(
        batch[2], volatile=True)
    input = input.cpu()
    target = target.cpu()
    in_img = input.data[0]
    target = target.data[0]
    merged_result = torch.cat((in_img,target), 2)
    #save_img(merged_result,"{}/test/{}_{}.jpg".format(opt.dataset, opt.dataset, i))
    save_img(in_img, "{}/test/A/{}_{}.jpg".format(opt.dataset, opt.dataset, i))
    save_img(target, "{}/test/B/{}_{}.jpg".format(opt.dataset, opt.dataset, i))
    i=i+1
| dreamegg/2_Step_Inpainting | tools/make_pair.py | make_pair.py | py | 2,984 | python | en | code | 0 | github-code | 13 |
# cd Desktop\RETI NEURALI E ALG GENETICI\Cazzeggio\1) xor problem
# Trains a tiny 2-2-1 network on the XOR truth table and prints its guesses.
import sys

sys.path.insert(1, '/Users/Sax/Desktop/RETI NEURALI E ALG GENETICI/Cazzeggio/toy_nn')
# Was `from nn import *`: only NeuralNetwork is used, so import it explicitly
# to avoid polluting the namespace with unknown names from the toy library.
from nn import NeuralNetwork
import random

if __name__ == '__main__':
    # The four rows of the XOR truth table serve as the training set.
    training_data = [
        {"inputs": [0, 1], "targets": [1]},
        {"inputs": [1, 0], "targets": [1]},
        {"inputs": [1, 1], "targets": [0]},
        {"inputs": [0, 0], "targets": [0]},
    ]
    nn = NeuralNetwork(2, 2, 1)
    # Stochastic training: pick a random sample each iteration.
    for _ in range(100000):
        data = random.choice(training_data)
        nn.train(data["inputs"], data["targets"])
    guess1 = nn.feedforward([1, 0])
    guess2 = nn.feedforward([0, 1])
    guess3 = nn.feedforward([0, 0])
    guess4 = nn.feedforward([1, 1])
    print(f"inputs [1,0] -> guessed {guess1}")
    print(f"inputs [0,1] -> guessed {guess2}")
    print(f"inputs [0,0] -> guessed {guess3}")
    print(f"inputs [1,1] -> guessed {guess4}")
# linear sorting
# COUNTING Sort:
def counting_sort(arr, low, high):
    """Stable counting sort of *arr*, whose values must lie in [low, high].

    Runs in O(n + k) time where k = high - low + 1.
    """
    counts = [0] * (high - low + 1)
    # Histogram: offset every value by `low` (the original indexed counts by
    # the raw value, which overflows whenever low > 0).
    for value in arr:
        counts[value - low] += 1
    # Prefix sums turn counts into exclusive end positions.
    for i in range(1, len(counts)):
        counts[i] += counts[i - 1]
    result = [0] * len(arr)
    # Walk the input backwards so equal keys keep their relative order.
    for value in reversed(arr):
        counts[value - low] -= 1
        result[counts[value - low]] = value
    return result


if __name__ == "__main__":
    arr = [2, 4, 1, 6, 3, 5, 9, 8, 7]
    # BUG fixed: int(input(...).split()) raised TypeError (int of a list);
    # parse the two range bounds individually.
    low, high = map(int, input("enter the range a to b:").split())
    print(counting_sort(arr, low, high))
| saketha55/Daily_Challenges | D6/Practice/counting srt.py | counting srt.py | py | 237 | python | en | code | 0 | github-code | 13 |
8191052915 | import os
from collections import defaultdict
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from d3l.indexing.lsh.lsh_index import LSHIndex
from d3l.indexing.similarity_indexes import NameIndex, SimilarityIndex
from d3l.input_output.dataloaders import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer
from tabert_cl_training.input_prep import InputFormatter
from tabert_cl_training.train_model import TableEmbeddingModule
def load_pylon_tabert_model(ckpt_path: str):
    """Load the Lightning checkpoint at *ckpt_path* and return the wrapped
    embedding model (the ``.model`` attribute of the TableEmbeddingModule)."""
    lightning_module = TableEmbeddingModule.load_from_checkpoint(ckpt_path)
    return lightning_module.model
class PylonTabertTransformer:
    """Wraps a trained Pylon/TaBERT checkpoint and turns a table into one
    embedding vector per column.

    The heavy members (the torch model and the tokenizer-backed input
    formatter) are dropped on pickling and rebuilt from the stored
    configuration on unpickling, so instances can be shipped across
    processes cheaply.
    """

    def __init__(
        self,
        ckpt_path: str,
        embedding_dim: int,
        num_samples: int = None,
        cache_dir: Optional[str] = None
    ):
        self._ckpt_path = ckpt_path
        self._embedding_dimension = embedding_dim
        self._num_samples = num_samples
        # Keep the cache dir only if it actually exists on disk.
        self._cache_dir = (
            cache_dir if cache_dir is not None and os.path.isdir(cache_dir) else None
        )
        self._embedding_model = self.get_embedding_model(ckpt_path=self._ckpt_path)
        self._input_formatter = self.get_input_formatter(num_samples=self._num_samples)

    def __getstate__(self):
        # Exclude the model and formatter: they are large and not reliably
        # picklable. Everything else (paths, dims) is enough to rebuild them.
        d = self.__dict__
        self_dict = {k: d[k] for k in d if k != "_embedding_model" and k != "_input_formatter"}
        return self_dict

    def __setstate__(self, state):
        # Restore the light-weight state, then reconstruct the heavy members.
        self.__dict__ = state
        self._embedding_model = self.get_embedding_model(ckpt_path=self._ckpt_path)
        self._input_formatter = self.get_input_formatter(num_samples=self._num_samples)

    @property
    def cache_dir(self) -> Optional[str]:
        """Validated cache directory, or None if it did not exist."""
        return self._cache_dir

    def get_embedding_model(self, ckpt_path: str):
        """Load the TaBERT embedding model from a Lightning checkpoint."""
        model = load_pylon_tabert_model(ckpt_path)
        return model

    def get_input_formatter(self, num_samples: int):
        """Build the formatter that tokenizes tables (samples num_samples rows)."""
        input_formatter = InputFormatter(BertTokenizer.from_pretrained("bert-base-uncased"), num_rows=num_samples)
        return input_formatter

    def get_embedding_dimension(self):
        """Dimensionality of each column embedding produced by transform()."""
        return self._embedding_dimension

    def transform(self, table: pd.DataFrame) -> np.ndarray:
        """
        Extract a column embedding for each column in the table.

        Parameters
        ----------
        table: pd.DataFrame
            The table to extract column embeddings from.

        Returns
        -------
        np.ndarray
            One embedding vector per (encoded) column of the table — shape is
            presumably (num_columns, embedding_dim); confirm against the
            model. Columns beyond the model's encoding limit may be dropped.
        """
        tbl_input = self._input_formatter.generate_instance_input(table)
        # collate() expects a batch; wrap the single table in a list.
        tbl_tensor_dict = self._input_formatter.collate([tbl_input])
        # Get projected embeddings
        _, embeddings = self._embedding_model.inference(tbl_tensor_dict)
        # print("Embedding shape: ", embeddings.shape)
        # exit()
        # Take the only batch element and move it to host memory as numpy.
        embeddings = embeddings[0].detach().cpu().numpy()
        return embeddings
class PylonTabertEmbeddingIndex(SimilarityIndex):
    """LSH-backed column-similarity index over Pylon/TaBERT column embeddings.

    Every table readable through the data loader is embedded column-by-column
    with a PylonTabertTransformer and added to an LSH index under the id
    ``"<table>!<column>"``.
    """

    def __init__(
        self,
        ckpt_path: str,
        dataloader: DataLoader,
        embedding_dim: int,
        num_samples: int,
        index_hash_size: int = 1024,
        index_similarity_threshold: float = 0.5,
        index_fp_fn_weights: Tuple[float, float] = (0.5, 0.5),
        index_seed: int = 12345,
        data_root: Optional[str] = None,
        index_cache_dir: Optional[str] = None
    ):
        """
        Parameters
        ----------
        ckpt_path : str
            Path to the trained TaBERT checkpoint used for column embeddings.
        dataloader : DataLoader
            A DataLoader object used to read the data.
        embedding_dim : int
            Dimensionality of the column embeddings produced by the model.
        num_samples : int
            Number of rows sampled per table when building model inputs.
        index_hash_size : int
            The expected size of the input hashcodes.
        index_similarity_threshold : float
            Must be in [0, 1].
            Represents the minimum similarity score between two sets to be considered similar.
            The similarity type is given by the type of hash used to generate the index inputs.
            E.g., *RandomProjections* hash functions correspond to Cosine similarity.
        index_fp_fn_weights : Tuple[float, float]
            A pair of values between 0 and 1 denoting a preference for high precision or high recall.
            If the fp weight is higher then indexing precision is preferred. Otherwise, recall is preferred.
            Their sum has to be 1.
        index_seed : int
            The random seed for the underlying hash generator.
        data_root : Optional[str]
            A schema name if the data is being loaded from a database.
        index_cache_dir : Optional[str]
            A file system path for storing the embedding model.
        """
        super().__init__(dataloader=dataloader, data_root=data_root)

        self.ckpt_path = ckpt_path
        self.embedding_dim = embedding_dim
        self.num_samples = num_samples
        self.index_hash_size = index_hash_size
        self.index_similarity_threshold = index_similarity_threshold
        self.index_fp_fn_weights = index_fp_fn_weights
        self.index_seed = index_seed
        self.index_cache_dir = index_cache_dir

        self.transformer = PylonTabertTransformer(
            ckpt_path=self.ckpt_path,
            embedding_dim=self.embedding_dim,
            num_samples=self.num_samples,
            cache_dir=self.index_cache_dir
        )
        self.lsh_index = self.create_index()

    def create_index(self) -> LSHIndex:
        """
        Create the underlying LSH index with data from the configured data loader.

        Returns
        -------
        LSHIndex
            A new LSH index.
        """
        lsh_index = LSHIndex(
            hash_size=self.index_hash_size,
            dimension=self.embedding_dim,
            similarity_threshold=self.index_similarity_threshold,
            fp_fn_weights=self.index_fp_fn_weights,
            seed=self.index_seed,
        )

        num_indexed_tables = 0
        for table_name in tqdm(self.dataloader.get_table_names()):
            try:
                table_data = self.dataloader.read_table(table_name)
                if table_data.empty:
                    print(f"Table *{table_name}* is empty after preprocessing...")
                    print("Continue to the next table...")
                    print("=" * 80)
                    continue
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; restrict to ordinary errors.
            except Exception:
                print(f"Table *{table_name}* cannot be read correctly...")
                print("Continue to the next table...")
                print("=" * 50)
                continue

            num_indexed_tables += 1
            column_signatures = self.transformer.transform(table_data)
            column_names = table_data.columns
            # One index entry per embedded column, keyed "<table>!<column>".
            for i in range(column_signatures.shape[0]):
                lsh_index.add(input_id=str(table_name) + "!" + str(column_names[i]), input_set=column_signatures[i])

        print(f"Total number of tables: {len(self.dataloader.get_table_names())}")
        print(f"Number of tables indexed: {num_indexed_tables}")
        print("=" * 80)
        return lsh_index

    def query(
        self, query: np.ndarray, k: Optional[int] = None
    ) -> Iterable[Tuple[str, float]]:
        """
        Search for the nearest neighbours of the given query.

        Parameters
        ----------
        query : np.ndarray
            A pre-computed column embedding (this index is queried with
            vectors, not raw values).
        k : Optional[int]
            Only the top-k neighbours will be retrieved.
            If this is None all results are retrieved.

        Returns
        -------
        Iterable[Tuple[str, float]]
            A collection of (item id, score value) pairs.
            The item ids typically represent pre-indexed column ids.
            The score is a similarity measure between the query set and the indexed items.
        """
        if len(query) == 0:
            return []
        return self.lsh_index.query(
            query_id=None, query=query, k=k, with_scores=True)
class QueryEngine:
    """Combine several column-level similarity indexes into one table/column
    discovery engine.

    Convention: the **last** backend passed in must be the TaBERT embedding
    index — table queries embed the query table once with its transformer
    and feed the per-column embeddings to that backend.
    """

    def __init__(self, *query_backends: SimilarityIndex):
        """
        Create a new querying engine to perform data discovery in datalakes.

        Parameters
        ----------
        query_backends : SimilarityIndex
            A variable number of similarity indexes.
        """
        self.query_backends = query_backends
        self.tabert_embedding_backend = query_backends[-1]

    @staticmethod
    def group_results_by_table(
        target_id: str,
        results: Iterable[Tuple[str, Iterable[float]]],
        table_groups: Optional[Dict] = None,
    ) -> Dict:
        """
        Groups column-based results by table.
        For a given query column, at most one candidate column is considered for each candidate table.
        This candidate column is the one with the highest sum of similarity scores.

        Parameters
        ----------
        target_id : str
            Typically the target column name used to get the results.
        results : Iterable[Tuple[str, Iterable[float]]]
            One or more pairs of column names (including the table names) and backend similarity scores.
        table_groups: Optional[Dict]
            Iteratively created table groups.
            If None, a new dict is created and populated with the current results.

        Returns
        -------
        Dict
            A mapping of table names to similarity scores.
        """
        if table_groups is None:
            table_groups = defaultdict(list)
        candidate_scores = {}
        for result_item, result_scores in results:
            # Item ids are "<table>!<column>"; "!" is used instead of "."
            # because "." often appears in column names (e.g. abc.def).
            name_components = result_item.split("!")
            table_name, column_name = (
                "!".join(name_components[:-1]),
                name_components[-1:][0],
            )

            # Keep only the best-scoring candidate column per table.
            candidate_column, existing_scores = candidate_scores.get(
                table_name, (None, None)
            )
            if existing_scores is None or sum(existing_scores) < sum(result_scores):
                candidate_scores[table_name] = (column_name, result_scores)

        for table_name, (candidate_column, result_scores) in candidate_scores.items():
            table_groups[table_name].append(
                ((target_id, candidate_column), result_scores)
            )
        return table_groups

    @staticmethod
    def get_cdf_scores(
        score_distributions: List[np.ndarray], scores: np.ndarray
    ) -> np.ndarray:
        """
        Weight per-backend scores by their Empirical Cumulative Distribution
        Function (ECDF) values and average them.

        Parameters
        ----------
        score_distributions : List[np.ndarray]
            The samples of all existing scores for each of the LSH backend.
            Each of these has to be sorted in order to extract the
            Empirical Cumulative Distribution Function (ECDF) value.
        scores : np.ndarray
            An array of current scores for which to extract the ECDF values.

        Returns
        -------
        np.ndarray
            A vector of scores of size (1xc).
        """

        def ecdf(samples, values):
            return [
                np.searchsorted(samples[:, j], value, side="right") / len(samples)
                for j, value in enumerate(values)
            ]

        ecdf_weights = []
        for i in range(len(scores)):
            ecdfs = ecdf(score_distributions[i], scores[i])
            ecdf_weights.append(ecdfs)
        ecdf_weights = np.array(ecdf_weights)
        return np.average(scores, axis=0, weights=ecdf_weights)

    def column_query(
        self,
        column: pd.Series,
        tabert_embedding: np.ndarray,
        aggregator: Optional[callable] = None,
        k: Optional[int] = None,
    ) -> Iterable[Tuple[str, Iterable[float]]]:
        """
        Perform column-level top-k nearest neighbour search over the configured LSH backends.

        Parameters
        ----------
        column : pd.Series
            The column query as a Pandas Series.
            The series name will give the name queries.
            The series values will give the value queries.
        tabert_embedding : np.ndarray
            Column embedding from TaBERT model
        aggregator: Optional[callable] = None
            An aggregating function used to merge the results of all configured backends.
            If None then all scores are returned.
        k : Optional[int]
            Only the top-k neighbours will be retrieved from each backend.
            Then, these results are aggregated using the aggregator function and the results re-ranked to retrieve
            the top-k aggregated neighbours.
            If this is None all results are retrieved.

        Returns
        -------
        Iterable[Tuple[str, Iterable[float]]]
            A collection of (column id, aggregated score values) pairs.
            The scores are the values returned by the backends or one aggregated value if an aggregator is passed.
        """
        # One score slot per backend for every result id; missing = 0.0.
        results = defaultdict(lambda: [0.0] * len(self.query_backends))
        query_name = str(column.name)
        query_value = column.values.tolist()
        for i, backend in enumerate(self.query_backends):
            # Each backend type takes a different query representation.
            if isinstance(backend, NameIndex):
                query_results = backend.query(query=query_name, k=k)
            elif isinstance(backend, PylonTabertEmbeddingIndex):
                query_results = backend.query(query=tabert_embedding, k=k)
            else:
                query_results = backend.query(query=query_value, k=k)

            for rid, score in query_results:
                results[rid][i] = score

        if aggregator is None:
            # If no aggregation is used results are sorted by the mean of the scores.
            # Reverse sorting because the scores are similarities.
            results = sorted(
                results.items(),
                key=lambda items: sum(items[1]) / len(self.query_backends),
                reverse=True,
            )
        else:
            results = {rid: [aggregator(scores)] for rid, scores in results.items()}
            # Reverse sorting because the scores are similarities.
            results = sorted(
                results.items(), key=lambda items: items[1][0], reverse=True
            )

        if k is None:
            return results
        return results[:k]

    def table_query(
        self,
        table: pd.DataFrame,
        aggregator: Optional[callable] = None,
        k: Optional[int] = None,
        verbose: bool = False,
    ) -> Union[Iterable[Tuple], Tuple[Iterable[Tuple], Iterable[Tuple]]]:
        """
        Perform table-level top-k nearest neighbour search over the configured LSH backends.
        Note that this functions assumes that the table name is part of the canonical indexed item ids.
        In other words, it considers the first part of the item id separated by "!" to be the table name.

        Parameters
        ----------
        table : pd.DataFrame
            The table query as a Pandas DataFrame.
            Each column will be the subject of a column-based query.
        aggregator: callable
            An aggregating function used to merge the results of all configured backends at table-level.
        k : Optional[int]
            Only the top-k neighbours will be retrieved from each backend.
            Then, these results are aggregated using the aggregator function and the results re-ranked to retrieve
            the top-k aggregated neighbours.
            If this is None all results are retrieved.
        verbose: bool
            Whether or not to also return the detailed scores for each similar column to some query column.

        Returns
        -------
        Union[Iterable[Tuple], Tuple[Iterable[Tuple], Iterable[Tuple]]]
            Pairs of the form (candidate table name, aggregated similarity score).
            If verbosity is required, also return pairs with column-level similarity details.
        """
        # Embed the whole query table once; per-column embeddings are reused.
        tabert_column_embeddings = self.tabert_embedding_backend.transformer.transform(table)

        extended_table_results = None
        score_distributions = {}
        # Table should already be preprocessed.
        for i, column in enumerate(table.columns.to_list()):
            # Column scores are not aggregated when performing table queries.
            if i < tabert_column_embeddings.shape[0]:
                column_results = self.column_query(
                    column=table[column],
                    tabert_embedding=tabert_column_embeddings[i],
                    aggregator=None,
                    k=None
                )
                score_distributions[column] = np.sort(
                    np.array([scores for _, scores in column_results]), axis=0
                )
                extended_table_results = self.group_results_by_table(
                    target_id=column,
                    results=column_results,
                    table_groups=extended_table_results,
                )
            else:
                # Table has too many columns or long cell values, and TaBERT cuts off overflow columns due to the encoding limit
                assert tabert_column_embeddings.shape[0] < len(table.columns.to_list())
                break

        # FIX: a table with zero processed columns left this as None, making
        # the .keys() call below raise AttributeError.
        if extended_table_results is None:
            extended_table_results = defaultdict(list)

        table_results = {}
        for candidate in extended_table_results.keys():
            candidate_scores = np.array(
                [details[1] for details in extended_table_results[candidate]]
            )
            distributions = [
                score_distributions[details[0][0]]
                for details in extended_table_results[candidate]
            ]
            weighted_scores = self.get_cdf_scores(distributions, candidate_scores)
            if aggregator is None:
                table_results[candidate] = weighted_scores.tolist()
            else:
                table_results[candidate] = aggregator(weighted_scores.tolist())

        # Reverse sorting because the scores are similarities.
        table_results = sorted(table_results.items(), key=lambda pair: pair[1], reverse=True)
        if k is not None:
            table_results = table_results[:k]
        if verbose:
            extended_table_results = [(cand, extended_table_results[cand])
                                      for cand, _ in table_results]
            return table_results, extended_table_results
        return table_results
import speech_recognition

# Interactive loop: record from the microphone, transcribe with Google's
# speech API (Chinese), and ask the user whether to continue.
recognizer = speech_recognition.Recognizer()

print("开始识别!")
has_next = True
while has_next:
    try:
        with speech_recognition.Microphone() as mic:
            recognizer.adjust_for_ambient_noise(mic, duration=0.2)
            audio = recognizer.listen(mic)

            text = recognizer.recognize_google(audio_data=audio, language='zh-CN')
            print(f"识别到的文字: \033[1;35m{text}\033[0m")
    except Exception:
        print("未检测到语音!")
        break
    # SECURITY FIX: this used eval(input(...)), which executes arbitrary
    # Python typed by the user. Compare against the documented literal
    # 'True' instead; anything else stops the loop.
    has_next = input("是否继续? (输入 'True' 或 'False'): ").strip() == "True"
print("停止识别!")
| NormalLLer/Speech-Topic-Classification-on-ChatGPT | speech2text.py | speech2text.py | py | 619 | python | en | code | 0 | github-code | 13 |
import os

# Ordered (old, new) pairs applied to every article; pairs with "" delete
# the term. Order matters: e.g. "治疗" must be replaced before the generic
# deletions run.
_REPLACEMENTS = [
    ("治疗", "医治"),
    ("疾病", "病"),
    ("杭州", "宁波"),
    ("教师", ""),
    ("学生", ""),
    ("企业", ""),
    ("中国", ""),
    ("国家", ""),
    ("我国", ""),
    ("发展", ""),
    ("进行", ""),
    ("网络", ""),
    ("研究", ""),
    (" ", ""),
    ("社会", ""),
    ("我们", ""),
    ("他们", ""),
]


def scrub_text(text):
    """Apply the replacement table to *text* in order and return the result.

    Replaces the old str1..str17 chain of .replace() calls with a loop over
    a single table, so adding a term is a one-line change.
    """
    for old, new in _REPLACEMENTS:
        text = text.replace(old, new)
    return text


if __name__ == "__main__":
    filePath = 'C:/Users/Administrator/Downloads/'
    for filename in os.listdir(filePath):
        if filename.endswith(".txt"):
            # `with` closes the handle even on errors; the original also
            # re-closed the already-closed read handle after writing.
            with open(filePath + filename, 'r', encoding="utf-8") as source_file:
                string = source_file.read()
            print("-----------------------------------------------------原始文章-----------------------------------------------------------------")
            print(string)
            result = scrub_text(string)
            print("-----------------------------------------------------替换后的文章-----------------------------------------------------------------")
            print(result)
            target = r"C:/Users/Administrator/Desktop/targetArticle/" + filename
            with open(target, 'w', encoding="UTF-8") as file_object:
                file_object.write(result)
41697851534 | """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
Author: @Andrew Auyeung andrew.k.auyeung@gmail.com
Location: Metis-Project-3/lib/
Dependencies: cleaning.py
Functions in this module are used to prepare cleaned data for classification.
Features are engineered to determine how they change in the leading rows.
"""
import sys
print(sys.version)
import pandas as pd
import numpy as np
import pickle
def set_precip_level(thresh, df=None):
    """
    Flag rainy rows based on a precipitation threshold.

    The original docstring documented a ``df`` argument and a return value,
    but the code silently used the module-level global ``wdf`` and returned
    None. ``df`` is now an optional parameter (defaulting to the global
    ``wdf`` for backward compatibility) and the frame is returned.

    args:
        thresh : (float) inches of precipitation above which a row counts as raining
        df : (DataFrame) frame with a `precip` column; defaults to global `wdf`
    returns:
        df : (DataFrame) the same frame, with a new `raining` column of 1/0
    """
    if df is None:
        df = wdf  # backward-compatible fallback to the module-level frame
    df['raining'] = df.precip.map(lambda x: int(x > thresh))
    return df
def ma_shifts(window, lags, wdf, col_name):
    """
    Calculates the rolling average based on windowsize.
    Generates the lagged differences between the target and lags.

    args:
        window (int): Size of window to take moving average
        lags (iterable[int]): Lag offsets, in days
        wdf (DataFrame): weather dataframe
        col_name (str): name of column to create lag differences
    returns:
        ma_df (DataFrame): frame with rolling mean, error, and lagged error columns
    """
    # .copy() so we never assign into a view of wdf (the original triggered
    # pandas SettingWithCopyWarning and left wdf's internals ambiguous).
    ma_df = wdf[[col_name]].copy()

    # Rolling mean over the given window; NaN for the first window-1 rows.
    roll_col = f"{col_name}_roll_{window}"
    ma_df[roll_col] = ma_df[col_name].rolling(window=window).mean()

    # Error = deviation of the raw value from its rolling mean.
    col_err = f"{col_name}_error"
    ma_df[col_err] = ma_df[col_name] - ma_df[roll_col]

    # One shifted copy of the error column per requested lag.
    ma_df = ma_df.assign(**{f"{col_err}_lag_{lag_n}": ma_df[col_err].shift(lag_n) for lag_n in lags})
    return ma_df
def leading_trends(window, lags, wdf, col_name):
    """
    Compute the rolling-average error lags for ``col_name``.

    NOTE(review): this function was a line-for-line duplicate of
    ``ma_shifts`` — despite its docstring promising to additionally sum the
    lagged differences into a leading trend, that step was never
    implemented. It now delegates to ``ma_shifts`` so the shared logic
    lives in one place; observable behaviour is unchanged. Implement the
    trend summation here if it is still wanted.

    args:
        window (int): Size of window to take moving average
        lags (iterable[int]): Lag offsets, in days
        wdf (DataFrame): weather dataframe
        col_name (str): name of column to create lag differences
    returns:
        DataFrame: frame with rolling mean, error, and lagged error columns
    """
    return ma_shifts(window, lags, wdf, col_name)
| aauyeung19/rainOne | streamlit/feature_eng.py | feature_eng.py | py | 2,839 | python | en | code | 0 | github-code | 13 |
from fractions import Fraction
from random import choice
from random import randrange
from random import seed
def random_bool(probability):
    """
    Return True with the given probability.

    The probability is converted to an exact ``Fraction``; a uniform random
    integer below the denominator is then compared with the numerator,
    which yields True on exactly ``numerator`` of ``denominator`` equally
    likely outcomes.

    This replaces the previous implementation, which materialised a list of
    ``denominator`` booleans on every call (O(denominator) time and memory
    — pathological for fractions with large denominators) before picking
    one element with ``random.choice``. The exactness still relies on the
    Fraction produced by the fractions module and on the quality of the
    random module's generator.
    """
    fraction = Fraction(probability)
    # randrange(d) is uniform over 0..d-1, so P(result < numerator) = n/d.
    return randrange(fraction.denominator) < fraction.numerator
def random_bool_of_num(numTrue, numThings):
    """
    Draw a random boolean from a virtual pool of ``numThings`` entries,
    exactly ``numTrue`` of which are True; the rest are False.
    """
    return random_bool(Fraction(numTrue, numThings))
| alexander-n8hgeg5e/pylib | pylib/random.py | random.py | py | 1,368 | python | en | code | 0 | github-code | 13 |
18191316022 | """
Created by Jan Schiffeler on 18.10.20
jan.schiffeler[at]gmail.com
Changed by
MICROSERVICE for:
Finding gripping angles for a desired position
In:
Name | ID sender number: description, ...
frame | 1001 1: Cube frame
frame | 1001 2: Hand frame
Out:
Name | ID sender number: description, ...
frame | 1001 3: Angles
"""
# UPD multicast
from libcluon import OD4Session
from libcluon import opendlv_sim_Frame
# hand eye calibration matrix
from resources import M_r_w
from resources import best_chromosome
# genetic
from functions.genetic_parameters import *
from functions.genetic_functions import evaluate_chromosome, forward_kinematics
from functions.position_class import Position
# utilities
import numpy as np
from time import sleep
from functions.serial_connection import SerialConnection
import argparse
import logging
logging.basicConfig(level=logging.INFO)
def create_argparser():
    """
    Build the command-line parser for this microservice and parse argv.

    Options: -f/--frequency (updating frequency, default 2),
    -c/--cid (conference ID, default 111),
    -n/--name (logging output name, default "execution").

    :return: dict of parsed argument values
    """
    parser = argparse.ArgumentParser(description='template opendlv for python')
    option_table = (
        ('-f', '--frequency', 2, 'updating frequency'),
        ('-c', '--cid', 111, 'conference ID'),
        ('-n', '--name', "execution", 'logging output name'),
    )
    for short_flag, long_flag, default, help_text in option_table:
        parser.add_argument(short_flag, long_flag, required=False, default=default, help=help_text)
    return vars(parser.parse_args())
# handling setup arguments
arg = create_argparser()
logger = logging.getLogger(arg["name"])
own_cid = int(arg['cid'])
print(f"CID: {own_cid}")
freq = int(arg['frequency'])
# Seconds between updates at the requested frequency.
# NOTE(review): sleeping_time is not used anywhere in this chunk — confirm
# whether it is still needed.
sleeping_time = 1 / freq
print(f"Frequency: {freq}")

# callback function for receive handling
# Shared mutable state: these Position objects are updated in place by the
# UDP callback (process_message) and read by the main flow below.
cube_position = Position()
hand_position = Position()
def process_message(msg, senderStamp, timeStamps):
    """
    Route an incoming opendlv frame into the matching Position holder.

    Sender stamp 0 carries the cube frame and stamp 1 the hand frame; any
    other stamp is logged and ignored.

    :param msg: decoded opendlv_sim_Frame message with x/y/z fields
    :param senderStamp: cid of the sender (0 = cube, 1 = hand)
    :param timeStamps: array of timeStamps [sent, received, sample]
    :return: nothing, updates the module-level Position objects in place
    """
    logger.debug(f"Received distance; senderStamp= {senderStamp}")
    logger.debug(f"sent: {timeStamps[0]}, received: {timeStamps[1]}, sample time stamps: {timeStamps[2]}")
    logger.debug(msg)
    destinations = {0: cube_position, 1: hand_position}
    target = destinations.get(senderStamp)
    if target is not None:
        target.x = msg.x
        target.y = msg.y
        target.z = msg.z
def evaluate_fitness(ser, body, shoulder, elbow):
    """Drive the arm to the given joint angles and score how close the hand
    ends up to the cube (higher is better).

    Reads the module-level cube_position/hand_position, which are updated
    asynchronously by the UDP callback; the 5 s sleep gives the arm time to
    move and the positions time to refresh.

    :param ser: serial connection to the robot (run_serial sends the angles)
    :param body: body joint angle
    :param shoulder: shoulder joint angle
    :param elbow: elbow joint angle
    :return: 0.0 on a tracking failure, otherwise the inverse distance
    """
    ser.run_serial(body, shoulder, elbow)
    sleep(5)
    # NOTE(review): x == 500 appears to be a "marker not detected" sentinel
    # from the vision pipeline — confirm against the Position producer.
    if cube_position.x == 500 or hand_position.x == 500:
        # give bad fitness
        return 0.0
    # Position.__sub__ presumably returns the scalar distance between the
    # two frames — confirm; note this divides by zero if the distance is 0.
    return 1/(cube_position - hand_position)
def transform_world_robot(x, y, z):
    """Map a point from world coordinates to robot coordinates using the
    hand-eye calibration matrix M_r_w (homogeneous transform).

    :return: (x, y, z) tuple in the robot frame
    """
    homogeneous = np.array([[x], [y], [z], [1]])
    transformed = M_r_w.dot(homogeneous)
    return transformed[0, 0], transformed[1, 0], transformed[2, 0]
# setup UPD multicast
# list of all incoming feeds
incoming_id = 1001
incoming_type = opendlv_sim_Frame
session = OD4Session(cid=own_cid)
session.registerMessageCallback(incoming_id, process_message, incoming_type)
session.connect()

# initializations
serial = SerialConnection()
# Give the UDP session time to deliver the first cube/hand frames.
sleep(5)
print("Cube Position: {:.4f} | {:.4f} | {:.4f}".format(*cube_position.position()))
print("Hand Position: {:.4f} | {:.4f} | {:.4f}".format(*hand_position.position()))
# Convert the observed cube position from the world frame to the robot frame.
x, y, z = cube_position.position()
print(f'WORLD: X: {x} | Y: {y} | Z: {z}')
x_r, y_r, z_r = transform_world_robot(x, y, z)
print(f'ROBOT: X: {x_r} | Y: {y_r} | Z: {z_r}')
# Inverse kinematics via the evolved chromosome: target position -> angles.
theta1, theta2, theta3 = evaluate_chromosome(x=x_r, y=y_r, z=z_r, chromosome=best_chromosome, n_variables=n_variables,
                                             constants=constants)
print("Angles:")
# NOTE(review): 3.1415 is a truncated pi used for rad->deg display only;
# math.pi would be exact.
print(theta1 * 180/3.1415, theta2 * 180/3.1415, theta3 * 180/3.1415)
print("Forward:")
# Sanity check: forward kinematics of the solved angles should reproduce
# the target (note the axis remapping/sign flips in the printout).
x_new, y_new, z_new = forward_kinematics(theta1, theta2, theta3)
print('{1}, {0}, {2}'.format(-y_new, -x_new, z_new))

# move and grab
# First pass with the gripper open, second with it closed, then home.
serial.run_serial(theta1, theta2, theta3, False)
serial.wait_for_execution()
serial.run_serial(theta1, theta2, theta3, True)
serial.wait_for_execution()
serial.move_to_home()
serial.wait_for_execution()

# read data
print("Cube Position: {:.4f} | {:.4f} | {:.4f}".format(*cube_position.position()))
print("Hand Position: {:.4f} | {:.4f} | {:.4f}".format(*hand_position.position()))

serial.move_to_home()
serial.wait_for_execution()
serial.close_connection()
| Platygator/visually-supervised-learning-for-robotic-manipulation | main/execution.py | execution.py | py | 4,674 | python | en | code | 1 | github-code | 13 |
3354822880 | from pathlib import Path
import pytest
from pydantic import Field, BaseModel
from wunderkafka.time import now
from wunderkafka.tests import TestProducer, TestHTTPClient
from wunderkafka.serdes.avro import AvroModelSerializer, ConfluentClouderaHeadersHandler
from wunderkafka.serdes.store import AvroModelRepo
from wunderkafka.schema_registry import SimpleCache, ClouderaSRClient
from wunderkafka.producers.constructor import HighLevelSerializingProducer
class Signal(BaseModel):
    """Test Avro value model; ``Meta.name`` sets the Avro schema name."""

    # ts defaults to the current timestamp via wunderkafka's now() helper.
    ts: int = Field(default_factory=now)
    source: str = 'test'
    type: str = 'string'
    id: str = 'string'
    value: str = 'NA'

    class Meta:
        name = 'SignalsTest'
class DefaultKey(BaseModel):
    """Test Avro key model; Meta sets the schema's namespace and name."""

    # ts defaults to the current timestamp via wunderkafka's now() helper.
    ts: int = Field(default_factory=now)

    class Meta:
        namespace = "com.localhost"
        name = "SignalsTestKey"
@pytest.fixture
def ts() -> int:
    """Fixed timestamp so the serialized payload bytes are reproducible."""
    return 1632128298534
@pytest.fixture
def value_answer() -> bytes:
    # Expected wire bytes for Signal(ts=ts): the schema-registry header
    # followed by the Avro-encoded record fields.
    return b'\x01\x00\x00\x00\x00\x00\x00\x06\x9c\x00\x00\x00\x01\xcc\xb8\xeb\xa6\x80_\x08test\x0cstring\x0cstring\x04NA'
@pytest.fixture
def test_producer() -> TestProducer:
    """In-memory producer double that records sent messages in `.sent`."""
    return TestProducer()
@pytest.fixture
def clean_producer(test_producer: TestProducer, sr_root: Path) -> HighLevelSerializingProducer:
    """Serializing producer wired entirely to in-memory test doubles.

    No real Kafka or schema registry is contacted: the HTTP client serves
    schemas from sr_root on disk (sr_root is presumably provided by a
    conftest fixture — it is not defined in this file).
    """
    return HighLevelSerializingProducer(
        producer=test_producer,
        schema_registry=ClouderaSRClient(TestHTTPClient(sr_root), SimpleCache()),
        header_packer=ConfluentClouderaHeadersHandler().pack,
        serializer=AvroModelSerializer(),
        store=AvroModelRepo(),
        mapping={},
    )
def test_avro_producer_moving_parts_value_only(
    clean_producer: HighLevelSerializingProducer,
    test_producer: TestProducer,
    topic: str,
    ts: int,
    value_answer: bytes
) -> None:
    """Sending a value-only message serializes the value and leaves key None."""
    clean_producer.set_target_topic(topic, Signal)
    value = Signal(ts=ts)
    clean_producer.send_message(topic, value)
    [message] = test_producer.sent
    assert message.key is None
    assert message.value == value_answer
def test_avro_producer_moving_parts_value_and_key(
    clean_producer: HighLevelSerializingProducer,
    test_producer: TestProducer,
    value_answer: bytes,
    topic: str,
    ts: int,
) -> None:
    """Sending key + value serializes both with their respective schemas."""
    clean_producer.set_target_topic(topic, Signal, DefaultKey)
    key = DefaultKey(ts=ts)
    value = Signal(ts=ts)
    clean_producer.send_message(topic, value, key)
    [message] = test_producer.sent
    # Expected key bytes: header + Avro-encoded DefaultKey(ts=ts).
    assert message.key == b'\x01\x00\x00\x00\x00\x00\x00\x06\x9d\x00\x00\x00\x01\xcc\xb8\xeb\xa6\x80_'
    assert message.value == value_answer
| severstal-digital/wunderkafka | tests/integration/cloudera/test_serializing_avromodel.py | test_serializing_avromodel.py | py | 2,537 | python | en | code | 3 | github-code | 13 |
36093637435 | # Program to evaluate the sum of all the amicable numbers under 10,000
amicable_numbers = []
# Function getDivisors(num) finds the divisors of any inputted number and runs the getPossiblePair(divisors, checkNum) function on them
def getDivisors(num):
    # Proper divisors of num are all <= num // 2; num itself is excluded.
    firstNum = num
    set_of_divisors = []
    for i in range(1, (num // 2) + 1):
        if num % i == 0:
            set_of_divisors.append(i)
    getPossiblePair(set_of_divisors, firstNum)
# Function getPossiblePair(divisors, checkNum) checks if the sum of the inputted divisors has an amicable pair
# If the program finds an amicable pair for the sum it adds both the sum and the found pair to the amicable_numbers list
def getPossiblePair(divisors, checkNum):
    possiblePair = 0
    second_set_divisors = []
    # Candidate partner: the sum of checkNum's proper divisors.
    possiblePair = sum(divisors)
    for i in range(1, (possiblePair // 2) + 1):
        if possiblePair % i == 0:
            second_set_divisors.append(i)
    # Amicable pair: the two divisor-sums point back at each other.
    if sum(second_set_divisors) == checkNum:
        # The following if statement checks if the found pair is less than 10,000 and only then adds the pair to the amicable_numbers list
        if possiblePair < 10000:
            # Exclude perfect numbers, which would pair with themselves.
            if possiblePair != checkNum:
                amicable_numbers.append(possiblePair)
                amicable_numbers.append(checkNum)
# Running the getDivisors(num) function on every number from 1 to 10,000
for i in range(1, 10000):
    getDivisors(i)
# Adding all the values of amicable_numbers to list_of_amicables without duplicates
# (each amicable pair is discovered twice, once from each member)
list_of_amicables = []
for i in amicable_numbers:
    if i not in list_of_amicables:
        list_of_amicables.append(i)
# Printing the sum of all the amicable numbers found
print(sum(list_of_amicables)) | SuhaybDev/Project-Euler | 21_amicable_numbers.py | 21_amicable_numbers.py | py | 1,704 | python | en | code | 0 | github-code | 13 |
2954636855 | import urllib.request
import httplib2
import os
import pickle
import hashlib
from googleapiclient.discovery import build
def make_dir(path):
    """Create directory `path` if it does not already exist.

    Uses os.makedirs(..., exist_ok=True) instead of an isdir()/mkdir()
    pair: that removes the check-then-create race and also succeeds when
    intermediate parent directories are missing.
    """
    os.makedirs(path, exist_ok=True)
def make_correspondence_table(correspondence_table, original_url, hashed_url):
    """Record the original URL -> hashed URL mapping in the given table."""
    correspondence_table.update({original_url: hashed_url})
def get_image_url(api_key, cse_key, search_word, page_limit, save_dir_path):
    """Query the Google Custom Search API for image links.

    Fetches up to `page_limit` pages of 10 results for `search_word`,
    pickles the raw API responses under `<save_dir_path>/api_response_file`
    and returns the list of image URLs found.
    """
    service = build("customsearch", "v1", developerKey=api_key)
    page_limit = page_limit  # NOTE(review): no-op self-assignment; harmless
    start_index = 1
    response = []
    img_list = []
    make_dir(save_dir_path)
    save_res_path = os.path.join(save_dir_path, 'api_response_file')
    make_dir(save_res_path)
    for nPage in range(0, page_limit):
        try:
            response.append(service.cse().list(
                q=search_word,
                cx=cse_key,
                lr='lang_ja',
                num=10,
                start=start_index,
                searchType='image',
            ).execute())
            # The API response reports where the next page starts.
            start_index = response[nPage].get("queries").get("nextPage")[
                0].get("start_index")
        except Exception as e:
            print(e)
    # Keep the raw responses on disk for later inspection/debugging.
    with open(os.path.join(save_res_path, 'api_response.pickle'), mode='wb') as f:
        pickle.dump(response, f)
    for one_res in range(len(response)):
        if len(response[one_res]['items']) > 0:
            for i in range(len(response[one_res]['items'])):
                img_list.append(response[one_res]['items'][i]['link'])
    return img_list
def get_image(save_dir_path, img_list):
    """Download every image URL in `img_list` into `<save_dir_path>/imgs`.

    Each file is named with the SHA3-256 hex digest of its URL, so repeated
    runs overwrite instead of duplicating; the URL -> hash mapping is
    recorded in the module-level `correspondence_table`. URLs without a
    recognised image extension are skipped silently.
    """
    make_dir(save_dir_path)
    save_img_path = os.path.join(save_dir_path, 'imgs')
    make_dir(save_img_path)
    # The original also built an unused urllib opener here; removed.
    http = httplib2.Http(".cache")
    for i in range(len(img_list)):
        try:
            url = img_list[i]
            extension = os.path.splitext(img_list[i])[-1]
            if extension.lower() in ('.jpg', '.jpeg', '.gif', '.png', '.bmp'):
                encoded_url = url.encode('utf-8')  # required encoding for hashed
                hashed_url = hashlib.sha3_256(encoded_url).hexdigest()
                full_path = os.path.join(save_img_path, hashed_url + extension.lower())
                response, content = http.request(url)
                with open(full_path, 'wb') as f:
                    f.write(content)
                print('saved image... {}'.format(url))
                make_correspondence_table(correspondence_table, url, hashed_url)
        except Exception as e:
            # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
            # catch Exception and report which URL failed and why.
            print("failed to download images. {}: {}".format(url, e))
            continue
if __name__ == '__main__':
    # Placeholders: fill in real Custom Search API credentials before running.
    API_KEY = '********'
    CUSTOM_SEARCH_ENGINE = '********'
    page_limit = 1
    search_word = 'dog'
    save_dir_path = './outputs/{}'.format(search_word)
    # URL -> hash table, filled by get_image via make_correspondence_table.
    correspondence_table = {}
    img_list = get_image_url(API_KEY, CUSTOM_SEARCH_ENGINE, search_word,
                             page_limit, save_dir_path)
    get_image(save_dir_path, img_list)
| dsonoda/ttango-engine | get_images/get_images.py | get_images.py | py | 2,968 | python | en | code | 0 | github-code | 13 |
20778541884 | # Python program to find the H.C.F of two input number
# Read both numbers as integers: input() returns str, and passing strings
# into calculateHCF makes its arithmetic fail with a TypeError.
num1 = int(input("enter first number:"))
num2 = int(input("second number:"))
def calculateHCF(x, y):
    """Return the highest common factor (GCD) of two integers.

    Uses the Euclidean algorithm: O(log min(x, y)) instead of trial
    division over every candidate, and well-defined when one argument
    is zero (where the trial-division version left `hcf` unbound).
    """
    x, y = abs(x), abs(y)
    while y:
        x, y = y, x % y
    return x
print("The H.C.F. of", num1, "and", num2, "is", calculateHCF(num1, num2)) | SShital/PythonAssignments | BasicAssignemnt/Ass22.py | Ass22.py | py | 431 | python | en | code | 0 | github-code | 13 |
478580730 | """
This file not meant to be modified to configure a local nor a production instance.
To configure the instance, please consider:
- editing a ".env" file at the root of the "oxe-api" directory
- passing the environment variables via "-e" flag if you use a docker container
"""
import os
from dotenv import load_dotenv
load_dotenv()
def _getenv(key, default=None, mandatory=True):
if mandatory:
if key in os.environ or default is not None:
return os.getenv(key, default)
raise KeyError("environment variable '%s' not set" % key)
return os.getenv(key, default)
# Deployment flavour; everything below relaxes its requirements under "dev".
ENVIRONMENT = _getenv('ENVIRONMENT', default='dev')
PORT = _getenv('PORT', default='5002')
# Mandatory secrets/endpoints: a missing value raises KeyError at import time.
JWT_SECRET_KEY = _getenv('JWT_SECRET_KEY', mandatory=True)
REG_FORM_REFERENCE = _getenv('REG_FORM_REFERENCE', default='FORM-ECCC-001')
OPENXECO_API_ENDPOINT = _getenv('OPENXECO_API_ENDPOINT', mandatory=True)
ECCC_API_ENDPOINT = _getenv('ECCC_API_ENDPOINT', mandatory=True)
ECCC_API_KEY = _getenv('ECCC_API_KEY', mandatory=True)
# Optional HTTP basic-auth credentials for the ECCC endpoint.
ECCC_HTTP_AUTH_LOGIN = _getenv('ECCC_HTTP_AUTH_LOGIN', mandatory=False, default=None)
ECCC_HTTP_AUTH_PASS = _getenv('ECCC_HTTP_AUTH_PASS', mandatory=False, default=None)
HTTP_PROXY = _getenv('HTTP_PROXY', mandatory=False)
# Comma-separated allowed CORS domain patterns; defaults to any localhost
# port in dev and is mandatory in every other environment.
CORS_DOMAINS = _getenv('CORS_DOMAINS',
                       mandatory=ENVIRONMENT != "dev",
                       default="localhost:\\d*" if ENVIRONMENT == "dev" else None)
# remove extra spaces, remove empty items
domains = filter(len, map(str.strip, CORS_DOMAINS.split(",")))
# pylint: disable=unnecessary-lambda
# Each domain becomes a regex that also matches any scheme and any subdomain.
CORS_ORIGINS = list(map(lambda d: r'((http|https)://)?(.*\.)?{}'.format(d), domains))
| CybersecurityLuxembourg/openxeco-eccc-middleware | oxe-api/config/config.py | config.py | py | 1,846 | python | en | code | 0 | github-code | 13 |
3614020518 | from pwn import *
from Crypto.Util.number import *
import json, codecs
# Decoder registry: maps the challenge's "type" tag to a callable that
# returns the decoded value as bytes.
d = {
    "base64": b64d,
    "hex": unhex,
    "rot13": lambda s: codecs.encode(s, 'rot_13').encode(),
    "bigint": lambda s: long_to_bytes(int(s, 0)),
    "utf-8": bytes
}
def dec(o):
    # `o` is the parsed challenge JSON: {"type": ..., "encoded": ...}.
    return d[o["type"]](o["encoded"])
io = remote("socket.cryptohack.org", 13377)
# The server sends 100 encoding challenges, one JSON object per line;
# answer each with its decoded form to receive the flag.
for _ in range(100):
    o = json.loads(io.recvline())
    io.sendline(json.dumps({"decoded": dec(o).decode()}))
io.interactive() | ThomasNJordan/CryptoHack | DataFormats/Conversion/better_remote.py | better_remote.py | py | 492 | python | en | code | 2 | github-code | 13 |
35373545869 | import numpy as np
import matplotlib.pylab as plt
from qampy import equalisation, signals, impairments, helpers
# --- Simulation parameters (consumed by qampy below) -----------------------
fb = 40.e9
os = 2  # oversampling factor; NB: this name shadows the stdlib `os` module
fs = os*fb
N = 2**18
theta = np.pi/2.35
M = 16  # QAM constellation order
snr = 24
muCMA = 1e-3   # step sizes of the two equaliser stages (mcma, then mrde)
muRDE = 0.5e-3
ntaps = 30
t_pmd = 50e-12  # presumably the PMD differential group delay in seconds -- confirm with qampy docs
# --- Generate the dual-polarisation signal and apply impairments -----------
sig = signals.ResampledQAM(M, N, nmodes=2, fb=fb, fs=fs, resamplekwargs={"beta":0.01, "renormalise":True})
sig = impairments.change_snr(sig, snr)
SS = impairments.apply_PMD(sig, theta, t_pmd)
# Two-stage adaptive equalisation: mcma pre-convergence, then mrde.
E_s, wxy_s, (err_s, err_rde_s) = equalisation.dual_mode_equalisation(SS, (muCMA, muRDE), ntaps,
                                                                    methods=("mcma", "mrde"))
E_s = helpers.normalise_and_center(E_s)
# Error-vector magnitude before/after equalisation, plus the GMI metric.
evm = sig[:, ::2].cal_evm()
evmE_s = E_s.cal_evm()
gmiE = E_s.cal_gmi()
print(gmiE)
# --- Figure 1: constellations (recovered top row, original bottom row) -----
plt.figure()
plt.subplot(221)
plt.hexbin(E_s[0].real, E_s[0].imag)
plt.text(0.999, 0.9, r"$EVM_x={:.1f}\%$".format(100*evmE_s[0]), color='w', horizontalalignment="right", fontsize=14)
plt.subplot(222)
plt.title('Recovered MCMA/MRDE')
plt.hexbin(E_s[1].real, E_s[1].imag)
plt.text(0.999, 0.9, r"$EVM_y={:.1f}\%$".format(100*evmE_s[1]), color='w', horizontalalignment="right", fontsize=14)
plt.subplot(223)
plt.title('Original')
plt.hexbin(sig[0,::2].real, sig[0,::2].imag)
plt.text(0.999, 0.9, r"$EVM_x={:.1f}\%$".format(100*evm[0]), color='w', horizontalalignment="right", fontsize=14)
plt.subplot(224)
plt.hexbin(sig[1,::2].real, sig[1,::2].imag)
plt.text(0.999, 0.9, r"$EVM_y={:.1f}\%$".format(100*evm[1]), color='w', horizontalalignment="right", fontsize=14)
# --- Figure 2: equaliser taps and per-stage error traces -------------------
plt.figure()
plt.subplot(221)
plt.title('MCMA/MRDE Taps X-Pol')
plt.plot(wxy_s[0, 0,:], 'r')
plt.plot(wxy_s[0, 1,:], '--r')
plt.subplot(223)
plt.title('MCMA/MRDE Taps Y-Pol')
plt.plot(wxy_s[1, 0,:], 'g')
plt.plot(wxy_s[1, 1,:], '--g')
plt.subplot(222)
plt.title('MCMA/MRDE error mcma')
plt.plot(abs(err_s[0]), color='r')
plt.plot(abs(err_s[1]), color='g')
plt.subplot(224)
plt.title('MCMA/MRDE error mrde')
plt.plot(abs(err_rde_s[0]), color='r')
plt.plot(abs(err_rde_s[1]), color='g')
plt.show()
| ChalmersPhotonicsLab/QAMpy | Scripts/mrde_equaliser.py | mrde_equaliser.py | py | 2,019 | python | en | code | 54 | github-code | 13 |
21271933714 | import mlflow
import os
# Point the client at a locally running MLflow tracking server.
mlflow.set_tracking_uri("http://127.0.0.1:5000")
if __name__ == "__main__":
    # Start a new MLflow run
    with mlflow.start_run():
        mlflow.log_param("threshold", 5)
        mlflow.log_metric("timestamp", 0.001)
        # Ensure the artifact file exists before logging
        file_path = "nba.csv"
        if os.path.exists(file_path):
            mlflow.log_artifact(file_path)
        else:
            print(f"File {file_path} does not exist. Unable to log artifact.")
| jithsg/jupyter-notebook | test-mlflow.py | test-mlflow.py | py | 509 | python | en | code | 0 | github-code | 13 |
27543730676 | import numpy as np
import random
import sys
# calculate clusters for each data points
def nearestCluster(num, meanvalues):
    """Return the 1-based index of the mean closest to `num`.

    Ties resolve to the lowest index; an empty `meanvalues` yields 0.
    """
    if not meanvalues:
        return 0
    closest = min(range(len(meanvalues)),
                  key=lambda idx: abs(num - meanvalues[idx]))
    return closest + 1
# update new mean values for each cluster
def newMean(cluster):
    """Return the per-cluster means, ordered by cluster id 1..len(cluster)."""
    return [np.mean(cluster[cluster_id])
            for cluster_id in range(1, len(cluster) + 1)]
# printing clusters after each iteration
def printIter(i, cluster, k, meanvalues):
    """Print the cluster contents and the current means for iteration `i`."""
    print(f'Iteration #{i}')
    for cluster_id in range(1, k + 1):
        print(f'k{cluster_id}: {cluster[cluster_id]}')
    print()
    for pos, value in enumerate(meanvalues, start=1):
        print(f'Mean{pos} = {value}')
    print()
# main code
# Read the data points and the desired number of clusters from stdin.
# NOTE(review): `n` is read but never validated against len(arr).
n = int(input('Enter the number of elements: '))
print('Enter numbers: ')
arr = [int(x) for x in input().split()]
k = int(input('Enter the number of clusters: '))
# Initial means: k distinct random values below max(arr), kept sorted.
meanValues = random.sample(range(1, max(arr)), k)
meanValues.sort()
clusters = {}
for i in range(k):
    clusters[i+1] = []
for i in arr:
    clusters[nearestCluster(i, meanValues)].append(i)
iterations = 1
# Lloyd's iteration: recompute means and reassign points until the
# partition stops changing.
while True:
    printIter(iterations, clusters, k, meanValues)
    iterations += 1
    meanValues = newMean(clusters)
    newClusters = {}
    for i in range(k):
        newClusters[i + 1] = []
    for i in arr:
        newClusters[nearestCluster(i, meanValues)].append(i)
    if clusters == newClusters:
        break
    clusters = newClusters
# testcase -> 1 2 5 6 7 8 9 10 11 12 12 15 17 20 23
# 2 4 10 12 3 20 30 11 25
# 5 9 11 13 2 20 18 25 50 30 8 32 24 27 17
| aditya9110/ScratchML | KMeans Clustering.py | KMeans Clustering.py | py | 1,698 | python | en | code | 1 | github-code | 13 |
10967867093 | """Functions to build a complex SQL Select statement to query variant.
In the most of cases, you will only use build_sql_query function.
Examples
conn = sqlite3.Connection("::memory::")
query = build_sql_query(fields, source, filters)
conn.execute(query)
Fields contains columns to select according sql table that they belong to.
{
"variants": ["chr","pos","ref"]
"annotations": ["gene","impact"],
"samples": [
{"name":"boby", "fields":["gt","dp"]},
{"name":"boby", "fields":["gt","dp"]}
]
}
"""
# Standard imports
import sqlite3
import re
from functools import lru_cache
# Custom imports
from cutevariant.core import sql
import cutevariant.constants as cst
from cutevariant import LOGGER
# TODO : can be move somewhere else ? In common ?
# Function names used in VQL
# sample["boby"].gt
# WORDSET["truc"]
WORDSET_FUNC_NAME = "WORDSET"
PY_TO_SQL_OPERATORS = {
"$eq": "=",
"$gt": ">",
"$gte": ">=",
"$lt": "<",
"$lte": "<=",
"$in": "IN",
"$nin": "NOT IN",
"$ne": "!=",
"$regex": "REGEXP",
"$nregex": "NOT REGEXP",
"$and": "AND",
"$or": "OR",
"$has": "HAS",
"$nhas": "NOT HAS",
}
PY_TO_VQL_OPERATORS = {
"$eq": "=",
"$gt": ">",
"$gte": ">=",
"$lt": "<",
"$lte": "<=",
"$in": "IN",
"$nin": "!IN",
"$ne": "!=",
"$regex": "=~",
"$nregex": "!~",
"$and": "AND",
"$or": "OR",
"$has": "HAS",
"$nhas": "!HAS",
}
def filters_to_flat(filters: dict):
"""Recursive function to convert the filter hierarchical dictionnary into a list of fields
Examples::
filters = {
'$and': [
{"ref":"A"},
{"alt","C"}
]
}
filters = _flatten_filter(filters)
filters is now:
[
{"ref":"A", "alt":"C"}
]
"""
flatten = []
for k, v in filters.items():
if isinstance(v, list):
for i in v:
flatten += filters_to_flat(i)
else:
if filters not in flatten:
flatten.append(filters)
return flatten
def is_annotation_join_required(fields, filters, order_by=None) -> bool:
    """Tell whether the query touches any "ann." field and therefore needs
    the SQL join on the annotations table.

    The selected fields, the ORDER BY targets and every leaf filter
    condition are all inspected.
    """
    if any(field.startswith("ann.") for field in fields):
        return True
    if order_by and any(field.startswith("ann.") for field, _direction in order_by):
        return True
    return any(
        list(condition.keys())[0].startswith("ann.")
        for condition in filters_to_flat(filters)
    )
def samples_join_required(fields, filters, order_by=None) -> list:
    """List the sample names whose genotype table must be joined.

    A sample is required whenever a "samples.<name>.<field>" entry appears
    in the selected fields, in the ORDER BY list or in any leaf filter
    condition. The result order is unspecified (built from a set).
    """
    samples = set()
    for field in fields:
        if field.startswith("samples"):
            # "samples.<name>.<field>"; the name itself may contain dots.
            _, *sample, _ = field.split(".")
            sample = ".".join(sample)
            samples.add(sample)
    if order_by:
        for by in order_by:
            field, direction = by
            if field.startswith("samples"):
                _, *sample, _ = field.split(".")
                sample = ".".join(sample)
                samples.add(sample)
    for condition in filters_to_flat(filters):
        key = list(condition.keys())[0]
        if key.startswith("samples"):
            _, *sample, _ = key.split(".")
            sample = ".".join(sample)
            samples.add(sample)
    return list(samples)
# def wordset_data_to_vql(wordset_expr: tuple):
# """Get the VQL version of a Wordset expression (`(WORDSET', 'boby')`)
# Example:
# >>> wordset_data_to_vql(("WORDSET", "boby"))
# "WORDSET['boby']"
# Args:
# wordset_expr (tuple): Tuple of 2 items: First one is "WORDSET",
# second one is the name of the queried wordset.
# Returns:
# (str): Query statement
# """
# return "{}['{}']".format(*wordset_expr)
# def fields_to_vql(field) -> str:
# """Return field as VQL syntax
# This is used to convert tuple field and create a VQL query
# Examples:
# >>> field = ("sample", "boby", "gt")
# >>> field_to_vql(field)
# "sample['boby'].gt"
# Args:
# field(str or tuple): a Field
# Returns:
# str: fields for vql query
# """
# if isinstance(field, tuple):
# if field[0] == GENOTYPE_FUNC_NAME and len(field) == 3:
# return f"{field[0]}['{field[1]}'].{field[2]}"
# # str
# return field
# refactor
def fields_to_vql(fields) -> list:
    """Translate dotted sample fields into VQL bracket syntax.

    "samples.<name>.<param>" becomes "samples['<name>'].<param>"; every
    other field passes through untouched.
    """
    translated = []
    for field in fields:
        if not field.startswith("samples."):
            translated.append(field)
            continue
        _, *name_parts, param = field.split(".")
        sample_name = ".".join(name_parts)
        translated.append(f"samples['{sample_name}'].{param}")
    return translated
def fields_to_sql(fields, use_as=False) -> list:
    """Map dotted field names onto fully-qualified SQL column references.

    "ann.X"            -> `annotations`.`X`   (alias `ann.X`)
    "samples.<name>.Y" -> `sample_<name>`.`Y` (alias `samples.<name>.Y`)
    anything else      -> `variants`.`field`  (never aliased)

    Aliases ("... AS `...`") are appended only when `use_as` is True.
    """
    columns = []
    for field in fields:
        if field.startswith("ann."):
            short = field[4:]
            column = f"`annotations`.`{short}`"
            if use_as:
                column += f" AS `ann.{short}`"
        elif field.startswith("samples."):
            # "samples.<name>.<value>"; the name itself may contain dots.
            _, *name_parts, value = field.split(".")
            sample = ".".join(name_parts)
            column = f"`sample_{sample}`.`{value}`"
            if use_as:
                column += f" AS `samples.{sample}.{value}`"
        else:
            column = f"`variants`.`{field}`"
        columns.append(column)
    return columns
# refactor
def condition_to_sql(item: dict, samples=None) -> str:
    """Render a single filter condition {field: value-or-{op: value}} as SQL.

    The field prefix selects the table: "ann.X" -> annotations,
    "samples.<name>.X" -> the joined per-sample genotype table, anything
    else -> variants. "samples.$any.X" / "samples.$all.X" expand over the
    `samples` list joined with OR / AND.

    Examples:
        condition_to_sql({"chr": 3})            ==> `variants`.`chr` = 3
        condition_to_sql({"chr": {"$gte": 30}}) ==> `variants`.`chr` >= 30
        condition_to_sql({"ann.gene": "CFTR"})  ==> `annotations`.`gene` = 'CFTR'
    """
    # TODO : optimiser
    # NOTE(review): only the first key of `item` is considered.
    k = list(item.keys())[0]
    v = item[k]
    if k.startswith("ann."):
        table = "annotations"
        k = k[4:]
    elif k.startswith("samples."):
        table = "samples"
        _, *name, k = k.split(".")
        name = ".".join(name)
    else:
        table = "variants"
    field = f"`{table}`.`{k}`"
    if isinstance(v, dict):
        vk, vv = list(v.items())[0]
        operator = vk
        value = vv
    else:
        operator = "$eq"
        value = v
    if isinstance(value, str):
        # SQL string escaping: double any single quote.
        value = value.replace("'", "''")
    # MAP operator
    sql_operator = PY_TO_SQL_OPERATORS[operator]
    # Optimisation REGEXP
    # use LIKE IF REGEXP HAS NO special caractere
    if "REGEXP" in sql_operator:
        special_caracter = "[]+.?*()^$"
        if not set(str(value)) & set(special_caracter):
            sql_operator = "LIKE" if sql_operator == "REGEXP" else "NOT LIKE"
            value = f"%{value}%"
    # HAS: match a whole token inside a separator-delimited list column.
    if "HAS" in sql_operator:
        field = f"'{cst.HAS_OPERATOR}' || {field} || '{cst.HAS_OPERATOR}'"
        sql_operator = "LIKE" if sql_operator == "HAS" else "NOT LIKE"
        value = f"%{cst.HAS_OPERATOR}{value}{cst.HAS_OPERATOR}%"
    # Cast value
    if isinstance(value, str):
        value = f"'{value}'"
    if isinstance(value, bool):
        value = int(value)
    # Cast IS NULL
    if value is None:
        if operator == "$eq":
            sql_operator = "IS"
        if operator == "$ne":
            sql_operator = "IS NOT"
        value = "NULL"
    # Cast wordset
    if isinstance(value, dict):
        if "$wordset" in value:
            wordset_name = value["$wordset"]
            value = f"(SELECT value FROM wordsets WHERE name = '{wordset_name}')"
    # Convert [1,2,3] => "(1,2,3)"
    if isinstance(value, list) or isinstance(value, tuple):
        value = "(" + ",".join([f"'{i}'" if isinstance(i, str) else f"{i}" for i in value]) + ")"
    operator = None
    condition = ""
    if table == "samples":
        # "$any"/"$all" expand over every joined sample with OR/AND.
        if name == "$any":
            operator = "OR"
        if name == "$all":
            operator = "AND"
        if operator and samples:
            condition = (
                "("
                + f" {operator} ".join(
                    [f"`sample_{sample}`.`{k}` {sql_operator} {value}" for sample in samples]
                )
                + ")"
            )
        else:
            condition = f"`sample_{name}`.`{k}` {sql_operator} {value}"
    else:
        condition = f"{field} {sql_operator} {value}"
    return condition
def condition_to_vql(item: dict) -> str:
    """Render a single filter condition as a VQL expression.

    Examples:
        condition_to_vql({"chr": 3})                 ==> chr = 3
        condition_to_vql({"chr": {"$gte": 30}})      ==> chr >= 30
        condition_to_vql({"ann.gene": {"$gte": 30}}) ==> ann.gene >= 30
    """
    # TODO : optimiser
    # NOTE(review): only the first key of `item` is considered.
    k = list(item.keys())[0]
    v = item[k]
    field = k  # NOTE(review): `field` is assigned but never used below.
    if isinstance(v, dict):
        vk, vv = list(v.items())[0]
        operator = vk
        value = vv
    else:
        operator = "$eq"
        value = v
    # MAP operator
    sql_operator = PY_TO_VQL_OPERATORS[operator]
    # # hack .. we want ~ instead of REGEXP
    # if sql_operator == "REGEXP":
    #     sql_operator = "=~"
    # if sql_operator == "NOT REGEXP":
    #     sql_operator = "!~"
    # Cast value
    if isinstance(value, str):
        # VQL string escaping: backslash-escape single quotes.
        value = value.replace("'", "\\'")
        value = f"'{value}'"
    if isinstance(value, bool):
        value = int(value)
    # Cast IS NULL
    if value is None:
        if operator == "$eq":
            sql_operator = "="
        if operator == "$ne":
            sql_operator = "!="
        value = "NULL"
    # Cast wordset
    if isinstance(value, dict):
        if "$wordset" in value:
            wordset_name = value["$wordset"]
            value = f"WORDSET['{wordset_name}']"
    # Convert [1,2,3] => "(1,2,3)"
    if isinstance(value, list) or isinstance(value, tuple):
        value = "(" + ",".join([f"'{i}'" if isinstance(i, str) else f"{i}" for i in value]) + ")"
    if k.startswith("samples."):
        # "samples.<name>.<field>" -> samples['<name>'].<field>, with the
        # special names $any/$all spelled ANY/ALL (unquoted).
        _, *name, k = k.split(".")
        name = ".".join(name)
        if name == "$any":
            name = "ANY"
        elif name == "$all":
            name = "ALL"
        else:
            name = f"'{name}'"
        k = f"samples[{name}].{k}"
    condition = f"{k} {sql_operator} {value}"
    return condition
def remove_field_in_filter(filters: dict, field: str = None) -> dict:
    """Return a copy of `filters` with every leaf condition on `field` dropped.

    Combinator branches ("$and"/"$or") that become empty after pruning are
    removed; a fully emptied tree collapses to {}.
    """

    def _prune(node):
        pruned = {}
        for key, value in node.items():
            if key in ("$and", "$or"):
                kept = []
                for child in value:
                    child_pruned = _prune(child)
                    if field not in child and child_pruned:
                        kept.append(child_pruned)
                if kept:
                    pruned[key] = kept
                # A combinator key ends the scan of this node: only the
                # first one encountered is honoured.
                return pruned
            pruned[key] = value
        return pruned

    return _prune(filters) or {}
def filters_to_sql(filters: dict, samples=None) -> str:
    """Build the SQL WHERE expression from the nested `filters` tree.

    Combinator keys ("$and"/"$or") become parenthesised groups joined with
    AND/OR; every other node is rendered by condition_to_sql. `samples`
    lists the joined sample names, needed to expand "$any"/"$all"
    genotype conditions.

    Example:
        {"$and": [{"chr": "chr1"}, {"pos": {"$gt": 111}}]}
        -> "(`variants`.`chr` = 'chr1' AND `variants`.`pos` > 111)"
    """
    # ---------------------------------
    def recursive(obj):
        conditions = ""
        for k, v in obj.items():
            if k in ["$and", "$or"]:
                conditions += (
                    "(" + f" {PY_TO_SQL_OPERATORS[k]} ".join([recursive(item) for item in v]) + ")"
                )
            else:
                # NOTE(review): condition_to_sql reads only the first key of
                # `obj`; leaves are expected to be single-key dicts.
                conditions += condition_to_sql(obj, samples)
        return conditions
    # ---------------------------------
    query = recursive(filters)
    # NOTE: unlike filters_to_vql, the outer parentheses are kept here.
    return query
def filters_to_vql(filters: dict) -> str:
    """Build the VQL WHERE expression from the nested `filters` tree.

    Same traversal as filters_to_sql, but rendered with VQL operators and
    with the outermost parentheses stripped.

    Example:
        {"$and": [{"chr": "chr1"}, {"pos": {"$gt": 111}}]}
        -> "chr = 'chr1' AND pos > 111"
    """
    # ---------------------------------
    def recursive(obj):
        conditions = ""
        for k, v in obj.items():
            if k in ["$and", "$or"]:
                conditions += (
                    "(" + f" {PY_TO_VQL_OPERATORS[k]} ".join([recursive(item) for item in v]) + ")"
                )
            else:
                # NOTE(review): condition_to_vql reads only the first key of
                # `obj`; leaves are expected to be single-key dicts.
                conditions += condition_to_vql(obj)
        return conditions
    # ---------------------------------
    query = recursive(filters)
    # hacky code to remove first level parenthesis
    query = query[1:-1]
    return query
# def build_vql_query(fields, source="variants", filters={}, group_by=[], having={}):
# """Build VQL SELECT query
# Args:
# fields (list): List of fields
# source (str): source of the virtual table ( see: selection )
# filters (dict): nested condition tree
# group_by (list/None): list of field you want to group
# """
# query = "SELECT " + ",".join([fields_to_vql(i) for i in fields]) + " FROM " + source
# if filters:
# where_clause = filters_to_vql(filters)
# if where_clause:
# query += " WHERE " + where_clause
# if group_by:
# query += " GROUP BY " + ",".join((fields_to_vql(i) for i in group_by))
# if having:
# operator = having["op"]
# value = having["value"]
# query += f" HAVING count {operator} {value}"
# return query
def build_sql_query(
    conn: sqlite3.Connection,
    fields,
    source="variants",
    filters={},
    order_by=[],
    limit=50,
    offset=0,
    selected_samples=[],
    **kwargs,
):
    """Build the complete SQL SELECT statement for a variant query.

    Args:
        conn (sqlite3.Connection): used only to look up sample ids.
        fields (list): field names to select ("chr", "ann.gene",
            "samples.<name>.<field>", ...).
        source (str): virtual table (selection) to read from.
        filters (dict): nested condition tree.
        order_by (list[(str, bool)]): (fieldname, ascending) pairs.
        limit (int/None): max record count; falsy disables paging.
        offset (int): index of the first record for the current page.
        selected_samples (list): accepted for API compatibility; not used
            in this function.

    Returns:
        str: the SELECT statement; `variants`.`id` is always selected first.

    NOTE(review): mutable default arguments (`filters={}`, `order_by=[]`)
    are shared across calls; they are not mutated here, but treat with care.
    """
    # get samples ids
    samples_ids = {i["name"]: i["id"] for i in sql.get_samples(conn)}
    # Create fields
    sql_fields = ["`variants`.`id`"] + fields_to_sql(fields, use_as=True)
    sql_query = f"SELECT DISTINCT {','.join(sql_fields)} "
    # Add source table
    sql_query += "FROM variants"
    if is_annotation_join_required(fields, filters, order_by):
        sql_query += " LEFT JOIN annotations ON annotations.variant_id = variants.id"
    # Add Join Selection
    # TODO: set variants as global variables
    if source != "variants":
        sql_query += (
            " INNER JOIN selection_has_variant sv ON sv.variant_id = variants.id "
            f"INNER JOIN selections s ON s.id = sv.selection_id AND s.name = '{source}'"
        )
    # Test if sample*
    filters_fields = " ".join([list(i.keys())[0] for i in filters_to_flat(filters)])
    # Join all samples if $all or $any keywords are present
    if "$all" in filters_fields or "$any" in filters_fields:
        join_samples = list(samples_ids.keys())
    else:
        join_samples = samples_join_required(fields, filters, order_by)
    # One LEFT JOIN on the genotypes table per required sample.
    for sample_name in join_samples:
        if sample_name in samples_ids:
            sample_id = samples_ids[sample_name]
            sql_query += f""" LEFT JOIN genotypes `sample_{sample_name}` ON `sample_{sample_name}`.variant_id = variants.id AND `sample_{sample_name}`.sample_id = {sample_id}"""
    # Add Where Clause
    if filters:
        where_clause = filters_to_sql(filters, join_samples)
        if where_clause and where_clause != "()":
            sql_query += " WHERE " + where_clause
    # Add Order By
    if order_by:
        # TODO : sqlite escape field with quote
        order_by_clause = []
        for item in order_by:
            field, direction = item
            field = fields_to_sql([field])[0]
            direction = "ASC" if direction else "DESC"
            order_by_clause.append(f"{field} {direction}")
        order_by_clause = ",".join(order_by_clause)
        sql_query += f" ORDER BY {order_by_clause}"
    if limit:
        sql_query += f" LIMIT {limit} OFFSET {offset}"
    return sql_query
def build_vql_query(
    fields,
    source="variants",
    filters={},
    order_by=[],
    **kwargs,
):
    """Build the VQL SELECT statement mirroring build_sql_query.

    NOTE(review): mutable default arguments (`filters={}`, `order_by=[]`)
    are shared across calls; they are not mutated here, but treat with care.
    """
    select_clause = ",".join(fields_to_vql(fields))
    where_clause = filters_to_vql(filters)
    # An empty or degenerate "()" filter produces no WHERE clause at all.
    if where_clause and where_clause != "()":
        where_clause = f" WHERE {where_clause}"
    else:
        where_clause = ""
    order_by_clause = ""
    if order_by:
        order_by_clause = []
        for item in order_by:
            field, direction = item
            field = fields_to_vql([field])[0]
            direction = "ASC" if direction else "DESC"
            order_by_clause.append(f"{field} {direction}")
        order_by_clause = " ORDER BY " + ",".join(order_by_clause)
    return f"SELECT {select_clause} FROM {source}{where_clause}{order_by_clause}"
| labsquare/cutevariant | cutevariant/core/querybuilder.py | querybuilder.py | py | 20,120 | python | en | code | 86 | github-code | 13 |
19394967475 | from configparser import ConfigParser
def config_reader(filename='database.ini', section='postgresql'):
    """Load one section of an .ini file as a plain dict of str -> str.

    Raises Exception when the requested section is absent from the file.
    """
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    # ConfigParser.items(section) yields (key, value) pairs in file order.
    return dict(parser.items(section))
| navodas/music-genre_classif | config.py | config.py | py | 620 | python | en | code | 0 | github-code | 13 |
14758730764 | DATETIME_FORMAT = '%Y%m%d%H%M%S'
class JOB:
    # Exit statuses reported by batch jobs (names define the semantics).
    FINISHED_SUCCESSFULLY = 0
    FINISHED_WITH_WARNINGS = 1
    FINISHED_WITH_ERRORS = 2
class DATETIME:
    # strftime/strptime formats: compact, SQL-ish, and pretty-print variants.
    FORMAT = '%Y%m%d%H%M%S'
    FORMAT_2 = '%Y-%m-%d %H:%M:%S'
    PP_FORMAT = '%d/%m/%Y %H:%M:%S'
class ServiceDetails:
    # Each row: [slug, display name, reelgood listing URL, logo URL].
    ROWS = [
        ['netflix', 'Netflix', 'https://reelgood.com/movies/source/netflix', 'https://img.reelgood.com/service-logos/netflix.svg'],
        ['amazon', 'Prime Video', 'https://reelgood.com/movies/source/amazon', 'https://img.reelgood.com/service-logos/amazon_prime.svg'],
        ['nowtv', 'NowTV', 'https://reelgood.com/movies/source/nowtv', 'https://img.reelgood.com/service-logos/nowtv.svg'],
    ]
class API:
    # API access levels.
    READ_ONLY = 0
    WRITE = 1
    SCRAPE = 2
class SERVER:
    # HTTP method identifiers used when registering routes.
    GET = 'get'
    POST = 'post'
class SCORE:
    # Score deltas applied on user actions -- TODO confirm against callers.
    WATCH_LIST_ADD = 2
    WATCH_LIST_REMOVE = -1
    CLICK_THROUGH = 1
| joshnic3/StreamGuide | src/Library/constants.py | constants.py | py | 904 | python | en | code | 0 | github-code | 13 |
35203599820 | import logging
import re
import urlparse
import sst.actions
from functools import wraps
logger = logging.getLogger('User test')
def log_action(log_func):
    """Decorator factory: log every call of a page-object action method.

    `log_func` receives a printf-style format line plus the instance's
    class name, the first docstring line (or the function name when there
    is no docstring) and the positional/keyword arguments of the call.
    """
    def decorator(f):
        @wraps(f)
        def wrapper(instance, *args, **kwargs):
            doc = f.__doc__
            summary = doc.split('\n')[0].strip() if doc else f.__name__
            log_func('%r: %r. Arguments %r. Keyword arguments: %r.',
                     str(instance.__class__.__name__), summary, args, kwargs)
            return f(instance, *args, **kwargs)
        return wrapper
    return decorator
class Page(object):
"""Base class for the page objects used in acceptance testing.
Instance variables:
title -- The title of the page.
url_path -- The path of the page.
is_url_path_regex -- If True, the url path will be considered as a regular
expression.
headings1 -- A list with the expected text of the h1 elements. If it's
empty, the h1 elements will not be checked.
headings2 -- A list with the expected text of the h2 elements. If it's
empty, the h2 elements will not be checked.
qa_anchor -- An string with the expected qa id
"""
title = None
url_path = None
is_url_path_regex = False
headings1 = []
headings2 = []
qa_anchor = ''
def __init__(self, open_page=False):
super(Page, self).__init__()
if open_page:
self._open_page()
self.assert_page_is_open()
@log_action(logging.info)
def _open_page(self):
"""Open the page."""
if self.is_url_path_regex:
raise ValueError(
"We can't open a page with a regular expression on the path.")
else:
assert self.url_path is not None
sst.actions.go_to(self.url_path)
return self
def assert_page_is_open(self):
"""Assert that the page is open and that no oops are displayed."""
try:
assert not self._is_oops_displayed(), \
'An oops error is displayed: {0}'.format(
self._get_oops_element().text)
self.assert_url_path()
# qa_anchor should take precedence
# since checking for text should be
# deprecated behaviour
if self.qa_anchor:
self.assert_qa_anchor()
else:
self.assert_title()
if self.headings1:
self.assert_headings1()
if self.headings2:
self.assert_headings2()
except AssertionError:
self._log_errors()
raise
def _is_oops_displayed(self):
try:
self._get_oops_element()
return True
except AssertionError:
return False
def _get_oops_element(self):
# TODO this works for U1. Does it work the same for pay and SSO?
oops_class = 'yui3-error-visible'
return sst.actions.get_element(css_class=oops_class)
def assert_title(self):
"""Assert the title of the page."""
sst.actions.assert_title(self.title)
def assert_url_path(self):
"""Assert the path of the page URL."""
if not self.is_url_path_regex:
sst.actions.assert_equal(
self._get_current_url_path(), self.url_path)
else:
self._assert_url_path_match()
def _assert_url_path_match(self):
# Make sure that there are no more characters at the end of the path.
url_path_regexp = self.url_path + '$'
current_url_path = self._get_current_url_path()
assert re.match(url_path_regexp, current_url_path), \
"The current URL path {0} doesn't match {1}".format(
current_url_path, url_path_regexp)
def _get_current_url_path(self):
current_url = sst.actions.get_current_url()
return urlparse.urlparse(current_url).path
def assert_headings1(self):
"""Assert the h1 elements of the page."""
self._assert_elements_text('h1', self.headings1)
def _assert_elements_text(self, tag, expected_texts):
elements_text = self._get_elements_text(tag)
assert elements_text == expected_texts, \
'Expected elements texts: {0}\n' \
'Actual elements texts: {1}'.format(
', '.join(expected_texts), ', '.join(elements_text))
def _get_elements_text(self, tag=None, css_class=None):
get_text = lambda x: x.text
return map(get_text, sst.actions.get_elements(
tag=tag, css_class=css_class))
    def assert_headings2(self):
        """Assert the h2 elements of the page match self.headings2."""
        self._assert_elements_text('h2', self.headings2)
    def assert_qa_anchor(self):
        """Assert the qa anchor.

        The anchor is a data-qa-id attribute on the <html> element; it is
        passed via ** because the attribute name contains dashes.
        """
        sst.actions.assert_element(
            tag='html', **{'data-qa-id': self.qa_anchor})
    def _log_errors(self):
        # Best-effort diagnostics: dump any on-page error texts to the log
        # before the caller re-raises the assertion failure.
        if sst.actions.exists_element(css_class='error'):
            logger.error(
                ', '.join(self._get_elements_text(css_class='error')))
| miing/mci_migo_packages_u1-test-utils | u1testutils/sst/__init__.py | __init__.py | py | 5,281 | python | en | code | 0 | github-code | 13 |
31631005442 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import math
from glob import glob
import tGD_aux as aux
import tGD_fun as fun
import tGD_gene as drv
import tGD_plots as plot
from datetime import datetime
import MoNeT_MGDrivE as monet
import compress_pickle as pkl
# CLI arguments: user/machine profile, drive selector key, analysis of
# interest (passed through to aux.selectPath / drv.driveSelector below).
(USR, DRV, AOI) = (sys.argv[1], sys.argv[2], sys.argv[3])
# (USR, DRV, AOI) = ('dsk', 'linkedDrive', 'HLT')
# Run switches — presumably format / skip / male-female / filtered-files
# toggles; confirm exact semantics in tGD_fun.
(FMT, SKP, MF, FZ) = ('bz', False, (False, True), True)
(QNT, thPlt) = (90, 0.1) # Change this QNT when the new pst are out!!!!!!!
# Release-experiment tags and group indices swept by the main loop.
EXP = ('000', )# '001', '005', '010', '100')
GRP = (0, ) # 1)
###############################################################################
# (grp, exp) = (GRP[0], EXP[0])
# Sweep every (group, experiment) combination, exporting trace plots and a
# genotype legend for each preprocessed file pair.
for grp in GRP:
    for exp in EXP:
        grpPad = str(grp).zfill(2)
        #######################################################################
        # Setting up paths and style
        #######################################################################
        # Paths ---------------------------------------------------------------
        (PT_ROT, PT_IMG, PT_DTA, PT_PRE, PT_OUT) = aux.selectPath(USR, DRV, exp)
        PT_IMG = PT_IMG[:-1] + 'Pst/'
        monet.makeFolder(PT_IMG)
        drive = drv.driveSelector(DRV, AOI)
        (CLR, YRAN) = (drive.get('colors'), (0, drive.get('yRange')))
        STYLE = {
            "width": .5, "alpha": .15, "dpi": 750, "legend": False,
            "aspect": .25, "colors": CLR, "xRange": [0, (365*5)/3],
            "yRange": YRAN
        }
        STYLE['aspect'] = monet.scaleAspect(1, STYLE)
        tS = datetime.now()
        aux.printExperimentHead(PT_ROT, PT_IMG, PT_PRE, tS, 'Traces ' + AOI)
        #######################################################################
        # Load preprocessed files lists
        #######################################################################
        # Pair up the 'sum' and 'rep' pickles for each experiment.
        tyTag = ('sum', 'rep')
        if FZ:
            fLists = list(zip(*[fun.getFilteredFiles(
                PT_PRE+'*_00_*'+AOI+'*'+grpPad+'*'+tp+'*',
                PT_PRE+'*'+AOI+'*'+grpPad+'*'+tp+'*') for tp in tyTag]
            ))
        else:
            fLists = list(zip(
                *[sorted(glob(PT_PRE+'*'+AOI+'*'+tp+'*')) for tp in tyTag]
            ))
        #######################################################################
        # Process files
        #######################################################################
        (xpNum, digs) = monet.lenAndDigits(fLists)
        msg = '* Analyzing ({}/{})'
        for i in range(0, xpNum):
            print(
                msg.format(str(i+1).zfill(digs), str(xpNum).zfill(digs)),
                end='\r'
            )
            # Preprocessed data (sum and rep) ---------------------------------
            # NOTE(review): `file` and `id` below shadow builtins.
            (sumDta, repDta) = [pkl.load(file) for file in (fLists[i])]
            name = fLists[i][0].split('/')[-1].split('.')[0][:-4]
            # Thresholds timing data ------------------------------------------
            # Experiment id is encoded in the filename as underscore-separated
            # integers; the group index is appended at the end.
            id = [int(i) for i in name.split('-')[0].split('_')[1:]]
            id.extend([grp])
            relNum = id[4]
            ttx = (
                fun.getTTX(PT_OUT, relNum, AOI, str(QNT), 'TTI', id, '%0.1f' %(thPlt)),
                fun.getTTX(PT_OUT, relNum, AOI, str(QNT), 'TTO', id, '%0.1f' %(thPlt))
            )
            # Missing threshold times default to the full simulated span.
            ttx = [sumDta['population'].shape[0] if math.isnan(i) else i for i in ttx]
            ssv = fun.getTTX(PT_OUT, relNum, AOI, QNT, 'TTS', id, 'ssv')
            ssv = 0 if (math.isnan(ssv)) else ssv
            # Export plots ----------------------------------------------------
            plot.exportTracesPlot(
                repDta, name+'_Q'+str(QNT), STYLE, PT_IMG,
                vLines=ttx, hLines=ssv
            )
            cl = [i[:-2]+'cc' for i in CLR]
            monet.exportGeneLegend(
                sumDta['genotypes'], cl, PT_IMG+'/plt_{}.png'.format(AOI), 500
            )
        tE = datetime.now()
        print('* Analyzed ({}/{}) '.format(xpNum, xpNum), end='\n')
        print(monet.PAD)
| Chipdelmal/MoNeT | DataAnalysis/tGDDecay/legacy/tGD_pstTracesOLD.py | tGD_pstTracesOLD.py | py | 4,106 | python | en | code | 7 | github-code | 13 |
42012874834 | import librosa
# Analysis window parameters, in samples.
hop_length = 256
frame_length = 512
import librosa.display as disp
# Load the audio; librosa returns the signal and its sample rate.
x, sr = librosa.load('audio/simple_loop.wav')
y = x
# Frame-wise energy: sum of squared samples over hop-spaced windows.
# NOTE(review): `np` is used but not imported in this view — confirm
# `import numpy as np` exists earlier in the file.
energy = np.array([sum((x[i:i+frame_length]**2)) for i in range(0,len(y),hop_length)])
energy_n = energy/energy.max()
# Time stamps (seconds) for each analysis frame.
frames = range(len(energy))
t = librosa.frames_to_time(frames,sr,hop_length=hop_length)
def strip(x, frame_length, hop_length):
    """Trim leading near-silence from the signal x.

    Frame-wise RMS energy is computed and every sample before the first
    frame whose RMS reaches the 0.01 threshold is dropped.  If no frame
    reaches the threshold an empty signal is returned (the original
    unbounded loop raised IndexError on an all-quiet signal).
    """
    # Compute RMSE; librosa returns shape (1, n_frames), keep the row.
    rms = librosa.feature.rms(
        x, frame_length=frame_length, hop_length=hop_length, center=True)[0]
    # Identify the first frame index where RMSE exceeds a threshold,
    # guarded so we never run off the end of the frame array.
    thresh = 0.01
    frame_index = 0
    while frame_index < len(rms) and rms[frame_index] < thresh:
        frame_index += 1
    # Convert units of frames to samples.
    start_sample_index = librosa.frames_to_samples(frame_index, hop_length=hop_length)
    # Return the trimmed signal.
    return x[start_sample_index:]
#y = strip(x, frame_length, hop_length)
# Short-time Fourier transform and its dB-scaled magnitude.
X = librosa.stft(y)
Xdb = librosa.amplitude_to_db(abs(X))
#plt.ion()
#disp.specshow(Xdb,sr=sr,x_axis='time',y_axis='hz')
#plt.figure()
#plt.show()
# Frame-wise RMS, normalised to [0, 1] for plotting next to the energy.
rms = librosa.feature.rms(y,hop_length=hop_length)
rms = rms[0]
rms_n = rms/rms.max()
plt.figure()
disp.waveplot(y,sr)
plt.plot(t,energy_n,c='g')
plt.plot(t,rms_n,c='b')
plt.savefig('wave_E_RMS.png')
# Detect onsets and express them in frames, seconds and samples.
onset_frames = librosa.onset.onset_detect(y,sr=sr,hop_length=hop_length)
onset_times = librosa.frames_to_time(onset_frames,sr=sr,hop_length=hop_length)
onset_samples = librosa.frames_to_samples(onset_frames,hop_length=hop_length)
# Overlay audible clicks at the detected onsets for auditioning.
clicks = librosa.clicks(times=onset_times,length=len(y))
librosa.output.write_wav('clicks_simple_loop.wav',y+clicks,sr=sr)
# Cut a 100 ms segment starting at every onset.
frame_sz = int(0.100*sr)
segments = np.array([x[i:i+frame_sz] for i in onset_samples])
def concatenate_segments(segments,sr=22050,pad_time=0.300):
padded_segments = [np.concatenate([segment,np.zeros(int(pad_time*sr))]) for segment in segments]
return np.concatenate(padded_segments)
# Write the onset segments back-to-back (with silence padding) to disk.
concatenated_signal = concatenate_segments(segments,sr)
librosa.output.write_wav('conc.wav',concatenated_signal,sr)
# Zero-crossing count per segment, then re-render the segments ordered by it.
zcrs = [sum(librosa.zero_crossings(segment)) for segment in segments]
ind = np.argsort(zcrs)
concatenated_signal_sort = concatenate_segments(segments[ind],sr)
librosa.output.write_wav('sort.wav',concatenated_signal_sort,sr=sr)
| wubinbai/2020 | Music/MIR/mir.py | mir.py | py | 2,268 | python | en | code | 0 | github-code | 13 |
36412528672 | # Import every library needed
#%%
import matplotlib
from LakesModeling_Subprogram import *
from geopy.distance import geodesic
from rasterio.plot import show
from matplotlib import gridspec
#%% md
# Init every parameter
#%%
# name of the lake to model
# available names : Amadeus, Austin, Barlee, Blanche, Carey, Carnegie, Cowan, Disappointment, Everard, Eyre_North, Eyre_South, Frome, Gairdner, Gregory, Island_Lagoon, Lefroy, Mackay, Macleod, Moore, Torrens, Yamma_Yamma
lake_to_model = 'Amadeus'
# path to folder containing data (shapefiles, icesatdata, tiff files, etc.)
# NOTE(review): placeholder path — the lake name is appended with no '/'
# separator, so replace the whole string with a real trailing-slash path.
data_folder = 'path/to/data/folder' + lake_to_model
# path to icesatdata containing all topography data
icesatdata_path = data_folder + '/' + lake_to_model + '_icesatdata'
# path to shapefile containing geometry of each lake
lakes_shapefile = 'path/to/shapefile'
# define the time period of icesatdata
time_period = ['2021-01-01', '2022-01-01']
# define the pixel size (in degree per pixel)
pixel_size = 0.005
# define the region size around the lake (geological context)
region_size = 0.1
# define the clipping distance to the lake edges
distance_to_edges = 0.01
# define the filter coefficient (if alpha = 1 then 1% of minimal values and 1% of maximal values will be removed)
alpha = 1
# path to the shapefile containing rivers of Australia
rivers_shapefile = 'path/to/rivers/shapefile'
# path to hydroSHEDS tiff file
hydro_file = data_folder + '/hydro.tif'
# CRS of 2D projection
projection_crs = 'EPSG:7853'
#%% md
# Open and select the lake geodataframe
#%%
# file containing all lakes geometry
lakes = geopandas.read_file(lakes_shapefile)
# select the lake to model
lake = lakes[lakes.Lake_Name == lake_to_model]
# Obtain the bounds of the lake
# (geopandas bounds are ordered minx, miny, maxx, maxy)
lake_bounds = lake.bounds.iloc[0]
#%% md
# Get icesat data (ONLY if you did not download it already)
#%%
# Fetch ICESat elevation granules intersecting the lake's bounding box
# for the configured time period into icesatdata_path.
download_icesat_data(spatial_bounds=[lake_bounds],
                     time_period=time_period,
                     destination=icesatdata_path)
#%% md
# extract data and rasterize it
#%%
# extract elevation points from corresponding hdf5 files
original_geospatial_points = extract_data_from_folder(folder_path=icesatdata_path)
# rasterize elevation points with a given pixel size
# (writes 'original_raster.tif' into data_folder, read in the next cell)
rasterize_geospatial_data(data=original_geospatial_points,
                          output_path=data_folder,
                          pixel_size=pixel_size)
#%% md
# Clip data with the lake shape, get the confluence points and plot everything
#%%
# ignore warnings due to CRS
warnings.filterwarnings("ignore")
# open tiff file corresponding to icesat elevation data
with rasterio.open(data_folder + '/original_raster.tif') as file:
    # clip the data within the lake
    inside_raster, inside_transform = clip_geospatial_data_with_mask(geospatial_data=file,
                                                                     mask_gdf=lake,
                                                                     buffer_distance=0.005,
                                                                     clip_type='within',
                                                                     region_scaling=0.6)
# Write an interpolated (gap-filled) copy to a scratch GeoTIFF so it can be
# re-opened and clipped with the same mask helper below.
with rasterio.open(data_folder + '/temp.tif',
                   'w',
                   driver='GTiff',
                   height=inside_raster.shape[0],
                   width=inside_raster.shape[1],
                   count=1, dtype=inside_raster.dtype,
                   crs='EPSG:4326',
                   transform=inside_transform) as dst:
    dst.write(interpolate_missing_data(inside_raster), 1)
with rasterio.open(data_folder + '/temp.tif') as file:
    # clip the data within the lake
    interpolated_inside_raster, interpolated_inside_transform = clip_geospatial_data_with_mask(geospatial_data=file,
                                                                                               mask_gdf=lake,
                                                                                               buffer_distance=0.005,
                                                                                               clip_type='within',
                                                                                               region_scaling=0.6)
# open tiff file corresponding to hydroSHEDS elevation data
with rasterio.open(hydro_file)as file:
    # clip the data without the lake
    outside_raster, outside_transform = clip_geospatial_data_with_mask(geospatial_data=file,
                                                                       mask_gdf=lake,
                                                                       buffer_distance=0,
                                                                       clip_type='without',
                                                                       region_scaling=0.6)
# extract confluence points where rivers meet the lake edges
confluence = extract_confluence_points(lake_geodataframe=lake,
                                       path_to_rivers_shapefile=rivers_shapefile)
#%% md
# Plot the rivers (if needed)
#%%
rivers = geopandas.read_file(rivers_shapefile)
#%%
# Keep only the rivers that actually touch the selected lake.
river = rivers[rivers['geometry'].apply(lambda x: x.intersects(lake.iloc[0].geometry))]
river = river.to_crs('EPSG:4326')
# Build the complement of the lake inside the rivers' bounding box, then
# clip the river lines to the part lying OUTSIDE the lake.
xmin, ymin, xmax, ymax = rivers.total_bounds
large_polygon = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)])
inverse_polygons = geopandas.GeoSeries([large_polygon]).difference(lake.unary_union)
clipped_lines_outside = geopandas.clip(river, inverse_polygons)
#%%
# plot the result
fig, ax = matplotlib.pyplot.subplots(figsize=(20,20))
# Two-slope normalisation of the surrounding terrain for a readable greyscale.
norm = matplotlib.colors.TwoSlopeNorm(vmin=numpy.nanpercentile(outside_raster, 2), vcenter=numpy.nanpercentile(outside_raster, 30), vmax=numpy.nanpercentile(outside_raster, 90))
show(outside_raster, transform=outside_transform, ax=ax, cmap='binary', norm=norm)
show(inside_raster, transform=inside_transform, ax=ax, cmap='gist_earth')
# Mark every candidate confluence point and label it with its index, so the
# next cell's manual selection can refer to those indices.
ax.scatter(confluence[0], confluence[1], c='red', marker='2', s=15)
for i in range(len(confluence[1])):
    ax.annotate(str(i), xy=(confluence[0][i],confluence[1][i]), xytext=(confluence[0][i],confluence[1][i]+0.01), size=10, ha='center')
clipped_lines_outside.plot(ax=ax)
#%% md
# Filter main confluence points
#%%
# Hand-picked indices of the dominant confluences (lake-specific; these
# values were chosen from the annotated plot of the previous cell).
selected_indexes = [7,10,40,0]
main_confluence = [confluence[0][selected_indexes], confluence[1][selected_indexes]]
# plot the result
fig, ax = matplotlib.pyplot.subplots(figsize=(20,20))
c = show(inside_raster, transform=inside_transform, ax=ax, cmap='gist_earth')
show(outside_raster, transform=outside_transform, ax=ax, cmap='binary')
ax.scatter(main_confluence[0], main_confluence[1], c='red')
ax.set_xlabel('Longitude (m)',fontsize=20)
ax.set_ylabel('Latitude (m)',fontsize=20)
# Fixed: the original passed the undefined name `c1` (NameError).  Also,
# rasterio's show() returns the Axes, not a mappable, so the colorbar is
# built from the first image drawn on it.
cbar = matplotlib.pyplot.colorbar(c.get_images()[0], ax=ax)
cbar.set_label('Altitude (m)')
ax.set_title('Eyre North Elevations',fontsize=20)
fig.savefig('./report5.png', dpi=100)
#%% md
# Modeling
#%%
# convert the raster into geospatial points
points = unrasterize(raster_data=inside_raster,
                     geo_transform=inside_transform)
# filter out the points considered as noise
filtered_points = filter_extreme_altitudes(geospatial_data=points,
                                           filter_threshold=alpha)
# project the points to have everything in meters
projected_filtered_points = transform_coordinates(points_data=[filtered_points[0],filtered_points[1]],
                                                  crs=projection_crs)
projected_filtered_points.append(filtered_points[2])
# model the points to obtain the plane parameters (we fit the lake to a plane)
model = model_altitude_from_coordinates(geospatial_data=projected_filtered_points)
# extract edges points
edges_points = extract_geospatial_edges(geometry=lake.geometry.iloc[0])
# project edges also
projected_edges_points = transform_coordinates(points_data=edges_points,
                                               crs=projection_crs)
# use the previous parameters to plot edges within the plane
# (z = a*x + b*y + c, with model[0:3] == (a, b, c))
ztemp = []
for i in range(len(projected_edges_points[0])):
    ztemp.append(projected_edges_points[0][i]*model[0] + projected_edges_points[1][i]*model[1] + model[2])
projected_edges_points.append(numpy.array(ztemp))
# remove longitude and latitude offset so plots start near the origin
corrected_projected_edges_points = projected_edges_points.copy()
corrected_projected_filtered_points = projected_filtered_points.copy()
corrected_projected_edges_points[:2] = [projected_edges_points[i] - numpy.min(projected_filtered_points[i]) for i in range(2)]
corrected_projected_filtered_points[:2] = [projected_filtered_points[i] - numpy.min(projected_filtered_points[i]) for i in range(2)]
# refit the plane on the offset-corrected point cloud
corrected_model = model_altitude_from_coordinates(geospatial_data=corrected_projected_filtered_points)
#%% md
# plot result
#%%
# plot result: lake edge ring, filtered point cloud, and the fitted plane
fig = matplotlib.pyplot.figure(figsize=(20,20))
ax = fig.add_subplot(projection='3d')
ax.set_title('Modeling')
ax.plot(corrected_projected_edges_points[0],corrected_projected_edges_points[1],corrected_projected_edges_points[2], c='black')
ax.scatter(corrected_projected_filtered_points[0], corrected_projected_filtered_points[1], corrected_projected_filtered_points[2], c=corrected_projected_filtered_points[2], cmap='gist_earth')
# Plot the plane (corrected_model[3:6] hold the meshgrid X, Y, Z surfaces)
plane_colors = matplotlib.pyplot.cm.inferno((corrected_model[5] - numpy.min(corrected_model[5])) / (numpy.max(corrected_model[5]) - numpy.min(corrected_model[5])))
ax.plot_surface(corrected_model[3], corrected_model[4], corrected_model[5], facecolors=plane_colors, alpha=0.1)
ax.set_xlabel('Longitude (in meters)')
ax.set_ylabel('Latitude (in meters)')
ax.set_zlabel('Altitude (in meters)')
# ax.view_init(elev=90, azim=-90) # elev=90 to see it from above
matplotlib.pyplot.show()
#%% md
# Polar projection
#%%
# get indexes of the edge points with min and max altitude value
i_min = numpy.where(projected_edges_points[2] == numpy.min(projected_edges_points[2]))[0][0]
i_max = numpy.where(projected_edges_points[2] == numpy.max(projected_edges_points[2]))[0][0]
# calculate distance between the two points (geodesic takes (lat, lon))
point_max = (edges_points[0][i_max], edges_points[1][i_max])
point_min = (edges_points[0][i_min], edges_points[1][i_min])
distance = geodesic((point_max[1],point_max[0]), (point_min[1],point_min[0])).kilometers
azimuth = round(calculate_initial_compass_bearing(pointA=point_max, pointB=point_min),2)
# elevation difference between highest and lowest edge points
alt_diff = projected_edges_points[2][i_max] - projected_edges_points[2][i_min]
# get the slope (metres of drop per kilometre)
slope = round(alt_diff / distance, 3)
# use the midpoint of the max/min pair as the polar origin
centroid_lon = (point_max[0] + point_min[0]) / 2
centroid_lat = (point_max[1] + point_min[1]) / 2
edges_r, edges_theta = convert_to_polar_coordinates(data=edges_points,
                                                    reference_point=[centroid_lon, centroid_lat])
point_max_r, point_max_theta = convert_to_polar_coordinates(data=point_max,
                                                            reference_point=[centroid_lon, centroid_lat])
point_min_r, point_min_theta = convert_to_polar_coordinates(data=point_min,
                                                            reference_point=[centroid_lon, centroid_lat])
# NOTE(review): `plt` is presumably re-exported by LakesModeling_Subprogram
# (star import above) — elsewhere this notebook uses matplotlib.pyplot.
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'}, figsize=(20,20))
c = ax.scatter(edges_theta, edges_r, c=projected_edges_points[2], cmap='gist_earth', s=2)
ax.plot([point_max_theta, point_min_theta], [point_max_r, point_min_r], c='red', lw=5, linestyle='dashed')
plt.colorbar(c, ax=ax, label='Altitude')
ax.set_title("Projection polaire du lac")
ax.set_xticklabels(['E\n0°', 'N-E\n45°', 'N\n90°', 'N-W\n135°', 'W\n180°', 'S-W\n225°', 'S\n270°', 'S-E\n315°'])
plt.show()
#%% md
# Final plot
#%%
# Summary table data for the report header.
df = generate_lake_information(lake_gdf=lake,
                               time_period=time_period,
                               resolution=pixel_size,
                               filter_threshold=alpha,
                               r_squared_value=model[6]*100,
                               target_crs=projection_crs,
                               slope=slope,
                               azimuth=azimuth)
# A4-sized report: info table on top, 2x2 grid of plots below.
fig = matplotlib.pyplot.figure(figsize=(21, 29.7))
gs0 = gridspec.GridSpec(3, 2, figure=fig)
face_color = (255/255, 255/255, 255/255)
labels_color = 'black'
fig.set_facecolor(face_color)
ax0 = fig.add_subplot(gs0[0, :])
ax0.set_facecolor(face_color)
# Hide the axes
ax0.axis('off')
ax0.axis('tight')
# Render the information table
table = ax0.table(cellText=df.values, colLabels=df.columns, cellLoc = 'left', loc='center')
ax0.set_title(lake_to_model, color=labels_color, fontsize=32)
table.scale(0.9, 2)
for key, cell in table.get_celld().items():
    row, col = key
    if row == 0: # header row
        cell.set_fontsize(22)
        cell.set_facecolor('grey')
        cell.set_text_props(color='white')
    else:
        cell.set_fontsize(16)
        cell.set_facecolor('darkgrey')
        cell.set_text_props(color='white')
# Panel 1: raw clipped elevations with the main confluence points.
ax1 = fig.add_subplot(gs0[1, 0])
ax1.set_facecolor(face_color)
show(inside_raster, transform=inside_transform, ax=ax1, cmap='gist_earth')
show(outside_raster, transform=outside_transform, ax=ax1, cmap='binary', norm=norm)
ax1.scatter(main_confluence[0], main_confluence[1], c='red', marker='2')
ax1.set_title('Original Elevations', color=labels_color)
ax1.set_xlabel('Longitude (°)', color=labels_color)
ax1.set_ylabel('Latitude (°)', color=labels_color)
ax1.tick_params(axis='both', colors=labels_color)
# Panel 2: gap-filled elevations.
ax2 = fig.add_subplot(gs0[1, 1])
ax2.set_facecolor(face_color)
show(interpolated_inside_raster, transform=interpolated_inside_transform, ax=ax2, cmap='gist_earth')
show(outside_raster, transform=outside_transform, ax=ax2, cmap='binary', norm=norm)
ax2.scatter(main_confluence[0], main_confluence[1], c='red', marker='2')
ax2.set_title('Interpolated Elevations', color=labels_color)
ax2.set_xlabel('Longitude (°)', color=labels_color)
ax2.set_ylabel('Latitude (°)', color=labels_color)
ax2.tick_params(axis='both', colors=labels_color)
# Panel 3: 3D point cloud with the fitted plane.
ax3 = fig.add_subplot(gs0[2, 0], projection='3d')
ax3.set_facecolor(face_color)
ax3.set_title(f"Modeling\nPlane parameters (ax + bx + c) :\na = {model[0]:.2e}, b = {model[1]:.2e}, c = {model[2]:.2e}", color=labels_color)
ax3.plot(corrected_projected_edges_points[0],corrected_projected_edges_points[1],corrected_projected_edges_points[2], c=labels_color)
c1 = ax3.scatter(corrected_projected_filtered_points[0], corrected_projected_filtered_points[1], corrected_projected_filtered_points[2], c=corrected_projected_filtered_points[2], cmap='gist_earth', s=1)
plane_colors = matplotlib.pyplot.cm.inferno((corrected_model[5] - numpy.min(corrected_model[5])) / (numpy.max(corrected_model[5]) - numpy.min(corrected_model[5])))
ax3.plot_surface(corrected_model[3], corrected_model[4], corrected_model[5], facecolors=plane_colors, alpha=0.1)
ax3.set_xlabel('Longitude (m)', color=labels_color)
ax3.set_ylabel('Latitude (m)', color=labels_color)
ax3.set_zlabel('Altitude (m)', color=labels_color)
ax3.tick_params(axis='x', colors=labels_color)
ax3.tick_params(axis='y', colors=labels_color)
ax3.tick_params(axis='z', colors=labels_color)
ax3.xaxis.pane.fill = False
ax3.yaxis.pane.fill = False
ax3.zaxis.pane.fill = False
# # ax3.view_init(elev=90, azim=-90) # elev=90 to see from above
# Panel 4: polar projection of the lake edge with the slope arrow.
ax4 = fig.add_subplot(gs0[2, 1], projection='polar')
ax4.set_facecolor(face_color)
ax4.plot(edges_theta, edges_r, color=labels_color)
ax4.annotate("", xy=(point_min_theta, point_min_r), xytext=(point_max_theta, point_max_r), arrowprops=dict(arrowstyle="->", lw=3, color='red'),size=15)
ax4.set_title(f"Polar Projection\nSlope : {slope} m/km\nAzimuth : {round(azimuth,2)} °", color=labels_color)
ax4.set_xticklabels(['E\n90°', 'N-E\n45°', 'N\n0°', 'N-W\n315°', 'W\n270°', 'S-W\n225°', 'S\n180°', 'S-E\n135°'], color=labels_color)
for label in ax4.yaxis.get_ticklabels():
    label.set_color(labels_color)
# Shared altitude colorbar driven by the 3D scatter mappable (c1).
cbar = matplotlib.pyplot.colorbar(c1, ax=[ax1, ax2, ax3, ax4])
cbar.ax.tick_params(colors=labels_color)
cbar.set_label('Altitude (m)', color=labels_color)
matplotlib.pyplot.show()
#%%
fig.savefig('./Modeling.png', dpi=300, facecolor=fig.get_facecolor(), edgecolor='none')
| LeoCkr/stage4A | LakesModelling_Notebook.py | LakesModelling_Notebook.py | py | 16,123 | python | en | code | 0 | github-code | 13 |
def even_odd(*args):
    """Filter the numbers by parity.

    The final positional argument is the command ('even' or 'odd');
    everything before it is the pool of numbers.  An unknown command
    yields an empty list.
    """
    command = args[-1]
    numbers = args[:-1]
    if command == 'even':
        return [n for n in numbers if n % 2 == 0]
    if command == 'odd':
        return [n for n in numbers if n % 2 != 0]
    return []


print(even_odd(1, 2, 3, 4, 5, 6, "even"))
print(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "odd"))
| MiroVatov/Python-SoftUni | Python Advanced 2021/FUNCTIONS-ADVANCED/Exercise 08.py | Exercise 08.py | py | 341 | python | en | code | 0 | github-code | 13 |
15519037843 | import random
import sys
import os
# Digits of pi held in an immutable tuple.
pi_tuple = (3, 1, 4, 1, 5, 9)

# Unpack the tuple into a brand-new mutable list.
new_list = [*pi_tuple]

# Pack the list back into a fresh tuple.
new_tuple = (*new_list,)

print(new_tuple)
print(new_list)

# Lists are mutable, so appending is allowed.
new_list.append(19)

# Tuples are immutable: new_tuple.append(19) would raise AttributeError.

print(len(new_tuple))
| Priyankasingtamkar/public | python/tuples.py | tuples.py | py | 364 | python | en | code | null | github-code | 13 |
72680646418 | import sublime, sublime_plugin
import os
from os.path import dirname
import sys
from subprocess import Popen, PIPE
import subprocess
import shlex
from sublime import Region, Phantom, PhantomSet
from os import path
from importlib import import_module
from time import time
import threading
from .Modules.ProcessManager import ProcessManager
from .settings import base_name, get_settings, root_dir
from .debuggers import debugger_info
from .Highlight.CppVarHighlight import highlight
from .Highlight.test_interface import get_test_styles
class TestEditCommand(sublime_plugin.TextCommand):
	"""Text command driving the standalone test-edit view."""

	# Templates for rendering a test block inside the view.
	BEGIN_TEST_STRING = 'Test %d {'
	OUT_TEST_STRING = ''
	END_TEST_STRING = '} rtcode %s'
	# Region-key templates, indexed by test number.
	REGION_BEGIN_KEY = 'test_begin_%d'
	REGION_OUT_KEY = 'test_out_%d'
	REGION_END_KEY = 'test_end_%d'
	# Region style triples: (scope, icon, flags).
	REGION_POS_PROP = ['', '', sublime.HIDDEN]
	REGION_ACCEPT_PROP = ['string', 'dot', sublime.HIDDEN]
	REGION_DECLINE_PROP = ['variable.c++', 'dot', sublime.HIDDEN]
	REGION_UNKNOWN_PROP = ['text.plain', 'dot', sublime.HIDDEN]
	REGION_OUT_PROP = ['entity.name.function.opd', 'bookmark', sublime.HIDDEN]
	REGION_BEGIN_PROP = ['string', 'Packages/FastOlympicCoding/icons/arrow_right.png', \
		sublime.DRAW_NO_FILL | sublime.DRAW_STIPPLED_UNDERLINE | \
		sublime.DRAW_NO_OUTLINE | sublime.DRAW_EMPTY_AS_OVERWRITE]
	REGION_END_PROP = ['variable.c++', 'Packages/FastOlympicCoding/icons/arrow_left.png', sublime.HIDDEN]
	REGION_LINE_PROP = ['string', 'dot', \
		sublime.DRAW_NO_FILL | sublime.DRAW_STIPPLED_UNDERLINE | \
		sublime.DRAW_NO_OUTLINE | sublime.DRAW_EMPTY_AS_OVERWRITE]
	# Test
	# REGION_POS_PROP = REGION_UNKNOWN_PROP

	def __init__(self, view):
		self.view = view
		self.use_debugger = False
		# Offset separating already-sent input from text still being typed.
		self.delta_input = 0
		self.tester = None
		self.session = None
		self.phantoms = PhantomSet(view, 'test-phantoms')
	def insert_text(self, edit, text=None):
		"""Append a line to the view and feed it to the running tester.

		With text=None, everything typed after delta_input up to the
		cursor is taken as the line to send; otherwise `text` itself is
		inserted and sent.
		"""
		v = self.view
		if text is None:
			# Nothing to send once the tested process has finished.
			if not self.tester.proc_run:
				return None
			to_shove = v.substr(Region(self.delta_input, v.sel()[0].b))
			# print('shovel -> ', to_shove)
			v.insert(edit, v.sel()[0].b, '\n')
		else:
			to_shove = text
			v.insert(edit, v.sel()[0].b, to_shove + '\n')
		self.delta_input = v.sel()[0].b
		self.tester.insert(to_shove + '\n')
	def insert_cb(self, edit):
		"""Paste the clipboard into the tester, line by line.

		Only the final (unterminated) line is sent without a newline.
		"""
		v = self.view
		s = sublime.get_clipboard()
		lst = s.split('\n')
		for i in range(len(lst) - 1):
			self.tester.insert(lst[i] + '\n', call_on_insert=True)
		self.tester.insert(lst[-1], call_on_insert=True)
	def open_test_edit(self, i):
		"""Open a fresh view in the second column for editing a test.

		NOTE(review): `i` is currently unused — the new view is not yet
		initialized with test i's contents here.
		"""
		v = self.view
		edit_view = v.window().new_file()
		v.window().set_view_index(edit_view, 1, 1)
def on_test_action(self, i, event):
v = self.view
tester = self.tester
if event == 'test-click':
self.toggle_fold(i)
elif event == 'test-edit':
self.open_test_edit(i)
def on_accdec_action(self, i, event):
v = self.view
tester = self.tester
if event == 'click-acc':
tester.accept_out(i)
elif event == 'click-dec':
tester.decline_out(i)
self.update_configs()
self.memorize_tests()
	def cb_action(self, event):
		"""Handle save/delete clicks coming from the edit-view phantom.

		Finds the originating source view by id, forwards the request to
		its test_manager command, and closes this edit view.  The body is
		read from offset 1 to skip the protected leading newline inserted
		by init().
		"""
		v = self.view
		if event == 'test-save':
			for sub in v.window().views():
				if sub.id() == self.source_view_id:
					sub.run_command('test_manager', {
						'action': 'set_test_input',
						'data': v.substr(Region(1, v.size())),
						'id': self.test_id
					})
					v.close()
					break
		elif event == 'test-delete':
			for sub in v.window().views():
				if sub.id() == self.source_view_id:
					sub.run_command('test_manager', {
						'action': 'delete_test',
						'id': self.test_id
					})
					v.close()
					break
def update_config(self):
v = self.view
styles = get_test_styles(v)
content = open(root_dir + '/Highlight/test_edit.html').read()
content = content.format(
test_id=self.test_id,
)
content = '<style>' + styles + '</style>' + content
phantom = Phantom(Region(0), content, sublime.LAYOUT_BLOCK, self.cb_action)
self.phantoms.update([phantom])
def memorize_tests(self):
# print([x.memorize() for x in (self.tester.get_tests())])
f = open(self.dbg_file + ':tests', 'w')
f.write(sublime.encode_value([x.memorize() for x in (self.tester.get_tests())], True))
f.close()
	def add_region(self, line, region_prop):
		"""Mark the first character of `line` with the given region style."""
		v = self.view
		pos = v.line(line)
		# NOTE(review): a random region key means the region can never be
		# looked up or erased by name afterwards.
		from random import randint
		v.add_regions(str(randint(0, 1e9)), [Region(pos.a, pos.a + 1)], *region_prop)
	def toggle_side_bar(self):
		# Thin wrapper over the built-in window command.
		self.view.window().run_command('toggle_side_bar')
	def change_process_status(self, status):
		"""Show the given process status in the view's status bar."""
		# name = self.view.name()
		# self.view.set_name(name[:name.index(' ')] + ' -' + status.lower())
		self.view.set_status('process_status', status)
	def clear_all(self):
		"""Erase the view's text and every per-test region decoration."""
		v = self.view
		v.run_command('test_manager', {'action': 'erase_all'})
		if self.tester:
			v.erase_regions('type')
			# Presumably test_iter is the highest test index; -1 covers
			# pre-test keys — confirm against the tester implementation.
			for i in range(-1, self.tester.test_iter + 1):
				v.erase_regions(self.REGION_BEGIN_KEY % i)
				v.erase_regions(self.REGION_END_KEY % i)
				v.erase_regions('line_%d' % i)
				v.erase_regions('test_error_%d' % i)
	def init(self, edit, run_file=None, build_sys=None, clr_tests=False, \
			test='', source_view_id=None, test_id=None, load_session=False):
		"""Set up this view as a scratch editor for a single test.

		Records which source view and test id the edits belong to, marks
		the view as edit_mode, applies the test syntax, and seeds the
		buffer with a protected leading newline followed by the test text.
		"""
		v = self.view
		self.delta_input = 0
		self.test_id = test_id
		self.source_view_id = source_view_id
		v.set_scratch(True)
		v.set_name('test ' + str(test_id) + ' -edit')
		v.run_command('toggle_setting', {'setting': 'line_numbers'})
		v.run_command('set_setting', {'setting': 'fold_buttons', 'value': False})
		v.settings().set('edit_mode', True)
		v.set_syntax_file('Packages/%s/TestSyntax.tmLanguage' % base_name)
		v.insert(edit, 0, '\n' + test)
		self.update_config()
def get_style_test_status(self, nth):
check = self.tester.check_test(nth)
if check:
return self.REGION_ACCEPT_PROP
elif check is False:
return self.REGION_DECLINE_PROP
return self.REGION_UNKNOWN_PROP
	def sync_read_only(self):
		"""Recompute whether the view should be read-only.

		Edit-mode views are always writable.  NOTE(review): the final
		set_read_only call is commented out, so `have_sel_no_end` and
		`end_cursor` are currently computed but never applied.
		"""
		view = self.view
		if view.settings().get('edit_mode'):
			view.set_read_only(False)
			return
		# True when any selection starts before the end of the buffer.
		have_sel_no_end = False
		for sel in view.sel():
			if sel.begin() != view.size():
				have_sel_no_end = True
				break
		# True when the cursor sits at the end while no process is running.
		end_cursor = len(view.sel()) and \
			((self.tester is None) or (not self.tester.proc_run)) and \
			view.size() == view.sel()[0].a
		# view.set_read_only(have_sel_no_end or end_cursor)
	def get_begin_region(self, id):
		"""Return the begin-marker regions recorded for test `id`."""
		v = self.view
		return v.get_regions(self.REGION_BEGIN_KEY % id)
	def run(self, edit, action=None, run_file=None, build_sys=None, text=None, clr_tests=False, \
			test='', source_view_id=None, var_name=None, test_id=None, pos=None, \
			load_session=False, region=None, frame_id=None):
		"""Single dispatch point for every test-edit view operation.

		`action` selects the operation; the remaining keyword arguments
		are only meaningful for the matching branches.  After (almost)
		every action the read-only state is re-synced.
		"""
		v = self.view
		pt = v.sel()[0].begin()
		# NOTE(review): scope_name is computed but never used below.
		scope_name = (v.scope_name(pt).rstrip())
		# Temporarily writable so edits below can be applied.
		v.set_read_only(False)
		if action == 'insert_line':
			self.insert_text(edit)
		elif action == 'insert_cb':
			self.insert_cb(edit)
		elif action == 'insert_opd_input':
			v.insert(edit, self.delta_input, text)
			self.delta_input += len(text)
		elif action == 'insert_opd_out':
			self.delta_input += len(text)
			v.insert(edit, self.view.size(), text)
		elif action == 'replace':
			v.replace(edit, Region(region[0], region[1]), text)
		elif action == 'erase':
			v.erase(edit, Region(region[0], region[1]))
		elif action == 'apply_edit_changes':
			self.apply_edit_changes()
		elif action == 'init':
			self.init(edit, run_file=run_file, build_sys=build_sys, clr_tests=clr_tests, \
				test=test, source_view_id=source_view_id, test_id=test_id, load_session=load_session)
		elif action == 'redirect_var_value':
			self.redirect_var_value(var_name, pos=pos)
		elif action == 'close':
			try:
				self.process_manager.terminate()
			except:
				print('[FastOlympicCoding] process terminating error')
			# v.run_command('test_manager', {'action': 'erase_all'})
		elif action == 'redirect_frames':
			self.redirect_frames()
		elif action == 'select_frame':
			self.select_frame(frame_id)
		elif action == 'new_test':
			self.new_test(edit)
		elif action == 'toggle_new_test':
			self.toggle_new_test()
		elif action == 'delete_tests':
			self.delete_tests(edit)
		elif action == 'accept_test':
			self.set_tests_status()
		elif action == 'decline_test':
			self.set_tests_status(accept=False)
		elif action == 'erase_all':
			v.replace(edit, Region(0, v.size()), '\n')
		elif action == 'show_text':
			# Restore the buffered text and park the cursor at the end.
			v.replace(edit, Region(0, v.size()), self.text_buffer)
			v.sel().clear()
			v.sel().add(Region(v.size(), v.size()))
		elif action == 'hide_text':
			# Stash the current text/selection, then blank the view.
			self.text_buffer = v.substr(Region(0, v.size()))
			self.sel_buffer = v.sel()
			v.run_command('test_manager', {'action':'erase_all'})
		elif action == 'kill_proc':
			self.tester.terminate()
		elif action == 'sync_read_only':
			self.sync_read_only()
		elif action == 'enable_edit_mode':
			# Early return: skip the trailing sync_read_only below.
			self.enable_edit_mode()
			return
		elif action == 'toggle_using_debugger':
			self.use_debugger = not self.use_debugger
			if (self.use_debugger):
				sublime.status_message('debugger enabled')
			else:
				sublime.status_message('debugger disabled')
		elif action == 'set_cursor_to_end':
			v.sel().clear()
			v.sel().add(Region(v.size(), v.size()))
		self.sync_read_only()
	def isEnabled(view, args):
		# NOTE(review): Sublime's API hook is is_enabled(self, ...); this
		# camelCase stub does nothing and is presumably never invoked by
		# the framework — confirm before removing.
		pass
class EditModifyListener(sublime_plugin.EventListener):
	"""Keeps edit-mode test views well-formed as the selection changes."""

	def on_selection_modified(self, view):
		if view.settings().get('edit_mode'):
			# Re-seed an emptied view with the leading newline the edit
			# view is initialized with.
			if view.size() == 0:
				view.run_command('test_edit', {
					'action': 'replace',
					'region': [0, view.size()],
					'text': '\n'
				})
			# Clamp every selection endpoint to >= 1 so the first
			# character stays untouched.
			# NOTE(review): `change` only checks reg.a == 0, so a reverse
			# selection whose b == 0 is clamped in `mod` but never applied.
			mod = []
			change = False
			for reg in view.sel():
				if reg.a == 0:
					change = True
				mod.append(Region(max(reg.a, 1), max(reg.b, 1)))
			if change:
				view.sel().clear()
				view.sel().add_all(mod)
| Jatana/FastOlympicCoding | test_edit.py | test_edit.py | py | 9,634 | python | en | code | 365 | github-code | 13 |
43230317536 | #!/usr/bin/env python
from os import path
import sys
# Directory containing this program.
PROGDIR = path.dirname(path.realpath(__file__))
# For click_common and common.
# (These sys.path edits must run before the local imports that follow.)
sys.path.insert(0, path.join(PROGDIR, ".."))
# For python_config.
sys.path.insert(0, path.join(PROGDIR, "..", "..", "etc"))
import click_common
import common
import python_config
# If True, then do not run experiments and instead only print configurations.
DRY_RUN = False
# Upper bound (inclusive) of the "in_advance" queue-resize sweep in main().
MAX_RESIZE = 4500
def maybe(fnc, do=not DRY_RUN):
    """Invoke fnc() when `do` is truthy; by default actions are skipped
    in dry-run mode (the default is captured once at definition time)."""
    if not do:
        return
    fnc()
def main():
    """Build the experiment configurations for every CC mode and run them.

    With DRY_RUN set, all cluster-touching steps are skipped (see maybe())
    and only the configurations are printed.
    """
    cnfs = []
    # CC modes are the outside loop to minimize how frequently we change the CC
    # mode, since doing so requires restarting the cluster.
    for cc in python_config.CCS:
        # Compute this once per CC mode. Previously the dynamic-buffer sweep
        # below read a "dctcp" variable that was only assigned in the else
        # branch of sweep (2)/(3) — a NameError on the first cubic/retcp
        # iteration and a stale value on later ones.
        dctcp = cc == "dctcp"
        # (1) Long days, static buffers. Show the cases where TCP ramp up is
        # not a problem.
        if cc in ["cubic"]:
            # Old optical switches.
            cnfs += [{"type": "strobe", "buffer_size": 16,
                      "night_len_us": 1000. * python_config.TDF,
                      "day_len_us": 9000. * python_config.TDF,
                      "cc": cc}]
            # New optical switches, but using long days.
            cnfs += [{"type": "strobe", "buffer_size": 16,
                      "night_len_us": (python_config.RECONFIG_DELAY_us *
                                       python_config.TDF),
                      "day_len_us": 9000. * python_config.TDF, "cc": cc}]
        # (2) Static buffers. Show that all the TCP variants perform poorly
        # when nights/days are short.
        # (3) reTCP. Show how much improvement reTCP offers with static
        # buffers (i.e., on its own).
        if cc in ["cubic", "retcp"]:
            for exp in range(2, 8):
                cnfs += [{"type": "strobe", "buffer_size": 2**exp, "cc": cc}]
        else:
            cnf = {"type": "strobe", "buffer_size": 2**4, "cc": cc}
            # For DCTCP, we will enable threshold-based ECN marking.
            if dctcp:
                cnf["ecn"] = python_config.DCTCP_THRESH
            cnfs += [cnf]
        # (4) Dynamic buffers. Show that dynamic buffers help TCP cubic when
        # nights/days are short.
        # (5) reTCP. Show how much improvement reTCP offers with dynamic
        # buffers.
        if cc in ["cubic", "retcp"]:
            # Sweep the resize lead time from 0 to MAX_RESIZE in 500us steps
            # (range() replaces the py2-only xrange + "% 500" filter).
            for in_advance in range(0, MAX_RESIZE + 1, 500):
                cnf = {"type": "strobe", "buffer_size": 16,
                       "queue_resize": True, "in_advance": in_advance,
                       "cc": cc}
                if dctcp:
                    cnf["ecn"] = python_config.DCTCP_THRESH
                cnfs += [cnf]
    # For all configurations, enable the packet log.
    cnfs = [dict(cnf, packet_log=True) for cnf in cnfs]
    # Use the first experiment's CC mode, or "cubic" if no CC mode is
    # specified. This avoids unnecessarily restarting the cluster.
    maybe(lambda: common.initializeExperiment(
        "flowgrindd", cnfs[0].get("cc", "cubic")))
    # Total number of experiments.
    tot = len(cnfs)
    for cnt, cnf in enumerate(cnfs, start=1):
        maybe(lambda: click_common.setConfig(cnf))
        print("--- experiment {} of {}, config:\n{}".format(cnt, tot, cnf))
        maybe(lambda: common.flowgrind(
            settings={"flows": [{"src": "r1", "dst": "r2"}]}))
    maybe(lambda: common.finishExperiment())
# Entry point: build and (unless DRY_RUN) run all experiment configurations.
if __name__ == "__main__":
    main()
| mukerjee/etalon | experiments/buffers/optsys.py | optsys.py | py | 3,455 | python | en | code | 12 | github-code | 13 |
42987267960 | import os
# Helper functions
def getFiles(path, extension):
    """
    Recursively collect everything under *path* that ends with *extension*.

    A leading dot is added to *extension* if missing. UFOs are directories,
    so for the ".ufo" extension the matching directory paths themselves are
    returned; for any other extension, full paths of matching files are
    returned.
    """
    if not extension.startswith('.'):
        extension = '.' + extension
    if extension == '.ufo':
        return [dirpath for dirpath, _subdirs, _filenames in os.walk(path)
                if dirpath.endswith(extension)]
    return [os.sep.join((dirpath, filename))
            for dirpath, _subdirs, filenames in os.walk(path)
            for filename in filenames
            if filename.endswith(extension)]
def splitall(path):
    """
    Break *path* into a list of all of its individual components.

    *path* is the path to split; the root separator (for absolute paths)
    and the drive/first component (for relative paths) act as sentinels
    that stop the walk.
    """
    parts = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # Absolute-path sentinel: os.path.split stopped making progress.
            parts.insert(0, head)
            break
        if tail == path:
            # Relative-path sentinel: a single component remains.
            parts.insert(0, tail)
            break
        path = head
        parts.insert(0, tail)
    return parts
def printProgressBar(iteration, total, prefix='', suffix='',
                     decimals=1, length=100, fill='█'):
    """
    Render one frame of a terminal progress bar; call once per iteration.

    iteration - current iteration (int, required)
    total     - total iterations (int, required)
    prefix    - text before the bar (str, optional)
    suffix    - text after the bar (str, optional)
    decimals  - decimal places in the percentage (int, optional)
    length    - bar width in characters (int, optional)
    fill      - fill character (str, optional)

    Nabbed, of course, from Stack Overflow
    (https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console)
    """
    pct_fmt = "{0:." + str(decimals) + "f}"
    percent = pct_fmt.format(100 * (iteration / float(total)))
    filled = int(length * iteration // total)
    bar = fill * filled + '-' * (length - filled)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
    # Finish with a newline once the bar is complete.
    if iteration == total:
        print()
def makeWOFF(files, destination):
    """
    Compress each font in *files* to WOFF2.

    *files* is a `list` of font file paths as `string`
    *destination* is a `string` path of the directory to save the WOFF
    files into; it is created if it does not already exist, and any
    existing output file is overwritten.
    """
    from fontTools.ttLib import woff2
    from fontTools.ttx import makeOutputFileName

    if not os.path.exists(destination):
        os.mkdir(destination)
    total = len(files)
    print("🏗 Making WOFF2")
    printProgressBar(0, total, prefix=' ', suffix='Complete', length=50)
    for done, source in enumerate(files, start=1):
        target = makeOutputFileName(source,
                                    outputDir=destination,
                                    extension='.woff2')
        if os.path.exists(target):
            os.remove(target)
        woff2.compress(source, target)
        printProgressBar(done, total, prefix=' ', suffix='Complete', length=50)
def batchCheckOutlines(root):
    """Run afdko's checkoutlinesufo over every UFO under *root* and write a
    filtered report to <root>/checkoutlines.txt (any previous report is
    replaced)."""
    from afdko.checkoutlinesufo import run as checkoutlinesufo
    from contextlib import redirect_stdout, redirect_stderr
    import re
    files = getFiles(root, "ufo")
    # Known-noise messages dropped from the final report.
    skips = ["uni000D has no contours\n",
             "uniE0A0 has no contours\n", ]
    outputFile = os.path.join(root, "checkoutlines.txt")
    if os.path.exists(outputFile):
        os.remove(outputFile)
    print("🏗 Running checkoutlinesUFO on files")
    printProgressBar(0, len(files), prefix=' ', suffix='Complete', length=50)
    for i, file in enumerate(files):
        # checkoutlinesufo prints straight to stdout/stderr, so both streams
        # are redirected into the (appended) report file for this UFO.
        with open(outputFile, "a") as f:
            with redirect_stdout(f), redirect_stderr(f):
                print(f"Checking {file}")
                checkoutlinesufo([file, "--all"])
                print("\n\n")
        printProgressBar(i + 1, len(files), prefix=' ',
                         suffix='Complete', length=50)
    # Second pass: strip progress-dot runs and "Flat curve" notes, drop
    # known noise, and rewrite the report with only meaningful findings.
    log = []
    with open(outputFile, "r") as f:
        for line in f:
            if not line.startswith("Checking"):
                pass1 = re.sub(r'[\.]{2,}', '', line)
                pass2 = re.sub(r' Flat curve at \([0-9,\.]+, [0-9,\.]+\)\.',
                               '', pass1)
                # Keep only lines that still carry a message after cleanup.
                if len(pass2.split()) > 1:
                    if pass2 not in skips:
                        log.append(pass2)
            elif line.startswith("Checking"):
                log.append("\n\n" + line)
    with open(outputFile, "w") as f:
        f.write("".join(log))
def make_mark_mkmk_gdef_feature(font, GDEF_Classes=True):
    from collections import defaultdict
    # NOTE(review): the string below is not a real docstring (it follows the
    # import statement), but it documents the function's intent.
    """
    Takes in a font and builds the mark, mkmk,
    and gdef table in Adobe Feature File syntax.
    *font* is a Defcon like font object.

    Anchor naming conventions this relies on (as exercised below):
    - anchors named "caret*" are ligature caret positions (GDEF),
    - anchors named "_*" are marks,
    - all other anchors are bases,
    - glyphs ending in ".case" are treated as uppercase mark variants.
    Returns the generated feature text, and also appends it to
    font.features.text and saves the font in place.
    """
    # Gather all the data we need
    ligCarets = defaultdict(list)
    mark = defaultdict(list)
    base = defaultdict(list)
    for glyph in font:
        # note, we're rounding (by int()) the anchor positions
        if len(glyph.anchors) > 0:
            # there can be more than one ligCaret in a ligature
            # so need to store them before writing them out
            carets = []
            for a in glyph.anchors:
                # Lig caret marks are named starting
                # caret, so we look for those. Only
                # need the x position for the feature
                if a.name.startswith('caret'):
                    carets.append(int(a.x))
                # if a anchor name starts with a
                # underscore, it's a mark
                elif a.name.startswith("_"):
                    mark[(int(a.x), int(a.y))].append((a.name, glyph.name))
                # if it's not a ligature caret or a mark, it's base
                else:
                    base[(int(a.x), int(a.y))].append((a.name, glyph.name))
            # make a dict of all the same caret positions
            # with the glyph names as values. Streamines
            # the GDEF table
            if carets != []:
                ligCarets[tuple(carets)].append(glyph.name)
    # Now process the data
    # Mark name list
    kinds = list(base.keys())
    # Get a list of all the ligs in the font for GDEF
    ligatures = []
    for names in ligCarets.values():
        ligatures += names
    ligatures.sort()
    # Sort out the marks
    uc_marks = defaultdict(list)
    lc_marks = defaultdict(list)
    common_marks = defaultdict(list)
    mark_names = []
    mark_glyph_names = []
    uc_marks_names = []
    # Bucket every mark glyph by anchor name, splitting ".case" (uppercase)
    # variants from the lowercase ones; position is kept alongside.
    for pos, data in mark.items():
        _lc_marks = defaultdict(list)
        _uc_marks = defaultdict(list)
        for markName, glyphName in data:
            markName = markName[1:]
            if markName not in mark_names:
                mark_names.append(markName)
            if glyphName not in mark_glyph_names:
                mark_glyph_names.append(glyphName)
            if glyphName.endswith(".case"):
                _uc_marks[markName].append(glyphName)
                uc_marks_names.append(glyphName)
            else:
                _lc_marks[markName].append(glyphName)
        for markName, glyphs in _uc_marks.items():
            uc_marks[markName].append((pos, glyphs))
        for markName, glyphs in _lc_marks.items():
            lc_marks[markName].append((pos, glyphs))
    # Marks without a ".case" counterpart are "common": they attach to both
    # uppercase and lowercase bases, so pull them out of lc_marks.
    for markName, data in lc_marks.items():
        newData = []
        for pos, glyphNames in data:
            common = []
            for n in glyphNames:
                if (n + ".case") not in uc_marks_names:
                    common.append(n)
            for i in common:
                glyphNames.remove(i)
            if common != []:
                common_marks[markName].append((pos, common))
            if glyphNames != []:
                newData.append((pos, glyphNames))
        lc_marks[markName] = newData
    mark_names.sort()
    mark_glyph_names.sort()
    # Collect base info
    # This is a hardcoded list, if marks are added to another UC glyph, this
    # list needs extending. One could do this by looking at all glyphs with
    # anchors anddoing a check of unicode category, but then you have to check
    # for non-encodedglyphs also.
    uc_base_names = ['A', 'AE', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
                     'K', 'L', 'L.sans', 'M', 'N', 'O', 'Oslash', 'P', 'Q',
                     'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'Z.sans',
                     'Ldotbelow','Rdotbelow']
    uc_bases = defaultdict(list)
    bases = defaultdict(list)
    mkmk_bases = defaultdict(list)
    mkmk_uc_bases = defaultdict(list)
    base_names = []
    # Split base anchors four ways: uppercase base glyphs, mark glyphs acting
    # as bases (mkmk, upper and lower), and everything else. Mark glyphs are
    # removed from base_names so GDEF classes stay disjoint.
    for pos, data in base.items():
        _bases = defaultdict(list)
        _uc_bases = defaultdict(list)
        _mkmk_bases = defaultdict(list)
        _mkmk_uc_bases = defaultdict(list)
        for markName, glyphName in data:
            if glyphName not in base_names:
                base_names.append(glyphName)
            if glyphName in uc_base_names and markName in uc_marks.keys():
                _uc_bases[markName].append(glyphName)
            elif glyphName in mark_glyph_names and glyphName.endswith(".case"):
                _mkmk_uc_bases[markName].append(glyphName)
                base_names.remove(glyphName)
            elif glyphName in mark_glyph_names:
                _mkmk_bases[markName].append(glyphName)
                base_names.remove(glyphName)
            else:
                _bases[markName].append(glyphName)
        for markName, glyphs in _uc_bases.items():
            uc_bases[markName].append((pos, glyphs))
        for markName, glyphs in _bases.items():
            bases[markName].append((pos, glyphs))
        for markName, glyphs in _mkmk_bases.items():
            mkmk_bases[markName].append((pos, glyphs))
        for markName, glyphs in _mkmk_uc_bases.items():
            mkmk_uc_bases[markName].append((pos, glyphs))
    base_names.sort()
    # Make features
    # One lookup per (mark name, case flavor): uc / lc / common, each with a
    # matching mkmk lookup when the mark itself can carry marks.
    classes = ""
    mark_feature = "feature mark {\n"
    mkmk_feature = "feature mkmk {\n"
    for n in mark_names:
        if n in uc_marks.keys():
            for a in uc_marks[n]:
                pos = a[0]
                names = a[1]
                classes += f"markClass [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> @mark_uc_{n};\n"
            if n in uc_bases:
                mark_feature += f"  lookup mark_uc_{n} {{\n"
                for base in uc_bases[n]:
                    pos = base[0]
                    names = base[1]
                    mark_feature += f"    pos base [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> mark @mark_uc_{n};\n"
                mark_feature += f"  }} mark_uc_{n};\n"
            if n in mkmk_uc_bases:
                mkmk_feature += f"  lookup mkmk_uc_{n} {{\n"
                for base in mkmk_uc_bases[n]:
                    pos = base[0]
                    names = base[1]
                    mkmk_feature += f"    pos mark [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> mark @mark_uc_{n};\n"
                mkmk_feature += f"  }} mkmk_uc_{n};\n"
        if n in lc_marks.keys():
            for a in lc_marks[n]:
                pos = a[0]
                names = a[1]
                classes += f"markClass [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> @mark_lc_{n};\n"
            if n in bases:
                mark_feature += f"  lookup mark_lc_{n} {{\n"
                for base in bases[n]:
                    pos = base[0]
                    names = base[1]
                    mark_feature += f"    pos base [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> mark @mark_lc_{n};\n"
                mark_feature += f"  }} mark_lc_{n};\n"
            if n in mkmk_bases:
                mkmk_feature += f"  lookup mkmk_lc_{n} {{\n"
                for base in mkmk_bases[n]:
                    pos = base[0]
                    names = base[1]
                    mkmk_feature += f"    pos mark [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> mark @mark_lc_{n};\n"
                mkmk_feature += f"  }} mkmk_lc_{n};\n"
        if n in common_marks.keys():
            for a in common_marks[n]:
                pos = a[0]
                names = a[1]
                classes += f"markClass [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> @mark_common_{n};\n"
            # build our common bases for this mark
            common_bases = []
            if n in bases:
                common_bases.append(bases[n])
            if n in uc_bases:
                common_bases.append(uc_bases[n])
            common_mkmk_bases = []
            if n in mkmk_bases:
                if mkmk_bases[n][0][1] != []:
                    common_mkmk_bases.append(mkmk_bases[n])
            if n in mkmk_uc_bases:
                if mkmk_uc_bases[n][0][1] != []:
                    common_mkmk_bases.append(mkmk_uc_bases[n])
            if common_bases != []:
                mark_feature += f"  lookup mark_common_{n} {{\n"
                for c in common_bases:
                    for pos, names in c:
                        mark_feature += f"    pos base [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> mark @mark_common_{n};\n"
                mark_feature += f"  }} mark_common_{n};\n"
            if common_mkmk_bases != []:
                mkmk_feature += f"  lookup mkmk_common_{n} {{\n"
                for c in common_mkmk_bases:
                    for pos, names in c:
                        mkmk_feature += f"    pos mark [{' '.join(names)}] <anchor {str(pos[0])} {str(pos[1])}> mark @mark_common_{n};\n"
                mkmk_feature += f"  }} mkmk_common_{n};\n"
    mark_feature += "} mark;"
    mkmk_feature += "} mkmk;"
    # GDEF: optional glyph class definitions plus ligature caret positions.
    if GDEF_Classes:
        gdef = f"@BASE = [{' '.join(base_names)}];\n@MARKS = [{' '.join(mark_glyph_names)}];\n@LIGATURES = [{' '.join(ligatures)}];\n\ntable GDEF {{\n  GlyphClassDef @BASE, @LIGATURES, @MARKS,;\n"
    else:
        gdef = "table GDEF {\n"
    for k, v in ligCarets.items():
        if len(v) > 1:
            gdef += f"  LigatureCaretByPos [{' '.join(v)}] {' '.join(str(i) for i in k)};\n"
        else:
            gdef += f"  LigatureCaretByPos {' '.join(v)} {' '.join(str(i) for i in k)};\n"
    gdef += "} GDEF;"
    # Append the generated text to the font's features and save in place.
    font.features.text += f'{classes}\n{mark_feature}\n{mkmk_feature}\n{gdef}'
    font.save(font.path)
    return f'{classes}\n{mark_feature}\n{mkmk_feature}\n{gdef}'
# CLI entry point: either batch-convert fonts to WOFF2 (-w) or run
# checkoutlinesufo over all UFOs (-c) in the given directory.
if __name__ == "__main__":
    import argparse
    description = "Two helper tools"
    parser = argparse.ArgumentParser(description=description)
    group = parser.add_mutually_exclusive_group()
    parser.add_argument("directory", help="Directory of files to work on")
    group.add_argument("-w", "--woff", action="store_true",
                       help="Make WOFF & WOFF2 files from fonts in directory")
    group.add_argument("-c", "--checkoutlines", action="store_true",
                       help="Run checkoutlines on all UFOs in directory")
    args = parser.parse_args()
    if args.woff:
        out = os.path.join(args.directory, "WOFF")
        ttfs = getFiles(args.directory, 'ttf')
        otfs = getFiles(args.directory, 'otf')
        fonts = ttfs + otfs
        # NOTE(review): looks like leftover debug output — consider removing.
        print(fonts)
        if len(fonts) != 0:
            makeWOFF(fonts, out)
        else:
            print("No otfs or ttfs to make WOFFs from")
    if args.checkoutlines:
        batchCheckOutlines(args.directory)
| arrowtype/recursive | mastering/utils.py | utils.py | py | 15,682 | python | en | code | 2,922 | github-code | 13 |
71899352338 | from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
from operator import attrgetter
import sys
import six
from docker.errors import APIError
from docker.utils import create_host_config
from .config import DOCKER_CONFIG_KEYS
from .container import Container, get_container_name
from .progress_stream import stream_output, StreamOutputError
log = logging.getLogger(__name__)
# Config keys that belong to the container *start* phase (host config);
# they are stripped from the create payload in _get_container_create_options.
DOCKER_START_KEYS = [
    'cap_add',
    'cap_drop',
    'dns',
    'dns_search',
    'env_file',
    'extra_hosts',
    'net',
    'pid',
    'privileged',
    'restart',
]
# Character class for valid project and service names.
VALID_NAME_CHARS = '[a-zA-Z0-9]'
class BuildError(Exception):
    """Raised when building a service's image fails.

    Attributes:
        service: the Service whose build failed.
        reason: explanation of the failure (string or build event dict).
    """
    def __init__(self, service, reason):
        # Pass the reason to Exception so str(exc) is informative instead
        # of empty (previously Exception.__init__ was never called).
        super(BuildError, self).__init__(reason)
        self.service = service
        self.reason = reason
class CannotBeScaledError(Exception):
    """Raised when scale() is called on a service with fixed host ports."""
    pass
class ConfigError(ValueError):
    """Raised for invalid service configuration (names, volumes, ports...)."""
    pass
# external: host path (or None), internal: container path, mode: 'rw'/'ro'.
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
# Parsed form of a container name: <project>_<service>_<number>.
ServiceName = namedtuple('ServiceName', 'project service number')
class Service(object):
    """One service from the compose configuration.

    Wraps a docker-py client to create, start, stop, scale and rebuild the
    containers that implement this service. Containers are identified by
    name, following the <project>_<service>_<number> convention.
    """
    def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
        if not re.match('^%s+$' % VALID_NAME_CHARS, name):
            raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
        if not re.match('^%s+$' % VALID_NAME_CHARS, project):
            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)
        if 'image' not in options and 'build' not in options:
            raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)
        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.external_links = external_links or []
        self.volumes_from = volumes_from or []
        self.net = net or None
        self.options = options
    def containers(self, stopped=False, one_off=False):
        """List this service's containers; include stopped ones if asked."""
        return [Container.from_ps(self.client, container)
                for container in self.client.containers(all=stopped)
                if self.has_container(container, one_off=one_off)]
    def has_container(self, container, one_off=False):
        """Return True if `container` was created to fulfill this service."""
        name = get_container_name(container)
        if not name or not is_valid_name(name, one_off):
            return False
        project, name, _number = parse_name(name)
        return project == self.project and name == self.name
    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        for container in self.client.containers():
            if not self.has_container(container):
                continue
            _, _, container_number = parse_name(get_container_name(container))
            if container_number == number:
                return Container.from_ps(self.client, container)
        raise ValueError("No container found for %s_%s" % (self.name, number))
    def start(self, **options):
        """Start every container of this service that is not already running."""
        for c in self.containers(stopped=True):
            self.start_container_if_stopped(c, **options)
    def stop(self, **options):
        """Stop all running containers of this service."""
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)
    def kill(self, **options):
        """Kill all running containers of this service."""
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)
    def restart(self, **options):
        """Restart all running containers of this service."""
        for c in self.containers():
            log.info("Restarting %s..." % c.name)
            c.restart(**options)
    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()
        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            log.info("Creating %s..." % self._next_container_name(containers))
            containers.append(self.create_container(detach=True))
        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)
        # Stop containers (highest numbers first)
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)
        # Start containers (lowest numbers first)
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)
        self.remove_stopped()
    def remove_stopped(self, **options):
        """Remove every container of this service that is not running."""
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)
    def create_container(self,
                         one_off=False,
                         insecure_registry=False,
                         do_build=True,
                         intermediate_container=None,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        container_options = self._get_container_create_options(
            override_options,
            one_off=one_off,
            intermediate_container=intermediate_container,
        )
        if (do_build and
                self.can_be_built() and
                not self.client.images(name=self.full_name)):
            self.build()
        try:
            return Container.create(self.client, **container_options)
        except APIError as e:
            # 404 "No such image": pull it and retry the create once.
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                log.info('Pulling image %s...' % container_options['image'])
                output = self.client.pull(
                    container_options['image'],
                    stream=True,
                    insecure_registry=insecure_registry
                )
                stream_output(output, sys.stdout)
                return Container.create(self.client, **container_options)
            raise
    def recreate_containers(self, insecure_registry=False, do_build=True, **override_options):
        """
        If a container for this service doesn't exist, create and start one. If there are
        any, stop them, create+start new ones, and remove the old containers.
        """
        containers = self.containers(stopped=True)
        if not containers:
            log.info("Creating %s..." % self._next_container_name(containers))
            container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
                **override_options)
            self.start_container(container)
            return [(None, container)]
        else:
            tuples = []
            for c in containers:
                log.info("Recreating %s..." % c.name)
                tuples.append(self.recreate_container(c, insecure_registry=insecure_registry, **override_options))
            return tuples
    def recreate_container(self, container, **override_options):
        """Recreate a container. An intermediate container is created so that
        the new container has the same name, while still supporting
        `volumes-from` the original container.
        """
        try:
            container.stop()
        except APIError as e:
            # 500 "no such process": the container is already dead; ignore.
            if (e.response.status_code == 500
                    and e.explanation
                    and 'no such process' in str(e.explanation)):
                pass
            else:
                raise
        # Throwaway container that holds the old container's volumes alive
        # while the old one is removed and the replacement is created.
        intermediate_container = Container.create(
            self.client,
            image=container.image,
            entrypoint=['/bin/echo'],
            command=[],
            detach=True,
            host_config=create_host_config(volumes_from=[container.id]),
        )
        intermediate_container.start()
        intermediate_container.wait()
        container.remove()
        options = dict(override_options)
        new_container = self.create_container(
            do_build=False,
            intermediate_container=intermediate_container,
            **options
        )
        self.start_container(new_container)
        intermediate_container.remove()
        return (intermediate_container, new_container)
    def start_container_if_stopped(self, container):
        """Start *container* unless it is already running; return it."""
        if container.is_running:
            return container
        else:
            log.info("Starting %s..." % container.name)
            return self.start_container(container)
    def start_container(self, container):
        """Start *container* and return it."""
        container.start()
        return container
    def start_or_create_containers(
            self,
            insecure_registry=False,
            detach=False,
            do_build=True):
        """Ensure at least one container exists and all containers run."""
        containers = self.containers(stopped=True)
        if not containers:
            log.info("Creating %s..." % self._next_container_name(containers))
            new_container = self.create_container(
                insecure_registry=insecure_registry,
                detach=detach,
                do_build=do_build,
            )
            return [self.start_container(new_container)]
        else:
            return [self.start_container_if_stopped(c) for c in containers]
    def get_linked_names(self):
        """Names of the services this service links to."""
        return [s.name for (s, _) in self.links]
    def get_volumes_from_names(self):
        """Names of the services this service takes volumes from."""
        return [s.name for s in self.volumes_from if isinstance(s, Service)]
    def get_net_name(self):
        """Name of the service whose network stack is shared, if any."""
        if isinstance(self.net, Service):
            return self.net.name
        else:
            return
    def _next_container_name(self, all_containers, one_off=False):
        # <project>_<service>[_run]_<number>; "run" marks one-off containers.
        bits = [self.project, self.name]
        if one_off:
            bits.append('run')
        return '_'.join(bits + [str(self._next_container_number(all_containers))])
    def _next_container_number(self, all_containers):
        numbers = [parse_name(c.name).number for c in all_containers]
        return 1 if not numbers else max(numbers) + 1
    def _get_links(self, link_to_self):
        # Build (container_name, alias) pairs for every linked service's
        # containers, plus external links and (optionally) self-links.
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                links.append((container.name, link_name or service.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, self.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        for external_link in self.external_links:
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links.append((external_link, link_name))
        return links
    def _get_volumes_from(self, intermediate_container=None):
        # Resolve volumes_from entries (Service or Container) to container
        # ids, creating a container for a service that has none yet.
        volumes_from = []
        for volume_source in self.volumes_from:
            if isinstance(volume_source, Service):
                containers = volume_source.containers(stopped=True)
                if not containers:
                    volumes_from.append(volume_source.create_container().id)
                else:
                    volumes_from.extend(map(attrgetter('id'), containers))
            elif isinstance(volume_source, Container):
                volumes_from.append(volume_source.id)
        if intermediate_container:
            volumes_from.append(intermediate_container.id)
        return volumes_from
    def _get_net(self):
        # Translate the net option into docker's network_mode string.
        if not self.net:
            return "bridge"
        if isinstance(self.net, Service):
            containers = self.net.containers()
            if len(containers) > 0:
                net = 'container:' + containers[0].id
            else:
                log.warning("Warning: Service %s is trying to use reuse the network stack "
                            "of another service that is not running." % (self.net.name))
                net = None
        elif isinstance(self.net, Container):
            net = 'container:' + self.net.id
        else:
            net = self.net
        return net
    def _get_container_create_options(self, override_options, one_off=False, intermediate_container=None):
        """Assemble the keyword arguments for docker-py's create_container."""
        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)
        container_options['name'] = self._next_container_name(
            self.containers(stopped=True, one_off=one_off),
            one_off)
        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches the behavior of
        # the official Docker CLI in that scenario.
        if ('hostname' in container_options
                and 'domainname' not in container_options
                and '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]
        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                # Keep only the internal port (and protocol, if given).
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports
        if 'volumes' in container_options:
            container_options['volumes'] = dict(
                (parse_volume_spec(v).internal, {})
                for v in container_options['volumes'])
        if self.can_be_built():
            container_options['image'] = self.full_name
        else:
            container_options['image'] = self._get_image_name(container_options['image'])
        # Delete options which are only used when starting
        for key in DOCKER_START_KEYS:
            container_options.pop(key, None)
        container_options['host_config'] = self._get_container_host_config(override_options, one_off=one_off, intermediate_container=intermediate_container)
        return container_options
    def _get_container_host_config(self, override_options, one_off=False, intermediate_container=None):
        """Build the docker-py HostConfig for this service's containers."""
        options = dict(self.options, **override_options)
        port_bindings = build_port_bindings(options.get('ports') or [])
        volume_bindings = dict(
            build_volume_binding(parse_volume_spec(volume))
            for volume in options.get('volumes') or []
            if ':' in volume)
        privileged = options.get('privileged', False)
        cap_add = options.get('cap_add', None)
        cap_drop = options.get('cap_drop', None)
        pid = options.get('pid', None)
        dns = options.get('dns', None)
        if isinstance(dns, six.string_types):
            dns = [dns]
        dns_search = options.get('dns_search', None)
        if isinstance(dns_search, six.string_types):
            dns_search = [dns_search]
        restart = parse_restart_spec(options.get('restart', None))
        extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
        return create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=port_bindings,
            binds=volume_bindings,
            volumes_from=self._get_volumes_from(intermediate_container),
            privileged=privileged,
            network_mode=self._get_net(),
            dns=dns,
            dns_search=dns_search,
            restart_policy=restart,
            cap_add=cap_add,
            cap_drop=cap_drop,
            extra_hosts=extra_hosts,
            pid_mode=pid
        )
    def _get_image_name(self, image):
        """Normalize an image reference to repo:tag (default tag: latest)."""
        repo, tag = parse_repository_tag(image)
        if tag == "":
            tag = "latest"
        return '%s:%s' % (repo, tag)
    def build(self, no_cache=False):
        """Build this service's image; return the built image id.

        Raises BuildError when the build stream fails or no image id can be
        parsed from the output.
        """
        log.info('Building %s...' % self.name)
        build_output = self.client.build(
            self.options['build'],
            tag=self.full_name,
            stream=True,
            rm=True,
            nocache=no_cache,
            dockerfile=self.options.get('dockerfile', None),
        )
        try:
            all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
            # NOTE: unicode() is Python 2 only.
            raise BuildError(self, unicode(e))
        image_id = None
        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)
        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')
        return image_id
    def can_be_built(self):
        """True when this service builds its image from a build path."""
        return 'build' in self.options
    @property
    def full_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)
    def can_be_scaled(self):
        """Scaling is impossible when a host port is pinned (would clash)."""
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True
    def pull(self, insecure_registry=False):
        """Pull this service's image (no-op for build-based services)."""
        if 'image' in self.options:
            image_name = self._get_image_name(self.options['image'])
            log.info('Pulling %s (%s)...' % (self.name, image_name))
            self.client.pull(
                image_name,
                insecure_registry=insecure_registry
            )
# <project>_<service>[_run]_<number>; "run_" marks one-off containers.
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')


def is_valid_name(name, one_off=False):
    """Return True if *name* matches the compose container-name pattern.

    One-off containers carry a `run_` marker before the sequence number;
    *one_off* selects which flavor counts as valid.
    """
    match = NAME_RE.match(name)
    if match is None:
        return False
    has_run_marker = match.group(3) == 'run_'
    return has_run_marker if one_off else match.group(3) is None
def parse_name(name):
    """Split a container name into ServiceName(project, service, number)."""
    project, service_name, _one_off, number = NAME_RE.match(name).groups()
    return ServiceName(project, service_name, int(number))
def parse_restart_spec(restart_config):
    """Parse "mode" or "mode:max_retry" into a docker RestartPolicy dict.

    Returns None when no policy is configured; raises ConfigError for a
    malformed spec.
    """
    if not restart_config:
        return None
    parts = restart_config.split(':')
    if len(parts) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)
    name = parts[0]
    max_retry_count = parts[1] if len(parts) == 2 else 0
    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
def parse_volume_spec(volume_config):
    """Parse "[external:]internal[:mode]" into a VolumeSpec.

    mode defaults to 'rw'; raises ConfigError for too many parts or an
    unknown mode.
    """
    parts = volume_config.split(':')
    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)
    if len(parts) == 1:
        return VolumeSpec(None, parts[0], 'rw')
    external, internal = parts[0], parts[1]
    mode = parts[2] if len(parts) == 3 else 'rw'
    if mode not in ('rw', 'ro'):
        raise ConfigError("Volume %s has invalid mode (%s), should be "
                          "one of: rw, ro." % (volume_config, mode))
    return VolumeSpec(external, internal, mode)
def parse_repository_tag(s):
    """Split an image reference into (repository, tag).

    The tag is "" when absent; a colon inside the registry host part
    (e.g. "host:5000/repo") is not mistaken for a tag separator.
    """
    if ":" not in s:
        return s, ""
    repo, _, tag = s.rpartition(":")
    if "/" in tag:
        return s, ""
    return repo, tag
def build_volume_binding(volume_spec):
    """Turn a VolumeSpec into a (host_path, bind_options) pair for docker-py."""
    read_only = volume_spec.mode == 'ro'
    return volume_spec.external, {'bind': volume_spec.internal, 'ro': read_only}
def build_port_bindings(ports):
    """Group port specs by internal port: {internal: [external, ...]}."""
    bindings = {}
    for port in ports:
        internal_port, external = split_port(port)
        bindings.setdefault(internal_port, []).append(external)
    return bindings
def split_port(port):
    """Split "[[ip:]external:]internal[/proto]" into (internal, external).

    external is None when unmapped, a plain string for "ext:int", or an
    (ip, external_or_None) tuple when an address is given. Raises
    ConfigError for a malformed spec.
    """
    parts = str(port).split(':')
    if not 1 <= len(parts) <= 3:
        raise ConfigError('Invalid port "%s", should be '
                          '[[remote_ip:]remote_port:]port[/protocol]' % port)
    if len(parts) == 1:
        return parts[0], None
    if len(parts) == 2:
        external_port, internal_port = parts
        return internal_port, external_port
    external_ip, external_port, internal_port = parts
    return internal_port, (external_ip, external_port or None)
def build_extra_hosts(extra_hosts_config):
    """Normalize extra_hosts config (list of "host:ip" or a dict) to a dict.

    Returns {} for an empty/None config; raises ConfigError for anything
    that is neither a list of strings nor a mapping.
    """
    if not extra_hosts_config:
        return {}
    if isinstance(extra_hosts_config, list):
        mapping = {}
        for entry in extra_hosts_config:
            if not isinstance(entry, six.string_types):
                raise ConfigError(
                    "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
                    extra_hosts_config
                )
            host, ip = entry.split(':')
            mapping[host.strip()] = ip.strip()
        return mapping
    if isinstance(extra_hosts_config, dict):
        return extra_hosts_config
    raise ConfigError(
        "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
        extra_hosts_config
    )
| ianblenke/awsebcli | ebcli/bundled/_compose/service.py | service.py | py | 22,719 | python | en | code | 3 | github-code | 13 |
class Tournament:
    """A chess tournament: identity, schedule, and its rounds/players."""
    def __init__(self, name, place, date, rounds, time, description):
        """Store the tournament metadata; rounds and players start empty."""
        self.name = name
        self.place = place
        self.date = date
        self.rounds = rounds  # expected number of rounds
        self.round = []  # rounds actually played so far
        self.player = []  # registered players
        self.time = time
        self.description = description
        # if first round: sort players by rank
        # otherwise
| lbrrs/P4 | model/tournament.py | tournament.py | py | 372 | python | en | code | 0 | github-code | 13 |
33259270427 | """This spout connects to rabbitmq and gets messages off a queue.
It will map tuples to queues, and when told to ack a tuple, it will ack the
relevant message as well.
"""
import Queue
from streamparse import spout
import kombu
import genuid, settings
class GetDocumentsSpout(spout.Spout):
    """Storm spout feeding tuples from a RabbitMQ queue.

    Every emitted tuple is tracked in self.open_messages so the matching
    AMQP message can be acked (on success) or requeued (on failure) when
    Storm reports the tuple's fate.
    """
    def initialize(self, stormconf, context):
        # Called once by streamparse before the first next_tuple().
        self.source_queue = get_source_queue(settings.SOURCE)
        self.open_messages = {}  # tuple_id -> un-acked kombu message
    def next_tuple(self):
        try:
            message = self.source_queue.get(block=False)
        except Queue.Empty:
            # This is fine. It's just that we have no messages at this point.
            return
        # Tie the emitted tuple to its AMQP message via a fresh id.
        tuple_id = genuid.genuid()
        self.open_messages[tuple_id] = message
        payload_as_tuple = (message.payload,)
        self.log(u"about to emit {0} {1}".format(tuple_id, payload_as_tuple))
        self.emit(payload_as_tuple, tup_id=tuple_id)
    def ack(self, tuple_id):
        # Storm confirmed the tuple; ack the AMQP message so the broker drops it.
        message = self.open_messages[tuple_id]
        self.log(u"about to ack {0} {1}".format(tuple_id, message.payload))
        message.ack()
        # We are done with the message!
        del self.open_messages[tuple_id]
    def fail(self, tuple_id):
        """Mark a tuple as failed. We put it back on the rabbitmq queue so
        it is processed again. So, we only want to fail tuples that we think
        might succeed if we try again (e.g. a host down error).
        """
        message = self.open_messages[tuple_id]
        self.log(u"about to fail {0} {1}".format(tuple_id, message.payload))
        message.requeue()
        del self.open_messages[tuple_id]
def get_source_queue(source_setting):
    """Connect to a rabbitmq queue.

    source_setting is (host, queue_name[, kwargs]); the optional kwargs
    dict is forwarded to kombu's SimpleQueue.
    """
    rabbitmq_host, queue_name = source_setting[:2]
    kwargs = source_setting[2] if len(source_setting) > 2 else {}
    broker = kombu.BrokerConnection(rabbitmq_host)
    return broker.SimpleQueue(queue_name, **kwargs)
| sujaymansingh/sparse_average | src/getdocuments.py | getdocuments.py | py | 1,962 | python | en | code | 1 | github-code | 13 |
10884921048 | import random
import time
import ujson
from machine import UART, Pin, SPI
from ili9341 import Display, color565
from xglcd_font import XglcdFont
# Display control pin assignments (backlight, data/command, reset, SPI).
BL = 13
DC = 8
RST = 12
MOSI = 11
SCK = 10
CS = 9
# font = None
display: Display = None
first_message = False
# Pre-loaded bitmap fonts keyed by size.
font_dict = {
    'big': XglcdFont('fonts/Agency_FB21x40.c', 21, 40),
    'little': XglcdFont('fonts/Agency_FB11x22.c', 11, 22),
    'mid': XglcdFont('fonts/Agency_FB14x25.c', 14, 25),
}
# Screen layout per metric: value suffix, position, and the rectangle that
# gets blanked before each redraw (see print_pc_info).
title_dict = {
    'cpu_usage': {'desc': 'CPU Usage', 'suffix': '%', 'x': 80, 'y': 35, 'width': 35, 'height': 24, 'font': font_dict['mid']},
    'cpu_frequency': {'desc': 'CPU Frequency:', 'suffix': ' MHz', 'x': 80, 'y': 11, 'width': 66, 'height': 24, 'font': font_dict['mid']},
    'cpu_temperature': {'desc': 'CPU Temperature:', 'suffix': '', 'x': 135, 'y': 35, 'width': 28, 'height': 24, 'font': font_dict['mid']},
    'cpu_power': {'desc': 'CPU Power:', 'suffix': ' Watts', 'x': 80, 'y': 58, 'width': 63, 'height': 17, 'font': font_dict['mid']},
    'mem_total': {'desc': 'Memory Total:', 'suffix': 'G', 'x': 80, 'y': 90, 'width': 50, 'height': 24, 'font': font_dict['mid']},
    'mem_usage': {'desc': 'Memory Usage:', 'suffix': '%', 'x': 80, 'y': 115, 'width': 50, 'height': 24, 'font': font_dict['mid']},
    'disk_usage': {'desc': 'Disk Usage:', 'suffix': '%', 'x': 80, 'y': 195, 'width': 66, 'height': 24, 'font': font_dict['mid']},
    'disk_total': {'desc': 'Disk Total:', 'suffix': 'G', 'x': 80, 'y': 169, 'width': 66, 'height': 24, 'font': font_dict['mid']},
    'gpu_usage': {'desc': 'GPU Usage', 'suffix': '%', 'x': 80, 'y': 194, 'width': 66, 'height': 24, 'font': font_dict['mid']},
    'date': {'desc': '', 'suffix': '', 'x': 199, 'y': 52, 'width': 100, 'height': 22, 'font': font_dict['little']},
    'time': {'desc': '', 'suffix': '', 'x': 199, 'y': 15, 'width': 100, 'height': 38, 'font': font_dict['big']},
}
def print_cpu_name(cpu_name, line_height, top):
    """Draw the CPU name with a double underline across the screen.

    NOTE(review): the module-level `font` is only ever assigned in the
    commented-out line of init_display(), so this call raises NameError
    as-is -- confirm which font was intended and restore the assignment.
    """
    display.draw_text(x=5, y=top, font=font, text=cpu_name, color=color565(255, 255, 255))
    display.draw_line(x1=0, y1=top+line_height, x2=318, y2=top+line_height, color=color565(255, 255, 255))
    display.draw_line(x1=0, y1=top+line_height+5, x2=318, y2=top+line_height+5, color=color565(255, 255, 255))
def print_pc_info(pc_info, key, line_height, top, refresh_title):
    """Render one metric from pc_info at the screen slot mapped by title_dict.

    Keys without a configured slot, or absent from pc_info, are ignored.
    The extra parameters are kept for compatibility with existing callers.
    """
    # Bug fix: title_dict[key] raised KeyError for unknown keys, so the
    # original `is None` guard was unreachable; .get() makes it effective.
    map_dict = title_dict.get(key)
    if map_dict is None:
        return
    if key in pc_info:
        text = str(pc_info[key]) + map_dict['suffix']
        # Blank the slot first so shorter values don't leave stale pixels.
        display.fill_rectangle(x=map_dict['x'], y=map_dict['y'], w=map_dict['width'], h=map_dict['height'], color=color565(0, 0, 0))
        display.draw_text(x=map_dict['x'], y=map_dict['y'], font=map_dict['font'], text=text, color=color565(17, 241, 20))
def init_display():
    """Bring up the ILI9341 over SPI, switch on the backlight, and draw
    the background image."""
    global font, display
    Pin(BL, Pin.OUT).value(1)  # backlight on
    spi = SPI(1, baudrate=800_000_000, sck=Pin(SCK), mosi=Pin(MOSI))
    display = Display(spi, dc=Pin(DC), cs=Pin(CS), rst=Pin(RST), rotation=90, width=320, height=240)
    display.draw_image('img/bg.raw', 0, 0, 320, 240)
    # font = XglcdFont('fonts/Noto_Sans_CJK_Medium15x18.c', 15, 18)
    # display.draw_text(x=50, y=100, font=font, text='No Message From PC', color=color565(255, 125, 0))
def init_uart():
    """Read line chunks from UART0 forever; pass each complete message
    (terminated by the sentinel '___') to process()."""
    uart = UART(0, baudrate=9600, tx=Pin(0), rx=Pin(1))
    temp_str = ''
    while True:
        data = uart.readline()
        if data is not None:
            # print(data)
            temp_str = temp_str + (data.decode('utf-8'))
            if temp_str.endswith('___'):
                # Strip the end-of-message sentinel before parsing.
                process(temp_str.replace('___', ''))
                temp_str = ''
def process(data):
    """Parse one JSON status message from the PC and redraw the metrics.

    NOTE(review): first_message is initialized to False at module level
    and nothing ever sets it True, so the display.clear() branch can never
    run -- it looks like the flag was meant to start as True; confirm.
    """
    global first_message
    top = 5
    line_height = 20
    line_top = top
    pc_info = ujson.loads(data)
    # print(pc_info)
    if first_message:
        display.clear()
    print_cpu_name(pc_info['cpu_name'], line_height, line_top)
    line_top += line_height + 10
    # Draw every configured metric, stacking downward by key order.
    for key in sorted(title_dict.keys()):
        print_pc_info(pc_info, key, line_height, line_top, first_message)
        line_top += line_height + 2
    first_message = False
if __name__ == '__main__':
    init_display()
    init_uart()  # blocks forever reading the UART
| mostaron/pc_monitor_board_pico | main.py | main.py | py | 4,304 | python | en | code | 0 | github-code | 13 |
29218352511 | # This example is provided for informational purposes only and has not been audited for security.
from pathlib import Path
from feature_gates import FeatureGates
FeatureGates.set_sourcemap_enabled(True)
from pyteal import * # noqa: E402
""" Template for layer 1 dutch auction (from Fabrice and Shai)
"""
# TEAL template parameters, substituted with concrete values at deployment.
tmpl_start_round = Tmpl.Int("TMPL_START_ROUND")
tmpl_start_price = Tmpl.Int("TMPL_START_PRICE")
tmpl_supply = Tmpl.Int("TMPL_N")
tmpl_price_decrement = Tmpl.Int("TMPL_PRICE_INCREMENT")
tmpl_period = Tmpl.Int("TMPL_PERIOD")
tmpl_asset_a = Tmpl.Int("TMPL_ASSET_A")
tmpl_asset_b = Tmpl.Int("TMPL_ASSET_B")
tmpl_asset_c = Tmpl.Int("TMPL_ASSET_C")
tmpl_asset_d = Tmpl.Int("TMPL_ASSET_D")
tmpl_receiver = Tmpl.Addr("TMPL_RECEIVER")
# Sink addresses for burning ALGO / each asset.
tmpl_algo_sink = Tmpl.Addr("TMPL_ALGO_ZERO")
tmpl_asset_a_sink = Tmpl.Addr("TMPL_A_ZERO")
tmpl_asset_b_sink = Tmpl.Addr("TMPL_B_ZERO")
tmpl_asset_c_sink = Tmpl.Addr("TMPL_C_ZERO")
tmpl_asset_d_sink = Tmpl.Addr("TMPL_D_ZERO")
tmpl_redeem_round = Tmpl.Int("TMPL_REDEEM_ROUND")
tmpl_wrapup_time = Tmpl.Int("TMPL_WRAPUP_ROUND")
def dutch_auction(
    start_round=tmpl_start_round,
    start_price=tmpl_start_price,
    supply=tmpl_supply,
    price_decrement=tmpl_price_decrement,
    period=tmpl_period,
    asset_a=tmpl_asset_a,
    asset_b=tmpl_asset_b,
    asset_c=tmpl_asset_c,
    asset_d=tmpl_asset_d,
    receiver=tmpl_receiver,
    algo_sink=tmpl_algo_sink,
    asset_a_sink=tmpl_asset_a_sink,
    asset_b_sink=tmpl_asset_b_sink,
    asset_c_sink=tmpl_asset_c_sink,
    asset_d_sink=tmpl_asset_d_sink,
    redeem_round=tmpl_redeem_round,
    wrapup_time=tmpl_wrapup_time,
):
    """PyTeal logic-signature for a layer-1 dutch auction (unaudited).

    Dispatches on the atomic group size: 5 transactions = bid,
    4 = redeem, 1 = wrapup. Several *_sink parameters are accepted but
    unused here (only asset_c_sink appears in the conditions).
    """
    # the definition of i simplifies the original design by only constraining last_valid here
    i_upper = (Gtxn[0].last_valid() - start_round) / period
    # Bid: a 5-transaction group placed before the bidding window closes.
    bid = And(
        Gtxn[0].rekey_to() == Global.zero_address(),
        Gtxn[1].rekey_to() == Global.zero_address(),
        Gtxn[2].rekey_to() == Global.zero_address(),
        Gtxn[3].rekey_to() == Global.zero_address(),
        Gtxn[4].rekey_to() == Global.zero_address(),
        Gtxn[0].last_valid() == Gtxn[1].last_valid(),
        Gtxn[1].last_valid() == Gtxn[2].last_valid(),
        Gtxn[2].last_valid() == Gtxn[3].last_valid(),
        Gtxn[3].last_valid() == Gtxn[4].last_valid(),
        Gtxn[4].last_valid() < (start_round + i_upper * period),
        Gtxn[0].type_enum() == TxnType.AssetTransfer,
        Gtxn[0].xfer_asset() == asset_d,
        Gtxn[0].receiver() == receiver,
        Gtxn[1].type_enum() == TxnType.AssetTransfer,
        Gtxn[1].xfer_asset() == asset_b,
        Gtxn[2].type_enum() == TxnType.AssetTransfer,
        Gtxn[2].xfer_asset() == asset_c,
        Gtxn[3].type_enum() == TxnType.AssetTransfer,
        # NOTE(review): Gtxn[4] is constrained to Payment just below, yet its
        # xfer_asset is checked here while Gtxn[3]'s asset is never pinned --
        # possibly this was meant to be Gtxn[3].xfer_asset(); confirm against
        # the original design before relying on this contract.
        Gtxn[4].xfer_asset() == asset_c,
        Gtxn[4].type_enum() == TxnType.Payment,
        Gtxn[4].amount()
        == (Gtxn[1].fee() + Gtxn[2].fee() + Gtxn[3].fee()),  # ? why only 1, 2, 3
        Gtxn[0].asset_amount() == Gtxn[1].asset_amount(),
        Gtxn[1].asset_amount() == Gtxn[2].asset_amount(),
        Gtxn[3].asset_amount() == i_upper * supply * price_decrement,
        Gtxn[0].sender() == Gtxn[1].receiver(),
        Gtxn[2].receiver() == asset_c_sink,
        Gtxn[1].sender() == Gtxn[2].sender(),
        Gtxn[2].sender() == Gtxn[3].sender(),
        Gtxn[3].sender() == Gtxn[3].receiver(),
        Gtxn[3].receiver() == Gtxn[4].receiver(),
    )
    # Redeem: a 4-transaction group valid from redeem_round; Arg(0) carries
    # the price-step index used to compute the settlement price.
    redeem = And(
        Gtxn[0].rekey_to() == Global.zero_address(),
        Gtxn[1].rekey_to() == Global.zero_address(),
        Gtxn[2].rekey_to() == Global.zero_address(),
        Gtxn[3].rekey_to() == Global.zero_address(),
        Gtxn[0].first_valid() == Gtxn[1].first_valid(),
        Gtxn[1].first_valid() == Gtxn[2].first_valid(),
        Gtxn[2].first_valid() == Gtxn[3].first_valid(),
        Gtxn[3].first_valid() >= redeem_round,
        Gtxn[0].type_enum() == TxnType.AssetTransfer,
        Gtxn[0].xfer_asset() == asset_b,
        Gtxn[1].type_enum() == TxnType.AssetTransfer,
        Gtxn[1].xfer_asset() == asset_a,
        Gtxn[2].type_enum() == TxnType.AssetTransfer,
        Gtxn[2].xfer_asset() == asset_c,
        Gtxn[2].asset_amount() == redeem_round * supply * price_decrement,
        Gtxn[3].type_enum() == TxnType.Payment,
        Gtxn[3].amount() == Gtxn[1].fee() + Gtxn[2].fee(),
        Gtxn[1].asset_amount()
        == Gtxn[0].asset_amount() * (start_price - price_decrement * Btoi(Arg(0))),
        Gtxn[0].sender() == Gtxn[1].receiver(),
        Gtxn[0].receiver() == Gtxn[1].sender(),
        Gtxn[1].sender() == Gtxn[2].sender(),
        Gtxn[2].sender() == Gtxn[2].receiver(),
        Gtxn[2].receiver() == Gtxn[3].receiver(),
    )
    # Wrapup: a lone close-out transaction (ALGO or asset_a) to the receiver,
    # allowed once the wrapup window opens.
    wrapup = And(
        Txn.rekey_to() == Global.zero_address(),
        Txn.first_valid() >= start_round + wrapup_time,
        Or(
            And(
                Txn.type_enum() == TxnType.Payment,
                Txn.amount() == Int(0),
                Txn.close_remainder_to() == receiver,
            ),
            And(
                Txn.type_enum() == TxnType.AssetTransfer,
                Txn.asset_amount() == Int(0),
                Txn.asset_close_to() == receiver,
                Txn.xfer_asset() == asset_a,
            ),
        ),
    )
    # Select the branch from the atomic group size.
    dutch = Cond(
        [Global.group_size() == Int(5), bid],
        [Global.group_size() == Int(4), redeem],
        [Global.group_size() == Int(1), wrapup],
    )
    return dutch
if __name__ == "__main__":
    # to recreate files, run this script from the root of the repo
    results = Compilation(dutch_auction(), mode=Mode.Signature, version=2).compile(
        with_sourcemap=True,
        pcs_in_sourcemap=True,
        annotate_teal=True,
        annotate_teal_headers=True,
        annotate_teal_concise=False,
    )
    # Write both the raw TEAL and the sourcemap-annotated variant.
    EXAMPLES = Path.cwd() / "examples" / "signature"
    with open(EXAMPLES / "dutch_auction.teal", "w") as f:
        f.write(results.teal)
    with open(EXAMPLES / "dutch_auction_annotated.teal", "w") as f:
        f.write(results.sourcemap.annotated_teal)
| algorand/pyteal | examples/signature/dutch_auction.py | dutch_auction.py | py | 6,017 | python | en | code | 269 | github-code | 13 |
1456312341 | import RPi.GPIO as GPIO
import time
# BCM pin assignments: brush servo, rain sensor, and the two H-bridge
# channels (in1/in2 = water pump, in3/in4 = wheel motor) with their enables.
servo_pin = 18
rain_pin = 26
in1 = 24
in2 = 23
in3 = 5
in4 = 6
en1 = 25
en2 = 13
temp1=1  # NOTE(review): never used below -- confirm before removing
GPIO.setmode(GPIO.BCM)
GPIO.setup(rain_pin, GPIO.IN)
#servo motor
GPIO.setup(servo_pin, GPIO.OUT)
pwm = GPIO.PWM(servo_pin, 50) # 50Hz
#motor
GPIO.setmode(GPIO.BCM)  # repeated call; same mode as above
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(in3,GPIO.OUT)
GPIO.setup(in4,GPIO.OUT)
GPIO.setup(en1,GPIO.OUT)
GPIO.setup(en2,GPIO.OUT)
# Speed control via PWM on the enable pins.
p1=GPIO.PWM(en1,10)
p2=GPIO.PWM(en2,10)
def workwheel():
    # Drive the wheel motor: in3 high / in4 low on the H-bridge.
    GPIO.output(in3,GPIO.HIGH)
    GPIO.output(in4,GPIO.LOW)
def workwater():
    # Run the water pump: in1 high / in2 low on the H-bridge.
    GPIO.output(in1,GPIO.HIGH)
    GPIO.output(in2,GPIO.LOW)
def stopwheel():
    """Stop the wheel motor by de-energizing both driver inputs.

    Bug fix: the original drove in3 HIGH / in4 LOW -- identical to
    workwheel() -- so the motor kept running; stopping requires both
    inputs LOW, matching stopwater() on the other channel.
    """
    GPIO.output(in3,GPIO.LOW)
    GPIO.output(in4,GPIO.LOW)
def stopwater():
    # Stop the pump: both H-bridge inputs low.
    GPIO.output(in1,GPIO.LOW)
    GPIO.output(in2,GPIO.LOW)
def workbrush():
    # Start the brush servo PWM at 3% duty after a 1 s settle delay.
    time.sleep(1)
    pwm.start(3.0)
def stopbrush():
    # Stop the brush servo PWM after a 1 s delay.
    time.sleep(1)
    pwm.stop()
def israining():
    """Poll the rain sensor and start/stop the cleaning rig accordingly.

    The digital sensor reads 0 when wet and 1 when dry, so the two
    branches are mutually exclusive; an if/else makes that explicit
    (the original used two independent equality tests).
    """
    rain = GPIO.input(rain_pin)
    if rain == 0:
        print("raining")
        # Enable both motor channels at 25% duty, then run everything.
        p1.start(25)
        p2.start(25)
        workwheel()
        workwater()
        workbrush()
    else:
        print("not raining")
        stopbrush()
        stopwater()
        stopwheel()
stopwheel()
while True:
    israining()  # busy-polls the sensor forever; consider a short sleep
#GPIO.cleanup()
13812343138 | from utils.constants import (
HOG_HIST_NORMALIZATION_SIZE,
HOG_BLOCK_SIZE,
HOG_HISTOGRAM_BINS,
HOG_HISTOGRAM_RANGE,
HOG_KERNEL,
)
from utils.utils import magnitude, orientation, split_blocks, weighted_histogram
import numpy as np
def hog(img):
    """Histogram-of-Oriented-Gradients feature vector for one image.

    Pipeline: gradient magnitude/orientation -> per-block weighted
    histograms -> L2 block normalization -> flat concatenated descriptor.
    Assumes split_blocks returns a 2-D grid of blocks -- confirm in utils.
    """
    # calculate the magnitudes
    magnitudes = magnitude(img, HOG_KERNEL)
    # calculate the orientations
    orientations = orientation(img, HOG_KERNEL)
    # transform results into an array of 8x8 zones
    magnitude_zones = split_blocks(magnitudes, HOG_BLOCK_SIZE)
    orientation_zones = split_blocks(orientations, HOG_BLOCK_SIZE)
    # calculate the histogram of each zone (object dtype: one array per cell)
    hist = np.empty(
        magnitude_zones.shape,
        dtype=object,
    )
    for i in range(0, magnitude_zones.shape[0]):
        for j in range(0, magnitude_zones.shape[1]):
            hist[i, j] = weighted_histogram(
                magnitude_zones[i, j],
                orientation_zones[i, j],
                HOG_HISTOGRAM_BINS,
                HOG_HISTOGRAM_RANGE,
            )
    # normalize the histograms over larger (16x16) zones
    normalized_hist = split_blocks(hist, HOG_HIST_NORMALIZATION_SIZE)
    for i in range(0, normalized_hist.shape[0]):
        for j in range(0, normalized_hist.shape[1]):
            sq = normalized_hist[i, j] ** 2
            # L2 norm across all histograms of the block
            norm = np.sqrt(np.sum(np.sum(sq)))
            normalized_hist[i, j] = np.divide(normalized_hist[i, j], norm)
    # Concatenate results into a single flat feature array
    return np.concatenate(np.concatenate(np.concatenate(normalized_hist.flatten())))
| alexisbeaulieu97/PedestrianDetector | PedestrianDetector/utils/hog.py | hog.py | py | 1,558 | python | en | code | 0 | github-code | 13 |
27214175888 | from copy import deepcopy
from presidio_analyzer import AnalyzerEngine
# Set up the engine, loads the NLP module (spaCy model by default) and other PII recognizers
analyzer = AnalyzerEngine()
text = """
John, please get that article on www.linkedin.com to me by 5:00PM
on Jan 9th 2012. 4:00 would be ideal, actually. If you have any
questions, You can reach me at (519)-236-2723x341 or get in touch with
my associate at harold.smith@gmail.com
"""
# Call analyzer to get results
results = analyzer.analyze(text=text, language='en')
# print(results)
output = []
urls_emails = []
for r in results:
print(r.start, r.end, text[r.start: r.end], r.entity_type)
if r.entity_type in ["URL", "EMAIL_ADDRESS", "LINK"]:
urls_emails.append((r.start, r.end, r.entity_type))
else:
output.append((r.start, r.end, r.entity_type))
doutput = deepcopy(urls_emails)
for o in doutput:
for i in doutput:
if o[0] == i[0] and o[1] < i[1]:
urls_emails.remove(o)
elif o[0] > i[0] and o[1] == i[1]:
urls_emails.remove(o)
print(output)
print(urls_emails)
output.extend(urls_emails)
print(output)
11559333720 | from flask import Flask, request
import requests
from twilio.twiml.messaging_response import MessagingResponse
app = Flask(__name__)
#@app.route("/")
#def hello():
# return "Hello, World!"
@app.route("/sms", methods=['POST'])
def sms_reply():
    """Twilio webhook: build a TwiML reply from the incoming message body."""
    incoming_msg = request.form.get('Body').lower()
    resp = MessagingResponse()
    msg = resp.message()
    responded = False  # NOTE(review): set but never read -- leftover scaffolding?
    if "hello" in incoming_msg:
        msg.body("Hi, hope you have a good day! \nHow may I help you?")
        responded = True
    elif "order" in incoming_msg:
        msg.body("Choose Product \n1. Cutters\n2. Tools Bit\n3. Shank Tools\n4. Reamers\n5. Carbide tipped")
        responded = True
    elif(incoming_msg == "1"):
        # NOTE(review): empty URL placeholder -- requests raises MissingSchema
        # as-is; fill in the product-catalog endpoint. `data` is also unused.
        r = requests.get('')
        if r.status_code == 200:
            data = r.json()
            res = "Choose your sub-category"
        else:
            res = "Not able to retrieve data. \n Please try again."
        msg.body(res)
        responded = True
    elif "finish" in incoming_msg:
        msg.body("Thanks for placing the order. \nNow, you can pay your bill here http://brainmagic.co.in/")
    elif "exit" in incoming_msg:
        msg.body("The order process is terminated.")
    elif "cart" in incoming_msg:
        msg.body("The list of orders added to cart are:")
    else:
        msg.body("Sorry. Wrong response. \nKindly start from the beginning.")
    return str(resp)
if __name__ == "__main__":
    app.run(debug=True)
377474470 | from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.core.validators import RegexValidator
from django.db import models
from django.contrib.auth import get_user_model
class UserManager(BaseUserManager):
    """Manager that creates users for the custom user model."""
    def create_user(self, email, name, password=None):
        """Create, save, and return a regular user."""
        if not email:
            raise ValueError("Users must have a email address")
        user = self.model(email=self.normalize_email(email), name=name)
        user.set_password(password)
        user.save()
        return user
    def create_superuser(self, email, name, password):
        """Create a regular user, then promote it to staff/superuser."""
        superuser = self.create_user(email, name, password)
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save()
        return superuser
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Custom user model: email is the login identifier instead of a
    username. Field order is preserved (it affects Django migrations/forms).
    """
    email = models.EmailField(max_length=255, unique=True, help_text="Email id of user")
    name = models.CharField(max_length=255, help_text="Name of the user")
    phone = models.CharField(
        max_length=10,
        null=True,
        blank=True,
        validators=[
            RegexValidator(
                regex="^[0-9]{10}$", message="Enter valid number (10 digits only)"
            )
        ],
        help_text="Phone Number",
    )
    about = models.TextField(null=True, blank=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_invited = models.BooleanField(default=False)
    is_verified = models.BooleanField(default=False)
    date_joined = models.DateTimeField(auto_now_add=True)
    objects = UserManager()
    # Authenticate with email; name is prompted by createsuperuser.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ["name"]
    def get_full_name(self):
        # Part of Django's user contract; only a single name field exists.
        return self.name
    def get_short_name(self):
        return self.name
    def __str__(self):
        return str(self.name)
    def to_representation(self):
        """Serialize the user (plus groups and profile links) to a plain dict."""
        rep = {
            "email": self.email,
            "name": self.name,
            "phone": self.phone,
            "about": self.about,
            "is_active": self.is_active,
            "is_staff": self.is_staff,
            "is_invited": self.is_invited,
            "is_verified": self.is_verified,
            "date_joined": self.date_joined,
            "groups": [
                {"id": group.id, "name": group.name} for group in self.groups.all()
            ],
            "profile_links": [
                profile_link.to_representation()
                for profile_link in self.profile_links_user.all()
            ],
        }
        return rep
# NOTE(review): appears unused in this module -- confirm before removing.
UserModel = get_user_model()
class UserFollowing(models.Model):
    """Directed follow edge: user_id follows following_user_id."""
    user_id = models.ForeignKey(UserProfile, related_name="following", on_delete=models.CASCADE)
    following_user_id = models.ForeignKey(UserProfile, related_name="followers", on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        constraints = [
            # A user may follow another at most once.
            models.UniqueConstraint(fields=['user_id','following_user_id'], name="unique_followers")
        ]
    def __str__(self):
        return f"{self.user_id} follows {self.following_user_id}"
21632339915 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# Load per-energy occurrence counts for two temperatures (T=1.0, T=2.4).
infile_1=open("../results/individual_energies_1.000000.txt").readlines()
infile_24=open("../results/individual_energies_2.400000.txt").readlines()
energies=[]
temp_1_dist=[]
temp_24_dist=[]
# First two lines of each file are headers; data rows are "energy,count".
for line in infile_1[2:]:
    energy,dist=line.split(",")
    energies.append(int(energy))
    temp_1_dist.append(int(dist))
for line in infile_24[2:]:
    energy,dist=line.split(",")
    temp_24_dist.append(int(dist))
energies_1=[]  # NOTE(review): never used -- candidate for removal
energies=np.asarray(energies)
temp_1_dist=np.asarray(temp_1_dist)
temp_24_dist=np.asarray(temp_24_dist)
average24=average10=var24=var10=0;
# Count-weighted mean energy for each temperature.
for i in range(len(energies)):
    average24+=energies[i]*temp_24_dist[i]
    average10+=energies[i]*temp_1_dist[i]
average24=average24/sum(temp_24_dist)
average10=average10/sum(temp_1_dist)
# Count-weighted variance about each mean.
for i in range(len(energies)):
    var24+=temp_24_dist[i]*(energies[i]-average24)**2
    var10+=temp_1_dist[i]*(energies[i]-average10)**2
var24=var24/sum(temp_24_dist)
var10=var10/sum(temp_1_dist)
print(sum(temp_24_dist))
print(sum(temp_1_dist))
print("Average: %f , Standard deviation for T=2.4: %f"%(average24,np.sqrt(var24)))
print("Average: %f , Standard deviation for T=1.0: %f"%(average10,np.sqrt(var10)))
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 22}
# Normalize counts to relative frequencies before plotting.
temp_1_dist=np.asarray(temp_1_dist)/sum(temp_1_dist)
temp_24_dist=np.asarray(temp_24_dist)/sum(temp_24_dist)
matplotlib.rc('font', **font)
plt.figure(figsize=(20,10))
plt.subplot(121)
plt.bar(energies[:10],temp_1_dist[:10],width=4)
plt.xticks(energies[:10:2])
plt.title("T=1.0, spins 400, runs:1e6")
plt.xlabel("Energy in units of J")
plt.ylabel("Relative occurence")
plt.subplot(122)
plt.bar(energies[:int((len(energies)/2))],temp_24_dist[:int((len(energies)/2))],width=4)
plt.title("T=2.4, spins 400, runs:1e6")
plt.xlabel("Energy in units of J")
plt.ylabel("Relative occurence")
plt.savefig("../plots/Dist.pdf",bbox_inches='tight')
plt.show()
| adrian2208/FYS3150_collab | Project4/python/plot_distribution.py | plot_distribution.py | py | 1,964 | python | en | code | 0 | github-code | 13 |
35521334467 | #coding=utf-8
from __future__ import division
import numpy as np
from scipy.spatial.distance import euclidean
from scipy.spatial.distance import euclidean
def costFunction(x_1, x_2, y, m1, m2, delta):
    """Logistic loss over paired samples.

    For sample i the score is delta[0]*d(x_1[i], m1) - delta[1]*d(x_2[i], m2)
    (Euclidean distances to the two prototypes); the per-sample loss is
    log(1 + exp(-y[i] * score)) and the total over all samples is returned.
    """
    total = 0
    for sample_a, sample_b, label in zip(x_1, x_2, y):
        score = delta[0] * euclidean(sample_a, m1) - delta[1] * euclidean(sample_b, m2)
        total += np.log(1 + np.exp(-label * score))
    return total
def minbatchSGD(epoch, data_1, data_2, y, m1, m2, belta, parameter_1, parameter_2, learning_rate):
    """Adam-style SGD on the two-weight metric model; returns the weights.

    Runs full passes over the data until the total cost stops improving,
    logging per-sample margins and per-pass cost to the file 'log'.
    NOTE(review): `belta` is updated in place with `-=`, so `d` aliases it
    and the "best" snapshot is just the current weights, not a copy --
    confirm whether a copy was intended.
    """
    cost = costFunction(data_1, data_2, y, m1, m2, belta)
    d = belta
    eps = 1e-8  # numerical floor for the Adam denominator
    m = np.zeros(len(belta))
    v = np.zeros(len(belta))
    # Context manager so the log file is closed even on error
    # (the original opened it and never closed the handle).
    with open('log', 'w') as fw:
        while True:
            for i in range(len(data_1)):
                # Distances of sample i to each prototype.
                d1 = euclidean(data_1[i], m1)
                d2 = euclidean(data_2[i], m2)
                fw.write('y: %s , d1-d2 %s' % (y[i], belta[0]*d1 - belta[1]*d2) + '\n')
                h = np.array([d1, - d2])
                # Gradient of log(1 + exp(-y * belta.h)) w.r.t. belta.
                delta = 1/(1+np.exp(-y[i]*np.dot(belta, h.T)))*np.exp(-y[i]*np.dot(belta, h.T))*(-y[i])*h
                # Adam moment updates (no bias correction).
                m = parameter_1*m + (1-parameter_1)*delta
                v = parameter_2*v + (1-parameter_2)*(delta**2)
                belta -= learning_rate*m/(np.sqrt(v)+eps)
            # Recompute the full cost after the pass; stop when it stalls.
            c = costFunction(data_1, data_2, y, m1, m2, belta)
            fw.write('Cost : ' + str(c) + '\n')
            if c < cost:
                cost = c
                d = belta
            else:
                break
    return d
def reduceDimension(projection, validX):
    """Project each of the 72 per-feature (N, 300) slices down to (N, 10).

    projection[i] holds a pair of (300, 10) projection matrices for
    feature i. The two projected stacks are returned flattened to
    (N, 10 * 72) arrays, one per projection of the pair.
    """
    n_samples = len(validX)
    # (N, 300*72) -> (N, 300, 72): one 300-long series per feature.
    cube = np.reshape(validX, [n_samples, 300, 72])
    proj_1 = np.zeros([n_samples, 10, 72])
    proj_2 = np.zeros([n_samples, 10, 72])
    for feature in range(cube.shape[-1]):
        slab = cube[:, :, feature]
        mat_1, mat_2 = projection[feature]
        proj_1[:, :, feature] = np.dot(slab, mat_1)
        proj_2[:, :, feature] = np.dot(slab, mat_2)
    flat_1 = np.reshape(proj_1, [-1, proj_1.shape[1] * proj_1.shape[-1]])
    flat_2 = np.reshape(proj_2, [-1, proj_2.shape[1] * proj_2.shape[-1]])
    return flat_1, flat_2
| lizaigaoge550/reduct_Dimension | updata_parameter.py | updata_parameter.py | py | 2,156 | python | en | code | 0 | github-code | 13 |
23034381452 | from typing import Any, Dict, List
from library.depends import tree
def order_levels(trees: List[Dict[str, Any]]) -> List[List[str]]:
    """
    Given a collection of dependency trees, walk down them and collect
    services at the same depth. Each returned group contains service
    names that may be brought up at the same time, in dependency order;
    empty levels are dropped.
    """
    services_flat = []
    for dependency_tree in trees:
        services_flat.extend(tree.flat_tree(dependency_tree))
    if not services_flat:
        return []
    depth = max(service["level"] for service in services_flat) + 1
    ordered = [set() for _ in range(depth)]
    for service in services_flat:
        ordered[service["level"]].add(service["name"])
    return [list(level) for level in ordered if level]
def order_flat(trees: List[Dict[str, Any]]) -> List[str]:
    """Flatten order_levels() into one dependency-ordered service list."""
    flat = []
    for group in order_levels(trees):
        flat.extend(group)
    return flat
| gastrodon/terraform-compose | library/depends/order.py | order.py | py | 914 | python | en | code | 3 | github-code | 13 |
print('Advent of Code 2017 - Day 11')
# Input is one comma-separated line of hex directions (n, ne, se, s, sw, nw).
with open('day11.txt') as f:
    path = f.read().split(',')
def no_of_steps(x, y):
    """Hex-grid distance from the origin for the coordinate scheme below.

    x counts east/west columns; y is tracked in half-steps, so it is
    doubled before comparing against |x|.
    """
    dx = abs(x)
    dy = abs(y * 2)
    if dy <= dx:
        return int(dx)
    return int(dx + (dy - dx) / 2)
x = y = 0
furthest_step = 0
# Walk the path: n/s move a full y step; the four diagonals move half a
# y step plus a full x step.
for step in path:
    if step == 'n':
        y += 1
    elif step == 'ne':
        y += 0.5
        x += 1
    elif step == 'se':
        y -= 0.5
        x += 1
    elif step == 's':
        y -= 1
    elif step == 'sw':
        y -= 0.5
        x -= 1
    elif step == 'nw':
        y += 0.5
        x -= 1
    # Track the farthest distance ever reached (part 2).
    furthest_step = max(furthest_step, no_of_steps(x, y))
print(f'Part 1: {no_of_steps(x, y)}')  # 747
print(f'Part 2: {furthest_step}')  # 1544
| kdmontero/aoc | 2017/day11.py | day11.py | py | 780 | python | en | code | 0 | github-code | 13 |
39506577535 | #!/usr/bin/env python3
from multiprocessing import Pool
import os
import time, random
def my_fork():
    """Fork once and report from both sides of the fork (POSIX only)."""
    print('process (%s) start...' % (os.getpid()))
    child_pid = os.fork()
    if child_pid > 0:
        # Parent: fork returned the child's pid.
        print('I (%s) just create a child process (%s)' % (os.getpid(), child_pid))
    else:
        # Child: fork returned 0.
        print('I am child process (%s) and my parent is (%s)' % (os.getpid(), os.getppid()))
def long_time_task(name):
    """Simulate a task taking 0-3 seconds; print its pid and duration."""
    print('Run task %s (%s)...' % (name, os.getpid()))
    started = time.time()
    time.sleep(random.random() * 3)
    elapsed = time.time() - started
    print('Task %s runs %0.2f sceconds.' % (name, elapsed))
if __name__ == '__main__':
    print('parent process %s.' % (os.getpid()))
    p = Pool(4)
    print(dir(p))  # NOTE(review): debug print of the Pool API -- remove?
    # 5 tasks on 4 workers: the fifth waits for a free slot.
    for i in range(5):
        p.apply_async(long_time_task, args=(i,))
    print('Waitong for all subprocess done...')
    p.close()
    p.join()
    print('All subprocesses done.')
    my_fork()  # POSIX-only: os.fork is unavailable on Windows
| zzucainiao/code-backup | learn_python/my_mulprocess.py | my_mulprocess.py | py | 907 | python | en | code | 0 | github-code | 13 |
31557466220 | import pygame.font
from pygame.sprite import Group
"""Pygame's Group class provides useful methods for adding, removing, and checking if a sprite exists in the group. In this code, the prep_ships() method creates a new Group object to store instances of the Ship class, representing the remaining ships in the game."""
from ship import Ship
class Scoreboard:
    """A class to report scoring information.

    Renders the score, high score, level, and remaining ships as images
    and draws them onto the game screen each frame.
    """
    def __init__(self, ai_game):
        """Initialize scorekeeping attributes."""
        self.ai_game = ai_game
        self.screen = ai_game.screen
        self.screen_rect = self.screen.get_rect()
        self.settings = ai_game.settings
        self.stats = ai_game.stats
        # Font settings for scoring information.
        self.text_color = (255, 0, 0)
        self.font = pygame.font.SysFont(None, 48)
        # Prepare the initial score images.
        self.prep_score()
        self.prep_high_score()
        self.prep_level()
        self.prep_ships()
    def prep_score(self):
        """Turn the score into a rendered image."""
        # Round to the nearest ten and add thousands separators.
        rounded_score = round(self.stats.score, -1)
        score_str = "{:,}".format(rounded_score)
        self.score_image = self.font.render(score_str, True,
                self.text_color, self.settings.bg_color)
        # Display the score at the top right, 20 px in from the edge.
        self.score_rect = self.score_image.get_rect()
        self.score_rect.right = self.screen_rect.right - 20
        self.score_rect.top = 20
    def prep_high_score(self):
        """Turn the high score into a rendered image."""
        high_score = round(self.stats.high_score, -1)
        high_score_str = "{:,}".format(high_score)
        self.high_score_image = self.font.render(high_score_str, True,
                self.text_color, self.settings.bg_color)
        # Center the high score at the top of the screen.
        self.high_score_rect = self.high_score_image.get_rect()
        self.high_score_rect.centerx = self.screen_rect.centerx
        self.high_score_rect.top = self.score_rect.top
    def prep_level(self):
        """Turn the level into a rendered image."""
        level_str = str(self.stats.level)
        self.level_image = self.font.render(level_str, True,
                self.text_color, self.settings.bg_color)
        # Position the level 10 px below the score.
        self.level_rect = self.level_image.get_rect()
        self.level_rect.right = self.score_rect.right
        self.level_rect.top = self.score_rect.bottom + 10
    def prep_ships(self):
        """Show how many ships are left."""
        # One ship sprite per remaining life, laid out along the top left.
        self.ships = Group()
        for ship_number in range(self.stats.ships_left):
            ship = Ship(self.ai_game)
            ship.rect.x = 10 + ship_number * ship.rect.width
            ship.rect.y = 10
            self.ships.add(ship)
    def check_high_score(self):
        """Update and re-render the high score if the current score beats it."""
        if self.stats.score > self.stats.high_score:
            self.stats.high_score = self.stats.score
            self.prep_high_score()
    def show_score(self):
        """Draw scores, level, and ships to the screen."""
        self.screen.blit(self.score_image, self.score_rect)
        self.screen.blit(self.high_score_image, self.high_score_rect)
        self.screen.blit(self.level_image, self.level_rect)
        self.ships.draw(self.screen)
| wahabali790/Alien-Invasion | scoreboard.py | scoreboard.py | py | 4,482 | python | en | code | 0 | github-code | 13 |
41357125273 | """ Get ssh connexions for Buildbot master libvirt
watchdog script"""
import yaml
with open('../os_info.yaml', 'r') as f:
    os_info = yaml.safe_load(f)
SSH_CONNECTIONS = 0
# One ssh connection per (OS, arch) pair that has install builders,
# excluding the s390x and x86 architectures.
for os_name in os_info:  # renamed from `os` to avoid shadowing the os module name
    for arch in os_info[os_name]['arch']:
        # Install builders exist unless the OS entry explicitly opts out.
        add_install = os_info[os_name].get('has_install', True)
        if arch not in ['s390x', 'x86'] and add_install:
            SSH_CONNECTIONS += 1
print(SSH_CONNECTIONS)
| MariaDB/buildbot | master-libvirt/get_ssh_cnx_num.py | get_ssh_cnx_num.py | py | 464 | python | en | code | 2 | github-code | 13 |
20408398074 | from time import time as get_current_time
from os import path, makedirs
from threading import Thread
from requests import Session
def convert_bytes(size: int) -> str:
    """
    Convert bytes to human-readable format
    :param size: bytes
    :return: e.g. "1.50 KB"; sizes of 1024 TB and above are reported in PB.

    Bug fix: the original fell off the end of the loop for sizes >= 1024 TB
    and implicitly returned None; a terminal PB unit now catches that case.
    """
    units = ["bytes", "KB", "MB", "GB", "TB", "PB"]
    for unit in units[:-1]:
        if size < 1024:
            return f"{size:.2f} {unit}"
        size /= 1024
    return f"{size:.2f} {units[-1]}"
class FastDownloader:
    """
    Downloads a list of files concurrently with threading and keeps
    statistics about the process (file count, total bytes, cumulative
    download time).
    """

    def __init__(
        self,
        output_dir: str = "downloaded",
        overwrite: bool = False,
        max_threads: "int | str" = "inf",
        info: bool = False
    ) -> None:
        """
        Configures the downloader and creates the output directory if not already existing

        :param output_dir: The directory where the files will be saved
        :param overwrite: If True, existing files are overwritten instead of being renamed
        :param max_threads: The maximum amount of threads that can be used ("inf" for no limit)
        :param info: If True, prints information about the download process
        """
        # BUG fix: these used to be *class* attributes, so every instance
        # shared one session, one thread list and one set of counters.
        # They are per-instance state and belong here.
        self._downloader = Session()
        self._threads = []
        self._total_download_time = 0
        self._total_downloaded = 0
        self._total_size = 0
        self._output_dir = output_dir
        self._overwrite = overwrite
        if max_threads == "inf":
            max_threads = float("inf")
        self._max_threads = max_threads
        self._info = info
        if not path.exists(self._output_dir):
            makedirs(self._output_dir)

    def download(self, urls: list[str]) -> None:
        """
        Downloads all files from the given urls, spawning at most
        ``max_threads`` worker threads at a time.

        :param urls: A list of urls (the list of files to download)
        """
        urls = urls.copy()
        duplicate_count = len(urls) - len(set(urls))
        if duplicate_count > 0:  # Detect duplicates
            if self._info:
                print(f"Found {duplicate_count} duplicated url(s). Removing and continuing...")
            urls = list(set(urls))  # remove all duplicates
        # BUG fix: the original compared with `<=`, which allowed one thread
        # more than `max_threads`.
        while len(self._threads) < self._max_threads and urls:
            worker = Thread(target=self._download, args=(urls.pop(),))
            self._threads.append(worker)
            worker.start()
        # When the thread budget is exhausted but urls remain, wait for the
        # current batch and recurse on the rest.
        if urls:
            self.wait_to_finish()
            self.download(urls)

    def wait_to_finish(self) -> None:
        """
        Waits for all threads to finish and resets the threads list
        """
        for thread in self._threads:
            thread.join()
        self._threads = []

    def statistics(self) -> list:
        """
        Returns a list containing the total downloaded files,
        the total size of all downloaded files in bytes,
        the time it would have taken to download all files in seconds.
        """
        return [self._total_downloaded, self._total_size, self._total_download_time]

    def _download(self, url: str) -> None:
        """
        Downloads a single url into the output directory.

        :param url: The url of the file to download
        """
        start_time = get_current_time()
        response = self._downloader.get(url)
        filename = url.split("/")[-1]
        # If the target exists and overwriting is disabled, pick a free name
        # in the format "stem (N).extension".
        if path.exists(path.join(self._output_dir, filename)) and not self._overwrite:
            duplicate = 1
            old_filename = filename
            stem = ''.join(old_filename.split('.')[:-1])
            extension = old_filename.split('.')[-1]
            while path.exists(path.join(self._output_dir, filename)):
                filename = f"{stem} ({duplicate}).{extension}"
                duplicate += 1
            if self._info:
                print(f"File {old_filename} already exists! Renaming to {filename}")
        # BUG fix: the file is now written under the computed `filename`;
        # previously every download went to the same literal placeholder
        # path, so files silently overwrote each other.
        with open(path.join(self._output_dir, filename), "wb") as f:
            f.write(response.content)
            # Update the statistics
            self._total_size += f.tell()
            if self._info:
                print(f"Downloaded {url} -> {self._output_dir}/{filename} ({convert_bytes(f.tell())})")
        self._total_downloaded += 1
        self._total_download_time += get_current_time() - start_time
| DerSchinken/FontServer | src/FastDownload.py | FastDownload.py | py | 4,234 | python | en | code | 1 | github-code | 13 |
def long(n):
    """Print the length of the longest word in the list *n*.

    :param n: list of strings
    """
    # Robustness fix: the original indexed n[0] and crashed with IndexError
    # on an empty list; it also shadowed the builtin `max` with a local.
    if not n:
        print('the length', 0)
        return
    longest = max(len(word) for word in n)
    print('the length', longest)
# Read how many words to collect, then read them one per line into `n`
# (the list consumed by the `long(n)` call below).
n = []
word_count = int(input("enter the number of words: "))
for _ in range(word_count):
    n.append(input())
long(n) | Ashwathy-rk/python | exp 30.py | exp 30.py | py | 240 | python | en | code | 0 | github-code | 13 |
27364477052 | import inspect
import torch
from torch import nn
import torch.distributed as dist
from ..schedule import create_schedule
from ..initialization import init_empty_weights
from ..pattern import call_module
from ..logger import get_logger
from .registry import register_schedule
@register_schedule()
def _apply_schedule(
    model,
    **sch_config,
):
    """Build a slapo schedule for a HuggingFace T5 model.

    Applies, in order: optional fp16/bf16 cast, schedule creation,
    tensor-parallel sharding (attention, MLPs, word embedding), optional
    input broadcast, activation checkpointing, and pipeline cuts.

    :param model: the T5 model to schedule.
    :param sch_config: schedule options; must contain "model_config" and may
        contain "fp16", "bf16", "group", "prefix", "bcast_input",
        "ckpt_ratio", "checkpoint_method", "pipeline_cuts", "delay_init",
        "attn_op_name".
    :return: the configured schedule.
    """
    model_config = sch_config.get("model_config", None)
    if model_config is None:
        raise ValueError(
            "Model config is not specified in sch_config. Please provide `model_config` in the kwarg."
        )
    # model_config may be an HF config object (attribute access) or a
    # dict-like (mapping access) — try the former first.
    try:
        model_name = model_config._name_or_path
    except Exception:
        model_name = model_config.get("_name_or_path", None)
    logger = get_logger(f"{model_name}")
    # Change data type.
    fp16 = sch_config.get("fp16", False)
    bf16 = sch_config.get("bf16", False)
    if fp16 and bf16:
        raise ValueError("Cannot use both fp16 and bf16")
    if fp16:
        logger.info("Change model dtype to fp16", ranks=0)
        model.half()
    elif bf16:
        logger.info("Change model dtype to bf16", ranks=0)
        model.bfloat16()
    else:
        logger.info("Use fp32 as default model dtype", ranks=0)
    group = sch_config.get("group", None)
    sch = create_schedule(model, group=group)
    logger.info(
        "Scheduling %s with TP=%d, config: %s",
        model_name,
        sch.world_size,
        sch_config,
        ranks=0,
    )
    # Tensor parallelism.
    prefix = sch_config.get("prefix", "")
    if sch.world_size > 1:
        logger.info("Shard model parameters", ranks=0)
        replace_and_shard_attention(sch, model_config, sch_config)
        shard_mlp(sch[prefix], model_config, "encoder.block.N.layer.1.DenseReluDense")
        shard_mlp(sch[prefix], model_config, "decoder.block.N.layer.2.DenseReluDense")
        shard_word_embedding(sch[prefix], model_config.vocab_size)
    if sch.world_size > 1 and sch_config.get("bcast_input", False):
        # Broadcast input to all devices within the MP group.
        # This is not required when running on Megatron.
        logger.info("Broadcast input to all devices", ranks=0)
        broadcast_input(sch)
    # Insert activation checkpoints.
    ckpt_ratio = sch_config.get("ckpt_ratio", 0.0)
    if ckpt_ratio > 0.0:
        checkpoint_method = sch_config.get("checkpoint_method", "uniform")
        logger.info("Checkpoint ratio: %.2f", ckpt_ratio, ranks=0)
        n_ckpt = checkpoint(
            sch[prefix],
            model_config,
            ckpt_ratio=ckpt_ratio,
            checkpoint_method=checkpoint_method,
        )
        logger.info("Checkpointed %d layers", n_ckpt, ranks=0)
    # Pipeline parallelism.
    if sch_config.get("pipeline_cuts", None):
        logger.info("Generate pipeline schedule", ranks=0)
        generate_pipeline_schedule(sch, sch_config)
    return sch
def replace_and_shard_attention(sch, model_config, sch_config):
    """Swap every T5 attention pattern for the epoi implementation and,
    when the MP group is larger than one, shard the QKV/output projections."""
    model_name = model_config._name_or_path
    logger = get_logger(model_name)
    prefix = sch_config.get("prefix", "")
    delay_init = sch_config.get("delay_init", True)
    attn_op_name = sch_config.get("attn_op_name", "cuda")
    disable_flash_attn = attn_op_name == "native_xformers"
    # (module path template, is cross attention, description used in logs)
    attention_specs = (
        ("encoder.block.N.layer.0.SelfAttention", False, "encoder self"),
        ("decoder.block.N.layer.0.SelfAttention", False, "decoder self"),
        ("decoder.block.N.layer.1.EncDecAttention", True, "decoder cross"),
    )
    for attn_path, is_cross, kind in attention_specs:
        cnt, fix_shape_cnt = _replace_and_shard_attention(
            sch[prefix],
            model_config,
            attn_path,
            cross_attn=is_cross,
            delay_init=delay_init,
            disable_flash_attn=disable_flash_attn,
        )
        logger.info(
            f"Replace %d {kind} attention patterns with %d shape fixing",
            cnt,
            fix_shape_cnt,
            ranks=0,
        )
def checkpoint(
    sch,
    model_config,
    path="",
    ckpt_ratio=1.0,
    checkpoint_method="uniform",
):
    """Activation-checkpoint a uniform fraction of encoder and decoder blocks.

    ``path`` is accepted for signature compatibility but not used here.
    Returns the total number of checkpointed layers.
    """
    if checkpoint_method != "uniform":
        raise NotImplementedError(
            f"Checkpoint method {checkpoint_method} is not supported yet."
        )
    if ckpt_ratio <= 0.0:
        return 0
    total = 0
    for block_path in ("encoder.block.N", "decoder.block.N"):
        total += _checkpoint(sch, model_config, block_path, ckpt_ratio=ckpt_ratio)
    return total
def generate_pipeline_schedule(sch, sch_config):
    """Trace the model and insert pipeline-stage cuts described by
    ``sch_config["pipeline_cuts"]``; returns the (possibly unchanged) schedule.
    """
    pipeline_cuts = sch_config.get("pipeline_cuts", None)
    prefix = sch_config.get("prefix", "")
    # Cut pipeline stages. For example, [[11], [11]] means to cut
    # encoder.block.11, decoder.block.11. And we always cut between encoder/decoder,
    # so there will be 4 stages in total.
    if pipeline_cuts:
        assert len(pipeline_cuts) == 2
        input_names = [
            "decoder_input_ids",
            "input_ids",
            "decoder_attention_mask",
            "attention_mask",
        ]
        sig = inspect.signature(sch.mod.forward)
        # Every forward() parameter that is not a real input is pinned to its
        # default so the tracer treats it as a constant.
        concrete_args = {
            p.name: p.default
            for p in sig.parameters.values()
            if p.name not in input_names
        }
        _prefix = f"{prefix}." if prefix else ""
        sch.trace_until(
            [f"{_prefix}encoder", f"{_prefix}decoder"],
            tracer="huggingface",
            concrete_args=concrete_args,
        )
        for cut in pipeline_cuts[0]:
            sch[f"{_prefix}encoder.block.{cut}"].cut_pipeline_stage()
        # An implicit cut always separates the encoder from the decoder.
        sch[f"{_prefix}encoder"].cut_pipeline_stage()
        for cut in pipeline_cuts[1]:
            sch[f"{_prefix}decoder.block.{cut}"].cut_pipeline_stage()
    return sch
def fix_position_bias_shape(sch, delay_init=True):
    """Resize the epoi T5 position-bias helper modules so that their head
    count matches the tensor-parallel shard; returns how many were replaced."""
    fixed = 0
    # Case 1: no incoming position_bias and no relative position bias — the
    # layer generates zeros with ZeroBiasLike, which takes (n_heads).
    if "zero_bias_like" in sch:
        from epoi.ops.xformers_attn import ZeroBiasLike
        old_mod = sch["zero_bias_like"].mod
        with init_empty_weights(enable=delay_init):
            sharded = ZeroBiasLike(old_mod.n_heads // sch.world_size)
        sch["zero_bias_like"].replace(sharded)
        fixed += 1
    # Case 2: no incoming position_bias but the layer owns a relative bias —
    # it is generated by RelativeBias(n_buckets, max_dist, n_heads, is_decoder).
    if "relative_attention_bias" in sch:
        from epoi.ops.xformers_attn import RelativeBias
        old_mod = sch["relative_attention_bias"].mod
        sharded = RelativeBias(
            old_mod.relative_attention_num_buckets,
            old_mod.relative_attention_max_distance,
            old_mod.n_heads // sch.world_size,
            old_mod.is_decoder,
        )
        sch["relative_attention_bias"].replace(sharded)
        fixed += 1
    return fixed
def _replace_and_shard_attention(
    sch,
    model_config,
    attn_path,
    cross_attn=False,
    delay_init=True,
    disable_flash_attn=False,
):
    """Replace each HF T5 attention layer matched by *attn_path* with the
    epoi xformers-based ``T5Attention``, fuse its projection layers, and
    shard them over the tensor-parallel group.

    :param sch: schedule rooted at the model (indexable by module path).
    :param model_config: HF T5 config (layer/head counts, hidden size, d_kv).
    :param attn_path: module path template with "N" as the layer index.
    :param cross_attn: True for encoder-decoder attention (only k/v fused).
    :param delay_init: create replacement modules with empty weights.
    :param disable_flash_attn: fall back to the native attention op.
    :return: tuple (layers replaced, position-bias shape fixes applied).
    """
    from epoi.inject.policy.t5 import InjectHFT5AttentionPolicy
    from epoi.ops.xformers_attn import T5Attention
    num_layers, num_heads, hidden_size, d_kv = (
        model_config.num_hidden_layers,
        model_config.num_attention_heads,
        model_config.hidden_size,
        model_config.d_kv,
    )
    cnt = 0
    fix_shape_cnt = 0
    for idx in range(num_layers):
        prefix = attn_path.replace("N", str(idx))
        sub_sch = sch[f"{prefix}"]
        # Build the epoi attention with the same hyper-parameters as the
        # module being replaced.
        init_config = InjectHFT5AttentionPolicy.gen_init_config_from_object(sub_sch.mod)
        if disable_flash_attn:
            init_config["attn_op_name"] = "native"
        with init_empty_weights(enable=delay_init):
            new_mod = T5Attention(**init_config)
        sub_sch.replace(new_mod)
        # Trace the new module so the q/k/v projection patterns below can be
        # found and replaced; the listed kwargs are pinned to constants.
        concrete_args = {
            "layer_head_mask": None,
            "past_key_value": None,
            "layer_past": None,
            "use_cache": False,
            "output_attentions": False,
        }
        if not cross_attn:
            concrete_args["key_value_states"] = None
        if idx == 0:
            # The first layer of encoder and decoder generates position bias
            # from scratch.
            concrete_args["position_bias"] = None
        sub_sch.trace(
            tracer="pytorch",
            leaf_modules=["MemoryEfficientAttentionOp", "RelativeBias", "ZeroBiasLike"],
            concrete_args=concrete_args,
        )
        if cross_attn:
            # Cross attention can only fuse k, v, because k, v are taking encoder status
            # while q is taking the current hidden status.
            class FusedKV(nn.Module):
                # One linear producing k and v together; split per head after
                # the projection.
                def __init__(self, num_heads, d_model, d_kv) -> None:
                    super().__init__()
                    self.hidden_size = d_model
                    self.num_heads = num_heads
                    self.key_value_proj_dim = d_kv
                    self.inner_dim = num_heads * self.key_value_proj_dim
                    self.fused_linear = nn.Linear(
                        self.hidden_size, self.inner_dim * 2, bias=False
                    )
                def reshape_for_scores(self, x):
                    # Heads are divided by the TP world size (sharded layout).
                    new_x_shape = x.size()[:-1] + (
                        self.num_heads // sch.world_size,
                        self.key_value_proj_dim,
                        2,
                    )
                    x = x.view(new_x_shape)
                    return x.contiguous()
                def forward(self, hidden_states):
                    kv = self.fused_linear(hidden_states)
                    reshaped_qkv = self.reshape_for_scores(kv)
                    k, v = torch.split(reshaped_qkv, 1, dim=-1)
                    k = torch.squeeze(k, -1).contiguous()
                    v = torch.squeeze(v, -1).contiguous()
                    return [k, v]
            class ShardableQ(nn.Module):
                # Standalone q projection (q takes the decoder hidden states,
                # so it cannot be fused with k/v here).
                def __init__(self, num_heads, d_model, d_kv) -> None:
                    super().__init__()
                    self.hidden_size = d_model
                    self.num_heads = num_heads
                    self.key_value_proj_dim = d_kv
                    self.inner_dim = num_heads * self.key_value_proj_dim
                    # NOTE: uses the enclosing `hidden_size`, which equals
                    # self.hidden_size for T5 configs.
                    self.query = nn.Linear(hidden_size, self.inner_dim, bias=False)
                def reshape_for_scores(self, x):
                    new_x_shape = x.size()[:-1] + (
                        self.num_heads // sch.world_size,
                        self.key_value_proj_dim,
                    )
                    x = x.view(new_x_shape)
                    return x.contiguous()
                def forward(self, hidden_states):
                    states = self.query(hidden_states)
                    states = self.reshape_for_scores(states)
                    return states
            def pattern_kv(x: torch.Tensor) -> torch.Tensor:
                # Matches "linear -> view(..., heads, d_kv)" for k and v.
                x = call_module("key|value", x)
                new_x_shape = x.size()[:-1] + (num_heads, d_kv)
                x = x.view(new_x_shape)
                return x
            subgraphs = sub_sch.find(pattern_kv)
            assert len(subgraphs) == 2
            with init_empty_weights(enable=delay_init):
                new_fused_kv = FusedKV(num_heads, hidden_size, d_kv)
            sub_sch.replace(new_fused_kv, subgraphs)
            def pattern_q(x: torch.Tensor) -> torch.Tensor:
                # Matches the same shape pattern for the q projection only.
                x = call_module("query", x)
                new_x_shape = x.size()[:-1] + (num_heads, d_kv)
                x = x.view(new_x_shape)
                return x
            subgraphs = sub_sch.find(pattern_q)
            assert len(subgraphs) == 1
            with init_empty_weights(enable=delay_init):
                new_q = ShardableQ(num_heads, hidden_size, d_kv)
            sub_sch.replace(new_q, subgraphs)
            if sch.world_size > 1:
                # Column-shard the fused k/v weight; gradients are
                # all-reduced in the backward pass.
                sub_sch["FusedKV_0.fused_linear"].shard("weight", axis=0)
                sub_sch["FusedKV_0.fused_linear"].sync(
                    mode="bwd_post", sync_op_or_fn="all_reduce"
                )
                # q is not fused so we shard it along.
                sub_sch["ShardableQ_0.query"].shard("weight", axis=0)
                sub_sch["ShardableQ_0.query"].sync(
                    mode="bwd_post", sync_op_or_fn="all_reduce"
                )
        else:
            # Self attention can fuse q, k, v.
            class FusedQKV(nn.Module):
                # One linear producing q, k and v in a single matmul.
                def __init__(self, num_heads, d_model, d_kv) -> None:
                    super().__init__()
                    self.hidden_size = d_model
                    self.num_heads = num_heads
                    self.key_value_proj_dim = d_kv
                    self.inner_dim = num_heads * self.key_value_proj_dim
                    self.fused_linear = nn.Linear(
                        self.hidden_size, self.inner_dim * 3, bias=False
                    )
                def reshape_for_scores(self, x):
                    new_x_shape = x.size()[:-1] + (
                        self.num_heads // sch.world_size,
                        self.key_value_proj_dim,
                        3,
                    )
                    x = x.view(new_x_shape)
                    return x.contiguous()
                def forward(self, hidden_states):
                    qkv = self.fused_linear(hidden_states)
                    reshaped_qkv = self.reshape_for_scores(qkv)
                    q, k, v = torch.split(reshaped_qkv, 1, dim=-1)
                    q = torch.squeeze(q, -1).contiguous()
                    k = torch.squeeze(k, -1).contiguous()
                    v = torch.squeeze(v, -1).contiguous()
                    return [q, k, v]
            def pattern(x: torch.Tensor) -> torch.Tensor:
                # Matches "linear -> view(..., heads, d_kv)" for q, k and v.
                x = call_module("query|key|value", x)
                new_x_shape = x.size()[:-1] + (num_heads, d_kv)
                x = x.view(new_x_shape)
                return x
            subgraphs = sub_sch.find(pattern)
            assert len(subgraphs) == 3
            # NOTE(review): unlike the cross-attention branch, this module is
            # created eagerly (no init_empty_weights) — confirm intended.
            new_fused_qkv = FusedQKV(num_heads, hidden_size, d_kv)
            sub_sch.replace(new_fused_qkv, subgraphs)
            if sch.world_size > 1:
                sub_sch["FusedQKV_0.fused_linear"].shard("weight", axis=0)
                sub_sch["FusedQKV_0.fused_linear"].sync(
                    mode="bwd_post", sync_op_or_fn="all_reduce"
                )
        if sch.world_size > 1:
            # Position-bias helpers must match the per-rank head count.
            fix_shape_cnt += fix_position_bias_shape(sub_sch)
            # Row-shard the output projection; activations are all-reduced.
            sch[f"{prefix}.out"].shard("weight", axis=1)
            sch[f"{prefix}.out"].sync(mode="fwd_post", sync_op_or_fn="all_reduce")
        cnt += 1
    return cnt, fix_shape_cnt
def shard_word_embedding(sch, vocab_size, word_embed_name="shared"):
    """Row-shard the shared word embedding across the model-parallel group
    (Megatron style): each rank owns one contiguous slice of the vocabulary,
    masks out-of-range ids, and all-reduces the partial embeddings."""
    if sch.world_size == 1:
        return
    # Embedding weight is split along the vocabulary dimension.
    sch[word_embed_name].shard("weight", axis=0)
    # This rank owns vocabulary rows [vocab_start_index, vocab_end_index).
    vocab_start_index = sch.rank * vocab_size // sch.world_size
    vocab_end_index = (sch.rank + 1) * vocab_size // sch.world_size

    def mask_input(_module, _input):
        # Shift ids into the local slice; ids owned by other ranks map to 0.
        out_of_range = (_input[0] < vocab_start_index) | (_input[0] >= vocab_end_index)
        local_ids = _input[0].clone() - vocab_start_index
        local_ids[out_of_range] = 0
        return local_ids

    sch[word_embed_name].sync(mode="fwd_pre", sync_op_or_fn=mask_input)

    def combine_output(_module, _input, output):
        # Zero the rows this rank does not own, then sum across the group so
        # every rank ends up with the full embedding.
        out_of_range = (_input[0] < vocab_start_index) | (_input[0] >= vocab_end_index)
        output[out_of_range, :] = 0.0
        dist.all_reduce(output, op=dist.ReduceOp.SUM, group=sch.group)
        return output

    sch[word_embed_name].sync(mode="fwd_post", sync_op_or_fn=combine_output)
# pylint: disable=dangerous-default-value
def shard_mlp(sch, model_config, path, fc_names=["wi", "wo"]):
    """Shard the two feed-forward projections of every layer: the first one
    column-wise (gradients all-reduced backward), the second row-wise
    (activations all-reduced forward)."""
    if sch.world_size == 1:
        return
    assert not model_config.is_gated_act, "Gated activation is not supported yet."
    first_fc, second_fc = fc_names[0], fc_names[1]
    for layer_idx in range(model_config.num_hidden_layers):
        layer_path = path.replace("N", str(layer_idx))
        sch[f"{layer_path}.{first_fc}"].shard("weight", axis=0)
        sch[f"{layer_path}.{first_fc}"].sync(mode="bwd_post", sync_op_or_fn="all_reduce")
        sch[f"{layer_path}.{second_fc}"].shard("weight", axis=1)
        sch[f"{layer_path}.{second_fc}"].sync(mode="fwd_post", sync_op_or_fn="all_reduce")
def _checkpoint(sch, model_config, path, ckpt_ratio=1.0):
if ckpt_ratio == 0.0:
return 0
n_ckpt = int(model_config.num_hidden_layers * ckpt_ratio)
for idx in range(n_ckpt):
sch[path.replace("N", str(idx))].checkpoint()
return n_ckpt
def broadcast_input(sch):
    """Register a forward-pre hook that broadcasts every model input from
    rank 0 to the rest of the model-parallel group."""
    def _bcast_from_rank0(inputs):
        for tensor in inputs:
            dist.broadcast(tensor, src=0, group=sch.group)
        return inputs

    sch.sync(mode="fwd_pre", sync_op_or_fn=_bcast_from_rank0)
| awslabs/slapo | slapo/model_schedule/t5.py | t5.py | py | 17,649 | python | en | code | 120 | github-code | 13 |
18995104263 | import pyfiglet as pf
from termcolor import colored
# Banner strings rendered by the figlet art below.
text1 = 'Ordinautz'
text2 = 'prepare to enter !orbit'


def ordinautz():
    """Print the app banner: the title in yellow, then the tagline, both in
    the 'slant' figlet font."""
    title_art = pf.figlet_format(text1, font='slant')
    tagline_art = pf.figlet_format(text2, font='slant')
    print(colored(title_art, 'yellow'))
    print(tagline_art)
ordinautz() | kluless13/ordiquiz | ordinautz.py | ordinautz.py | py | 259 | python | en | code | 0 | github-code | 13 |
22737763029 | from django.shortcuts import render
from django.http import HttpResponse
from django.http import *
from . models import blooddata
from django.db.models import Q
from django.contrib import messages
from django.contrib import auth
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
# Create your views here.
def index(request):
    """Render the 'index.html' landing page."""
    return render(request,'index.html')
def donate(request):
    """Render the 'donate.html' page with the donor-registration form."""
    return render(request,'donate.html')
def register_doner(request):
    """Store a new blood donor from the POSTed registration form, then
    redirect to the home page.

    BUG fixes: the first and last names were concatenated without a
    separator ("JohnSmith"), and non-POST requests returned None, which
    makes Django raise — both paths now redirect to "/".
    """
    if request.method == "POST":
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        name = f"{first_name} {last_name}"
        age = request.POST['age']
        blood_group = request.POST['blood_group']
        contact = request.POST['contact']
        address = request.POST['address']
        blooddata.objects.create(name=name, age=age, bloodgroup=blood_group, address=address, contact=contact)
    return HttpResponseRedirect("/")
def search(request):
    """Render the 'search.html' donor-search form."""
    return render(request, 'search.html')
def result(request):
    """Search donors by blood group and/or city (POST fields 'bloodgroup'
    and 'city') and render the matches.

    BUG fix: the original tested ``if (group, city):`` — a two-element
    tuple, which is always truthy — so the group-only branch and the
    redirect to '/receive/' were dead code, and the no-match path fell off
    the end returning None (a Django error).
    """
    if request.method == "POST":
        group = request.POST['bloodgroup']
        city = request.POST['city']
        if group and city:
            match = blooddata.objects.filter(
                Q(bloodgroup__icontains=group), Q(address__icontains=city)
            )
        elif group:
            match = blooddata.objects.filter(Q(bloodgroup=group))
        else:
            # Neither field filled in: send the user back to the search page.
            return HttpResponseRedirect('/receive/')
        if match:
            return render(request, 'result.html', {'match': match})
        messages.error(request, "No result found")
    # No results (or a GET request): show the search form again.
    return render(request, 'search.html')
| ookapil/Blood-share | bloodshare/views.py | views.py | py | 1,644 | python | en | code | 0 | github-code | 13 |
4970415353 | MAX = 1001
parent = []
def makeSet():
    """Re-initialise the global disjoint-set forest: every node 0..MAX
    becomes its own parent (a singleton set)."""
    global parent
    parent = list(range(MAX + 1))
def findSet(u):
    """Return the root of the set containing *u*.

    Improvement: compresses the path on the way back so repeated lookups
    are nearly O(1); the set structure (roots) is unchanged.
    """
    root = u
    while root != parent[root]:
        root = parent[root]
    # Path compression: re-point every node on the walk directly at the root.
    while u != root:
        parent[u], u = root, parent[u]
    return root
def unionSet(u, v):
    """Merge the sets containing *u* and *v* by re-rooting u's tree under
    v's root (no union-by-rank; fine for this problem size)."""
    root_u = findSet(u)
    root_v = findSet(v)
    parent[root_u] = root_v
# Driver: reads T test cases.  Each case starts with the highest node
# letter (nodes are 'A'..x), followed by one edge per line (e.g. "AB"),
# and ends with a blank line.  Prints the number of connected components.
T = int(input())
t = input()  # consume the blank line that follows the test-case count
while T:
    T -= 1
    x = input()
    n = ord(x) - ord('A') + 1
    makeSet()
    while True:
        line = input()
        if line == "":
            break
        x = line[0]
        y = line[1]
        unionSet(ord(x) - ord('A') + 1, ord(y) - ord('A') + 1)
    # Each node that is still its own parent is the root of one component.
    # NOTE(review): input() raises EOFError if the final case is not
    # followed by a blank line — confirm the judge's input guarantees one.
    res = 0
    for i in range(1, n + 1):
        if parent[i] == i:
            res += 1
    print(res)
    if T > 0:
print() | truclycs/code_for_fun | algorithms/python/python_blue/L18/_459_Graph_Connectivity.py | _459_Graph_Connectivity.py | py | 725 | python | en | code | 7 | github-code | 13 |
44890878848 | # advancedCalculator.py
# Author: Tinli Yarrington
# Date created: 3/21/15
# Dates edited: 3/23/15
# Purpose: to create a program that when user inputs certain function, program will calculate answer
# Notes:
# - add more shapes to calculate areas and volumes (maybe even be able to solve using integrals)
# - add more things that users can select to solve for (log, e, ln, exponents, differential equations)
import math
def selectChoice(choice):
    """Dispatch the menu letter to its solver; on unknown input, report the
    problem and re-show the main menu."""
    handlers = {
        "a": solveArea,
        "b": solveVolume,
        "c": createLine,
        "d": quadraticFormula,
        "e": anglesInRight,
    }
    handler = handlers.get(choice)
    if handler is not None:
        handler()
    else:
        print("That was not one of the choices.")
        print()
        main()
def solveArea():
    """Show the 2-D shape menu, read a selection and delegate to getArea."""
    print("Which shape would you like to calculate?")
    for option in ("a) Circle", "b) Triangle", "c) Square", "d) Rectangle", "e) Trapezoid"):
        print("{0:>5}".format(option))
    # Typo fix in the prompt: "chocie" -> "choice".
    choice = input("(please type the letter associated with the choice you would like) ")
    print()
    getArea(choice)
def getArea(choice):
    """Prompt for the dimensions of the shape selected by *choice*
    ('a'..'e'), compute and print its area; re-shows the shape menu on
    any other input.

    NOTE(review): dimensions are read with eval(input(...)), which executes
    arbitrary expressions — acceptable only for trusted interactive use.
    """
    if choice == "a":
        radius = eval(input("What is the radius of your circle? "))
        area = math.pow(radius,2)*math.pi
        print("The area of your circle is:", area)
    elif choice == "b":
        base = eval(input("What is the base of your triangle? "))
        height = eval(input("What is the height of your triangle? "))
        area = (0.5)*base*height
        print("The area of your triangle is:", area)
    elif choice == "c":
        side = eval(input("What is the side of your square? "))
        area = math.pow(side,2)
        print("The area of your square is:", area)
    elif choice == "d":
        length = eval(input("What is the length of your rectangle? "))
        width = eval(input("What is the width of your rectangle? "))
        area = length*width
        print("The area of your rectangle is:", area)
    elif choice == "e":
        base1 = eval(input("What is one of the bases of your trapezoid? "))
        base2 = eval(input("What is the other base of your trapezoid? "))
        height = eval(input("What is the height of your trapezoid? "))
        # Trapezoid area: (h / 2) * (b1 + b2).
        area = (height/2)*(base1+base2)
        print("The area of your trapezoid is:", area)
    else:
        print("That was not one of the choices.")
        solveArea()
def solveVolume():
    """Show the 3-D shape menu, read a selection and delegate to getVolume."""
    print("Which shape would you like to calculate?")
    menu = (
        "a) Sphere",
        "b) Triangular Prism",
        "c) Cube",
        "d) Rectangular Prism",
        "e) Cone",
        "f) Cylinder",
    )
    for option in menu:
        print("{0:>5}".format(option))
    # Typo fix in the prompt: "chocie" -> "choice".
    choice = input("(please type the letter associated with the choice you would like) ")
    getVolume(choice)
def getVolume(choice):
    """Prompt for the dimensions of the solid selected by *choice*
    ('a'..'f'), compute and print its volume; re-shows the shape menu on
    any other input.

    NOTE(review): dimensions are read with eval(input(...)), which executes
    arbitrary expressions — acceptable only for trusted interactive use.
    """
    if choice == "a":
        radius = eval(input("What is the radius of your sphere? "))
        volume = math.pow(radius,3)*math.pi*(4./3.)
        print("The volume of your sphere is:", volume)
    elif choice == "b":
        # Typo fixes: "trianglar" -> "triangular" in the messages below.
        base = eval(input("What is the base of your triangular prism? "))
        height = eval(input("What is the height of your triangular prism? "))
        length = eval(input("What is the length of your triangular prism? "))
        volume = (0.5)*base*height*length
        print("The volume of your triangular prism is:", volume)
    elif choice == "c":
        side = eval(input("What is the side of your cube? "))
        volume = math.pow(side,3)
        print("The volume of your cube is:", volume)
    elif choice == "d":
        # Typo fixes: "rectanglar" -> "rectangular" in the messages below.
        length = eval(input("What is the length of your rectangular prism? "))
        width = eval(input("What is the width of your rectangular prism? "))
        height = eval(input("What is the height of your rectangular prism? "))
        volume = length*width*height
        print("The volume of your rectangular prism is:", volume)
    elif choice == "e":
        radius = eval(input("What is the radius of your cone? "))
        height = eval(input("What is the height of your cone? "))
        volume = math.pow(radius,2)*math.pi*(height/3.)
        # BUG fix: this branch previously printed "sphere" instead of "cone".
        print("The volume of your cone is:", volume)
    elif choice == "f":
        radius = eval(input("What is the radius of your cylinder? "))
        height = eval(input("What is the height of your cylinder? "))
        volume = math.pow(radius,2)*math.pi*height
        print("The volume of your cylinder is:", volume)
    else:
        print("That was not one of the choices.")
        solveVolume()
def createLine():
    """Read two points from the user and print the point-slope form of the
    line through them.

    Robustness fix: equal x-coordinates previously raised
    ZeroDivisionError; a vertical line is now reported instead.
    """
    # NOTE: eval(input()) executes arbitrary expressions; kept for
    # consistency with the rest of this script — trusted input only.
    xPoint1 = eval(input("What is the x-coordinate of one of the points? "))
    yPoint1 = eval(input("What is the y-coordinate of one of the points? "))
    xPoint2 = eval(input("What is the x-coordinate of the other point? "))
    yPoint2 = eval(input("What is the y-coordinate of the other point? "))
    if xPoint2 == xPoint1:
        print("The slope of the line for these two points is undefined (vertical line): x =", xPoint1)
        return
    slope = float(yPoint2 - yPoint1)/(xPoint2 - xPoint1)
    print("The slope of the line for these two points is: y -", yPoint1, "=", slope, "(x -", xPoint1, ")")
def quadraticFormula():
print("Your equation is in the format: ax^2 + bx + c...")
a = eval(input("What is the value of the a? "))
b = eval(input("What is the value of the b? "))
c = eval(input("What is the value of the c? "))
x1 = -b + math.sqrt(math.pow(b,2) - 4*a*c)/(2*a)
x2 = -b - math.sqrt(math.pow(b,2) - 4*a*c)/(2*a)
print("The solutions to this quadratic equation are:", x1, "and", x2)
def anglesInRight():
    """Read two legs and the hypotenuse, print the three angles and their
    sum, and report whether the sides really form a right triangle.

    BUG fix: the original compared the float angle sum exactly with 180,
    so valid right triangles (e.g. 3-4-5) were often rejected because of
    rounding; the comparison now uses a tolerance.
    """
    side1 = eval(input("What is one of the sides of the right triangle? "))
    side2 = eval(input("What is another side of the right triangle? "))
    hypotenuse = eval(input("What is the length of the hypotenuse? "))
    angle1 = math.degrees(math.asin(side1/hypotenuse))
    angle2 = math.degrees(math.asin(side2/hypotenuse))
    angle3 = 90
    sumAngles = angle1 + angle2 + angle3
    print("The angles in the triangle are:{0:6.2f},{1:6.2f},{2:3}".format(angle1, angle2, angle3))
    print("The sum of the angles is:", sumAngles)
    if math.isclose(sumAngles, 180, rel_tol=0, abs_tol=1e-9):
        print("These sides correctly form a right triangle.")
    else:
        print("These sides do NOT form a right triangle.")
def main():
    """Print the banner and top-level menu, run one calculation, then keep
    asking whether to solve something else until the user answers yes/no."""
    bar = "+" + ("-"*48) + "+"
    print(bar)
    print("|{0:^48}|".format("CALCULATOR"))
    print(bar)
    print()
    print("What would you like to solve for?")
    print("{0:>5}".format("a) Area of a shape"))
    print("{0:>5}".format("b) Volume of a shape"))
    print("{0:>5}".format("c) Equation of a line"))
    print("{0:>5}".format("d) Solution to quadratic equation"))
    print("{0:>5}".format("e) Angles in a right triangle"))
    choice = input("(please type the letter associated with the choice you would like) ")
    print()
    selectChoice(choice)
    print()
    option = ""
    # Loop until a recognised answer; "YES" recurses into a fresh menu and
    # the loop condition then ends this frame when the recursion returns.
    while option != "YES" and option != "NO":
        print("Would you like to solve for something else?")
        option = input("(type yes or no as answers) ").upper()
        if option == "YES":
            main()
        elif option == "NO":
            print("Hope your problem was solved!")
main()
| tyarrington/Calculator | advancedCalculator.py | advancedCalculator.py | py | 7,271 | python | en | code | 0 | github-code | 13 |
40206219640 | #!/usr/bin/env python
import rrdtool
'''
Given configuration
α : 0.1
β : 0.0035
γ : 0.1
period : 10
'''
# Create a round-robin database for in/out octet counters with
# Holt-Winters aberrant-behaviour detection, using the alpha/beta/gamma
# and 10-sample season documented in the module docstring above.
ret = rrdtool.create("netP.rrd",
"--start",'N',
"--step",'300',
"DS:inoctets:COUNTER:600:U:U",
"DS:outoctets:COUNTER:600:U:U",
"RRA:AVERAGE:0.5:1:20",
"RRA:HWPREDICT:30:0.1:0.0035:10",
#RRA:HWPREDICT:rows:alpha:beta:seasonal period[:rra-num]
"RRA:SEASONAL:10:0.1:2",
#RRA:SEASONAL:seasonal period:gamma:rra-num[:smoothing-window=fraction]
"RRA:DEVSEASONAL:10:0.1:2",
#RRA:DEVSEASONAL:seasonal period:gamma:rra-num[:smoothing-window=fraction]
"RRA:DEVPREDICT:30:4",
#RRA:DEVPREDICT:rows:rra-num
"RRA:FAILURES:30:7:9:4"
#RRA:FAILURES:rows:threshold:window length:rra-num
)
#HWPREDICT rra-num is the index of the SEASONAL RRA.
#SEASONAL rra-num is the index of the HWPREDICT RRA.
#DEVPREDICT rra-num is the index of the DEVSEASONAL RRA.
#DEVSEASONAL rra-num is the index of the HWPREDICT RRA.
#FAILURES rra-num is the index of the DEVSEASONAL RRA.
# NOTE(review): newer python-rrdtool bindings raise an exception on failure
# and return None — confirm this return-value check matches the installed
# binding; with those bindings the branch below never fires.
if ret:
    print(rrdtool.error())
| PitCoder/NetworkMonitor | Service_Monitoring/Prediction/rrdPredict.py | rrdPredict.py | py | 1,349 | python | en | code | 2 | github-code | 13 |
33246231999 | #board =list(input())
board = [["X", ".", ".", "X"], [".", ".", ".", "X"], [".", ".", ".", "X"]]


def check(board):
    """Count the battleships on *board*.

    Ships are horizontal or vertical runs of 'X'; each ship is counted
    exactly once, at its top-left cell (no 'X' directly above or to the
    left of it).
    """
    if not board:
        return 0
    total_ships = 0
    row_count, col_count = len(board), len(board[0])
    for r in range(row_count):
        for c in range(col_count):
            if board[r][c] == '.':
                continue
            if r > 0 and board[r - 1][c] == 'X':
                continue  # part of a vertical ship counted above
            if c > 0 and board[r][c - 1] == 'X':
                continue  # part of a horizontal ship counted to the left
            total_ships += 1
    return total_ships


print(check(board))
| Narek-Papyan/ml | Practical_5/battleships-in-a-board.py | battleships-in-a-board.py | py | 527 | python | en | code | 0 | github-code | 13 |
class Node:
    """One element of a singly linked list."""

    def __init__(self, dataval=None):
        self.dataval = dataval  # payload stored in this node
        self.nextval = None     # next node in the chain, or None at the tail
class SLinkedList:
    """A minimal singly linked list built from Node objects."""

    def __init__(self):
        self.headval = None  # first node, or None for an empty list

    def Inbetween(self, middle_node, newdata):
        """Insert a new node holding *newdata* right after *middle_node*."""
        if middle_node is None:
            print("The mentioned node is absent")
            return
        fresh = Node(newdata)
        fresh.nextval = middle_node.nextval
        middle_node.nextval = fresh

    def listprint(self):
        """Print every stored value from head to tail, one per line."""
        cursor = self.headval
        while cursor is not None:
            print(cursor.dataval)
            cursor = cursor.nextval
# Demo: build the list Mon -> Tue -> Thu, then insert "wed" after Tue.
# NOTE(review): the name `list` shadows the builtin for the rest of the
# module; it is kept because the final print call below refers to it.
list = SLinkedList()
tue = Node("Tue")
thu = Node("Thu")
list.headval = Node("Mon")
list.headval.nextval = tue
tue.nextval = thu
list.Inbetween(list.headval.nextval, "wed")
list.listprint() | lalithakre/Python- | insertionAtMiddleSingleLinkedList.py | insertionAtMiddleSingleLinkedList.py | py | 787 | python | en | code | 0 | github-code | 13 |
73257835218 | from django.urls import path
from . import views
# Namespace for reverse()/{% url %} lookups, e.g. "menus:items".
app_name = 'menus'
urlpatterns = [
    # Item list at the app root.
    path('', views.ItemListView.as_view(), name='items'),
    # Detail/update view for a single item, addressed by primary key.
    path('<int:pk>/', views.ItemUpdateView.as_view(), name='item_detail'),
    # Item creation form.
    path('create/', views.ItemCreateView.as_view(), name='create'),
] | flpn/muypicky | menus/urls.py | urls.py | py | 289 | python | en | code | 0 | github-code | 13 |
6795989238 | from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from users.models import Profile
from users.serializers import ProfileSerializer
class RegisterAPIView(CreateAPIView):
    """Register a new user plus Profile and return an auth token.

    Rejects duplicate email addresses with 409.
    """
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    permission_classes = [AllowAny]

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        user_data = data["user"]
        # Guard clause: the username doubles as the email address.
        if User.objects.filter(username=user_data["email"]).exists():
            return Response("There is already a registered user with this email.",
                            status=status.HTTP_409_CONFLICT)
        user = User.objects.create(
            username=user_data["email"], email=user_data["email"],
            first_name=user_data["first_name"], last_name=user_data["last_name"]
        )
        user.set_password(user_data["password"])
        # BUG fix: set_password only hashes the password on the in-memory
        # instance; without save() the hash was never persisted, so the
        # created account could never log in.
        user.save(update_fields=["password"])
        token = Token.objects.create(user=user)
        profile = Profile.objects.create(user=user, avatar=data["avatar"])
        return Response({
            "token": token.key,
            "username": user.username,
            "profile_id": profile.id,
            "avatar": profile.avatar.url
        }, status=status.HTTP_201_CREATED)
class LoginAPIView(ObtainAuthToken):
    """Exchange username/password credentials for an auth token plus basic
    profile information."""

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(
            data=request.data, context={"request": request}
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data["user"]
        # Reuse an existing token if the user already has one.
        token, _created = Token.objects.get_or_create(user=user)
        profile = Profile.objects.get(user=user)
        payload = {
            "token": token.key,
            "username": user.username,
            "profile_id": profile.id,
            "avatar": profile.avatar.url
        }
        return Response(payload, status=status.HTTP_200_OK)
| cesarparrado20/api-pairgame | src/users/views.py | views.py | py | 2,315 | python | en | code | 0 | github-code | 13 |
30568713682 | from typing import Optional
from fastapi import APIRouter, Depends
from fastapi.encoders import jsonable_encoder
from fastapi_pagination import Params
from fastapi_pagination.ext.sqlalchemy import paginate
from sqlalchemy.orm import Session
from app.models.movies import MovieBase
from db.database import get_db
from db.db_models import ModelMovies
router = APIRouter()
object = 'movies'
@router.get('/movies')
def fetch_subscriptions(
    db: Session = Depends(get_db),
    params: Params = Depends()
) -> list:
    """Return the current page of movies, or an empty list if there are none."""
    page = jsonable_encoder(paginate(db.query(ModelMovies), params))
    items = page.get('items')
    return items if items is not None else []
@router.get("/movies/{id}")
def get_id(
id: Optional[str],
db: Session = Depends(get_db)
) -> dict:
response = db.query(ModelMovies).filter(ModelMovies.id == id).first()
if response is None:
return {"response": f'The {object} not found'}
else:
return response
@router.post("/movies")
def create(
movies: MovieBase,
db: Session = Depends(get_db)
) -> dict:
if db.query(ModelMovies).filter(ModelMovies.name == movies.name).first() is not None:
return {"response": f'The {object} already exists'}
else:
to_create = ModelMovies(
name=movies.name,
description=movies.description,
price=movies.price
)
db.add(to_create)
db.commit()
return {"response": to_create.id}
@router.put("/movies/{id}")
async def update(
id: Optional[str],
movies: MovieBase,
db: Session = Depends(get_db)
) -> dict:
response = db.query(ModelMovies).filter(ModelMovies.id == id).first()
if response is None:
return {"response": f'The {object} not found'}
else:
response.name = movies.name
response.description = movies.description
response.price = movies.price
db.add(response)
db.commit()
return {"response": id}
| SamMeown/billing_service | app/api/v1/server/movies.py | movies.py | py | 2,024 | python | en | code | 0 | github-code | 13 |
17113938846 | from django.urls import include, path
from . import views
# URL namespace for this app, used as 'parking:<name>' in reverse().
app_name = 'parking'
urlpatterns = [
    # NOTE(review): this route leaves all three parameters untyped while the
    # routes below constrain theirs (slug/int) -- confirm that is intentional.
    path('add/<zone>/<place_number>/<car_number>/', views.place_add, name='place-add'),
    path('book/<slug:zone>/<int:place_number>/', views.add_booking, name='add-booking'),
    path('zonemap/<int:zone_pk>/', views.get_zone_map, name='place-map'),
    path('entmap/<int:ent_pk>/', views.get_enterprise_map, name='enterprise-map'),
    # NOTE(review): no trailing slash here, unlike the other routes -- confirm.
    path('placebookings/<int:zone_pk>/<int:place_number>', views.get_place_bookings, name='place-bookings'),
]
| staplercut/parking | parking/urls.py | urls.py | py | 542 | python | en | code | 0 | github-code | 13 |
23602805352 | import numpy as np
import torch
from torch import nn
from utils.attentionTransfer_util.util import AverageMeter
from prefetch_generator import BackgroundGenerator
from utils.attentionTransfer_util.util import get_learning_rate, accuracy, record_epoch_learn_alpha, get_fc_name
from utils.attentionTransfer_util.regularizer import reg_classifier, reg_fea_map, reg_att_fea_map, reg_l2sp, \
reg_pixel_att_fea_map_learn, reg_channel_att_fea_map_learn, reg_channel_pixel_att_fea_map_learn
class TransferFramework:
    """Attention-based transfer-learning trainer.

    Wraps a frozen source model (teacher) and a trainable target model
    (student), registers forward hooks on matching feature layers of both,
    and trains the target with a classification loss plus configurable
    regularizers: classifier-weight decay, (attention-weighted) feature-map
    losses, or L2-SP weight-to-source decay.
    """

    def __init__(self, args, train_loader, target_class_num, base_model_name, model_source, source_metric_fc,
                 model_target, target_metric_fc, feature_criterions, reg_type, loss_fn, channel_weights, num_epochs, alpha,
                 beta, optimizer, lr_scheduler, writer, logger, print_freq=10):
        # Training configuration and data.
        self.setting = args
        self.train_loader = train_loader
        self.target_class_num = target_class_num
        self.base_model_name = base_model_name
        # Frozen source network and its metric head.
        self.model_source = model_source
        self.source_metric_fc = source_metric_fc
        # Trainable target network and its metric head.
        self.model_target = model_target
        self.target_metric_fc = target_metric_fc
        self.loss_fn = loss_fn
        # Detached snapshots of source weights; filled only for reg_type 'l2sp'.
        self.model_source_weights = {}
        self.model_target_weights = {}
        self.reg_type = reg_type
        self.feature_criterions = feature_criterions
        # alpha scales the feature loss, beta scales the classifier loss.
        self.alpha = alpha
        self.beta = beta
        self.channel_weights = channel_weights
        self.num_epochs = num_epochs
        self.optimizer = optimizer
        self.lr = 0.0
        self.lr_scheduler = lr_scheduler
        self.writer = writer
        self.logger = logger
        self.print_freq = print_freq
        # framework init
        # self.fc_name = get_fc_name(self.base_model_name, self.logger)
        # Layer names to hook plus buffers collecting their forward outputs.
        self.hook_layers = []
        self.layer_outputs_source = []
        self.layer_outputs_target = []
        self.framework_init()

    def framework_init(self):
        """Set up forward hooks or weight snapshots based on the regularizer."""
        if 'fea_map' in self.reg_type:
            self.hook_setting()
        elif self.reg_type in ['l2sp']:
            # L2-SP: keep a detached copy of every source parameter as the
            # reference point for the decay-to-source penalty.
            for name, param in self.model_source.named_parameters():
                self.model_source_weights[name] = param.detach()
                # print('name={}'.format(name))
            self.logger.info('self.model_source_weights len = {} !'.format(len(self.model_source_weights)))

    # hook
    def _for_hook_source(self, module, input, output):
        # Collect source-model activations during its forward pass.
        self.layer_outputs_source.append(output)

    def _for_hook_target(self, module, input, output):
        # Collect target-model activations during its forward pass.
        self.layer_outputs_target.append(output)

    def register_hook(self, model, func):
        """Attach *func* as a forward hook to every layer named in hook_layers."""
        for name, layer in model.named_modules():
            if name in self.hook_layers:
                layer.register_forward_hook(func)

    def get_hook_layers(self):
        """Select which conv layers to hook for the supported backbones."""
        if self.base_model_name == 'LResNet34E_IR':
            self.hook_layers = ['layer1.2.conv2', 'layer2.3.conv2', 'layer3.5.conv2', 'layer4.2.conv2']
        elif self.base_model_name == 'mobilefacenet_p0.5':
            self.hook_layers = ['layers.4.conv3', 'layers.8.conv3', 'layers.11.conv3', 'layers.14.conv3']
        else:
            assert False, self.logger.info("invalid base_model_name={}".format(self.base_model_name))

    def hook_setting(self):
        # hook: register the same collection hooks on both networks.
        self.get_hook_layers()
        self.register_hook(self.model_source, self._for_hook_source)
        self.register_hook(self.model_target, self._for_hook_target)
        self.logger.info("self.hook_layers={}".format(self.hook_layers))

    def train(self, epoch):
        """Run one training epoch; return averaged losses and top-1 accuracy."""
        # train mode
        self.model_target.train()
        self.target_metric_fc.train()
        self.model_source.eval()
        clc_losses = AverageMeter()
        classifier_losses = AverageMeter()
        feature_losses = AverageMeter()
        # attention_losses = AverageMeter()
        total_losses = AverageMeter()
        train_top1_accs = AverageMeter()
        # NOTE(review): scheduler is stepped at epoch start with an explicit
        # epoch index (legacy PyTorch scheduler API).
        self.lr_scheduler.step(epoch)
        self.lr = get_learning_rate(self.optimizer)
        self.logger.info('self.optimizer={}'.format(self.optimizer))
        self.logger.info('feature_loss alpha={}, beta={}'.format(self.alpha, self.beta))
        self.logger.info('self.reg_type={}'.format(self.reg_type))
        # for i, (imgs, labels) in enumerate(target_par):
        for i, (imgs, labels) in enumerate(self.train_loader):
            # target_data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                labels = labels.cuda()
            # taget forward and loss
            outputs = self.model_target(imgs)
            outputs = self.target_metric_fc(outputs, labels)
            clc_loss = self.loss_fn(outputs, labels)
            classifier_loss = 0
            feature_loss = 0
            # attention_loss = 0
            # source_model forward for hook: only needed when a feature-map
            # regularizer will consume the hooked source activations.
            if self.reg_type not in ['l2', 'l2fe', 'l2sp']:
                with torch.no_grad():
                    # self.logger.info("model_source forward!")
                    _ = self.model_source(imgs)
            if not self.reg_type in ['l2', 'l2fe']:
                classifier_loss = reg_classifier(self.target_metric_fc)
            if self.reg_type == 'pixel_att_fea_map_learn':
                feature_loss = reg_pixel_att_fea_map_learn(self.layer_outputs_source,
                                                           self.layer_outputs_target, self.feature_criterions)
            elif self.reg_type == 'channel_att_fea_map_learn':
                feature_loss = reg_channel_att_fea_map_learn(self.layer_outputs_source,
                                                             self.layer_outputs_target, self.feature_criterions)
            total_loss = clc_loss + self.alpha * feature_loss + self.beta * classifier_loss
            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()
            # batch update: hooked activations must be dropped every batch.
            self.layer_outputs_source.clear()
            self.layer_outputs_target.clear()
            clc_losses.update(clc_loss.item(), imgs.size(0))
            # The regularizer losses stay plain ints (0) when disabled, so
            # .item() is only valid on the tensor case.
            if classifier_loss == 0:
                classifier_losses.update(classifier_loss, imgs.size(0))
            else:
                classifier_losses.update(classifier_loss.item(), imgs.size(0))
            if feature_loss == 0:
                feature_losses.update(feature_loss, imgs.size(0))
            else:
                feature_losses.update(feature_loss.item(), imgs.size(0))
            total_losses.update(total_loss.item(), imgs.size(0))
            # compute accuracy
            top1_accuracy = accuracy(outputs, labels, 1)
            train_top1_accs.update(top1_accuracy, imgs.size(0))
            # Print status
            if i % self.print_freq == 0:
                self.logger.info(
                    'Train Epoch: [{:d}/{:d}][{:d}/{:d}]\tlr={:.6f}\tclc_loss={:.4f}\t\tclassifier_loss={:.4f}'
                    '\t\tfeature_loss={:.6f}\t\ttotal_loss={:.4f}\ttop1_Accuracy={:.4f}'
                    .format(epoch, self.num_epochs, i, len(self.train_loader), self.lr, clc_losses.avg,
                            classifier_losses.avg, feature_losses.avg, total_losses.avg, train_top1_accs.avg))
            # break
        # save tensorboard
        self.writer.add_scalar('lr', self.lr, epoch)
        self.writer.add_scalar('Train_classification_loss', clc_losses.avg, epoch)
        self.writer.add_scalar('Train_classifier_loss', classifier_losses.avg, epoch)
        self.writer.add_scalar('Train_feature_loss', feature_losses.avg, epoch)
        # self.writer.add_scalar('Train_attention_loss', attention_losses.avg, epoch)
        self.writer.add_scalar('Train_total_loss', total_losses.avg, epoch)
        self.writer.add_scalar('Train_top1_accuracy', train_top1_accs.avg, epoch)
        self.logger.info(
            '||==> Train Epoch: [{:d}/{:d}]\tTrain: lr={:.6f}\tclc_loss={:.4f}\t\tclassifier_loss={:.4f}'
            '\t\tfeature_loss={:.6f}\t\ttotal_loss={:.4f}\ttop1_Accuracy={:.4f}'
            .format(epoch, self.num_epochs, self.lr, clc_losses.avg, classifier_losses.avg,
                    feature_losses.avg, total_losses.avg, train_top1_accs.avg))
        return clc_losses.avg, classifier_losses.avg, feature_losses.avg, \
               total_losses.avg, train_top1_accs.avg
# def val(self, epoch):
# # test mode
# self.model_target.eval()
#
# val_losses = AverageMeter()
# val_top1_accs = AverageMeter()
#
# # Batches
# for i, (imgs, labels) in enumerate(self.val_loader):
# # Move to GPU, if available
# if torch.cuda.is_available():
# imgs = imgs.cuda()
# labels = labels.cuda()
#
# if self.data_aug == 'improved':
# bs, ncrops, c, h, w = imgs.size()
# imgs = imgs.view(-1, c, h, w)
#
# # forward and loss
# with torch.no_grad():
# outputs = self.model_target(imgs)
# if self.data_aug == 'improved':
# outputs = outputs.view(bs, ncrops, -1).mean(1)
#
# val_loss = self.loss_fn(outputs, labels)
#
# val_losses.update(val_loss.item(), imgs.size(0))
# # compute accuracy
# top1_accuracy = accuracy(outputs, labels, 1)
# val_top1_accs.update(top1_accuracy, imgs.size(0))
#
# # batch update
# self.layer_outputs_source.clear()
# self.layer_outputs_target.clear()
#
# # Print status
# if i % self.print_freq == 0:
# self.logger.info('Val Epoch: [{:d}/{:d}][{:d}/{:d}]\tval_loss={:.4f}\t\ttop1_accuracy={:.4f}\t'
# .format(epoch, self.num_epochs, i, len(self.val_loader), val_losses.avg, val_top1_accs.avg))
# # break
#
# self.writer.add_scalar('Val_loss', val_losses.avg, epoch)
# self.writer.add_scalar('Val_top1_accuracy', val_top1_accs.avg, epoch)
#
# self.logger.info('||==> Val Epoch: [{:d}/{:d}]\tval_oss={:.4f}\t\ttop1_accuracy={:.4f}'
# .format(epoch, self.num_epochs, val_losses.avg, val_top1_accs.avg))
#
# return val_losses.avg, val_top1_accs.avg
| CN1Ember/feathernet_mine | data_clean/utils/attentionTransfer_util/framework.py | framework.py | py | 10,361 | python | en | code | 1 | github-code | 13 |
26621283749 |
# Post-office problem (BOJ 2141): pick the village position minimizing the
# total population-weighted distance. (The original label says "binary
# search"; the implementation is actually a weighted-median scan.)
import sys
# Rebind input() to sys.stdin.readline for fast reads (competitive idiom).
input = sys.stdin.readline
n = int(input())
# village positions only
arr = []
# village info: position -> population
dic ={}
# total population
number_of_people = 0
for i in range(n):
    # read one village: position a, population b
    a,b = map(int, input().split())
    # store population in the dict
    dic[a] = b
    # remember the position
    arr.append(a)
    # accumulate the total population
    number_of_people += b
# sort villages by position
arr.sort()
half = number_of_people//2
# when the total is even, the midpoint is ambiguous: compare both candidates
if number_of_people%2 == 0:
    # if both candidate points fall in the same village, the choice is moot:
    # candidate using threshold `half`
    total = 0
    answer = 0
    for i in arr:
        temp = dic[i]
        total += temp
        if total > half:
            answer = i
            break
    total_cost = 0
    for i in arr:
        total_cost += dic[i] *(abs(answer - i))
    # candidate using threshold `half - 1`
    total_back = 0
    answer_back = 0
    for i in arr:
        temp = dic[i]
        total_back += temp
        if total_back > half-1:
            answer_back = i
            break
    total_cost_back = 0
    for i in arr:
        total_cost_back += dic[i] * (abs(answer_back - i))
    # compare total costs; on a tie print the smaller position
    if total_cost >= total_cost_back:
        print(answer_back)
    else:
        print(answer)
else:
    total = 0
    answer = 0
    for i in arr:
        temp = dic[i]
        total += temp
        # the village containing the median person is the answer
        if total > half:
            answer = i
            break
    print(answer)
| isakchoe/TIL | algorithm /back_j/2141_우체국.py | 2141_우체국.py | py | 1,622 | python | ko | code | 0 | github-code | 13 |
72146493459 | import argparse
import numpy as np
from numpy import linalg as LA
from scipy.stats import norm
def calculate_cpma(sim_zscores, num_genes):
    """Compute the CPMA statistic from a vector of z-scores.

    The z-scores are converted to p-values via the standard normal CDF, the
    mean of -log(p) is taken as the rate estimate, and a likelihood-ratio
    style statistic is returned.
    """
    pvals = norm.cdf(sim_zscores)
    lam = (-np.log(pvals)).mean()
    return -2 * (((lam - 1) * num_genes) / lam - num_genes * np.log(lam))
def simulateZscores(zfile, efile, qfile, output, n):
    """Simulate *n* CPMA values from correlated z-score draws.

    Each draw is mean_zscores + Q * sqrt(diag(E)) * z with z ~ N(0, I), where
    Q/E are eigenvectors/eigenvalues loaded from *qfile*/*efile* (presumably
    of a gene-gene correlation matrix -- confirm upstream). Each draw is
    scored with calculate_cpma and the results are written to *output*.
    """
    #mean_zscores = np.loadtxt('/storage/cynthiawu/trans_eQTL/Nerve-Tibial/chr1_gene_snp_eqtls_meanzscores.csv', dtype=complex, delimiter='\t')
    mean_zscores = np.loadtxt(zfile, delimiter='\t')
    print('mean zscores file read')
    #e_values = np.loadtxt('/storage/cynthiawu/trans_eQTL/Nerve-Tibial/chr1_gene_snp_eqtls_evalues.csv', dtype=complex, delimiter='\t')
    e_values = np.loadtxt(efile, dtype=complex, delimiter='\t')
    n_genes = len(e_values)
    print(n_genes)
    print('e_values file read')
    #Q = np.loadtxt('/storage/cynthiawu/trans_eQTL/Nerve-Tibial/chr1_gene_snp_eqtls_Q.csv', dtype=complex, delimiter='\t')
    Q = np.loadtxt(qfile, dtype=complex, delimiter='\t')
    print('Q file read')
    diag_e_values = np.diag(e_values)
    E = np.sqrt(diag_e_values)
    print('starting simulations')
    sim_cpma = []
    # Precompute the mixing matrix once; each draw is then a single matvec.
    e_matrix = np.dot(Q, E)
    for i in range(n):
        if (i%1000==0):
            print(i)
        z = np.random.normal(0, 1, n_genes)
        sim_zscores = mean_zscores + np.dot(e_matrix,z)
        # Inputs are complex dtype; only the real part is meaningful here.
        cpma = calculate_cpma(sim_zscores.real, n_genes)
        sim_cpma.append(cpma)
    print('simuated cpma calculated')
    sim_cpma = np.array(sim_cpma)
    # np.savetxt('/storage/cynthiawu/trans_eQTL/Nerve-Tibial/chr1_gene_snp_eqtls_simzscores100.csv', sim_zscores, delimiter='\t')
    np.savetxt(output, sim_cpma, delimiter='\t', fmt='%f')
def main():
    """Parse the command line and run the CPMA simulation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-z", "--mzscores", required=True, help="Input mean zscores file")
    parser.add_argument("-e", "--eigenvalues", required=True, help="Input eigenvalues file")
    parser.add_argument("-q", "--eigenvectors", required=True, help="Input eigenvectorsfile")
    parser.add_argument("-o", "--output", required=True, help="Ouptput file with simulated cpma values")
    parser.add_argument("-n", "--simulations", required=True, type=int, help="Number of simulations")
    args = parser.parse_args()
    simulateZscores(args.mzscores, args.eigenvalues, args.eigenvectors, args.output, args.simulations)
if __name__ == "__main__":
main()
| cynthiaewu/trans-eQTL | CPMA/misc/simulate_cpma.py | simulate_cpma.py | py | 2,514 | python | en | code | 3 | github-code | 13 |
43140399096 | import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from collections import defaultdict
from scipy.spatial.distance import cdist
def read_data():
    """Load the Spotify dataset and strip list-quoting from the artists column."""
    frame = pd.read_csv("./spotify-dataset/data.csv")
    frame['artists'] = frame['artists'].apply(lambda s: s[1:-1].replace("'", ''))
    return frame
def fit_songs_numeric_data(data):
    """Fit a StandardScaler pipeline on the numeric song features of *data*.

    Standardization maps each feature to zero mean / unit variance so the
    later cosine-distance comparison treats all features comparably.
    """
    pipeline = Pipeline([('scaler', StandardScaler())], verbose=False)
    feature_matrix = data[number_cols].values
    pipeline.fit(feature_matrix)
    return pipeline
def get_songs_not_present_in_dataset(spotify_data, songs):
    """Return the subset of *songs* whose 'id' is absent from *spotify_data*.

    Parameters
    ----------
    spotify_data : pandas.DataFrame with an 'id' column.
    songs : iterable of dicts, each carrying an 'id' key.

    Returns
    -------
    list of the song dicts not found in the dataset, in input order.
    """
    # Build the id set once (O(n)) instead of filtering the whole DataFrame
    # for every song, which the previous version did (O(n*m)).
    known_ids = set(spotify_data['id'])
    return [song for song in songs if song['id'] not in known_ids]
# Numeric feature columns used for scaling and similarity throughout
# this module (shared by fit_songs_numeric_data / get_mean_vector /
# recommend_songs).
number_cols = ['valence', 'year', 'acousticness', 'danceability', 'duration_ms', 'energy', 'explicit',
               'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'popularity', 'speechiness', 'tempo']
def get_song_data(song, spotify_data):
    """Look up *song* by its 'id' in *spotify_data*; return the row or None."""
    matches = spotify_data[spotify_data['id'] == song['id']]
    try:
        return matches.iloc[0]
    except IndexError:
        # No row with that id.
        return None
def get_mean_vector(song_list, spotify_data):
    """Average the numeric feature vectors of the songs found in the dataset.

    Songs missing from *spotify_data* are skipped with a console warning.
    """
    vectors = []
    for song in song_list:
        row = get_song_data(song, spotify_data)
        if row is None:
            print('Warning: {} does not exist in Spotify or in database'.format(song['name']))
            continue
        vectors.append(row[number_cols].values)
    return np.mean(np.array(vectors), axis=0)
def flatten_dict_list(dict_list):
    """Merge a list of dicts into one mapping key -> list of values, in order.

    The previous version pre-seeded keys from the first dict only and used a
    factory-less defaultdict, so a later dict carrying an extra key raised
    KeyError. Using defaultdict(list) yields identical output for homogeneous
    dicts and also handles heterogeneous ones.
    """
    flattened_dict = defaultdict(list)
    for dictionary in dict_list:
        for key, value in dictionary.items():
            flattened_dict[key].append(value)
    return flattened_dict
def recommend_songs(input_song_list, spotify_data, n_songs=10):
    """Recommend *n_songs* tracks closest to the mean feature vector of
    *input_song_list*, using cosine distance over standardized features.

    Returns a JSON string of records with name/year/artists/id.
    """
    # Fit the numeric data of all songs in spotify_data (compute the mean)
    all_songs_pipeline = fit_songs_numeric_data(spotify_data)
    # Find mean of songs attributes from input list
    song_center = get_mean_vector(input_song_list, spotify_data)
    # Scale the numeric data of all songs in spotify_data
    scaled_data = all_songs_pipeline.transform(spotify_data[number_cols].values)
    # Scale the numeric data of song_center using the same pipeline
    scaled_song_center = all_songs_pipeline.transform(song_center.reshape(1, -1))
    # Calculate cosine distances (Songs closest to input songs)
    distances = cdist(scaled_song_center, scaled_data, 'cosine')
    # Get index'es of the n_songs closest to song_center
    index = list(np.argsort(distances)[:, :n_songs][0])
    # Create a dictionary from input_song_list
    song_dict = flatten_dict_list(input_song_list)
    # Select recommended songs from spotify_data using index'es
    recommended_songs = spotify_data.iloc[index]
    # Filter out songs that are already in song_list
    recommended_songs = recommended_songs[~recommended_songs['name'].isin(song_dict['name'])]
    metadata_cols = ['name', 'year', 'artists', 'id']
    return recommended_songs[metadata_cols].to_json(orient='records')
38023136578 | #==============================================================
#
# Job options file for Geant4 Simulations
#
# CTB_G4Sim: CTB (2004) simulation production
#
__version__="$Revision: 1.12"
#==============================================================
#--- Detector flags -------------------------------------------
from AthenaCommon.DetFlags import DetFlags
# - Select detectors
DetFlags.ID_setOn()
DetFlags.Calo_setOn()
DetFlags.LAr_setOn()
DetFlags.em_setOn()
DetFlags.Tile_setOn()
DetFlags.Muon_setOff() # muons are not supported in >=13.0.0
# - MCTruth
DetFlags.simulate.Truth_setOn()
#--- AthenaCommon flags ---------------------------------------
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
athenaCommonFlags.PoolHitsOutput='ctb_MyOutputFile.root'
athenaCommonFlags.EvtMax=3
#--- Simulation flags -----------------------------------------
from G4AtlasApps.SimFlags import SimFlags
SimFlags.import_JobProperties('G4AtlasApps.ctb_flags') # - specfic CTB flags
# - Option1: run using specfic CTB flags for combined layout
# ----------
#SimFlags.SimLayout.set_Value('ctbh8_combined')
#SimFlags.LArEMBenergyCor.set_On()
##SimFlags.CalibrationRun.set_Value('LAr+Tile') # Calibration runs
#SimFlags.Eta.set_Value(0.2) # Sets eta value
#SimFlags.MagnetMBPSIDBz.set_Value(-1.4) # Constant fields
# - Option2: run using run-conditions for the CTB runs,
#----------- only few runs availble (example with run 2345)
SimFlags.LArEMBenergyCor.set_On()
SimFlags.RunConditions.set_Value('CTB_G4Sim.CtbRunConditionsData')
SimFlags.RunNumber.set_Value(2345)
# - Option3: run for LAr material studies
#-----------
#SimFlags.SimLayout.set_Value('ctbh8_lar-material')
#SimFlags.LArEMBenergyCor.set_On()
#SimFlags.Eta.set_Value(0.4)
#SimFlags.LArMaterialAluThickness.set_Value(25.)
# - Option4: Re-writes the constant magnetic field for
#----------- the MBPSID with the map.
#SimFlags.MagFieldMap.set_Value('mbps1-all-id-800-mbps2-muons-800.data')
#--- Generator flags ------------------------------------------
SimFlags.Seeds.set_Value('SINGLE 2000160768 643921183')
SimFlags.ParticlePDG.set_Value('11')
SimFlags.Energy.set_Value(54000)
# - Ex1: how to set the full set of orders for the
#------- generator:
#SimFlags.ParticleGeneratorOrders.set_Value({'vertX:' : \
#' constant -27500.0','vertY:' :' flat -10.0 15.0',\
#'vertZ:' : ' flat -15.0 15.0','t:' :' constant -27500.0',\
#'momX:' : ' fixed 1','momY:' : ' fixed 0','momZ:' : ' fixed 0'})
# - Ex2: how to run with a gaussian energy distribution:
#-------
#SimFlags.ParticleGeneratorOrders.Value['energy:']=' gauss 54000 20'
# - Ex3: how to run with any energy distribution:
#-------
# In this case you have to customize the ParticleGenerator (see
# in the generator section)
SimFlags.ParticleGeneratorOrders()['energy:']=' histogram 2857'
#--- Output printout level -----------------------------------
#output threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL)
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = 4
#==============================================================
#==============================================================
# Job configuration
# ***>> Do not add flags or simulation options below this line
#==============================================================
#==============================================================
#--- CTB setup description ----------------------------------
from G4AtlasApps import SimKernel
theApp.initialize()
#>## this starts the space to customize the simulation #######
# - Ex1: change the verbosity
#-------
#SimFlags.G4Commands += ['/tracking/verbose 1']
# - Ex2: change the Pixel position
#------
#pixel_position=AtlasG4Eng.G4Eng.gbl.CLHEP.Hep3Vector(-300,0.,8)
#ctb_pixel.df.MoveTo(pixel_position)
#<## this ends the space to customize the simulation #########
#--- Generator ----------------------------------------------
##############################################################
#### NOTE: this is not woorking since 11.3.0 due to problems
#### in the ParticleGenerator code.
##############################################################
#>## this starts the space to customize ParticleGenerator ####
# basically to use your own distributions for the different
# ParticleGenerator orders:
# - Ex1: import a distribution from a root file
#-------
#from G4AtlasApps.atlas_utilhisto import HistoAtHistoSvc
#h1=HistoAtHistoSvc('2857')
#h1.retrieve_historoot("spectrum_function_2857.root",'h2857')
#h1.dump_histo(theApp)
# - Ex2: build the distribution
#-------
from CTB_G4Sim.CtbPhotonESpectrumExample import build_spectrum
h2=build_spectrum(theApp,2857)
##<## this ends the space to customize ParticleGenerator ######
theApp.nextEvent( theApp.EvtMax )
#--- End jobOptions.G4Ctb_SimProd.py file --------------------
| rushioda/PIXELVALID_athena | athena/Simulation/G4Sim/CTB_G4Sim/share/jobOptions.G4Ctb_SimExamples.py | jobOptions.G4Ctb_SimExamples.py | py | 4,841 | python | en | code | 1 | github-code | 13 |
1044556812 | import numpy as np
import cv2
import os
import tqdm
import argparse
from skimage.draw import polygon
import random
def random_flip_horizontal(img, box, p=0.5):
    """Horizontally flip *img* and its quadrilateral boxes with probability *p*.

    *box* is a 2-D array whose rows are [x1, y1, x2, y2, x3, y3, x4, y4];
    x coordinates are mirrored (in place) around the image width.
    """
    if np.random.random() >= p:
        # No flip this time.
        return img, box
    width = img.shape[1]
    flipped = img[:, ::-1, :]
    # Mirror x coords; swapping the column pairs keeps the point order valid.
    box[:, [0, 2, 4, 6]] = width - box[:, [2, 0, 6, 4]]
    return flipped, box
def Large_Scale_Jittering(img, box, min_scale=0.1, max_scale=2.0):
    """Randomly rescale *img* by a factor in [min_scale, max_scale], then pad
    (gray value 168) or crop back to the original h*w size; *box* coordinates
    are transformed accordingly.
    """
    scale = np.random.uniform(min_scale, max_scale)
    h, w, _ = img.shape
    # rescale
    h_new, w_new = int(h * scale), int(w * scale)
    img = cv2.resize(img, (w_new, h_new), interpolation=cv2.INTER_LINEAR)
    # Random top-left offset: paste origin when padding, window origin when
    # cropping. (Draw order kept: x first, then y.)
    x, y = int(np.random.uniform(0, abs(w_new - w))), int(np.random.uniform(0, abs(h_new - h)))
    if scale <= 1.0:
        # Shrunk: paste the small image onto a gray canvas of the original size.
        canvas = np.ones((h, w, 3), dtype=np.uint8) * 168
        canvas[y:y + h_new, x:x + w_new, :] = img
        box[:, [0, 2, 4, 6]] = box[:, [0, 2, 4, 6]] * w_new / w + x  # x coords
        box[:, [1, 3, 5, 7]] = box[:, [1, 3, 5, 7]] * h_new / h + y  # y coords
        return canvas, box
    # Enlarged: crop an h*w window starting at (x, y).
    crop = img[y:y + h, x:x + w, :]
    box[:, [0, 2, 4, 6]] = box[:, [0, 2, 4, 6]] * w_new / w - x
    box[:, [1, 3, 5, 7]] = box[:, [1, 3, 5, 7]] * h_new / h - y
    return crop, box
def img_add(img_src, img_main, mask_src, box_src):
    """Paste the masked region of *img_src* onto *img_main*; the result keeps
    *img_main*'s size, and *box_src* is rescaled to that size.
    """
    if len(img_main.shape) == 3:
        h, w, c = img_main.shape
    elif len(img_main.shape) == 2:
        h, w = img_main.shape
    src_h, src_w = img_src.shape[0], img_src.shape[1]
    mask = np.asarray(mask_src, dtype=np.uint8)
    # The mask is binary: keep only the object pixels of the src image.
    sub_img01 = cv2.add(img_src, np.zeros(np.shape(img_src), dtype=np.uint8), mask=mask)  # may error if depths mismatch
    mask_02 = cv2.resize(mask, (w, h), interpolation=cv2.INTER_NEAREST)
    mask_02 = np.asarray(mask_02, dtype=np.uint8)
    sub_img02 = cv2.add(img_main, np.zeros(np.shape(img_main), dtype=np.uint8),
                        mask=mask_02)  # carve out the matching region of the main image
    # main image minus the carved region, plus the pasted src region.
    img_main = img_main - sub_img02 + cv2.resize(sub_img01, (w, h),
                                                 interpolation=cv2.INTER_NEAREST)
    box_src[:, [0, 2, 4, 6]] = box_src[:, [0, 2, 4, 6]] * w / src_w
    box_src[:, [1, 3, 5, 7]] = box_src[:, [1, 3, 5, 7]] * h / src_h
    return img_main, box_src
def normal_(jpg_path, txt_path="", box=None):
"""
根据txt获得box或者根据box获得mask。
:param jpg_path: 图片路径
:param txt_path: x1,y1,x2,y2 x3,y3,x4,y4...
:param box: 如果有box,则为根据box生成mask
:return: 图像,box 或 掩码
"""
if isinstance(jpg_path, str): # 如果是路径就读取图片
jpg_path = cv2.imread(jpg_path)
img = jpg_path.copy()
if box is None: # 一定有txt_path
lines = open(txt_path).readlines()
box = []
for line in lines:
ceils = line.strip().split(',')
xy = []
for ceil in ceils:
print(type(ceil))
xy.append(round(float(ceil)))
box.append(np.array(xy))
return np.array(img), np.array(box)
else: # 获得mask
h, w = img.shape[:2]
mask = np.zeros((h, w), dtype=np.float32)
for xy in box: # 对每个框
xy = np.array(xy).reshape(-1, 2)
cv2.fillPoly(mask, [xy.astype(np.int32)], 1)
return np.array(mask)
def is_coincide(polygon_1, polygon_2):
    """Return 1 if the two quadrilaterals overlap, else 0.

    :param polygon_1: flat coordinates [x1, y1, ..., x4, y4]
    :param polygon_2: same layout
    """
    rr1, cc1 = polygon([polygon_1[i] for i in range(0, len(polygon_1), 2)],
                       [polygon_1[i] for i in range(1, len(polygon_1), 2)])
    rr2, cc2 = polygon([polygon_2[i] for i in range(0, len(polygon_2), 2)],
                       [polygon_2[i] for i in range(1, len(polygon_2), 2)])
    try:  # size of the smallest canvas enclosing both polygons
        r_max = max(rr1.max(), rr2.max()) + 1
        c_max = max(cc1.max(), cc2.max()) + 1
    except ValueError:
        # A degenerate polygon rasterizes to zero pixels, so .max() on the
        # empty array raises ValueError -> treat as "no overlap". (This was a
        # bare except, which also hid unrelated errors.)
        return 0
    # Pixels covered by one polygon sum to 1; overlapping pixels sum to 2.
    canvas = np.zeros((r_max, c_max))
    canvas[rr1, cc1] += 1
    canvas[rr2, cc2] += 1
    intersection = np.sum(canvas == 2)
    return 1 if intersection != 0 else 0
def copy_paste(img_main_path, img_src_path, txt_main_path, txt_src_path, coincide=False, muti_obj=True):
    """Full copy-paste augmentation for one (main, src) image pair.

    Steps:
      1. read both images and their polygon boxes;
      2. random horizontal flip of each;
      3. Large Scale Jittering of each (rescale then pad/crop back to h*w);
      4. build the mask of the transformed src boxes;
      5. paste src onto main; return the main-sized image and merged boxes.
    Returns (None, None) when *coincide* is False and a pasted box overlaps
    an existing one.
    """
    # read images and coordinates
    img_main, box_main = normal_(img_main_path, txt_main_path)
    img_src, box_src = normal_(img_src_path, txt_src_path)
    # random horizontal flip
    img_main, box_main = random_flip_horizontal(img_main, box_main)
    img_src, box_src = random_flip_horizontal(img_src, box_src)
    # LSJ, Large_Scale_Jittering: random rescale, restored to h*w size
    img_main, box_main = Large_Scale_Jittering(img_main, box_main)
    img_src, box_src = Large_Scale_Jittering(img_src, box_src)
    if not muti_obj or box_src.ndim == 1:  # copy-paste a single object only
        id = random.randint(0, len(box_src) - 1)
        box_src = box_src[id]
        box_src = box_src[np.newaxis, :]  # restore the leading axis
    # mask of the transformed img_src boxes
    mask_src = normal_(img_src_path, box=box_src)
    # paste src onto main; result has main's size
    img, box_src = img_add(img_src, img_main, mask_src, box_src)
    # reject results whose pasted regions overlap existing objects
    if not coincide:
        for point_main in box_main:
            for point_src in box_src:
                if is_coincide(point_main, point_src):
                    return None, None
    box = np.vstack((box_main, box_src))
    return img, box
def save_res(img, img_path, box, txt_path):
    """Write the augmented image and its boxes (clipped to image bounds)."""
    cv2.imwrite(img_path, img)
    h, w = img.shape[:2]
    with open(txt_path, 'w+') as ftxt:
        for point in box:  # one row [x1, y1, ..., x4, y4] per line
            coords = []
            for i, p in enumerate(point):
                # Even indices are x (clip to width), odd are y (clip to height).
                limit = w - 1 if i % 2 == 0 else h - 1
                coords.append(str(np.clip(p, 0, limit)))
            ftxt.writelines(','.join(coords) + "\n")
def main(args):
    """Run copy-paste augmentation over every image named in args.aug_txt."""
    # Input image / coordinate-txt directories.
    JPEGs = os.path.join(args.input_dir, 'jpg')
    BOXes = os.path.join(args.input_dir, 'txt')
    # Output directories.
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'cpAug_jpg'), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'cpAug_txt'), exist_ok=True)
    # Names (without extension) of the images taking part in augmentation.
    imgs_list = open(args.aug_txt, 'r').read().splitlines()
    flag = '.jpg'  # image extension: .jpg or .png
    tbar = tqdm.tqdm(imgs_list, ncols=100)  # progress bar
    for src_name in tbar:
        # src image paths
        img_src_path = os.path.join(JPEGs, src_name + flag)
        txt_src_path = os.path.join(BOXes, src_name + '.txt')
        # randomly pick the main image
        main_name = np.random.choice(imgs_list)
        img_main_path = os.path.join(JPEGs, main_name + flag)
        txt_main_path = os.path.join(BOXes, main_name + '.txt')
        # augmentation (returns (None, None) on rejected overlap)
        img, box = copy_paste(img_main_path, img_src_path, txt_main_path, txt_src_path,
                              args.coincide, args.muti_obj)
        if img is None:
            continue
        # save the result pair
        img_name = "copy_" + src_name + "_paste_" + main_name
        save_res(img, os.path.join(args.output_dir, 'cpAug_jpg', img_name + flag),
                 box, os.path.join(args.output_dir, 'cpAug_txt', img_name + '.txt'))
def get_args():
    """Build the CLI parser and parse the augmentation options.

    NOTE(review): type=bool on --coincide/--muti_obj is the classic argparse
    pitfall (any non-empty string parses as True); confirm intended usage.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--input_dir", default=r"D:\songjiahao\DATA\coco128\aa", type=str,
                     help="要进行数据增强的图像路径,路径结构下应有jpg和txt文件夹")
    cli.add_argument("--output_dir", default=r"D:\songjiahao\DATA\coco128\bb", type=str,
                     help="保存数据增强结果的路径")
    cli.add_argument("--aug_txt", default=r"D:\songjiahao\DATA\coco128\train.txt",
                     type=str, help="要进行数据增强的图像的名字,不包含后缀")
    cli.add_argument("--coincide", default=False, type=bool,
                     help="True表示允许数据增强后的图像目标出现重合,默认不允许重合")
    cli.add_argument("--muti_obj", default=False, type=bool,
                     help="True表示将src图上的所有目标都复制粘贴,False表示只随机粘贴一个目标")
    return cli.parse_args()
if __name__ == "__main__":
args = get_args()
main(args)
| songjiahao-wq/untitled | youhua/数据增多/copy2.py | copy2.py | py | 10,164 | python | en | code | 1 | github-code | 13 |
12193904635 | #!/user/bin/env python3
# Load the bcc (BPF Compiler Collection) library.
from bcc import BPF
# Load our C source; it is compiled to BPF bytecode here.
byte = BPF(src_file="HookOutput.c")
# Attach the BPF program to a kprobe; ip_local_out is the kernel function
# that iptables OUTPUT-chain traffic passes through.
byte.attach_kprobe(event="ip_local_out",fn_name="hookOutput")
# Stream the kernel debug pipe /sys/kernel/debug/tracing/trace_pipe.
byte.trace_print()
| 564194070/ebpfDaliyDemo | 01-HookIptablesOutPut/LLVMGenerate.py | LLVMGenerate.py | py | 400 | python | zh | code | 0 | github-code | 13 |
32855375079 | import setuptools
#import pyGM
with open("readme.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='pyGMs',
#name=pyGM.__title__,
#version=pyGM.__version__,
version='0.1.1',
author='Alexander Ihler',
author_email='ihler@ics.uci.edu',
description='Python Graphical Models Toolbox',
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
],
)
| ihler/pyGMs | setup.py | setup.py | py | 685 | python | en | code | 12 | github-code | 13 |
9535990857 | from urllib.request import urlopen
from bs4 import BeautifulSoup
from urllib import error
from urllib.request import Request
import random
import re
import sqlite3
import time
import numpy as np
# Pool of desktop browser User-Agent strings; one is picked at random per
# request to make the scraper look less uniform.
headers = [{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}, \
           {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'}, \
           {'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}]
def get_house_info(urls):
    """Scrape 58.com house-detail pages and persist title/price/images/address.

    For each URL: wait a random polite delay, fetch with a random desktop
    User-Agent, parse the detail page, then insert one row into the local
    sqlite database houseinfo.db.
    """
    for url in urls:
        time.sleep(np.random.rand() * 5)  # throttle to avoid being blocked
        try:
            res = Request(url, headers=headers[random.randint(0, 2)])
            response = urlopen(res)
            soup = BeautifulSoup(response, 'lxml')
        except (error.HTTPError, error.URLError) as e:
            print(e, '2')
            continue
        title_list = soup.select('body > div.main-wrap > div.house-title > h1')
        price_list = str(soup.select('body > div.main-wrap > div.house-basic-info > div.house-basic-right.fr > div.house-basic-desc > div.house-desc-item.fl.c_333 > div > span.c_ff552e > b'))
        img_urls = str(soup.findAll('ul', {'class': 'house-pic-list'}))
        add_list = soup.select(
            'body > div.main-wrap > div.house-basic-info > div.house-basic-right.fr > div.house-basic-desc > div.house-desc-item.fl.c_333 > ul > li > span.dz')
        # title text
        title = "".join(i.getText() for i in title_list)
        print(title)
        # price digits extracted from the <b class="f36"> element
        price = "".join(re.findall(r'b class="f36">(\d+)', str(price_list)))
        print(price)
        # lazy-loaded image urls, comma-joined
        img_url = ",".join(re.findall(r'img lazy_src="(.*?)"', str(img_urls)))
        # address text
        add = "".join(i.getText().strip() for i in add_list)
        # Persist one row. Parameterized placeholders replace the previous
        # %-string interpolation, which broke on quotes in titles and was an
        # SQL-injection vector.
        conn = sqlite3.connect('houseinfo.db')
        cur = conn.cursor()
        try:
            cur.execute('create table if not exists house_info'
                        '(title CHAR(30),houseurl CHAR(50),price CHAR(5),imgurl text,address CHAR(30))')
            cur.execute('insert into house_info values (?,?,?,?,?)',
                        (title, url, price, img_url, add))
            conn.commit()
        finally:
            conn.close()
18093456509 | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import render_to_string
import re
class BasicSpeedDial:
    """Collects speed-dial menu entries and renders them through a template.

    NOTE(review): the default color 'text-slate-500' does not start with
    'bg-' and therefore becomes 'bg-text-slate-500' after normalization --
    looks unintended; confirm the intended default.
    """

    def __init__(self):
        # Accumulated option dicts, kept in insertion order.
        self.options = []

    def add_option(
            self,
            name,
            url,
            method="POST",
            icon="fa-plus",
            color="text-slate-500"):
        """Append one menu entry, normalizing FontAwesome/Tailwind prefixes."""
        icon = icon if icon.startswith('fa-') else 'fa-' + icon
        color = color if color.startswith('bg-') else 'bg-' + color
        # Ensure a Tailwind shade suffix such as -500 is present.
        if not re.findall(r'-\d\d\d', color):
            color = color + '-500'
        self.options.append({
            'name': name,
            'url': url,
            'method': method,
            'icon': icon,
            'color': color,
        })

    def render(self) -> str:
        """Render the accumulated options with the speed-dial template."""
        return render_to_string('sch3/speedDial.html', {'options': self.options})
# Module-level singleton: the speed-dial menu shown on workday pages.
workday_speed_dial_menu = BasicSpeedDial()
workday_speed_dial_menu.add_option(
    name= "Solve",
    url= "solve/",
    icon= "robot-user",  # normalized to "fa-robot-user" by add_option
    color="zinc-300"     # normalized to "bg-zinc-300" by add_option
)
| jts-bio/techn-i-cal | sch/components.py | components.py | py | 1,167 | python | en | code | 0 | github-code | 13 |
4060751398 | import numpy as np
from problems import zero_periodic
def F2(u, dt, dx, uold):
    """Residual of the implicit (trapezoidal) step for the flattened n*n
    grid vectors ``u`` (new iterate) and ``uold`` (previous step)."""
    side = int(len(u) ** 0.5)

    def neighbour_flux(v):
        # 2*v^2 minus the squared values of the two "next" neighbours of
        # each cell along the flattened periodic grid (rows and columns).
        sq = v ** 2
        return 2 * sq - np.roll(sq, -side) - np.roll(sq, -1)

    flux = neighbour_flux(u) + neighbour_flux(uold)
    return (u - uold) + dt / (4 * dx) * flux
@zero_periodic
def ua_2D(x):
    """Field 2 + 2*sin(pi/8*(x_i - y_j - 4))*sin(pi/8*x_i) evaluated on the
    grid spanned by ``x`` in both directions, flattened to length n**2."""
    size = x.shape[0]
    grid_x, grid_y = np.meshgrid(x, x)
    phase = np.pi / 8
    field = 2 + 2 * np.sin(phase * (grid_x - grid_y - 4)) * np.sin(phase * grid_x)
    return field.reshape((size ** 2,))
36492556362 | # state values
# Thermostat operating modes (persisted via persistenceControl).
modeOff = 0
modeHeat = 1
modeCool = 2
modeFan = 3
modeAuto = 4
inhibitDelay = 60          # seconds the inhibit sensor must stay on before inhibiting
inhibitState = 1           # inhibit-sensor state value that triggers inhibition
inhibitWatchInterval = 1   # polling interval (seconds) when the sensor has no event
defaultTemp = 72           # neutral setpoint used by ThermostatTempControl in auto mode
from homealone import *
from homealone.resources.tempControl import *
# thermostat control for heating and cooling
class ThermostatControl(Control):
    """Composite thermostat: one heating TempControl, one cooling
    TempControl and a fan unit behind a single mode state
    (modeOff/modeHeat/modeCool/modeFan/modeAuto).

    An optional inhibit sensor suspends heating/cooling after
    ``inhibitDelay`` seconds (e.g. while a door or window is open); an
    optional persistence control restores the last mode across restarts.
    """
    def __init__(self, name, heatUnit, coolUnit, fanUnit, tempSensor, heatTempTargetControl, coolTempTargetControl,
                       inhibitSensor=None, persistenceControl=None, hysteresis=[1, 1], **kwargs):
        # NOTE(review): mutable default ``hysteresis=[1, 1]`` is shared by all
        # instances that do not pass their own list — safe only while unmutated.
        Control.__init__(self, name, **kwargs)
        self.className = "Control"
        # Independent temperature controllers wrapping the heat and AC units.
        self.heatControl = TempControl(self.name+"HeatControl", None, heatUnit, tempSensor, heatTempTargetControl,
                                        unitType=unitTypeHeater, hysteresis=hysteresis)
        self.coolControl = TempControl(self.name+"CoolControl", None, coolUnit, tempSensor, coolTempTargetControl,
                                        unitType=unitTypeAc, hysteresis=hysteresis)
        self.fanUnit = fanUnit                  # the fan unit
        self.tempSensor = tempSensor            # the temperature sensor
        self.heatTempTargetControl = heatTempTargetControl
        # Intercept setpoint changes so heat/cool targets can never overlap.
        self.heatTempTargetControl.stateSet=self.tempTargetState
        self.coolTempTargetControl = coolTempTargetControl
        self.coolTempTargetControl.stateSet=self.tempTargetState
        self.inhibitSensor = inhibitSensor      # sensor that inhibits thermostat operation if it is on
        # if self.inhibitSensor:
        #     self.setInhibit(self.inhibitSensor.getState())
        # else:
        #     self.inhibited = False
        self.inhibited = False
        self.persistenceControl = persistenceControl    # persistent storage of the state
        self.currentState = 0
        self.hysteresis = hysteresis
    def start(self):
        """Restore the persisted mode (default modeOff) and, when an inhibit
        sensor is configured, start the watcher thread that debounces it."""
        if self.persistenceControl:
            self.currentState = self.persistenceControl.getState()
            if self.currentState == None:
                self.setState(modeOff)
            else:
                self.setState(self.currentState)
        else:
            self.setState(modeOff)
        # inhibit the tempControl after a delay
        def inhibitTimer():
            debug('debugThermostatEvent', self.name, "inhibitTimer ended")
            self.setInhibit(True)
        # thread to monitor the state of the inhibit sensor
        def inhibitWatch():
            debug('debugThermostatEvent', self.name, "inhibitWatch started")
            inhibitTimerThread = None
            while True:
                if self.inhibitSensor.event:    # wait for inhibitSensor state to change
                    debug('debugThermostatEvent', self.name, "waiting for", self.inhibitSensor.name, "event")
                    self.inhibitSensor.event.wait()
                else:                           # poll inhibitSensor state
                    debug('debugThermostatEvent', self.name, "waiting for", inhibitWatchInterval, "seconds")
                    time.sleep(inhibitWatchInterval)
                if self.inhibitSensor.getState() == inhibitState:
                    if not inhibitTimerThread:  # start the delay timer
                        inhibitTimerThread = threading.Timer(inhibitDelay, inhibitTimer)
                        inhibitTimerThread.start()
                        debug('debugThermostatEvent', self.name, "inhibitTimer started")
                else:
                    if self.inhibited:          # state changed back, cancel the timer and enable the thermostat
                        self.setInhibit(False)
                    if inhibitTimerThread:
                        inhibitTimerThread.cancel()
                        debug('debugThermostatEvent', self.name, "inhibitTimer cancelled")
                        inhibitTimerThread = None
                        self.inhibited = False
        if self.inhibitSensor:  # start the thread to watch the state of the inhibit sensor
            inhibitWatchThread = LogThread(name="inhibitWatchThread", target=inhibitWatch)
            inhibitWatchThread.start()
    def setInhibit(self, value):
        """Propagate the inhibit flag to both temperature controllers."""
        debug('debugThermostat', self.name, "inhibit", value)
        self.inhibited = value
        self.heatControl.setInhibit(value)
        self.coolControl.setInhibit(value)
    def getState(self, wait=False, missing=None):
        """Return the current mode (modeOff..modeAuto)."""
        debug('debugState', self.name, "getState ", self.currentState)
        return self.currentState
    def setState(self, state, wait=False):
        """Switch mode: turn the heat/cool controllers and the fan on or off
        accordingly, persist the new mode, and notify observers."""
        debug('debugState', self.name, "setState ", state)
        if state == modeOff:
            self.heatControl.setState(off)
            self.coolControl.setState(off)
            self.fanUnit.setState(off)
        elif state == modeHeat:
            self.heatControl.setState(on)
            self.coolControl.setState(off)
            self.fanUnit.setState(off)
        elif state == modeCool:
            self.heatControl.setState(off)
            self.coolControl.setState(on)
            self.fanUnit.setState(off)
        elif state == modeFan:
            self.heatControl.setState(off)
            self.coolControl.setState(off)
            self.fanUnit.setState(on)
        elif state == modeAuto:
            # Auto: both controllers are armed; hysteresis separation in
            # tempTargetState() keeps them from fighting each other.
            self.heatControl.setState(on)
            self.coolControl.setState(on)
            self.fanUnit.setState(off)
        else:
            debug('debugThermostat', self.name, "unknown state", state)
            return
        self.currentState = state
        if self.persistenceControl:
            self.persistenceControl.setState(state)
        self.notify()
    # callback for temp target control states
    def tempTargetState(self, control, state):
        """Keep the heat and cool setpoints separated by the combined
        hysteresis so both units can never be active at the same temp."""
        debug('debugThermostat', "TempTargetState", control.name, state)
        # prevent the heating and cooling temp targets from overlapping
        tempSeparation = self.hysteresis[0] + self.hysteresis[1]
        minCoolTemp = state + tempSeparation
        maxHeatTemp = state - tempSeparation
        if control == self.heatTempTargetControl:   # heat temp is being set
            if self.coolTempTargetControl.getState() < minCoolTemp:
                self.coolTempTargetControl.setState(minCoolTemp)    # adjust the cool temp
        else:                                       # cool temp is being set
            if self.heatTempTargetControl.getState() > maxHeatTemp:
                self.heatTempTargetControl.setState(maxHeatTemp)    # adjust the heat temp
# State codes reported by ThermostatUnitSensor.
heatOn = modeHeat
coolOn = modeCool
fanOn = modeFan
hold = 5   # reported while the thermostat is inhibited ("hold")
# Sensor that returns the thermostat unit control that is currently running
class ThermostatUnitSensor(Sensor):
    """Sensor reporting which thermostat unit is currently running
    (heatOn/coolOn/fanOn), ``hold`` while inhibited, or Off."""
    def __init__(self, name, thermostatControl, **kwargs):
        Sensor.__init__(self, name, **kwargs)
        self.className = "Sensor"
        self.thermostatControl = thermostatControl
    def getState(self, missing=None):
        # Checks are ordered by precedence; at most one unit runs at a time.
        thermostat = self.thermostatControl
        if thermostat.getState() == Off:
            return Off
        if thermostat.fanUnit.getState() == On:
            return fanOn
        if thermostat.inhibited:
            return hold
        if thermostat.heatControl.unitControl.getState() == On:
            return heatOn
        if thermostat.coolControl.unitControl.getState() == On:
            return coolOn
        return Off
# Control that sets the target temperature of the active unit control
# Control that sets the target temperature of the active unit control
class ThermostatTempControl(Control):
    """Single temperature dial: routes a requested setpoint to either the
    heating or the cooling controller depending on the current mode."""
    def __init__(self, name, thermostatControl, **kwargs):
        Control.__init__(self, name, **kwargs)
        self.className = "Control"
        self.thermostatControl = thermostatControl
        self.temp = defaultTemp     # last temperature requested via setState
    def getState(self):
        """Return the last requested setpoint (not the room temperature)."""
        return self.temp
    def setState(self, temp):
        """Forward ``temp`` to the heat or cool target control.

        In modeAuto the side is chosen relative to defaultTemp: below it
        (or equal with a cool room) heats, above it (or equal with a warm
        room) cools; otherwise the request is ignored.
        """
        mode = self.thermostatControl.getState()
        roomTemp = self.thermostatControl.tempSensor.getState()
        debug('debugThermostat', self.name, "setState", "temp:", temp, "mode:", mode, "roomTemp:", roomTemp)
        if (mode == modeHeat) or ((mode == modeAuto) and ((temp < defaultTemp) or ((temp == defaultTemp) and (roomTemp < defaultTemp)))):
            debug('debugThermostat', self.name, "setting heat target to", temp)
            self.thermostatControl.heatControl.tempTargetControl.setState(temp)
        elif (mode == modeCool) or ((mode == modeAuto) and ((temp > defaultTemp) or ((temp == defaultTemp) and (roomTemp > defaultTemp)))):
            debug('debugThermostat', self.name, "setting cool target to", temp)
            self.thermostatControl.coolControl.tempTargetControl.setState(temp)
        else:   # do nothing (e.g. modeOff/modeFan)
            debug('debugThermostat', self.name, "not setting temp")
            return
        self.temp = temp
| jbuehl/homealone | homealone/resources/thermostatControl.py | thermostatControl.py | py | 8,826 | python | en | code | 0 | github-code | 13 |
33679975749 | import aiopg.sa
from sqlalchemy import (
MetaData, Table, Column,
Integer, String
)
meta = MetaData()
# ``users``: account records (id, creator id, login name).
# NOTE(review): no primary_key is declared on either table — confirm intended.
users = Table(
    'users', meta,
    Column('id', Integer),
    Column('creator', Integer),
    Column('login', String(50))
)
# ``units``: unit records owned by a creator.
units = Table(
    'units', meta,
    Column('id', Integer),
    Column('creator', Integer),
    Column('name', String(50))
)
class RecordNotFound(Exception):
    """Raised when a requested record is missing from the database."""
async def get_users(conn):
    """Fetch every row of the ``users`` table, ordered by id."""
    cursor = await conn.execute(users.select().order_by('id'))
    return await cursor.fetchall()
async def get_units(conn):
    """Fetch every row of the ``units`` table."""
    cursor = await conn.execute(units.select())
    return await cursor.fetchall()
async def startup(app):
    """Create the aiopg engine from app['config'] and store it on the app."""
    conf = app['config']['postgres']
    engine = await aiopg.sa.create_engine(
        database=conf['database'],
        user=conf['user'],
        password=conf['password'],
        host=conf['host'],
        port=conf['port'],
        minsize=conf['minsize'],    # connection-pool bounds
        maxsize=conf['maxsize'],
    )
    app['db'] = engine
async def shutdown(app):
    """Close the engine and wait for all pooled connections to finish."""
    app['db'].close()
    await app['db'].wait_closed()
| MaksymMasalov/Aiohttp-task | app/db.py | db.py | py | 1,319 | python | en | code | 0 | github-code | 13 |
14957056494 | from BeautifulSoup import BeautifulSoup
import urllib2
import re
from urlparse import urlparse
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
# Starting article for the link search.
firstLink = 'https://en.wikipedia.org/wiki/Sleepers'
allLinks = []
allLinks.append(firstLink)  # NOTE(review): overwritten below by getLinks(firstLink)
def getLinks(url):
    """Return the href of every anchor inside the article body
    (div.mw-parser-output) of the given Wikipedia page URL."""
    html_page = urllib2.urlopen(url)
    soup = BeautifulSoup(html_page)
    linkedUrls = []
    newList = soup.findAll('div', attrs={'class': 'mw-parser-output'})
    for article in newList:
        listTemp = article.findAll('a', href=True)
        for link in listTemp:
            # Bug fix: the loop variable is ``link``; the previous code
            # referenced an undefined name ``a`` and raised NameError.
            linkedUrls.append(link['href'])
    return linkedUrls
# Driver: expand links from the start page until /wiki/Kevin_Bacon is seen.
allLinks = getLinks(firstLink)
urlCounter = 0
whileLink = firstLink
while (whileLink == firstLink):
    for x in allLinks:
        # NOTE(review): urlCounter is incremented before being used as an
        # index, so allLinks[0] is never expanded and the index can run past
        # the end of the list (IndexError) — confirm intended behavior.
        urlCounter += 1
        if x == "/wiki/Kevin_Bacon":
            print("You Win!")
            whileLink = x
        else:
            if allLinks[urlCounter].startswith('/wiki'):
                currentLink = 'https://en.wikipedia.org' + allLinks[urlCounter] # Maybe use allLinks.pop([0]) here
                linksToAdd = getLinks(currentLink)
                # NOTE(review): rebinding allLinks while the for-loop iterates
                # the old list makes the search effectively unbounded.
                allLinks = allLinks + linksToAdd
                #???
| ryankavanaugh/WebScraping-DataMining | WikipediaSearch.py | WikipediaSearch.py | py | 1,343 | python | en | code | 0 | github-code | 13 |
73292676816 | import collections
import logging
import yaml
from heat2arm.parser.common.resource import Resource
from heat2arm.parser.template import Template
from heat2arm.parser.testing.testutils import recursive_dict_search
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("test_template")
# TemplateParsingTestInput is a data structure used for testing the behavior of
# the Template parsing logic against invalid input.
# It contains the following fields:
# - template_data - the raw template data to be used for the test
# - expected_exception - the Exception class expected to be raised
# Fields are documented in the comment block above.
TemplateParsingTestInput = collections.namedtuple(
    "TemplateParsingTestInput",
    "description template_data expected_exception"
)
class TemplateParsingTestCase(object):
    """ FunctionApplicationTestCase represents the set of tests which ensure
    the proper behavior for template parsing.
    It is meant to be inherited alongside unittest.TestCase.
    """
    # _field_names is a dict containing the mapping between common field
    # types and their names within a respective template format:
    _field_names = {}
    # _function_application_test_data is the raw template data
    # to be used in test_functions_get_applied:
    _function_application_test_data = ""
    # _resource_parsing_test_data is the raw template data to be used as
    # input in test_parse_resource:
    _resource_parsing_test_data = ""
    # _template_parsing_test_data is a list of TemplateParsingTestInputs to be
    # used as inputs in test_parsing_behavior:
    # NOTE(review): class-level mutable list is shared across subclasses
    # unless each subclass reassigns it — all current usages do.
    _template_parsing_test_data = []
    def test_functions_get_applied(self):
        """ test_functions_get_applied tests that running the function
        application on a Template's data reduces all available functions.
        """
        LOG.debug(
            "testing function application correctness on '%s'.",
            self._function_application_test_data
        )
        temp = Template(self._function_application_test_data)
        temp.reduce_functions()
        # for each registered templating function (which are considered to
        # behave correctly, as they have their separate tests), ensure that
        # there is no unapplied instance of it in the resulting template:
        for func_name in temp._functions:
            LOG.debug(
                "checking for unapplied function '%s' in '%s'.",
                func_name,
                temp._template_data
            )
            self.assertFalse(recursive_dict_search(
                temp._template_data, func_name))
    def test_parse_resources(self):
        """ test_parse_resources tests that running the resource extraction
        procedure will yield the appropriate set of resources:
        """
        LOG.debug(
            "testing resource parsing on '%s'",
            self._resource_parsing_test_data
        )
        temp = Template(self._resource_parsing_test_data)
        parsed_resources = temp.parse_resources()
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in PyYAML >= 5.1 — test data only.
        input_resources = yaml.load(self._resource_parsing_test_data)[
            self._field_names["resources"]
        ]
        # ensure all the resources defined in the input template have a
        # corresponding Resource instance created for them:
        for res_name, res_data in input_resources.items():
            self.assertTrue(res_name in parsed_resources)
            self.assertDictEqual(
                parsed_resources[res_name].properties,
                res_data.get(self._field_names["properties"], {})
            )
    def test_invalid_data(self):
        """ test_invalid_data tests the behavior of the Template parsing logic
        agains flawed or incomplete templates.
        """
        for test_input in self._template_parsing_test_data:
            LOG.debug(
                "testing Template parsing reaction to invalid data '%s': %s.",
                test_input.template_data,
                test_input.description
            )
            with self.assertRaises(test_input.expected_exception):
                Template(test_input.template_data)
| cloudbase/heat2arm | heat2arm/parser/testing/template_testing.py | template_testing.py | py | 4,081 | python | en | code | 7 | github-code | 13 |
36262440312 | from chimerax.core.state import State
class DataFormat(State):
    """Keep track of information about various data sources.

    ..attribute:: name
        Official name for format.
    ..attribute:: category
        Type of data (STRUCTURE, SEQUENCE, etc.)
    ..attribute:: suffixes
        Sequence of filename extensions in lowercase
        starting with period (or empty)
    ..attribute:: nicknames
        Alternative names for format, usually includes a short abbreviation.
    ..attribute:: mime_types
        Sequence of associated MIME types (or empty)
    ..attribute:: synopsis
        Short description of format
    ..attribute:: reference_url
        URL reference to specification
    ..attribute:: encoding
        None if a binary format (default), otherwise text encoding, *e.g.*, **utf-8**
    ..attribute:: insecure
        True if can execute arbitrary code (*e.g.*, scripts)
    ..attribute:: allow_directory
        True if the format is opened/saved as a directory. This is the only case where
        'suffixes' can be empty.
    """
    # Attribute names (de)serialized by take_snapshot/restore_snapshot,
    # listed in constructor-argument order.
    attr_names = ['name', 'category', 'suffixes', 'nicknames', 'mime_types', 'reference_url', 'insecure',
        'encoding', 'synopsis', 'allow_directory']
    def __init__(self, format_name, category, suffixes, nicknames, mime_types,
            reference_url, insecure, encoding, synopsis, allow_directory):
        self.name = format_name
        self.category = category
        self.suffixes = suffixes
        self.nicknames = nicknames
        self.mime_types = mime_types
        self.insecure = insecure
        self.encoding = encoding
        # Fall back to the format name when no synopsis is supplied.
        self.synopsis = synopsis if synopsis else format_name
        self.allow_directory = allow_directory
        if reference_url:
            # sanitize URL
            from urllib import parse
            r = list(parse.urlsplit(reference_url))
            r[1:5] = [parse.quote(p) for p in r[1:5]]
            reference_url = parse.urlunsplit(r)
        self.reference_url = reference_url
    def take_snapshot(self, session, flags):
        """Serialize this format to a dict for session saving."""
        return { attr_name: getattr(self, attr_name) for attr_name in self.attr_names }
    @classmethod
    def restore_snapshot(class_obj, session, data):
        """Recreate a DataFormat from a take_snapshot() dict."""
        return class_obj(*[data[attr_name] for attr_name in class_obj.attr_names])
| HamineOliveira/ChimeraX | src/bundles/data_formats/src/format.py | format.py | py | 2,404 | python | en | code | null | github-code | 13 |
4853943630 | import random
def create_show(fireworks, show_time):
    """Randomly pick fireworks whose total duration fits within show_time.

    fireworks -- list of individual firework durations (NOT modified; the
                 old version sorted and shrank the caller's list in place).
    show_time -- total time budget for the show.
    Returns the chosen durations in selection order.
    """
    pool = sorted(fireworks)            # O(N log N); works on a copy
    show = []                           # O(1)
    remaining_time = show_time          # O(1)
    while remaining_time > 0 and pool:  # O(N) iterations
        # Select a random firework
        firework = random.choice(pool)
        if firework <= remaining_time:  # O(1)
            # Add the firework to the show
            show.append(firework)       # O(1)
            remaining_time -= firework  # O(1)
        else:
            # This firework is too long, remove it from consideration
            pool.remove(firework)       # O(N)
    return show
#Time Comp: maybeeee O(N**2)
#Space Comp: #O(N) | simachami/Week4-Day1 | fireworks_problem.py | fireworks_problem.py | py | 661 | python | en | code | 0 | github-code | 13 |
45754761676 | from selene import be, have
from selene.support.shared import browser
from summerpatio_web_autotests.data.contacts import Contact
from summerpatio_web_autotests.data.links import Link
from summerpatio_web_autotests.data.textings import Texting
from summerpatio_web_autotests.model.components.burger import Burger
from summerpatio_web_autotests.model.components.header import Header
from datetime import date
class MainPage:
    """Page object (selene) for the summer-patio site main page."""
    def agree_with_cookies(self):
        """Click the cookie-consent button."""
        browser.element('.btn-cookie').click()
        return self
    def check_agreement(self):
        """Verify the cookie-banner rules link points at the agreement page."""
        browser.element('.rules').should(have.attribute('href').value(f'{Link.agreement_link.value[0]}'))
        return self
    def open_main_page_and_agree_with_cookies(self):
        """Open '/', check the page title, and dismiss the cookie banner
        (verifying its agreement link first) if it is shown."""
        browser.open('/')
        browser.should(have.title_containing('Летний дворик'))
        if browser.element('.btn-cookie').wait_until(be.visible):
            self.check_agreement()
            self.agree_with_cookies()
    def check_burger_menu(self):
        """Open the burger menu and verify its texts, contact links, offer
        link and copyright footer (current year), then close it."""
        date_today = date.today()
        browser.element(Burger.burger).should(be.visible).click()
        browser.element(Burger.authorize).should(have.text(f'{Texting.authorize.value[0]}'))
        browser.element(Burger.footer_text).should(have.text(Texting.support_invite.value[0]))
        browser.element(Burger.phone_link).should(have.attribute('href'))
        browser.element(Burger.mail_button).should(have.attribute('href').value(Contact.support_link_for_mail.value[0]))
        browser.element(Burger.telegram_button).should(have.attribute('href').value(Contact.support_link_for_telegram.value[0]))
        browser.element(Burger.offer_link).should(have.attribute('href').value(Link.offer_link.value[0])).\
            should(have.text(f'{Texting.offer.value[0]}'))
        browser.element(Burger.burger_footer).should(have.text(f'{Texting.developed.value[0]} {Texting.company.value[0]} '
                                                               f'{date.strftime(date_today,"%Y")}'))
        browser.element(Burger.return_button).click()
        return self
    def check_logo(self):
        """Verify the site logo is visible and has the expected dimensions."""
        browser.element('.logo').should(be.visible).should(have.attribute('width').value(Header.logo['width'])).\
            should(have.attribute('height').value(Header.logo['height']))
        return self
| aleksandrzavialov/summer_patio_web_autotests | summerpatio_web_autotests/model/pages/main_page.py | main_page.py | py | 2,327 | python | en | code | 0 | github-code | 13 |
16768537552 | """
@author: krakowiakpawel9@gmail.com
@site: e-smartdata.org
"""
import cv2
import numpy as np
# Load the source image and keep a pristine copy; all drawing below is done
# on ``img`` so ``original_img`` stays untouched.
original_img = cv2.imread(filename=r'C:\Users\User\PycharmProjects\computer-vision-course\01_basics\images\nerka 1.bmp')
img = original_img.copy()
# cv2.imshow(winname='logo', mat=img)
# cv2.waitKey(0)
# file information
height, width = img.shape[:2]
print(f'Wysokość: {height}px')
print(f'Szerokość: {width}px')
print(f'Rozmiar pliku: {img.size/1000}kB')
# ----------
# ---line---
# ----------
# partition = 108 # parametr podziału
# xWidth = width/partition
# xstepWidth = int((xWidth - int(xWidth//1))*partition/2) # wyśrodkowanie po X
# yHeight = height/partition
# ystepHeight = int((yHeight - int(yHeight//1))*partition/2)# wyśrodkowanie po Y
# print(f'podział={partition}px\nprzesunięcie po X={xstepWidth}px\nprzesunięcie po Y={ystepHeight}px')
# for i in range(1+int(xWidth//1)):
# cv2.line(img=img, pt1=(xstepWidth+partition*i, 0), pt2=(xstepWidth+partition*i, height), color=(0, 0, 0), thickness=1, lineType=16)
# for i in range(1+int(yHeight//1)):
# cv2.line(img=img, pt1=(0, ystepHeight+partition*i), pt2=(width, ystepHeight+partition*i), color=(0, 0, 0), thickness=1, lineType=16)
#
# cv2.imshow(winname='logo', mat=img)
# cv2.waitKey(0)
# ---------------
# ---rectangle---
# ---------------
# img = original_img.copy()
# cv2.rectangle(img=img, pt1=(200, 50), pt2=(400, 230), color=(255, 0, 0), thickness=3)
# cv2.imshow('logo', img)
# cv2.waitKey(0)
# ------------
# ---circle---
# ------------
# img = original_img.copy()
# cv2.circle(img=img, center=(300, 140), radius=90, color=(0, 0, 255), thickness=3)
# cv2.imshow('logo', img)
# cv2.waitKey(0)
# -------------
# ---polygon---
# -------------
# img = original_img.copy()
# pts = np.array([[300, 140], [200, 200], [200, 50], [300, 50]], dtype='int32').reshape((-1, 1, 2))
# cv2.polylines(img=img, pts=[pts], isClosed=False, color=(0, 255, 0), thickness=3)
# cv2.imshow('logo', img)
# cv2.waitKey(0)
# -------------
# ---polygon---
# -------------
# img = original_img.copy()
# pts = np.array([[300, 140], [200, 200], [200, 50], [300, 50]], dtype='int32').reshape((-1, 1, 2))
# cv2.polylines(img=img, pts=[pts], isClosed=True, color=(0, 255, 0), thickness=3)
# cv2.imshow('logo', img)
# cv2.waitKey(0)
# ----------
# ---text---
# ----------
import os
# Draw a caption on a fresh copy of the image, display it, and save the
# result into the course images directory.
img = original_img.copy()
cv2.putText(
    img=img,
    text='Python nerka1',
    org=(20, 40),
    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
    fontScale=1.5,
    color=(0, 0, 0),
    thickness=2
)
cv2.imshow('logo', img)
directory = r'C:\Users\User\PycharmProjects\computer-vision-course\01_basics\images'
os.chdir(directory) # change into the output directory
cv2.imwrite('zapis.bmp', img) # save the annotated image
print(os.listdir(directory))
cv2.waitKey(0)
| sgol62/computer-vision-course | 01_basics/02_drawing.py | 02_drawing.py | py | 2,778 | python | en | code | 0 | github-code | 13 |
10353143017 | from random import randint
# Roll a fair six-sided die 1000 times and print the average value
# (expected to be close to 3.5).
mylist = [randint(1, 6) for _ in range(1000)]
tally = sum(mylist)
# Divide by the actual number of rolls instead of a second hard-coded 1000.
average = tally / len(mylist)
print(average)
| JamesAUre/First-year-of-python | PythonSem1/BasicsDone/week3task5.py | week3task5.py | py | 228 | python | en | code | 0 | github-code | 13 |
38003827758 | # ------------------------------------------------------------
# RunJpsiExample.py
# J. Catmore (James.Catmore@cern.ch)
# Adapted by Cameron Cuthbert (17/07/2011)
# ------------------------------------------------------------
#-------------------------------------------------------------
# User analysis steering algorithm
#-------------------------------------------------------------
# Schedule the J/psi -> e+e- algorithms on the Athena top sequence.
from JpsiUpsilonTools.JpsiUpsilonToolsConf import JpsiEEAlg
from JpsiUpsilonTools.JpsiUpsilonToolsConf import JpsiEEExample
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
topSequence += JpsiEEAlg()
topSequence.JpsiEEAlg = Algorithm( "JpsiEEAlg" )
topSequence += JpsiEEExample(OutputLevel = INFO)
# ------------------------
# SET UP FITTER
# ------------------------
# These includes configure the vertex-fitter and electron tool instances
# (TrkVKalVrtFitter, InDetTrackSelectorToolEE, etc.) referenced below.
include( "configureServices.py" )
include( "configureElectronServices.py" )
# ----------------------------------
# User's analysis requirements here:
# ----------------------------------
from JpsiUpsilonTools.JpsiUpsilonToolsConf import Analysis__JpsiEEFinder
# Finder configuration: electron quality, mass window and fitter choice.
ExampleJpsiEEFinder = Analysis__JpsiEEFinder(name = "JpsiEEFinderName",
                                             OutputLevel = INFO,
                                             authorA = 1, #1=std,2=soft,8=forward
                                             authorB = 1,
                                             identificationA = 15, #1=loose,2=medium,3=tight x5 refers to the ++ ID. E.g. 15 is Loose++
                                             identificationB = 15,
                                             UsePlusPlusHelper = True,
                                             diElectrons = True,
                                             track1Mass = 0.510999,   # electron mass in GeV
                                             track2Mass = 0.510999,
                                             UseGSFCaloTracks = False,
                                             TrackOnlyFourVec = False,
                                             UncombinedFourVec = True,
                                             CombinedFourVec = False,
                                             UpdateTracksWithClusterEnergy = True,
                                             thresholdPt = 0.0,
                                             higherPt = 0.0,
                                             invMassUpper = 100000.0,
                                             invMassLower = 0.0,
                                             collAngleTheta = 0.0,
                                             collAnglePhi = 0.0,
                                             Chi2Cut = 200.,
                                             oppChargesOnly = True,
                                             sameChargesOnly = False,
                                             allChargeCombinations = False,
                                             electronCollectionKey = "ElectronAODCollection",
                                             TrackParticleCollection = "TrackParticleCandidate",
                                             TrackParticleCreatorTool = InDetParticleCreatorTool,
                                             V0VertexFitterTool = TrkV0Fitter, # V0 vertex fitter
                                             useV0Fitter = False, # if False a TrkVertexFitterTool will be used
                                             TrkVertexFitterTool = TrkVKalVrtFitter, # VKalVrt vertex fitter
                                             #TrkVertexFitterTool = InDetFullVxFitterTool, # Full Billoir vertex fitter
                                             #TrkVertexFitterTool = InDetFastVxFitterTool, # Fast Billoir vertex fitter
                                             #EgammaTrkRefitterTool = electronRefitterToolGSFCalo,
                                             TrackSelectorTool = InDetTrackSelectorToolEE,
                                             ConversionFinderHelperTool = InDetConversionHelper,
                                             VertexPointEstimator = VtxPointEstimator)
ToolSvc += ExampleJpsiEEFinder
topSequence.JpsiEEAlg.JpsiEEFinderName = ExampleJpsiEEFinder
JpsiEEExample.overwriteMass = False # keep this consistent with overwriteMass above
# Name of output file
topSequence.JpsiEEExample.outputNTupleName = "JpsiEEExample.root"
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/JpsiUpsilonTools/share/RunJpsiEEExample.py | RunJpsiEEExample.py | py | 4,816 | python | en | code | 1 | github-code | 13 |
24884019883 | """
@author: Ludovic
"""
import os
import time
import sys
currentDir = os.getcwd()    # NOTE(review): unused
# Ask which bot schematic to patch; schematics live under
# %LOCALAPPDATA%\RoboBuild\Saved\bots\.bot.<Name>\<Name>.schematic.json
Name = input("Creation's name : ")
try:
    ImportedBot = open(os.path.expandvars(r'%LOCALAPPDATA%\RoboBuild\Saved\bots\.bot.' + Name + '/' + Name + '.schematic.json'),"r")
except:
    print(Name + ".schematic.json not found, please read the instructions")
    pause = input("Press Enter to continue ")
    sys.exit(1)
partToHack = input("Hacking part (jet_engine) ")
juice = input("New hacked juice value (%) ")
number = input(partToHack + " number (01 or more) ")
FinalCode = ImportedBot.readlines()
line = 0
ImportedBot.close()
i = 0
# Strip the trailing newline from every line read from the schematic.
for code in FinalCode:
    FinalCode[i] = code[:-1]
    i+=1
print("\nSucessfully imported bot")
StartedTime = time.process_time()
timeStart = time.process_time()
# Scan for the target part header and overwrite its "Juice" property.
# NOTE(review): the fixed line+19 offset assumes one exact schematic
# layout — confirm it holds for all schematic versions.
while (line < len(FinalCode) - 1):
    if (FinalCode[line].startswith( '\t\t"' + partToHack + '_' + number)):
        FinalCode[line + 19] = '\t\t\t\t\t"Juice": { "tag": "EProperty_Float", "float": ' + juice +' },'
    if (timeStart + 0.5 < time.process_time()):
        # Redraw the 30-character progress bar at most twice per second.
        percent = int(line / len(FinalCode) * 100)
        left = 30 * percent // 100
        right = 30 - left
        print('\r[', '#' * left, ' ' * right, ']', f' {percent:.0f}%', sep='', end='', flush=True)
        timeStart = time.process_time()
    line+=1
# Draw the final 100% bar.
percent = 100
left = 30 * percent // 100
right = 30 - left
print('\r[', '#' * left, ' ' * right, ']', f' {percent:.0f}%', sep='', end='', flush=True)
print("\n\nSucessfully Converted " + Name + ".schematic.json bot in " + str(int((time.process_time() - StartedTime) * 10) / 10) + " seconds")
# Rewrite the schematic with the patched lines.
OutputJSON = open(os.path.expandvars(r'%LOCALAPPDATA%\RoboBuild\Saved\bots\.bot.' + Name + '/' + Name + '.schematic.json'),"w")
for printingLine in FinalCode:
    OutputJSON.write(printingLine + "\n")
# NOTE(review): a closing brace is appended unconditionally — presumably the
# last "}" was lost during the newline-stripping above; confirm.
OutputJSON.write("}")
OutputJSON.close()
print("\nSucessfully replaced bot")
pause = input("\nPress Enter to continue ") | ludovicb1239/MainAssemblyTools | Creation_Hacker/Hacker.py | Hacker.py | py | 1,974 | python | en | code | 1 | github-code | 13 |
31010013370 | from tratamentoDeDados import *
from prettytable import PrettyTable
somaFinal = 0           # grand total value of all registered merchandise
cadastro = 0            # 0 = keep registering, 1 = finished
tamanhoDoEstoque = 3    # stock capacity (max number of distinct items)
iD = 1                  # next merchandise id to assign
mercadorias = {1: "0"}  # id -> [name, min qty, current qty, max qty, unit value, total value]
while (cadastro == 0):
    control=[True]*4    # validation flags returned by tratamentoDeDados
    loop = True         # NOTE(review): unused
    nomeDaMercadoria = input("Digite o nome da mercadoria: \n")
    # NOTE(review): int()/float() run BEFORE tratamentoDeDados, so non-numeric
    # input raises ValueError here instead of being validated — confirm.
    control[0], quantidadeMinima = tratamentoDeDados(int(input("Digite a quantidade mínima da mercadoria: \n")))
    control[1], quantidadeAtual = tratamentoDeDados(int(input("Quantidade Atual da mercadoria: \n")))
    control[2], quantidadeMaxima = tratamentoDeDados(int(input("Quantidade máxima da mercadoria: \n")))
    control[3], valorUnitario = tratamentoDeDados(float(input("Valor unitário da mercadoria: \n")))
    valorUnitario = round(valorUnitario,2)
    valorTotal = round(quantidadeAtual*valorUnitario,2)
    # Store the item only when all four numeric fields validated.
    if (control[0] and control[1] and control[2] and control[3]):
        if(iD > tamanhoDoEstoque):
            flag = True
        else:
            flag = False
        if flag:
            # Stock full: optionally replace an existing item by id.
            resposta = input("O estoque atingiu a cacidade máxima, você deseja substituir alguma mercadoria no estoque para a que está sendo cadastrada? [S/N]")
            if(resposta == 'S'):
                iD_aux = int(input("Digite o iD da mercadoria: \n"))
                mercadorias[iD_aux] = [nomeDaMercadoria,quantidadeMinima,quantidadeAtual,quantidadeMaxima,valorUnitario,valorTotal]
            elif(resposta == 'N'):
                print("Retornando ao inicio do cadastro")
        else:
            mercadorias[iD] = [nomeDaMercadoria,quantidadeMinima,quantidadeAtual, quantidadeMaxima,valorUnitario,valorTotal]
            iD += 1
    maisMercadorias = input("Deseja adicionar mais mercadorias? [S/N] \n")
    if maisMercadorias == 'N':
        print("O relatório será exibido")
        cadastro = 1
# Build and print the report tables once registration ends.
tabela = PrettyTable()
tabela.field_names = ["Nome da Mercadoria", "Quantidade Mínima", "Quantidade Atual", "Quantidade Máxima", "Valor Unitário","Valor Total"]
for cont in range(1,len(mercadorias)+1):
    somaFinal = somaFinal + mercadorias[cont][5]
    tabela.add_row(mercadorias[cont])
tabela2 = PrettyTable()
tabela2.field_names = ["Valor Total"]
tabela2.add_row([somaFinal])
print(tabela)
print("_____________________________________________________")
| SamuelConcercio/AtividadeMercadorias | main.py | main.py | py | 2,377 | python | pt | code | 0 | github-code | 13 |
41874291409 | import json
from requests.models import Response
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
from pprint import pprint
from objdict import ObjDict
from collections import Counter
# NOTE(review): client credentials are hard-coded and committed here —
# rotate them and load from environment variables instead.
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id="7c5e565854fe420ba58b7ec434a105e1",
                                                           client_secret="52364c23fd6c4c8788c48313c576895a"))
# id of the playlist
# maybe use params to get the playlist (userfriendly)
pl_id = 'spotify:playlist:1JRDlOReeuUM28zjNTfq6f'
#night
# https://open.spotify.com/playlist/3STQdPYGFTroG0wZrlnzry?si=697a0fe164784c18
#best from the past
# https://open.spotify.com/playlist/1JRDlOReeuUM28zjNTfq6f?si=1eb20e1d905846c0
def getResponse(pl_id):
    """
    getResponse(pl_id) information about the playlist
    :param: pl_id: the playlist id
    :returns: dict with an 'items' list covering ALL tracks of the playlist
    """
    offset = 0
    all_items = []
    while True:
        response = sp.playlist_items(pl_id,
                                     offset=offset,
                                     limit=100,
                                     fields='items.track')
        items = response['items']
        # breaks if reaches end
        if len(items) == 0:
            break
        # Bug fix: dict.update() previously overwrote the 'items' key with
        # each new page, so only the LAST page survived; accumulate pages
        # instead so playlists longer than 100 tracks are complete.
        all_items.extend(items)
        offset += len(items)
    # pprint(all_items)
    return {'items': all_items}
def getArtistURL(res):
    """
    getArtistURL(res): Spotify URLs of every artist on every track
    :param: res: playlist data as returned by getResponse(pl_id)
    :returns: list of artist Spotify URLs (one entry per appearance)
    """
    urlList = [
        artist['external_urls']['spotify']
        for items in res['items']
        for artist in items['track']['artists']
    ]
    pprint(urlList)
    return urlList
# Script entry: dump every artist URL of the configured playlist.
response = getResponse(pl_id)
getArtistURL(response)
| maximiliansteiger/spotify_playlist_analyzer_py | testing.py | testing.py | py | 2,061 | python | en | code | 0 | github-code | 13 |
32608814871 | import threading
import time
sem = threading.Semaphore(2)  # at most two of the three workers run at once
threads = []  # NOTE(review): never populated; handles are t, t2, t3 below
# sem.acquire()
# sem.release()
def fun1():
    """Worker 1: repeatedly holds one of the two semaphore slots for 5s."""
    while True:
        sem.acquire()
        print("1 bohrain")
        time.sleep(5)
        sem.release()
        print("1 release")
def fun2():
    """Worker 2: repeatedly holds one of the two semaphore slots for 3s."""
    while True:
        sem.acquire()
        print("2 bohrani")
        time.sleep(3)
        sem.release()
        print("2 release")
def fun3():
    """Worker loop: repeatedly occupies one semaphore slot for 2 seconds."""
    while True:
        with sem:  # acquire ... release
            print("3 bohrani")
            time.sleep(2)
        print("3 release")
# Spawn the three workers; each runs forever, so the program never exits
# on its own (the threads are non-daemon).
t = threading.Thread(target=fun1)
t.start()
t2 = threading.Thread(target=fun2)
t2.start()
t3 = threading.Thread(target=fun3)
t3.start()
| Amin-mashari/Task-Scheduling | semaphore/semTest.py | semTest.py | py | 691 | python | en | code | 0 | github-code | 13 |
32554947729 | """
-------------------------------------------------------
Lab 11, Task 12
Description:
Returns whether word is on the diagonal of a square matrix
of characters.
-------------------------------------------------------
Author: Mohammad El-Hassan
ID: 169067950
Email: elha7950@mylaurier.ca
__updated__ = "2023-11-30"
-------------------------------------------------------
"""
# Imports
from functions import find_word_diagonal
# Define matrix
# Define matrix (3x3 grid of characters; main diagonal reads 'c','o','g')
matrix = [['c', 'a', 't'], ['d', 'o', 'g'], ['b', 'i', 'g']]
# Call the function
# NOTE(review): presumably returns whether "cog" lies on the diagonal --
# confirm against functions.find_word_diagonal.
result = find_word_diagonal(matrix, "cog")
# Output result
print(result)
| mohammadelhsn/CP104 | elha7950_l11/src/t12.py | t12.py | py | 622 | python | en | code | 0 | github-code | 13 |
29354444992 | import argparse
import json
import os
from dotenv import load_dotenv
from google.cloud import dialogflow
def create_intent(project_id, display_name, training_phrases_parts, message_texts):
    """Create a Dialogflow ES intent with the given training phrases and replies.

    Args:
        project_id: GCP project id hosting the Dialogflow agent.
        display_name: Human-readable intent name.
        training_phrases_parts: Iterable of example phrases, one string each.
        message_texts: List of response texts for the intent's text message.
    """
    intents_client = dialogflow.IntentsClient()
    parent = dialogflow.AgentsClient.agent_path(project_id)
    training_phrases = []
    for training_phrases_part in training_phrases_parts:
        # Each training phrase is built from a single text part.
        part = dialogflow.Intent.TrainingPhrase.Part(text=training_phrases_part)
        training_phrase = dialogflow.Intent.TrainingPhrase(parts=[part])
        training_phrases.append(training_phrase)
    text = dialogflow.Intent.Message.Text(text=message_texts)
    message = dialogflow.Intent.Message(text=text)
    intent = dialogflow.Intent(
        display_name=display_name, training_phrases=training_phrases, messages=[message]
    )
    # Remote call: registers the intent with the agent.
    response = intents_client.create_intent(
        request={"parent": parent, "intent": intent}
    )
    print("Intent created: {}".format(response))
def main():
    """Parse CLI arguments and upload every intent from the JSON source file."""
    arg_parser = argparse.ArgumentParser(
        description='Загрузка вопросов и ответов из файла'
    )
    arg_parser.add_argument('source', help='Путь к файлу с вопросами')
    cli_args = arg_parser.parse_args()
    load_dotenv()
    project_id = os.getenv('PROJECT_ID')
    with open(cli_args.source, 'r') as phrases_file:
        phrases = json.load(phrases_file)
    for intent_name, intent_value in phrases.items():
        create_intent(project_id, intent_name, intent_value['questions'], [intent_value['answer']])
if __name__ == '__main__':
main() | milov52/speach_bot | load_data.py | load_data.py | py | 1,626 | python | en | code | 1 | github-code | 13 |
74527387536 | import requests
import logging
import time
import pandas as pd
# Endpoints of the two local helper services and polling configuration.
BASE_STREAM_API_URL = "http://localhost:8888/youtube_stream_api"
BASE_DEVICE_SCANNER_API_URL = "http://localhost:8887/device_scan_api"
WHITELISTED_DEVICES_CSV_PATH = "whitelisted_devices.csv"
WAIT_TIME_BETWEEN_SCANS_IN_MINUTES = 5
# Log to a file and to the console with one shared format.
logger = logging.getLogger('master_app')
logger.setLevel(logging.INFO)
logger_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('master_app.log')
file_handler.setFormatter(logger_formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(logger_formatter)
logger.addHandler(console_handler)
def get_whitelisted_devices_dict():
    """Load the whitelist CSV and map lower-cased MAC address -> device name."""
    devices = pd.read_csv(WHITELISTED_DEVICES_CSV_PATH, header=0)
    return {
        mac.lower(): name
        for mac, name in zip(devices["mac"].values, devices["name"].values)
    }
def scan_for_whitelisted_devices():
    """Trigger a network scan and return {mac: name} for whitelisted devices found.

    Returns None when no whitelisted device is present.
    Raises Exception when the scanner API call fails.
    """
    url = BASE_DEVICE_SCANNER_API_URL + "/scan"
    response = requests.get(url)
    if response.status_code != requests.codes.ok:
        raise Exception("Scanning not worked")
    response_json = response.json()
    # The file can be update at any time so we need to perform this at every scanning
    whitelist_device_dict = get_whitelisted_devices_dict()
    whitelist_device_MACs = list(whitelist_device_dict.keys())
    found_device_MACs = list(response_json["device_macs"])
    # Intersection of scanned MACs with the whitelist.
    present_whitelist_device_MACs = set(found_device_MACs).intersection(set(whitelist_device_MACs))
    if len(present_whitelist_device_MACs) <= 0:
        # There are no whitelisted devices
        return None
    present_device_macs_dict = {x: whitelist_device_dict[x] for x in present_whitelist_device_MACs}
    return present_device_macs_dict
def check_if_stream_is_alive():
    """Ask the Stream API whether the stream is currently live (bool)."""
    response = requests.get(BASE_STREAM_API_URL + "/alive")
    if response.status_code != requests.codes.ok:
        raise Exception("Could not check if stream is alive")
    return response.json()["alive"]
def check_and_fix_stream_health():
    """Ask the Stream API to check (and repair) stream health; return its JSON reply."""
    response = requests.get(BASE_STREAM_API_URL + "/check_health")
    if response.status_code != requests.codes.ok:
        raise Exception("Could not check stream health")
    return response.json()
def start_streaming():
    """Tell the Stream API to start streaming; return its JSON reply."""
    response = requests.get(BASE_STREAM_API_URL + "/start")
    if response.status_code != requests.codes.ok:
        raise Exception("There was an error when tried to START streaming via Stream API")
    return response.json()
def stop_streaming():
    """Tell the Stream API to stop streaming; return its JSON reply."""
    response = requests.get(BASE_STREAM_API_URL + "/stop")
    if response.status_code != requests.codes.ok:
        raise Exception("There was an error when tried to STOP streaming via Stream API")
    return response.json()
def main():
    """Poll the device scanner forever; stream only while nobody is home.

    The stream is started after two consecutive scans without a whitelisted
    device, and stopped as soon as one is seen again.
    """
    # Streaming will start if this many scans there were no whitelisted devices
    no_whitelisted_devices_scan_tolerance = 2
    # This variable is increased when there are no whitelisted devices found
    no_whitelisted_devices_scan_number = 0
    while True:
        present_whitelisted_devices = scan_for_whitelisted_devices()
        check_and_fix_stream_health()
        is_stream_alive = check_if_stream_is_alive()
        logger.info("Stream is alive: {0}".format(is_stream_alive))
        if present_whitelisted_devices is not None:
            # Someone is home: make sure the stream is stopped.
            logger.info("Present whitelisted devices: {0}".format(present_whitelisted_devices))
            if is_stream_alive:
                try:
                    r = stop_streaming()
                    logger.info("Successfully stopped streaming with Stream API")
                    logger.info("Response from Stream API: {0}".format(r))
                except Exception as e:
                    logger.error("Error: {0}".format(e))
            else:
                logger.info("Streaming is stopped already")
        else:
            logger.info("There are no whitelisted devices")
            no_whitelisted_devices_scan_number += 1
            logger.info("Current scans without whitelisted device: {0}".format(no_whitelisted_devices_scan_number))
            if no_whitelisted_devices_scan_number >= no_whitelisted_devices_scan_tolerance:
                logger.info("Scan number without whitelisted devices reached the tolerance. Streaming starts.")
                # reset the counter
                no_whitelisted_devices_scan_number = 0
                if not is_stream_alive:
                    try:
                        r = start_streaming()
                        logger.info("Successfully started streaming with Stream API")
                        logger.info("Response from Stream API: {0}".format(r))
                    except Exception as e:
                        logger.error("Error: {0}".format(e))
                else:
                    logger.info("Already streaming")
        logger.info("Now waiting for {0} minutes".format(WAIT_TIME_BETWEEN_SCANS_IN_MINUTES))
        time.sleep(int(WAIT_TIME_BETWEEN_SCANS_IN_MINUTES * 60))
if __name__ == "__main__":
main()
| gaborvecsei/YouTube-Live-Stream-Docker | master_app_image/code/start_app.py | start_app.py | py | 5,261 | python | en | code | 56 | github-code | 13 |
38945249942 | """
******************************************************************************************************
Project Introduction to Signal and Image Processing - Group Project: Where is Waldo?
Filename ColorMatching.py
Institution: University of Bern
Python Python 3.6
@author Simon Scheurer, Yves Jegge
@date 28.05.2016
@status Development
******************************************************************************************************
"""
# Import Package #
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Import Modul #
"""
/*----------------------------------------------------------------------------------------------------
Method: color_matching()
------------------------------------------------------------------------------------------------------
This Method takes the original image and find via color matching waldo
------------------------------------------------------------------------------------------------------
Input Parameter: original_image
Output Parameter: Give probability map back with the same size of original image (0....1)
----------------------------------------------------------------------------------------------------*/
"""
def color_matching(image):
    """Locate Waldo-like regions in *image* via overlaps of color masks.

    Builds binary masks for red, white, pink and black, cleans them
    morphologically, intersects characteristic pairs (red+white stripes,
    red+black hair/hat, pink+black hair/face, pink+stripes), and returns the
    input image masked to areas where at least three dilated overlap maps
    agree. Assumes *image* is RGB -- TODO confirm against callers.
    """
    # Settings for color Matching #
    show_color = False
    # Blurring image #
    image_blurred = cv2.GaussianBlur(image,(5,3),0)
    # Separate colors of image #
    red_filtered, white_filtered, pink_filtered, black_filtered = separate_colors(image_blurred)
    # Kernels #
    kernel_noise = np.ones((2,2))
    kernel_small = np.ones((4,3))
    kernel_big = np.ones((40,20))
    # Remove small objects #
    black_filtered = cv2.morphologyEx(black_filtered, cv2.MORPH_OPEN, kernel_noise)
    # Remove object with too small and too big size #
    red_filtered = remove_image_objects(red_filtered, 8, 200, 1, 8, -1.5, 0.5)
    white_filtered = remove_image_objects(white_filtered, 3, 300)
    pink_filtered = remove_image_objects(pink_filtered, 5, 300)
    black_filtered = remove_image_objects(black_filtered, 12, 300)
    # Dilate filters (make objects bigger) #
    red_filtered = cv2.dilate(red_filtered, kernel_small, iterations=2)
    white_filtered = cv2.dilate(white_filtered, kernel_small, iterations=1)
    pink_filtered = cv2.dilate(pink_filtered, kernel_small, iterations=1)
    black_filtered = cv2.dilate(black_filtered, kernel_small, iterations=2)
    # Find overlaps (elementwise AND of binary masks) #
    strips_filtered = np.multiply(red_filtered, white_filtered)
    hair_hut_filtered = np.multiply(red_filtered, black_filtered)
    hair_face_filtered = np.multiply(pink_filtered, black_filtered)
    strips_face_filtered = np.multiply(pink_filtered, strips_filtered)
    # Dilate filters (make objects bigger) #
    strips_filtered = cv2.dilate(strips_filtered, kernel_big, iterations=1)
    hair_hut_filtered = cv2.dilate(hair_hut_filtered, kernel_big, iterations=1)
    hair_face_filtered = cv2.dilate(hair_face_filtered, kernel_big, iterations=1)
    strips_face_filtered = cv2.dilate(strips_face_filtered, kernel_big, iterations=1)
    # Sum of evidence maps; a pixel scoring >= 3 matches most cues #
    color_filtered = strips_filtered + hair_hut_filtered + hair_face_filtered + strips_face_filtered
    # Dilate filters (make objects bigger) #
    color_filtered = cv2.dilate(color_filtered, kernel_big, iterations=2)
    if show_color:
        # Debug visualization of the intermediate masks.
        plt.figure(200)
        plt.subplot(2,2,1)
        plt.imshow(red_filtered)
        plt.subplot(2,2,2)
        plt.imshow(white_filtered)
        plt.subplot(2,2,3)
        plt.imshow(pink_filtered)
        plt.subplot(2,2,4)
        plt.imshow(black_filtered)
        plt.figure(201)
        plt.subplot(2,2,1)
        plt.imshow(strips_filtered)
        plt.subplot(2,2,2)
        plt.imshow(hair_hut_filtered)
        plt.subplot(2,2,3)
        plt.imshow(hair_face_filtered)
        plt.subplot(2,2,4)
        plt.imshow(strips_face_filtered)
    # Cut out only matched areas #
    filtered_img = cv2.bitwise_and(image, image, mask = np.uint8(color_filtered >= 3))
    return filtered_img
"""
/*----------------------------------------------------------------------------------------------------
Method: separate_colors()
------------------------------------------------------------------------------------------------------
This Method takes the original image and find via color matching waldo
------------------------------------------------------------------------------------------------------
Input Parameter: original_image
Output Parameter: Give probability map back with the same size of original image (0....1)
----------------------------------------------------------------------------------------------------*/
"""
def separate_colors(image):
    """Threshold *image* in HSV space into binary masks for Waldo's colors.

    :param image: RGB image (H x W x 3)
    :returns: four uint8 masks (red, white, pink, black), each H x W with 1
        where the pixel falls inside the corresponding HSV range
    """
    # Convert to hsv colorspace #
    image_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    # Filter red: hue wraps around 0/180, so accept both ends of the range #
    rh = np.bitwise_or((image_hsv[:, :, 0] < 5), (image_hsv[:, :, 0] > 172))
    rs = (image_hsv[:, :, 1] > 100)
    rv = (image_hsv[:, :, 2] > 140)
    red_filtered = np.uint8(np.bitwise_and(np.bitwise_and(rh, rs), rv))
    # Filter white: low saturation, high value #
    wh = np.bitwise_or((image_hsv[:, :, 0] < 65), (image_hsv[:, :, 0] > 165))
    ws = (image_hsv[:, :, 1] < 90)
    wv = (image_hsv[:, :, 2] > 170)
    white_filtered = np.uint8(np.bitwise_and(np.bitwise_and(wh, ws), wv))
    # Filter pink: reddish hue with low saturation #
    ph = np.bitwise_or((image_hsv[:, :, 0] < 10), (image_hsv[:, :, 0] > 172))
    ps = (image_hsv[:, :, 1] < 90)
    pv = (image_hsv[:, :, 2] > 140)
    pink_filtered = np.uint8(np.bitwise_and(np.bitwise_and(ph, ps), pv))
    # Filter black: anything sufficiently dark, regardless of hue #
    black_filtered = np.uint8((image_hsv[:, :, 2] < 98))
    return red_filtered, white_filtered, pink_filtered, black_filtered
"""
/*----------------------------------------------------------------------------------------------------
Method: remove_image_objects()
------------------------------------------------------------------------------------------------------
This Method deletes object which are too small or too big
------------------------------------------------------------------------------------------------------
Input Parameter: image as a input, min size of objects, max size of objects
Output Parameter: image without object which are too small or too big
----------------------------------------------------------------------------------------------------*/
"""
def remove_image_objects(img, min_size, max_size, min_aspect_ratio = 0, max_aspect_ratio = 0, min_angle_ratio = 0, max_angle_ratio = 0):
    """Remove connected components that are too small/too big, and optionally
    those whose bounding-box aspect ratio or fitted-line slope is out of range.

    :param img: binary uint8 mask
    :param min_size: minimum component area (pixels), inclusive
    :param max_size: maximum component area (pixels), inclusive
    :param min_aspect_ratio: lower width/height bound (0 disables the check)
    :param max_aspect_ratio: upper width/height bound (0 disables the check)
    :param min_angle_ratio: lower dy/dx slope bound of the fitted line
    :param max_angle_ratio: upper dy/dx slope bound of the fitted line
    :returns: filtered binary uint8 mask
    """
    #find all your connected components (white blobs in your image)
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity=8)
    #connectedComponentswithStats yields every seperated component with information on each of them, such as size
    #the following part is just taking out the background which is also considered a component, but most of the time we don't want that.
    sizes = stats[1:, -1]; nb_components = nb_components - 1
    #for every component in the image, you keep it only if it's above min_size
    # area_size maps every pixel to the area of the component it belongs to
    area_size = sizes[output-1]
    img2 = np.uint8(np.bitwise_and(np.bitwise_and((area_size >= min_size), (area_size <= max_size)), (img > 0)))
    if (min_aspect_ratio > 0) and (max_aspect_ratio > 0):
        _, contours, _= cv2.findContours(img2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            # Calculate aspect ratio #
            x,y,w,h = cv2.boundingRect(contour)
            aspect_ratio = float(w)/h
            # Interpolate line in object #
            [vx,vy,xx,yy] = cv2.fitLine(contour, cv2.DIST_L2,0,0.01,0.01)
            angle_ratio = vy/vx
            # Remove elements with too small or big aspect ratio (Width/Height) or angle #
            # NOTE(review): the whole bounding box is zeroed, which may also
            # erase overlapping neighbouring components.
            if (aspect_ratio > max_aspect_ratio) or (aspect_ratio < min_aspect_ratio) or (angle_ratio < min_angle_ratio) or (angle_ratio > max_angle_ratio):
                img2[y:y+h, x:x+w] = 0
    return img2
| YvesJegge/IntroductionToSignalAndImageProcessingGroupProject | ColorMatching.py | ColorMatching.py | py | 8,235 | python | en | code | 0 | github-code | 13 |
44432134216 | #!/usr/bin/env python
# encoding: utf-8
from django.conf import settings
from django.contrib.sites.models import Site
def domain(request):
    """Add DOMAIN and the current Site object to the template context."""
    current_site = Site.objects.get_current()
    site_domain = getattr(settings, 'DOMAIN', 'http://%s' % current_site.domain)
    return {
        'DOMAIN': site_domain,
        'site': current_site,
    }
| krak3n/Facio-Default-Template | __PROJECT_NAME__/context_processors.py | context_processors.py | py | 375 | python | en | code | 2 | github-code | 13 |
20953547898 | import warnings
from typing import List, Optional, Tuple
import numpy as np
import torch
from skimage.feature import peak_local_max
def BCE_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Numerically guarded binary cross-entropy, averaged over all elements.

    Exists because Pytorch's BCELoss complains about instability in FP16
    (even with no_grad); the log terms are clamped to [-100, 100] instead.
    """
    log_p = torch.clamp(torch.log(input + 1e-10), -100, 100)
    log_not_p = torch.clamp(torch.log(1 - input + 1e-10), -100, 100)
    return -(target * log_p + (1 - target) * log_not_p).mean()
def create_heatmap_batch(
    shape: Tuple[int, int], keypoints: List[torch.Tensor], sigma: float, device: torch.device
) -> torch.Tensor:
    """Render one heatmap per keypoint set and stack them into a batch.

    Args:
        shape (Tuple): H,W
        keypoints (List[torch.Tensor]): N Tensors of size K_i x 2 with batch of keypoints.
    Returns:
        (torch.Tensor): N x H x W Tensor with N heatmaps
    """
    heatmaps = [
        generate_channel_heatmap(shape, channel_keypoints, sigma, device)
        for channel_keypoints in keypoints
    ]
    return torch.stack(heatmaps, dim=0)
def generate_channel_heatmap(
    image_size: Tuple[int, int], keypoints: torch.Tensor, sigma: float, device: torch.device
) -> torch.Tensor:
    """Render a heatmap with one Gaussian blob per keypoint.

    Blobs are combined with an elementwise max (not a sum) so overlapping
    keypoints do not suppress each other's local maxima. Origin is the
    top-left corner; u runs right, v runs down.

    Args:
        image_size: (H, W) of the heatmap image
        keypoints: 2D Tensor K x 2, with K keypoints (u, v)
        sigma: std deviation of the blobs
        device: device on which to allocate new tensors
    Returns:
        torch.Tensor: H x W combined heatmap of all keypoints
    """
    assert isinstance(keypoints, torch.Tensor)
    if keypoints.numel() == 0:
        # No keypoints in this channel -> all-zero heatmap.
        return torch.zeros(image_size, device=device)
    height, width = image_size
    us = torch.arange(width, device=device, dtype=torch.float32)
    vs = torch.arange(height, device=device, dtype=torch.float32)
    # Broadcast to (K, H, W): per-keypoint offset grids along each axis.
    du = us.view(1, 1, width) - keypoints[..., 0].view(-1, 1, 1)
    dv = vs.view(1, height, 1) - keypoints[..., 1].view(-1, 1, 1)
    # Isotropic Gaussian: exp(-0.5 * (du^2 + dv^2) / sigma^2)
    blobs = torch.exp(-0.5 * (torch.square(du) + torch.square(dv)) / (sigma ** 2))
    # Merge per-keypoint blobs with a max.
    return torch.max(blobs, dim=0)[0]
def get_keypoints_from_heatmap_scipy(
    heatmap: torch.Tensor, min_keypoint_pixel_distance: int, max_keypoints: int = 20
) -> List[Tuple[int, int]]:
    """
    Extracts at most 20 keypoints from a heatmap, where each keypoint is defined as being a local maximum within a 2D mask [ -min_pixel_distance, + pixel_distance]^2
    cf https://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.peak_local_max
    THIS IS SLOW! use get_keypoints_from_heatmap_batch_maxpool instead.
    Args:
        heatmap : heatmap image
        min_keypoint_pixel_distance : The size of the local mask, serves as NMS
        max_keypoints: the amount of keypoints to determine from the heatmap, -1 to return all points. Defaults to 20 to limit computational burder
        for models that predict random keypoints in early stage of training.
    Returns:
        A list of 2D keypoints in (u, v) order
    """
    warnings.warn("get_keypoints_from_heatmap_scipy is slow! Use get_keypoints_from_heatmap_batch_maxpool instead.")
    # Detection runs on CPU via scikit-image, hence the device transfer.
    np_heatmap = heatmap.cpu().numpy().astype(np.float32)
    # num_peaks and rel_threshold are set to limit computational burden when models do random predictions.
    max_keypoints = max_keypoints if max_keypoints > 0 else np.inf
    keypoints = peak_local_max(
        np_heatmap,
        min_distance=min_keypoint_pixel_distance,
        threshold_rel=0.1,
        threshold_abs=0.1,
        num_peaks=max_keypoints,
    )
    return keypoints[::, ::-1].tolist()  # convert to (u,v) aka (col,row) coord frame from (row,col)
def get_keypoints_from_heatmap_batch_maxpool(
    heatmap: torch.Tensor,
    max_keypoints: int = 20,
    min_keypoint_pixel_distance: int = 1,
    abs_max_threshold: Optional[float] = None,
    rel_max_threshold: Optional[float] = None,
    return_scores: bool = False,
) -> List[List[List[Tuple[int, int]]]]:
    """Fast extraction of keypoints from a batch of heatmaps using maxpooling.

    Inspired by mmdetection and CenterNet:
    https://mmdetection.readthedocs.io/en/v2.13.0/_modules/mmdet/models/utils/gaussian_target.html

    Args:
        heatmap (torch.Tensor): NxCxHxW heatmap batch
        max_keypoints (int, optional): max number of keypoints to extract, lowering will result in faster execution times. Defaults to 20.
        min_keypoint_pixel_distance (int, optional): half-size of the NMS window. Defaults to 1.

        Following thresholds can be used at inference time to select where you want to be on the AP curve. They should ofc. not be used for training
        abs_max_threshold (Optional[float], optional): absolute score floor for keeping a keypoint. Defaults to None.
        rel_max_threshold (Optional[float], optional): score floor relative to the heatmap maximum. Defaults to None.

    Returns:
        The extracted keypoints for each batch, channel and heatmap; and their scores
    """
    # TODO: maybe separate the thresholding into another function to make sure it is not used during training, where it should not be used?
    # TODO: ugly that the output can change based on a flag.. should always return scores and discard them when I don't need them...

    batch_size, n_channels, _, width = heatmap.shape

    # obtain max_keypoints local maxima for each channel (w/ maxpool)

    kernel = min_keypoint_pixel_distance * 2 + 1
    pad = min_keypoint_pixel_distance
    # exclude border keypoints by padding with highest possible value
    # bc the borders are more susceptible to noise and could result in false positives
    padded_heatmap = torch.nn.functional.pad(heatmap, (pad, pad, pad, pad), mode="constant", value=1.0)
    max_pooled_heatmap = torch.nn.functional.max_pool2d(padded_heatmap, kernel, stride=1, padding=0)
    # if the value equals the original value, it is the local maximum
    local_maxima = max_pooled_heatmap == heatmap
    # all values to zero that are not local maxima
    heatmap = heatmap * local_maxima

    # extract top-k from heatmap (may include non-local maxima if there are less peaks than max_keypoints)
    scores, indices = torch.topk(heatmap.view(batch_size, n_channels, -1), max_keypoints, sorted=True)
    # convert flat indices to (row, col) pairs
    indices = torch.stack([torch.div(indices, width, rounding_mode="floor"), indices % width], dim=-1)
    # at this point either score > 0.0, in which case the index is a local maximum
    # or score is 0.0, in which case topk returned non-maxima, which will be filtered out later.

    #  remove top-k that are not local maxima and threshold (if required)
    # thresholding shouldn't be done during training

    #  moving them to CPU now to avoid multiple GPU-mem accesses!
    indices = indices.detach().cpu().numpy()
    scores = scores.detach().cpu().numpy()
    filtered_indices = [[[] for _ in range(n_channels)] for _ in range(batch_size)]
    filtered_scores = [[[] for _ in range(n_channels)] for _ in range(batch_size)]
    # determine NMS threshold
    threshold = 0.01  # make sure it is > 0 to filter out top-k that are not local maxima
    if abs_max_threshold is not None:
        threshold = max(threshold, abs_max_threshold)
    if rel_max_threshold is not None:
        threshold = max(threshold, rel_max_threshold * heatmap.max())
    # have to do this manually as the number of maxima for each channel can be different
    for batch_idx in range(batch_size):
        for channel_idx in range(n_channels):
            candidates = indices[batch_idx, channel_idx]
            for candidate_idx in range(candidates.shape[0]):

                # these are filtered out directly.
                if scores[batch_idx, channel_idx, candidate_idx] > threshold:
                    # convert to (u,v)
                    filtered_indices[batch_idx][channel_idx].append(candidates[candidate_idx][::-1].tolist())
                    filtered_scores[batch_idx][channel_idx].append(scores[batch_idx, channel_idx, candidate_idx])
    if return_scores:
        return filtered_indices, filtered_scores
    else:
        return filtered_indices
def compute_keypoint_probability(heatmap: torch.Tensor, detected_keypoints: List[Tuple[int, int]]) -> List[float]:
    """Look up the heatmap value (detection confidence) of each extracted keypoint.

    Keypoints are (u, v) pairs, while the H x W heatmap is indexed [v][u].
    """
    probabilities = []
    for u, v in detected_keypoints:
        probabilities.append(heatmap[v][u].item())
    return probabilities
if __name__ == "__main__":
    # Quick profiling demo for the maxpool-based extractor.
    # NOTE: requires a CUDA device (.cuda() / device="cuda" below).
    import torch.profiler as profiler

    keypoints = torch.tensor([[150, 134], [64, 153]]).cuda()
    heatmap = generate_channel_heatmap((1080, 1920), keypoints, 6, "cuda")
    heatmap = heatmap.unsqueeze(0).unsqueeze(0).repeat(1, 1, 1, 1)
    # heatmap = torch.stack([heatmap, heatmap], dim=0)
    print(heatmap.shape)
    with profiler.profile(record_shapes=True) as prof:
        with profiler.record_function("get_keypoints_from_heatmap_batch_maxpool"):
            print(get_keypoints_from_heatmap_batch_maxpool(heatmap, 50, min_keypoint_pixel_distance=5))
    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
| tlpss/keypoint-detection | keypoint_detection/utils/heatmap.py | heatmap.py | py | 9,967 | python | en | code | 30 | github-code | 13 |
4261223652 | #negative samples based on monolingual corpora
import numpy as np
import argparse
from sklearn.metrics.pairwise import cosine_similarity

parser = argparse.ArgumentParser()
parser.add_argument("--src-embedding", required=False, default="wmt20-sent.en-ps.ps.emb", help="file with the source side embeddings")
parser.add_argument("--tgt-embedding", required=False, default="wmt20-sent.en-ps.en.emb", help="file with the target side embeddings")
parser.add_argument("--src-negs", required=False, default="mono-1.txt", help="file with the source side based negative samples")
parser.add_argument("--tgt-negs", required=False, default="mono-2.txt", help="file with the target side based negative samples")
# BUG FIX: type was int, but the default (and the intended values) are
# fractions like 0.01; int("0.01") would even raise on the command line.
parser.add_argument("--limit", required=False, default=0.01, type=float)
# BUG FIX: the help text was a copy-paste of the embeddings option.
parser.add_argument("--laser-scores", required=False, default="laser-scores", help="file with the LASER scores")
args = parser.parse_args()

# BUG FIX: argparse stores hyphenated options with underscores, so the values
# are args.src_embedding etc. The original `args.src-embedding` parsed as
# `args.src - embedding` and crashed with a NameError.

# reading LASER embeddings (raw float32, row-major, 1024 dims per sentence)
dim = 1024
x = np.fromfile(args.src_embedding, dtype=np.float32, count=-1)
x.resize(x.shape[0] // dim, dim)
y = np.fromfile(args.tgt_embedding, dtype=np.float32, count=-1)
y.resize(y.shape[0] // dim, dim)

# keep the (at most 1000) sentence indices with a non-zero LASER score
with open(args.laser_scores, "r") as scores_file:
    sc = [float(line.rstrip().lstrip()) for line in scores_file]
sc = np.nonzero(sc)[0][:1000]

# computing cosine similarities on both sides; a sentence is a negative
# candidate when its similarity lies within +/- `limit` of the self-similarity
limit = args.limit
with open(args.src_negs, "w") as f:
    for i in sc:
        d = cosine_similarity(x[i].reshape(1, -1), x)[0]
        e = np.intersect1d(np.where(d < d[i] * 1)[0], np.where(d > d[i] * (1 - limit))[0])
        for item in e:
            f.write(str(i) + ',' + str(item) + '\n')
        e = np.intersect1d(np.where(d > d[i] * 1)[0], np.where(d < d[i] * (1 + limit))[0])
        for item in e:
            f.write(str(i) + ',' + str(item) + '\n')
with open(args.tgt_negs, "w") as f:
    for i in sc:
        d = cosine_similarity(y[i].reshape(1, -1), y)[0]
        e = np.intersect1d(np.where(d < d[i] * 1)[0], np.where(d > d[i] * (1 - limit))[0])
        for item in e:
            f.write(str(i) + ',' + str(item) + '\n')
        e = np.intersect1d(np.where(d > d[i] * 1)[0], np.where(d < d[i] * (1 + limit))[0])
        for item in e:
            f.write(str(i) + ',' + str(item) + '\n')
| Azax4/jhu-parallel-corpus-filtering | neg-samples/neg-pair-mono.py | neg-pair-mono.py | py | 2,096 | python | en | code | 0 | github-code | 13 |
41643419279 |
import pandas as pd
import numpy as np
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# newer environments need the standalone `joblib` package (commented import).
from sklearn.externals import joblib
#import joblib
from fetchData import main
import os

# Column order must match the feature order the model was trained with.
FEATURES = ["Pregnancies", "Glucose", "BloodPressure", "SkinThickness",
"Insulin", "BMI", "DiabetesPedigreeFunction", "Age"]

# Fetch a single patient record; presumably the 8 feature values in FEATURES
# order -- TODO confirm against fetchData.main.
data = main("?id", "=", ":1")
data = data[0]
columns = FEATURES
# NOTE(review): this builds an 8x1 frame (features as the index); verify the
# model was trained on that orientation -- a (1, 8) frame via
# pd.DataFrame([data], columns=FEATURES) would be the usual shape.
df = pd.DataFrame(data, index=columns)
print(df.shape)

# Load the persisted classifier and report the most likely class with its
# probability in percent (rounded to 2 decimals).
ddmodel = joblib.load('diabetesModel.pkl')
pred = ddmodel.predict_proba(df)
#print(pred)
predClass = pred.argmax()
predproba = round((float(pred[0][predClass]) * 100), 2)
print(predClass, predproba)
| cnikoro/dms_with_semantic_technology | DiabetesDiagnosisProject/diagnose.py | diagnose.py | py | 586 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.