id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1808992 | <filename>emumbaproject/middleware.py<gh_stars>0
"""
Contains all custom middleware classes written for todofehrist app.
"""
import logging
class LoggingRequestResponse:
    """
    Django middleware that logs every request/response pair passing
    through the todofehrist RESTful endpoints.
    """
    def __init__(self, get_response):
        # Next middleware / view callable in the Django processing chain.
        self.get_response = get_response

    def __call__(self, request):
        """
        Invoked by Django once per request; forwards the request down the
        chain, then records both objects' attribute dictionaries on the
        root logger (configured in settings.py) before returning.
        """
        response = self.get_response(request)
        log_entry = {"request": request.__dict__, "response": response.__dict__}
        logging.info(log_entry)
        return response
| StarcoderdataPython |
3398369 | <reponame>pavva94/DeepLearningProject
# -*- coding: utf-8 -*-
"""DeepComedy.ipynb
Automatically generated by Colaboratory.
# DeepComedy: AI Generated Divine Comedy
Author: **<NAME>, <NAME>**
This Notebook contains a **text generator RNN** that was trained on the **Divina Commedia** (the *Divine Comedy*) by **<NAME>**.
The structure is extremely complex: the poem is composed of three Cantiche, each Cantica contains 33 Canti made up of Terzine, each Terzina is composed of three verses, each verse has 11 syllables, and the rhymes follow the chained terza rima scheme **A-B-A B-C-B C-D-C**.
The final goal of this project is to rewrite one Canto.
"""
# Commented out IPython magic to ensure Python compatibility.
import time
import re
import numpy as np
import pandas as pd
# %tensorflow_version 2.x
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Attention, Flatten, Input, BatchNormalization
from tensorflow.keras.activations import elu, relu, softmax
from tensorflow.keras.metrics import categorical_accuracy, sparse_categorical_crossentropy, categorical_crossentropy
from matplotlib import pyplot as plt
"""# Preliminaries Steps
## Import and initial cleaning
We delete all the special character, numbers and brackets to keep a uniform version of the text.
We also remove the title of each Canto and each introductory text at his start.
Moreover we remove the last row of each Canto to have only terzine.
"""
# Read the whole Divina Commedia into a single string.
with open( "DivinaCommedia.txt", 'r', encoding="utf8") as file:
    divina_commedia = file.read()
# Replace rare accented characters with their most common equivalents, so
# the character vocabulary (and hence the model's output layer) stays small.
divina_commedia = divina_commedia.replace("ä", "a")
divina_commedia = divina_commedia.replace("é", "è")
divina_commedia = divina_commedia.replace("ë", "è")
divina_commedia = divina_commedia.replace("Ë", "E")
divina_commedia = divina_commedia.replace("ï", "i")
divina_commedia = divina_commedia.replace("Ï", "I")
divina_commedia = divina_commedia.replace("ó", "ò")
divina_commedia = divina_commedia.replace("ö", "o")
divina_commedia = divina_commedia.replace("ü", "u")
divina_commedia = divina_commedia.replace("(", "-")
divina_commedia = divina_commedia.replace(")", "-")
# Strip digits (verse numbers), bracketed editorial lines, and Canto titles.
divina_commedia = re.sub(r'[0-9]+', '', divina_commedia)
divina_commedia = re.sub(r'\[.*\r?\n', '', divina_commedia)
divina_commedia = re.sub(r'.*Canto.*\r?\n', '', divina_commedia)
divina_commedia = re.sub(r'.*?\n\n\n\n', "", divina_commedia) # remove the last row of each Canto, it's alone and can ruin the generation on correct terzine
# divina_commedia = divina_commedia.replace(" \n", "\n") # with this i lose the "terzina": results are not so exciting
#divina_commedia = divina_commedia.replace(" \n", "<eot>") # end of terzina
#divina_commedia = divina_commedia.replace("\n", "<eor>")
print(divina_commedia[1:1000])
# Check length of the cleaned text
print(len(divina_commedia))
"""## Vocabulary and Char2Idx
Creation of the vocabulary and of a numerical id for each character in the Comedy
"""
# Store unique characters into a dict with numerical encoding
unique_chars = list(set(divina_commedia))
unique_chars.sort() # to make sure you get the same encoding at each run
# Store them in a dict, associated with a numerical index
char2idx = { char[1]: char[0] for char in enumerate(unique_chars) }
"""## Encoding
Encode each character with a numerical id
"""
def numerical_encoding(text, char_dict):
    """Map every character of ``text`` to its index in ``char_dict`` and
    return the result as a 1-D numpy array."""
    encoded = np.array([char_dict[symbol] for symbol in text])
    return encoded
# Let's see what will look like
print("{}".format(divina_commedia[276:511]))
print("\nbecomes:")
print(numerical_encoding(divina_commedia[276:511], char2idx))
"""# Processing Data for DanteRNN
We need to generate the input for our RNN, the input sequence and an output sequence needs to be of equal length, in which each character is shifted left of one position.
For example, the first verse:
> Nel mezzo del cammin di nostra vita
would be translated in a train sequence as:
`Nel mezzo del cammin di nostra vit`
be associated with the target sequence:
`el mezzo del cammin di nostra vita`
Train and target sets are fundamentally the same matrix, with the train having the last row removed, and the target set having the first removed.
"""
# Apply it on the whole Comedy
encoded_text = numerical_encoding(divina_commedia, char2idx)
print(encoded_text[311:600])
def get_text_matrix(sequence, len_input):
    """Stack every sliding window of length ``len_input`` taken from
    ``sequence`` into the rows of a 2-D (float) matrix."""
    n_windows = len(sequence) - len_input
    windows = np.empty((n_windows, len_input))
    for row in range(n_windows):
        windows[row, :] = sequence[row:row + len_input]
    return windows
len_text = 150
text_matrix = get_text_matrix(encoded_text, len_text)
print(text_matrix.shape)
print("100th train sequence:\n")
print(text_matrix[ 100, : ])
print("\n\n100th target sequence:\n")
print(text_matrix[ 101, : ])
"""# Custom Loss
Evaluate the structure of the rhymes, based on the real scheme with the aim to recreate the same exact rhyme structure of the Comedy
"""
from functools import reduce
def divide_versi(y):
doppiozero = False
y_divided = [[]]
for ly in y:
ly = int(ly)
# I have to clean the list of punctuation marks,
# in chartoidx means the numbers 1 to 10 inclusive.
if ly in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
continue
else:
# if it is zero it means \ n so I add a new line
if ly is 0:
if not doppiozero:
y_divided.append([])
doppiozero = True
continue
y_divided[-1].append(ly)
doppiozero = False
if y_divided is not []:
if y[-1] != 0:
# since the last line does not end with 0 it means that it is incomplete and I remove it
y_divided.pop()
# i need to re check because maybe i pop the only one
if len(y_divided) != 0:
if len(y_divided[0]) < 3:
# if the first line is less than 4 I can't do anything about it so I delete it
y_divided.pop(0)
return y_divided
def rhymes_extractor(y_divided):
    """
    Detect rhyming verse pairs in a list of encoded verses.

    Two verses rhyme when their last two character ids coincide; following
    the terza rima scheme (ABA BCB CDC ...), each verse i is compared only
    against verses i+2 and i+4.  Returns a list of (i, j) index pairs.
    """
    rhymes = []
    n_verses = len(y_divided)
    for i, verse in enumerate(y_divided):
        ending = verse[-2:]
        for offset in (2, 4):
            j = i + offset
            if j < n_verses and y_divided[j][-2:] == ending:
                rhymes.append((i, j))
    return rhymes
def get_custom_loss(x_batch, y_batch):
    """
    Rhyme-structure loss between a batch of model outputs and targets.

    For each sequence: sample characters from the model logits, split the
    sampled and the target sequences into verses, extract the rhyme pairs
    of both, and encode the comparison into a fixed-length indicator
    vector (1.0 = rhyme matching Dante, 0.5 = rhyme absent in Dante but
    still a rhyme, 0.0 = Dante's rhyme missing / no rhymes at all).
    Returns the mean squared error between the generated indicators and
    an all-ones target, averaged over the batch.
    """
    # Max number of rhymes considered per sequence (a hyperparameter).
    max_rhymes = 4
    x_bin_tot = np.ones(shape=(len(x_batch), max_rhymes), dtype='float32')
    y_bin_tot = np.ones(shape=(len(x_batch), max_rhymes), dtype='float32')
    # Iterate over each sequence of the batch.
    for v in range(len(x_batch)):
        x = x_batch[v]
        y = y_batch[v]
        # The model returns a (len_text, vocab_size) matrix of logits:
        # sample one character id per position with a categorical draw,
        # then flatten the result into a plain list of ids.
        predicted_text = list(tf.random.categorical(x, num_samples=1).numpy())
        x = np.concatenate(predicted_text).ravel().tolist()
        # Split both sequences into verses.
        x_divided = divide_versi(x)
        y_divided = divide_versi(y)
        # Extract the rhyme structure of generated text and ground truth,
        # e.g. [(1, 3), (2, 4)] means verse 1 rhymes with 3, and 2 with 4.
        x_rhymes = rhymes_extractor(x_divided)
        y_rhymes = rhymes_extractor(y_divided)
        # Target vector is all ones: in Dante the rhymes are always there.
        y_bin = np.ones(max_rhymes, dtype='float32')
        # Generated vector starts at ones; zeroed entirely if no rhyme at
        # all was produced.
        x_bin = np.ones(max_rhymes, dtype='float32')
        if x_rhymes == []:
            x_bin = np.zeros(max_rhymes, dtype='float32')
        # BUG FIX: range(max_rhymes + 1) could index x_bin[max_rhymes],
        # one past the end of the indicator vector (IndexError whenever
        # more than max_rhymes rhymes were found); iterate exactly over
        # the max_rhymes slots.
        for i in range(max_rhymes):
            if i < len(y_rhymes):
                # Dante's rhyme missing from the generated text -> 0.0.
                if y_rhymes[i] not in x_rhymes:
                    x_bin[i] = 0.0
            # Generated rhyme absent in Dante -> 0.5 (half credit, to
            # encourage the network to produce rhymes at all).
            if i < len(x_rhymes) and x_rhymes[i] not in y_rhymes:
                x_bin[i] = 0.5
        # Store the per-sequence indicator vectors.
        x_bin_tot[v] = x_bin
        y_bin_tot[v] = y_bin
    # MSE over the indicator vectors, averaged across the batch.
    r = tf.keras.losses.mean_squared_error(y_bin_tot, x_bin_tot)
    return np.mean(r)
"""# Training Model
At this point, we can specify the RNN architecture with all its hyperparameters.
## Parameters
"""
# size of vocabulary (number of distinct characters)
vocab_size = len(char2idx)
# size of mini batches during training
batch_size = 200 # 100
# size of training subset at each epoch
subset_size = batch_size * 100
# vector size of char embeddings
embedding_size = 200 # 200 250
# units of the two stacked LSTM layers (second is twice the first)
lstm_unit_1 = 2048
lstm_unit_2 = 4096
# debug variables: use a smaller network while debugging
debug_model = False
if debug_model:
    lstm_unit_1 = 1024
    lstm_unit_2 = 2048
dropout_value = 0.5
hidden_size = 256 # for Dense() layers 250
n_epochs = 75
learning_rate = 0.001 # 0.0001
"""## Metrics"""
def perplexity_metric(loss):
    """Perplexity = e^(cross-entropy loss); lower means the model is less
    'surprised' by the data."""
    perplexity = tf.exp(loss)
    return perplexity
"""## Custom learning rate"""
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """
    Transformer-style warm-up schedule: the rate is scaled by
    1/sqrt(d_model) and follows the minimum of a decay term
    (step ** -0.75) and a linear warm-up ramp.
    """
    def __init__(self, d_model, warmup_steps=10):
        super(CustomSchedule, self).__init__()
        # Stored as float32 so it composes with TF math ops.
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        decay_term = tf.math.rsqrt(step ** 1.5)
        warmup_term = step * ((self.warmup_steps + 10) ** -1.3)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay_term, warmup_term)
# Plot the custom warm-up schedule over the training horizon.
d_model = 500
learning_rate_custom_1 = CustomSchedule(d_model)
plt.plot(learning_rate_custom_1(tf.range(n_epochs, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
# Alternative: staircase exponential decay (10% every 35 steps).
learning_rate_custom_2 = tf.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.001,
    decay_steps=35,
    decay_rate=0.90,
    staircase=True)
plt.plot(learning_rate_custom_2(tf.range(n_epochs, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
"""Optimizer selected: Adamax"""
# The exponential-decay schedule is the one actually used.
optimizer = tf.keras.optimizers.Adamax(learning_rate=learning_rate_custom_2)
"""## Architecture"""
# Input Layer: variable-length sequences of character ids.
X = Input(shape=(None, ), batch_size=batch_size)
# Embedding Layer (L2-regularized embeddings).
embedded = Embedding(vocab_size, embedding_size,
                     batch_input_shape=(batch_size, None),
                     embeddings_regularizer=tf.keras.regularizers.L2()
                     )(X)
# Dense layer on top of the embeddings.
embedded = Dense(embedding_size, relu)(embedded)
# First LSTM: returns the full sequence plus its final hidden/cell state.
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_1,return_sequences=True,return_state=True)(embedded)
encoder_output = BatchNormalization()(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Dense layer
encoder_output = Dense(embedding_size, activation='relu')(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Duplicate-concat of the first LSTM's hidden state so its width matches
# the second LSTM (lstm_unit_2 == 2 * lstm_unit_1); used as [h, c] seed.
initial_state_double = [tf.concat([hidden_state, hidden_state], 1), tf.concat([hidden_state, hidden_state], 1)]
# Second LSTM, initialized with the doubled state.
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_2,
                                                return_sequences=True,
                                                return_state=True)(encoder_output, initial_state=initial_state_double)
encoder_output = BatchNormalization()(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Dense layer
encoder_output = Dense(hidden_size, activation='relu')(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Prediction Layer: one logit per vocabulary character (softmax applied
# inside the from_logits loss).
Y = Dense(units=vocab_size)(encoder_output)
# Compile model
model = Model(inputs=X, outputs=Y)
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True), optimizer=optimizer)
print(model.summary())
"""## Training"""
# Best (lowest) custom loss seen so far, and the epoch it occurred in.
min_custom_loss = 1.0 # max value for the custom loss
min_custom_epoch = 0 # epoch of minimum custom loss
def train_on_batch(x, y, min_custom_loss):
    """
    Run one optimization step on a single mini-batch.

    Returns (total_loss, scce, custom_loss, perplexity, updated_min_custom_loss)
    and saves the model to best_model.h5 whenever the custom (rhyme) loss
    improves on the best value seen so far.
    """
    with tf.GradientTape() as tape:
        # Model output: per-position logits (presumably shape
        # (batch_size, len_text, vocab_size) — TODO confirm).
        y_predicted = model(x)
        scce = tf.keras.losses.sparse_categorical_crossentropy(y, y_predicted, from_logits = True)
        # The custom rhyme loss returns a plain float (not a tensor of the
        # same shape), so it is simply added to the cross-entropy.
        custom = get_custom_loss(y_predicted, y)
        current_loss = tf.reduce_mean(scce + custom)
    gradients = tape.gradient(current_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    perp = perplexity_metric(tf.reduce_mean(scce))
    # Check for the best model using the custom loss: done here because
    # this is where the model object is in scope to be saved.
    if custom < min_custom_loss:
        min_custom_loss = custom
        model.save("best_model.h5", overwrite=True)
    return current_loss, scce, custom, perp, min_custom_loss
loss_history = []
custom_loss_history = []
perplexity_history = []
for epoch in range(n_epochs):
    start = time.time()
    # Take random subsets of train and target rows; row i+1 of the text
    # matrix is row i shifted by one character, hence sample+1 as target.
    sample = np.random.randint(0, text_matrix.shape[0]-1, subset_size)
    sample_train = text_matrix[ sample , : ]
    sample_target = text_matrix[ sample+1 , : ]
    for iteration in range(sample_train.shape[0] // batch_size):
        take = iteration * batch_size
        x = sample_train[ take:take+batch_size , : ]
        y = sample_target[ take:take+batch_size , : ]
        current_loss, scce, custom, perplexity, new_min_custom_loss = train_on_batch(x, y, min_custom_loss)
        # Record info about a new minimum of the custom loss.
        if new_min_custom_loss < min_custom_loss:
            min_custom_loss = new_min_custom_loss
            min_custom_epoch = epoch
        loss_history.append(current_loss)
        custom_loss_history.append(custom)
        perplexity_history.append(perplexity)
    print("{}. \t Total-Loss: {} \t Custom-Loss: {} \t Perplexity: {} \t Time: {} sec/epoch".format(
        epoch+1, current_loss.numpy(), custom, perplexity, round(time.time()-start, 2)))
# Persist the final model to Google Drive after training.
model.save(F"/content/gdrive/My Drive/DeepComedyModels/deep_comedy_custom_loss_01_62char.h5")
"""## Graphs"""
# Total loss and custom loss share the x-axis on twin y-axes.
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Total Loss', color=color)
ax1.plot(loss_history, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Custom Loss', color=color)  # we already handled the x-label with ax1
ax2.plot(custom_loss_history, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout()  # otherwise the right y-label is slightly clipped
plt.show()
print("The min custom loss is at iteration: {}".format(min_custom_epoch*1000))
# Perplexity over iterations.
plt.plot(perplexity_history)
plt.xlabel("Iterations")
plt.ylabel("Perplexity")
plt.show()
"""# Generative Model
At this point, let's check how the model generates text. In order to do it, we must make some changes to my RNN architecture above.
First, we must change the fixed batch size. After training, we want to feed just one sentence into the network to make it continue the character sequence. We will feed a string into the model, make it predict the next character, update the input sequence, and repeat the process until a long generated text is obtained. Because of this, the succession of input sequences is now different from the training session, in which portions of text were sampled randomly. We now have to set `stateful = True` in the `LSTM()` layer, so that each LSTM cell will keep in memory the internal state from the previous sequence. With this we make the model remember sequential information better while generating text.
We will instantiate a new `generator` RNN with these new features, and transfer the trained weights of my `RNN` into it.
## Architecture
"""
# Input Layer: same architecture as the training model, but batch size 1
# and stateful LSTMs so the internal state carries over between calls.
X = Input(shape=(None, ), batch_size=1)
embedded = Embedding(vocab_size, embedding_size)(X)
embedded = Dense(embedding_size, relu)(embedded)
# First LSTM, stateful for step-by-step generation.
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_1,
                                                return_sequences=True,
                                                return_state=True,
                                                stateful=True)(embedded)
encoder_output = BatchNormalization()(encoder_output)
encoder_output = Dropout(dropout_value)(encoder_output)
encoder_output = Dense(embedding_size, activation='relu')(encoder_output)
# Duplicate-concat of the first LSTM's hidden state to match lstm_unit_2.
initial_state_double = [tf.concat([hidden_state, hidden_state], 1), tf.concat([hidden_state, hidden_state], 1)]
# Second LSTM, also stateful.
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_2,
                                                return_sequences=True,
                                                return_state=True,
                                                stateful=True)(encoder_output, initial_state=initial_state_double)
encoder_output = BatchNormalization()(encoder_output)
encoder_output = Dropout(dropout_value)(encoder_output)
encoder_output = Dense(hidden_size, activation='relu')(encoder_output)
encoder_output = Dropout(dropout_value)(encoder_output)
Y = Dense(units=vocab_size)(encoder_output)
# Compile model
generator = Model(inputs=X, outputs=Y)
generator.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True), optimizer=optimizer)
print(generator.summary())
"""## Loading weights"""
# Import trained weights into the generator: either from the best
# checkpoint on disk, or straight from the in-memory training model.
load_file = False
if load_file:
    generator.load_weights("best_model.h5")
else:
    generator.set_weights(model.get_weights())
"""## Generating methods"""
def generate_text(start_string, model, num_generate = 1000, temperature = 1.0):
    """
    Autoregressively generate ``num_generate`` characters after the seed
    ``start_string``.  Lower ``temperature`` makes the sampling more
    conservative; higher values make it more adventurous.
    """
    # Encode the seed string as a (1, len) batch of character ids.
    seed_ids = [char2idx[symbol] for symbol in start_string]
    input_eval = tf.expand_dims(seed_ids, 0)
    # Inverse mapping: numerical id -> character.
    idx2char = {index: char for char, index in char2idx.items()}
    generated_chars = []
    # Clear the stateful LSTM memory before starting a new generation.
    model.reset_states()
    for _ in range(num_generate):
        logits = tf.squeeze(model(input_eval), 0)
        # Temperature scaling before sampling from the categorical
        # distribution over the vocabulary.
        scaled = logits / temperature
        next_id = tf.random.categorical(scaled, num_samples=1)[-1, 0].numpy()
        # Feed only the last sampled character back in (the stateful LSTMs
        # remember the rest of the context).
        input_eval = tf.expand_dims([next_id], 0)
        generated_chars.append(idx2char[next_id])
    return start_string + ''.join(generated_chars)
"""## Text generation"""
# Let's feed the first lines of the Inferno and compare temperatures:
start_string = """
Nel mezzo del cammin di nostra vita
mi ritrovai per una selva oscura,
chè la diritta via era smarrita.
"""
for t in [0.1, 0.2, 0.3, 0.5, 1.0]:
    print("####### TEXT GENERATION - temperature = {}\n".format(t))
    print(generate_text(start_string, generator, num_generate = 1000, temperature = t))
    print("\n\n\n")
# Exam mode for 1 Canto so 33 terzine. 4000 characters to write
start_inferno = """
Nel mezzo del cammin di nostra vita
mi ritrovai per una selva oscura,
chè la diritta via era smarrita.
"""
start_purgatorio = """
Per correr miglior acque alza le vele
omai la navicella del mio ingegno,
che lascia dietro a se mar si crudele;
"""
start_paradiso = """
La gloria di colui che tutto move
per l'universo penetra, e risplende
in una parte più e meno altrove.
"""
start_new = """
"""
# Generate a full Canto from the Inferno incipit and time it.
start = time.time()
generated = generate_text(start_inferno, generator, num_generate = 7000, temperature = 0.1)
print("Time to generate {} characters: {} sec".format(7000, round(time.time()-start, 2)))
print(generated)
"""## Save generated Canto to file for Plagiarism Test and Metrics"""
with open("generated.txt", "w+") as text_file:
    text_file.write(generated)
"""# Plagiarism Test
Include the file **ngrams_plagiarism.py** downloaded from Virtuale
This method needs two files; we called them generated.txt (the same used for the Metrics) and Inferno.txt (the first Canto of the Inferno).
"""
from ngrams_plagiarism import ngrams_plagiarism
gen = open('generated.txt').read()
truth = open('Inferno.txt').read()
ngrams_plagiarism(gen, truth)
"""# Metrics
Include the content of the folder **Deep Comedy Metrics** downloaded from Virtuale.
This method needs one file:
* generated.txt: the file generated by the network
with UTF-8 Encoding!
"""
!python3 main.py
"""# Custom loss used for debug and explanation"""
#@title
#@DEBUG CUSTOM LOSS
# Hard-coded debug fixtures: x simulates a generated batch of character
# ids; y is the corresponding ground truth shifted by one character.
x = [[49, 46, 36, 44, 49, 32, 48, 36, 1, 45, 1, 35, 51, 36, 1, 45, 1, 50,
      48, 36, 1, 46, 36, 48, 1, 49, 36, 30, 5, 0, 44, 45, 44, 1, 42, 32,
      1, 37, 45, 48, 50, 51, 44, 32, 1, 35, 40, 1, 46, 48, 40, 43, 32, 1,
      52, 32, 34, 32, 44, 50, 36, 5, 0, 44, 45, 44, 1, 35, 36, 34, 40, 43,
      32, 49, 5, 1, 47, 51, 32, 36, 1, 49, 51, 44, 50, 1, 46, 32, 51, 46,
      36, 48, 51, 43, 1, 14, 36, 40, 5, 1, 0, 0, 32, 35, 35, 40, 43, 32,
      44, 35, 60, 5, 1, 43, 32, 1, 34, 45, 44, 50, 48, 45, 1, 32, 42, 1,
      43, 45, 44, 35, 45, 1, 36, 48, 48, 32, 44, 50, 36, 0, 42, 40, 34, 36,
      44, 55, 32, 1, 35, 40],
     [42, 1, 34, 45, 44, 49, 40, 38, 42, 40, 45, 1, 44, 36, 42, 1, 47, 51,
      32, 42, 36, 1, 45, 38, 44, 36, 1, 32, 49, 46, 36, 50, 50, 45, 0, 34,
      48, 36, 32, 50, 45, 1, 58, 1, 52, 40, 44, 50, 45, 1, 46, 48, 40, 32,
      1, 34, 39, 36, 1, 52, 32, 35, 32, 1, 32, 42, 1, 37, 45, 44, 35, 45,
      5, 1, 0, 0, 46, 36, 48, 60, 1, 34, 39, 36, 1, 32, 44, 35, 32, 49,
      49, 36, 1, 52, 36, 48, 4, 1, 42, 45, 1, 49, 51, 45, 1, 35, 40, 42,
      36, 50, 50, 45, 0, 42, 32, 1, 49, 46, 45, 49, 32, 1, 35, 40, 1, 34,
      45, 42, 51, 40, 1, 34, 39, 4, 32, 35, 1, 32, 42, 50, 36, 1, 38, 48,
      40, 35, 32, 0, 35, 40,]]
y = [[46, 36, 44, 49, 32, 48, 36, 1, 45, 1, 35, 51, 36, 1, 45, 1, 50, 48,
      36, 1, 46, 36, 48, 1, 49, 36, 40, 5, 0, 44, 45, 44, 1, 42, 32, 1,
      37, 45, 48, 50, 51, 44, 32, 1, 35, 40, 1, 46, 48, 40, 43, 32, 1, 52,
      32, 34, 32, 44, 50, 36, 5, 0, 44, 45, 44, 1, 35, 36, 34, 40, 43, 32,
      49, 5, 1, 47, 51, 32, 36, 1, 49, 51, 44, 50, 1, 46, 32, 51, 46, 36,
      48, 51, 43, 1, 14, 36, 40, 5, 1, 0, 0, 32, 35, 35, 40, 43, 32, 44,
      35, 60, 5, 1, 43, 32, 1, 34, 45, 44, 50, 48, 45, 1, 32, 42, 1, 43,
      45, 44, 35, 45, 1, 36, 48, 48, 32, 44, 50, 36, 0, 42, 40, 34, 36, 44,
      55, 32, 1, 35, 40, 1], [ 1, 34, 45, 44, 49, 40, 38, 42, 40, 45, 1, 44, 36, 42, 1, 47, 51, 32,
      42, 36, 1, 45, 38, 44, 36, 1, 32, 49, 46, 36, 50, 50, 45, 0, 34, 48,
      36, 32, 50, 45, 1, 58, 1, 52, 40, 44, 50, 45, 1, 46, 48, 40, 32, 1,
      34, 39, 36, 1, 52, 32, 35, 32, 1, 32, 42, 1, 37, 45, 44, 35, 45, 5,
      1, 0, 0, 46, 36, 48, 60, 1, 34, 39, 36, 1, 32, 44, 35, 32, 49, 49,
      36, 1, 52, 36, 48, 4, 1, 42, 45, 1, 49, 51, 45, 1, 35, 40, 42, 36,
      50, 50, 45, 0, 42, 32, 1, 49, 46, 45, 49, 32, 1, 35, 40, 1, 34, 45,
      42, 51, 40, 1, 34, 39, 4, 32, 35, 1, 32, 42, 50, 36, 1, 38, 48, 40,
      35, 32, 0, 35, 40, 49,] ]
'''
EXPERIMENT
CUSTOM LOSS
'''
from functools import reduce
def divide_versi(y):
    """
    Split a flat sequence of character ids into verses (lists of ids).

    Debug/experiment duplicate of the earlier ``divide_versi``: id 0
    encodes '\\n' and terminates a verse (consecutive 0s are collapsed);
    ids 1-10 encode punctuation marks and are dropped.  A trailing
    incomplete verse and a too-short leading fragment are discarded.
    """
    doppiozero = False
    y_divided = [[]]
    for ly in y:
        ly = int(ly)
        # Ids 1..10 are punctuation marks in char2idx: skip them.
        # (Membership test used because element-wise comparison does not
        # work with Tensors.)
        if ly in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
            continue
        else:
            # BUG FIX: was "ly is 0" — identity comparison with an int
            # literal (SyntaxWarning since 3.8); use equality.
            if ly == 0:
                # 0 means '\n': open a new verse, collapsing blank lines.
                if not doppiozero:
                    y_divided.append([])
                doppiozero = True
                continue
            y_divided[-1].append(ly)
            doppiozero = False
    # BUG FIX: was "y_divided is not []", an identity check that is always
    # True; test emptiness by truthiness instead.
    if y_divided:
        if y[-1] != 0:
            # The last verse does not end with 0: incomplete, remove it.
            y_divided.pop()
    # Re-check: the pop above may have removed the only element.
    if len(y_divided) != 0:
        if len(y_divided[0]) < 3:
            # A leading fragment shorter than 3 ids is unusable: drop it.
            y_divided.pop(0)
    return y_divided
def rhymes_extractor(y_divided):
    """
    Extract the rhyme scheme from a list of encoded verses.

    Two verses rhyme when their last two character ids coincide; per the
    terza rima scheme (ABA BCB CDC) each verse i is checked only against
    verses i+2 and i+4.  Returns a list of (i, j) rhyming index pairs.
    """
    rhymes = []
    total = len(y_divided)
    for i, verse in enumerate(y_divided):
        # The verse ending is approximated by its last two character ids.
        ending = verse[-2:]
        for step in (2, 4):
            j = i + step
            if j < total and y_divided[j][-2:] == ending:
                rhymes.append((i, j))
    return rhymes
def get_custom_loss(x_batch, y_batch):
    """
    Debug version of the rhyme loss: identical logic to the training
    version, but prints every intermediate step, and expects ``x_batch``
    to already contain character ids (the categorical sampling step is
    commented out).
    """
    # x_batch has one row of character ids per sequence of the batch.
    max_rhymes = 4
    print("Shape di x_batch e y_batch")
    print((len(x_batch), len(x_batch[0])))
    print((len(y_batch), len(y_batch[0])))
    x_bin_tot = np.ones(shape=(len(x_batch), max_rhymes), dtype='float32')
    y_bin_tot = np.ones(shape=(len(x_batch), max_rhymes), dtype='float32')
    # Loop over the sequences of the batch by index.
    # for (x, y) in zip(x_batch, y_batch):  # does not work with Tensors
    for v in range(len(x_batch)):
        x = x_batch[v]
        y = y_batch[v]
        # In the real loss the model logits are sampled here; the debug
        # input is already a list of ids, so sampling stays commented out.
        #predicted_text = list(tf.random.categorical(x, num_samples=1).numpy())
        #x = np.concatenate(predicted_text).ravel().tolist()
        # Split both sequences into verses.
        x_divided = divide_versi(x)
        y_divided = divide_versi(y)
        print("Divisione in versi di x_batch e y_batch")
        print(x_divided)
        print(y_divided)
        # NOTE: cannot assert len(x_divided) == len(y_divided) because the
        # generated text may contain errors and hence more or fewer verses.
        # assert len(x_divided) == len(y_divided)
        x_rhymes = rhymes_extractor(x_divided)
        y_rhymes = rhymes_extractor(y_divided)
        print("Rime dei versi di x_batch e y_batch")
        print(x_rhymes)
        print(y_rhymes)
        # e.g. [(1, 3), (2, 4)]: verses 1 and 3 rhyme, and so do 2 and 4.
        # TODO with two whole terzine we could score 3-line rhymes [aBaBcB]
        # Target vector is all ones: in Dante the rhymes are always there.
        y_bin = np.ones(max_rhymes, dtype='float32')
        # Generated vector starts at ones; zeroed if no rhyme was produced.
        x_bin = np.ones(max_rhymes, dtype='float32')
        if x_rhymes == []:
            x_bin = np.zeros(max_rhymes, dtype='float32')
        # BUG FIX: range(max_rhymes + 1) could write x_bin[max_rhymes], one
        # past the end of the indicator vector (IndexError); iterate over
        # the max_rhymes slots only.
        for i in range(max_rhymes):
            if i < len(y_rhymes):
                # Dante's rhyme missing from the generated text -> 0.0.
                if y_rhymes[i] not in x_rhymes:
                    x_bin[i] = 0.0
            # Generated rhyme absent in Dante -> 0.5 (half credit).
            if i < len(x_rhymes) and x_rhymes[i] not in y_rhymes:
                x_bin[i] = 0.5
        print("Vettore che rappresenta il confronto delle rime tra il generato e Dante dei versi di x_batch e y_batch \n y è sempre 1 mentre il generato ha 1 se la rima c'è in dante o 0.5 se non c'è ")
        print(x_bin)
        print(y_bin)
        # Store the per-sequence rhyme-indicator vectors.
        x_bin_tot[v] = x_bin
        y_bin_tot[v] = y_bin
    print("Matrice dei vettori su cui eseguo la MSE: (x,y)")
    print(x_bin_tot)
    print(y_bin_tot)
    r = tf.keras.losses.mean_squared_error(y_bin_tot, x_bin_tot)
    print("Risultato della MSE:")
    print(r)
    print("Loss finale fatta con la media della MSE")
    print(np.mean(r))
    # MSE over the indicator vectors, averaged across the batch.
    return np.mean(r)
custom_loss = get_custom_loss(x,y) | StarcoderdataPython |
6532904 | from pyexlatex.models.item import SimpleItem
from pyexlatex.models.section.base import TextAreaMixin
from pyexlatex.typing import PyexlatexItems
class Closing(TextAreaMixin, SimpleItem):
    """
    Represents the LaTeX letter ``closing`` command, wrapping the passed
    content as its argument.
    """
    # LaTeX command name forwarded to SimpleItem.
    name = 'closing'

    def __init__(self, closing: PyexlatexItems):
        # NOTE(review): content is forwarded to SimpleItem/TextAreaMixin
        # initialization — presumably rendered as the command's argument;
        # confirm against the pyexlatex base-class docs.
        self.closing = closing
        super().__init__(self.name, self.closing)
| StarcoderdataPython |
6435604 | from .api_exception import APIException
from .response_handler import ResponseHandler
from .response_wrapper import ResponseWrapper
from .body_wrapper import BodyWrapper
from .query_operations import QueryOperations
| StarcoderdataPython |
9795100 | <filename>blotter/blotter.py<gh_stars>10-100
import pandas as pd
import numpy as np
import json
import re
from collections import namedtuple
from array import array
from . import marketdata
class _Event():
# this class manages the actions which are performed on the Holdings class
# and encapsulates all the data required for this action. The class
# provides a consistent interface regardless of whether the event is
# reconstituted from a string representation in a trading log using
# Blotter.read_log or created from within the context of Blotter. The class
# also manages the implementation details of turning events into a string
# representation for appending to an event log.
def __init__(self, event_type, data):
# data is dictionary of parameters for corresponding Holdings function
self._type = event_type
self._data = data
if 'timestamp' not in data or not isinstance(data['timestamp'], pd.Timestamp): # NOQA
raise ValueError("data must contain key 'timestamp' with "
"pandas.Timestamp as a value")
@classmethod
def fromstring(cls, event_str):
parts = event_str.split("|")
event_type = parts[0]
data = cls.parse_str_data(parts[1])
return cls(event_type, data)
@staticmethod
def parse_str_data(str_data):
data = json.loads(str_data)
data['timestamp'] = pd.Timestamp(data['timestamp'])
if 'prices' in data:
data['prices'] = pd.Series(data['prices'])
return data
@property
def type(self):
"""
Returns the event type
"""
return self._type
@property
def data(self):
"""
Returns the data associated to the event
"""
return self._data
def __repr__(self):
# sorted dictionary representation except with timestamp as first entry
str_repr = (self.type +
'|{"timestamp": ' + json.dumps(str(self.data['timestamp']))
)
keys = list(self.data.keys())
keys.sort()
for key in keys:
if key == 'timestamp':
continue
jkey = json.dumps(key)
if key == 'prices':
str_repr = (str_repr + ", " + jkey + ": " +
self.data[key].to_json())
else:
str_repr += ', ' + jkey + ': ' + json.dumps(self.data[key])
str_repr += '}'
return str_repr
# Per-instrument static metadata record: currency, margin rate, contract
# multiplier, commission, and whether the instrument is an FX pair.
_metadata = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
                                    'commission', 'isFX'])
class Blotter():
    """
    This is a financial blotter which is used for maintaing positions and PnL
    in conjunction with backtesting a strategy historically.
    The main purpose is for calculating historical PnL for both open and
    closed trades as well as maintaining marked to market holdings.
    This class maintains market pricing data for marking holdings to market as
    well as interest rate data for charging interest and margin on open
    positions. The class manages interest and margin charges based on a user
    defined time of day and also provides functionality for repatriating closed
    PnL to the user defined base currency on a daily user defined time.
    """
    def __init__(self,
                 prices=None,
                 interest_rates=None,
                 accrual_time=pd.Timedelta(16, unit="h"),
                 eod_time=pd.Timedelta(16, unit="h"),
                 sweep_time=pd.Timedelta(16, unit="h"),
                 base_ccy="USD",
                 margin_charge=0.015):
        """
        Parameters
        ----------
        prices: str
            path to folder of data for all traded instruments. Refer to
            blotter.MarketData for more information on file format. Names for
            FX instruments should be in the form 'XXXYYY' where XXX is the
            first currency and YYY is the second currency, e.g. AUDUSD, USDCAD
        interest_rates: str
            Path to csv of data for all traded interest bearing instruments.
            These rates should be daily annualized rates. Refer to
            blotter.MarketData for more information on file format.
        accrual_time: pandas.Timedelta
            Time of day which interest is charged/paid for interest bearing
            instruments (FX) as well as margin costs, if no automatic charges
            desired set to None
        eod_time: pandas.Timedelta
            End of day time used for automatic PnL calculation, if no automatic
            PnL calculation desired set to None
        sweep_time: pandas.Timedelta
            Automatic time used for sweeping PnL calculation, if no
            automatic sweeping is desired set to None
        base_ccy: str
            Base currency of blotter, used when sweeping pnl to base currency
        margin_charge: float
            Interest rate spread above daily base currency interest rate which
            is paid on margin, e.g. if daily interest rate is 0.5% annualized,
            margin_charge=0.015 implies daily balance paid on margin is
            (0.005 + 0.015)/365
        """
        # (time of day, action name) pairs replayed by automatic_events()
        actions = []
        if accrual_time is not None:
            actions.append((accrual_time, "INTEREST"))
            actions.append((accrual_time, "MARGIN"))
        if eod_time is not None:
            actions.append((eod_time, "PNL"))
        if sweep_time is not None:
            actions.append((sweep_time, "PNL_SWEEP"))
        self._actions = actions
        self._base_ccy = base_ccy
        self._margin_charge = margin_charge
        self._event_log = []
        # dictionaries of instrument level data
        self._gnrc_meta = dict()
        self._instr_map = dict()
        # only paths are stored here; actual market data is loaded lazily by
        # connect_market_data() so a Blotter can be built for pure log replay
        self._prices = prices
        self._rates = interest_rates
        self._holdings = Holdings()
        # expose the Holdings history accessors directly on the Blotter
        self.get_holdings_history = self._holdings.get_holdings_history
        self.get_instrument_pnl_history = self._holdings.get_instrument_pnl_history # NOQA
        self.get_pnl_history = self._holdings.get_pnl_history  # NOQA
def connect_market_data(self):
    """
    Initialize MarketData class, should be called before calling trade()
    """
    # deferred initialization: loading prices/rates is separated from
    # __init__ so a Blotter can be constructed without data files
    self._mdata = marketdata.MarketData(prices=self._prices,
                                        rates=self._rates)
@property
def event_log(self):
    """
    Returns the event log of events which have acted on the Blotter
    """
    # list of serialized event strings in dispatch order; this is the
    # same content written out by write_log()
    return self._event_log
def define_generic(self, generic, ccy=None, margin=0, multiplier=1,
                   commission=0, isFX=False):
    """
    Define meta data for a tradeable instruments associated with a generic.

    Parameters
    ----------
    generic: str
        Name for the instrument type: used for looking up meta data, e.g.
        we would define 'CL' and the associated meta data for these type of
        contracts
    ccy: str
        Currency that contract is traded in, default is base currency of
        blotter
    margin: float
        Amount of margin required for contract
    multiplier: int
        The multiplier to multiply the price by to get the notional
        amount of the instrument, should only be applied to futures
    commission: float
        Commission charged for trading the instrument
    isFX: boolean
        Indicate if this instrument is an FX instrument. Affects
        whether cash balances are updated for calculating payable
        interest.
    """
    # default the denomination currency to the blotter base currency
    if ccy is None:
        ccy = self._base_ccy
    self._gnrc_meta[generic] = _metadata(ccy, margin, multiplier,
                                         commission, isFX)
def map_instrument(self, generic, instrument):
    """
    Define a mapping between tradeable instruments and generics, used for
    looking up meta data on instruments. Note in the case of a single
    instrument such as a currency pair the generic and the instrument
    can be the same value.

    Parameters
    ----------
    generic: str
        Name for the instrument type used for looking up meta data, e.g. we
        would define 'CL' and the associated meta data for these type of
        contracts
    instrument: str
        Tradeable instrument name
    """
    # many instruments may map to one generic; meta data is stored per
    # generic in self._gnrc_meta
    self._instr_map[instrument] = generic
def trade(self, timestamp, instrument, quantity, price, ntc_price=None):
    """
    Record an instrument trade in the Blotter. This will also make a
    call to automatic_events to trigger all automatic events up to the time
    of this trade.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    instrument: str
        Tradeable instrument name
    quantity: int
        Number of instruments traded
    price: float
        Price of trade
    ntc_price: float
        No tcost price. Generally mid price but can be
        anything, this value is stored for downstream analytics but is
        unused in any calculations
    """
    # side effects since trade() also manages time state for when to sweep
    # pnl and charge/pay interest/margin
    self.automatic_events(timestamp)
    # coerce user input so downstream serialization is type stable
    if ntc_price:
        ntc_price = float(ntc_price)
    self._trade(timestamp, instrument, int(quantity), float(price),
                ntc_price)
def _trade(self, timestamp, instrument, quantity, price, ntc_price=None):
    """Create and dispatch the events for a single trade (no clock update)."""
    # create and dispatch trade events
    events = self._create_trade(timestamp, instrument, quantity, price,
                                ntc_price)
    self.dispatch_events(events)
def _create_trade(self, timestamp, instrument, quantity, price,
                  ntc_price=None):
    """
    Build the list of events representing a trade: one TRADE event plus,
    for FX instruments, two CASH events updating the interest bearing
    cash balance of each leg's currency.

    Returns an empty list when 0 quantity is traded; this is done for
    convenience so the user can call trade() without first validating the
    input is non zero and alternatively calling automatic_events().
    """
    if quantity == 0:
        return []
    generic = self._instr_map[instrument]
    metadata = self._gnrc_meta[generic]
    # build the TRADE payload once instead of duplicating it across
    # branches; ntc_price is optional analytics metadata and is only
    # serialized when provided
    trade_data = {"timestamp": timestamp, "instrument": instrument,
                  "price": price, "quantity": quantity,
                  "multiplier": metadata.multiplier,
                  "commission": metadata.commission, "ccy": metadata.ccy}
    if ntc_price:
        trade_data["ntc_price"] = ntc_price
    events = [_Event("TRADE", trade_data)]
    if metadata.isFX:
        # an FX trade moves two cash balances: pay the counter currency
        # and receive the pair's base currency (the first three letters
        # of the instrument name, e.g. AUD in AUDUSD)
        counter_quantity = -quantity * price
        cash_counter = _Event("CASH", {"timestamp": timestamp,
                                       "ccy": metadata.ccy,
                                       "quantity": counter_quantity})
        base_ccy = instrument[:3]
        cash_base = _Event("CASH", {"timestamp": timestamp,
                                    "ccy": base_ccy, "quantity": quantity})
        events.extend([cash_counter, cash_base])
    return events
def automatic_events(self, timestamp):
"""
Update the current time of the Blotter, triggering all scheduled events
between previous clock time and new clock time such as interest
charges, margin charges, PnL calculations and PnL sweeps. See
create_events() for more information on the type of events.
Parameters
----------
timestamp: pandas.Timestamp
Time to update clock to and tigger internal events up until
"""
current_time = self._holdings.timestamp
# first event so there is nothing automatic that needs to be done
if current_time is pd.NaT:
return
actions = self._get_actions(current_time, timestamp, self._actions)
for ts, action in actions.iteritems():
events = self.create_events(ts, action)
self.dispatch_events(events)
@staticmethod
def _get_actions(old_ts, new_ts, action_times):
# calculates the actions between two datetimes and returns them as
# ordered pandas.Series, filters out weekends since assumption is
# nothing happens here. This could be extended to allow more advanced
# user defined filtering based on things such as holiday calendars.
# action_times is a list of tuples with Timedelta and string for action
# type
if not action_times:
return pd.Series([])
timestamps = pd.date_range(old_ts, new_ts, normalize=True)
wknds = (timestamps.dayofweek == 5) + (timestamps.dayofweek == 6)
timestamps = timestamps[~wknds]
actions = []
for ts, ac_type in action_times:
ac_ts = timestamps + ts
ac_ts = ac_ts[ac_ts > old_ts]
ac_ts = ac_ts[ac_ts <= new_ts]
# this will give an empty DataFrame is ac_ts is an empty
# DateTimeIndex resulting in no actions as desired
actions.append(pd.Series(ac_type, index=ac_ts))
actions = pd.concat(actions, axis=0)
actions.sort_index(inplace=True)
return actions
def create_events(self, timestamp, action):
"""
Create internal event for updating Holdings class contained within
Blotter instance. Manages creation of INTEREST, MARGIN, PNL and
PNL_SWEEP events based on internal Blotter data.
This method is exposed to allow users greater flexibility in calling
internal events however by default this is automatically called through
automatic_events() and best not called unless user understands what
they are doing.
MARGIN event charges interest in the base currency based on the margin
required for the current open positions at a rate equal to the base
currency interest rate + the margin_charge.
INTEREST events charges interest on the outstanding cash balances in
different currencies based on the current interest rates.
PNL event calculates and saves the PNL based on current market prices
for all open positions.
PNL_SWEEP event repatriates closed PnL for non base currencies to the
base currency based on the current FX rates.
Parameters
----------
timestamp: pandas.Timestamp
Time to create event for
action: str
Type of event to create, supports INTEREST, MARGIN, PNL and
PNL_SWEEP
Returns
-------
A list of events for being dispatched using dispatch_events()
"""
events = []
if action is "INTEREST":
cashs = self._holdings.get_cash_balances()
if not cashs.empty:
rates = self._mdata.rates.loc[timestamp, cashs.index]
rates = self._adjusted_rates(timestamp, rates)
interests = cashs * rates
for ccy, qty in interests.iteritems():
ev = _Event("INTEREST", {"timestamp": timestamp,
"ccy": ccy,
"quantity": qty})
events.append(ev)
elif action is "MARGIN":
# calculate total margin charge
base_hlds_value = np.abs(self.get_holdings_value(timestamp))
int_rate = self._mdata.rates.loc[timestamp, self._base_ccy]
mrate = int_rate + self._margin_charge
mrate = self._adjusted_rates(timestamp, mrate)
charge = 0
for instr, value in base_hlds_value.iteritems():
metadata = self._gnrc_meta[self._instr_map[instr]]
charge += mrate * metadata.margin * value
if charge:
ev = _Event("INTEREST", {"timestamp": timestamp,
"ccy": self._base_ccy,
"quantity": charge})
events.append(ev)
elif action is "PNL":
assets = self._holdings.get_assets()
if assets:
prices = self._get_prices(timestamp, assets)
else:
prices = pd.Series([])
ev = _Event("PNL", {"timestamp": timestamp, "prices": prices})
events.append(ev)
elif action is "PNL_SWEEP":
assets = self._holdings.get_assets()
if assets:
prices = self._get_prices(timestamp, assets)
else:
prices = None
pnls = self._holdings.get_pnl(timestamp, prices, cache=False)
pnl_sweep = pnls.loc[:, 'closed pnl']
for ccy, pnl in pnl_sweep.iteritems():
if ccy is self._base_ccy:
continue
if pnl != 0:
conv_rate = self._get_fx_conversion(timestamp, ccy)
base_pnl = pnl * conv_rate
ev = _Event("PNL_SWEEP", {"timestamp": timestamp,
"ccy1": ccy, "quantity1": -pnl,
"ccy2": self._base_ccy,
"quantity2": base_pnl})
events.append(ev)
else:
raise NotImplementedError("Unknown event type")
return events
@staticmethod
def _adjusted_rates(timestamp, interest_rates):
# adjust rates to actual daily payable amount
interest_rates = interest_rates / 365
if timestamp.dayofweek == 4:
# pay interest for Friday, Saturday, Sunday
interest_rates = interest_rates * 3
return interest_rates
def dispatch_events(self, events):
"""
Update Blotter._holdings based on event. See create_events() for the
type of events supported. This method is best not called directly
unless user understands what is going on.
Parameters
----------
events: list
list of _Event to dispatch
"""
for event in events:
ev_str = str(event)
if event.type == "TRADE":
event.data.pop("ntc_price", None)
self._holdings.record_trade(**event.data)
elif event.type == "CASH":
self._holdings.update_cash(**event.data)
elif event.type == "INTEREST":
self._holdings.charge_interest(**event.data)
elif event.type == "PNL":
self._holdings.get_instrument_pnl(**event.data)
elif event.type == "PNL_SWEEP":
self._holdings.sweep_pnl(**event.data)
else:
raise NotImplementedError("Unknown event type")
self._event_log.append(ev_str)
def _get_prices(self, timestamp, instruments):
prices = []
for instr in instruments:
prices.append(self._mdata.prices[instr].loc[timestamp])
prices = pd.concat(prices, axis=0)
return prices
def get_holdings_value(self, timestamp):
    """
    Return pandas.Series of values of holdings converted to Blotter base
    currency sorted by index name. Note that for each currency for which
    instruments are traded in, FX rates must be available for the given
    timestamp in order to convert. E.g. if Blotter base ccy is USD, and an
    instrument traded is in AUD, then AUDUSD or USDAUD must be available in
    the prices data folder.

    Parameters
    ----------
    timestamp: pandas.Timestamp which corresponds to the time for
    marking to market blotter holdings

    Returns
    -------
    A pandas.Series with an index of instruments sorted in lexographical
    order and values representing the market value of the positions in the
    base currency at the time given by the timestamp
    """
    if self._holdings.timestamp > timestamp:
        # NOTE(review): message renders without a space, as
        # "...afterHoldings.timestamp" — left unchanged here since it is
        # a runtime string
        raise ValueError('Must mark to market holdings after'
                         'Holdings.timestamp')
    hlds = self._holdings.get_holdings()
    if not hlds:
        return pd.Series()
    base_hlds_value = []
    for ccy in hlds:
        # value = position * price * FX-to-base conversion
        prices_ccy = self._get_prices(timestamp, hlds[ccy].index)
        conv_rate = self._get_fx_conversion(timestamp, ccy)
        value = hlds[ccy] * prices_ccy * conv_rate
        base_hlds_value.append(value)
    base_hlds_value = pd.concat(base_hlds_value, axis=0)
    base_hlds_value.sort_index(inplace=True)
    return base_hlds_value
def get_trades(self):
    """
    Return quantity, multiplier, price, no tcost price, instrument,
    currency, and FX conversion rate of executed trades in order of
    execution.

    The quantity is the number of instruments traded. The multiplier is any
    multiplier associated with futures contracts, this should be 1 for FX.
    The price is the executed price of the trade. The costless price is an
    estimate of the price for execution without any transaction costs,
    provided by the user at the time of execution. This value will be NA if
    the user did not provide a value. The instrument is the name of the
    instrument traded. The currency is the denomination of the instrument
    and th FX conversion rate is the FX rate prevailing at the time to
    convert through multiplication the instrument currency to the base
    Blotter currency.

    Returns
    -------
    A pandas.DataFrame indexed by timestamp with columns ['instrument',
    'quantity', 'multiplier', 'price', 'ntc_price', 'ccy', 'fx_to_base'].
    Index has name 'timestamp'.
    """
    trade_data = []
    for ev in self.event_log:
        # raw string: "\|" inside a normal literal is an invalid escape
        # sequence (SyntaxWarning on modern Python)
        match = re.match(r"TRADE\|", ev)
        if match:
            data = _Event.parse_str_data(ev[match.end():])
            trade_data.append(data)
    trades = pd.DataFrame(trade_data)
    trades.set_index("timestamp", inplace=True)
    rates = []
    # timestamp can be repeated to unpack and iterate through
    for t, ccy in zip(trades.index, trades.loc[:, "ccy"].values):
        rates.append(self._get_fx_conversion(t, ccy))
    trades.loc[:, "fx_to_base"] = rates
    order = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
             'ccy', 'fx_to_base']
    trades = trades.loc[:, order]
    return trades
def get_instruments(self):
    """
    Get current set of instruments.

    Returns
    -------
    A pandas.Series indexed and lexicographically sorted by instrument
    name with numpy.int values representing the number of instruments
    """
    hlds = self._holdings.get_holdings()
    if not hlds:
        return pd.Series()
    instr_nums = []
    for ccy in hlds:
        instr_num = hlds[ccy]
        for ast in instr_num.index:
            # holdings are stored scaled by the contract multiplier, so
            # divide it back out to recover the instrument count
            gnrc = self._instr_map[ast]
            multiplier = self._gnrc_meta[gnrc].multiplier
            instr_num.loc[ast] = instr_num.loc[ast] / multiplier
        instr_nums.append(instr_num)
    instr_nums = pd.concat(instr_nums, axis=0)
    instr_nums.sort_index(inplace=True)
    instr_nums = instr_nums.astype(int)
    return instr_nums
def _get_fx_conversion(self, timestamp, ccy):
# return rate to multiply through be to convert given ccy
# to base ccy
ccy_pair1 = ccy + self._base_ccy
ccy_pair2 = self._base_ccy + ccy
if ccy == self._base_ccy:
conv_rate = 1
elif ccy_pair1 in self._mdata.prices:
conv_rate = self._mdata.prices[ccy_pair1].loc[timestamp]
conv_rate = conv_rate.values
elif ccy_pair2 in self._mdata.prices:
conv_rate = 1 / self._mdata.prices[ccy_pair2].loc[timestamp]
conv_rate = conv_rate.values
else:
raise(KeyError(ccy_pair1, ccy_pair2))
return float(conv_rate)
def write_log(self, fp):
"""
Write log of blotter events to file. This can be used for
reconstituting blotter. An example output file would look like
TRADE|{"timestamp": "2016-12-01 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 53.46, "quantity": 100}
TRADE|{"timestamp": "2016-12-02 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 55.32, "quantity": 100}
Parameters
----------
fp: str
path to write log to
""" # NOQA
with open(fp, 'w') as thefile:
for line in self._event_log:
thefile.write("%s\n" % line)
def read_log(self, fp):
    """
    Reconstitute a Blotter object from an event log. Note that this will
    only replay all the events, meta data and market data sources will
    need to be reloaded as well. An example input file would look like

    TRADE|{"timestamp": "2016-12-01 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 53.46, "quantity": 100, "multiplier": 1}
    TRADE|{"timestamp": "2016-12-02 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 55.32, "quantity": 100, "multiplier": 1}

    Parameters
    ----------
    fp: str
        path to read log from
    """ # NOQA
    # replaying through dispatch_events() re-appends every event to the
    # event log as a side effect
    events = self._create_log_events(fp)
    self.dispatch_events(events)
@staticmethod
def _create_log_events(fp):
    """Parse an event log file into a list of _Event instances."""
    events = []
    with open(fp, 'r') as thefile:
        for line in thefile:
            # each line is of the form "TYPE|{json data}"
            events.append(_Event.fromstring(line))
    return events
def write_meta(self, fp):
    """
    Write meta data of associated with instruments in a Blotter to a file.
    This can be used later to reconstitute a Blotter. An example output
    file file is

    {"ccy": "CAD", "margin": 0.1, "multiplier": 100, "commission": 2.5, "isFX": false}|{"CL": ["CLU16", "CLZ16"]}
    {"ccy": "CAD", "margin": 0, "multiplier": 1, "commission": 2.5, "isFX": true}|{"USDCAD": ["USDCAD"]}

    Parameters
    ----------
    fp: str
        path to write meta data
    """ # NOQA
    # invert instrument->generic into generic->[instruments]
    # https://stackoverflow.com/questions/483666/python-reverse-invert-a-mapping#485368 # NOQA
    inv_map = {}
    for k, v in self._instr_map.items():
        inv_map[v] = inv_map.get(v, [])
        inv_map[v].append(k)
    # sort for deterministic, diffable output
    for key in inv_map:
        inv_map[key].sort()
    keys = list(self._gnrc_meta.keys())
    keys.sort()
    with open(fp, 'w') as myfile:
        for key in keys:
            # one line per generic: meta namedtuple as JSON, then the
            # generic -> instruments mapping, separated by "|"
            meta_data_str = json.dumps(self._gnrc_meta[key]._asdict())
            map_str = '{"' + str(key) + '": ' + json.dumps(inv_map[key]) + '}' # NOQA
            line = meta_data_str + "|" + map_str + "\n"
            myfile.write(line)
def read_meta(self, fp):
    """
    Reconstitute the meta data of a Blotter from a file. Reads as input
    files output by write_meta(). File formats should be of the following
    form

    Parameters
    ----------
    fp: str
        Path to file. File should have the following format

        {"ccy": "CAD", "margin": 0.1, "multiplier": 100, "commission": 2.5,"isFX": false}|{"CL": ["CLU16", "CLZ16"]}
        {"ccy": "CAD", "margin": 0, "multiplier": 1, "commission": 2.5, "isFX": true}|{"USDCAD": ["USDCAD"]}
        ...
    """ # NOQA
    with open(fp, 'r') as thefile:
        for line in thefile:
            meta, mapping = line.split("|")
            # the parsed meta dict keys line up with define_generic()'s
            # keyword arguments; the generic name is added below
            meta_dict = json.loads(meta)
            mapping_dict = json.loads(mapping)
            generic = list(mapping_dict.keys())[0]
            meta_dict['generic'] = generic
            self.define_generic(**meta_dict)
            instrs = mapping_dict[generic]
            for instr in instrs:
                self.map_instrument(generic, instr)
class Holdings():
    """
    The Holdings class is designed to manage holdings data and PnL data. The
    class stores instrument level holdings data on a per currency basis and
    calculates PnL on a per currency basis given instrument prices. The class
    is primarily designed to manage these aspects from within the context
    of the Blotter class however can also provide this functionality stand
    alone.

    The main features of the Holdings class include:

    - Store per currency per instrument holindgs
    - Calculate per currency per instrument PnL
    - Maintain interest payable cash balances per currency
    - Maintain charged/payed interest per currency
    - Provide functionality to sweep PnL from one currency to another
    - Return historical holdings
    - Return historical PnL

    Calculating PnL is done on a as of current holdings basis, there is no
    functionality for looking up historical holdings for calculating historic
    PnL.

    Note: For interest bearing instruments, when users are using the Holdings
    class standalone, users are responsible for calling charge_interest() at
    appropriate intervals and with appropriate interest rates to ensure that
    the PnL calculations are correct. This is handled by the Blotter class.

    All actions on the Holdings class must follow in time sequential order.
    """
    def __init__(self):
        # per currency dict of per instrument holding records
        self._position_data_per_ccy = {}
        # per currency interest bearing cash balances
        self._cash = {}
        # per currency cumulative interest charged/paid
        self._interest = {}
        # per currency cumulative PnL swept between currencies
        self._pnl_sweep = {}
        # per currency per instrument cached PnL history
        self._pnl_data = {}
        # event clock: all operations must be at or after this time
        self._timestamp = pd.NaT
@staticmethod
def _make_empty_holding():
holding = namedtuple('holding', ['timestamp', 'trade', 'position',
'avg_pos_price', 'fees',
'avg_sell_price', 'total_sell',
'avg_buy_price', 'total_buy'])
return holding(array('d'), array('d'), array('d'), array('d'),
array('d'), array('d'), array('d'), array('d'),
array('d'))
@staticmethod
def _make_empty_qty():
cash = namedtuple('cash', ['timestamp', 'amount'])
return cash(array('d'), array('d'))
@staticmethod
def _make_empty_hist_pnl():
pnl_hist = namedtuple('hist_pnl', ['time', 'pnl'])
return pnl_hist([], [])
@property
def timestamp(self):
    """
    Returns the current timestamp of the Holdings
    """
    # the event clock; pd.NaT until the first recorded operation
    return self._timestamp
def get_holdings(self):
"""
Get the current amount of instrument holdings. This includes any
multiplier associated with the instrument.
Returns
-------
dictionary
Dictionary with currencies as keys and pandas.Series as values
where that Series contain the most recent holdings for each of the
holdings in a given currency
"""
pos_data = self._position_data_per_ccy
positions = dict()
for ccy in pos_data:
ccy_pos_data = pos_data[ccy]
idx = list(ccy_pos_data)
idx.sort()
h = pd.Series(index=idx)
for asst in ccy_pos_data:
h.loc[asst] = ccy_pos_data[asst].position[-1]
# filter closed positions
h = h.loc[h != 0]
if not h.empty:
positions[ccy] = h
return positions
def get_holdings_history(self):
    """
    Get the full history of the amount of holdings for each instrument
    traded (this includes any multiplier associated with the instrument).

    Returns
    -------
    dictionary
        Dictionary with currencies as keys and dictionary of pandas.Series
        as values where the keys of the nested dictionary are instrument
        names and the pandas.Series is a timeseries of holdings
    """
    pos_data = self._position_data_per_ccy
    positions = dict()
    for ccy in pos_data:
        ccy_pos_data = pos_data[ccy]
        ccy_positions = dict()
        for asst in ccy_pos_data:
            pos_array = ccy_pos_data[asst]
            # stored timestamps are POSIX floats; convert back to
            # pandas.Timestamps for the returned series
            ts = self._to_timestamp(pos_array.timestamp)
            # copy=True so callers cannot mutate internal state
            pos = pd.Series(pos_array.position, index=ts, copy=True)
            ccy_positions[asst] = pos
        positions[ccy] = ccy_positions
    return positions
@staticmethod
def _to_timestamp(array):
# convert array of floats representing POSIX timestamps to a
# list of Timestamps
return [pd.Timestamp.fromtimestamp(i) for i in array]
def get_assets(self):
"""
Get the names of instruments held.
Returns
-------
list
Sorted list of strings of current assets which have holdings
"""
pos_data = self._position_data_per_ccy
asts = []
for ccy in pos_data:
ccy_pos_data = pos_data[ccy]
for asst in ccy_pos_data:
if ccy_pos_data[asst].position[-1] != 0:
asts.append(asst)
asts.sort()
return asts
def record_trade(self, timestamp, instrument, price, quantity, multiplier,
                 commission, ccy):
    """
    Record an instrument trade in Holdings. Trades must be time ordered.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    instrument: str
        Tradeable instrument name
    price: float
        Price of trade
    quantity: int
        Number of instruments traded.
    multiplier: int
        A number which when multiplied by the price gives the notional
        value of a contract. E.g. for trading an ES contract,
        the multipler is 50, therefore 1 ES contract with a price of 2081
        the notional value of the contract is 2081 x 50$.
    commission: float
        total commission for the trade
    ccy: str
        currency of instrument denomination
    """
    if quantity == 0:
        raise ValueError("Cannot trade 0 quantity of an instrument")
    if np.isnan(quantity):
        raise ValueError("Cannot trade nan quantity of an instrument")
    if multiplier <= 0 or not isinstance(multiplier, int):
        raise ValueError("multiplier must be positive integer")
    # buys and sells each maintain their own running average price and
    # cumulative total, selected here by trade direction
    if quantity > 0:
        price_attr = "avg_buy_price"
        total_attr = "total_buy"
    elif quantity < 0:
        price_attr = "avg_sell_price"
        total_attr = "total_sell"
    # holdings are stored scaled by the contract multiplier
    amount = quantity * multiplier
    if ccy in self._position_data_per_ccy:
        ccy_holdings = self._position_data_per_ccy[ccy]
    else:
        ccy_holdings = {}
        self._position_data_per_ccy[ccy] = ccy_holdings
    if instrument in ccy_holdings:
        holdings = ccy_holdings[instrument]
    else:
        holdings = self._make_empty_holding()
        ccy_holdings[instrument] = holdings
    # deals with first access being non existent
    prev_hldings = self._get_last(holdings, 'position')
    avg_price = self._get_last(holdings, price_attr)
    total = self._get_last(holdings, total_attr)
    if self._timestamp > timestamp:
        raise ValueError('Operations on Holdings must follow in time'
                         ' sequential order')
    # timestamps are stored as POSIX floats in 'd' arrays
    holdings.timestamp.append(timestamp.timestamp())
    holdings.position.append(prev_hldings + amount)
    holdings.trade.append(amount)
    self._timestamp = timestamp
    fees = self._get_last(holdings, "fees", default=0)
    holdings.fees.append(commission + fees)
    # update the direction-specific running average price and total
    aamnt = np.abs(amount)
    new_price = (total * avg_price + aamnt * price) / (total + aamnt)
    getattr(holdings, price_attr).append(new_price)
    getattr(holdings, total_attr).append(total + aamnt)
    # when adding to position or flipping position sign update
    # average price
    ADDING = np.sign(amount) == np.sign(prev_hldings)
    NEW_POS = np.sign(amount + prev_hldings) not in {np.sign(prev_hldings), 0} # NOQA
    if ADDING:
        # weighted average of the existing position price and this trade
        a_price = holdings.avg_pos_price[-1]
        new_pos_price = (a_price * prev_hldings + price * amount) / (prev_hldings + amount) # NOQA
        holdings.avg_pos_price.append(new_pos_price)
    elif NEW_POS:
        # position flipped sign: the new position's basis is this price
        holdings.avg_pos_price.append(price)
    else:
        # position reduced (or closed): basis unchanged
        holdings.avg_pos_price.append(holdings.avg_pos_price[-1])
def _get_last(self, obj, attr, default=0):
try:
value = getattr(obj, attr)[-1]
except IndexError:
value = default
return value
def update_cash(self, timestamp, ccy, quantity):
    """
    Update the amount of cash in a certain type of currency, used for
    charging interest on that balance.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    ccy: str
        currency of cash balance
    quantity: float
        Amount of cash
    """
    # delegates to the generic per-currency running-total updater
    self._update_property(timestamp, ccy, quantity, '_cash')
def charge_interest(self, timestamp, ccy, quantity):
    """
    Update the amount of interest charged in the account of a currency.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    ccy: str
        currency of interest charge/payment
    quantity: float
        Amount of interest
    """
    # delegates to the generic per-currency running-total updater
    self._update_property(timestamp, ccy, quantity, '_interest')
def sweep_pnl(self, timestamp, ccy1, quantity1, ccy2, quantity2):
    """
    Convert PnL from one currency to another. The user is
    responsible for ensuring that the implicit FX rates used are sensible.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    ccy1: str
        currency of first leg of sweep
    quantity1: float
        Amount of currency from first leg of sweep
    ccy2: str
        currency of second leg of sweep
    quantity2: float
        Amount of currency from second leg of sweep

    Examples
    --------
    >>> ts = pd.Timestamp('2016-12-01T10:00:00')
    >>> aud = 5000
    >>> usd = 5000 * 0.80
    >>> holder.sweep_pnl(ts, 'AUD', -aud, 'USD', usd)
    """
    # both legs share the same running-total mechanism; typically one
    # quantity is negative and the other is its FX-converted positive
    self._update_property(timestamp, ccy1, quantity1, '_pnl_sweep')
    self._update_property(timestamp, ccy2, quantity2, '_pnl_sweep')
def _update_property(self, timestamp, ccy, quantity, attr):
if self._timestamp > timestamp:
raise ValueError('Operations on Holdings must follow in time'
' sequential order')
attr_dict = getattr(self, attr)
if ccy in attr_dict:
field = attr_dict[ccy]
else:
field = self._make_empty_qty()
attr_dict[ccy] = field
prev_amnt = self._get_last(field, "amount", default=0)
self._timestamp = timestamp
field.amount.append(prev_amnt + quantity)
field.timestamp.append(timestamp.timestamp())
def get_cash_balances(self):
"""
Return a pandas.Series of the cash balances for each currency
"""
currencies = list(self._cash)
currencies.sort()
cashs = pd.Series(index=currencies)
for ccy in self._cash:
cashs.loc[ccy] = self._cash[ccy].amount[-1]
cashs = cashs.loc[cashs != 0]
return cashs
def get_instrument_pnl(self, timestamp, prices=None, cache=True):
    """
    Calculate and return pnl, closed pnl and open pnl for traded
    instruments in each currency.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of PnL calculation, used for caching the result
    prices: pandas.Series
        series of instrument prices for current holdings
    cache: boolean
        Cache this result for later retrieval and advance internal Holdings
        event clock

    Returns
    -------
    dictionary
        Dictionary with currencies as keys and pandas.DataFrame as values
        where the DataFrame contains columns
        ['pnl', 'closed pnl', 'open pnl'] and the index is the set of
        holdings of current instruments
    """
    # allows PnL calculation without having to pass dummy series of prices
    # when all positions are closed
    if prices is None:
        prices = pd.Series()
    if self._timestamp > timestamp:
        raise ValueError('Operations on Holdings must follow in time'
                         ' sequential order')
    pos_data = self._position_data_per_ccy
    pnls = dict()
    for ccy in pos_data:
        ccy_pos_data = pos_data[ccy]
        asts = list(ccy_pos_data)
        asts.sort()
        # gather the latest value of each tracked quantity per instrument
        pos = pd.Series(index=asts)
        fees = pd.Series(index=asts)
        avg_buy_price = pd.Series(index=asts)
        tot_buy = pd.Series(index=asts)
        avg_sell_price = pd.Series(index=asts)
        tot_sell = pd.Series(index=asts)
        avg_pos_price = pd.Series(index=asts)
        for asst in ccy_pos_data:
            ast_dat = ccy_pos_data[asst]
            pos.loc[asst] = self._get_last(ast_dat, 'position')
            fees.loc[asst] = self._get_last(ast_dat, 'fees')
            avg_buy_price[asst] = self._get_last(ast_dat, 'avg_buy_price')
            tot_buy[asst] = self._get_last(ast_dat, 'total_buy')
            avg_sell_price[asst] = self._get_last(ast_dat,
                                                  'avg_sell_price')
            tot_sell[asst] = self._get_last(ast_dat, 'total_sell')
            avg_pos_price[asst] = self._get_last(ast_dat, 'avg_pos_price')
        # this is required to avoid needing to pass in prices for
        # instruments with 0 current holdings but holdings historically
        asts_not0 = pos.loc[pos != 0].index
        prices_ccy = prices.loc[asts_not0]
        if len(asts_not0) == 0:
            pos_value = 0.0
            ccy_open_pnl = pd.Series(0.0, index=asts)
        else:
            # open pnl is marked against the average position price
            pos_value = pos.loc[asts_not0].mul(prices_ccy)
            ccy_open_pnl = pos.loc[asts_not0].mul(prices_ccy - avg_pos_price.loc[asts_not0]) # NOQA
        # total pnl = sale proceeds + mark to market of open positions
        # - purchase cost - fees; closed pnl is the remainder
        ccy_pnl = tot_sell * avg_sell_price + pos_value - avg_buy_price * tot_buy - fees # NOQA
        ccy_closed_pnl = ccy_pnl - ccy_open_pnl
        df_pnl = pd.concat([ccy_pnl, ccy_closed_pnl, ccy_open_pnl], axis=1)
        df_pnl.columns = ['pnl', 'closed pnl', 'open pnl']
        pnls[ccy] = df_pnl
    if cache:
        # append this snapshot to the per-instrument PnL history and
        # advance the event clock
        for ccy in pnls:
            instr_pnls = pnls[ccy]
            for instr in instr_pnls.index:
                instr_pnl = instr_pnls.loc[instr, :].tolist()
                if ccy in self._pnl_data:
                    ccy_pnl_datas = self._pnl_data[ccy]
                else:
                    ccy_pnl_datas = {}
                    self._pnl_data[ccy] = ccy_pnl_datas
                if instr in ccy_pnl_datas:
                    instr_pnl_data = ccy_pnl_datas[instr]
                else:
                    instr_pnl_data = self._make_empty_hist_pnl()
                    ccy_pnl_datas[instr] = instr_pnl_data
                instr_pnl_data.time.append(timestamp)
                instr_pnl_data.pnl.append(instr_pnl)
        self._timestamp = timestamp
    return pnls
def get_pnl(self, timestamp, prices=None, cache=True):
    """
    Calculate open, closed and total pnl in each currency where instruments
    are traded based on given prices.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of PnL calculation
    prices: pandas.Series
        series of instrument prices
    cache: boolean
        Cache this result for later retrieval and advance internal Holdings
        event clock

    Returns
    -------
    pandas.DataFrame
        DataFrame with columns ['pnl', 'closed pnl', 'open pnl'] and an
        index of currencies of instrument denominations. Note that this
        will return a row for each currency that an instrument has ever
        been traded in, even if the current PnL in the currency is all
        0's due to sweeps.
    """
    # allows PnL calculation without having to pass dummy series of prices
    # when all positions are closed
    if prices is None:
        prices = pd.Series()
    pnls = self.get_instrument_pnl(timestamp, prices, cache)
    # Report every currency that has instrument PnL, accrued interest or a
    # PnL sweep; sorted so the output row order is deterministic.
    ccys = list(set().union(pnls, self._interest, self._pnl_sweep))
    ccys.sort()
    ccy_pnls = pd.DataFrame(index=ccys,
                            columns=['pnl', 'closed pnl', 'open pnl'],
                            dtype='float64')
    for ccy in ccys:
        # A currency may appear only via interest or sweeps, in which case
        # it has no per-instrument PnL rows and the lookup raises KeyError.
        try:
            pnl_sums = pnls[ccy].sum()
        except KeyError:
            pnl_sums = pd.Series(0, index=['pnl', 'closed pnl',
                                           'open pnl'])
        if ccy in self._interest:
            interest = self._get_last(self._interest[ccy], 'amount')
        else:
            interest = 0
        if ccy in self._pnl_sweep:
            swept_pnl = self._get_last(self._pnl_sweep[ccy], 'amount')
        else:
            swept_pnl = 0
        # Interest and swept PnL are realized amounts: they adjust both the
        # total and the closed PnL but leave open PnL untouched.
        pnl_sums.loc['pnl'] = pnl_sums.loc['pnl'] + interest + swept_pnl
        pnl_sums.loc['closed pnl'] = (pnl_sums.loc['closed pnl'] +
                                      interest + swept_pnl)
        ccy_pnls.loc[ccy] = pnl_sums
    return ccy_pnls
def get_pnl_history(self):
    """
    Return open, closed and total PnL in each currency where instruments
    are traded based on cached values from previous calls to
    get_instrument_pnl

    Returns
    -------
    dictionary
        Dictionary of pandas.DataFrames where keys are currencies and the
        DataFrames have columns ['pnl', 'closed pnl', 'open pnl'] and
        index of timestamps
    """
    ccy_pnls = self.get_instrument_pnl_history()
    # Include currencies that only ever saw interest or sweeps.
    ccys = list(set().union(ccy_pnls, self._interest, self._pnl_sweep))
    ccys.sort()
    hist_pnls = dict()
    PNL_COLS = ['pnl', 'closed pnl', 'open pnl']

    def reindex(df, index):
        # Align a PnL frame onto a common timestamp index: carry the last
        # known value forward, and treat times before the first record as 0.
        df = df.reindex(index, method='ffill')
        df = df.fillna(value=0)
        return df

    for ccy in ccys:
        # --- 1) Sum per-instrument PnL onto a shared timestamp index. ---
        try:
            instr_pnls = ccy_pnls[ccy]
            instr_idx = pd.DatetimeIndex([])
            instrs = list(instr_pnls.keys())
            instrs.sort()
            for instr in instrs:
                instr_idx = instr_idx.union(instr_pnls[instr].index)
            instr_pnl_sum = reindex(instr_pnls[instrs[0]], instr_idx)
            for instr in instrs[1:]:
                pnl = reindex(instr_pnls[instr], instr_idx)
                instr_pnl_sum = instr_pnl_sum + pnl
        except KeyError:
            # Currency has no instrument PnL history.
            instr_pnl_sum = pd.DataFrame([], columns=PNL_COLS)
        # --- 2) Interest: realized, so it affects 'pnl' and 'closed pnl'. ---
        try:
            interest_data = self._interest[ccy]
            dts = self._to_timestamp(interest_data.timestamp)
            interest = pd.DataFrame(0, index=dts, columns=PNL_COLS)
            interest.loc[:, 'closed pnl'] = interest_data.amount
            interest.loc[:, 'pnl'] = interest_data.amount
            interest = interest.groupby(interest.index).last()
        except KeyError:
            interest = pd.DataFrame([], columns=PNL_COLS)
        # --- 3) Swept PnL: also realized. ---
        try:
            sweep_data = self._pnl_sweep[ccy]
            dts = self._to_timestamp(sweep_data.timestamp)
            sweep = pd.DataFrame(0, index=dts, columns=PNL_COLS)
            sweep.loc[:, 'closed pnl'] = sweep_data.amount
            sweep.loc[:, 'pnl'] = sweep_data.amount
            # multiple sweeps can happen at same time which all build on
            # each other so only last one is relevant
            sweep = sweep.groupby(sweep.index).last()
        except KeyError:
            sweep = pd.DataFrame([], columns=PNL_COLS)
        # --- 4) Combine all three components on the union of timestamps. ---
        idx = instr_pnl_sum.index.union(interest.index).union(sweep.index)
        pnl_ccy = (reindex(instr_pnl_sum, idx) + reindex(sweep, idx) +
                   reindex(interest, idx))
        hist_pnls[ccy] = pnl_ccy
    return hist_pnls
def get_instrument_pnl_history(self):
    """
    Return open, closed and total PnL in each currency for each traded
    instrument based on cached values from previous calls to
    get_instrument_pnl

    Returns
    -------
    dictionary
        Dictionary of dictionaries where the top level dictionary contains
        keys for each currency with historical PnL and the nested
        dictionaries map instruments to pandas.DataFrames with columns
        ['pnl', 'closed pnl', 'open pnl'] and an index of timestamps
    """
    columns = ['pnl', 'closed pnl', 'open pnl']

    def as_frame(cached):
        # One row per cached timestamp; `cached.pnl` holds
        # [pnl, closed pnl, open pnl] triples appended by the caching path.
        return pd.DataFrame(cached.pnl, index=cached.time, columns=columns)

    return {
        ccy: {instr: as_frame(cached)
              for instr, cached in ccy_data.items()}
        for ccy, ccy_data in self._pnl_data.items()
    }
| StarcoderdataPython |
# Sample product descriptions used as input documents for tokenization
# and POS/dependency experiments with spaCy.
coating_2500 = "An ultra-low DFT advanced coating system targeted for refinery crude unit and FCC slurry fouling by enhancing tube lubricity and reducing surface tension. Curran 2500 is designed for high temperature DCU, VDU and FCCU crude services, and can be applied to tube exchangers, P&F exchangers and distillation tower components. In independent lab testing this coating has exhibited excellent anti-fouling and anti-coking performance, and is resistant to thermal cycling. Designed to withstand extreme temperatures up to 1200°F. May be applied to heat exchanger tubes, plate & frame exchangers, tube sheets, channels, exchanger components and crude heaters. Can be applied in-situ."
coating_1500 = "Curran 1500 is an advanced two part (100% solids) epoxy coating designed specifically for high temperature immersion service in water, hydrocarbons, and process streams (up to 365 F, 185 C). This coating is an organic/inorganic hybrid, is suitable for immersion services subjected to “cold wall” exposure, and is machinable when fully cured. Can withstand multiple cycling and steam out events subjected to process equipment."

# Import English Language Class
from spacy.lang.en import English

# Create NLP Object (blank English pipeline: tokenizer only, no models)
nlp = English()

# Process a string of text with the nlp object
doc_2500 = nlp(coating_2500)
doc_1500 = nlp(coating_1500)

# Iterate over tokens in a Doc
# for token in doc_1500:
#     print(token.text)

# A Span is a slice of a Doc: tokens 15-39 of the 2500 description.
span = doc_2500[15:40]
# print(span)

# print([token.like_num for token in doc_2500])

import spacy

# Load English Model (statistical pipeline with tagger/parser); note this
# rebinds `nlp`, replacing the blank pipeline created above.
nlp = spacy.load("en_core_web_sm")

doc = nlp(str(span))

# for token in doc:
#     # Print the text and the predicted part-of speech tag
#     print(token.text, token.pos_, token.dep_, token.head.text)
#     print(".text - .pos_ - .dep_ - .head.text")

# for token in doc:
#     if token.dep_.endswith("mod"):
#         print(token.text, token.pos_, token.dep_, token.head.text)
#         print(".text - .pos_ - .dep_ - .head.text")

# for token in doc:
#     print(token.dep_.find("subj") == True)

# sentencizer = nlp.create_pipe("sentencizer")
# nlp.add_pipe(sentencizer)
# doc_2500 = nlp()

# Iterating Sentence by Sentence
# What exactly is a generator object?
# (doc.sents is a generator; the parser provides sentence boundaries.)
doc = nlp(coating_2500)
for i in doc.sents:
    print(i)
6524258 | '''
Changes in sales over time:
Line plots are designed to visualize the relationship between two numeric variables,
where each data values is connected to the next one. They are especially useful for
visualizing the change in a number over time since each time point is naturally connected
to the next time point. In this exercise, you'll visualize the change in avocado sales over
three years.
pandas has been imported as pd.
Instructions:
- Get the total number of avocados sold on each date. The DataFrame has two rows for
each date -- one for organic, and one for conventional. Save this as nb_sold_by_date.
- Create a line plot of the number of avocados sold.
- Show the plot.
--------------------------------------------------------------
avocados.head()
date type year avg_price size nb_sold
0 2015-12-27 conventional 2015 0.95 small 9.627e+06
1 2015-12-20 conventional 2015 0.98 small 8.710e+06
2 2015-12-13 conventional 2015 0.93 small 9.855e+06
3 2015-12-06 conventional 2015 0.89 small 9.405e+06
4 2015-11-29 conventional 2015 0.99 small 8.095e+06
---------------------------------------------------------------
'''
# Import matplotlib.pyplot with alias plt
import matplotlib.pyplot as plt
# Get the total number of avocados sold on each date
nb_sold_by_date = avocados.groupby("date")["nb_sold"].sum()
# Create a line plot of the number of avocados sold by date
nb_sold_by_date.plot(kind="line", rot=90)
# Show the plot
plt.show()
| StarcoderdataPython |
1778862 | <filename>testing/procedure_409_test.py
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
# Load data
import datetime
import json
from mldb import mldb
# Dataset definition: a mutable sparse dataset to hold the iris rows.
datasetConfig = {
    "type": "sparse.mutable",
    "id": "iris_dataset"
}

dataset = mldb.create_dataset(datasetConfig)
ts = datetime.datetime.now().isoformat(' ')

# Load the classic iris CSV: four numeric features plus a class label per row.
with open('./mldb/testing/dataset/iris.data') as f:
    for i, line in enumerate(f):
        cols = []
        line_split = line.split(',')
        if len(line_split) != 5:
            # Skip blank/malformed lines (e.g. the trailing newline at EOF).
            continue
        cols.append(["sepal length", float(line_split[0]), ts])
        cols.append(["sepal width", float(line_split[1]), ts])
        cols.append(["petal length", float(line_split[2]), ts])
        cols.append(["petal width", float(line_split[3]), ts])
        cols.append(["class", line_split[4], ts])
        dataset.record_row(str(i+1), cols)
dataset.commit()

# SVD training procedure over the four numeric columns; writes the column
# and row embeddings into two output datasets.
svd_procedure = "/v1/procedures/svd_iris"
svd_config = {
    'type' : 'svd.train',
    'params' :
    {
        "trainingData": {"from" : {"id": "iris_dataset"},
                         "select": '"petal width", "petal length", "sepal length", "sepal width"'
                         },
        "columnOutputDataset": {
            "type": "sparse.mutable",
            "id": "svd_iris_col"
        },
        "rowOutputDataset": {
            "id": "iris_svd_row",
            'type': "embedding"
        },
        "numSingularValues": 4,
        "numDenseBasisVectors": 2
    }
}

# Create the procedure, then run it once.
r = mldb.perform("PUT", svd_procedure, [], svd_config)
print(r)
print((r["statusCode"]))
r = mldb.perform("PUT", svd_procedure + "/runs/1", [], {})
if not 300 > r["statusCode"] >= 200:
    print(r)
    # NOTE(review): `request` appears to be injected by the MLDB test runner
    # — not defined in this file; confirm against the harness.
    request.set_return("FAILURE")
    print((r["statusCode"]))
else:
    request.set_return("success")
| StarcoderdataPython |
4880130 | from collections import deque
from itertools import chain, islice
def chunks(items, chunksize):
    """Turn generator sequence into sequence of chunks.

    Each yielded chunk is a lazy iterator over up to ``chunksize`` items.
    Once the caller advances to the next chunk, any unconsumed remainder
    of the previous chunk is drained so chunking stays aligned.
    """
    source = iter(items)
    while True:
        try:
            head = next(source)
        except StopIteration:
            return
        chunk = chain((head,), islice(source, chunksize - 1))
        yield chunk
        # Exhaust whatever the caller left unread before advancing.
        deque(chunk, maxlen=0)
| StarcoderdataPython |
1833121 | import os
import sys
# When frozen with PyInstaller the bundled data files live under
# sys._MEIPASS; otherwise resolve them relative to the working directory.
try:
    base_path = sys._MEIPASS
except Exception:
    base_path = os.path.abspath('.')

from flask import Flask, render_template
from flask_cors import CORS

from controllers.clipboard_history_controller import clipboard_history_controller

# All API routes are versioned under this prefix.
PREFIX = '/api/v1'

template_folder = os.path.join(base_path, 'templates')
static_folder = os.path.join(base_path, 'static')

app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)
app.config['JSON_SORT_KEYS'] = False  # keep API response key order as produced
app.secret_key = 'clipboard'
app.register_blueprint(clipboard_history_controller, url_prefix=PREFIX)
CORS(app)  # allow cross-origin calls from the UI


@app.route('/', methods=['GET'])
@app.route('/settings', methods=['GET'])
def index():
    """Serve the single-page app shell for both client-side routes."""
    return render_template('index.html')


if __name__ == '__main__':
    app.run(host='localhost', port=8001)
| StarcoderdataPython |
5029764 | import enum
class EventType(enum.Enum):
    """Kinds of conversation events.

    Numeric values mirror the wire-protocol identifiers and must not be
    renumbered.
    """
    EVENT_TYPE_UNKNOWN = 0
    EVENT_TYPE_REGULAR_CHAT_MESSAGE = 1
    EVENT_TYPE_SMS = 2
    EVENT_TYPE_VOICEMAIL = 3
    EVENT_TYPE_ADD_USER = 4
    EVENT_TYPE_REMOVE_USER = 5
    EVENT_TYPE_CONVERSATION_RENAME = 6
    EVENT_TYPE_HANGOUT = 7
    EVENT_TYPE_PHONE_CALL = 8
    EVENT_TYPE_OTR_MODIFICATION = 9
    EVENT_TYPE_PLAN_MUTATION = 10
    EVENT_TYPE_MMS = 11
    EVENT_TYPE_DEPRECATED_12 = 12  # retired value kept for wire compatibility
    EVENT_TYPE_OBSERVED_EVENT = 13
    EVENT_TYPE_GROUP_LINK_SHARING_MODIFICATION = 14
class ConversationType(enum.Enum):
    """Conversation kinds; values mirror the wire-protocol identifiers."""
    CONVERSATION_TYPE_UNKNOWN = 0
    CONVERSATION_TYPE_ONE_TO_ONE = 1
    CONVERSATION_TYPE_GROUP = 2
| StarcoderdataPython |
1821321 | from pycwr.io import read_auto
import matplotlib.pyplot as plt
import numpy as np
from pycwr.draw.RadarPlot import Graph
# Input radar volume (read_auto picks the parser from the file format).
file = r"C:\Users\zy\Desktop\HID\NUIST.20160707.001054.AR2"
NRadar = read_auto(file)

# Sweep (elevation) index to display.
num = 3
# Mask every dual-pol field where cross-correlation CC <= 0.9 — low CC
# typically indicates non-meteorological echo.
NRadar.fields[num]['dBZ'][:] = np.where(NRadar.fields[num].CC>0.9, NRadar.fields[num].dBZ, np.nan)
NRadar.fields[num]['KDP'][:] = np.where(NRadar.fields[num].CC>0.9, NRadar.fields[num].KDP, np.nan)
NRadar.fields[num]['ZDR'][:] = np.where(NRadar.fields[num].CC>0.9, NRadar.fields[num].ZDR, np.nan)
NRadar.fields[num]['CC'][:] = np.where(NRadar.fields[num].CC>0.9, NRadar.fields[num].CC, np.nan)

fig, ax = plt.subplots()
graph = Graph(NRadar)
str_="CC"  # field to plot
# PPI display of the CC field with range rings every 50 km.
graph.plot_ppi(ax, 3, str_, min_max=[0.85,1])
graph.add_rings(ax, [0, 50, 100, 150])
# NOTE(review): title says elevation 3.4° for sweep index 3 — confirm the
# sweep/elevation mapping for this radar file.
ax.set_title("(d) Cross correlation ratio, El : 3.4", fontsize=14, loc="left")
ax.set_xlabel("Distance From Radar In East (km)", fontsize=12)
ax.set_ylabel("Distance From Radar In North (km)", fontsize=12)
ax.set_xlim([-150, 150])
ax.set_ylim([-150, 150])
plt.savefig(r"C:\Users\zy\Desktop\HID\201607070010\%s.png"%str_, dpi=600)
plt.show()
1645981 | import sys
import os
def greeting(name):
    """Write a short salutation for *name* to stdout."""
    print(f'Hi, {name}')
def test_greeting(capfd):
    """greeting() writes exactly one line to stdout (captured via capfd)."""
    greeting('Brian')
    out, err = capfd.readouterr()
    assert out == 'Hi, Brian\n'
def test_multiline(capfd):
    """Consecutive calls accumulate in the capture buffer until read."""
    greeting('Brian')
    greeting('Nerd')
    out, err = capfd.readouterr()
    assert out == 'Hi, Brian\nHi, Nerd\n'
def test_disabling_capturing(capfd):
    """capfd.disabled() lets output through to the real stdout temporarily."""
    print('this output is captured')
    with capfd.disabled():
        # Printed directly to the terminal, bypassing pytest capture.
        print('output not captured')
    print('this output is also captured')
| StarcoderdataPython |
1882107 | <filename>bot/database.py
# -*- coding: utf-8 -*-
import logging
import logging.config
import random
import sqlite3
from enum import Enum
import phrases
# Configure logging from the shared ini file, then grab this module's logger.
logging.config.fileConfig("logging.ini")
logger = logging.getLogger("database")
## === Classes === ##
class Category(Enum):
"""Categories in the database"""
GREET = "3"
LEFT_SERVER = "5"
MENTION = "6,7"
ONLINE = "8"
SHUTDOWN = "9"
class Database(object):
    """ For reading and parsing lines in a SQLite database.

    Args:
        dbFile(unicode): The filepath of the database.
    """

    def __init__(self, db_file):
        self.db = db_file

    def _connection(self):
        """ Opens a new connection to the database file.

        Callers must close it (see the try/finally blocks below); the
        previous implementation opened connections and never closed them.
        """
        return sqlite3.connect(self.db)

    def get_column(self, header, table, maximum=None):
        """ Gets fields under a column header.

        Args:
            header(unicode): Name of column's header.
            table(unicode): Name of table.
            maximum(int, optional): Maximum amount of fields to fetch.

        Returns:
            fields(list): List of fields under header.
        """
        # SQL identifiers cannot be bound as parameters, so sanitize them.
        # `header` is now cleaned too, consistent with get_field/random_line.
        header = phrases.clean(header)
        table = phrases.clean(table)
        connection = self._connection()
        try:
            # Unwrap 1-tuples so fetchall() yields bare values.
            connection.row_factory = lambda cursor, row: row[0]
            c = connection.cursor()
            if maximum:
                c.execute(f"SELECT {header} FROM {table} LIMIT ?", [maximum])
            else:
                c.execute(f"SELECT {header} FROM {table}")
            fields = c.fetchall()
            c.close()
        finally:
            connection.close()
        return fields

    def get_field(self, field_id, header, table):
        """ Gets the field under the specified header by its primary key value.

        Args:
            field_id(int, str): Unique ID of line the field is in.
            header(unicode): Header of the field to fetch.
            table(unicode): Name of table to look into.

        Returns:
            The desired field, or None if the lookup failed.

        Examples:
            >>> get_field(123, "firstname", "kings")
            Adgar
        """
        header = phrases.clean(header)
        table = phrases.clean(table)
        field = None
        connection = self._connection()
        try:
            c = connection.cursor()
            statement = f"SELECT {header} FROM {table} WHERE id=?"
            logger.debug(statement)
            c.execute(statement, [field_id])
            try:
                field = c.fetchone()[0]
            except TypeError:
                # fetchone() returned None: no row with this primary key.
                logger.exception(f"ID '{field_id}' was not in table '{table}'")
            c.close()
        finally:
            connection.close()
        return field

    def get_ids(self, table, conditions=None, splitter=","):
        """ Gets the IDs that fit within the specified conditions.
        Gets all IDs if conditions is None.

        Args:
            table(unicode): Name of table to look into.
            conditions(dict, optional): Categories you want to filter the line by:
                {"header of categories 1": "category1,category2",
                 "header of category 2": "category3"}
                Multiple categories under a single header are separated
                with `splitter` (default is a comma).

        Returns:
            ids(list): List of IDs that match the categories.

        Examples:
            >>> get_ids({"type": "greeting"})
            [1, 2, 3, 5, 9, 15] # Any row that has the type "greeting".

            >>> get_ids({"type": "nickname,quip", "by": "Varric"})
            # Any row by "Varric" that has the type "nickname" or "quip".
            [23, 24, 25, 34, 37, 41, 42, 43]
        """
        table = phrases.clean(table)
        connection = self._connection()
        try:
            # Gets first element of each row for fetchall().
            connection.row_factory = lambda cursor, row: row[0]
            c = connection.cursor()
            if conditions:
                ## TODO: Add ability to specify comparison operator (e.g. =, <, LIKE, etc.)
                groups = []
                substitutes = []
                for con in conditions:
                    col = phrases.clean(con)
                    subconditions = conditions[con].split(splitter)
                    # OR together the alternatives under a single header...
                    groups.append(
                        "(" + " OR ".join([f"{col}=?"] * len(subconditions)) + ")"
                    )
                    substitutes.extend(subconditions)
                # ...and AND together the different headers.
                clause = "WHERE " + " AND ".join(groups)
                statement = f"SELECT id FROM {table} {clause}"
                logger.debug(f"(get_ids) Substitutes: {substitutes}")
                logger.debug(f"(get_ids) SQLite statement: {statement}")
                c.execute(statement, substitutes)
            else:
                c.execute(f"SELECT id FROM {table}")
            ids = c.fetchall()
        finally:
            connection.close()
        return ids

    def random_line(self, header, table, conditions=None, splitter=","):
        """ Chooses a random line from the table under the header.

        Args:
            header(unicode): The header of the random line's column.
            table(unicode): Name of the table to look into.
            conditions(dict, optional): Categories to filter the line by:
                {"header of categories 1": "category1,category2",
                 "header of category 2": "category3"}
                Multiple categories under a single header are separated
                with `splitter`.
            splitter(unicode, optional): What separates multiple categories
                (default is a comma).

        Returns:
            line(unicode): A random line from the database, or "" if
                conditions were given but nothing matched.

        Examples:
            >>> random_line("line", {"type": "greeting"})
            Hello.
        """
        header = phrases.clean(header)
        table = phrases.clean(table)
        line = ""
        if conditions:
            # Filter first, then fetch one matching row by primary key.
            # (No direct connection is needed on this path; previously one
            # was opened, unused, and leaked.)
            ids = self.get_ids(table, conditions, splitter)
            if ids:
                line = self.get_field(random.choice(ids), header, table)
        else:
            connection = self._connection()
            try:
                c = connection.cursor()
                c.execute(f"SELECT {header} FROM {table} "
                          f"ORDER BY Random() LIMIT 1")
                line = c.fetchone()[0]
            finally:
                connection.close()
        return line
class DiscordDatabase(Database):
    """ An extension of Database for Discord. """

    def add_server(self, server):
        """ Adds a server record to the database.

        Args:
            server(discord.Server): Server to add.
        """
        # TODO: not implemented yet.
        pass

    def remove_server(self, server):
        """ Removes a server from the database.

        Args:
            server(discord.Server): Server to remove.
        """
        # TODO: not implemented yet.
        pass
class BotDatabase(DiscordDatabase):
    """ An extension of DiscordDatabase for functions specific to the bot. """

    def add_song(self, url):
        """ Adds a song to the database.

        Args:
            url(str): URL of the song.
        """
        # TODO: not implemented yet.
        pass

    def add_playlist(self, name, user):
        """ Adds a playlist to the database.
        Playlists are bound to one user across all servers.

        Args:
            name(str): Name of the playlist.
            user(discord.Member/User): User who made the playlist.
        """
        # TODO: not implemented yet.
        pass

    def add_playlist_song(self, song, playlist):
        """ Adds a song to a playlist.

        Args:
            song(): Song to add.
            playlist(): The target playlist.
        """
        # TODO: not implemented yet.
        pass
| StarcoderdataPython |
9601915 | import json
""" Sorts and prints in descending order by duration function call
execution times in execution-times.log.
BrowserLibrary debug option needs to be True to record times to the logfile.
"""
with open("Browser/wrapper/execution-times.log") as log_file:
data = [json.loads(row) for row in log_file]
result = sorted(data, key=lambda i: i["executionTime"], reverse=True)
for item in result:
print(item)
# print(result)
| StarcoderdataPython |
9641232 | import unittest
import io
import os
from os.path import join as pjoin
import shutil
from base64 import encodebytes
from nbformat import write
from nbformat.v4 import (
new_notebook, new_markdown_cell, new_code_cell, new_output,
)
from offlineslides import export_to_offline_slides
# Base64-encoded 1x1 green-pixel PNG, as nbformat stores image output.
# Fixed a corrupted bytes literal: the IHDR width/height read
# b'\x00\x00\x01\x00\x00x00\x01' (a literal "x00" and a dropped escape),
# which is not a valid PNG; restored the canonical 1x1 image bytes.
png_green_pixel = encodebytes(b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00'
b'\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDAT'
b'\x08\xd7c\x90\xfb\xcf\x00\x00\x02\\\x01\x1e.~d\x87\x00\x00\x00\x00IEND\xaeB`\x82'
).decode('ascii')
class integration_test(unittest.TestCase):
    """End-to-end check: build a tiny notebook on disk, export it to
    offline slides, and verify the output HTML and vendored assets exist."""

    def setUp(self):
        # Create tests/testnb.ipynb with one markdown cell and one code
        # cell that has both a stream output and a PNG execute_result.
        nbdir = 'tests'
        subdir = pjoin(nbdir)
        self.subdir = subdir
        if not os.path.isdir(pjoin(nbdir)):
            os.makedirs(subdir)
        nb = new_notebook()
        nb.cells.append(new_markdown_cell(u'Created by test ³'))
        cc1 = new_code_cell(source=u'print(2*6)')
        cc1.outputs.append(new_output(output_type="stream", text=u'12'))
        cc1.outputs.append(new_output(output_type="execute_result",
            data={'image/png' : png_green_pixel},
            execution_count=1,
            ))
        nb.cells.append(cc1)
        with io.open(pjoin(nbdir, 'testnb.ipynb'), 'w',
                     encoding='utf-8') as f:
            write(nb, f, version=4)

    def tearDown(self):
        # Remove the generated notebook, slides and vendored assets.
        shutil.rmtree(self.subdir, ignore_errors=False)

    def test_output_without_errors(self):
        """Exporting produces the slides HTML plus local CDN replacements."""
        export_to_offline_slides('tests/testnb.ipynb')
        assert os.path.isfile('tests/testnb.slides.offline.html')
        assert os.path.isdir('tests/ext/ajax/libs')
        assert os.path.isdir('tests/ext/ajax/libs/font-awesome')
        assert os.path.isdir('tests/ext/ajax/libs/jquery')
        assert os.path.isdir('tests/ext/ajax/libs/mathjax')
        assert os.path.isdir('tests/ext/ajax/libs/require.js')
        assert os.path.isdir('tests/ext/ajax/libs/reveal.js')


if __name__ == '__main__':
    unittest.main()
1754618 | <gh_stars>1-10
import argparse
import csv
import json
import os
import shlex
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, patch
import pandas as pd
from . import REPO_ROOT, TEST_DATA
from .helpers import ChDir, mock_worksheet_helper
# Temporarily extend sys.path so `ingestion` resolves from the repo root,
# restoring the original path afterwards even if the import fails.
try:
    PATH = sys.path
    sys.path.append(REPO_ROOT)
    from ingestion.core import (parse_args, parse_config, xform_df_pre_output,
        get_category_ids, main, xform_cats_drupal_taxonomy, append_output,
        write_output, read_output)
finally:
    sys.path = PATH
class TestCore:
    """Unit tests for ingestion.core: CLI/config parsing, taxonomy lookups,
    DataFrame-to-record transforms, and output file writing/appending."""

    def setup_method(self):
        # Fresh gspread-style mocks for every test.
        self.mock_worksheet, self.mock_sheet, self.mock_creds = mock_worksheet_helper()

    def test_parse_args(self):
        """Every CLI flag maps onto the corresponding namespace attribute."""
        # Given
        expected = argparse.Namespace(
            creds_file='tests/data/dummy-credentials.json',
            config_file='tests/data/dummy-config.yml',
            taxonomy_file='tests/data/dummy-taxonomy.yml',
            schema_file='tests/data/dummy-schema.yml',
            output_file='tests/data/dummy-output.json',
        )
        argv = shlex.split(
            '--creds-file tests/data/dummy-credentials.json'
            ' --config-file tests/data/dummy-config.yml'
            ' --taxonomy-file tests/data/dummy-taxonomy.yml'
            ' --schema-file tests/data/dummy-schema.yml'
            ' --output-file tests/data/dummy-output.json'
        )
        # When
        result = parse_args(argv)
        # Then
        for key, value in vars(expected).items():
            assert vars(result)[key] == value

    def test_parse_args_default_stdout(self):
        """Omitting --output-file defaults the output to /dev/stdout."""
        # Given
        expected = argparse.Namespace(
            creds_file='tests/data/dummy-credentials.json',
            config_file='tests/data/dummy-config.yml',
            taxonomy_file='tests/data/dummy-taxonomy.yml',
            schema_file='tests/data/dummy-schema.yml',
            output_file='/dev/stdout',
        )
        argv = shlex.split(
            '--creds-file tests/data/dummy-credentials.json'
            ' --config-file tests/data/dummy-config.yml'
            ' --taxonomy-file tests/data/dummy-taxonomy.yml'
            ' --schema-file tests/data/dummy-schema.yml'
        )
        # When
        result = vars(parse_args(argv))
        # Then
        for key, value in vars(expected).items():
            assert result[key] == value

    def test_parse_config(self):
        """Config, schema and taxonomy files merge into one config dict."""
        # Given
        expected = {
            'name': 'foo',
            'spreadsheet_key': '<KEY>',
            'worksheets': [
                {'name': 'bar', 'category': 'baz'},
                {'name': 'qux', 'category': 'quux'}
            ],
            'schema_mapping': {
                'corge': "{{ record['grault'] or '' }}",
                'garply': "{{ record['waldo'] or '' }}"
            },
            'taxonomy': [
                {'id': 1, 'name': "corge", 'parents': []},
                {'id': 68, 'name': "grault", 'parents': []},
                {'id': 70, 'name': "fred", 'parents': []},
            ],
            'taxonomy_fields': [
                'garply', 'waldo'
            ]
        }
        dummy_config_file = os.path.join(TEST_DATA, 'dummy-config.yml')
        dummy_schema_file = os.path.join(TEST_DATA, 'dummy-schema.yml')
        dummy_taxonomy_file = os.path.join(TEST_DATA, 'dummy-taxonomy.yml')
        # When
        result = parse_config(dummy_config_file, dummy_schema_file, dummy_taxonomy_file)
        # Then
        assert expected == result

    def test_get_category_ids(self):
        """Category names in taxonomy fields resolve to their numeric ids."""
        # Given
        record = {
            'foo': 'd',
            'garply': 'corge, fred',
            'waldo': ''
        }
        taxonomies = {
            1: "corge",
            68: "grault",
            70: "fred",
        }
        taxonomy_fields = [
            'garply', 'waldo'
        ]
        expected = [1, 70]
        # When
        result = get_category_ids(record, taxonomies, taxonomy_fields)
        # Then
        assert sorted(expected) == sorted(result)

    def test_xform_df_row_to_json(self):
        """Each DataFrame row renders through the schema mapping templates."""
        # Given
        schema_mapping = {
            'corge': "{{ record['grault'] or '' }}",
            'garply': "{{ record['waldo'] or '' }}"
        }
        df_in = pd.DataFrame([
            ['a', 'b', 'c'], ['d', 'e', 'f']
        ], columns=['foo', 'waldo', 'grault'])
        expected = [
            {'corge': 'c', 'garply': 'b'},
            {'corge': 'f', 'garply': 'e'},
        ]
        # When
        transformed = xform_df_pre_output(df_in, schema_mapping)
        # Then
        assert expected == transformed

    def test_xform_df_row_to_json_blanks(self):
        """Rows whose mapped fields are all blank are dropped from output."""
        # Given
        schema_mapping = {
            'corge': "{{ record['grault'] or '' }}",
            'garply': "{{ record['waldo'] or '' }}"
        }
        df_in = pd.DataFrame([
            ['a', None, None], ['d', 'e', 'f']
        ], columns=['foo', 'waldo', 'grault'])
        expected = [
            {'corge': 'f', 'garply': 'e'},
        ]
        # When
        transformed = xform_df_pre_output(df_in, schema_mapping)
        # Then
        assert expected == transformed

    def test_xform_df_row_to_json_taxonomy(self):
        """category_ids is exposed to templates; matching is case/space
        insensitive ('FRED ' still resolves to id 70)."""
        # Given
        schema_mapping = {
            'corge': "{{ record['grault'] or '' }}",
            'garply': "{{ record['waldo'] or '' }}",
            'cats': "{{ category_ids | join(', ') }}",
        }
        df_in = pd.DataFrame([
            ['a', 'b', None, 'fred, grault'],
            ['d', 'e', 'f', 'corge, FRED ']
        ], columns=['foo', 'waldo', 'grault', 'plugh'])
        taxonomies = {
            1: "corge",
            68: "grault",
            70: "fred",
        }
        taxonomy_fields = [
            'plugh',
        ]
        expected = [
            {'corge': '', 'garply': 'b', 'cats': '68, 70'},
            {'corge': 'f', 'garply': 'e', 'cats': '1, 70'},
        ]
        # When
        transformed = xform_df_pre_output(df_in, schema_mapping, taxonomies, taxonomy_fields)
        # Then
        assert expected == transformed

    def test_main_creates_json_file(self):
        """main() with a mocked sheet fetch writes the expected JSON file."""
        with \
                tempfile.TemporaryDirectory() as tempdir, \
                ChDir(tempdir):
            # Given: copy the dummy fixtures into an isolated working dir.
            for dummy_file in [
                    'dummy-credentials.json',
                    'dummy-config.yml',
                    'dummy-taxonomy.yml',
                    'dummy-schema.yml',
            ]:
                dummy_src = os.path.join(TEST_DATA, dummy_file)
                dummy_dst = os.path.join(tempdir, dummy_file)
                shutil.copy(dummy_src, dummy_dst)
            argv = shlex.split(
                '--creds-file dummy-credentials.json'
                ' --config-file dummy-config.yml'
                ' --taxonomy-file dummy-taxonomy.yml'
                ' --schema-file dummy-schema.yml'
                ' --output-file dummy-output.json'
            )
            args = parse_args(argv)
            df_in = pd.DataFrame([
                ['a', 'b', None, 'fred, grault'],
                ['d', 'e', 'f', 'corge, FRED ']
            ], columns=['foo', 'waldo', 'grault', 'plugh'])
            mock_get_df_gsheets = MagicMock()
            mock_get_df_gsheets.return_value = df_in
            with open(os.path.join(TEST_DATA, 'dummy-output.json')) as stream:
                expected_json = json.load(stream)
            # When: run main with the Google Sheets fetch patched out.
            with patch('ingestion.core.get_df_gsheets', mock_get_df_gsheets):
                main(args)
            # Then
            with open('dummy-output.json') as stream:
                result_json = json.load(stream)
            assert result_json == expected_json

    def test_main_creates_csv_file(self):
        """main() honors --output-format csv and writes the expected CSV."""
        with \
                tempfile.TemporaryDirectory() as tempdir, \
                ChDir(tempdir):
            # Given
            for dummy_file in [
                    'dummy-credentials.json',
                    'dummy-config.yml',
                    'dummy-taxonomy.yml',
                    'dummy-schema.yml',
            ]:
                dummy_src = os.path.join(TEST_DATA, dummy_file)
                dummy_dst = os.path.join(tempdir, dummy_file)
                shutil.copy(dummy_src, dummy_dst)
            argv = shlex.split(
                '--creds-file dummy-credentials.json'
                ' --config-file dummy-config.yml'
                ' --taxonomy-file dummy-taxonomy.yml'
                ' --schema-file dummy-schema.yml'
                ' --output-file dummy-output.csv'
                ' --output-format csv'
            )
            args = parse_args(argv)
            df_in = pd.DataFrame([
                ['a', 'b', None, 'fred, grault'],
                ['d', 'e', 'f', 'corge, FRED ']
            ], columns=['foo', 'waldo', 'grault', 'plugh'])
            mock_get_df_gsheets = MagicMock()
            mock_get_df_gsheets.return_value = df_in
            with open(os.path.join(TEST_DATA, 'dummy-output.csv')) as stream:
                expected_dicts = list(csv.DictReader(stream))
            # When
            with patch('ingestion.core.get_df_gsheets', mock_get_df_gsheets):
                main(args)
            # Then
            with open('dummy-output.csv') as stream:
                result_dicts = list(csv.DictReader(stream))
            assert result_dicts == expected_dicts

    def test_xform_cats_drupal_taxonomy_propagate(self):
        """Leaf category ids expand into category/subcategory_N columns by
        walking the taxonomy's parent chain (root first)."""
        # Given
        record = {
            'foo': 'd',
            'resource_category': [1, 1],
            'waldo': ''
        }
        taxonomy = [
            {'id': 1, 'name': "corge", 'parents': [70]},
            {'id': 68, 'name': "grault", 'parents': []},
            {'id': 70, 'name': "fred", 'parents': [68]},
        ]
        taxonomy_ids_field = 'resource_category'
        expected = record.copy()
        expected.update(
            category='grault',
            subcategory_1='fred',
            subcategory_2='corge',
        )
        # When: xform_cats_drupal_taxonomy returns a per-record transform.
        result = xform_cats_drupal_taxonomy(taxonomy, taxonomy_ids_field)(record)
        # Then
        assert sorted(result.items()) == sorted(expected.items())

    def test_append_output_json(self):
        """append_output merges on the key field, replacing matching rows."""
        with \
                tempfile.TemporaryDirectory() as tempdir, \
                ChDir(tempdir):
            # Given
            existing_data = [
                {
                    'id': 'foo',
                    'value': 'bar'
                },
                {
                    'id': 'baz',
                    'value': 'qux'
                },
            ]
            new_data = [{'id': 'baz', 'value': 'quux'}]
            expected_data = [
                {
                    'id': 'foo',
                    'value': 'bar'
                },
                {
                    'id': 'baz',
                    'value': 'quux'
                },
            ]
            out_fmt = 'json'
            out_file = os.path.join(tempdir, 'out.json')
            write_output(existing_data, out_file, out_fmt)
            # When
            append_output(new_data, out_file, 'json', 'id')
            # Then
            assert expected_data == read_output(out_file, out_fmt)
| StarcoderdataPython |
1976249 | <gh_stars>100-1000
import base64
from pprint import pprint
import redis
import json
from django.conf import settings
from config.celery import app
"""
Helpers to inspect and edit the celery job queue.
Called from `fab celery_*`.
"""
def jobs_pending():
    """List all jobs not yet claimed by a worker.

    For each broker queue, prints runs of consecutive identical task names
    together with their index range in the Redis list.
    """
    r = redis.Redis.from_url(settings.CELERY_BROKER_URL)
    for queue in get_queues():
        print(f"Pending jobs for {queue}:")
        last_task = None
        tasks = []  # (task_name, first_index) pairs marking each run
        for i, job_json in enumerate(r.lrange(queue, 0, -1)):
            job, headers, id = parse_job(job_json)
            if headers['task'] != last_task:
                last_task = headers['task']
                tasks.append((last_task, i))
        if not tasks:
            print("- empty")
        else:
            # Sentinel so the final run has an end index to print against.
            tasks.append((None, i))
            for i, (task, index) in enumerate(tasks):
                if task:
                    print(f"- {task}: {index}-{tasks[i+1][1]}")
def job_info(index=0, queue='celery'):
    """Dump json info for a particular job.

    Args:
        index: position of the job in the Redis list (0 = head).
        queue: broker queue name to inspect.
    """
    r = redis.Redis.from_url(settings.CELERY_BROKER_URL)
    job_json = r.lindex(queue, index)
    job, headers, id = parse_job(job_json)
    # The celery message body is base64-encoded JSON; decode it for display.
    job['body'] = json.loads(base64.b64decode(job['body']))
    pprint(job)
def remove_jobs(task_name, queue='celery'):
    """Remove all jobs with a given name from the queue.

    Rotates through the Redis list: each job is popped and either dropped
    (if its task matches ``task_name``) or pushed onto the tail. Stops once
    a previously seen job id comes around again, i.e. after one full cycle.

    Args:
        task_name: fully qualified celery task name to purge.
        queue: broker queue name to edit.
    """
    r = redis.Redis.from_url(settings.CELERY_BROKER_URL)
    inspected = set()
    removed = 0
    while True:
        job_json = r.lpop(queue)
        if job_json is None:
            # Queue drained completely (e.g. every job matched task_name);
            # previously this crashed trying to parse None.
            break
        job, headers, id = parse_job(job_json)
        if id in inspected:
            # Full cycle: push the sentinel job back before stopping;
            # previously it was silently dropped from the queue.
            r.rpush(queue, job_json)
            break
        inspected.add(id)
        if headers['task'] != task_name:
            r.rpush(queue, job_json)
        else:
            removed += 1
    print(f"Removed {removed} instances of {task_name}")
## helpers ##
def parse_job(job_json):
    """Decode a raw celery job payload into (task, headers, job id)."""
    task = json.loads(job_json)
    headers = task['headers']
    return task, headers, headers['id']
def get_queues():
    """Return the set of queue names any active worker is listening on."""
    queues = app.control.inspect().active_queues()
    # Mapping of worker name -> list of queue dicts; flatten to unique names.
    return set(q['name'] for listen_queues in queues.values() for q in listen_queues)
| StarcoderdataPython |
6426001 | from flask import render_template
import connexion
# Connexion wraps Flask and wires API routes from the OpenAPI spec in config/.
app = connexion.App(__name__, specification_dir="config")
app.add_api("swagger.yml")


@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")


if __name__ == "__main__":
    app.run(debug=True)
| StarcoderdataPython |
1804100 | from django.core.mail import send_mail
from django.conf import settings
import logging
logger = logging.getLogger('django')
from celery_tasks.main import celery_app
# 定义一个发送函数, 发送 email:
@celery_app.task(name='send_verify_email')
def send_verify_email(to_email, verify_url):
# 标题
subject = "商城邮箱验证"
# 发送内容:
html_message = '<p>尊敬的用户您好!</p>' \
'<p>感谢您使用商城。</p>' \
'<p>您的邮箱为:%s 。请点击此链接激活您的邮箱:</p>' \
'<p><a href="%s">%s<a></p>' % (to_email, verify_url, verify_url)
# 进行发送
result = send_mail(subject,
"",
settings.EMAIL_FROM,
[to_email],
html_message=html_message)
return result
| StarcoderdataPython |
5014958 | <filename>pymic/loss/cls/nll.py
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
class NLLLoss(nn.Module):
    """Negative log-likelihood classification loss.

    Applies log-softmax over the class dimension of the prediction and
    feeds the result to ``nn.NLLLoss`` — numerically equivalent to
    cross-entropy on raw logits.

    Parameters
    ----------
    params : dict
        Configuration dict (currently unused, kept for interface
        compatibility with the other loss classes).
    """
    def __init__(self, params):
        super(NLLLoss, self).__init__()
        self.nll_loss = nn.NLLLoss()
        # Hoisted: the original constructed a new LogSoftmax module on
        # every forward pass.
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, loss_input_dict):
        """Compute the loss.

        loss_input_dict must contain:
          'prediction'   — raw logits of shape (N, C, ...)
          'ground_truth' — integer class labels of shape (N, ...)
        """
        predict = loss_input_dict['prediction']
        labels = loss_input_dict['ground_truth']
        loss = self.nll_loss(self.log_softmax(predict), labels)
        return loss
| StarcoderdataPython |
3581994 | <filename>maptapp/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-19 14:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); avoid hand-editing —
    # create a follow-up migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Address table (db_table 'addresses') with
        # lat/lng coordinates and a free-form address string.
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lat', models.DecimalField(decimal_places=16, max_digits=20)),
                ('lng', models.DecimalField(decimal_places=16, max_digits=20)),
                ('address', models.CharField(max_length=255)),
            ],
            options={
                'db_table': 'addresses',
            },
        ),
    ]
| StarcoderdataPython |
1894377 | # -*- coding: utf-8 -*-
"""
/dms/webquest/views_add.py
.. enthaelt den View zum Ergaenzen eines Webquests
Django content Management System
<NAME>
<EMAIL>
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 30.04.2008 Beginn der Arbeit
0.02 05.05.2008 Kopplung an Lernarchiv-Webquest
"""
import datetime
from django.utils.translation import ugettext as _
from dms.views_error import show_error_object_exist
from dms.models import DmsItem
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django import newforms as forms
from dms.queries import save_container_values
from dms.queries import exist_item
from dms.queries import get_site_url
from dms.queries import get_edu_sprache_id
from dms.queries import set_extra_data
from dms.queries import save_item_values
from dms.queries import get_item_container_by_url
from dms.queries import get_item_container_children
from dms.queries import is_file_by_item_container
from dms.queries import get_app
from dms.mail import send_control_email
from dms.roles import *
from dms.views_error import show_error_object_exist
from dms.utils import show_link
from dms.utils import get_tabbed_form
from dms.utils import get_section_choices
from dms.utils import check_name
from dms.utils import get_license_choices
from dms.utils_form import get_folderish_vars_add
from dms.utils_base import ACL_USERS
from dms.edufolder.utils import do_copy
from dms.encode_decode import encode_html
from dms.webquest.views_navigation_left import create_new_menu_webquest
from dms.webquest.help_form import help_form
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
@require_permission('perm_add_folderish')
def webquest_add(request, item_container):
    """Create a new webquest below *item_container*.

    GET renders the tabbed input form; a valid POST either copies an
    existing edu-webquest (when a source URL is supplied in string_2) or
    creates a fresh webquest with default pages, then redirects to the
    new webquest's index.

    NOTE(review): this function's indentation was reconstructed from a
    whitespace-stripped source. In particular, the default-page creation
    is placed inside the "no template copied" branch — confirm against
    the upstream repository.
    """
    def save_values(request, name, new, item_container):
        """Persist the submitted form data (copy path or fresh creation)."""
        community_id = 0
        schulverbund_id = 0
        new['is_exchangeable'] = item_container.item.is_exchangeable
        new['is_browseable'] = True
        new['has_user_support'] = True
        edu_ic = None
        # string_2 optionally names an existing edu-webquest to copy from.
        if new.has_key('string_2') and new['string_2'].strip() != '':
            url = new['string_2'].strip()
            edu_ic = get_item_container_by_url(url)
            if edu_ic != None:
                if edu_ic.item.app.name != 'dmsEduWebquestItem':
                    # Not an edu-webquest: silently abort the save.
                    return
                item_container_new = do_copy(request, edu_ic, item_container)
                # The copy keeps the edu app type; it must become a webquest.
                item_container_new.item.app = get_app('dmsWebquest') # !!!!!
                item_container_new.item.save()
                #if new.has_key('name') and new['name'].strip() == '':
                #    item_container_new.name = new['name'].strip()
                do_save = False
                if new.has_key('title') and new['title'].strip() == '':
                    item_container_new.title = new['title'].strip()
                    do_save = True
                if new.has_key('nav_title') and new['nav_title'].strip() == '':
                    item_container_new.nav_title = new['nav_title'].strip()
                    do_save = True
                if do_save:
                    item_container_new.save()
        # --- no template: the webquest must be created explicitly
        if edu_ic == None:
            item_container_new = item_container = save_container_values(request.user,
                'dmsWebquest', name, new, item_container)
            # --- introduction page
            new={}
            new['name'] = name = _(u'einleitung.html')
            new['title'] = _(u'Einleitung')
            new['text'] = _(u'<p>\nEinleitung des Webquest\n</p>\n')
            ic = save_item_values(request.user, 'dmsDocument', name, new,
                item_container_new, True, False)
            #ic.part_of_id = part_of_id
            ic.order_by = 1000
            ic.save()
            # --- task page
            new={}
            new['name'] = name = _(u'aufgabe.html')
            new['title'] = _(u'Aufgabe')
            new['text'] = _(u'<p>\nAufgabe des Webquest\n</p>\n')
            ic = save_item_values(request.user, 'dmsDocument', name, new,
                item_container_new, True, False)
            #ic.part_of_id = part_of_id
            ic.order_by = 1010
            ic.save()
            # --- recommended-procedure page
            new={}
            new['name'] = name = _(u'vorgehen.html')
            new['title'] = _(u'Vorgehen')
            new['text'] = _(u'<p>\nEmpfehlungen zum Vorgehen\n</p>\n')
            ic = save_item_values(request.user, 'dmsDocument', name, new,
                item_container_new, True, False)
            #ic.part_of_id = part_of_id
            ic.order_by = 1020
            ic.save()
            # --- expectations / grading page
            new={}
            new['name'] = name = _(u'erwartung.html')
            new['title'] = _(u'Erwartung')
            new['text'] = _(u'<p>\nErwartung und Hinweise zur Bewertung des Webquest\n</p>\n')
            ic = save_item_values(request.user, 'dmsDocument', name, new,
                item_container_new, True, False)
            #ic.part_of_id = part_of_id
            ic.order_by = 1100
            ic.save()
            # --- material pool (moderated, no user support)
            new={}
            new['name'] = name = _(u'material')
            new['has_user_support'] = False
            new['is_moderated'] = True
            new['title'] = _(u'Material')
            new['nav_title'] = _(u'Material')
            new['sections'] = ''
            ic_pool = item_container = save_container_values(request.user,
                'dmsPool', name, new, item_container_new)
            ic_pool.order_by = 1050
            ic_pool.save()
            # --- results folder (unmoderated, with user support)
            new={}
            new['name'] = name = _(u'ergebnisse')
            new['has_user_support'] = True
            new['is_moderated'] = False
            new['title'] = _(u'Ergebnisse')
            new['nav_title'] = _(u'Ergebnisse')
            new['nav_name_left'] = 'webquest|ergebnisse'
            new['sections'] = ''
            ic_folder = item_container = save_container_values(request.user,
                'dmsPool', name, new, item_container_new)
            #ic_folder.part_of_id = item_container.part_of_id
            ic_folder.order_by = 1200
            ic_folder.save()
            # --- user-management folder (fixed ACL folder inside results)
            new = {}
            new['title'] = _(u'User-Verwaltung')
            new['nav_title'] = new['title']
            new['is_browseable'] = False
            acl_item_container = save_container_values(request.user,
                'dmsUserFolder', ACL_USERS, new, ic_folder)
            acl_item_container.set_is_changeable(False)
        # --- left navigation menu for the new webquest
        create_new_menu_webquest(item_container_new)
        send_control_email(item_container)

    class DmsItemForm(forms.Form):
        """Fields of the input form."""
        string_2 = forms.CharField(required=False, max_length=200,
            widget=forms.TextInput(attrs={'size':60}) )
        name = forms.CharField(required=False, max_length=60,
            widget=forms.TextInput(attrs={'size':20}) )
        title = forms.CharField(required=False, max_length=240,
            widget=forms.TextInput(attrs={'size':60}) )
        nav_title = forms.CharField(required=False, max_length=60,
            widget=forms.TextInput(attrs={'size':30}) )
        text = forms.CharField(required=False, widget=forms.Textarea(
            attrs={'rows':10, 'cols':60, 'id':'ta',
            'style':'width:100%;'}) )
        section = forms.CharField(required=False, widget=forms.Select(choices=
            get_section_choices(item_container.container.sections),
            attrs={'size':4, 'style':'width:60%'} ) )
        license = forms.ChoiceField(choices=get_license_choices(item_container),
            widget=forms.RadioSelect() )

    app_name = 'webquest'
    my_title = _(u'Webquest anlegen')
    # --- Was data submitted, or does the form need initial values?
    show_errors = ( request.method == 'POST' )
    if request.method == 'POST':
        data=request.POST.copy()
    else :
        data = { 'license': 1, }
    # --- Populate the form and prepare it for rendering
    f = DmsItemForm(data)
    tabs = [ ('tab_base', [ 'string_2', 'section']),
             ('tab_base_2', [ 'name', 'title', 'nav_title', 'text', ]),
             ('tab_license', [ 'license', ] ),
           ]
    content = get_tabbed_form(tabs, help_form, app_name , f)
    # --- Was the form submitted without validation errors?
    if request.method=='POST' and not f.errors:
        # --- strip umlauts/special characters from the name
        name = check_name(f.data['name'], True)
        new = f.cleaned_data
        if not exist_item(item_container, name):
            save_values(request, name, new, item_container)
            return HttpResponseRedirect(get_site_url(item_container, 'index.html'))
        else :
            return show_error_object_exist(request, item_container, name)
    else:
        tEduFolder = get_template('app/webquest/target_edufolder.html')
        vars = get_folderish_vars_add(request, item_container, app_name, my_title,
            content, show_errors)
        vars['text_intro'] = help_form['copyright']['help']
        #select_edufolder = show_link('javascript:select_dest_url(document.form_input.dest_folder)',
        #                             _(u'Webquest auswählen ...'), url_extern="_extern")
        #vars['top_of_form'] = tEduFolder.render( Context( {'get_eduwebquest': select_edufolder }) )
        return render_to_response ( 'app/base_edit.html', vars )
| StarcoderdataPython |
5152070 | # Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend for synchronized command execution. This class is promarily intended
for test purposes. It can only be used to append a new module to a workflow
but not to insert delete or replace modules in an existing workflow.
The synchronized backend can also be used as a base class for the asynchronous
backend.
"""
from typing import Dict
from vizier.engine.backend.base import TaskExecEngine
from vizier.engine.task.base import TaskContext
from vizier.engine.task.processor import ExecResult
from vizier.viztrail.module.output import ModuleOutputs
from vizier.engine.task.processor import TaskProcessor
class SynchronousTaskEngine(TaskExecEngine):
    """Backend that only supports synchronous execution of tasks."""

    def __init__(self,
        commands: Dict[str, Dict[str, TaskProcessor]],
        projects
    ):
        """Store the processor lookup table and the project cache.

        Parameters
        ----------
        commands: dict(dict(vizier.engine.packages.task.processor.TaskProcessor))
            Task processors for executable tasks, keyed by package
            identifier and then by command identifier
        projects: vizier.engine.project.cache.base.ProjectCache
            Cache for project handles
        """
        self.commands = commands
        self.projects = projects

    def can_execute(self, command):
        """Return True if a processor is registered for the command's
        package/command identifier pair, i.e. the command can run in the
        calling process.

        Parameters
        ----------
        command : vizier.viztrail.command.ModuleCommand
            Specification of the command that is to be executed

        Returns
        -------
        bool
        """
        package = self.commands.get(command.package_id)
        return package is not None and command.command_id in package

    def execute(self, task, command, artifacts, resources=None):
        """Execute *command* immediately and return the processor result.

        Raises ValueError if no processor is registered for the command.

        Parameters
        ----------
        task: vizier.engine.task.base.TaskHandle
            Handle for the task whose execution is requested by the
            controlling workflow engine
        command : vizier.viztrail.command.ModuleCommand
            Specification of the command that is to be executed
        artifacts: dict
            Available resources in the database state, keyed by resource
            name with resource identifiers as values
        resources: dict, optional
            Optional information about resources generated during a
            previous execution of the command

        Returns
        ------
        vizier.engine.task.processor.ExecResult
        """
        package = self.commands.get(command.package_id, {})
        processor = package.get(command.command_id)
        if processor is None:
            raise ValueError('cannot execute given command')
        # Resolve the project handle so the processor sees the right stores.
        project = self.projects.get_project(task.project_id)
        try:
            return processor.compute(
                command_id=command.command_id,
                arguments=command.arguments,
                context=TaskContext(
                    project_id=task.project_id,
                    datastore=project.datastore,
                    filestore=project.filestore,
                    artifacts=artifacts,
                    resources=resources
                )
            )
        except Exception as ex:
            # Surface processor failures as a failed ExecResult instead of
            # propagating the exception to the workflow engine.
            outputs = ModuleOutputs()
            outputs.error(ex)
            return ExecResult(
                is_success=False,
                outputs=outputs
            )
| StarcoderdataPython |
154816 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from datahub.access.tests import db_helper
from datahub.databus import channel, settings
# Constants shared by the tests/fixtures below.
ARGS = {
    "channel_id": 1000,
    "clean_id": 6,
    "processing_id": "591_new_van",
    "cluster_domain": "kafka",
    "json_config": '{"abc": "saa"}',
    "cluster_port": 9092,
    # kafka bootstrap-server address derived from domain and port
    "kafka_bs": "{}:{}".format("kafka", 9092),
    "rawdata_id": 123,
    "raw_data_name": "test",
    "rawdata_id2": 124,
    "raw_data_name2": "test2",
    "bk_biz_id": 591,
    # topic name convention: raw_data_name + bk_biz_id
    "topic": "{}{}".format("test", 591),
}
@pytest.fixture
def add_rawdata():
    """Seed channel / raw-data / clean-info rows and the matching kafka
    topic for a test, then tear everything down afterwards."""
    with db_helper.open_cursor("mapleleaf") as cur:
        # First add a channel record that points at the kafka cluster.
        db_helper.insert(
            cur,
            "databus_channel_cluster_config",
            cluster_name="testouter",
            cluster_type="kafka",
            cluster_role="outer",
            cluster_domain=ARGS["cluster_domain"],
            cluster_backup_ips="",
            cluster_port=ARGS["cluster_port"],
            zk_domain="zookeeper",
            zk_port="2181",
            zk_root_path="/",
            active=True,
            priority=1,
            attribute="bkdata",
            created_by="",
            updated_by="",
            description="",
            id=ARGS["channel_id"],
        )
    with db_helper.open_cursor("mapleleaf") as cur:
        # Then add a raw-data access record bound to that channel.
        db_helper.insert(
            cur,
            "access_raw_data",
            id=ARGS["rawdata_id"],
            bk_biz_id=ARGS["bk_biz_id"],
            raw_data_name=ARGS["raw_data_name"],
            raw_data_alias="for test",
            sensitivity="public",
            data_source="log",
            data_encoding="UTF8",
            data_category="log",
            data_scenario="log",
            bk_app_code="data",
            storage_channel_id=ARGS["channel_id"],
            storage_partitions=1,
            created_by="",
            updated_by="",
            description="",
        )
    with db_helper.open_cursor("mapleleaf") as cur:
        # A second raw-data access record.
        # NOTE(review): this one uses storage_channel_id=1 rather than
        # ARGS["channel_id"] — confirm that is intentional.
        db_helper.insert(
            cur,
            "access_raw_data",
            id=ARGS["rawdata_id2"],
            bk_biz_id=ARGS["bk_biz_id"],
            raw_data_name=ARGS["raw_data_name2"],
            raw_data_alias="for test",
            sensitivity="public",
            data_source="log",
            data_encoding="UTF8",
            data_category="log",
            data_scenario="log",
            bk_app_code="data",
            storage_channel_id=1,
            storage_partitions=1,
            created_by="",
            updated_by="",
            description="",
        )
    with db_helper.open_cursor("mapleleaf") as cur:
        # Then add a clean (ETL) configuration for the first raw data.
        db_helper.insert(
            cur,
            "databus_clean_info",
            id=ARGS["clean_id"],
            processing_id=ARGS["processing_id"],
            raw_data_id=ARGS["rawdata_id"],
            pe_config="",
            json_config=ARGS["json_config"],
            status="started",
            created_by="",
            updated_by="",
            clean_config_name="",
            clean_result_table_name="",
            clean_result_table_name_alias="",
            description="",
        )
    # Initialise the kafka topic that belongs to the raw data.
    channel.create_kafka_topic(ARGS["kafka_bs"], ARGS["topic"])
    try:
        yield 1
    finally:
        # Always clean up, even if the test failed.
        _delete_rawdata(
            ARGS["channel_id"],
            ARGS["topic"],
            "{}_{}".format(settings.CLEAN_BAD_MSG_TOPIC_PREFIX, ARGS["rawdata_id"]),
        )
def _delete_rawdata(channel_id, topic, badmsg_topic):
    """Tear down everything created by the add_rawdata fixture."""
    # Delete the raw data's kafka topic.
    channel.delete_kafka_topic(channel_id, topic)
    # Delete the raw data's bad-message topic.
    channel.delete_kafka_topic(channel_id, badmsg_topic)
    # Remove the raw-data records.
    with db_helper.open_cursor("mapleleaf") as cur:
        db_helper.execute(
            cur,
            """
            DELETE FROM access_raw_data
            """,
        )
    # Remove the channel records.
    with db_helper.open_cursor("mapleleaf") as cur:
        db_helper.execute(
            cur,
            """
            DELETE FROM databus_channel_cluster_config
            """,
        )
    # Remove the clean configuration records.
    with db_helper.open_cursor("mapleleaf") as cur:
        db_helper.execute(
            cur,
            """
            DELETE FROM databus_clean_info
            """,
        )
| StarcoderdataPython |
1678815 | <reponame>GuyTeichman/RNAlysis
# -*- coding: utf-8 -*-
"""Top-level package for sRNA analysis pipeline."""
__all__ = ['general', 'filtering', 'enrichment']
__name__ = "rnalysis"
__author__ = """<NAME>"""
__email__ = "<EMAIL>"
__version__ = "1.3.4"
__license__ = "MIT"
__attr_file_key__ = "attribute_reference_table"""
__biotype_file_key__ = "biotype_reference_table"
| StarcoderdataPython |
6441328 | from django.shortcuts import render
import shopify
from shopify_app.decorators import shop_login_required
@shop_login_required
def index(request):
    """Shop dashboard: render the first three products from the Shopify API."""
    products = shopify.Product.find(limit=3)
    return render(request, 'home/index.html', {'products': products})
| StarcoderdataPython |
6605190 | <filename>app/database.py
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Connection URL for the treatment_management MySQL database.
# NOTE(review): credentials are hard-coded — move them to configuration
# (environment variable or settings file) before deployment.
SQL_ACLHEMY_DB = 'mysql+pymysql://dzhonpetrus:root@localhost/treatment_management'
engine = create_engine(SQL_ACLHEMY_DB)
# Session factory: explicit commit/flush only.
SessionLocal = sessionmaker(bind=engine, autocommit=False, autoflush=False)
# Declarative base class shared by all ORM models.
Base = declarative_base()
def get_db():
    """Dependency generator: yield a database session, closing it afterwards.

    The session is always closed in the ``finally`` block, even if the
    request handler raises.

    Fixed: removed dataset-separator garbage that was fused onto the
    final line of the original source.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
3552887 | import tensorflow as tf
def my_model():
    """Build the small MNIST CNN classifier (uncompiled Sequential model)."""
    layers = [
        tf.keras.layers.Conv2D(1, 3, input_shape=[28, 28, 1]),
        tf.keras.layers.Conv2D(1, 3),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax'),
    ]
    return tf.keras.models.Sequential(layers)
if __name__ == "__main__":
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_train.shape)
x_train = tf.reshape(x_train, [x_train.shape[0], 28, 28, 1])
x_test = tf.reshape(x_test, [x_test.shape[0], 28, 28, 1])
model = my_model()
print(model.summary())
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16,epochs=5)
print("Now begin evaluation")
model.evaluate(x_test, y_test, verbose=2)
print("Using tf version: " + tf.__version__)
| StarcoderdataPython |
5034375 | <reponame>yuta0306/notion-extensions
from typing import Dict, Union
from .block import Block
__all__ = [
"Divider",
]
class Divider(Block):
    """
    Divider

    Divider property value of a Notion block. It carries no payload of
    its own: TEMPLATE below is the complete JSON fragment sent to the
    API ({"type": "divider", "divider": {}}).

    Methods
    -------
    clear()
        Clear this block's data (presumably inherited from Block — confirm)
    json()
        Return this class as dictionary (presumably inherited from Block — confirm)
    """

    TEMPLATE: Dict[str, Union[str, Dict]] = {
        "type": "divider",
        "divider": {},
    }

    def __init__(self):
        super().__init__()
| StarcoderdataPython |
1778737 | <reponame>attesch/webbreaker
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from webbreaker.threadfix.common.helper import ThreadFixHelper
from webbreaker.threadfix.common.loghelper import ThreadFixLogHelper
threadfixloghelper = ThreadFixLogHelper()
class ThreadFixTeams(object):
    """Lists all ThreadFix teams.

    NOTE: constructing this class immediately fetches and prints the
    team list (side effect in __init__).
    """
    def __init__(self):
        self.helper = ThreadFixHelper()
        self._list_teams()

    def _list_teams(self):
        """Fetch the team list and print it as an ID/Name table."""
        teams = self.helper.get_team_list()
        if teams:
            print("{0:^10} {1:30}".format('ID', 'Name'))
            print("{0:10} {1:30}".format('-' * 10, '-' * 30))
            for team in teams:
                print("{0:^10} {1:30}".format(team['id'], team['name']))
            threadfixloghelper.log_info_threadfix_teams_listed_success()
            print('\n\n')
        else:
            # No teams returned (or the API call failed upstream).
            threadfixloghelper.log_error_no_team()
| StarcoderdataPython |
1951736 | from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Callable, List, Optional
from rotkehlchen.types import ChecksumEthAddress
if TYPE_CHECKING:
from rotkehlchen.accounting.structures.balance import AssetBalance
class EthereumModule(metaclass=ABCMeta):
    """Interface to be followed by all Ethereum modules"""
    # Optional callback to run on a module's startup
    # Is optional as opposed to a no-op since at initialization we
    # start a greenlet to run it and there is no reason to bring up no-op greenlets
    on_startup: Optional[Callable[['EthereumModule'], None]] = None

    # Subclasses must implement the three lifecycle hooks below.
    @abstractmethod
    def on_account_addition(self, address: ChecksumEthAddress) -> Optional[List['AssetBalance']]:
        """Actions to run on new ethereum account additions

        Can optionally return a list of asset balances determined by the module
        """
        ...

    @abstractmethod
    def on_account_removal(self, address: ChecksumEthAddress) -> None:
        """Actions to run on removal of an ethereum account"""
        ...

    @abstractmethod
    def deactivate(self) -> None:
        """Actions to run on module's deactivation"""
        ...
| StarcoderdataPython |
12816661 | from sqlalchemy import (
Column,
Index,
Integer,
Text,
TIMESTAMP,
Boolean
)
from .meta import Base
import datetime
class Login(Base):
    """Login credentials table.

    NOTE(review): the password column appears to hold the submitted
    value directly — confirm hashing happens before rows are written.
    """
    __tablename__ = 'login'
    id = Column(Integer, primary_key=True)
    email = Column(Text, nullable=False)
    password = Column(Text, nullable=False)
class Users(Base):
    """Application user account."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(Text, nullable=False)
    password = Column(Text, nullable=False)
    email = Column(Text, nullable=False)
    admin = Column(Boolean, nullable=False)
    # Fixed: pass the callable, not its result. `default=datetime.datetime.now()`
    # was evaluated once at import time, stamping every row with the process
    # start time instead of the insert time.
    add_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
    # NOTE(review): consider onupdate=datetime.datetime.now so this column
    # refreshes on UPDATE, as its name suggests.
    update_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
class Language(Base):
    """Per-item translations (Japanese / English / Chinese)."""
    __tablename__= 'language'
    id = Column(Integer, primary_key=True)
    name = Column(Text, nullable=False)
    japanese = Column(Text, nullable=False)
    english = Column(Text, nullable=False)
    chinese = Column(Text, nullable=False)
    lock = Column(Boolean, nullable=False)
    # Fixed: pass the callable, not its result. `default=datetime.datetime.now()`
    # was evaluated once at import time, stamping every row with the process
    # start time instead of the insert time.
    add_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
    # NOTE(review): consider onupdate=datetime.datetime.now so this column
    # refreshes on UPDATE, as its name suggests.
    update_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
| StarcoderdataPython |
8084783 | <gh_stars>1-10
import os, sys
import zipfile
import subprocess
from shutil import copyfile
# Define the path to love-android-sdl2 here:
path_to_love_android = "C:/Android/love-android-sdl2"
# Absolute path of this script at startup.
# NOTE(review): despite the original comment ("current working
# directory"), realpath(__file__) is the script's own location, not the
# cwd — and this value is never used below. Confirm before relying on it.
start_directory = os.path.realpath(__file__)
def zipgame(path, loveZip):
    """Add every file under *path* to *loveZip*, skipping .py and .love files.

    Archive member names are stored relative to *path*, so the game's
    directory layout is preserved inside the .love archive.
    """
    skipped_extensions = ('.py', '.love')
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if os.path.splitext(filename)[1] in skipped_extensions:
                continue
            full_path = os.path.join(dirpath, filename)
            arcname = os.path.relpath(full_path, os.path.join(path, '.'))
            loveZip.write(full_path, arcname)
if __name__ == '__main__':
    # Package the game directory (this script's directory) into game.love.
    currDirect = os.path.dirname(os.path.realpath(__file__))
    loveFile = zipfile.ZipFile(
        currDirect + '/game.love',
        'w',
        zipfile.ZIP_DEFLATED)
    zipgame(currDirect, loveFile)
    loveFile.close()
    # Move the archive into the love-android-sdl2 assets folder.
    if os.path.exists(path_to_love_android + '/assets/game.love'):
        os.remove(path_to_love_android + '/assets/game.love')
    os.rename(currDirect + '/game.love', path_to_love_android + '/assets/game.love')
    # NOTE(review): this existence check comes AFTER the rename into that
    # directory — if path_to_love_android is missing, the rename above
    # already failed. Likely it should run before the rename.
    if not os.path.exists(path_to_love_android):
        os.makedirs(path_to_love_android)
    # Build the debug APK via Apache Ant (Windows batch file).
    batch_file = os.path.join(path_to_love_android, "auto_ant.bat")
    with open(batch_file, 'w') as batch:
        batch.write("ant debug")
    os.chdir(path_to_love_android)
    print ("Running Apache Ant debug compilation...")
    p = subprocess.Popen(batch_file, shell=True, stdout = subprocess.PIPE)
    stdout, stderr = p.communicate()
    #print (stdout)
    #print p.returncode
    # Copy the built APK back next to this script.
    copyfile(path_to_love_android + '/bin/love-android-debug.apk', currDirect + '/love-android-debug.apk')
    print ("Debug APK created!")
os.chdir(currDirect) | StarcoderdataPython |
6474188 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# @author <EMAIL>
#
#
from ppmessage.db.models import MessagePushTask
from ppmessage.db.models import ConversationUserData
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.core.constant import CONVERSATION_STATUS
def get_app_conversations(redis):
    """Return the uuids of all conversations currently in OPEN status.

    NOTE(review): the original docstring described filtering by an
    `app_uuid` parameter that does not exist — the lookup is keyed on
    conversation status only. Confirm the intended scope with callers.

    Returns an empty list when *redis* is None (fixed: `== None` → `is None`).
    """
    if redis is None:
        return []
    key = ConversationUserData.__tablename__ + ".conversation_status." + CONVERSATION_STATUS.OPEN
    conversations = redis.zrevrange(key, 0, -1) or []
    return conversations
def get_message_info(redis, task_uuid):
    """Fetch the MessagePushTask hash for *task_uuid*.

    Returns None when either argument is missing (guard against
    illegal parameters).
    """
    if redis is None or task_uuid is None:
        return None
    return redis_hash_to_dict(redis, MessagePushTask, task_uuid)
def get_message_count(redis, conversation_uuid):
    """Return the total number of messages queued for a conversation.

    Returns 0 when either argument is missing (guard against illegal
    parameters).
    """
    if redis is None or conversation_uuid is None:
        return 0
    key = "{0}.conversation_uuid.{1}".format(
        MessagePushTask.__tablename__, conversation_uuid)
    return redis.zcard(key)
| StarcoderdataPython |
6493347 | <reponame>Narendra-Git-Hub/website
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-08-24 10:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); avoid hand-editing —
    # create a follow-up migration instead.

    dependencies = [
        ('organisation', '0014_auto_20200824_1033'),
    ]

    operations = [
        # Drops the 'position' field from the NetworkGroup model.
        migrations.RemoveField(
            model_name='networkgroup',
            name='position',
        ),
    ]
| StarcoderdataPython |
3225361 | # coding: utf-8
from flask_login import login_required
from flask import Blueprint, request
from marshmallow import Schema, fields
from app.libs.http import jsonify, error_jsonify
from app.model.data_collection import DataCollection
from app.model.corporate_Info import Info
from app.model.report_time import ReportTime
# Trend retrieval: return the trend data for every report of a given company.
bp_admin_trend = Blueprint('admin_trend', __name__, url_prefix='/admin/trend')


class SummaryParaSchema(Schema):
    """Request schema: the company name to look up."""
    name = fields.String()
@bp_admin_trend.route("/", methods=['POST'])
@login_required
def trend_get():
json = request.get_json()
data, errors = SummaryParaSchema().load(json)
if errors:
return error_jsonify(10000001, errors)
tmp_user = Info.query.filter_by(name=data['name']).first() # 找到企业姓名对应的user_id
if tmp_user is None:
return error_jsonify(10000019)
data_list = DataCollection.query.filter_by(user_id=tmp_user.user_id).all() # 找到所有的填报信息
res = []
for i in data_list:
if i.status == 0 or i.status == 1 or i.status == 4:
continue
tmp = {}
time = ReportTime.query.filter_by(id=i.time_id).first()
amount = int(float(i.check - i.filing) / i.check * 100)
st_time = time.start_time.date().strftime("%Y-%m-%d")
ed_time = time.end_time.date().strftime("%Y-%m-%d")
tmp['amount'] = amount
tmp['date'] = st_time + '-->' + ed_time
res.append(tmp)
return jsonify(res)
| StarcoderdataPython |
11359506 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
class Explosion():
    """Expanding-ring explosion animation drawn onto a 2D character canvas.

    Each call to paint() draws the next frame (five frames total,
    growing from a single 'E' to a 9x9 ring) centred on (posX, posY),
    then returns True once the animation has finished.

    Fixed relative to the original:
      * stage/stages/i were class attributes — every Explosion shared
        one ever-growing stages list; they are now instance state.
      * stageFive was built but never appended, and the shared list
        started with a junk empty frame; the sequence is now exactly
        the five defined stages.
      * `h/2` / `w/2` used true division, which produces float indices
        (a TypeError on Python 3); now floor division.
    """
    def __init__(self, posX, posY):
        stageOne = [["E"]]
        stageTwo = [["E","E","E"],["E"," ","E"],["E","E","E"]]
        stageThree = [[" ","E","E","E"," "],["E"," "," "," ","E"],["E"," "," "," ","E"],["E"," "," "," ","E"],[" ","E","E","E"," "]]
        stageFour = [[" "," ","E","E","E"," "," "],[" ","E"," "," "," ","E"," "],["E"," "," "," "," "," ","E"],["E"," "," "," "," "," ","E"],["E"," "," "," "," "," ","E"],[" ","E"," "," "," ","E"," "],[" "," ","E","E","E"," "," "]]
        stageFive = [[" "," ","E","E","E","E","E"," "," "],[" ","E","E"," "," "," ","E","E"," "],["E","E"," "," "," "," "," ","E","E"],["E"," "," "," "," "," "," "," ","E"],["E"," "," "," "," "," "," "," ","E"],["E"," "," "," "," "," "," "," ","E"],["E","E"," "," "," "," "," ","E","E"],[" ","E","E"," "," "," ","E","E"," "],[" "," ","E","E","E","E","E"," "," "]]
        # Per-instance animation state.
        self.stages = [stageOne, stageTwo, stageThree, stageFour, stageFive]
        self.stage = []
        self.i = 0
        self.posX = posX
        self.posY = posY

    def paint(self, canvas, width, height):
        """Draw the current frame onto *canvas* (a list of row lists).

        Returns True when all frames have been painted, False otherwise.
        Cells outside the width x height bounds are clipped.
        """
        if self.i > 4:
            return True
        self.stage = self.stages[self.i]
        h = len(self.stage)
        w = len(self.stage[0])
        for y in range(h):
            for x in range(w):
                c = self.stage[y][x]
                if c != " ":
                    # Centre the frame on (posX, posY); // keeps indices int.
                    Y = int(y + self.posY) - h // 2
                    X = int(x + self.posX) - w // 2
                    if 0 <= Y < height and 0 <= X < width:
                        canvas[Y][X] = c
        self.i += 1
        return False
| StarcoderdataPython |
6652025 | import os
import sys
import ast
import functools
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def get_PointNet_train_valid_test_loader(root, target, max_Miller, diffraction, cell_type,
                                         randomly_scale_intensity,
                                         systematic_absence,
                                         batch_size, num_data_workers):
    """Build 70/15/15 train/valid/test DataLoaders over a PointNetDataset.

    The split is positional over the dataset's index range; the dataset
    shuffles its id/property table on load, so contiguous index ranges
    are effectively random subsets.
    """
    # construct dataset
    dataset = PointNetDataset(root=root,
                              target=target,
                              max_Miller=max_Miller,
                              diffraction=diffraction,
                              cell_type=cell_type,
                              randomly_scale_intensity=randomly_scale_intensity,
                              systematic_absence=systematic_absence)
    train_ratio = 0.7
    valid_ratio = 0.15
    total_size = len(dataset)
    indices = list(range(total_size))
    # Partition indices: [0, 70%) train, [70%, 85%) valid, [85%, 100%) test.
    train_split = int(np.floor(total_size * train_ratio))
    train_sampler = SubsetRandomSampler(indices[:train_split])
    valid_split = train_split + int(np.floor(total_size * valid_ratio))
    valid_sampler = SubsetRandomSampler(indices[train_split:valid_split])
    test_sampler = SubsetRandomSampler(indices[valid_split:])
    # train DataLoader
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              num_workers=num_data_workers,
                              sampler=train_sampler)
    # valid DataLoader
    valid_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              num_workers=num_data_workers,
                              sampler=valid_sampler)
    # test DataLoader
    test_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             num_workers=num_data_workers,
                             sampler=test_sampler)
    return train_loader, valid_loader, test_loader
class PointNetDataset(Dataset):
    """Diffraction point-cloud dataset for PointNet-style binary classifiers.

    Each sample is loaded from ``<root>/<material_id>_<diffraction>_<cell_type>.npy``
    whose rows are (h, k, l, x, y, z, I_hkl), and is returned as a
    (4, n_points) float tensor of (x, y, z, intensity) together with a
    binary label obtained by thresholding the target property.
    """
    def __init__(self, root, target, max_Miller, diffraction, cell_type, randomly_scale_intensity, systematic_absence):
        self.root = root
        self.target = target
        self.max_Miller = max_Miller
        self.dtype = diffraction
        self.ctype = cell_type
        self.scale = randomly_scale_intensity
        self.sys_abs = systematic_absence
        # id_prop.csv maps material ids to target property values.
        id_prop_data = pd.read_csv(os.path.join(root, 'id_prop.csv'), \
                                   header=0, sep=',', index_col=None)
        id_prop_data = id_prop_data[['material_id', self.target]]
        # Shuffle once so positional train/valid/test splits are random.
        id_prop_data = id_prop_data.sample(frac=1)
        self.id_prop = id_prop_data.values
        print('randomly scale intensity: {}, systematic absence: {}'.format(self.scale, self.sys_abs))

    def __getitem__(self, idx):
        material_id, target_prop = self.id_prop[idx]
        # load point cloud data (h, k, l, x, y, z, I_hkl)
        feat_all = np.load(os.path.join(self.root, material_id+f'_{self.dtype}_{self.ctype}.npy'))
        if self.max_Miller > 0:
            # Full cube of Miller indices with |h|,|k|,|l| <= max_Miller.
            condition1 = np.where((np.max(feat_all[:,:3], axis=1) <= self.max_Miller) & \
                                  (np.min(feat_all[:,:3], axis=1) >= -self.max_Miller))
            feat_select = feat_all[condition1]
            assert(feat_select.shape[0] == (2*self.max_Miller+1)**3)
        elif self.max_Miller == -1:
            # Special case: only the four points (000), (100), (010), (001).
            condition2 = np.where((np.max(feat_all[:,:3], axis=1) <= 1) & \
                                  (np.min(feat_all[:,:3], axis=1) >= 0) & \
                                  (np.sum(feat_all[:,:3], axis=1) <= 1))
            feat_select = feat_all[condition2]
            assert(feat_select.shape[0] == 4)
        else:
            # Otherwise: only the three axis reflections (100), (010), (001).
            condition3 = np.where((np.min(feat_all[:,:3], axis=1) == 0) & \
                                  (np.sum(feat_all[:,:3], axis=1) == 1))
            feat_select = feat_all[condition3]
            assert(feat_select.shape[0] == 3)
        # ensure permutation invariance
        np.random.shuffle(feat_select)
        # rescale xyz and randomly rotate.
        # NOTE(review): the /5. divisor presumably matches the reciprocal-space
        # extent of the stored coordinates — confirm against data generation.
        xyz = feat_select[:,3:6] / 5.
        M = np.random.randn(3,3)
        # QR of a Gaussian matrix yields a random orthogonal rotation.
        Q, __ = np.linalg.qr(M)
        xyz = xyz @ Q
        # rescale intensity into [0, 1] on a log scale (1E-6 floor avoids log(0)).
        intensity = feat_select[:,-1:]
        intensity = (np.log(intensity+1E-6) - np.log(1E-6)) / 15.
        # randomly scale intensity (augmentation); mutually exclusive with sys_abs
        if self.scale:
            assert (not self.sys_abs)
            rand = np.random.random()
            intensity *= rand
        # systematic absence: binarise intensities into present/absent
        if self.sys_abs:
            assert (not self.scale)
            intensity = (intensity>1E-3).astype(int)
        input_feat = np.concatenate((xyz, intensity), axis=1)
        # Property-specific binary-classification thresholds.
        # NOTE(review): magic values — document their provenance (e.g. dataset
        # medians or physically meaningful cut-offs).
        if self.target == 'band_gap':
            threshold = 1E-3
        elif self.target == 'e_above_hull':
            threshold = 0.02
        elif self.target == 'bulk_modulus':
            threshold = 85.
        elif self.target == 'shear_modulus':
            threshold = 34.
        else:
            raise NotImplementedError
        # Transpose to (features, points) as expected by PointNet Conv1d stacks.
        return torch.Tensor(input_feat.transpose()), torch.LongTensor([target_prop>threshold])

    def __len__(self):
        return self.id_prop.shape[0]
| StarcoderdataPython |
8014236 | from functools import wraps
from flask import current_app, abort, request
def requires_debug(view):
    """Decorator restricting *view* to Flask debug mode.

    When the application is not in debug mode the view responds 404 (strict
    mode) or 403 (non-strict).  In strict mode — the default unless
    ``FLASK_DEBUG_DISABLE_STRICT`` is set — extra checks ensure the request
    really comes from a local Werkzeug development server with no proxy in
    front, to avoid exposing debug views on a production deployment.
    """
    @wraps(view)
    def _(*args, **kwargs):
        strict = not current_app.config.get('FLASK_DEBUG_DISABLE_STRICT',
                                            False)
        if not current_app.debug:
            if strict:
                abort(404)  # don't even show we have flask-debug installed
            abort(403, 'This function is only available if the application '
                       'has been started in debug mode.')
        # Collected human-readable reasons for refusing the request.
        # (The original code initialized this list twice; once is enough.)
        msg = []
        if strict:
            # extra security checks: only a local Werkzeug dev server on the
            # default port, without a proxy in front, is accepted
            strict_env = {
                'SERVER_NAME': '127.0.0.1',
                'REMOTE_ADDR': '127.0.0.1',
                'SERVER_PORT': '5000',
            }
            for env, val in strict_env.items():
                if request.environ.get(env, None) != val:
                    msg.append('{} is not {!r}.'
                               .format(env, val))
            if not request.environ.get('SERVER_SOFTWARE', '').startswith(
                'Werkzeug/'
            ):
                msg.append('Not running on Werkzeug-Server.')
            if 'X-Forwarded-For' in request.headers:
                msg.append('Request has a X-Forwarded-For header.')
            if msg:
                msg.append('Strict security checks are enabled, to prevent '
                           'security issues in case you have forgotten to '
                           'disable debugging on a production system. You '
                           'can disable these by setting '
                           'FLASK_DEBUG_DISABLE_STRICT to True '
                           'in your applications configuration.')
        if msg:
            abort(403, '\n\n'.join(msg))
        return view(*args, **kwargs)
    return _
| StarcoderdataPython |
12802113 | <reponame>mfbsouza/PID-Rocket
from rocket import Rocket
from pid import PID
import matplotlib.pylab as plt
def main():
    """Run a PID-controlled rocket simulation and plot the recorded traces.

    The rocket chases a 130 m setpoint; at t = 47 s the setpoint drops to
    80 m.  Height, speed, acceleration, controller output and setpoint are
    plotted against time at the end of the run.
    """
    # Controller gains.
    kp = 1.5
    kd = 2.7
    ki = 0.007
    # Simulation parameters.
    simulation_time = 90.0  # 90 seconds
    rocket_mass = 10        # 10 kg
    target_height = 130.0   # 130 meters
    rocket_ship = Rocket(rocket_mass)
    pid_controller = PID(kp, ki, kd, target_height, 0.0, 1.0)
    # Recorded traces, one sample per 10 ms step.
    times, heights, speeds, accels, outputs, setpoints = [], [], [], [], [], []
    t = 0.01
    while t <= simulation_time:
        throttle = pid_controller.compute(t, rocket_ship.get_height())
        height, speed, accel = rocket_ship.update(t, throttle)
        # Drop the setpoint partway through the flight.
        if t >= 47.0 and t < 48.0:
            target_height = 80.0
            pid_controller.update_setpoint(target_height)
        heights.append(height)
        speeds.append(speed)
        accels.append(accel)
        outputs.append(100 * throttle)
        setpoints.append(target_height)
        times.append(t)
        t = t + 0.01
    # ploting
    _, ax = plt.subplots()
    for series, label in ((heights, "height (meters)"),
                          (speeds, "speed (m/s)"),
                          (accels, "acceleration (m/s²)"),
                          (outputs, "PID output"),
                          (setpoints, "setpoint")):
        ax.plot(times, series, label=label)
    ax.set(xlabel='time (s)')
    ax.grid()
    plt.legend()
    plt.show()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3518130 | <reponame>Twente-Mining/tezos-reward-distributor
import json
from util.client_utils import clear_terminal_chars
def parse_json_response(client_response, verbose=None):
    """Extract and decode the JSON payload from a client response string.

    The client output may carry a disclaimer header and terminal control
    characters before the JSON document, so control characters are stripped
    and everything before the first JSON start marker is discarded.

    :param client_response: raw text produced by the client.
    :param verbose: when truthy, print the extracted JSON string.
    :returns: the decoded JSON value.
    :raises Exception: if no JSON start marker can be found.
    """
    client_response = clear_terminal_chars(client_response)
    # Take the EARLIEST of '{', '[' or '"'.  Probing the markers in a fixed
    # order (the previous behavior) mis-parsed array responses containing
    # objects, because the inner '{' was found before the leading '['.
    candidates = [i for i in (client_response.find(c) for c in ('{', '[', '"'))
                  if i >= 0]
    if not candidates:
        raise Exception("Unknown client response format")
    response_str = client_response[min(candidates):].strip()
    if verbose:
        print("parsed json response_str is '{}'".format(response_str))
    return json.loads(response_str)
| StarcoderdataPython |
9632172 | import logging
from collections.abc import Sequence
from contextlib import AsyncExitStack
from typing import Optional
import aiohttp
from .cluster import Cluster
from .cluster_config import ClusterConfig
from .config import RegistryConfig, StorageConfig
from .orchestrator.kube_client import KubeClient, NodeWatcher, PodWatcher
from .orchestrator.kube_config import KubeConfig
from .orchestrator.kube_orchestrator import KubeOrchestrator, Orchestrator
# Module-level logger following the standard per-module convention.
logger = logging.getLogger(__name__)
class KubeCluster(Cluster):
    """Cluster implementation backed by a Kubernetes orchestrator.

    Owns a KubeClient plus node/pod watchers, all registered on a single
    AsyncExitStack so that close() tears everything down in reverse order
    of initialization.
    """
    # Set by _init_orchestrator(); accessing .orchestrator before init()
    # raises AttributeError.
    _orchestrator: Orchestrator
    def __init__(
        self,
        registry_config: RegistryConfig,
        storage_configs: Sequence[StorageConfig],
        cluster_config: ClusterConfig,
        kube_config: KubeConfig,
        trace_configs: Optional[list[aiohttp.TraceConfig]] = None,
    ) -> None:
        # Configuration is only stored here; all I/O happens in init().
        self._registry_config = registry_config
        self._storage_configs = storage_configs
        self._cluster_config = cluster_config
        self._kube_config = kube_config
        self._trace_configs = trace_configs
        self._exit_stack = AsyncExitStack()
    @property
    def config(self) -> ClusterConfig:
        """The cluster configuration this instance was constructed with."""
        return self._cluster_config
    @property
    def orchestrator(self) -> Orchestrator:
        """The KubeOrchestrator created by init()."""
        return self._orchestrator
    async def init(self) -> None:
        """Enter the exit stack and build the orchestrator and its clients."""
        await self._exit_stack.__aenter__()
        await self._init_orchestrator()
    async def _init_orchestrator(self) -> None:
        """Create the Kube client, watchers and orchestrator, and register
        them on the exit stack for orderly teardown."""
        logger.info(f"Cluster '{self.name}': initializing Orchestrator")
        kube_client = KubeClient(
            base_url=self._kube_config.endpoint_url,
            cert_authority_data_pem=self._kube_config.cert_authority_data_pem,
            cert_authority_path=self._kube_config.cert_authority_path,
            auth_type=self._kube_config.auth_type,
            auth_cert_path=self._kube_config.auth_cert_path,
            auth_cert_key_path=self._kube_config.auth_cert_key_path,
            token=self._kube_config.token,
            token_path=self._kube_config.token_path,
            namespace=self._kube_config.namespace,
            conn_timeout_s=self._kube_config.client_conn_timeout_s,
            read_timeout_s=self._kube_config.client_read_timeout_s,
            conn_pool_size=self._kube_config.client_conn_pool_size,
            trace_configs=self._trace_configs,
        )
        # Watch only nodes carrying the job label (if one is configured).
        node_watcher = NodeWatcher(kube_client, labels=self._get_job_node_labels())
        pod_watcher = PodWatcher(kube_client)
        orchestrator = KubeOrchestrator(
            cluster_name=self.name,
            storage_configs=self._storage_configs,
            registry_config=self._registry_config,
            orchestrator_config=self._cluster_config.orchestrator,
            kube_config=self._kube_config,
            kube_client=kube_client,
        )
        # The orchestrator subscribes to watcher events before they start.
        orchestrator.register(node_watcher, pod_watcher)
        await self._exit_stack.enter_async_context(kube_client)
        await self._exit_stack.enter_async_context(node_watcher)
        await self._exit_stack.enter_async_context(pod_watcher)
        self._orchestrator = orchestrator
    def _get_job_node_labels(self) -> dict[str, str]:
        """Label selector for job nodes; empty when no label is configured."""
        labels = {}
        if self._kube_config.node_label_job:
            labels[self._kube_config.node_label_job] = "true"
        return labels
    async def close(self) -> None:
        """Unwind the exit stack, closing watchers and the Kube client."""
        await self._exit_stack.__aexit__(None, None, None)
| StarcoderdataPython |
305390 | <gh_stars>0
import rvo2
import numpy as np
import matplotlib.pyplot as plt
# Demo: four RVO2 agents crossing a small obstacle field.
# NOTE(review): constructor arguments are presumably (timeStep, neighborDist,
# maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed) — confirm
# against the RVO2 documentation.
sim = rvo2.PyRVOSimulator(1/100., 1, 5, 1.5, 1.5, 0.5, 5)
# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0))
a1 = sim.addAgent((1, 0))
a2 = sim.addAgent((1, 1))
a3 = sim.addAgent((0, 1))
# Obstacles are also supported.
o1 = sim.addObstacle([(0.1, 0.1), (-0.1, 0.1), (-0.1, -0.1),(0.5,0.5),(0.3,0.3),(0.2,0.2),(0.2,0.5)])
# Obstacles must be processed before the simulation steps use them.
sim.processObstacles()
# Each agent prefers to move diagonally across the square.
sim.setAgentPrefVelocity(a0, (5, 5))
sim.setAgentPrefVelocity(a1, (-5, 5))
sim.setAgentPrefVelocity(a2, (-5, -5))
sim.setAgentPrefVelocity(a3, (5, -5))
print('Simulation has %i agents and %i obstacle vertices in it.' %
      (sim.getNumAgents(), sim.getNumObstacleVertices()))
print('Running simulation')
# Step the simulator 80 times, printing positions and live-plotting each frame.
for step in range(80):
    sim.doStep()
    positions = ['(%5.3f, %5.3f)' % sim.getAgentPosition(agent_no)
                 for agent_no in (a0, a1, a2, a3)]
    print('step=%2i t=%.3f %s' % (step, sim.getGlobalTime(), ' '.join(positions)))
    a00 = sim.getAgentPosition(a0)
    a01 = sim.getAgentPosition(a1)
    a02 = sim.getAgentPosition(a2)
    a03 = sim.getAgentPosition(a3)
    # Redraw the whole frame: agents in default colors, obstacle vertices in red.
    plt.cla()
    plt.title("Multi-Agent")
    plt.grid(True)
    plt.xlim(-2, 2)
    plt.ylim(-2, 2)
    plt.scatter(a00[0],a00[1],s=40)
    plt.scatter(a01[0],a01[1],s=40)
    plt.scatter(a02[0],a02[1],s=40)
    plt.scatter(a03[0],a03[1],s=40)
    plt.scatter(-0.1, -0.1,s=30,c='R')
    plt.scatter(0.1, 1,s=30,c='R')
    plt.scatter(-0.1, 1,s=30,c='R')
    plt.scatter(0.5, 0.5,s=30,c='R')
    plt.scatter(0.3, 0.3,s=30,c='R')
    plt.scatter(0.2, 0.2,s=30,c='R')
    plt.scatter(0.2, 0.5,s=30,c='R')
    plt.pause(0.005)
| StarcoderdataPython |
12822421 | # coding: utf-8
from typing import Dict, List # noqa: F401
from fastapi import ( # noqa: F401
APIRouter,
Body,
Cookie,
Depends,
Form,
Header,
Path,
Query,
Request,
Response,
Security,
status,
)
from acapy_wrapper.models.extra_models import TokenModel # noqa: F401
from acapy_wrapper.models.action_menu_fetch_result import ActionMenuFetchResult
from acapy_wrapper.models.perform_request import PerformRequest
from acapy_wrapper.models.send_menu import SendMenu
from api import acapy_utils as au
# Router collecting the ACA-Py "action-menu" admin endpoints.  Every handler
# follows the same pattern: proxy the incoming request unchanged to the
# ACA-Py admin API via acapy_utils and return the upstream response body.
# (Comments are used instead of docstrings so the generated OpenAPI
# descriptions are not changed.)
router = APIRouter()
@router.post(
    "/action-menu/{conn_id}/close",
    responses={
        200: {"model": dict, "description": ""},
    },
    tags=["action-menu"],
    summary="Close the active menu associated with a connection",
)
async def action_menu_conn_id_close_post(
    request: Request,
    conn_id: str = Path(None, description="Connection identifier"),
) -> dict:
    # conn_id is not used directly: the proxied call forwards the raw
    # request (including its path) to the admin API.
    resp_text = await au.acapy_admin_request_from_request(request)
    return resp_text
@router.post(
    "/action-menu/{conn_id}/fetch",
    responses={
        200: {"model": ActionMenuFetchResult, "description": ""},
    },
    tags=["action-menu"],
    summary="Fetch the active menu",
)
async def action_menu_conn_id_fetch_post(
    request: Request,
    conn_id: str = Path(None, description="Connection identifier"),
) -> ActionMenuFetchResult:
    # Proxy to the admin API; response is the fetched menu.
    resp_text = await au.acapy_admin_request_from_request(request)
    return resp_text
@router.post(
    "/action-menu/{conn_id}/perform",
    responses={
        200: {"model": dict, "description": ""},
    },
    tags=["action-menu"],
    summary="Perform an action associated with the active menu",
)
async def action_menu_conn_id_perform_post(
    request: Request,
    conn_id: str = Path(None, description="Connection identifier"),
    body: PerformRequest = Body(None, description=""),
) -> dict:
    # body is documented for OpenAPI; the raw request (which carries it)
    # is forwarded as-is.
    resp_text = await au.acapy_admin_request_from_request(request)
    return resp_text
@router.post(
    "/action-menu/{conn_id}/request",
    responses={
        200: {"model": dict, "description": ""},
    },
    tags=["action-menu"],
    summary="Request the active menu",
)
async def action_menu_conn_id_request_post(
    request: Request,
    conn_id: str = Path(None, description="Connection identifier"),
) -> dict:
    # Proxy to the admin API.
    resp_text = await au.acapy_admin_request_from_request(request)
    return resp_text
@router.post(
    "/action-menu/{conn_id}/send-menu",
    responses={
        200: {"model": dict, "description": ""},
    },
    tags=["action-menu"],
    summary="Send an action menu to a connection",
)
async def action_menu_conn_id_send_menu_post(
    request: Request,
    conn_id: str = Path(None, description="Connection identifier"),
    body: SendMenu = Body(None, description=""),
) -> dict:
    # Proxy to the admin API; the menu payload travels in the raw request.
    resp_text = await au.acapy_admin_request_from_request(request)
    return resp_text
| StarcoderdataPython |
4919320 | <filename>automapping-stuff/bh_hierarchy_expand.py
import re
from collections import deque
import networkx as nx
from networkx.algorithms import isomorphism
import matplotlib.pyplot as plt
import pandas as pd
import brickschema
from brickschema.namespaces import BRICK, RDFS
from fuzzywuzzy import process, fuzz
class Hierarchy:
    """Abstract frontier-based hierarchy walker.

    Subclasses provide ``self.frontier`` (a list of nodes) and a
    ``children(node)`` method.  ``expand`` replaces the frontier with the
    concatenation of every frontier node's children, in order.
    """

    def advance(self):
        # Subclasses must override this hook.
        raise NotImplementedError

    def expand(self):
        """Replace the current frontier with all of its children."""
        self.frontier = [child
                         for node in self.frontier
                         for child in self.children(node)]
class BrickHierarchy(Hierarchy):
    """Hierarchy over the Brick ontology's Point and Equipment subtrees.

    Loads a Brick Turtle file and indexes every labelled class under
    brick:Point or brick:Equipment; traversal follows rdfs:subClassOf
    downwards starting from Temperature_Sensor.
    """
    # SPARQL query fetching every labelled class under Point or Equipment,
    # together with which of the two roots ("point"/"equip") it belongs to.
    _q = """SELECT ?class ?label ?base WHERE {
        ?class rdfs:label ?label .
        {
            ?class rdfs:subClassOf* brick:Point .
            BIND ("point" as ?base)
        }
        UNION
        {
            ?class rdfs:subClassOf* brick:Equipment .
            BIND ("equip" as ?base)
        }
    }"""
    def __init__(self, filename):
        # filename: path to a Brick ontology Turtle file
        self.brick_file = filename
        self.g = brickschema.Graph().load_file(filename)
        # Index: class URI -> {label, tags (sorted label words), class, base}
        self.brick = {}
        for row in self.g.query(self._q):
            self.brick[row[0]] = {
                # rdfs label of the class
                'label': str(row[1]).lower(),
                # split the label into words
                'tags': tuple(sorted(str(row[1]).lower().split(' '))),
                # the Brick class itself
                'class': row[0],
                # the useful Brick root class
                'base': str(row[2]),
            }
        # traversal starts at Temperature_Sensor
        self.frontier = [BRICK.Temperature_Sensor]
    def children(self, node):
        """Direct subclasses of *node* in the loaded graph."""
        return self.g.subjects(predicate=RDFS.subClassOf, object=node)
    def all_nodes(self):
        """Yield every node reachable from the frontier, breadth-first."""
        q = deque(self.frontier)
        while q:
            node = q.popleft()
            yield node
            q.extend(self.children(node))
    def to_hierarchy(self):
        """Build a networkx DiGraph of the reachable subclass hierarchy."""
        g = nx.DiGraph()
        for node in self.all_nodes():
            g.add_node(node)
        for node in self.all_nodes():
            for child in self.children(node):
                g.add_edge(node, child)
        return g
    @property
    def frontier_labels(self):
        """Human-readable labels of the current frontier nodes."""
        return [self.brick[node]['label'] for node in self.frontier]
class HaystackHierarchy(Hierarchy):
    """Hierarchy over Haystack prototype tag sets loaded from a CSV.

    Each CSV row is a prototype: a 'proto' string plus one column per tag.
    Abbreviated tags are expanded to full words so they can be compared
    against Brick labels; a node's children are the tag sets that contain
    exactly one additional tag.
    """
    def __init__(self, filename):
        # filename: CSV with a 'proto' column and one boolean-ish column per tag
        self.haystack_file = filename
        df = pd.read_csv(filename)
        # Index: sorted tag tuple -> {base, label, proto, tags}
        self.haystack = {}
        # Haystack abbreviation -> full word, applied both to the proto
        # string and to the tag set.
        replacements = {
            'equip': 'equipment',
            'sp': 'setpoint',
            'cmd': 'command',
            'elec': 'electrical',
            'freq': 'frequency',
            'occ': 'occupied',
            'temp': 'temperature',
        }
        for _, row in df.iterrows():
            proto = row.pop('proto')
            original = proto
            tags = set(row.dropna().keys())
            for key, value in replacements.items():
                proto = re.sub(f"{key}", f"{value}", proto)
                if key in tags:
                    tags.remove(key)
                    tags.add(value)
            tags = tuple(sorted(tags))
            self.haystack[tags] = ({
                # the essential 'type' of the tag set
                'base': 'point' if 'point' in tags else 'equip' if 'equipment' in tags else '',
                # a clean text label
                'label': proto,
                # the original proto string
                'proto': original,
                # tags associated with the proto
                'tags': tags,
            })
        # traversal starts at the temperature-sensor point prototype
        self.frontier = [("point", "sensor", "temperature")]
    def tagset(self, node):
        """Tag tuple stored for *node* (its own key)."""
        return self.haystack[node]['tags']
    def children(self, node):
        """Tag sets that extend *node*'s tags by exactly one tag."""
        return [k for k, v in self.haystack.items()
                if set(v['tags']).issuperset(self.tagset(node))
                and len(v['tags']) == len(self.tagset(node)) + 1]
    def to_hierarchy(self):
        """Build a networkx DiGraph of the one-tag-extension hierarchy."""
        g = nx.DiGraph()
        for node in self.haystack.keys():
            g.add_node(node)
        for node in self.haystack.keys():
            for child in self.children(node):
                g.add_edge(node, child)
        return g
    @property
    def frontier_labels(self):
        """Labels of the current frontier nodes."""
        return self.labels_for(self.frontier)
    def labels_for(self, queue):
        """Labels for an arbitrary iterable of tag-tuple nodes."""
        return [self.haystack[node]['label'] for node in queue]
    @property
    def frontier_tags(self):
        """Tag tuples of the current frontier nodes."""
        return self.tags_for(self.frontier)
    def tags_for(self, queue):
        """Tag tuples for an arbitrary iterable of tag-tuple nodes."""
        return [self.haystack[node]['tags'] for node in queue]
# Module-level demo objects: the Brick ontology and the Haystack prototype
# tag sets (paths are relative to this script's location in the checkout).
brick = BrickHierarchy("../../Brick.ttl")
ph = HaystackHierarchy("protos.csv")
class MyGraphMapper(isomorphism.DiGraphMatcher):
    """Graph matcher between the Brick and Haystack hierarchies.

    Builds both hierarchy digraphs (saving renderings to brick.png and
    ph.png) and runs networkx's directed-graph isomorphism matcher.  Both
    match callbacks currently accept every node pair and just print it,
    so this is a debugging/exploration aid rather than a real mapping.
    """
    def __init__(self, brick, haystack):
        self.brick = brick
        self.brick_hierarchy = brick.to_hierarchy()
        nx.draw(self.brick_hierarchy)
        plt.savefig('brick.png')
        self.ph = haystack
        self.ph_hierarchy = haystack.to_hierarchy()
        nx.draw(self.ph_hierarchy)
        plt.savefig('ph.png')
        super().__init__(self.brick_hierarchy, self.ph_hierarchy, self.node_match)
    def node_match(self, brick_node, ph_node):
        # always matches; prints the candidate pair for inspection
        print(brick_node, ph_node)
        return True
    def semantic_feasibility(self, brick_node, ph_node):
        # always feasible; prints the candidate pair for inspection
        print(brick_node, ph_node)
        return True
# Attempt an isomorphism between the two hierarchies and report the result
# and the node mapping that was found.
matcher = MyGraphMapper(brick, ph)
print(matcher.is_isomorphic())
print(matcher.mapping)
# for brick_node in brick.all_nodes():
# brick_tags = brick.brick[brick_node]['tags']
# print(f"{brick_node} {brick_tags}")
# phstack = deque(ph.frontier)
# while phstack:
# node = phstack.popleft()
# print(fuzz.token_sort_ratio(brick_tags, node), node)
# phstack.extend(ph.children(node))
# mapping = {}
# haystackstack = deque(ph.frontier)
# brick_nodes = brick.all_nodes()
# node = next(brick_nodes)
# node_match = ' '.join(brick.brick[node]['tags'])
# print(f"Compare node {node}")
# while len(haystackstack) > 0:
# match_map = {' '.join(ts): ts for ts in haystackstack}
# res = process.extractOne(node_match, match_map.keys())
# assert res is not None
# choice, score = res
# choice = match_map[choice]
# print(f"{brick.brick[node]['label']} -> {ph.haystack[choice]['label']} with score {score}")
# if score < 90:
# new_node = haystackstack.popleft()
# haystackstack.extend(ph.children(new_node))
# continue
# mapping[node] = choice
# haystackstack.extend(ph.children(choice))
# haystackstack.remove(choice)
# try:
# node = next(brick_nodes)
# except StopIteration:
# break
from pprint import pprint
# pprint(mapping)
# print(f"Brick frontier: {brick.frontier_labels}")
# print(f"Haystack frontier: {ph.frontier_labels}")
# for node in brick.frontier:
# print(node)
# res = process.extract(brick.brick[node]['label'], ph.frontier_labels)
# print(res)
| StarcoderdataPython |
1624502 | <reponame>antiprism/antiprism_python
#!/usr/bin/env python3
# Copyright (c) 2014-2016 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Place maximum radius rings of contacting balls around points on a sphere.
Input is a list of coordinates, one set per line.
'''
import argparse
import sys
import math
import re
import anti_lib
from anti_lib import Vec, Mat
def ring_ball_ang(N, a):
    """Solve, by bisection, for the ball half-angle on a ring of N balls.

    Finds b in (0, a/2) such that sin(b/2)/sin(a-b) equals sin(pi/(2N)),
    i.e. the largest angular ball radius for which N balls placed on a
    ring of angular radius *a* just touch each other.
    """
    target = math.sin(math.pi / (2 * N))
    lo, hi = 0, a / 2
    b = 0.0
    # 100 halvings shrink the bracket far below float precision.
    for _ in range(100):
        half = (hi - lo) / 2
        b = (lo + hi) / 2
        if math.sin(b / 2) / math.sin(a - b) > target:
            hi -= half
        else:
            lo += half
    return b
def make_ring(R, N, a):
    """Return (points, b): N ball centres on a ring, plus the ball half-angle.

    The ring lies on a sphere of radius R around the +z axis at angular
    radius *a*; each centre sits at angle a-b from the axis so the balls
    stay inside the ring's angular extent.
    """
    b = ring_ball_ang(N, a)
    tilt = a - b
    seed = Vec(R * math.sin(tilt), 0, R * math.cos(tilt))
    # Copies of the seed point, evenly rotated about the z axis.
    points = [seed.rot_z(2 * math.pi * i / N) for i in range(N)]
    return points, b
def read_coords():
    """Read one 'x, y, z' coordinate per line from stdin until EOF.

    Blank lines are skipped.  On a malformed line an error message is
    written to stderr and the program exits with status 1.

    :returns: list of anti_lib.Vec points.
    """
    points = []
    while True:
        line = sys.stdin.readline()
        if line == '\n':
            continue  # skip blank lines
        if line == "":
            break     # EOF
        m = re.search('^ *([^ ,]+) *,? *([^ ,]+) *,? *([^ ,\n]+) *$', line)
        if not m:
            sys.stderr.write(
                'error: did not find x, y and z values in following '
                'line (1):\n')
            sys.stderr.write(line)
            sys.exit(1)
        try:
            points.append(Vec(*[float(m.group(i)) for i in range(1, 3+1)]))
        except ValueError:
            # a field matched the pattern but is not a valid float
            # (was a bare `except:`, which also swallowed SystemExit etc.)
            sys.stderr.write(
                'error: did not find x, y and z values in following '
                'line (2):\n')
            sys.stderr.write(line)
            sys.exit(1)
    return points
def find_minimum_separation(points):
    """Return the smallest pairwise distance among *points*.

    Points must support subtraction yielding a vector with a ``mag2()``
    (squared magnitude) method.  O(n^2) over all pairs.
    """
    best_sq = 1e100
    n = len(points)
    for i in range(n - 1):
        for j in range(i + 1, n):
            separation_sq = (points[i] - points[j]).mag2()
            if separation_sq < best_sq:
                best_sq = separation_sq
    return math.sqrt(best_sq)
def main():
    """Entry point: read ring centres from stdin, emit an OFF file of balls."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'num_balls_on_ring',
        help='Number of balls on each ring',
        type=int,
        nargs='?',
        default=10)
    parser.add_argument(
        '-o', '--outfile',
        help='output file name (default: standard output)',
        type=argparse.FileType('w'),
        default=sys.stdout)
    args = parser.parse_args()
    ring_centres = read_coords()
    if not len(ring_centres):
        parser.error('no coordinates in input')
    # The centres lie on a sphere (per the module docstring); take its
    # radius from the first point.
    R = ring_centres[0].mag()
    dist = find_minimum_separation(ring_centres)
    # Half the angle subtended by the closest pair of centres: each ring
    # may extend this far without overlapping its neighbour.
    a = math.asin(dist/(2*R))
    ball_points, ball_ang = make_ring(R, args.num_balls_on_ring, a)
    print('ball radius = %.14f' % (2*R*math.sin(ball_ang/2)), file=sys.stderr)
    out = anti_lib.OffFile(args.outfile)
    out.print_header(len(ring_centres)*len(ball_points), 0)
    for cent in ring_centres:
        # Rotate the template ring from the +z axis onto each centre direction.
        mat = Mat.rot_from_to(Vec(0, 0, 1), cent)
        out.print_verts([mat * p for p in ball_points])
if __name__ == "__main__":
    main()
| StarcoderdataPython |
146763 | # type: ignore
from typing import Any
import qtvscodestyle as qtvsc
from PySide6.QtCore import QAbstractTableModel, QModelIndex, Qt
from PySide6.QtGui import QAction, QActionGroup, QTextOption
from PySide6.QtWidgets import (
QCheckBox,
QColorDialog,
QComboBox,
QDateTimeEdit,
QDial,
QDockWidget,
QFileDialog,
QGridLayout,
QGroupBox,
QLabel,
QLCDNumber,
QLineEdit,
QListWidget,
QMainWindow,
QMenuBar,
QProgressBar,
QPushButton,
QRadioButton,
QScrollArea,
QSizePolicy,
QSlider,
QSpinBox,
QSplitter,
QStackedWidget,
QStatusBar,
QTableView,
QTabWidget,
QTextEdit,
QToolBar,
QToolBox,
QToolButton,
QTreeWidget,
QTreeWidgetItem,
QVBoxLayout,
QWidget,
)
from qtvscodestyle.const import FaRegular
class _Group1(QGroupBox):
    """Demo group exercising button widgets.

    Builds four sub-groups (push, tool, radio, check buttons) in a 2x2 grid
    so the stylesheet's button states (normal, toggled, secondary) can be
    inspected.  Purely presentational; no signals are connected.
    """
    def __init__(self) -> None:
        super().__init__("Group 1")
        # VSCode icons
        favorite_icon = qtvsc.theme_icon(FaRegular.STAR)
        # Widgets
        group_push = QGroupBox("Push Button")
        group_tool = QGroupBox("Tool Button")
        group_radio = QGroupBox("Radio Button")
        group_checkbox = QGroupBox("Check Box")
        push_button_normal = QPushButton(text="NORMAL")
        push_button_toggled = QPushButton(text="TOGGLED")
        push_button_secondary_normal = QPushButton(text="NORMAL")
        push_button_secondary_toggled = QPushButton(text="TOGGLED")
        tool_button_normal, tool_button_toggled, tool_button_text = QToolButton(), QToolButton(), QToolButton()
        radio_button_normal_1, radio_button_normal_2 = QRadioButton("Normal 1"), QRadioButton("Normal 2")
        checkbox_normal, checkbox_tristate = QCheckBox("Normal"), QCheckBox("Tristate")
        # Setup widgets
        self.setCheckable(True)
        push_button_toggled.setCheckable(True)
        push_button_toggled.setChecked(True)
        push_button_secondary_toggled.setCheckable(True)
        push_button_secondary_toggled.setChecked(True)
        tool_button_normal.setIcon(favorite_icon)
        tool_button_toggled.setIcon(favorite_icon)
        tool_button_text.setIcon(favorite_icon)
        tool_button_text.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
        tool_button_text.setText("Text")
        tool_button_toggled.setCheckable(True)
        tool_button_toggled.setChecked(True)
        radio_button_normal_1.setChecked(True)
        checkbox_normal.setChecked(True)
        checkbox_tristate.setTristate(True)
        checkbox_tristate.setCheckState(Qt.CheckState.PartiallyChecked)
        # Setup qss property ("type" is a selector consumed by the stylesheet)
        push_button_secondary_normal.setProperty("type", "secondary")
        push_button_secondary_toggled.setProperty("type", "secondary")
        # Layout
        g_layout_push = QGridLayout()
        g_layout_push.addWidget(QLabel("Main"), 0, 0)
        g_layout_push.addWidget(push_button_normal, 1, 0)
        g_layout_push.addWidget(push_button_toggled, 2, 0)
        g_layout_push.addWidget(QLabel("Secondary"), 0, 1)
        g_layout_push.addWidget(push_button_secondary_normal, 1, 1)
        g_layout_push.addWidget(push_button_secondary_toggled, 2, 1)
        group_push.setLayout(g_layout_push)
        v_layout_tool = QVBoxLayout()
        v_layout_tool.addWidget(tool_button_normal)
        v_layout_tool.addWidget(tool_button_toggled)
        v_layout_tool.addWidget(tool_button_text)
        group_tool.setLayout(v_layout_tool)
        v_layout_radio = QVBoxLayout()
        v_layout_radio.addWidget(radio_button_normal_1)
        v_layout_radio.addWidget(radio_button_normal_2)
        group_radio.setLayout(v_layout_radio)
        v_layout_checkbox = QVBoxLayout()
        v_layout_checkbox.addWidget(checkbox_normal)
        v_layout_checkbox.addWidget(checkbox_tristate)
        group_checkbox.setLayout(v_layout_checkbox)
        g_layout_main = QGridLayout(self)
        g_layout_main.addWidget(group_push, 0, 0)
        g_layout_main.addWidget(group_tool, 0, 1)
        g_layout_main.addWidget(group_radio, 1, 0)
        g_layout_main.addWidget(group_checkbox, 1, 1)
class _Group2(QGroupBox):
    """Demo group exercising input widgets.

    Builds four sub-groups (spinbox, combobox, line edit, date/time edit)
    in a 2x2 grid, including the "warning"/"error" line-edit states used
    by the stylesheet.  Purely presentational.
    """
    def __init__(self) -> None:
        super().__init__("Group 2")
        # Widgets
        group_spinbox = QGroupBox("Spinbox")
        group_combobox = QGroupBox("Combobox")
        group_editable = QGroupBox("Line edit")
        group_date = QGroupBox("Date time edit")
        spinbox_normal, spinbox_suffix = QSpinBox(), QSpinBox()
        combobox_normal, combobox_line_edit = QComboBox(), QComboBox()
        lineedit_normal, lineedit_warning, lineedit_error = QLineEdit(), QLineEdit(), QLineEdit()
        date_time_edit_normal, date_time_edit_calendar = QDateTimeEdit(), QDateTimeEdit()
        # Setup ui
        self.setCheckable(True)
        spinbox_suffix.setSuffix(" m")
        texts = ["Item 1", "Item 2", "Item 3"]
        combobox_normal.addItems(texts)
        combobox_line_edit.addItems(texts)
        combobox_line_edit.setEditable(True)
        lineedit_normal.setPlaceholderText("Normal")
        lineedit_warning.setPlaceholderText("Warning")
        lineedit_error.setPlaceholderText("Error")
        date_time_edit_calendar.setCalendarPopup(True)
        # Setup qss property ("state" is a selector consumed by the stylesheet)
        lineedit_warning.setProperty("state", "warning")
        lineedit_error.setProperty("state", "error")
        # Layout
        v_layout_spin = QVBoxLayout()
        v_layout_spin.addWidget(spinbox_normal)
        v_layout_spin.addWidget(spinbox_suffix)
        group_spinbox.setLayout(v_layout_spin)
        v_layout_combo = QVBoxLayout()
        v_layout_combo.addWidget(combobox_normal)
        v_layout_combo.addWidget(combobox_line_edit)
        group_combobox.setLayout(v_layout_combo)
        v_layout_lineedit = QVBoxLayout()
        v_layout_lineedit.addWidget(lineedit_normal)
        v_layout_lineedit.addWidget(lineedit_warning)
        v_layout_lineedit.addWidget(lineedit_error)
        group_editable.setLayout(v_layout_lineedit)
        v_layout_date = QVBoxLayout()
        v_layout_date.addWidget(date_time_edit_normal)
        v_layout_date.addWidget(date_time_edit_calendar)
        group_date.setLayout(v_layout_date)
        g_layout_main = QGridLayout(self)
        g_layout_main.addWidget(group_spinbox, 0, 0)
        g_layout_main.addWidget(group_combobox, 0, 1)
        g_layout_main.addWidget(group_editable, 1, 0)
        g_layout_main.addWidget(group_date, 1, 1)
class _TableModel(QAbstractTableModel):
    """5x4 demo table model.

    Columns (see headerData): 0 plain display, 1 checkbox, 2 and 3 editable.
    Cell values are ``i*10 + j``; checkbox states live in a parallel list.
    """
    def __init__(self) -> None:
        super().__init__()
        # _data[i][j] == i*10 + j; _checks holds the column-1 check states
        self._data = [[i * 10 + j for j in range(4)] for i in range(5)]
        self._checks = [True if i % 2 == 0 else False for i in range(5)]
    def data(self, index: QModelIndex, role: int) -> Any:
        """Cell value for display, check state (col 1) or editing (col 2)."""
        if role == Qt.ItemDataRole.DisplayRole:
            return self._data[index.row()][index.column()]
        elif role == Qt.ItemDataRole.CheckStateRole and index.column() == 1:
            return Qt.CheckState.Checked if self._checks[index.row()] else Qt.CheckState.Unchecked
        elif role == Qt.ItemDataRole.EditRole and index.column() == 2:
            return self._data[index.row()][index.column()]
    def rowCount(self, index: QModelIndex) -> int:
        return len(self._data)
    def columnCount(self, index: QModelIndex) -> int:
        return len(self._data[0])
    def flags(self, index: QModelIndex) -> Qt.ItemFlag:
        # Per-column interactivity: 1 = checkable, 2 and 3 = editable.
        flag = super().flags(index)
        if index.column() == 1:
            flag |= Qt.ItemFlag.ItemIsEditable | Qt.ItemFlag.ItemIsUserCheckable
            return flag
        elif index.column() == 2:
            flag |= Qt.ItemFlag.ItemIsEditable | Qt.ItemFlag.ItemIsSelectable
            return flag
        elif index.column() == 3:
            flag |= Qt.ItemFlag.ItemIsEditable | Qt.ItemFlag.ItemIsSelectable
            return flag
    def setData(self, index: QModelIndex, value: Any, role: int) -> bool:
        # Only the check column is writable through the view.
        if role == Qt.ItemDataRole.CheckStateRole:
            self._checks[index.row()] = True if value == Qt.CheckState.Checked else False
            return True
        return False
    def headerData(self, section: int, orientation: Qt.Orientation, role: int = ...) -> Any:
        """Column titles for the horizontal header; default otherwise."""
        if role != Qt.ItemDataRole.DisplayRole:
            return
        if orientation == Qt.Orientation.Horizontal:
            return ["Normal", "Checkbox", "Spinbox", "LineEdit"][section]
        return super().headerData(section, orientation, role)
class _Group3(QGroupBox):
    """Demo group exercising item views inside a tab widget.

    One tab each for a text edit, a sortable table (backed by _TableModel),
    an alternating-row list and a two-column tree with child items.
    """
    def __init__(self) -> None:
        super().__init__("Group 3")
        # Widgets
        tab_widget = QTabWidget()
        tab_text_edit = QTextEdit()
        tab_table = QTableView()
        tab_list = QListWidget()
        tab_tree = QTreeWidget()
        # Setup ui
        self.setCheckable(True)
        tab_widget.setTabsClosable(True)
        tab_widget.setMovable(True)
        tab_text_edit.append("<b>QtVSCodeStyle</b>")
        tab_text_edit.append("VS Code style for QtWidgets application(Qt for python).")
        tab_text_edit.append("This project is licensed under the MIT license.")
        tab_text_edit.setWordWrapMode(QTextOption.WrapMode.NoWrap)
        tab_table.setModel(_TableModel())
        tab_table.setSortingEnabled(True)
        tab_list.addItems([f"Item {i+1}" for i in range(30)])
        tab_list.setAlternatingRowColors(True)
        tab_tree.setColumnCount(2)
        # Five top-level rows, each with two child rows spanning both columns.
        tree_widget_items = []
        for i in range(5):
            tree_widget_item = QTreeWidgetItem([f"Item {i+1}" for _ in range(2)])
            for j in range(2):
                tree_widget_child_item = QTreeWidgetItem([f"Child Item {i+1}_{j+1}" for _ in range(2)])
                tree_widget_item.addChild(tree_widget_child_item)
            tree_widget_items.append(tree_widget_item)
        tab_tree.addTopLevelItems(tree_widget_items)
        # layout
        tab_widget.addTab(tab_text_edit, "Text Edit")
        tab_widget.addTab(tab_table, "Table")
        tab_widget.addTab(tab_list, "List")
        tab_widget.addTab(tab_tree, "Tree")
        v_layout_main = QVBoxLayout(self)
        v_layout_main.addWidget(tab_widget)
class _Group4(QGroupBox):
    """Demo group exercising value-display widgets inside a toolbox.

    One toolbox page each for a slider, a dial, a progress bar and an LCD
    number, pre-set to representative values.
    """
    def __init__(self) -> None:
        super().__init__("Group 4")
        # Widgets
        toolbox = QToolBox()
        slider = QSlider(Qt.Orientation.Horizontal)
        dial_ticks = QDial()
        progressbar = QProgressBar()
        lcd_number = QLCDNumber()
        # Setup ui
        self.setCheckable(True)
        toolbox.addItem(slider, "Slider")
        toolbox.addItem(dial_ticks, "Dial")
        toolbox.addItem(progressbar, "Progress Bar")
        toolbox.addItem(lcd_number, "LCD Number")
        slider.setValue(50)
        dial_ticks.setNotchesVisible(True)
        progressbar.setValue(50)
        lcd_number.setSegmentStyle(QLCDNumber.SegmentStyle.Flat)
        lcd_number.display(123)
        # Layout
        v_layout = QVBoxLayout(self)
        v_layout.addWidget(toolbox)
class HomeUI:
    """Builds the "home" page: the four demo groups in two splitters,
    stacked vertically inside a scroll area on *win*."""
    def setup_ui(self, win: QWidget) -> None:
        # Widgets
        h_splitter_1, h_splitter_2 = QSplitter(Qt.Orientation.Horizontal), QSplitter(Qt.Orientation.Horizontal)
        # Setup ui
        h_splitter_1.setMinimumHeight(350)  # Fix bug layout crush
        # Layout
        h_splitter_1.addWidget(_Group1())
        h_splitter_1.addWidget(_Group2())
        h_splitter_2.addWidget(_Group3())
        h_splitter_2.addWidget(_Group4())
        v_layout = QVBoxLayout()
        v_layout.addWidget(h_splitter_1)
        v_layout.addWidget(h_splitter_2)
        widget = QWidget()
        widget.setLayout(v_layout)
        scroll_area = QScrollArea()
        scroll_area.setWidget(widget)
        v_main_layout = QVBoxLayout(win)
        v_main_layout.addWidget(scroll_area)
class DockUI:
    """Builds the "dock" page: four text-edit dock widgets around a central
    text edit, each dockable on any side of *main_win*."""
    def _setup_ui(self, main_win: QMainWindow) -> None:
        # Attribute
        left_dock = QDockWidget("Left dock")
        right_dock = QDockWidget("Right dock")
        top_dock = QDockWidget("Top dock")
        bottom_dock = QDockWidget("Bottom dock")
        docks = [left_dock, right_dock, top_dock, bottom_dock]
        # Setup ui
        left_dock.setWidget(QTextEdit("This is the left widget."))
        right_dock.setWidget(QTextEdit("This is the right widget."))
        top_dock.setWidget(QTextEdit("This is the top widget."))
        bottom_dock.setWidget(QTextEdit("This is the bottom widget."))
        # Every dock may be dragged to any of the four dock areas.
        for dock in docks:
            dock.setAllowedAreas(
                Qt.DockWidgetArea.LeftDockWidgetArea
                | Qt.DockWidgetArea.RightDockWidgetArea
                | Qt.DockWidgetArea.BottomDockWidgetArea
                | Qt.DockWidgetArea.TopDockWidgetArea
            )
        # Layout
        main_win.setCentralWidget(QTextEdit("This is the central widget."))
        main_win.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, left_dock)
        main_win.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, right_dock)
        main_win.addDockWidget(Qt.DockWidgetArea.TopDockWidgetArea, top_dock)
        main_win.addDockWidget(Qt.DockWidgetArea.BottomDockWidgetArea, bottom_dock)
class UI:
    """Builds the main window chrome: icons, actions, activity bar, toolbar,
    menu bar, status bar and the stacked home/dock pages."""

    def setup_ui(self, main_win: QMainWindow) -> None:
        """Create all actions and widgets and install them on *main_win*."""
        # Icons
        home_icon = qtvsc.theme_icon(qtvsc.Vsc.HOME, "activityBar.foreground")
        multi_windows_icon = qtvsc.theme_icon(qtvsc.Vsc.MULTIPLE_WINDOWS, "activityBar.foreground")
        settings_icon = qtvsc.theme_icon(qtvsc.Vsc.SETTINGS_GEAR, "activityBar.foreground")
        folder_open_icon = qtvsc.theme_icon(qtvsc.Vsc.FOLDER)
        palette_icon = qtvsc.theme_icon(qtvsc.Vsc.SYMBOL_COLOR)
        circle_icon = qtvsc.theme_icon(qtvsc.Vsc.CIRCLE_LARGE_OUTLINE)
        clear_icon = qtvsc.theme_icon(qtvsc.Vsc.CLOSE)
        # Actions
        self.action_change_home = QAction(home_icon, "Move to home")
        self.action_change_dock = QAction(multi_windows_icon, "Move to dock")
        self.action_open_folder = QAction(folder_open_icon, "Open folder dialog")
        self.action_open_color_dialog = QAction(palette_icon, "Open color dialog", main_win)
        self.action_enable = QAction(circle_icon, "Enable")
        self.action_disable = QAction(clear_icon, "Disable")
        self.action_group_toolbar = QActionGroup(main_win)
        # Widgets
        self.central_window = QMainWindow()
        self.stack_widget = QStackedWidget()
        activitybar = QToolBar("activitybar")
        toolbar = QToolBar("Toolbar")
        statusbar = QStatusBar()
        menubar = QMenuBar()
        tool_button_settings = QToolButton()
        tool_button_enable = QToolButton()
        tool_button_disable = QToolButton()
        self.spacer = QToolButton()
        # Setup Actions
        # Home/dock actions are checkable and grouped, so they behave like
        # mutually exclusive radio buttons; "home" is the initial page.
        self.action_change_home.setCheckable(True)
        self.action_change_dock.setCheckable(True)
        self.action_change_home.setChecked(True)
        self.action_group_toolbar.addAction(self.action_change_home)
        self.action_group_toolbar.addAction(self.action_change_dock)
        # Setup Widgets
        # The disabled, vertically expanding spacer pushes the settings button
        # to the bottom of the activity bar.
        self.spacer.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Expanding)
        self.spacer.setEnabled(False)
        activitybar.setMovable(False)
        activitybar.addActions([self.action_change_home, self.action_change_dock])
        activitybar.addWidget(self.spacer)
        activitybar.addWidget(tool_button_settings)
        tool_button_settings.setIcon(settings_icon)
        tool_button_settings.setPopupMode(QToolButton.ToolButtonPopupMode.InstantPopup)
        tool_button_enable.setDefaultAction(self.action_enable)
        tool_button_disable.setDefaultAction(self.action_disable)
        toolbar.addActions([self.action_open_folder, self.action_open_color_dialog])
        statusbar.addPermanentWidget(tool_button_enable)
        statusbar.addPermanentWidget(tool_button_disable)
        statusbar.showMessage("Enable")
        menu_toggle = menubar.addMenu("&Toggle")
        menu_toggle.addActions([self.action_enable, self.action_disable])
        menu_dialog = menubar.addMenu("&Dialog")
        menu_dialog.addActions([self.action_open_folder, self.action_open_color_dialog])
        tool_button_settings.setMenu(menu_toggle)
        # The app starts enabled, so only "Disable" is actionable.
        self.action_enable.setEnabled(False)
        # setup custom property -- presumably consumed by the theme stylesheet
        # to style this toolbar as an activity bar; confirm against the qss.
        activitybar.setProperty("type", "activitybar")
        # layout
        stack_1 = QWidget()
        home_ui = HomeUI()
        home_ui.setup_ui(stack_1)
        self.stack_widget.addWidget(stack_1)
        stack_2 = QMainWindow()
        dock_ui = DockUI()
        dock_ui._setup_ui(stack_2)
        self.stack_widget.addWidget(stack_2)
        self.central_window.setCentralWidget(self.stack_widget)
        self.central_window.addToolBar(toolbar)
        main_win.setCentralWidget(self.central_window)
        main_win.addToolBar(Qt.ToolBarArea.LeftToolBarArea, activitybar)
        main_win.setMenuBar(menubar)
        main_win.setStatusBar(statusbar)
class WidgetGallery(QMainWindow):
    """Main window of the widget gallery; wires the UI actions to handlers."""

    def __init__(self) -> None:
        super().__init__()
        self._ui = UI()
        self._ui.setup_ui(self)
        self._setup()

    def _setup(self) -> None:
        """Connect every toolbar/menu action to its slot."""
        ui = self._ui
        ui.action_change_home.triggered.connect(self._change_page)
        ui.action_change_dock.triggered.connect(self._change_page)
        ui.action_open_folder.triggered.connect(
            lambda: QFileDialog.getOpenFileName(self, "Open File", options=QFileDialog.Option.DontUseNativeDialog)
        )
        ui.action_open_color_dialog.triggered.connect(
            lambda: QColorDialog.getColor(parent=self, options=QColorDialog.ColorDialogOption.DontUseNativeDialog)
        )
        ui.action_enable.triggered.connect(self._toggle_state)
        ui.action_disable.triggered.connect(self._toggle_state)

    def _change_page(self) -> None:
        """Show the stacked page matching the action that fired (home=0, dock=1)."""
        page_index = 0 if self.sender().text() == "Move to home" else 1
        self._ui.stack_widget.setCurrentIndex(page_index)

    def _toggle_state(self) -> None:
        """Enable/disable the central area according to the triggering action."""
        state = self.sender().text()
        self._ui.central_window.centralWidget().setEnabled(state == "Enable")
        # Only the action for the *other* state stays clickable.
        self._ui.action_enable.setEnabled(state == "Disable")
        self._ui.action_disable.setEnabled(state == "Enable")
        self.statusBar().showMessage(state)
| StarcoderdataPython |
6631894 | <reponame>icebreaker/dotfiles
import gedit
from FindInProject import FindInProjectPluginInstance
class FindInProjectPlugin(gedit.Plugin):
    """gedit plugin entry point; keeps one plugin instance per editor window."""

    def __init__(self):
        gedit.Plugin.__init__(self)
        # Maps each gedit window to its FindInProjectPluginInstance.
        self._instances = {}

    def activate(self, window):
        """Create and register the plugin instance for *window*."""
        self._instances[window] = FindInProjectPluginInstance(window)

    def deactivate(self, window):
        """Tear down and forget the instance bound to *window*."""
        instance = self._instances[window]
        instance.deactivate()
        del self._instances[window]

    def update_ui(self, window):
        """No per-window UI state needs refreshing for this plugin."""
        pass
| StarcoderdataPython |
6515101 | # coding: utf-8
"""
Graphical user interface for the Phase Retrieval Algorithm based on:
<NAME>.; <NAME>.; <NAME>.; <NAME>.
Phase Retrieval for High-Numerical-Aperture Optical Systems.
Optics Letters 2003, 28 (10), 801.](dx.doi.org/10.1364/OL.28.000801)
The user interface allows selecting the PSF files (supported by bioformats or ome-tiff) and adjusting PSF and
fit parameters. The PSF can be previewed, and the current status of the PR Algorithm can be tracked. Images of the results,
the final data, and a comprehensive pdf report can be created.
The GUI uses the algorithms written by <NAME> (Copyright (c) 2016):
https://github.com/david-hoffman/pyOTF
https://github.com/david-hoffman/dphutils
The original phaseretrieval.py has been changed to a threading.Thread class, to allow it to run in parallel with the
tkinter mainloop. This is needed to make the tkinter gui responsive during calculation and allows for intermediate
results to be displayed. In addition some plotting fuctions have been adapted towork with th GUI.
These changes have been made in a forked pyOTF repository:
https://github.com/MartinSt80/pyOTF
Copyright (c) 2019, <NAME>
"""
import os
from ctypes import *
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import javabridge
import bioformats
from pyOTF import phaseretrieval_gui
import TrackingClasses
class ParameterFrame(tk.Frame):
    """
    The left frame in the GUI. Select PSF file and result directory, enter PSF and Fit Parameters, load PSF file,
    start/stop the Phase Retrieval Algorithm, show the current state of the PR Algorithm in its subframes.

    Arguments
    ----------
    parent: _tkinter.tkapp
        The parent (tk.Root)

    Parameters
    -----------
    self.current_frame_width: int
        Width of the frame in pixels (measured after the first subframe is drawn)
    """

    class FileDialogFrame(tk.LabelFrame):
        """
        Contains the Filedialogs and buttons to select PSF file and result directory.

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        frame_text: string
            The tk.LabelFrame description
        """

        def __init__(self, parent, frame_text):
            tk.LabelFrame.__init__(self, parent, text=frame_text)
            self.widgets()

        def widgets(self):
            """Create the entry/button pairs for PSF file and result directory."""
            # tk.Entry for the PSF file
            self.psf_file_entry = tk.Entry(self,
                                           textvariable=self.winfo_toplevel().psf_file,
                                           font=("Arial", self.winfo_toplevel().font_size)
                                           )
            self.psf_file_entry.grid(row=0, column=0)
            # tk.Button, opens a filedialog to select the PSF file
            self.psf_button = tk.Button(self,
                                        text="Select PSF file",
                                        font=("Arial", self.winfo_toplevel().font_size),
                                        command=self.winfo_toplevel().select_psf_file
                                        )
            self.psf_button.grid(row=0, column=1, sticky=tk.E + tk.W, padx=5, pady=5)
            # tk.Entry for the result directory
            self.result_dir_entry = tk.Entry(self,
                                             textvariable=self.winfo_toplevel().result_directory,
                                             font=("Arial", self.winfo_toplevel().font_size)
                                             )
            self.result_dir_entry.grid(row=1, column=0)
            # tk.Button, opens a filedialog to select the result directory
            self.result_button = tk.Button(self,
                                           text="Select result directory",
                                           font=("Arial", self.winfo_toplevel().font_size),
                                           command=self.winfo_toplevel().select_result_dir
                                           )
            self.result_button.grid(row=1, column=1, sticky=tk.E + tk.W, padx=5, pady=5)

    class PsfParamFrame(tk.LabelFrame):
        """
        Contains the Entries for the PSF parameters loaded from the PSF file or entered by the user.

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        frame_text: string
            The tk.LabelFrame description
        """

        def __init__(self, parent, frame_text):
            tk.LabelFrame.__init__(self, parent, text=frame_text)
            self.widgets()

        def widgets(self):
            """Create one name/value/unit row per PSF acquisition parameter."""
            # Generate the widgets for the PSF parameters
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.em_wavelength, 0)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.num_aperture, 1)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.refractive_index, 2)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.xy_res, 3)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.z_res, 4)

    class PrParamFrame(tk.LabelFrame):
        """
        Contains the Entries for the Phase Retrieval Algorithm parameters used from default (kwarg)
        or entered by the user.

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        frame_text: string
            The tk.LabelFrame description
        """

        def __init__(self, parent, frame_text):
            tk.LabelFrame.__init__(self, parent, text=frame_text)
            self.widgets()

        def widgets(self):
            """Create one name/value/unit row per PR fit parameter."""
            # Generate the widgets for the PR Algorithm parameters
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.max_iterations, 0)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.pupil_tolerance, 1)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.mse_tolerance, 2)
            self.master.generate_parameter_entry(self, self.winfo_toplevel().psf_fit_parameters.phase_tolerance, 3)

    class PsfButtonFrame(tk.Frame):
        """
        Buttons, to load the PSF parameters and data from the selected file and start/stop the PR Algorithm

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        """

        def __init__(self, parent):
            tk.Frame.__init__(self, parent)
            self.widgets()

        def widgets(self):
            """Create the 'Load PSF' and 'Start Phase Retrieval' buttons."""
            # Button to load the PSF parameters and data from the selected file
            self.load_psf_button = tk.Button(self,
                                             text="Load PSF",
                                             font=("Arial", self.winfo_toplevel().font_size),
                                             command=self.winfo_toplevel().load_psf_file,
                                             width=18
                                             )
            self.load_psf_button.grid(row=0, column=0, padx=5, pady=5, sticky=tk.E + tk.W)
            # Button to start/stop the PR Algorithm
            self.pr_button = tk.Button(self,
                                       text="Start Phase Retrieval",
                                       font=("Arial", self.winfo_toplevel().font_size),
                                       command=self.winfo_toplevel().initiate_pr,
                                       width=18
                                       )
            self.pr_button.grid(row=0, column=1, padx=5, pady=5, sticky=tk.E + tk.W)

    class PrStatusFrame(tk.LabelFrame):
        """
        Displays the current state of the PR Algorithm by progressbar, status message and display of the
        current fit convergence.

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        frame_text: string
            The tk.LabelFrame description

        Attributes
        -----------
        iteration_text: tk.StringVar
            The current iterations snippet for the status string
        pupil_diff_text: tk.StringVar
            The current iterations snippet for the pupil function difference string
        mse_diff_text: tk.StringVar
            The current iterations snippet for the mse difference string
        """

        def __init__(self, parent, frame_text):
            tk.LabelFrame.__init__(self, parent, text=frame_text)
            # initiate tk.StringVars
            self.iteration_text = tk.StringVar()
            self.iteration_text.set(" {} / {}".format(self.winfo_toplevel().pr_state.current_iter.get(),
                                                      self.winfo_toplevel().psf_fit_parameters.
                                                      max_iterations.value.get()
                                                      )
                                    )
            self.pupil_diff_text = tk.StringVar()
            self.pupil_diff_text.set(" {}".format(self.winfo_toplevel().pr_state.current_pupil_diff.get()))
            self.mse_diff_text = tk.StringVar()
            self.mse_diff_text.set(" {}".format(self.winfo_toplevel().pr_state.current_mse_diff.get()))
            self.widgets()

        def widgets(self):
            """Create the progress bar, the status label and the three status rows."""
            # Create a progress bar to follow the PR Algorithm iterations, fit length to self.master.frame_width
            self.progress_bar = ttk.Progressbar(self,
                                                mode='determinate',
                                                max=self.winfo_toplevel().psf_fit_parameters.max_iterations.value.get(),
                                                variable=self.winfo_toplevel().pr_state.current_iter,
                                                length=self.master.current_frame_width
                                                )
            self.progress_bar.grid(row=0, column=0, columnspan=2, sticky=tk.E + tk.W, padx=5, pady=5)
            # Creates a label which displays the PR Algorithm status
            self.status_label = tk.Label(self,
                                         textvariable=self.winfo_toplevel().pr_state.current_state,
                                         font=("Arial", self.winfo_toplevel().font_size),
                                         anchor=tk.W,
                                         justify=tk.LEFT
                                         )
            self.status_label.grid(row=1, column=0, columnspan=2, sticky=tk.W, padx=5, pady=5)
            # Trace if the user changed the max iterations and the current iteration of the PR algorithm
            # Throws exception if field was empty, because user deleted the entry
            self.winfo_toplevel().psf_fit_parameters.max_iterations.value.trace('w', self.update_status)
            self.winfo_toplevel().pr_state.current_iter.trace('w', self.update_status)
            # Generate the status entries
            self.generate_status_entry("Current iteration", self.iteration_text, 2)
            self.generate_status_entry("Relative difference in the pupil function", self.pupil_diff_text, 3)
            self.generate_status_entry("Relative difference in the MSE", self.mse_diff_text, 4)

        def generate_status_entry(self, description, value_variable, row_grid):
            """
            Generate a status display with fixed description, and a tk.StringVar which can be updated as needed

            Arguments
            ----------
            description: string
                The fixed descriptional part of the displayed text
            value_variable: tk.StringVar
                A text snippet which can be updated dynamically
            row_grid: int
                The row for the grid geometry manager to place the widgets on
            """
            name_label = tk.Label(self, text=description, font=("Arial", self.winfo_toplevel().font_size), anchor=tk.E)
            name_label.grid(row=row_grid, column=0, sticky=tk.E, padx=2, pady=2)
            value_label = tk.Label(self,
                                   textvariable=value_variable,
                                   font=("Arial", self.winfo_toplevel().font_size),
                                   justify=tk.RIGHT,
                                   anchor=tk.E
                                   )
            value_label.grid(row=row_grid, column=1, sticky=tk.E, padx=2, pady=2)

        def update_status(self, name, m, x):
            """
            Updates the status elements, called by trace if the max iterations or the current iteration changed.

            Arguments
            ----------
            name: string or list
                The internal name of the variable which was changed, or a list of variables
            m: int or ""
                Index for name list or empty string
            x: string
                What operation triggered the trace: 'w': write, 'r': read or 'u': delete
            """
            # Check if the maximum iterations were adjusted by the user
            # NOTE(review): relies on the traced IntVar having been created with
            # name='MAX_ITER' in TrackingClasses -- confirm against that module.
            try:
                if name == 'MAX_ITER':
                    # Set current iterations to zero and adjust the max of the progress bar
                    self.winfo_toplevel().pr_state.current_iter.set(0)
                    self.progress_bar.configure(max=self.winfo_toplevel().psf_fit_parameters.max_iterations.value.get())
                # Update the dynamical text snippets in the status display
                self.iteration_text.set("{} / {}".format(self.winfo_toplevel().pr_state.current_iter.get(),
                                                         self.winfo_toplevel().psf_fit_parameters.max_iterations.
                                                         value.get()
                                                         )
                                        )
                self.pupil_diff_text.set(" {:.2E}".format(self.winfo_toplevel().pr_state.current_pupil_diff.get()))
                self.mse_diff_text.set(" {:.2E}".format(self.winfo_toplevel().pr_state.current_mse_diff.get()))
            # An empty entry (user deleted the value) makes the Variable raise TclError
            # NOTE(review): tk.TclError is the public alias for tk._tkinter.TclError
            except tk._tkinter.TclError:
                pass

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        # Filled in by widgets() once the first subframe has been drawn.
        self.current_frame_width = None
        self.widgets()

    def widgets(self):
        """Create the five stacked subframes of the parameter column."""
        # Subframe, select PSF and result directory (the widest in ParameterFrame)
        self.filedialog_frame = self.FileDialogFrame(self, "Select PSF file & Result directory")
        self.filedialog_frame.grid(row=0, column=0, sticky=tk.W + tk.E, padx=5, pady=5)
        # Draw the frame
        self.filedialog_frame.update()
        # get its width and store it (needed to size the progressbar in self.status_frame)
        self.current_frame_width = self.filedialog_frame.winfo_width()
        # Subframe, contains the entries for the PSF parameters
        self.psf_parameter_frame = self.PsfParamFrame(self, "PSF Acquisition Parameters")
        self.psf_parameter_frame.grid(row=1, column=0, sticky=tk.W + tk.E, padx=5, pady=5)
        # Subframe, contains the entries for the PR fit parameters
        self.pr_parameter_frame = self.PrParamFrame(self, "Phase Recovery Parameters")
        self.pr_parameter_frame.grid(row=2, column=0, sticky=tk.W + tk.E, padx=5, pady=5)
        # Subframe, contains the buttons to load the PSF and start/stop the PR Algorithm
        self.action_button_frame = self.PsfButtonFrame(self)
        self.action_button_frame.grid(row=3, column=0, sticky=tk.W + tk.E, padx=5, pady=5)
        # Subframe, displays the current state of the PR Algorithm
        self.status_frame = self.PrStatusFrame(self, "Phase Retrieval Status")
        self.status_frame.grid(row=4, column=0, sticky=tk.W, padx=5, pady=5)

    def generate_parameter_entry(self, parent, parameter, row_grid):
        """
        Generates a line of widgets for the given parameter

        Arguments
        ----------
        parent: tk.Frame
            Frame in which the widgets should be created
        parameter: TrackingClasses.PsfandFitParameters.PsfFitParameter
            The parameter object, containing name, value and unit
        row_grid: int
            The row for the grid geometry manager to place the widgets on
        """
        # Generate a name label in column 0
        name_label = tk.Label(parent,
                              text=parameter.name,
                              font=("Arial", self.winfo_toplevel().font_size),
                              anchor=tk.E
                              )
        name_label.grid(row=row_grid, column=0, sticky=tk.E, padx=2, pady=2)
        # Generate a value entry in column 1
        value_entry = tk.Entry(parent, textvariable=parameter.value,
                               font=("Arial", self.winfo_toplevel().font_size),
                               width=5,
                               justify=tk.RIGHT
                               )
        value_entry.grid(row=row_grid, column=1, padx=2, pady=2)
        # Generate a unit label in column 2
        unit_label = tk.Label(parent, text=parameter.unit,
                              font=("Arial", self.winfo_toplevel().font_size),
                              anchor=tk.E
                              )
        unit_label.grid(row=row_grid, column=2, sticky=tk.E, padx=2, pady=2)
class ImageFrame(tk.Frame):
    """
    The middle frame in the GUI. Shows xy and xz sections of the PSF with attached sliders. Displays the PR Algorithm
    results and convergence.

    Arguments
    ----------
    parent: _tkinter.tkapp
        The parent (tk.Root)
    """

    class PsfFrame(tk.LabelFrame):
        """
        Displays xy and xz sections of the loaded PSF.
        The sliders can be used to preview different planes of the PSF

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        label_text: string
            The tk.LabelFrame description

        Attributes
        -----------
        self.zpos: tk.IntVar
            The current z position within the stack
        self.ypos: tk.IntVar
            The current y position within the stack
        """

        def __init__(self, parent, label_text):
            tk.LabelFrame.__init__(self, parent, text=label_text)
            # Track the position of the sliders
            self.zpos = tk.IntVar()
            self.zpos.set(0)
            self.ypos = tk.IntVar()
            self.ypos.set(0)
            self.widgets()

        def widgets(self):
            """Create the two PSF canvases and their (initially disabled) sliders."""
            # Initially create placeholder images, as long as no PSF has been loaded, create the sliders
            self.psf_xy_figure = self.create_placeholder_psf()
            self.psf_xy_figure._tkcanvas.grid(row=0, column=0, padx=5, pady=5)
            self.zstack_slider = tk.Scale(self,
                                          label="Z Position",
                                          orient=tk.HORIZONTAL,
                                          font=("Arial", self.winfo_toplevel().font_size),
                                          variable=self.zpos,
                                          state=tk.DISABLED,
                                          name='z-slider'
                                          )
            # Redraw only when the mouse button is released, not on every step
            self.zstack_slider.bind("<ButtonRelease-1>", self.update_psf)
            self.zstack_slider.grid(row=1, column=0, sticky=tk.W + tk.E, padx=5, pady=5)
            self.psf_xz_figure = self.create_placeholder_psf()
            self.psf_xz_figure._tkcanvas.grid(row=0, column=1, padx=5, pady=5)
            self.ypos_slider = tk.Scale(self,
                                        label="Y Position",
                                        orient=tk.HORIZONTAL,
                                        font=("Arial", self.winfo_toplevel().font_size),
                                        variable=self.ypos,
                                        state=tk.DISABLED,
                                        name='y-slider'
                                        )
            self.ypos_slider.bind("<ButtonRelease-1>", self.update_psf)
            self.ypos_slider.grid(row=1, column=1, sticky=tk.W + tk.E, padx=5, pady=5)

        def create_placeholder_psf(self):
            """
            Creates a placeholder PSF figure.

            Returns
            --------
            psf_dummy_figure: FigureCanvasTkAgg
            """
            psf_dummy = plt.figure(figsize=(6, 6), dpi=self.winfo_toplevel().figure_dpi)
            psf_dummy.text(0.5, 0.5, "No PSF has been loaded.", fontname='Arial', fontsize=16,
                           horizontalalignment='center')
            psf_dummy_figure = FigureCanvasTkAgg(psf_dummy, master=self)
            plt.close(psf_dummy)
            return psf_dummy_figure

        def update_psf(self, event, z_position=None, y_position=None):
            """
            Update the currently displayed image of the PSF at the current section and update the image streams.

            Arguments
            ----------
            event: tk.Event
                If triggered by <ButtonRelease-1> on a stack slider, None if triggered initially
                after a PSF has been loaded
            z_position: int
                Position in the stack, from which the xy psf image is created
            y_position: int
                Position in the stack, from which the xz psf image is created
            """

            def __update_it(psf_view, zpos=None, ypos=None, aspect=1):
                # Swap in a freshly rendered canvas for the requested view ('xy' or 'xz').
                if psf_view == 'xy':
                    # track old psf figure, create a new one and then destroy old one
                    # (needed to remove geometry manager jumping around)
                    obsolete_canvas = self.psf_xy_figure
                    self.psf_xy_figure = __create_psf(psf_view, zpos)
                    self.psf_xy_figure._tkcanvas.grid(row=0, column=0, padx=5, pady=5)
                if psf_view == 'xz':
                    obsolete_canvas = self.psf_xz_figure
                    self.psf_xz_figure = __create_psf(psf_view, ypos, aspect)
                    self.psf_xz_figure._tkcanvas.grid(row=0, column=1, padx=5, pady=5)
                obsolete_canvas._tkcanvas.destroy()

            def __create_psf(psf_view, current_stack_pos, voxel_aspect=1):
                """
                Creates an image (FigureCanvasTkAgg) of the PSF at the current section and updates the image stream.

                Arguments
                ----------
                current_stack_pos: int
                    Position in the stack, from which the image is created

                Returns
                ---------
                psf_figure: FigureCanvasTkAgg
                """
                # Create the matplotlib.Figure and configure it
                psf = plt.figure(figsize=(6, 6), dpi=self.winfo_toplevel().figure_dpi)
                psf_ax = psf.add_axes([0, 0, 1, 1])
                psf_ax.xaxis.set_visible(False)
                psf.patch.set_facecolor('black')
                # Create the requested image and store it in its image stream
                if psf_view == 'xy':
                    psf_ax.matshow(self.winfo_toplevel().psf_fit_parameters.psf_data[int(current_stack_pos)],
                                   cmap="inferno"
                                   )
                    self.winfo_toplevel().image_streams.reset_image_stream(self.winfo_toplevel().image_streams.
                                                                           psf_image_stream_xy,
                                                                           psf,
                                                                           )
                if psf_view == 'xz':
                    psf_ax.matshow(self.winfo_toplevel().psf_fit_parameters.psf_data[:, int(current_stack_pos), :],
                                   cmap="inferno",
                                   aspect=voxel_aspect
                                   )
                    self.winfo_toplevel().image_streams.reset_image_stream(self.winfo_toplevel().image_streams.
                                                                           psf_image_stream_xz,
                                                                           psf,
                                                                           )
                # Create the image for display and return it
                psf_figure = FigureCanvasTkAgg(psf, master=self)
                plt.close(psf)
                return psf_figure

            # initial update after PSF file has been loaded, update both PSF images and enable the sliders
            if event is None:
                __update_it('xy', zpos=z_position)
                self.zstack_slider.configure(state=tk.NORMAL, to=self.winfo_toplevel().psf_fit_parameters.z_size - 1)
                __update_it('xz', ypos=y_position, aspect=self.winfo_toplevel().psf_fit_parameters.voxel_aspect)
                self.ypos_slider.configure(state=tk.NORMAL, to=self.winfo_toplevel().psf_fit_parameters.xy_size - 1)
            # One of the sliders has been moved, update the corresponding PSF image
            else:
                # get current position of the slider
                stack_position = event.widget.get()
                # update the corresponding PSF image
                if event.widget == self.zstack_slider:
                    __update_it('xy', zpos=stack_position)
                if event.widget == self.ypos_slider:
                    __update_it('xz', ypos=stack_position)

    def __init__(self, parent, ):
        tk.Frame.__init__(self, parent)
        self.widgets()

    def widgets(self):
        """Create the PSF preview frame and the two result display frames."""
        # This frame displays xy and xz sections of the loaded PSF file, and sliders to change the section
        self.psf_frame = self.PsfFrame(self, "PSF preview")
        self.psf_frame.grid(row=0, column=0, padx=5, pady=5)
        # This frame displays the results of the PR Algorithm
        self.pr_result_frame = ResultFrame(self,
                                           'Phase Retrieval Results',
                                           'No phase retrieval results yet.',
                                           figure_width=12,
                                           figure_height=5,
                                           )
        self.pr_result_frame.grid(row=1, column=0, padx=5, pady=5, sticky=tk.E + tk.W)
        # This frame displays the PR Algorithm convergence
        self.pr_mse_frame = ResultFrame(self,
                                        'Phase Retrieval Error',
                                        'No phase retrieval results yet.',
                                        figure_width=12,
                                        figure_height=3,
                                        )
        self.pr_mse_frame.grid(row=2, column=0, padx=5, pady=5, sticky=tk.E + tk.W)
class ZernikeFrame(tk.Frame):
    """
    The right frame in the GUI. Displays the results of the Zernike Decomposition graphically and as discrete values.
    Allows to save the result images, the Zernike Decomposition results as .xlsx,
    and generate a comprehensive pdf report.

    Arguments
    ----------
    parent: _tkinter.tkapp
        The parent (tk.Root)
    """

    class ZernikeCoefficientFrame(tk.LabelFrame):
        """
        Displays the results of the Zernike Decomposition as discrete values. Generates a list of
        Zernike Polynomials, with name and phase coefficient value.

        Arguments
        ----------
        parent: _tkinter.tkapp
            The parent (tk.Root)
        """

        def __init__(self, parent, label_text):
            tk.LabelFrame.__init__(self, parent, text=label_text)
            self.widgets()

        def widgets(self):
            """Create one name/value(/tolerance) row per Zernike polynomial."""
            # Generate an entry for each Zernike Polynomial
            rows = range(0, len(self.winfo_toplevel().zernike_results.zernike_polynomials))
            for row, polynomial in zip(rows, self.winfo_toplevel().zernike_results.zernike_polynomials):
                # Set the font to bold for the "important" polynomials
                if polynomial.order in self.winfo_toplevel().zernike_results.important_coeff_orders:
                    temp_label = tk.Label(self,
                                          text=polynomial.name,
                                          font=("Arial", self.winfo_toplevel().font_size, 'bold'),
                                          anchor=tk.E
                                          )
                else:
                    temp_label = tk.Label(self,
                                          text=polynomial.name,
                                          font=("Arial", self.winfo_toplevel().font_size),
                                          anchor=tk.E
                                          )
                temp_label.grid(row=row, column=0, sticky=tk.E, pady=2)
                # Format the phase coefficient value, make it bold if "important"
                value_string = ' {:.2f}'.format(polynomial.value)
                if polynomial.order in self.winfo_toplevel().zernike_results.important_coeff_orders:
                    temp_label = tk.Label(self,
                                          text=value_string,
                                          font=("Arial", self.winfo_toplevel().font_size, 'bold'),
                                          anchor=tk.E
                                          )
                else:
                    temp_label = tk.Label(self,
                                          text=value_string,
                                          font=("Arial", self.winfo_toplevel().font_size),
                                          anchor=tk.E
                                          )
                temp_label.grid(row=row, column=1, sticky=tk.E)
                # If no results yet, in_tolerance is None --> don't display it
                if polynomial.in_tolerance is not None:
                    if polynomial.in_tolerance:
                        temp_label = tk.Label(self,
                                              text='OK!',
                                              font=("Arial", self.winfo_toplevel().font_size),
                                              fg='green'
                                              )
                    else:
                        temp_label = tk.Label(self,
                                              text='Not OK!',
                                              font=("Arial", self.winfo_toplevel().font_size),
                                              fg='red'
                                              )
                    temp_label.grid(row=row, column=2)

        def update_entries(self):
            """Rebuild the coefficient rows from the current zernike_results."""
            # To update, destroy all label entries and recreate
            for widget in self.winfo_children():
                widget.destroy()
            self.widgets()

    class ResultButtonFrame(tk.LabelFrame):
        """
        Buttons to save the results of the PR Algorithm, and the Zernike Decomposition.

        Arguments
        ----------
        parent: tk.Frame
            The parent frame (tk.Frame)
        """

        def __init__(self, parent, label_text):
            tk.LabelFrame.__init__(self, parent, text=label_text)
            self.widgets()

        def widgets(self):
            """Create the four save/report buttons (disabled until results exist)."""
            # Check if the PR Algorithm has finished or been restarted, activate or deactivate report buttons
            self.winfo_toplevel().pr_state.pr_finished.trace('w', self.toggle_buttons)
            # Buttons to trigger the saving functions
            self.save_pr_result_button = tk.Button(self,
                                                   text="Phase & Magnitude as .png",
                                                   command=self.save_pr_image,
                                                   )
            self.save_pr_result_button.grid(row=0, column=0, padx=5, pady=5, )
            self.save_zernike_img_button = tk.Button(self,
                                                     text="Zernike Coeff. as .png",
                                                     command=self.save_zd_image,
                                                     )
            self.save_zernike_img_button.grid(row=1, column=0, padx=5, pady=5, )
            self.save_zernike_values_button = tk.Button(self,
                                                        text="Save fit results as .xlsx",
                                                        command=self.save_zd_values,
                                                        )
            self.save_zernike_values_button.grid(row=0, column=1, padx=5, pady=5, )
            self.save_pdf_report = tk.Button(self,
                                             text="Create pdf report",
                                             command=self.generate_pdf_report,
                                             )
            self.save_pdf_report.grid(row=1, column=1, padx=5, pady=5, )
            # All buttons share the same font/width and start disabled
            for child_button in self.winfo_children():
                child_button.configure(font=("Arial", self.winfo_toplevel().font_size),
                                       state=tk.DISABLED,
                                       width=23
                                       )

        def toggle_buttons(self, n, m, x):
            """
            Switches the buttons to active when the PR Algorithm has finished, to inactive when a new PSF
            has been loaded.

            Arguments
            ----------
            n: string or list
                The internal name of the variable which was changed, or a list of variables
            m: int or ""
                Index for name list or empty string
            x: string
                What operation triggered the trace: 'w': write, 'r': read or 'u': delete
            """
            for child in self.winfo_children():
                if self.winfo_toplevel().pr_state.pr_finished.get():
                    child.configure(state=tk.NORMAL)
                else:
                    child.configure(state=tk.DISABLED)

        def save_pr_image(self):
            """ Save the phase and magnitude images from the image stream to the disk. """
            save_file = os.path.join(self.winfo_toplevel().result_directory.get(),
                                     os.path.splitext(self.winfo_toplevel().psf_filename)[0] + '_pr_results.png')
            try:
                with open(save_file, "wb") as f:
                    f.write(self.winfo_toplevel().image_streams.pr_result_image_stream.getvalue())
            # The user may have entered a non-existing result directory
            except FileNotFoundError as pop_up_alert:
                messagebox.showwarning("Invalid File Path", str(pop_up_alert))

        def save_zd_image(self):
            """ Save the graphical results of the Zernike Decomposition from the image stream to the disk. """
            save_file = os.path.join(self.winfo_toplevel().result_directory.get(),
                                     os.path.splitext(self.winfo_toplevel().psf_filename)[0] + '_zd_results.png')
            try:
                with open(save_file, "wb") as f:
                    f.write(self.winfo_toplevel().image_streams.zd_decomposition_image_stream.getvalue())
            except FileNotFoundError as pop_up_alert:
                messagebox.showwarning("Invalid File Path", str(pop_up_alert))

        def save_zd_values(self):
            """ Save the Results of the Zernike Decomposition and the parameters
                for the PSF and the PR Fit to a .xlsx file.
            """
            xlsx_path = os.path.join(self.winfo_toplevel().result_directory.get(),
                                     os.path.splitext(self.winfo_toplevel().psf_filename)[0] + '_zd_results.xlsx')
            try:
                TrackingClasses.ZdResultWorkbook(xlsx_path,
                                                 self.winfo_toplevel().psf_file.get(),
                                                 self.winfo_toplevel().zernike_results,
                                                 self.winfo_toplevel().pr_state,
                                                 psf_fit_parameters=self.winfo_toplevel().psf_fit_parameters,
                                                 )
            except Exception as pop_up_alert:
                messagebox.showwarning("Saving results as .xlsx failed", str(pop_up_alert))

        def generate_pdf_report(self):
            """ Generate a pdf report with all results and save it."""
            pdf_path = os.path.join(self.winfo_toplevel().result_directory.get(),
                                    os.path.splitext(self.winfo_toplevel().psf_filename)[0] + '_report.pdf')
            pdf_report = TrackingClasses.PdfReport(pdf_path,
                                                   self.winfo_toplevel().psf_file.get(),
                                                   self.winfo_toplevel().psf_fit_parameters,
                                                   self.winfo_toplevel().zernike_results,
                                                   self.winfo_toplevel().image_streams,
                                                   self.winfo_toplevel().pr_state
                                                   )
            try:
                pdf_report.create_pdf_report()
            except Exception as pop_up_alert:
                messagebox.showwarning("Creating a .pdf-report failed", str(pop_up_alert))

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        self.widgets()

    def widgets(self):
        """Create the decomposition plot, the coefficient list and the save buttons."""
        # Generate a result frame which displays the results of the Zernike Decomposition
        self.zernike_frame = ResultFrame(self,
                                         'Zernike Decomposition Results',
                                         'No zernike decomposition results yet.',
                                         figure_width=6,
                                         figure_height=6
                                         )
        self.zernike_frame.grid(row=0, column=0, padx=5, pady=5)
        # Lists the named Zernike polynomials and their phase coefficients
        self.coefficient_frame = self.ZernikeCoefficientFrame(self, "Decomposed Zernike Coefficients")
        self.coefficient_frame.grid(row=1, column=0, padx=5, pady=5, sticky=tk.E + tk.W)
        # Buttons to save the results
        self.result_button_frame = self.ResultButtonFrame(self, "Save Results")
        self.result_button_frame.grid(row=2, column=0, padx=5, pady=5, sticky=tk.E + tk.W)
class ResultFrame(tk.LabelFrame):
    """LabelFrame embedding a matplotlib figure on the GUI.

    Starts out showing a placeholder message; ``show_results`` swaps in a real
    figure and ``reset`` restores the placeholder.

    Arguments
    ----------
    parent: tk.Frame
        The parent frame (tk.Frame)
    label_text: string
        The tk.LabelFrame description
    placeholder_text: string
        Message shown until results are available
    figure_width: int
        plt.Figure width in inches
    figure_height: int
        plt.Figure height in inches

    Attributes
    -----------
    self.figure: FigureCanvasTkAgg
        The currently displayed canvas
    """

    def __init__(self, parent, label_text, placeholder_text, figure_width=None, figure_height=None):
        tk.LabelFrame.__init__(self, parent, text=label_text)
        self.placeholder_text = placeholder_text
        self.figure_width = figure_width
        self.figure_height = figure_height
        self.initiate()

    def initiate(self):
        """Build the placeholder figure and place its canvas on the grid."""
        size = (self.figure_width, self.figure_height)
        placeholder = plt.figure(figsize=size, dpi=self.winfo_toplevel().figure_dpi)
        placeholder.text(
            0.5, 0.5, self.placeholder_text,
            fontname='Arial', fontsize=16, horizontalalignment='center',
        )
        self.figure = FigureCanvasTkAgg(placeholder, master=self)
        self.figure._tkcanvas.grid(row=0, column=0, padx=5, pady=5)
        # The Tk canvas keeps its own copy; release the pyplot handle.
        plt.close(placeholder)

    def show_results(self, result_figure):
        """Swap the displayed figure for *result_figure*.

        Arguments
        ----------
        result_figure: plt.Figure
        """
        previous_canvas = self.figure
        self.figure = FigureCanvasTkAgg(result_figure, master=self)
        self.figure._tkcanvas.grid(row=0, column=0, padx=5, pady=5)
        # Destroy the old canvas only after the new one is gridded, so the
        # geometry manager does not jump around.
        previous_canvas._tkcanvas.destroy()
        plt.close(result_figure)

    def reset(self):
        """Swap the displayed figure back to the placeholder."""
        previous_canvas = self.figure
        self.initiate()
        previous_canvas._tkcanvas.destroy()
class MainWindow(tk.Tk):
    """ The tk.root for the GUI.
    Arguments
    ----------
    screen_height: int
        Actual screen height after windows scaling
    scaling_factor: int
        Scaling factor set for the screen by Windows
    """
    def __init__(self, screen_height, scaling_factor):
        tk.Tk.__init__(self)
        # start a JVM, needed to run the bioformats class used in bioformats_helper.PsfImageDataAndParameters
        javabridge.start_vm(class_path=bioformats.JARS)
        # Set up the main window and font size and figure resolution according to the screen resolution
        # TODO: test this for different environments
        self.title("Phase retrieval from PSF")
        self.window_height = int(0.7 * screen_height)
        self.scaling_factor = scaling_factor
        # Fixed 1.43:1 aspect ratio for the window.
        self.window_width = int(1.43 * self.window_height)
        window_size = '{}x{}'.format(str(self.window_width), str(self.window_height))
        self.geometry(window_size)
        self.resizable(False, False)
        # Font/DPI tuned relative to a 1080p reference screen.
        self.font_size = int((30 * 1080) / (screen_height * self.scaling_factor))
        self.figure_dpi = int((180 * 1080) / screen_height)
        # Instantiate the class tracking PSF and fit parameters
        self.psf_fit_parameters = TrackingClasses.PsfandFitParameters()
        # Initialize the variables tracking PSF file and results directory
        self.psf_file = tk.StringVar()
        self.psf_file.set('Select a PSF file...')
        self.psf_directory = tk.StringVar()
        self.psf_directory.set('D:\\')
        self.psf_filename = ""
        self.result_directory = tk.StringVar()
        self.result_directory.set('Select a result directory...')
        # Instantiate the class tracking the state of the Phase Retrieval Algorithm
        self.pr_state = TrackingClasses.PrState()
        # Instantiate the class tracking the Phase Retrieval Algorithm results
        self.phase_retrieval_results = phaseretrieval_gui.PhaseRetrievalResult()
        # Instantiate the class tracking the Zernike Decomposition results
        self.zernike_results = TrackingClasses.ZernikeDecomposition()
        # Instantiate the class storing the GUI images as Bytestreams
        self.image_streams = TrackingClasses.ResultImageStreams()
        # Create the first level frames
        self.left_frame = ParameterFrame(self)
        self.left_frame.grid(row=0, column=0, sticky=tk.N)
        self.middle_frame = ImageFrame(self)
        self.middle_frame.grid(row=0, column=1, sticky=tk.N)
        self.right_frame = ZernikeFrame(self)
        self.right_frame.grid(row=0, column=2, sticky=tk.N)

    def select_psf_file(self):
        """ Open a filedialog to select a PSF file, store it"""
        psf_path = filedialog.askopenfilename(initialdir=self.psf_directory,
                                              title="Select PSF file..."
                                              )
        self.psf_file.set(psf_path)
        # Remember the directory so the next dialog starts there.
        psf_dir, self.psf_filename = os.path.split(psf_path)
        self.psf_directory.set(psf_dir)

    def select_result_dir(self):
        """ Open a filedialog to select a result directory, store it"""
        self.result_directory.set(filedialog.askdirectory(initialdir=self.psf_directory,
                                                          title="Select result directory...",
                                                          )
                                  )

    def load_psf_file(self):
        """
        Linked to the self.left_frame.action_button_frame.load_psf_button . Loads the PSF parameters and PSF data
        from the selected file and displays the data (self.middle_frame.psf_frame)
        """
        # Load the PSF file store parameters and data
        self.psf_fit_parameters.read_data_and_parameters(self.psf_file.get())
        # If loading the PSF was successful...
        if self.psf_fit_parameters.is_initiated:
            # ...display PSF on the GUI and initialize the sliders (centered)
            starting_zpos = self.psf_fit_parameters.z_size // 2
            self.middle_frame.psf_frame.zpos.set(starting_zpos)
            starting_xypos = self.psf_fit_parameters.xy_size // 2
            self.middle_frame.psf_frame.ypos.set(starting_xypos)
            # Also triggered by changes of the sliders --> tk.Event
            self.middle_frame.psf_frame.update_psf(None, z_position=starting_zpos, y_position=starting_xypos)
            # ...reset the stored results in case there was a previous Phase Retrieval Algorithm run
            self.phase_retrieval_results.reset_pr_result()
            self.zernike_results.initialize_polynomial_list()
            # ... reset the GUI display
            self.middle_frame.pr_result_frame.reset()
            self.middle_frame.pr_mse_frame.reset()
            self.right_frame.zernike_frame.reset()
            self.right_frame.coefficient_frame.update_entries()

    def initiate_pr(self):
        """
        Linked to the self.left_frame.action_button_frame.pr_button . Checks, whether all parameters for the
        Phase Retrieval Algorithm are set, resets the current state of the GUI and the internals, starts the
        algorithm in its own thread, reconfigures the button to allow stopping the the thread and starts monitoring
        the current state of the thread
        """
        # Check if all parameters are set
        if self.psf_fit_parameters.verify():
            # Reset internals and GUI display
            self.pr_state.reset_state()
            self.middle_frame.pr_result_frame.reset()
            self.middle_frame.pr_mse_frame.reset()
            self.right_frame.zernike_frame.reset()
            self.zernike_results.initialize_polynomial_list()
            self.right_frame.coefficient_frame.update_entries()
            # Initialize the Phase Retrieval Thread and start it
            self.pr_thread = phaseretrieval_gui.PhaseRetrievalThreaded(self.psf_fit_parameters.psf_data_prepped,
                                                                       self.psf_fit_parameters.psf_parameter_dict,
                                                                       self.pr_state,
                                                                       self.phase_retrieval_results,
                                                                       **self.psf_fit_parameters.fit_parameter_dict,
                                                                       )
            # Daemon thread: does not block interpreter exit if the GUI closes.
            self.pr_thread.daemon = True
            self.pr_thread.start()
            # Reconfigure the Phase Retrieval Button to allow to stop the thread
            self.left_frame.action_button_frame.pr_button.configure(text="Stop Phase Retrieval", command=self.stop_pr)
            self.pr_state.current_state.set("Phase retrieval running...")
            # After 250 ms call the monitoring function
            self.after(250, self.check_pr_results)

    def check_pr_results(self):
        """
        Checks every 250 ms, whether phaseretrieval_gui.PhaseRetrievalThreaded is still running. If the thread
        is not alive(finished or has been aborted), update the GUI display, reset the PR_button to allow the start
        of the next Phase Retrieval Algorithm.
        Calls itself, if the thread is still running and updates the GUI display every five iterations.
        """
        # Check if the algorithm is still running
        if self.pr_thread.is_alive():
            self.left_frame.status_frame.update()
            # Re-arm the poll before doing any (possibly slow) drawing work.
            self.after(250, self.check_pr_results)
            # Update the GUI every five iterations
            if self.pr_state.current_iter.get() > 0 and self.pr_state.current_iter.get() % 5 == 0:
                self.display_pr_results()
        # If the thread has stopped, update the GUI display and reset the Phase Retrieval Button
        else:
            self.display_pr_results()
            self.display_zd_results()
            # Also triggered by traced variable changes, which call it with three arguments
            self.left_frame.status_frame.update_status(None, None, None)
            self.left_frame.action_button_frame.pr_button.configure(text="Start Phase Retrieval",
                                                                    command=self.initiate_pr)
            self.pr_state.pr_finished.set(True)

    def display_pr_results(self):
        """ Generates figures from the Phase Retrieval Algorithm Results and the differences in pupil function and mse.
        Stores the figures in the image streams and updates the GUI display
        """
        # Create the Phase Retrieval result figure, store it as a byte stream and update GUI display
        result_figure, _ = self.phase_retrieval_results.plot_gui(self.figure_dpi)
        self.image_streams.reset_image_stream(self.image_streams.pr_result_image_stream, result_figure)
        self.middle_frame.pr_result_frame.show_results(result_figure)
        # Plot the errors in a figure, store it as a byte stream and update GUI display
        mse_figure, _ = self.phase_retrieval_results.plot_convergence_gui(self.figure_dpi,
                                                                          self.psf_fit_parameters.
                                                                          max_iterations.value.get())
        self.image_streams.reset_image_stream(self.image_streams.pr_fiterror_image_stream, mse_figure)
        self.middle_frame.pr_mse_frame.show_results(mse_figure)

    def display_zd_results(self):
        """ Generate a figure from the Zernike Decomposition Result, store the figure in the image streams and
        update the GUI display
        """
        # Do the Zernike Decomposition
        # NOTE(review): 120 is the (hard-coded) number of Zernike modes fitted
        # — confirm against phaseretrieval_gui.fit_to_zernikes.
        self.phase_retrieval_results.fit_to_zernikes(120)
        # Create the Zernike Decomposition result figure, store it as a byte stream and update GUI display
        zernike_figure, _ = self.phase_retrieval_results.zd_result.plot_named_coefs_gui(self.figure_dpi)
        self.image_streams.reset_image_stream(self.image_streams.zd_decomposition_image_stream, zernike_figure)
        self.right_frame.zernike_frame.show_results(zernike_figure)
        self.zernike_results.decomposition_from_phase_retrieval(self.phase_retrieval_results, self.psf_fit_parameters.
                                                                phase_tolerance.value.get())
        self.right_frame.coefficient_frame.update_entries()

    def stop_pr(self):
        """Sets the stop_pr flag, breaks out of the iteration loop in phaseretrieval_gui.PhaseRetrievalThreaded"""
        self.pr_thread.stop_pr.set()

    def clean_up(self):
        """Ensures that the JVM is killed, before tk.root is destroyed"""
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            javabridge.kill_vm()
            self.destroy()
if __name__ == "__main__":
    user32 = windll.user32
    # Screen height as reported BEFORE the process opts out of DPI
    # virtualization: with Windows display scaling active this is the
    # scaled (virtual) height.
    virtual_screen_height = user32.GetSystemMetrics(1)
    # Declare the process DPI-aware, then re-read the (now physical) height.
    # The call order here is load-bearing: both reads bracket the DPI switch
    # so their ratio yields the Windows scaling factor.
    # NOTE(review): SetProcessDPIAware takes no parameters per the Win32 API;
    # the extra `1` is ignored by ctypes stdcall — confirm intent.
    user32.SetProcessDPIAware(1)
    screen_height = user32.GetSystemMetrics(1)
    scaling = screen_height / virtual_screen_height
    app = MainWindow(screen_height, scaling)
    # Route window-close through clean_up so the JVM is shut down first.
    app.protocol("WM_DELETE_WINDOW", app.clean_up)
    app.mainloop()
| StarcoderdataPython |
4878824 | <reponame>dcos/dcos-bot-branches
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""IAM mock endpoint.
"""
import logging
from exceptions import EndpointException
from mocker.endpoints.recording import (
RecordingHTTPRequestHandler,
RecordingTcpIpEndpoint,
)
# pylint: disable=C0103
log = logging.getLogger(__name__)
# pylint: disable=R0903
class IamHTTPRequestHandler(RecordingHTTPRequestHandler):
    """Request handler mimicking the DC/OS IAM users API.

    Attributes:
        USERS_PATH_PREFIX (str): API path under which the users endpoint is
            served; the uid is everything in the request path after it.
    """
    USERS_PATH_PREFIX = '/acs/api/v1/users/'

    def _calculate_response(self, base_path, *_):
        """Answer a user-data query based on the endpoint context.

        See BaseHTTPRequestHandler for argument/return conventions.

        Raises:
            EndpointException: unsupported request path (500) or
                unknown uid (400).
        """
        context = self.server.context
        with context.lock:
            # copy.deepcopy() could be used here instead of locking
            known_users = context.data['users']
        prefix = self.USERS_PATH_PREFIX
        # Anything that is not "<prefix><uid>" (with a non-empty uid) is
        # not supported by this mock.
        if not base_path.startswith(prefix) or base_path == prefix:
            msg = "Path `{}` is not supported yet".format(base_path)
            raise EndpointException(code=500, reason=msg.encode('utf-8'))
        user_id = base_path[len(prefix):]
        if user_id in known_users:
            return self._convert_data_to_blob(known_users[user_id])
        error_doc = {"title": "Bad Request",
                     "description": "User `{}` not known.".format(user_id),
                     "code": "ERR_UNKNOWN_USER_ID"
                     }
        raise EndpointException(code=400,
                                reason=self._convert_data_to_blob(error_doc),
                                content_type='application/json')
class IamEndpoint(RecordingTcpIpEndpoint):
    """Endpoint mimicking the DC/OS IAM service.

    Attributes:
        users: uids the mock knows about by default.
    """
    users = ["root", "bozydar", "jadwiga"]

    @staticmethod
    def _user_dict_from_uid(uid):
        """Build the default user document for the given uid."""
        return {"is_remote": False,
                "uid": uid,
                "url": "/acs/api/v1/users/{}".format(uid),
                "description": "user `{}`".format(uid),
                "is_service": False
                }

    def _default_users(self):
        # One default user document per uid in ``self.users``.
        return {uid: self._user_dict_from_uid(uid) for uid in self.users}

    def reset(self, *_):
        """Reset the endpoint to the default/initial state."""
        with self._context.lock:
            super().reset()
            self._context.data["users"] = self._default_users()

    # pylint: disable=C0103
    def __init__(self, port, ip=''):
        """Initialize a new IamEndpoint"""
        super().__init__(port, ip, IamHTTPRequestHandler)
        self._context.data["users"] = self._default_users()

    def add_user(self, aux):
        """Register a uid so the endpoint starts answering 200 OK for it."""
        uid = aux["uid"]
        with self._context.lock:
            known = self._context.data["users"]
            assert uid not in known, "User already defined"
            known[uid] = self._user_dict_from_uid(uid)
        log.debug("User `%s` has been added to IamEndpoint", uid)

    def del_user(self, aux):
        """Forget a uid; queries for it no longer return 200 OK."""
        uid = aux["uid"]
        with self._context.lock:
            known = self._context.data["users"]
            assert uid in known, "User does not exist yet"
            del known[uid]
        log.debug("User `%s` has been removed from IamEndpoint", uid)
| StarcoderdataPython |
11358259 | import logging
import azure.functions as func
import json
import os
from azure.storage.blob import BlobServiceClient
#
# Azure Blob Integration
#
# Connection settings come from the function app's environment/application
# settings; a missing setting raises KeyError at import time.
graph_connection_string = os.environ["AzureGraphStorage"]
graph_container = os.environ["AzureGraphContainer"]
# Module-level clients are created once per worker process and reused by
# upload_blob() below.
blob_service_client = BlobServiceClient.from_connection_string(conn_str=graph_connection_string)
graph_container_client = blob_service_client.get_container_client(container=graph_container)
def upload_blob(img_temp_file, target_file, properties):
    """Upload data to the graph container under *target_file*.

    Parameters:
        img_temp_file: bytes/stream accepted by ContainerClient.upload_blob.
        target_file: blob name to create (overwritten if it exists).
        properties: sequence whose first element is the parent document
            name; it is stored base64-encoded in the blob metadata.
    """
    # Bug fix: `base64` was used without ever being imported, so every call
    # raised NameError. Imported locally to keep the fix self-contained.
    import base64
    metadata = {"parent_document_name": base64.encodebytes(bytes(properties[0], 'utf8')).decode("utf-8")}
    # The returned BlobClient was previously bound to an unused local; drop it.
    graph_container_client.upload_blob(name=target_file, data=img_temp_file, metadata=metadata, overwrite=True)
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Functions HTTP entry point: validate the body and dispatch it."""
    logging.info('Graphout Python HTTP trigger function processed a request.')
    try:
        body = json.dumps(req.get_json())
    except ValueError:
        body = None
    # Reject requests whose payload is absent or not valid JSON.
    if not body:
        return func.HttpResponse(
            "Invalid body",
            status_code=400
        )
    return func.HttpResponse(compose_response(body), mimetype="application/json")
def compose_response(json_data):
    """Run transform_value over every input record and return the JSON reply.

    Accepts the skillset shape (key 'values') as well as the legacy
    shape (key 'documents'); records transformed to None are dropped.
    """
    payload = json.loads(json_data)
    if 'values' in payload:
        documents = payload['values']
    elif 'documents' in payload:
        documents = payload['documents']
    transformed = (transform_value(document) for document in documents)
    results = {"values": [record for record in transformed if record is not None]}
    return json.dumps(results, ensure_ascii=False)
def transform_value(value):
    """Perform the graph operation on a single enrichment record.

    Parameters:
        value: dict with 'recordId' (or legacy 'id') and optionally a 'data'
            dict carrying 'entities', 'linked_entities', 'relations' and/or
            'pii_entities' lists.

    Returns:
        A record dict with the same recordId and either a 'data' payload or
        an 'errors' list; None when no record id can be determined.
    """
    # Bug fix: the original guarded two `in` checks with try/except
    # AssertionError (which they can never raise) and left recordId unbound
    # when both keys were missing, crashing later with NameError.
    recordId = value.get('recordId', value.get('id'))
    if recordId is None:
        return None
    data = value
    if 'data' in value:
        data = value['data']
    if not data:
        return ({
            "recordId": recordId,
            "data": {
                "text": "no data found."
            }
        })
    try:
        # Each branch below is still scaffolding: the vertex creation is
        # not implemented yet. Bug fix: the loops previously iterated over
        # undefined names (`entities`, `relations`), so any record carrying
        # these keys raised NameError — silently swallowed by a bare except.
        # NER entities, e.g.:
        # {"text": "...", "type": "Location", "offset": 11, "length": 18, "score": 0.46}
        if 'entities' in data:
            for entity in data['entities']:
                pass  # TODO: create a vertex per entity
        if 'linked_entities' in data:
            for entity in data['linked_entities']:
                pass  # TODO: create a vertex per linked entity
        if 'relations' in data:
            for rel in data['relations']:
                pass  # TODO: create an edge per relation
        if 'pii_entities' in data:
            # TODO: create a vertex per PII entity and link it to the document
            pass
        concatenated_string = "All Entities have been graph'ed."
        # NOTE(review): the original contained a bare `upload_blob` expression
        # here (a no-op); the intended call and its arguments are unknown —
        # restore it once the graph output format is decided.
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return (
            {
                "recordId": recordId,
                "errors": [{"message": "Could not complete operation for record."}]
            })
    return ({
        "recordId": recordId,
        "data": {
            "text": concatenated_string
        }
    })
| StarcoderdataPython |
149394 | <filename>database.py
import sqlite3
__all__ = ['Database']
class Database(object):
    """Minimal wrapper around a SQLite database file.

    Each call to ``_execute`` opens a fresh connection, runs one statement,
    commits, and closes the connection again.
    """
    __vars__ = []

    def __init__(self, name):
        # Path of the SQLite database file this instance operates on.
        self._name = name

    def _execute(self, command, args=None):
        """Run *command* (optionally parameterized with *args*) and return all rows.

        Parameters:
            command: SQL statement, with `?` placeholders when *args* is given.
            args: optional sequence of parameters bound to the placeholders.
        """
        # Bug fix: the connection string was hard-coded to "exel.db",
        # silently ignoring the name passed to __init__.
        connection = sqlite3.connect(self._name)
        try:
            cursor = connection.cursor()
            if args is None:
                out = cursor.execute(command).fetchall()
            else:
                out = cursor.execute(command, args).fetchall()
            connection.commit()
        finally:
            # Close even when execute() raises (previously leaked on error).
            connection.close()
        return out
| StarcoderdataPython |
6477959 | <reponame>padmec-reservoir/impress<gh_stars>1-10
"""
Module for implementation of multiscale mesh and CoarseVolumes objects functionalities
"""
from . finescaleMesh import FineScaleMesh
from ..msCoarseningLib import algoritmo
from ..msCoarseningLib.partitionTools import partitionManager
from . serialization import IMPRESSPickler, IMPRESSUnpickler
from . meshComponents import MoabVariable
from . mscorePymoab import MsCoreMoab
from . meshComponentsMS import MoabVariableMS, MeshEntitiesMS
from ..meshHandle.configTools.configClass import variableInit, coarseningInit
from pymoab import core, types, rng
import numpy as np
import pickle
from scipy.sparse import lil_matrix
print('Initializing Finescale Mesh for Multiscale Methods')
class FineScaleMeshMS(FineScaleMesh):
    """Fine-scale mesh augmented with a coarse (multiscale) partition.

    Builds the fine mesh through FineScaleMesh, then partitions it into a
    MultiscaleCoarseGrid whose elements are CoarseVolume instances.
    """
    def __init__(self, mesh_file, dim=3, var_config=None, load=False):
        self.var_config = var_config
        super().__init__(mesh_file, dim, load=load)
        print("Creating Coarse Grid")
        self.coarse = MultiscaleCoarseGrid(self, var_config, load = load)
        self.enhance_entities()

    def __getitem__ (self, key):
        # Integer indexing returns the key-th coarse volume.
        if not isinstance(key, int):
            raise ValueError("Invalid key type provided")
        return self.coarse.elements[key]

    def enhance_entities(self):
        # Give every coarse element its index and a reference to the grid
        # (CoarseVolume.__call__ wires its entities to the global grid).
        for i, el in zip(range(len(self.coarse.elements)), self.coarse.elements):
            el(i, self.coarse)

    def init_entities(self):
        # Multiscale-aware entity wrappers instead of the base-class ones.
        self.nodes = MeshEntitiesMS(self.core, entity_type= "node")
        self.edges = MeshEntitiesMS(self.core, entity_type= "edges")
        self.faces = MeshEntitiesMS(self.core, entity_type= "faces")
        if self.dim == 3:
            self.volumes = MeshEntitiesMS(self.core, entity_type="volumes")

    def save_variables(self, name_file):
        """Write the mesh to saves/<name>.h5m and pickle variable metadata to saves/<name>.imp."""
        self.core.mb.write_file('saves/' + name_file + '.h5m')
        with open('saves/' + name_file + '.imp', 'wb') as fp:
            pickle.dump([(tags.name_tag, tags.var_type, tags.data_size, tags.data_format, tags.data_density) for tags in self.var_handle_list], fp)
        # Each coarse volume persists its own variables too.
        for elements in self.coarse.elements:
            elements.save_variables(name_file)

    def load_variables(self):
        """Recreate variable handles from the pickled metadata next to the mesh file."""
        self.var_handle_list = []
        with open(self.mesh_file.split('.')[0]+'.imp', 'rb') as fp:
            tag_list = pickle.load(fp)
        for tags in tag_list:
            self.create_variable(name_tag = tags[0], var_type = tags[1], data_size = tags[2], data_format = tags[3], data_density = tags[4], create = False)

    def create_variable( self, name_tag, var_type= "volumes", data_size=1, data_format= "float", data_density= "sparse",
                        entity_index=None, level=0, coarse_num=0, create = True):
        """Create a MoabVariableMS, expose it as an attribute named *name_tag* and track it."""
        var = MoabVariableMS(self.core, data_size = data_size, var_type = var_type, data_format = data_format, name_tag = name_tag, data_density = data_density, entity_index = entity_index, level = level, coarse_num = coarse_num, create = create)
        # NOTE(review): setattr(self, name_tag, var) would avoid exec here.
        exec(f'self.{name_tag} = var')
        self.var_handle_list.append(var)
        return var

    def to_moab(self):
        # Flush every tracked variable (fine and coarse) back to MOAB tags.
        for vars in self.var_handle_list:
            vars.to_moab()
        for elements in self.coarse.elements:
            elements.to_moab()

    def to_numpy(self):
        # Pull every tracked variable (fine and coarse) into numpy arrays.
        for vars in self.var_handle_list:
            vars.to_numpy()
        for elements in self.coarse.elements:
            elements.to_numpy()

    def init_variables(self):
        # Commands from the variable config are executed verbatim;
        # presumably each command creates one variable — see variableInit.
        self.var_handle_list = []
        if self.var_config is None:
            self.var_config = variableInit()
        for command in self.var_config.get_var(self.core.level):
            exec(command)

    def init_partition(self):
        """Partition the mesh; return a MoabVariable tagging each element with its coarse id."""
        coarse_config = coarseningInit()
        partitioner = partitionManager(self, coarse_config)
        [partition_tag, _] = partitioner()
        if isinstance(partition_tag, str) and partition_tag == 'parallel':
            return self.init_partition_parallel()
        else:
            # The partition lives on faces in 2D and on volumes in 3D.
            if self.dim == 2:
                partition_moab = MoabVariable(self.core, data_size=1,
                                              var_type="faces", data_format="int",
                                              name_tag="Partition", data_density="dense")
            elif self.dim == 3:
                partition_moab = MoabVariable(self.core, data_size=1,
                                              var_type="volumes", data_format="int",
                                              name_tag="Partition", data_density="dense")
            partition_moab[:] = partition_tag
            return partition_moab

    def init_partition_parallel(self):
        """Create the (unfilled) 'Parallel' tag used by the parallel partitioning path."""
        if self.dim == 3:
            partition = MoabVariable(self.core, data_size=1,
                                     var_type="volumes", data_format="int",
                                     name_tag="Parallel", data_density="sparse")
        elif self.dim == 2:
            partition = MoabVariable(self.core, data_size=1, var_type="faces",
                                     data_format="int", name_tag="Parallel",
                                     data_density="sparse")
        return partition
class CoarseVolume(FineScaleMeshMS):
    """One coarse element of the multiscale grid.

    Reuses the FineScaleMeshMS machinery on the subset of the father mesh
    selected by *coarse_vec*. Deliberately does NOT call the parent
    __init__ (no recursive coarse grid is built).
    """
    def __init__(self, father_core, dim, i, coarse_vec, var_config=None , load = False, mesh_file = None):
        self.var_config = var_config
        self.dim = dim
        self.mesh_file = mesh_file
        self.level = father_core.level + 1
        self.coarse_num = i
        print("Level {0} - Volume {1}".format(self.level,self.coarse_num))
        # Restricted MOAB core holding only this coarse volume's entities.
        self.core = MsCoreMoab(father_core, i, coarse_vec)
        self.init_entities()
        if not load:
            self.init_variables()
            self.init_coarse_variables()
        else:
            self.load_variables()
        self.macro_dim()

    def init_variables(self):
        # Same mechanism as FineScaleMeshMS.init_variables, but the config
        # also receives this volume's index.
        self.var_handle_list = []
        if self.var_config is None:
            self.var_config = variableInit()
        for command in self.var_config.get_var(self.core.level, self.coarse_num):
            exec(command)

    def save_variables(self, name_file):
        """Pickle this volume's variable metadata to saves/<name><id-suffix>.imp."""
        # The suffix after "ID" in the core name distinguishes this coarse
        # volume's pickle from its siblings'.
        name = self.core.id_name
        name = name[(name.find("ID") + 3):]
        with open('saves/'+name_file+name+'.imp', 'wb') as fp:
            pickle.dump([(tags.name_tag, tags.var_type, tags.data_size, tags.data_format, tags.data_density) for tags in self.var_handle_list], fp)

    def load_variables(self):
        """Recreate variable handles from this volume's pickled metadata."""
        self.var_handle_list = []
        name = self.core.id_name
        name = name[(name.find("ID") + 3):]
        with open(self.mesh_file.split('.')[0]+name+'.imp', 'rb') as fp:
            tag_list = pickle.load(fp)
        for tags in tag_list:
            self.create_variable(name_tag = tags[0], var_type = tags[1], data_size = tags[2], data_format = tags[3], data_density = tags[4], level = self.level, coarse_num = self.coarse_num, create = False, suffix = name)
        return

    def create_variable(self, name_tag, var_type="volumes", data_size=1, data_format="float", data_density="sparse",
                        entity_index=None, level=0, coarse_num=0, create = True, suffix = None):
        """Create a MoabVariableMS, expose it as an attribute and track it."""
        var = MoabVariableMS(self.core, data_size = data_size, var_type = var_type, data_format = data_format, name_tag = name_tag, data_density = data_density, entity_index = entity_index, level = level, coarse_num = coarse_num, create = create)
        if suffix is not None:
            # Strip the per-volume suffix so the attribute name matches the
            # plain variable name.
            name_tag = name_tag.replace(suffix, '')
        # NOTE(review): setattr(self, name_tag, var) would avoid exec here.
        exec(f'self.{name_tag} = var')
        self.var_handle_list.append(var)
        return var

    def to_moab(self):
        for variables in self.var_handle_list:
            variables.to_moab()

    def to_numpy(self):
        for variables in self.var_handle_list:
            variables.to_numpy()

    def __call__(self, i, general):
        # Invoked once from FineScaleMeshMS.enhance_entities(): connects
        # this volume's entities to the global coarse grid *general*.
        self.nodes.enhance(i, general)
        self.edges.enhance(i, general)
        self.faces.enhance(i, general)
        if self.dim == 3:
            self.volumes.enhance(i, general)

    def init_coarse_variables(self):
        # Intentional no-op; presumably an overridable hook for subclasses.
        pass
class GetCoarseItem(object):
    """Lazy accessor for per-coarse-volume interface entities.

    Parameters:
        adj: tag-data fetch function (e.g. ``mb.tag_get_data``), called as
            ``adj(tag, entities, flat=True)`` for indexing or
            ``adj(tag, range)`` when called directly.
        tag: MOAB tag handle passed through to *adj*.
        dic: mapping coarse-volume index -> entity container exposing
            ``get_array()`` (and iteration for ``__call__``).
    """
    def __init__(self, adj, tag, dic):
        self.fun = adj
        self.tag = tag
        self.dic = dic

    def __len__(self):
        return len(self.dic)

    def __call__(self, item):
        """Return the tag data of *item*'s entities collected into a pymoab Range."""
        tmp = self.dic[item]
        el_list = rng.Range()
        for e in tmp:
            el_list.insert(e[0])
        return self.fun(self.tag, el_list)

    def __getitem__(self, item):
        """Integer index -> int64 tag data for that volume; slice -> concatenation.

        Bug fixes: the slice path previously accumulated into a float64
        array (inconsistent with the int64 single-index path) and did not
        clamp out-of-range slice bounds, which could raise KeyError.
        """
        if isinstance(item, int):
            return self.fun(self.tag, self.dic[item].get_array(), flat=True).astype(np.int64)
        elif isinstance(item, slice):
            # slice.indices normalizes None/negative/oversized bounds.
            start, stop, step = item.indices(len(self.dic))
            parts = [self.__getitem__(index) for index in range(start, stop, step)]
            if not parts:
                return np.array([], dtype=np.int64)
            return np.concatenate(parts)
class MultiscaleCoarseGrid(object):
def __init__(self, M, var_config, load = False):
    """Partition the fine mesh *M* and build one CoarseVolume per partition id."""
    self.mb = M.core.mb
    self.M = M
    self.partition = M.init_partition()
    # Coarse ids are assumed to be 0..max contiguously.
    self.elements = [CoarseVolume(M.core, M.dim, i,
                                  self.partition[:].ravel() == i, var_config)
                     for i in range(self.partition[:].max()+1 )]
    self.num_coarse = len(self.elements)
    # Maps entity-type names (and ints) to their topological dimension.
    self.num = {"nodes": 0, "node": 0, "edges": 1, "edge": 1, "faces": 2,
                "face": 2, "volumes": 3, "volume": 3,0: 0, 1: 1, 2: 2, 3: 3}
    self.local_volumes_tag = [volume.core.handleDic[volume.core.id_name] for volume in self.elements]
    self.father_tag = M.core.handleDic[M.core.id_name]
    self.global_tag = M.core.handleDic["GLOBAL_ID"]
    self._all_volumes = M.core.all_volumes
    self._all_faces = M.core.all_faces
    self._all_edges = M.core.all_edges
    self._all_nodes = M.core.all_nodes
    # Interface entities between coarse volumes: edges/nodes in 2D,
    # faces/edges/nodes in 3D.
    if self.M.dim == 2:
        self.find_coarse_neighbours_2d()
        self.interfaces_edges = GetCoarseItem(self.mb.tag_get_data, self.father_tag, self._edges)
        self.interfaces_nodes = GetCoarseItem(self.mb.tag_get_data, self.father_tag, self._nodes)
        self.iface_coarse_neighbors = self._internal_edges(M)
    elif self.M.dim == 3:
        self.find_coarse_neighbours_3d()
        self.interfaces_faces = GetCoarseItem(self.mb.tag_get_data, self.father_tag, self._faces)
        self.interfaces_edges = GetCoarseItem(self.mb.tag_get_data, self.father_tag, self._edges)
        self.interfaces_nodes = GetCoarseItem(self.mb.tag_get_data, self.father_tag, self._nodes)
        self.iface_coarse_neighbors = self._internal_faces(M)
def _internal_faces(self, M):
    """Return, per interface face, the pair of coarse ids it separates.

    Faces in self._faces are ordered internal-first (the first
    self.num_internal_faces entries); boundary faces pair a coarse id
    with itself.
    """
    faces = np.array([self._faces[el][0] for el in range (len(self._faces))], dtype=np.uint64)
    faces = self.mb.tag_get_data(self.father_tag, faces, flat=True)
    partition = self.partition[:].ravel()
    external = faces[self.num_internal_faces:]
    external_volumes = M.faces.bridge_adjacencies(external, interface="faces",target="volumes")
    ext_neigh = np.zeros((external_volumes.shape[0],2))
    # Boundary face: only one adjacent volume, so both columns carry the
    # same coarse id.
    ext_neigh[:,0], ext_neigh[:,1] = partition[external_volumes].ravel(), partition[external_volumes].ravel()
    if self.num_internal_faces == 0:
        # NOTE(review): this early return skips the .astype(np.int64) below,
        # yielding a float array — confirm callers tolerate that.
        return ext_neigh
    internal = faces[0:self.num_internal_faces]
    internal_volumes = M.faces.bridge_adjacencies(internal, interface="faces",target="volumes")
    int_neigh = np.vstack((partition[internal_volumes[:,0]],partition[internal_volumes[:,1]])).T
    return np.vstack((int_neigh,ext_neigh)).astype(np.int64)
def _internal_edges(self, M):
    """2D analogue of _internal_faces: per interface edge, the coarse-id pair.

    Edges in self._edges are ordered internal-first; boundary edges pair
    a coarse id with itself.
    """
    edges = np.array([self._edges[el][0] for el in range (len(self._edges))], dtype=np.uint64)
    edges = self.mb.tag_get_data(self.father_tag, edges, flat=True)
    partition = self.partition[:].ravel()
    external = edges[self.num_internal_edges:]
    external_faces = M.edges.bridge_adjacencies(external, interface="edges",target="faces")
    ext_neigh = np.zeros((external_faces.shape[0],2))
    ext_neigh[:,0], ext_neigh[:,1] = partition[external_faces].ravel(), partition[external_faces].ravel()
    if self.num_internal_edges == 0:
        # NOTE(review): skips the .astype(np.int64) below (float result) —
        # same caveat as _internal_faces.
        return ext_neigh
    internal = edges[0:self.num_internal_edges]
    # NOTE(review): `M.faces.bridge_adjacencies` on *edge* handles looks like
    # a copy-paste slip — the external branch above uses M.edges; confirm
    # against MeshEntitiesMS.bridge_adjacencies semantics.
    internal_faces = M.faces.bridge_adjacencies(internal, interface="edges",target="faces")
    int_neigh = np.vstack((partition[internal_faces[:,0]],partition[internal_faces[:,1]])).T
    return np.vstack((int_neigh,ext_neigh)).astype(np.int64)
def find_coarse_neighbours_2d(self):
    """Build the coarse-adjacency matrices and interface entity lists (2D).

    Populates: self._edges / self._nodes (interface entities, internal
    first), self.num_internal_edges / self.num_internal_nodes, the sparse
    *_neighbors and *_connectivities matrices (converted to CSR), and
    self.connectivities.

    Fix: ``dtype=np.bool`` and ``dtype=np.object`` use aliases deprecated
    in NumPy 1.20 and removed in 1.24; replaced with the builtins.
    """
    # Accumulators for all entities adjacent to any coarse interface.
    self.all_nodes_neighbors = rng.Range()
    self.all_edges_neighbors = rng.Range()
    self.all_faces_neighbors = rng.Range()
    # Column num_coarse is reserved for the mesh boundary.
    self._faces_neighbors = lil_matrix((self.num_coarse,self.num_coarse+1), dtype=np.uint32)
    self._edges_neighbors = lil_matrix((self.num_coarse,self.num_coarse+1), dtype=np.uint32)
    self._nodes_neighbors = lil_matrix((self.num_coarse,self.num_coarse+1), dtype=np.uint32)
    self.faces_connectivities = lil_matrix((self.num_coarse,self.num_coarse+1), dtype=bool)
    self.edges_connectivities = lil_matrix((self.num_coarse,self.num_coarse+1), dtype=bool)
    self.nodes_connectivities = lil_matrix((self.num_coarse,self.num_coarse+1), dtype=bool)
    # Interface edges: internal edges whose two adjacent faces belong to
    # different coarse partitions.
    edges_array = self.M.core.internal_edges.get_array()
    adj_array = self.mb.get_ord_adjacencies(edges_array, 2)[0]
    tg = self.mb.tag_get_handle('Partition')
    parts = self.mb.tag_get_data(tg, adj_array).reshape(-1,2)
    boundaries = self.M.core.boundary_edges.get_array()
    boundary_face = self.M.core.mb.get_ord_adjacencies(boundaries, 2)[0]
    self.all_faces_neighbors.insert(boundary_face)
    self.all_edges_neighbors.insert(boundaries)
    boundary_parts = self.mb.tag_get_data(tg, boundary_face, flat=True)
    indx = np.where(parts[:,0] != parts[:,1])[0]
    parts = parts[indx]
    inters_edges = edges_array[indx]
    self._edges, self.num_internal_edges = self.M.core.mb.get_interface_faces2(
        self.edges_connectivities, parts, inters_edges, boundaries, boundary_parts,
        self.num_coarse, self._edges_neighbors)
    print('Matrix of coarse edges adjacencies created')
    if inters_edges.size == 0:
        inters_nodes = np.array([], dtype=np.uint64)
        indx = np.array([], dtype=np.int64)
        coarse_jagged = np.array([], dtype=np.uint64)
    else:
        self.all_edges_neighbors.insert(inters_edges)
        self.all_faces_neighbors.insert(adj_array.reshape(-1,2)[indx].ravel())
        # Interface nodes: nodes of interface edges; for each, collect the
        # set of coarse partitions of its adjacent faces.
        inters_nodes = np.unique(self.mb.get_ord_adjacencies(inters_edges, 0)[0])
        self.all_nodes_neighbors.insert(inters_nodes)
        aux_tuple = self.M.core.mb.get_ord_adjacencies(inters_nodes, 2)
        temp_jagged = np.delete(np.array(np.split(aux_tuple[0], aux_tuple[1]), dtype=object), -1)
        jagged_index = np.array([temp_jagged[i].size
                                 for i in range(temp_jagged.shape[0])], dtype=np.int32)
        jagged_index = np.cumsum(jagged_index, dtype=np.int32)[:-1]
        coarse_array = self.M.core.mb.tag_get_data(tg, np.concatenate(temp_jagged), flat=True)
        coarse_jagged = np.array(np.split(coarse_array, jagged_index), dtype=object)
        coarse_jagged = np.array([np.unique(coarse_jagged[i])
                                  for i in range(coarse_jagged.shape[0])], dtype=object)
        # Nodes touching more than two partitions are flagged separately.
        indx = np.array([coarse_jagged[i].size > 2 for i in range(coarse_jagged.shape[0])])
    # Boundary nodes and the partitions of their adjacent faces.
    boundaries = self.M.core.boundary_nodes.get_array()
    self.all_nodes_neighbors.insert(boundaries)
    aux_tuple = self.M.core.mb.get_ord_adjacencies(boundaries, 2)
    temp_jagged = np.delete(np.array(np.split(aux_tuple[0], aux_tuple[1]), dtype=object), -1)
    jagged_index = np.array([temp_jagged[i].size for i in range(temp_jagged.shape[0])], dtype=np.int32)
    jagged_index = np.cumsum(jagged_index, dtype=np.int32)[:-1]
    boundary_parts = self.M.core.mb.tag_get_data(tg, np.concatenate(temp_jagged), flat=True)
    boundary_parts = np.array(np.split(boundary_parts, jagged_index), dtype=object)
    boundary_parts = np.array([np.unique(boundary_parts[i]).astype(np.int32) \
                               for i in range(boundary_parts.shape[0])], dtype=object)
    self._nodes, self.num_internal_nodes = self.M.core.mb.get_interface_entities2(
        self.nodes_connectivities, inters_nodes, coarse_jagged, indx,
        boundaries, boundary_parts, self.num_coarse, self._nodes_neighbors)
    print('Matrix of coarse nodes adjacencies created')
    # CSR is cheaper for the read-heavy access pattern after assembly.
    self._faces_neighbors = self._faces_neighbors.tocsr()
    self._edges_neighbors = self._edges_neighbors.tocsr()
    self._nodes_neighbors = self._nodes_neighbors.tocsr()
    self.faces_connectivities = self.faces_connectivities.tocsr()
    self.edges_connectivities = self.edges_connectivities.tocsr()
    self.nodes_connectivities = self.nodes_connectivities.tocsr()
    self.connectivities = (self.nodes_connectivities, self.edges_connectivities, self.faces_connectivities)
def find_coarse_neighbours_3d(self):
    """Build coarse-partition adjacency data for a 3-D mesh.

    Detects the fine-scale faces, edges and nodes lying on interfaces
    between coarse partitions (and on the global boundary) and fills the
    sparse neighbor/connectivity matrices used by the coarse-scale query
    helpers (iface_neighbors, iedge_neighbors, inode_neighbors, ...).
    """
    self.all_nodes_neighbors = rng.Range()
    self.all_edges_neighbors = rng.Range()
    self.all_faces_neighbors = rng.Range()
    self.all_volumes_neighbors = rng.Range()
    # one extra column so stored indices can be offset by +1 (0 == "absent")
    self._faces_neighbors = lil_matrix((self.num_coarse, self.num_coarse + 1), dtype=np.uint32)
    self._edges_neighbors = lil_matrix((self.num_coarse, self.num_coarse + 1), dtype=np.uint32)
    self._nodes_neighbors = lil_matrix((self.num_coarse, self.num_coarse + 1), dtype=np.uint32)
    # fix: np.bool was removed in NumPy >= 1.24 -- use the builtin bool
    self.faces_connectivities = lil_matrix((self.num_coarse, self.num_coarse + 1), dtype=bool)
    self.edges_connectivities = lil_matrix((self.num_coarse, self.num_coarse + 1), dtype=bool)
    self.nodes_connectivities = lil_matrix((self.num_coarse, self.num_coarse + 1), dtype=bool)
    # an internal face is an interface face when its two adjacent volumes
    # belong to different partitions
    faces_array = self.M.core.internal_faces.get_array()
    adj_array = self.mb.get_ord_adjacencies(faces_array, 3)[0]
    tg = self.mb.tag_get_handle('Partition')
    parts = self.mb.tag_get_data(tg, adj_array).reshape(-1, 2)
    boundaries = self.M.core.boundary_faces.get_array()
    boundary_vol = self.M.core.mb.get_ord_adjacencies(boundaries, 3)[0]
    self.all_volumes_neighbors.insert(boundary_vol)
    self.all_faces_neighbors.insert(boundaries)
    boundary_parts = self.mb.tag_get_data(tg, boundary_vol, flat=True)
    indx = np.where(parts[:, 0] != parts[:, 1])[0]
    parts = parts[indx]
    inters_faces = faces_array[indx]
    self._faces, self.num_internal_faces = self.M.core.mb.get_interface_faces2(
        self.faces_connectivities, parts, inters_faces, boundaries, boundary_parts,
        self.num_coarse, self._faces_neighbors)
    print('Matrix of coarse faces adjacencies created')
    # interface edges: edges of interface faces whose adjacent volumes span
    # more than two coarse partitions
    if inters_faces.size == 0:
        inters_edges = np.array([], dtype=np.uint64)
        indx = np.array([], dtype=np.int64)
        coarse_jagged = np.array([], dtype=np.uint64)
    else:
        self.all_faces_neighbors.insert(inters_faces)
        self.all_volumes_neighbors.insert(adj_array.reshape(-1, 2)[indx].ravel())
        inters_edges = np.unique(self.mb.get_ord_adjacencies(inters_faces, 1)[0])
        self.all_edges_neighbors.insert(inters_edges)
        aux_tuple = self.M.core.mb.get_ord_adjacencies(inters_edges, 3)
        temp_jagged = np.delete(np.array(np.split(aux_tuple[0], aux_tuple[1]), dtype=object), -1)
        jagged_index = np.array([temp_jagged[i].size
                                 for i in range(temp_jagged.shape[0])], dtype=np.int32)
        jagged_index = np.cumsum(jagged_index, dtype=np.int32)[:-1]
        coarse_array = self.M.core.mb.tag_get_data(tg, np.concatenate(temp_jagged), flat=True)
        coarse_jagged = np.array(np.split(coarse_array, jagged_index), dtype=object)
        coarse_jagged = np.array([np.unique(coarse_jagged[i])
                                  for i in range(coarse_jagged.shape[0])], dtype=object)
        indx = np.array([coarse_jagged[i].size > 2 for i in range(coarse_jagged.shape[0])])
    boundaries = self.M.core.boundary_edges.get_array()
    self.all_edges_neighbors.insert(boundaries)
    aux_tuple = self.M.core.mb.get_ord_adjacencies(boundaries, 3)
    temp_jagged = np.delete(np.array(np.split(aux_tuple[0], aux_tuple[1]), dtype=object), -1)
    jagged_index = np.array([temp_jagged[i].size for i in range(temp_jagged.shape[0])], dtype=np.int32)
    jagged_index = np.cumsum(jagged_index, dtype=np.int32)[:-1]
    boundary_parts = self.M.core.mb.tag_get_data(tg, np.concatenate(temp_jagged), flat=True)
    boundary_parts = np.array(np.split(boundary_parts, jagged_index), dtype=object)
    # fix: np.object was removed in NumPy >= 1.24 -- use the builtin object
    boundary_parts = np.array([np.unique(boundary_parts[i]).astype(np.int32)
                               for i in range(boundary_parts.shape[0])], dtype=object)
    self._edges, self.num_internal_edges = self.M.core.mb.get_interface_entities2(
        self.edges_connectivities, inters_edges, coarse_jagged, indx,
        boundaries, boundary_parts, self.num_coarse, self._edges_neighbors)
    print('Matrix of coarse edges adjacencies created')
    # interface nodes: same construction, one dimension lower
    if inters_faces.size == 0:
        inters_nodes = np.array([], dtype=np.uint64)
        indx = np.array([], dtype=np.int64)
        coarse_jagged = np.array([], dtype=np.uint64)
    else:
        inters_nodes = np.unique(self.mb.get_ord_adjacencies(inters_faces, 0)[0])
        self.all_nodes_neighbors.insert(inters_nodes)
        aux_tuple = self.M.core.mb.get_ord_adjacencies(inters_nodes, 3)
        temp_jagged = np.delete(np.array(np.split(aux_tuple[0], aux_tuple[1]), dtype=object), -1)
        jagged_index = np.array([temp_jagged[i].size for i in range(temp_jagged.shape[0])], dtype=np.int32)
        jagged_index = np.cumsum(jagged_index, dtype=np.int32)[:-1]
        coarse_array = self.M.core.mb.tag_get_data(tg, np.concatenate(temp_jagged), flat=True)
        coarse_jagged = np.array(np.split(coarse_array, jagged_index), dtype=object)
        coarse_jagged = np.array([np.unique(coarse_jagged[i])
                                  for i in range(coarse_jagged.shape[0])], dtype=object)
        indx = np.array([coarse_jagged[i].size > 2 for i in range(coarse_jagged.shape[0])])
    boundaries = self.M.core.boundary_nodes.get_array()
    self.all_nodes_neighbors.insert(boundaries)
    aux_tuple = self.M.core.mb.get_ord_adjacencies(boundaries, 3)
    temp_jagged = np.delete(np.array(np.split(aux_tuple[0], aux_tuple[1]), dtype=object), -1)
    jagged_index = np.array([temp_jagged[i].size for i in range(temp_jagged.shape[0])], dtype=np.int32)
    jagged_index = np.cumsum(jagged_index, dtype=np.int32)[:-1]
    boundary_parts = self.M.core.mb.tag_get_data(tg, np.concatenate(temp_jagged), flat=True)
    boundary_parts = np.array(np.split(boundary_parts, jagged_index), dtype=object)
    boundary_parts = np.array([np.unique(boundary_parts[i])
                               for i in range(boundary_parts.shape[0])], dtype=object)
    self._nodes, self.num_internal_nodes = self.M.core.mb.get_interface_entities2(
        self.nodes_connectivities, inters_nodes, coarse_jagged, indx, boundaries,
        boundary_parts, self.num_coarse, self._nodes_neighbors)
    print('Matrix of coarse nodes adjacencies created')
    # switch to CSR for fast row slicing in the query helpers
    self._faces_neighbors = self._faces_neighbors.tocsr()
    self._edges_neighbors = self._edges_neighbors.tocsr()
    self._nodes_neighbors = self._nodes_neighbors.tocsr()
    self.faces_connectivities = self.faces_connectivities.tocsr()
    self.edges_connectivities = self.edges_connectivities.tocsr()
    self.nodes_connectivities = self.nodes_connectivities.tocsr()
    self.connectivities = (self.nodes_connectivities, self.edges_connectivities, self.faces_connectivities)
def iface_neighbors(self, x):
    """Return (coarse partition ids, face-set indices) adjacent to partition `x` via faces."""
    row = self._faces_neighbors[x].toarray()[0]
    neighbours = np.nonzero(row)[0]
    # stored indices are offset by +1 (0 means "no interface")
    return neighbours, (row[neighbours] - 1).astype(np.int64)
def iedge_neighbors(self, x):
    """Return (coarse partition ids, edge-set indices) adjacent to partition `x` via edges."""
    row = self._edges_neighbors[x].toarray()[0]
    neighbours = np.nonzero(row)[0]
    # stored indices are offset by +1 (0 means "no interface")
    return neighbours, (row[neighbours] - 1).astype(np.int64)
def inode_neighbors(self, x):
    """Return (coarse partition ids, node-set indices) adjacent to partition `x` via nodes."""
    row = self._nodes_neighbors[x].toarray()[0]
    neighbours = np.nonzero(row)[0]
    # stored indices are offset by +1 (0 means "no interface")
    return neighbours, (row[neighbours] - 1).astype(np.int64)
def father_to_local_id(self, vec_range, element, target):
    """Translate father-mesh entity indices into local ids of coarse volume `target`.

    Args:
        vec_range: index selection (int, ndarray, slice or list) into the
            father-mesh entities of the given dimension.
        element: entity kind key understood by ``self.num`` (node/edge/face/volume).
        target: coarse volume whose local-id tag is queried.

    Raises:
        ValueError: if `element` does not map to a known dimension (0-3).
    """
    flag = self.num[element]
    vec = self.create_range_vec(vec_range)
    if flag == 0:
        handle = self.range_index(vec, self._all_nodes)
    elif flag == 1:
        handle = self.range_index(vec, self._all_edges)
    elif flag == 2:
        handle = self.range_index(vec, self._all_faces)
    elif flag == 3:
        handle = self.range_index(vec, self._all_volumes)
    else:
        # previously fell through and crashed with UnboundLocalError
        raise ValueError('Unknown element type: {}'.format(element))
    return self.mb.tag_get_data(self.local_volumes_tag[target], handle)
def neighbours(self, x, y, element):
    """Father-mesh ids of the interface entities shared by coarse volumes x and y.

    Returns None (as before) for entity kinds other than node/edge/face.
    """
    flag = self.num[element]
    dispatch = {
        0: (self._nodes, self._nodes_neighbors),
        1: (self._edges, self._edges_neighbors),
        2: (self._faces, self._faces_neighbors),
    }
    if flag in dispatch:
        entity_sets, neighbor_matrix = dispatch[flag]
        # stored matrix entries are offset by +1 (0 means "no interface")
        handles = entity_sets[neighbor_matrix[x, y] - 1].get_array()
        return self.mb.tag_get_data(self.father_tag, handles, flat=True).astype(np.int64)
@property
def all_interface_nodes(self):
    """Father-mesh ids of every node lying on a coarse interface or boundary."""
    handles = self.all_nodes_neighbors.get_array()
    return self.mb.tag_get_data(self.father_tag, handles, flat=True).astype(np.int64)
@property
def all_interface_edges(self):
    """Father-mesh ids of every edge lying on a coarse interface or boundary."""
    handles = self.all_edges_neighbors.get_array()
    return self.mb.tag_get_data(self.father_tag, handles, flat=True).astype(np.int64)
@property
def all_interface_faces(self):
    """Father-mesh ids of every face lying on a coarse interface or boundary."""
    handles = self.all_faces_neighbors.get_array()
    return self.mb.tag_get_data(self.father_tag, handles, flat=True).astype(np.int64)
@property
def all_interface_volumes(self):
    """Father-mesh ids of every volume adjacent to a coarse interface or boundary."""
    handles = self.all_volumes_neighbors.get_array()
    return self.mb.tag_get_data(self.father_tag, handles, flat=True).astype(np.int64)
def create_range_vec(self, index):
    """Normalize an int / ndarray / slice / list index into a flat index array.

    Boolean masks become the positions of their True entries; negative slice
    bounds count back from ``len(self)`` with this class's inclusive
    convention (``len(self) + bound + 1``). Unknown types yield None.
    """
    if isinstance(index, (int, np.integer)):
        return np.array([index]).astype("uint")
    if isinstance(index, np.ndarray):
        return np.where(index)[0] if index.dtype == "bool" else index
    if isinstance(index, slice):
        start = 0 if index.start is None else index.start
        stop = len(self) if index.stop is None else index.stop
        step = 1 if index.step is None else index.step
        if start < 0:
            start = len(self) + start + 1
        if stop < 0:
            stop = len(self) + stop + 1
        return np.arange(start, stop, step).astype('uint')
    if isinstance(index, list):
        return np.array(index)
    return None
def read_data(self, name_tag, index_vec=np.array([]), range_el=None):
    """Read tag `name_tag` over `range_el` (default: all volumes).

    A non-empty `index_vec` sub-selects entities first. Prints a message and
    returns None when the tag name is unknown (KeyError), as before.
    """
    target = self._all_volumes if range_el is None else range_el
    if index_vec.size > 0:
        target = self.range_index(index_vec, target)
    try:
        return self.mb.tag_get_data(self.M.core.handleDic[name_tag], target)
    except KeyError:
        print("Tag not found")
def range_index(self, vec_index, range_handle=None):
    """Select entity handles from `range_handle` (default: all volumes).

    `vec_index` is either a boolean mask (True positions are taken) or an
    integer index array; returns a Range over the chosen handles.
    """
    if range_handle is None:
        range_handle = self._all_volumes
    if vec_index.dtype == "bool":
        positions = np.where(vec_index)[0]
    else:
        positions = vec_index.astype("uint")
    handles = np.asarray(range_handle)[positions.astype("uint")].astype("uint")
    return rng.Range(handles)
| StarcoderdataPython |
# Root of the ModelNet40 (v1 rendered views) image dataset.
DATA_DIR = '../data/modelnet40v1'
# File extensions recognized as images when scanning the dataset.
IMG_SUFFIX_LIST = ['.jpg', '.jpeg']
# Sub-folder names inside DATA_DIR holding the two dataset splits.
TRAIN_FOLDER_NAME = 'train'
VALIDATION_FOLDER_NAME = 'test'
# Number of parallel data-loading workers (presumably torch DataLoader -- confirm).
NUM_WORKERS = 8
# Standard ImageNet per-channel statistics used to normalize input images.
NORMALIZATION_MEAN = [0.485, 0.456, 0.406]
NORMALIZATION_STD = [0.229, 0.224, 0.225]
# Probability threshold for a positive classification decision.
CLASSIFICATION_THRESHOLD = 0.5
# Averaging mode passed to the F1 metric.
F1_AVERAGE='macro'
class Solution:
    def findMaximumXOR(self, nums: List[int]) -> int:
        """Return the maximum XOR of any pair in nums, via a binary trie.

        Each number is inserted bit-by-bit (MSB first) while simultaneously
        probing the trie for the path that flips as many bits as possible.
        """
        width = len(bin(max(nums))) - 2  # bits needed for the largest value
        bit_rows = [[(value >> shift) & 1 for shift in range(width)][::-1]
                    for value in nums]
        best = 0
        root = {}
        for bits in bit_rows:
            insert_node = root
            probe_node = root
            xor_here = 0
            for bit in bits:
                insert_node = insert_node.setdefault(bit, {})
                opposite = 1 - bit
                if opposite in probe_node:
                    # a previously seen number differs here: take that branch
                    xor_here = (xor_here << 1) | 1
                    probe_node = probe_node[opposite]
                else:
                    xor_here = xor_here << 1
                    probe_node = probe_node[bit]
            best = max(best, xor_here)
        return best
6422489 | # load modules
from dataclasses import dataclass
from typing import List, Union
from . import Metadata, Score
# Immutable container pairing a page of scores with its response metadata.
@dataclass(frozen=True)
class ScoreCollection:
    # parsed Score objects (empty list or None when absent in the response)
    scores: Union[List[Score.Score], List, None]
    # request/pagination metadata attached to the API response
    metadata: Union[Metadata.Metadata, None]
# --- factory helpers -------------------------------------------------------
def gen(response):
    """Build a ScoreCollection from a raw API response dict (None passes through)."""
    if response is None:
        return None
    return ScoreCollection(
        scores=Score.gen_list(response.get('scores')),
        metadata=Metadata.gen(response.get('metadata')),
    )
def gen_list(response):
    """Map `gen` over a list of raw responses; None passes through, [] -> []."""
    if response is None:
        return None
    return [gen(item) for item in response]
3408405 | <gh_stars>0
from .mm_tasks import *
from .ofa_task import OFATask | StarcoderdataPython |
def first_last6(nums):
    """Return True if the list starts or ends with 6 (False for an empty list).

    The original made a redundant O(n) membership scan before checking the
    two end positions; the end check alone is sufficient and O(1).
    """
    return bool(nums) and (nums[0] == 6 or nums[-1] == 6)
def same_first_last(nums):
    """Return True when a non-empty list has equal first and last elements."""
    if not nums:
        return False
    return nums[0] == nums[-1]
def common_end(a, b):
    """Return True when a and b share the same first element or the same last element."""
    return a[0] == b[0] or a[-1] == b[-1]
def maior_ponta(nums):
c=1
if nums[0] >= nums[-1]:
pos1 = nums[0]
del nums[:]
while c <=3:
nums.append(pos1)
c=c+1
return nums
elif nums[0] < nums[-1]:
pos1 = nums[-1]
del nums[:]
while c <=3:
nums.append(pos1)
c=c+1
return nums
def sum2(nums):
    """Return the sum of the first two elements (or of whatever exists: 0 or 1 items).

    Fixed: the original emptied the caller's list via ``del nums[:]`` as an
    accidental destructive side effect; the input is now left untouched.
    """
    return sum(nums[:2])
def middle_way(a, b):
    """Return a new 2-element list holding the middle (index 1) elements of a and b.

    Fixed: the original clobbered the caller's list ``a`` in place; it now
    returns a fresh list and leaves both inputs untouched.
    """
    return [a[1], b[1]]
def date_fashion(eu, par):
    """Date outcome: 0 if either style is <= 2, else 2 if either is >= 8, else 1."""
    if eu <= 2 or par <= 2:
        return 0
    if eu >= 8 or par >= 8:
        return 2
    return 1
def squirrel_play(temp, is_summer):
    """True when temp is within [60, 90], with the upper bound raised to 100 in summer."""
    upper = 100 if is_summer else 90
    return 60 <= temp <= upper
def pego_correndo(speed, is_birthday):
    """Ticket level: 0 (none), 1 (small), 2 (big); birthdays grant 5 units of grace."""
    effective = speed - 5 if is_birthday else speed
    if effective <= 60:
        return 0
    elif 61 <= effective <= 80:
        return 1
    elif effective >= 81:
        return 2
def alarm_clock(day, vacation):
    """Alarm time: weekdays '7:00' ('10:00' on vacation); weekends '10:00' ('off' on vacation).

    Days are 0 (Sunday) through 6 (Saturday).
    """
    weekend = day == 0 or day == 6
    if vacation:
        return 'off' if weekend else '10:00'
    return '10:00' if weekend else '7:00'
11292756 | <gh_stars>0
def sfl(L):
    """ (list) -> bool
    Precondition: len(L) >= 2
    Return True if and only if first item of the list is same as last.
    >>> sfl([3, 4, 2, 8, 3])
    True
    >>> sfl(['a', 'b', 'c'])
    False
    """
    return (L[0] == L[-1])
def is_longer(L1, L2):
    """ (list, list) -> bool
    Return True if and only if L1 has strictly more elements than L2.
    >>> is_longer([1, 2, 3], [4, 5])
    True
    >>> is_longer(['a', 'b', 'c'], ['d', 'e', 'f'])
    False
    """
    length_one = len(L1)
    length_two = len(L2)
    return length_one > length_two
if __name__ == '__main__':
    # Read three whitespace-separated lists from the user.
    first_list = input("What is the list? ").split()
    second_list = input("Another please: ").split()
    third_list = input("One more: ").split()
    second_is_longer = is_longer(second_list, third_list)
    ends_match = sfl(first_list)
    if ends_match:
        print("The items at the beginning and end of List 1 are the same.")
    else:
        print("The items at the beginning and end of List 1 are not the same.")
    if second_is_longer:
        print("The second list is longer than the third.")
    else:
        print("The second list is not longer than the third.")
| StarcoderdataPython |
5163745 | <filename>Community/lease_history_ip/lease_history_ip_access.py
# Copyright 2019 BlueCat Networks (USA) Inc. and its affiliates
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2019-03-14
# Gateway Version: 18.10.2
# Description: Lease History IP Access
# Various Flask framework items.
import os
import sys
import codecs
import psycopg2
from flask import g, jsonify, request
from urllib.parse import urlparse
from main_app import app
from bluecat import route
from bluecat import util
import config.default_config as config
from bluecat.util import rest_exception_catcher
from bluecat.util import rest_workflow_permission_required
from bluecat.server_endpoints import empty_decorator
from bluecat.server_endpoints import get_result_template
def module_path():
    """Return the absolute path of the directory containing this module."""
    this_file = os.path.abspath(str(__file__))
    return os.path.dirname(this_file)
def get_resource_text():
    """Return the localized UI strings for this workflow's configured language."""
    return util.get_text(module_path(), config.language)
def get_configuration(api, config_name):
    """Look up and return the BAM configuration entity named `config_name`."""
    return api.get_configuration(config_name)
def raw_table_data(*args, **kwargs):
    """Return the empty TableField skeleton (localized headers, no rows)."""
    # pylint: disable=unused-argument
    text = get_resource_text()
    headers = [
        text['title_mac_address'],
        text['title_lease_time'],
        text['title_expiry_time'],
        text['title_update_count'],
    ]
    return {
        "columns": [{"title": title} for title in headers],
        "columnDefs": [
            {"className": "dt-body-right", "targets": [3]}
        ],
        "data": []
    }
def load_lease_history(config_id, ip_address):
    """
    Load lease_summary rows for one IP as TableField-ready data.

    :param config_id: configuration id used to scope the query.
    :param ip_address: IP Address for query (untrusted, comes from the HTTP request).
    :return: dict with localized column titles and one row per lease record.
    """
    db_address = os.environ['BAM_IP']
    connector = psycopg2.connect(host=db_address, database="proteusdb", user="bcreadonly")
    try:
        cursor = connector.cursor()
        # SECURITY FIX: the original interpolated ip_address straight into the
        # SQL text with %-formatting (SQL injection). Use parameterized
        # placeholders so psycopg2 quotes the values safely.
        sql = (
            "select "
            "long2mac(mac_address), "
            "to_char(lease_time, 'YYYY/MM/DD HH24:MI:SS'), "
            "to_char(expire_time, 'YYYY/MM/DD HH24:MI:SS'), "
            "update_count "
            "from "
            "public.lease_summary "
            "where "
            "conf_id = %s and long2ip4(ip_address) = %s "
            "order by expire_time desc"
        )
        cursor.execute(sql, (config_id, ip_address))
        result = cursor.fetchall()
        cursor.close()
    finally:
        # close the connection even if the query fails
        connector.close()
    text = get_resource_text()
    data = {
        "columns": [
            {"title": text['title_mac_address']},
            {"title": text['title_lease_time']},
            {"title": text['title_expiry_time']},
            {"title": text['title_update_count']},
        ],
        "columnDefs": [
            {"className": "dt-body-right", "targets": [3]}
        ],
        "data": []
    }
    for row in result:
        data['data'].append([row[0].upper(), row[1], row[2], row[3]])
    return data
def query_history_by_ip_endpoint(workflow_name, element_id, permissions, result_decorator=None):
    """Register (once) and return the POST endpoint that serves lease history by IP."""
    # pylint: disable=unused-argument
    endpoint = 'query_history_by_ip'
    function_endpoint = '%squery_history_by_ip' % workflow_name
    # If Flask already has this view registered, don't register it twice.
    view_function = app.view_functions.get(function_endpoint)
    if view_function is not None:
        return endpoint
    if not result_decorator:
        result_decorator = empty_decorator
    g.user.logger.info('Creating endpoint %s', endpoint)
    @route(app, '/%s/%s' % (workflow_name, endpoint), methods=['POST'])
    @rest_workflow_permission_required(permissions)
    @rest_exception_catcher
    # pylint: disable=unused-variable
    def query_history_by_ip():
        """Retrieve a list of properties for the table"""
        # pylint: disable=broad-except
        try:
            configuration = get_configuration(g.user.get_api(), config.default_configuration)
            ip_address = request.form['ip_address']
            data = load_lease_history(configuration.get_id(), ip_address)
            # Success: wrap the table rows in the standard result template.
            result = get_result_template()
            result['status'] = 'SUCCESS'
            result['data'] = {"table_field": data}
            return jsonify(result_decorator(result))
        except Exception as e:
            # Any failure is reported back as a FAIL result with the message.
            result = get_result_template()
            result['status'] = 'FAIL'
            result['message'] = str(e)
            return jsonify(result_decorator(result))
    return endpoint
| StarcoderdataPython |
11308093 | import rasa.nlu.training_data.entities_parser
import rasa.nlu.training_data.synonyms_parser
import rasa.nlu.training_data.lookup_tables_parser
from rasa.nlu.training_data.loading import load_data
from rasa.nlu.training_data.message import Message
from rasa.nlu.training_data.training_data import TrainingData
| StarcoderdataPython |
3284680 | # Tencent is pleased to support the open source community by making PocketFlow available.
#
# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Get a list of idle GPUs.
This script sorts GPUs in the ascending order of memory usage, and return the top-k ones.
"""
import os
import sys
import subprocess
# get the required number of idle GPUs from the command line
assert len(sys.argv) == 2
nb_idle_gpus = int(sys.argv[1])

# assume: an idle GPU has no more than 80% of its total memory in use
# (fixed: the old comment said 50%, contradicting the value below)
mem_usage_ulimit = .8

# query "<gpu id>, <memory used>, <total memory>" as CSV, one line per GPU
cmd = 'nvidia-smi --query-gpu=index,memory.used,memory.total ' \
      '--format=csv,noheader,nounits'
gpu_smi_output = subprocess.check_output(cmd, shell=True)
gpu_smi_output = gpu_smi_output.decode('utf-8')

# collect (gpu_id, mem_usage) pairs for GPUs below the usage threshold
# (splitlines() also keeps a final line that lacks a trailing newline,
# which split('\n')[:-1] silently dropped)
idle_gpus = []
for gpu in gpu_smi_output.splitlines():
    (gpu_id, mem_used, mem_total) = [int(value) for value in gpu.split(sep=',')]
    mem_usage = float(mem_used) / mem_total
    if mem_usage < mem_usage_ulimit:
        idle_gpus += [(gpu_id, mem_usage)]

# sort ascending by memory usage and keep the least-loaded GPU ids
idle_gpus.sort(key=lambda x: x[1])
idle_gpus = [x[0] for x in idle_gpus]  # only keep GPU ids
if len(idle_gpus) < nb_idle_gpus:
    raise ValueError('not enough idle GPUs; idle GPUs are: {}'.format(idle_gpus))
idle_gpus = idle_gpus[:nb_idle_gpus]
idle_gpus_str = ','.join([str(idle_gpu) for idle_gpu in idle_gpus])
print(idle_gpus_str)
| StarcoderdataPython |
11266431 | # Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# ffmpeg must be installed
from modules.runtime.scenario.scenario_generation.drone_challenge import DroneChallengeScenarioGeneration
from modules.runtime.commons.parameters import ParameterServer
from modules.runtime.viewer.matplotlib_viewer import MPViewer
from modules.runtime.viewer.video_renderer import VideoRenderer
import os
# Set up the scenario generator and the matplotlib viewer.
param_server = ParameterServer()
scenario_generation = DroneChallengeScenarioGeneration(num_scenarios=1, random_seed=0, params=param_server)
viewer = MPViewer(params=param_server, x_range=[-30, 30], y_range=[-30, 30])
sim_step_time = param_server["simulation"]["step_time",
                                           "Step-time used in simulation",
                                           0.2]
sim_real_time_factor = param_server["simulation"]["real_time_factor",
                                                  "execution in real-time or faster", 1]
scenario, idx = scenario_generation.get_next_scenario()
# Rendering WITHOUT intermediate steps
video_renderer = VideoRenderer(renderer=viewer, world_step_time=sim_step_time)
for _ in range(0, 10):  # replay the (single) generated scenario 10 times
    scenario, idx = scenario_generation.get_next_scenario()
    world_state = scenario.get_world_state()
    for _ in range(0, 5):  # render 5 simulation steps per replay
        video_renderer.drawWorld(world_state, scenario._eval_agent_ids, idx)
        world_state.step(sim_step_time)
video_renderer.export_video(filename="examples/scenarios/test_video_step")
3549904 | from ..factory import Type
class checkChatUsernameResultPublicChatsTooMuch(Type):
    """Marker result type: the user already owns too many public chats to set this username."""
    pass
| StarcoderdataPython |
199185 | <reponame>bmwant/podmena<filename>podmena/parser.py<gh_stars>10-100
import re
class RegexParser(object):
    """Extracts emoji short-names from rendered HTML span markup."""

    def __init__(self):
        # captures the :<short_name>: token wrapped in the site's span markup
        self.pattern = re.compile(
            r'<span .+></span>:<span .+>([\w_]+)</span>:</div>')

    def parse(self, text):
        """Return every captured short-name found in `text`."""
        return [match.group(1) for match in self.pattern.finditer(text)]
| StarcoderdataPython |
9644701 | <filename>py_to_win_app/py_to_win_app.py
import os
import re
import shutil
import subprocess
import sys
import zipfile
from contextlib import contextmanager
from pathlib import Path
from typing import Iterable, Union
import requests
from genexe.generate_exe import generate_exe
__all__ = ["Project"]
_PYTHON_VERSION_REGEX = re.compile(r"^(\d+|x)\.(\d+|x)\.(\d+|x)$")
_GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
_PYTHON_URL = "https://www.python.org/ftp/python"
_HEADER_NO_CONSOLE = (
"""import sys, os"""
+ """\nif sys.executable.endswith('pythonw.exe'):"""
+ """\n sys.stdout = open(os.devnull, 'w')"""
+ """\n sys.stderr = open(os.path.join(os.getenv(\'TEMP\'), """
+ """\'stderr-{}\'.format(os.path.basename(sys.argv[0]))), "w")"""
+ """\n\n"""
)
_DEFAULT_IGNORE_PATTERNS = [
"__pycache__",
"*.pyc",
"build.py",
]
@contextmanager
def _log(message):
    """Print `message` before the managed block and 'Done.' after it completes."""
    print(message)
    yield
    print("Done.\n")
class Project:
def __init__(
    self, input_dir: str, main_file: str, app_name: str = None
) -> None:
    """Record project layout and create the `build/` and `dist/` folders.

    Args:
        input_dir (str): Directory where your source files are.
        main_file (str): Path to entry point, e.g. `main.py`
        app_name (str, optional): App's name. If `None` then project's directory name will be used. Defaults to `None`.
    """  # noqa
    self._path = Path().cwd()
    self._input_path = self._path / input_dir
    self._main_file = main_file
    self._app_name = app_name if app_name is not None else self._path.name
    (self._path / "build").mkdir(exist_ok=True)
    # The paths below are populated later by build() / make_dist().
    self._build_path: Path = None
    self._source_path: Path = None
    self._exe_path: Path = None
    self._pydist_path: Path = None
    self._requirements_path: Path = None
    (self._path / "dist").mkdir(exist_ok=True)
    self._dist_path: Path = None
def build(
    self,
    python_version: str,
    pydist_dir: str = "pydist",
    requirements_file: str = "requirements.txt",
    extra_pip_install_args: Iterable[str] = (),
    build_dir: str = None,
    source_dir: str = None,
    # TODO ignore_input: Iterable[str] = (),
    show_console: bool = False,
    exe_name: str = None,
    icon_file: Union[str, Path, None] = None,
    # TODO: download_dir: Union[str, Path] = None,
) -> None:
    """Assemble a self-contained Windows build (sources + embedded Python + exe).

    Args:
        python_version (str): Embedded python version
        requirements_file (str, optional): Path to `requirements.txt` file. Defaults to `"requirements.txt"`.
        extra_pip_install_args (Iterable[str], optional): Arguments to be appended to the `pip install` command during installation of requirements. Defaults to `()`.
        build_dir (str, optional): Directory to place build to. If `None` then `app_name` attribute will be used. Defaults to `None`.
        pydist_dir (str, optional): Subdirectory where to place Python embedde interpreter. Defaults to `"pydist"`.
        source_dir (str, optional): Subdirectory where to place source code. If `None` then `app_nam` attribute will be used. Defaults to `None`.
        show_console (bool, optional): Show console window or not. Defaults to `False`.
        exe_name (str, optional): Name of `.exe` file. If `None` then name will be the same as `main_file`. Defaults to `None`.
        icon_file (Union[str, Path, None], optional): Path to icon file. Defaults to `None`.
    Raises:
        ValueError: If wrong Python version provided.
    """  # noqa
    if not self._is_correct_version(python_version):
        raise ValueError(
            f"Specified python version `{python_version}` "
            "does not have the correct format, it should be of format: "
            "`x.x.x` where `x` is a positive number."
        )
    # Resolve all build-tree paths (defaults derive from the app name).
    self._requirements_path = self._path / requirements_file
    if build_dir is not None:
        self._build_path = self._path / "build" / build_dir
    else:
        self._build_path = self._path / "build" / self._app_name
    self._pydist_path = self._build_path / pydist_dir
    if source_dir is not None:
        self._source_path = self._build_path / source_dir
    else:
        self._source_path = self._build_path / self._app_name
    self._make_empty_build_dir()
    self._copy_source_files()
    # download embedded python and extract it to build directory
    download_path = Path.home() / "Downloads"
    embedded_file_path = self._download_python_dist(
        download_path=download_path, python_version=python_version
    )
    self._extract_embedded_python(embedded_file_path)
    # download `get_pip.py` and copy it to build directory
    getpippy_file_path = self._download_getpippy(
        download_path=download_path
    )
    with _log(
        f"Coping `{getpippy_file_path}` file to `{self._pydist_path}`"
    ):
        shutil.copy2(getpippy_file_path, self._pydist_path)
    # Enable `import site` for the embedded interpreter, then install pip
    # and the project requirements into it.
    self._patch_pth_file(python_version=python_version)
    # self._extract_pythonzip(python_version=python_version)
    self._install_pip()
    self._install_requirements(
        requirements_file_path=self._requirements_path,
        extra_pip_install_args=list(extra_pip_install_args),
    )
    # Generate (and optionally rename) the launcher executable.
    icon_file_path = Path(icon_file) if icon_file is not None else None
    self._make_startup_exe(
        show_console=show_console, icon_file_path=icon_file_path
    )
    if exe_name is not None:
        self._rename_exe_file(new_file_name=exe_name)
    print(
        f"\nBuild done! Folder `{self._build_path}` "
        "contains your runnable application!\n"
    )
def make_dist(
    self, file_name: str = None, delete_build_dir: bool = False
) -> Path:
    """Zip the finished build into `dist/` and return the archive base path.

    NOTE(review): shutil.make_archive appends ".zip" to base_name, so the
    returned/stored path lacks the ".zip" suffix of the actual file -- confirm
    whether callers expect that.
    """
    if file_name is None:
        file_name = self._app_name
    zip_file_path = self._path / "dist" / file_name
    builds_dir = self.path / "build"
    with _log(f"Making zip archive {zip_file_path}"):
        shutil.make_archive(
            base_name=str(zip_file_path),
            format="zip",
            root_dir=str(builds_dir),
            base_dir=str(self.build_path.relative_to(builds_dir)),
        )
    self._dist_path = zip_file_path
    if delete_build_dir:
        # NOTE(review): _delete_build_dir is defined elsewhere in this class -- confirm.
        self._delete_build_dir()
    return zip_file_path
@property
def path(self) -> Path:
    """Project root directory (the working directory at construction time)."""
    return self._path
@property
def input_path(self) -> Path:
    """Directory containing the project's source files."""
    # fixed return annotation: this property returns a Path, not None
    return self._input_path
@property
def app_name(self) -> str:
    """Application name (defaults to the project directory name)."""
    return self._app_name
@property
def build_path(self) -> Path:
    """Build output directory; None until build() has run."""
    return self._build_path
@property
def source_path(self) -> Path:
    """Directory inside the build holding the copied sources; None until build()."""
    return self._source_path
@property
def exe_path(self) -> Path:
    """Path of the generated launcher executable; None before it is created.

    NOTE(review): presumably assigned by _make_startup_exe / _rename_exe_file
    (not shown here) -- confirm.
    """
    return self._exe_path
@property
def pydist_path(self) -> Path:
    """Directory inside the build holding the embedded Python; None until build()."""
    return self._pydist_path
@property
def requirements_path(self) -> Path:
    """Path to the requirements file recorded by build(); None before."""
    return self._requirements_path
@property
def dist_path(self) -> Path:
    """Base path of the archive produced by the last make_dist(); None before."""
    return self._dist_path
@staticmethod
def _is_correct_version(python_version) -> bool:
    """True if `python_version` matches the `x.y.z` pattern."""
    # fixed: return an actual bool instead of the Optional[Match] from re.match
    return _PYTHON_VERSION_REGEX.match(python_version) is not None
@staticmethod
def _get_short_version(python_version: str) -> str:
return "".join(python_version.split(".")[:2]) # 3.9.7 -> 39
@staticmethod
def _execute_os_command(command: str, cwd: str = None) -> str:
    """Run `command` in a shell, echoing its output live.

    Returns the remaining captured output on success (exit code 0) and
    raises Exception(command, exit_code, output) otherwise.
    """
    with _log(f"Running command: {command}"):
        process = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=os.getcwd() if cwd is None else cwd,
        )
        # Poll process for new output until finished, mirroring it to stdout
        # so long-running commands (pip, etc.) show progress live.
        while True:
            nextline = process.stdout.readline().decode("UTF-8")
            if nextline == "" and process.poll() is not None:
                break
            sys.stdout.write(nextline)
            sys.stdout.flush()
        output = process.communicate()[0]
        exit_code = process.returncode
        if exit_code == 0:
            print(output)
            return output
        else:
            raise Exception(command, exit_code, output)
@staticmethod
def _download_file(
    url: str, local_file_path: Path, chunk_size: int = 128
) -> None:
    """Stream `url` into `local_file_path` in `chunk_size`-byte chunks.

    Raises requests.HTTPError for non-2xx responses instead of silently
    writing an error page to disk, and closes the response connection.
    """
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_file_path, "wb") as fd:
            for chunk in r.iter_content(chunk_size=chunk_size):
                fd.write(chunk)
@staticmethod
def _unzip(zip_file_path: Path, destination_dir_path: Path) -> None:
with zipfile.ZipFile(zip_file_path, "r") as zip_file:
zip_file.extractall(destination_dir_path)
@staticmethod
def _download_python_dist(download_path: Path, python_version: str):
    """Download the embeddable CPython zip for `python_version` into `download_path`.

    Reuses an already-downloaded archive if present; returns the local path.
    Raises RuntimeError when the download leaves no file behind.
    """
    embedded_file_name = f"python-{python_version}-embed-amd64.zip"
    embedded_file_path = download_path / embedded_file_name
    with _log("Downloading embedde python."):
        # already cached from a previous build -- skip the download
        if embedded_file_path.is_file():
            return embedded_file_path
        Project._download_file(
            url=f"{_PYTHON_URL}/{python_version}/{embedded_file_name}",
            local_file_path=embedded_file_path,
        )
    if not embedded_file_path.is_file():
        raise RuntimeError("Download failed!")
    return embedded_file_path
@staticmethod
def _download_getpippy(download_path: Path) -> Path:
    """Download pip's bootstrap script `get-pip.py` into `download_path`.

    Reuses an existing copy if present; raises RuntimeError on failure.
    """
    getpippy_file_path = download_path / "get-pip.py"
    with _log("Downloading `get-pip.py`."):
        # already cached from a previous build -- skip the download
        if getpippy_file_path.exists():
            return getpippy_file_path
        Project._download_file(
            url=_GET_PIP_URL, local_file_path=getpippy_file_path
        )
    if not getpippy_file_path.exists():
        raise RuntimeError("Download failed!")
    return getpippy_file_path
def _make_empty_build_dir(self) -> None:
    """Ensure `self._build_path` exists and is empty (wipes any previous build)."""
    # Delete build folder if it exists
    if self._build_path.is_dir():
        with _log(
            f"Existing build directory found, "
            f"removing contents from `{self._build_path}`"
        ):
            shutil.rmtree(self._build_path)
    self._build_path.mkdir()
def _extract_embedded_python(self, embedded_file_path: Path) -> None:
    """Unpack the embeddable Python archive into the build's pydist folder."""
    with _log(
        f"Extracting `{embedded_file_path}` to `{self._pydist_path}`"
    ):
        self._unzip(
            zip_file_path=embedded_file_path,
            destination_dir_path=self._pydist_path,
        )
def _copy_source_files(self) -> None:
    """Copy the project sources into the build tree, skipping build artifacts."""
    ignore_patterns = []
    # never copy the build output into itself
    ignore_patterns.append(self._build_path.name)
    ignore_patterns += _DEFAULT_IGNORE_PATTERNS
    if not self._source_path.is_dir():
        self._source_path.mkdir()
    with _log(
        f"Copying files from `{self._input_path}` "
        + f"to `{self._source_path}`."
    ):
        shutil.copytree(
            src=self._input_path,
            dst=self._source_path,
            ignore=shutil.ignore_patterns(*ignore_patterns),
            dirs_exist_ok=True,
        )
def _patch_pth_file(self, python_version: str) -> None:
    """Rewrite the embedded interpreter's `pythonXY._pth` file.

    Adds the application's source directory to sys.path and uncomments
    `import site` so pip-installed packages become importable.
    """
    short_version = Project._get_short_version(python_version)
    pth_file_name = f"python{short_version}._pth"  # python39._pth
    pth_file_path = self._pydist_path / pth_file_name
    pythonzip_file_name = f"python{short_version}.zip"  # python39.zip
    with _log(
        f"Generating `{pth_file_path}` with uncommented `import site` line"
    ):
        # the source dir is a sibling of pydist unless they share a folder
        if self._pydist_path == self._build_path:
            relative_path_to_source = "."
        else:
            relative_path_to_source = ".."
        relative_path_to_source += f"\\{self._source_path.name}"
        pth_file_content = (
            f"{pythonzip_file_name}\n"
            + f"{relative_path_to_source}\n\n"
            + "# Uncomment to run site.main() automatically\n"
            + "import site\n"
        )
        pth_file_path.write_text(pth_file_content, encoding="utf8")
    def _extract_pythonzip(self, python_version: str) -> None:
        """Extract pythonXX.zip zip file to pythonXX.zip folder
        and delete pythonXX.zip zip file
        """
        short_version = Project._get_short_version(python_version)
        pythonzip_file_name = f"python{short_version}.zip"  # python39.zip
        pythonzip_file_path = self._pydist_path / pythonzip_file_name
        # The destination directory deliberately reuses the zip's own name,
        # so the archive must be moved out of the way before extraction.
        pythonzip_dir_path = Path(pythonzip_file_path)
        with _log(
            f"Extracting `{pythonzip_file_path}` to `{pythonzip_dir_path}`"
        ):
            # Rename python39.zip -> python39.temp_zip so a directory named
            # python39.zip can take its place, then drop the temp file.
            pythonzip_file_path = pythonzip_file_path.rename(
                pythonzip_file_path.with_suffix(".temp_zip")
            )
            Project._unzip(pythonzip_file_path, pythonzip_dir_path)
            pythonzip_file_path.unlink()
    def _install_pip(self) -> None:
        """Bootstrap pip into the embedded distribution via ``get-pip.py``.

        Raises:
            RuntimeError: if the ``Scripts`` directory is missing afterwards,
                i.e. the installation did not take effect.
        """
        with _log("Installing `pip`"):
            Project._execute_os_command(
                command="python.exe get-pip.py --no-warn-script-location",
                cwd=str(self._pydist_path),
            )
        # A successful install creates the Scripts/ folder (pip3.exe etc.).
        if not (self._pydist_path / "Scripts").exists():
            raise RuntimeError("Can not install `pip` with `get-pip.py`!")
def _install_requirements(
self,
requirements_file_path: Path,
extra_pip_install_args: list[str],
):
"""
Install the modules from requirements.txt file
- extra_pip_install_args (optional `List[str]`) :
pass these additional arguments to the pip install command
"""
with _log("Installing requirements"):
scripts_dir_path = self._pydist_path / "Scripts"
if extra_pip_install_args:
extra_args_str = " " + " ".join(extra_pip_install_args)
else:
extra_args_str = ""
try:
cmd = (
"pip3.exe install "
+ "--no-cache-dir --no-warn-script-location "
+ f"-r {str(requirements_file_path)}{extra_args_str}"
)
Project._execute_os_command(
command=cmd, cwd=str(scripts_dir_path)
)
return
except Exception:
print("Installing modules one by one")
modules = requirements_file_path.read_text().splitlines()
for module in modules:
try:
print(f"Installing {module} ...", end="", flush=True)
cmd = "pip3.exe install --no-cache-dir "
f"--no-warn-script-location {module}{extra_args_str}"
Project._execute_os_command(
command=cmd, cwd=str(scripts_dir_path)
)
print("done")
except Exception:
print("FAILED TO INSTALL ", module)
with (
self._build_path / "FAILED_TO_INSTALL_MODULES.txt"
).open(mode="a") as f:
f.write(module + "\n")
print("\n")
    def _make_startup_exe(
        self,
        show_console: bool,
        icon_file_path: Union[Path, None],
    ) -> Path:
        """Make the startup exe file needed to run the script.

        The generated exe invokes the embedded python.exe on the main file;
        both paths are expressed relative to ``{EXE_DIR}`` so the build
        folder stays relocatable.  Returns the path of the created exe.
        """
        relative_pydist_dir = self._pydist_path.relative_to(self._build_path)
        relative_source_dir = self._source_path.relative_to(self._build_path)
        exe_file_path = self._build_path / Path(self._main_file).with_suffix(
            ".exe"
        )
        python_entrypoint = "python.exe"
        command_str = (
            f"{{EXE_DIR}}\\{relative_pydist_dir}\\{python_entrypoint} "
            + f"{{EXE_DIR}}\\{relative_source_dir}\\{self._main_file}"
        )
        with _log(f"Making startup exe file `{exe_file_path}`"):
            generate_exe(
                target=exe_file_path,
                command=command_str,
                icon_file=icon_file_path,
                show_console=show_console,
            )
        if not show_console:
            # Prepend _HEADER_NO_CONSOLE to the main script exactly once
            # (presumably it guards stdout/stderr for console-less runs --
            # confirm against the constant's definition).
            main_file_path = self._source_path / self._main_file
            main_file_content = main_file_path.read_text(
                encoding="utf8", errors="surrogateescape"
            )
            if _HEADER_NO_CONSOLE not in main_file_content:
                main_file_path.write_text(
                    str(_HEADER_NO_CONSOLE + main_file_content),
                    encoding="utf8",
                    errors="surrogateescape",
                )
        self._exe_path = exe_file_path
        return exe_file_path
def _rename_exe_file(self, new_file_name: str) -> Path:
if new_file_name.lower().endswith(".exe"): # new_name.exe -> new_name
new_file_name = new_file_name.lower().rstrip(".exe")
new_exe_path = self.exe_path.with_stem(new_file_name)
with _log(f"Renaming {self.exe_path} -> {new_exe_path}"):
self._exe_path = self._exe_path.rename(new_exe_path)
return self.exe_path
def _delete_build_dir(self) -> None:
with _log(f"Removing build folder {self._build_path}!"):
shutil.rmtree(self.build_path)
self._build_path = self._source_path = self._pydist_path = None
| StarcoderdataPython |
6540446 | <filename>src/node/ext/ldap/tests/test_ugm_principals.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from node.base import BaseNode
from node.ext.ldap import LDAPNode
from node.ext.ldap import ONELEVEL
from node.ext.ldap import testing
from node.ext.ldap.filter import LDAPFilter
from node.ext.ldap.ugm import Group
from node.ext.ldap.ugm import Groups
from node.ext.ldap.ugm import GroupsConfig
from node.ext.ldap.ugm import Role
from node.ext.ldap.ugm import Roles
from node.ext.ldap.ugm import RolesConfig
from node.ext.ldap.ugm import Ugm
from node.ext.ldap.ugm import User
from node.ext.ldap.ugm import Users
from node.ext.ldap.ugm import UsersConfig
from node.ext.ldap.ugm._api import member_attribute
from node.ext.ldap.ugm._api import member_format
from node.ext.ldap.ugm._api import PrincipalAliasedAttributes
from node.tests import NodeTestCase
from odict import odict
# Test layer fixture: LDAP server populated from the principals LDIF.
layer = testing.LDIF_principals

# Groups are ``groupOfNames`` entries directly below the domain root;
# both the RDN and the principal id map to ``cn``.
gcfg = GroupsConfig(
    baseDN='dc=my-domain,dc=com',
    attrmap=odict((
        ('rdn', 'cn'),
        ('id', 'cn')
    )),
    scope=ONELEVEL,
    queryFilter='(objectClass=groupOfNames)',
    objectClasses=['groupOfNames']
)

# Roles live in a dedicated ``ou=roles`` subtree but are otherwise modelled
# like groups (``groupOfNames`` with ``cn`` as id/RDN, no creation defaults).
rcfg = RolesConfig(
    baseDN='ou=roles,dc=my-domain,dc=com',
    attrmap=odict((
        ('rdn', 'cn'),
        ('id', 'cn')
    )),
    scope=ONELEVEL,
    queryFilter='(objectClass=groupOfNames)',
    objectClasses=['groupOfNames'],
    defaults={}
)
class TestUGMPrincipals(NodeTestCase):
layer = layer
def test_user_basics(self):
props = testing.props
ucfg = testing.ucfg
# Create a LDAPUsers node and configure it. In addition to the key
# attribute, the login attribute also needs to be unique, which will
# be checked upon calling ids() the first time
self.assertEqual(sorted(ucfg.attrmap.items()), [
('id', 'sn'),
('login', 'cn'),
('rdn', 'ou'),
('sn', 'sn'),
('telephoneNumber', 'telephoneNumber')
])
# Query all user ids. Set ``cn`` as login attribute. In this case,
# values are unique and therefore suitable as login attr
users = Users(props, ucfg)
self.assertEqual(
users.ids,
[u'Meier', u'Müller', u'Schmidt', u'Umhauer']
)
# Principals idbydn
self.assertEqual(
users.idbydn('cn=user3,ou=customers,dc=my-domain,dc=com'),
'Schmidt'
)
self.assertEqual(
users.idbydn('cN=user3, ou=customers,dc=MY-domain,dc= com'),
'Schmidt'
)
err = self.expect_error(
KeyError,
users.idbydn,
'cN=inexistent, ou=customers,dc=MY-domain,dc= com'
)
self.assertEqual(
str(err),
"'cN=inexistent, ou=customers,dc=MY-domain,dc= com'"
)
# Get a user by id (utf-8 or unicode)
mueller = users[u'Müller']
self.assertTrue(isinstance(mueller, User))
self.assertTrue(mueller is users[u'Müller'])
# The real LDAP node is on ``context``
self.assertEqual(
repr(mueller.context),
'<cn=user2,ou=customers,dc=my-domain,dc=com:cn=user2 - False>'
)
# The '?' is just ``__repr__`` going to ascii, the id is in proper unicode
self.check_output("<User object 'M...ller' at ...>", repr(mueller))
self.assertEqual(mueller.id, u'Müller')
# A user has a login
self.assertEqual(mueller.login, 'user2')
# And attributes
self.assertTrue(isinstance(mueller.attrs, PrincipalAliasedAttributes))
self.check_output("""
Aliased <LDAPNodeAttributes object 'cn=user2' at ...>
""", repr(mueller.attrs))
context_attrs = sorted(mueller.attrs.context.items())
self.assertEqual(context_attrs[:-1], [
(u'cn', u'user2'),
(u'objectClass', [u'top', u'person']),
(u'sn', u'Müller'),
(u'telephoneNumber', u'1234')
])
self.assertEqual(context_attrs[-1][0], u'userPassword')
self.assertEqual(sorted(mueller.attrs.items()), [
('id', u'Müller'),
('login', u'user2'),
('telephoneNumber', u'1234')
])
# Query all user nodes
self.check_output("""
[<User object 'Meier' at ...>,
<User object 'M...ller' at ...>,
<User object 'Schmidt' at ...>,
<User object 'Umhauer' at ...>]
""", str([users[id] for id in sorted(users.keys())]))
self.assertEqual([repr(users[id].context) for id in sorted(users.keys())], [
'<cn=user1,dc=my-domain,dc=com:cn=user1 - False>',
'<cn=user2,ou=customers,dc=my-domain,dc=com:cn=user2 - False>',
'<cn=user3,ou=customers,dc=my-domain,dc=com:cn=user3 - False>',
'<cn=n?sty\, User,ou=customers,dc=my-domain,dc=com:cn=n?sty\, User - False>'
])
# test object classes changes in config
self.assertEqual(
users.context.child_defaults['objectClass'],
['person']
)
users.context.child_defaults['objectClass'] = [
'person',
'extensibleObject'
]
self.assertEqual(
mueller.context.attrs['objectClass'],
['top', 'person']
)
mueller()
self.assertEqual(
sorted(mueller.context.attrs['objectClass']),
['extensibleObject', 'person', 'top']
)
# note, by default, existing object classes missing in configured
# creation default object classes are NOT removed.
users.context.child_defaults['objectClass'] = ['person']
mueller()
self.assertEqual(
sorted(mueller.context.attrs['objectClass']),
['extensibleObject', 'person', 'top']
)
    def test_authentication(self):
        """Password checks and password changes via User and Users APIs."""
        props = testing.props
        ucfg = testing.ucfg
        users = Users(props, ucfg)
        mueller = users[u'Müller']
        # Authenticate a user, via the user object. (also see 'via LDAPUsers'
        # below, after passwd, this is to make sure, that LDAPUsers.authenticate
        # does not work on a cached copy)
        self.assertTrue(mueller.authenticate('foo2'))
        self.assertFalse(mueller.authenticate('bar'))
        # Change a users password, supplying the old password, via the user object
        oldpw = 'foo2'
        newpw = 'new'
        mueller.passwd(oldpw, newpw)
        self.assertFalse(mueller.authenticate('foo2'))
        self.assertTrue(mueller.authenticate('new'))
        # And via LDAPUsers::
        oldpw = newpw
        newpw = 'newer'
        users.passwd(mueller.id, oldpw, newpw)
        # Authenticate a user via LDAPUsers, either by id or by login, but not
        # both. The id is returned if successful, otherwise None
        self.assertFalse(users.authenticate('wrong', 'creds'))
        self.assertEqual(users.authenticate(mueller.login, 'newer'), u'Müller')
        self.assertFalse(users.authenticate(id='wrong', pw='cresd'))
        self.assertFalse(users.authenticate(id=mueller.id, pw='bar'))
        self.assertEqual(users.authenticate(id=mueller.id, pw='newer'), u'Müller')
def test_create_user(self):
# Create new User. Provide some user defaults in user configuration.
# A default is either the desired value or a callback accepting the
# principals node and the id and returns the desired value.
def telephoneNumberDefault(node, id):
# default value callback function
return '123'
props = testing.props
add_ucfg = UsersConfig(
baseDN='ou=customers,dc=my-domain,dc=com',
attrmap=odict((
('rdn', 'cn'),
('id', 'sn'),
('login', 'cn'),
('telephoneNumber', 'telephoneNumber'),
('sn', 'sn')
)),
scope=ONELEVEL,
queryFilter='(objectClass=person)',
objectClasses=['top', 'person'],
defaults={
'sn': 'Surname',
'telephoneNumber': telephoneNumberDefault,
},
)
users = Users(props, add_ucfg)
self.assertEqual(
sorted(users.ids),
[u'Müller', u'Schmidt', u'Umhauer', u'sn_binary']
)
user = users.create(
'newid',
login='newcn',
id='ID Ignored', # gets ignored, id is taken from pid arg
sn='Surname Ignored' # gets ignored, id maps to sn, thus id rules
)
self.assertTrue(isinstance(user, User))
self.assertEqual(
repr(user.context),
'<cn=newcn,ou=customers,dc=my-domain,dc=com:cn=newcn - True>'
)
self.assertEqual(sorted(user.attrs.items()), [
('id', u'newid'),
('rdn', u'newcn'),
('telephoneNumber', u'123')
])
self.assertEqual(sorted(user.context.attrs.items()), [
(u'cn', u'newcn'),
(u'objectClass', [u'top', u'person']),
(u'sn', u'newid'),
(u'telephoneNumber', u'123')
])
self.assertEqual(
sorted(users.ids),
[u'Müller', u'Schmidt', u'Umhauer', u'newid', u'sn_binary']
)
err = self.expect_error(
KeyError,
users.create,
'newid'
)
self.assertEqual(
str(err).replace('u"', '"'),
'"Principal with id \'newid\' already exists."'
)
err = self.expect_error(
ValueError,
users.__setitem__,
'foo',
BaseNode()
)
self.assertEqual(str(err), "Given value not instance of 'User'")
self.assertEqual(
repr(users['newid'].context),
'<cn=newcn,ou=customers,dc=my-domain,dc=com:cn=newcn - True>'
)
# Persist and reload
users()
users.reload = True
self.assertEqual(
sorted(users.keys()),
[u'Müller', u'Schmidt', u'Umhauer', u'newid', 'sn_binary'])
self.assertEqual(
repr(users['newid'].context),
'<cn=newcn,ou=customers,dc=my-domain,dc=com:cn=newcn - False>'
)
# Delete User
del users['newid']
users.context()
def test_search(self):
props = testing.props
ucfg = testing.ucfg
users = Users(props, ucfg)
# Search for users
schmidt = users['Schmidt']
self.assertEqual(
users.search(criteria=dict(sn=schmidt.attrs['sn']), exact_match=True),
[u'Schmidt']
)
self.assertEqual(
sorted(users.search()),
[u'Meier', u'Müller', u'Schmidt', u'Umhauer']
)
self.assertEqual(users.search(attrlist=['login']), [
(u'Meier', {'login': [u'user1']}),
(u'Müller', {'login': [u'user2']}),
(u'Schmidt', {'login': [u'user3']}),
(u'Umhauer', {'login': [u'nästy, User']})
])
self.assertEqual(
users.search(criteria=dict(sn=schmidt.attrs['sn']), attrlist=['login']),
[(u'Schmidt', {'login': [u'user3']})]
)
# By default, search function is paginated. To control the LDAP search
# behavior in more detail, ``raw_search`` can be used
results = users.raw_search()
self.assertEqual(results, [u'Meier', u'Müller', u'Schmidt', u'Umhauer'])
results, cookie = users.raw_search(page_size=3, cookie='')
self.assertEqual(results, [u'Meier', u'Müller', u'Schmidt'])
results, cookie = users.raw_search(page_size=3, cookie=cookie)
self.assertEqual(results, [u'Umhauer'])
self.assertEqual(cookie, b'')
# Only attributes defined in attrmap can be queried
self.expect_error(
KeyError,
users.search,
criteria=dict(sn=schmidt.attrs['sn']),
attrlist=['description']
)
self.assertEqual(
users.search(
criteria=dict(sn=schmidt.attrs['sn']),
attrlist=['telephoneNumber']
),
[(u'Schmidt', {'telephoneNumber': [u'1234']})]
)
filter = LDAPFilter('(objectClass=person)')
filter &= LDAPFilter('(!(objectClass=inetOrgPerson))')
filter |= LDAPFilter('(objectClass=some)')
# normally set via principals config
original_search_filter = users.context.search_filter
self.assertEqual(
original_search_filter,
'(&(objectClass=person)(!(objectClass=inetOrgPerson)))'
)
users.context.search_filter = filter
self.assertEqual(
users.search(),
[u'Meier', u'Müller', u'Schmidt', u'Umhauer']
)
filter = LDAPFilter('(objectClass=person)')
filter &= LDAPFilter('(objectClass=some)')
# normally set via principals config
users.context.search_filter = filter
self.assertEqual(users.search(), [])
users.context.search_filter = original_search_filter
def test_changed_flag(self):
props = testing.props
ucfg = testing.ucfg
users = Users(props, ucfg)
# The changed flag
self.assertFalse(users.changed)
self.check_output("""
<class 'node.ext.ldap.ugm._api.Users'>: None
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
""", users.treerepr())
self.assertEqual(
repr(users[users.values()[1].name].context),
'<cn=user2,ou=customers,dc=my-domain,dc=com:cn=user2 - False>'
)
self.check_output("""
<dc=my-domain,dc=com - False>
...
<cn=user2,ou=customers,dc=my-domain,dc=com:cn=user2 - False>
<cn=user3,ou=customers,dc=my-domain,dc=com:cn=user3 - False>
<cn=n?sty\, User,ou=customers,dc=my-domain,dc=com:cn=n?sty\, User - False>
...
<cn=user1,dc=my-domain,dc=com:cn=user1 - False>
...
""", users.context.treerepr())
users['Meier'].attrs['telephoneNumber'] = '12345'
self.assertTrue(users['Meier'].attrs.changed)
self.assertTrue(users['Meier'].changed)
self.assertTrue(users.changed)
self.check_output("""
<dc=my-domain,dc=com - True>
...
<cn=user2,ou=customers,dc=my-domain,dc=com:cn=user2 - False>
<cn=user3,ou=customers,dc=my-domain,dc=com:cn=user3 - False>
<cn=n?sty\, User,ou=customers,dc=my-domain,dc=com:cn=n?sty\, User - False>
...
<cn=user1,dc=my-domain,dc=com:cn=user1 - True>
...
""", users.context.treerepr())
users['Meier'].attrs.context.load()
self.assertFalse(users['Meier'].attrs.changed)
self.assertFalse(users['Meier'].changed)
self.assertFalse(users.changed)
self.check_output("""
<dc=my-domain,dc=com - False>
...
<cn=user2,ou=customers,dc=my-domain,dc=com:cn=user2 - False>
<cn=user3,ou=customers,dc=my-domain,dc=com:cn=user3 - False>
<cn=n?sty\, User,ou=customers,dc=my-domain,dc=com:cn=n?sty\, User - False>
...
<cn=user1,dc=my-domain,dc=com:cn=user1 - False>
...
""", users.context.treerepr())
    def test_invalidate(self):
        """Invalidating single principals and the whole cache drops entries
        from both the principals storage and the underlying LDAP node tree.
        """
        props = testing.props
        ucfg = testing.ucfg
        users = Users(props, ucfg)
        # Make sure data is loaded and trees are initialized
        users.context.treerepr()
        users.treerepr()
        # Invalidate principals
        self.assertEqual(len(users.storage.keys()), 4)
        self.assertEqual(len(users.context.storage.keys()), 5)
        # Invalidating an unknown id is a no-op.
        users.invalidate(u'Inexistent')
        self.assertEqual(len(users.storage.keys()), 4)
        self.assertEqual(len(users.context.storage.keys()), 5)
        self.assertEqual(
            sorted(users.storage.keys()),
            [u'Meier', u'Müller', u'Schmidt', u'Umhauer']
        )
        user_container = users[u'Schmidt'].context.parent.storage
        self.assertEqual(len(user_container.keys()), 7)
        # Invalidating a known id removes it from caches on both layers.
        users.invalidate(u'Schmidt')
        self.assertEqual(
            sorted(users.storage.keys()),
            [u'Meier', u'Müller', u'Umhauer']
        )
        self.assertEqual(len(user_container.keys()), 6)
        self.assertEqual(len(users.context.keys()), 5)
        # Invalidating without an id clears everything.
        users.invalidate()
        self.assertEqual(len(users.storage.keys()), 0)
        self.assertEqual(len(users.context.storage.keys()), 0)
    def test_group_basics(self):
        """Standalone Groups node: keys, attribute access and creation
        defaults; a standalone Users node has no group back-reference.
        """
        props = testing.props
        ucfg = testing.ucfg
        users = Users(props, ucfg)
        # A user does not know about it's groups if initialized directly
        err = self.expect_error(
            AttributeError,
            lambda: users['Meier'].groups
        )
        self.assertEqual(str(err), "'NoneType' object has no attribute 'groups'")
        # Create a LDAPGroups node and configure it
        groups = Groups(props, gcfg)
        self.assertEqual(groups.keys(), [u'group1', u'group2'])
        self.assertEqual(groups.ids, [u'group1', u'group2'])
        group = groups['group1']
        self.assertTrue(isinstance(group, Group))
        # Aliased attributes expose 'member' and 'rdn' ...
        self.assertEqual(sorted(group.attrs.items()), [
            ('member', [
                u'cn=user3,ou=customers,dc=my-domain,dc=com',
                u'cn=user2,ou=customers,dc=my-domain,dc=com'
            ]),
            ('rdn', u'group1')
        ])
        # ... while the raw LDAP context still carries cn/objectClass.
        self.assertEqual(sorted(group.attrs.context.items()), [
            (u'cn', u'group1'),
            (u'member', [
                u'cn=user3,ou=customers,dc=my-domain,dc=com',
                u'cn=user2,ou=customers,dc=my-domain,dc=com'
            ]),
            (u'objectClass', [u'top', u'groupOfNames'])
        ])
        self.assertEqual(
            groups.context.child_defaults,
            {'objectClass': ['groupOfNames']}
        )
    def test_add_group(self):
        """Creating and deleting a group; new groups get a dummy member."""
        props = testing.props
        groups = Groups(props, gcfg)
        group = groups.create('group3')
        self.assertEqual(sorted(group.attrs.items()), [
            ('member', ['cn=nobody']),
            ('rdn', u'group3')
        ])
        self.assertEqual(sorted(group.attrs.context.items()), [
            (u'cn', u'group3'),
            (u'member', ['cn=nobody']),
            (u'objectClass', [u'groupOfNames'])
        ])
        groups()
        self.assertEqual(groups.ids, [u'group1', u'group2', u'group3'])
        # XXX: dummy member should be created by default value callback,
        # currently a __setitem__ plumbing on groups object
        res = groups.context.ldap_session.search(
            queryFilter='cn=group3',
            scope=ONELEVEL
        )
        # Raw search returns byte values for the persisted entry.
        self.assertEqual(res, [
            ('cn=group3,dc=my-domain,dc=com', {
                'member': [b'cn=nobody'],
                'objectClass': [b'groupOfNames'],
                'cn': [b'group3']
            })
        ])
        # Delete create group
        del groups['group3']
        groups()
def test_membership(self):
props = testing.props
groups = Groups(props, gcfg)
# Directly created groups object have no access to it's refering users
err = self.expect_error(
AttributeError,
lambda: groups['group1'].member_ids
)
self.assertEqual(str(err), "'NoneType' object has no attribute 'users'")
# Currently, the member relation is computed hardcoded and maps to
# object classes. This will propably change in future. Right now
# 'posigGroup', 'groupOfUniqueNames', and 'groupOfNames' are supported
self.assertEqual(member_format(['groupOfUniqueNames']), 0)
self.assertEqual(member_attribute(['groupOfUniqueNames']), 'uniqueMember')
self.assertEqual(member_format(['groupOfNames']), 0)
self.assertEqual(member_attribute(['groupOfNames']), 'member')
self.assertEqual(member_format(['posixGroup']), 1)
self.assertEqual(member_attribute(['posixGroup']), 'memberUid')
err = self.expect_error(Exception, member_format, 'foo')
expected = 'Can not lookup member format for object-classes: foo'
self.assertEqual(str(err), expected)
err = self.expect_error(Exception, member_attribute, 'foo')
expected = 'Can not lookup member attribute for object-classes: foo'
self.assertEqual(str(err), expected)
self.assertEqual(groups['group1']._member_format, 0)
self.assertEqual(groups['group1']._member_attribute, 'member')
# Create a UGM object
ucfg = layer['ucfg']
ugm = Ugm(props=props, ucfg=ucfg, gcfg=gcfg)
# Fetch users and groups
self.assertTrue(isinstance(ugm.users, Users))
self.assertTrue(isinstance(ugm.groups, Groups))
self.assertEqual(ugm.groups._key_attr, 'cn')
group_1 = ugm.groups['group1']
self.assertEqual(len(group_1.users), 2)
self.assertTrue(isinstance(group_1.users[0], User))
self.assertEqual(
sorted([it.name for it in group_1.users]),
[u'Müller', u'Schmidt']
)
group_2 = ugm.groups['group2']
self.assertEqual([it.name for it in group_2.users], [u'Umhauer'])
schmidt = ugm.users['Schmidt']
self.assertEqual(schmidt.group_ids, [u'group1'])
self.assertEqual(len(schmidt.groups), 1)
self.assertTrue(isinstance(schmidt.groups[0], Group))
self.assertEqual([it.name for it in schmidt.groups], [u'group1'])
# Add and remove user from group
group = ugm.groups['group1']
self.assertEqual(group.member_ids, [u'Schmidt', u'Müller'])
self.assertEqual(
group.translate_key('Umhauer'),
u'cn=nästy\\, User,ou=customers,dc=my-domain,dc=com'
)
group.add('Umhauer')
self.assertEqual(sorted(group.attrs.items()), [
('member', [
u'cn=user3,ou=customers,dc=my-domain,dc=com',
u'cn=user2,ou=customers,dc=my-domain,dc=com',
u'cn=nästy\\, User,ou=customers,dc=my-domain,dc=com'
]),
('rdn', u'group1')
])
self.assertEqual(
group.member_ids,
[u'Schmidt', u'Müller', u'Umhauer']
)
group()
del group['Umhauer']
self.assertEqual(group.member_ids, [u'Schmidt', u'Müller'])
# Delete Group
groups = ugm.groups
group = groups.create('group3')
group.add('Schmidt')
groups()
self.assertEqual(
groups.keys(),
[u'group1', u'group2', u'group3']
)
self.assertEqual(len(groups.values()), 3)
self.assertTrue(isinstance(groups.values()[0], Group))
self.assertEqual(
[it.name for it in groups.values()],
[u'group1', u'group2', u'group3']
)
self.assertEqual(
[it.name for it in ugm.users['Schmidt'].groups],
[u'group1', u'group3']
)
self.assertEqual(group.member_ids, [u'Schmidt'])
del groups['group3']
groups()
self.assertEqual(groups.keys(), [u'group1', u'group2'])
self.assertEqual(ugm.users['Schmidt'].group_ids, ['group1'])
def test_zzz_principal_roles(self):
# XXX: add users and groups before deleting them.
# then we can remove ``zzz`` test ordering hack from function name
props = testing.props
ucfg = layer['ucfg']
# Test role mappings. Create container for roles if not exists.
node = LDAPNode('dc=my-domain,dc=com', props)
node['ou=roles'] = LDAPNode()
node['ou=roles'].attrs['objectClass'] = ['organizationalUnit']
node()
# Test accessing unconfigured roles.
ugm = Ugm(props=props, ucfg=ucfg, gcfg=gcfg, rcfg=None)
user = ugm.users['Meier']
self.assertEqual(ugm.roles(user), [])
err = self.expect_error(
ValueError,
ugm.add_role,
'viewer',
user
)
self.assertEqual(str(err), 'Role support not configured properly')
err = self.expect_error(
ValueError,
ugm.remove_role,
'viewer',
user
)
self.assertEqual(str(err), 'Role support not configured properly')
# Configure role config represented by object class 'groupOfNames'
ugm = Ugm(props=props, ucfg=ucfg, gcfg=gcfg, rcfg=rcfg)
roles = ugm._roles
self.assertTrue(isinstance(roles, Roles))
# No roles yet.
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
""", roles.treerepr())
# Test roles for users.
user = ugm.users['Meier']
self.assertEqual(ugm.roles(user), [])
# Add role for user, role gets created if not exists.
ugm.add_role('viewer', user)
self.assertEqual(roles.keys(), [u'viewer'])
role = roles[u'viewer']
self.assertTrue(isinstance(role, Role))
self.assertEqual(role.member_ids, [u'Meier'])
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
""", roles.treerepr())
ugm.roles_storage()
# Query roles for principal via ugm object.
self.assertEqual(ugm.roles(user), ['viewer'])
# Query roles for principal directly.
self.assertEqual(user.roles, ['viewer'])
# Add some roles for 'Schmidt'.
user = ugm.users['Schmidt']
user.add_role('viewer')
user.add_role('editor')
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.Role'>: editor
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
""", roles.treerepr())
self.assertEqual(user.roles, ['viewer', 'editor'])
ugm.roles_storage()
# Remove role 'viewer'.
ugm.remove_role('viewer', user)
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.Role'>: editor
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
""", roles.treerepr())
# Remove role 'editor', No other principal left, remove role as well.
user.remove_role('editor')
self.assertEqual(roles.storage.keys(), ['viewer'])
self.assertEqual(roles.context._deleted_children, set([u'cn=editor']))
self.assertEqual(roles.keys(), [u'viewer'])
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
""", roles.treerepr())
ugm.roles_storage()
# Test roles for group.
group = ugm.groups['group1']
self.assertEqual(ugm.roles(group), [])
ugm.add_role('viewer', group)
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
""", roles.treerepr())
self.assertEqual(ugm.roles(group), ['viewer'])
self.assertEqual(group.roles, ['viewer'])
group = ugm.groups['group2']
group.add_role('viewer')
group.add_role('editor')
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.Group'>: group2
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
<class 'node.ext.ldap.ugm._api.Role'>: editor
<class 'node.ext.ldap.ugm._api.Group'>: group2
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
""", roles.treerepr())
ugm.roles_storage()
# If role already granted, an error is raised.
err = self.expect_error(
ValueError,
group.add_role,
'editor'
)
self.assertEqual(str(err), "Principal already has role 'editor'")
self.assertEqual(group.roles, ['viewer', 'editor'])
ugm.remove_role('viewer', group)
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.Role'>: editor
<class 'node.ext.ldap.ugm._api.Group'>: group2
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
""", roles.treerepr())
group.remove_role('editor')
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
""", roles.treerepr())
ugm.roles_storage()
# If role not exists, an error is raised.
err = self.expect_error(
ValueError,
group.remove_role,
'editor'
)
self.assertEqual(str(err), "Role not exists 'editor'")
# If role is not granted, an error is raised.
err = self.expect_error(
ValueError,
group.remove_role,
'viewer'
)
self.assertEqual(str(err), "Principal does not has role 'viewer'")
# Roles return ``Role`` instances on ``__getitem__``
role = roles['viewer']
self.assertTrue(isinstance(role, Role))
# Group keys are prefixed with 'group:'
self.assertEqual(role.member_ids, [u'Meier', u'group:group1'])
# ``__getitem__`` of ``Role`` returns ``User`` or ``Group`` instance,
# depending on key.
self.assertTrue(isinstance(role['Meier'], User))
self.assertTrue(isinstance(role['group:group1'], Group))
# A KeyError is raised when trying to access an inexistent role member.
self.expect_error(KeyError, role.__getitem__, 'inexistent')
# A KeyError is raised when trying to delete an inexistent role member.
self.expect_error(KeyError, role.__delitem__, 'inexistent')
# Delete user and check if roles are removed.
self.check_output("""
<class 'node.ext.ldap.ugm._api.Ugm'>: None
<class 'node.ext.ldap.ugm._api.Users'>: users
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
<class 'node.ext.ldap.ugm._api.Groups'>: groups
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.Group'>: group2
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
""", ugm.treerepr())
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.User'>: Meier
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
""", roles.treerepr())
users = ugm.users
del users['Meier']
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
<class 'node.ext.ldap.ugm._api.Role'>: viewer
<class 'node.ext.ldap.ugm._api.Group'>: group1
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
""", roles.treerepr())
self.assertEqual(
users.storage.keys(),
[u'Schmidt', u'Müller', u'Umhauer']
)
self.assertEqual(
users.keys(),
[u'Müller', u'Schmidt', u'Umhauer']
)
self.check_output("""
<class 'node.ext.ldap.ugm._api.Users'>: users
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
""", users.treerepr())
# Delete group and check if roles are removed.
del ugm.groups['group1']
self.check_output("""
<class 'node.ext.ldap.ugm._api.Roles'>: roles
""", roles.treerepr())
self.check_output("""
<class 'node.ext.ldap.ugm._api.Ugm'>: None
<class 'node.ext.ldap.ugm._api.Users'>: users
<class 'node.ext.ldap.ugm._api.User'>: M...ller
<class 'node.ext.ldap.ugm._api.User'>: Schmidt
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
<class 'node.ext.ldap.ugm._api.Groups'>: groups
<class 'node.ext.ldap.ugm._api.Group'>: group2
<class 'node.ext.ldap.ugm._api.User'>: Umhauer
""", ugm.treerepr())
ugm()
| StarcoderdataPython |
159265 | <reponame>isb-cgc/ISB-CGC-Webapp
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sharing', '0001_initial'),
('cohorts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Workbook',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=2024)),
('description', models.CharField(max_length=2024)),
('date_created', models.DateTimeField(auto_now_add=True)),
('last_date_saved', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('is_public', models.BooleanField(default=False)),
('owner', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
('shared', models.ManyToManyField(to='sharing.Shared_Resource')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Workbook_Last_View',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('test', models.DateTimeField(auto_now_add=True, null=True)),
('last_view', models.DateTimeField(auto_now=True, auto_now_add=True)),
('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
('workbook', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Workbook')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Workbook_Perms',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('perm', models.CharField(default=b'READER', max_length=10, choices=[(b'READER', b'Reader'), (b'OWNER', b'Owner')])),
('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, blank=True)),
('workbook', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Workbook')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Worksheet',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=2024)),
('description', models.CharField(max_length=2024)),
('last_date_saved', models.DateTimeField(auto_now_add=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('workbook', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Workbook')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Worksheet_cohort',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('cohort', models.ForeignKey(on_delete=models.CASCADE, to='cohorts.Cohort')),
('worksheet', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Worksheet')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Worksheet_comment',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('content', models.CharField(max_length=2024)),
('user', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
('worksheet', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Worksheet')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Worksheet_gene',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('gene', models.CharField(max_length=2024)),
('worksheet', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Worksheet')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Worksheet_plot',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('type', models.CharField(max_length=1024, null=True)),
('active', models.BooleanField(default=True)),
('cohort', models.ForeignKey(on_delete=models.CASCADE, related_name='worksheet_plot.cohort', blank=True, to='workbooks.Worksheet_cohort', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Worksheet_variable',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=2024)),
('type', models.CharField(max_length=1024, null=True, blank=True)),
('url_code', models.CharField(max_length=2024)),
('feature', models.ForeignKey(on_delete=models.CASCADE, blank=True, to='projects.User_Feature_Definitions', null=True)),
('worksheet', models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Worksheet')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='worksheet_plot',
name='color_by',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='worksheet_plot.color_by', blank=True, to='workbooks.Worksheet_variable', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='worksheet_plot',
name='worksheet',
field=models.ForeignKey(on_delete=models.CASCADE, to='workbooks.Worksheet', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='worksheet_plot',
name='x_axis',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='worksheet_plot.x_axis', blank=True, to='workbooks.Worksheet_variable', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='worksheet_plot',
name='y_axis',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='worksheet_plot.y_axis', blank=True, to='workbooks.Worksheet_variable', null=True),
preserve_default=True,
),
]
| StarcoderdataPython |
3280510 | """Compare the speed of exact one-norm calculation vs. its estimation.
"""
from __future__ import division, print_function, absolute_import
import time
import numpy as np
import scipy.sparse.linalg
class BenchmarkOneNormEst(object):
    """ASV-style benchmark comparing the exact one-norm of M^2 against the
    estimate produced by scipy's onenormest machinery.
    """

    # asv parameter grid: matrix size n x solver variant.
    params = [
        [2, 3, 5, 10, 30, 100, 300, 500, 1000],
        ['exact', 'onenormest']
    ]
    param_names = ['n', 'solver']
    goal_time = 0.5

    def setup(self, n, solver):
        """Pre-generate a fixed batch of random n-by-n matrices.

        Seeded so every (n, solver) combination times identical inputs,
        and sampled here so generation stays outside the timed region.
        """
        np.random.seed(1234)
        nrepeats = 100
        shape = (n, n)
        # Sample the matrices.
        self.matrices = [np.random.randn(*shape) for _ in range(nrepeats)]

    def time_onenormest(self, n, solver):
        """Timed body: one-norm of M^2, computed exactly or estimated."""
        if solver == 'exact':
            # Get the exact values of one-norms of squares.
            # BUG FIX: the norm must be taken of M2 (= M @ M); the original
            # passed M, leaving M2 unused and measuring a different quantity
            # than the estimator branch below.
            for M in self.matrices:
                M2 = M.dot(M)
                scipy.sparse.linalg.matfuncs._onenorm(M2)
        elif solver == 'onenormest':
            # Get the estimates of one-norms of squares without forming M @ M.
            for M in self.matrices:
                scipy.sparse.linalg.matfuncs._onenormest_matrix_power(M, 2)
| StarcoderdataPython |
11281652 | <reponame>ehsankorhani/python-lessons<filename>27-advanced-oop/soild-srp.py
# class Car:
# def __init__(self, speed: float, odometer: int):
# self.speed = speed
# self.odometer = odometer
# def accelerate(self):
# return self.speed * 1.1
# def save_current_odometer(self):
# pass
class CarDB:
    """Persistence gateway for Car state (stub used by the SRP demo)."""

    def save_current_odometer(self, obj):
        """Persist the odometer reading of *obj*; placeholder, does nothing."""
        pass


class Car:
    """Vehicle domain object; persistence is delegated to CarDB (SRP)."""

    def __init__(self, speed: float, odometer: int):
        self.speed = speed
        self.odometer = odometer
        self._db = CarDB()

    def accelerate(self):
        """Return the current speed increased by 10% (state is not mutated)."""
        return 1.1 * self.speed

    def save_current_odometer(self):
        """Hand the save off to the persistence gateway."""
        self._db.save_current_odometer(obj=self)
| StarcoderdataPython |
4815678 | #!/usr/bin/env python
# just testing basic parallelization that will be used in the actual project
from __future__ import division,unicode_literals
from future.builtins import map,zip, range
import numpy as np
import itertools as it
# setup proper logging
import logging
logger = logging.getLogger('psnobfit')
logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
log_stream_handler = logging.StreamHandler()
log_stream_handler.setLevel(logging.DEBUG)
log_formatter = logging.Formatter('%(asctime)s %(name)s/%(levelname)-9s %(message)s')
log_stream_handler.setFormatter(log_formatter)
logger.addHandler(log_stream_handler)
del logging, log_stream_handler, log_formatter
# read cmd line options
from optparse import OptionParser
opt_parser = OptionParser()
opt_parser.add_option("-p", "--profile", dest="client_profile", default="unissh", action="store_const",
help="the profile to use for ipython.parallel")
options, args = opt_parser.parse_args()
# START: create remote evaluators and a few (or one) special one for #
# generating new points
logger.info("init")
from IPython.parallel import Client, require
c = Client(profile=options.client_profile)
c.clear() # clears remote engines
c.purge_results('all') # all results are memorized in the hub
if len(c.ids) < 2:
raise Exception('I need at least 2 clients.')
nbGens = min(1, len(c.ids) - 1)
generators = c.load_balanced_view(c.ids[:nbGens])
evaluators = c.load_balanced_view(c.ids[nbGens:])
# MAX number of tasks in total
MAX = 5000
# length of test data, sent over the wire
DIMSIZE = 10
# when adding machines, this is the number of additional tasks
# beyond the number of free machines
new_extra = DIMSIZE
# import some packages (also locally)
with c[:].sync_imports():
from IPython.utils.timing import time # time.time & time.clock for cpu time
#import time
from random import random
from numpy import pi, sum
import numpy
import math
# the actual function
def func_one(tid, data):
    """Trivial remote objective for smoke tests: ignore *data*, return (tid, 1)."""
    return tid, 1
def func_sum(tid, data):
    """Remote objective: sum *data* (a number or a list/vector of numbers).

    Sleeps a random delay (up to ~0.69 s) to simulate evaluation cost,
    then returns (tid, sum(data)).
    """
    time.sleep(math.log(1 + random()))
    return tid, numpy.sum(data)
def func_eval(tid, data):
    """Deterministic synthetic objective for the benchmark.

    Maps *data* (1-d numpy array) through an element-wise trig product and
    returns (tid, exp(mean absolute deviation of that product from 1)).
    """
    scaled = numpy.pi / 2 * data
    trig = numpy.cos(numpy.pi + scaled) * numpy.sin(scaled + numpy.pi / 2)
    value = numpy.exp(numpy.linalg.norm(trig - 1, 1) / len(data))
    return tid, value
func = func_eval
# some stats
added = 0
queue_size = 0
added = 0
nb_finished = 0
nb_generated = 0
loops = 0
tasks_added = 0
cum_sum = 0
best_x = None
best_obj = numpy.infty
last_best = best_obj
def status():
    """Log one progress line for the driver loop.

    Reads the module-level counters (queue_size, new, added, nb_finished,
    nb_generated, best_obj); marks the line with '*' when best_obj changed
    since the previous call, and remembers the logged value in last_best.
    """
    global last_best
    s = '*' if last_best != best_obj else ' '  # '*' flags a new incumbent since the last status line
    logger.info(
        "pend %4d | + %2d | tot: %4d | finished: %4d | gen: %3d | best_obj: %.10f %s" %
        (queue_size, new, added, nb_finished, nb_generated, best_obj, s))
    last_best = best_obj
logger.info("start")
start_time = time.time()
# pending is the set of jobs we are expecting in each loop
pending = set([])
pending_generators = set([])
new_points = []
# collects all returns
results = []
allx = dict() # store all x vectors
def gen_points(new, DIMSIZE, cur_best_res=None, cur_best_x=None):
    '''
    Generate *new* candidate points of dimension DIMSIZE.

    With probability ~0.2, or whenever no incumbent (cur_best_res) is known,
    samples uniformly from the box [0, FACT)**DIMSIZE (global exploration).
    Otherwise perturbs the incumbent cur_best_x with a sparse random step
    (roughly 10% of coordinates moved by at most 0.1) and clips to [0, 2].

    NOTE(review): callers that seed numpy rely on the exact order of the RNG
    draws below; do not reorder these statements.
    '''
    np = numpy
    #lambda rp : 10 * (np.random.rand(DIMSIZE) )
    FACT = 3
    OFF = 0
    if np.random.random() < .2 or not cur_best_res:
        # Global exploration: uniform samples scaled into the search box.
        return np.array([FACT * (np.random.rand(DIMSIZE) + OFF) for _ in range(new)])
    # Local exploitation: small sparse perturbations around the incumbent.
    ret = []
    for i in range(new):
        rv = (np.random.rand(DIMSIZE) - .5) / 5
        # make it sparse: zero out ~90% of the coordinates
        sp = np.random.rand(DIMSIZE) < .9
        rv[sp] = 0
        #import scipy
        #rv = scipy.sparse.rand(DIMSIZE, 1, 0.1)
        ret.append(np.minimum(2, np.maximum(0, rv + cur_best_x)))
    return np.array(ret)
# itertools counter for successive task ID numbers
tid_counter = it.count(0)
while pending or added < MAX:
evaluators.spin() # check outstanding tasks
loops += 1
# get new points if they have arrived
# check if we have to generate new points
if not new_points:
if results:
cur_best_res = min(results, key=lambda _: _[1])
cur_best_x = allx[cur_best_res[0]]
else:
cur_best_res, cur_best_x = None, None
new = len(c.ids) - queue_size + new_extra
# at the end, make sure to not add more tasks then MAX
new = min(new, MAX - added)
# update the counter
added += new
new_points_tasks = generators.map_async(
gen_points,
[new],
[DIMSIZE],
[cur_best_res],
[cur_best_x],
ordered=False)
# print ">>>", new_points_tasks.msg_ids
list(map(pending_generators.add, new_points_tasks.msg_ids))
finished_generators = pending_generators.difference(generators.outstanding)
pending_generators = pending_generators.difference(finished_generators)
# if we have generated points in the queue, eval the function
for msg_id in finished_generators:
res = generators.get_result(msg_id)
nb_generated += len(res.result)
for g in res.result:
#logger.info('new points "%s" = %s' % (msg_id, g))
cs = max(1, min(5, len(res.result)))
newt = evaluators.map_async(func, tids, vals, chunksize=cs, ordered=False)
cum_sum += 1
# check, if we have to create new tasks
queue_size = len(pending)
if queue_size <= len(c.ids) + new_extra and added < MAX:
tasks_added += 1
new = len(c.ids) - queue_size + new_extra
# at the end, make sure to not add more tasks then MAX
new = min(new, MAX - added)
# update the counter
added += new
status()
# create new tasks
tids, vals = list(it.islice(tid_counter, new)), gen_points(new, DIMSIZE)
chunksize = max(1, min(new, len(c.ids)))
newt = evaluators.map_async(func, tids, vals, chunksize=chunksize, ordered=False)
allx.update(list(zip(tids, vals)))
list(map(pending.add, newt.msg_ids))
else:
new = 0
# finished is the set of msg_ids that are complete
finished = pending.difference(evaluators.outstanding)
# update pending to exclude those that just finished
pending = pending.difference(finished)
# collect results from finished tasks
for msg_id in finished:
# we know these are done, so don't worry about blocking
res = evaluators.get_result(msg_id)
nb_finished += len(res.result)
# each job returns a list of length chunksize
for t in res.result:
logger.debug("result '%s' = %s" % (msg_id, t))
results.append(t)
cum_sum += 1 # t[1] ## just to test how many results come back
if t[1] < best_obj:
best_obj = t[1]
best_x = allx[t[0]]
# wait for 'pending' jobs or 1/1000s
evaluators.wait(None, 1e-3) # pending.union(pending_generators), 1e-3)
status()
logger.debug("queues:")
for k, v in sorted(evaluators.queue_status().items()):
logger.debug("%5s: %s" % (k, v))
logger.info("pending: %s" % pending)
logger.info("added in total: %s" % added)
#logger.info("results: %s" % sorted([r[0] for r in results]))
logger.info("# machines = %s" % len(c.ids))
logger.info("# results = %s" % len(results))
logger.info("cum_sum = %s" % cum_sum)
logger.info(
"# total loops %s | of that, %s times tasks were added | %.4f%%" %
(loops, tasks_added, tasks_added / float(loops) * 100.))
ttime = time.time() - start_time
evalspersec = added / ttime
logger.info("total time: %s [s] | %.5f [feval/s]" % (ttime, evalspersec))
logger.info("best:")
logger.info(" obj. value: %f" % best_obj)
logger.info(" x:\n%s" % best_x)
logger.info("finished")
| StarcoderdataPython |
1617415 | import urllib
import urllib2
import socket
import os.path
# Upload the accumulated Wi-Fi probe-request log from this Pi to a collection
# endpoint, then truncate the log.  Python 2 code (urllib2/urllib.urlencode).
if os.path.exists('/home/pi/probereqs.log'):
    with open('/home/pi/probereqs.log') as f:
        probedata = f.read()
    url = '_API_URL'  # placeholder -- presumably substituted at deploy time; TODO confirm
    values = { 'device': socket.gethostname(), 'data': probedata }
    data = urllib.urlencode(values)  # form-encode the POST body
    req = urllib2.Request(url, data)  # passing a data argument makes this a POST
    response = urllib2.urlopen(req)
    # NOTE(review): the log is truncated even if the upload returned an error
    # status; confirm that losing unsent data is acceptable.
    with open('/home/pi/probereqs.log', 'w'): pass
| StarcoderdataPython |
6683759 | <filename>patchMap_predict.py
import numpy as np
import cv2
from keras.models import load_model
import scipy.io as sio
# --- configuration --------------------------------------------------------
base_path_hazyImg = 'image/'    # directory holding the input hazy image
base_path_result = 'patchMap/'  # directory where the .mat result is written
imgname = 'waterfall.tif'
modelDir = 'PMS-Net.h5'         # pretrained PMS-Net Keras model file
print ("Process image: ", imgname)
# Load the hazy image and shape it into the (batch, H, W, C) tensor the
# network expects: a single 480x640 3-channel image.
hazy_sample = cv2.imread(base_path_hazyImg + imgname)
hazy_sample = cv2.resize(hazy_sample,(640,480))  # cv2.resize takes (width, height)
hazy_input = np.reshape(hazy_sample,(1, 480, 640, 3))
model = load_model(modelDir)
# Predict the patch map, then flatten it back to a 480x640 float64 array.
patchMap = model.predict(hazy_input, verbose = 1)
patchMap = np.reshape(patchMap,(-1,1))
patchMap = np.reshape(patchMap,(480, 640))
patchMap = np.float64(patchMap)
# Save the result as a MATLAB .mat file keyed "patchMap".
imgname = imgname.replace('.tif','')
print('saveDir:',base_path_result + imgname + '.mat')
sio.savemat(base_path_result + imgname + '.mat',{"patchMap":patchMap})
| StarcoderdataPython |
1920202 | __author__ = '<NAME>'
__author_email__ = '<EMAIL>'
from datetime import datetime, timedelta
import json
from pytz import timezone
with open('./config.json', 'r') as file:
conf = json.loads(file.read())
TIMEZONE = str(conf['timezone'])
class notifyOwnerToSetSchedule():
    """
    Notify unit owners that they still have to set a parking schedule for an
    upcoming point-of-interest event (User Story SetSchedule).

    Queries the parking database for events happening soon, builds a secure
    per-owner "set schedule" URL and sends a templated e-mail per unit.
    """

    # strftime patterns per supported timezone, shared by both formatters.
    # Hard-coded based on https://en.wikipedia.org/wiki/Date_format_by_country
    # TODO: fetch from CLDR later (internationalization enhancement)
    _DATETIME_FORMATS = {'Europe/Brussels': "%A %d %B %Y %-H:%M"}
    _TIME_FORMATS = {'Europe/Brussels': "%-H:%M"}

    def __init__(self, serverurl, serverparkingsetscheduleurl, notificationtype, supportemail, contentfolder, notificationemail, notificationemailsubject, daysbeforeevent, tokenizer, db):
        self.serverurl = serverurl
        self.serverparkingsetscheduleurl = serverparkingsetscheduleurl
        self.notification = notificationtype
        self.supportemail = supportemail
        self.contentfolder = contentfolder
        self.notificationemail = notificationemail
        self.notificationemailsubject = notificationemailsubject
        self.timeToEvent = daysbeforeevent
        self.tokenizer = tokenizer
        self.parkingDb = db

    def findOwnersWhoNeedToSetSchedule(self, fromdate, daystoevent, unittest=False):
        """
        Find all owners that still need to set their schedule.

        :param fromdate: date from which the number of days are counted, as Python datetime
        :param daystoevent: number of days as int; only events occurring within this window are considered
        :param unittest: when True, run the database query in unit-test mode
        :return: list of dicts with keys ['eventdescription', 'eventstart', 'eventendhour', 'unitaddress', 'ownername', 'owneremail', 'setscheduleurl']
        """
        if unittest:
            result = self.parkingDb.QueryAvailablePoiEventsWithinGivenAmountofdays(fromdate, daystoevent, unittest=True)
        else:
            result = self.parkingDb.QueryAvailablePoiEventsWithinGivenAmountofdays(fromdate, daystoevent)
        ownersList = []
        for item in result:
            # Skip units that already have a schedule for this event.
            if not self.parkingDb.CheckIfUnitHasScheduleForGivenPoieventid(item['unitid'], item['poieventid']):
                url = self.generateOwnerSetScheduleUrl(item['poieventid'], item['unitid'])
                # The advertised window opens one hour before the event and the
                # end hour is three hours after its start -- assumed business
                # rule; TODO confirm with product owner.
                ownersList.append({
                    'eventdescription': item['eventdescription'],
                    'eventstart': self.generateDatetimeString(item['eventdate'] + timedelta(hours=-1), TIMEZONE),
                    'eventendhour': self.generateTimeString(item['eventdate'] + timedelta(hours=3), TIMEZONE),
                    'unitaddress': item['unitaddress'],
                    'ownername': item['ownername'],
                    'owneremail': item['owneremail'],
                    'setscheduleurl': url,
                })
        return ownersList

    def generateDatetimeString(self, inputdate, tz):
        """
        Format *inputdate* as a localized date-time string.

        :param inputdate: Python datetime object to format
        :param tz: timezone identifier string (currently only 'Europe/Brussels')
        :return: formatted string, or "" for an unsupported timezone
        """
        fmt = self._DATETIME_FORMATS.get(tz, "")
        return inputdate.strftime(fmt) if fmt else ""

    def generateTimeString(self, inputdate, tz):
        """
        Format *inputdate* as a localized time-of-day string.

        :param inputdate: Python datetime object to format
        :param tz: timezone identifier string (currently only 'Europe/Brussels')
        :return: formatted string, or "" for an unsupported timezone
        """
        fmt = self._TIME_FORMATS.get(tz, "")
        return inputdate.strftime(fmt) if fmt else ""

    def generateOwnerSetScheduleUrl(self, poieventid, unitid):
        """
        Generate a secure, unique "owner set schedule" URL for the e-mail.

        :param poieventid: PK of POIEVENTS table as int
        :param unitid: PK of UNIT table as int
        :return: absolute URL carrying a signed token, as string
        """
        token = self.tokenizer.createSetScheduleToken(poieventid, unitid)
        return self.serverurl + self.serverparkingsetscheduleurl + "?token=" + token

    def sendEmailsToOwners(self, emailclient, emailtemplate, daystoevent):
        """
        Send a notification e-mail to every owner that still needs to set a
        schedule for an event occurring within *daystoevent* days.

        If an owner has multiple units, one e-mail is sent per unit
        (could be optimised later).

        :param emailclient: emailclient as object
        :param emailtemplate: emailtemplate as object
        :param daystoevent: number of days as int
        :return: the list of notified owners (one dict per e-mail sent)
        """
        result = self.findOwnersWhoNeedToSetSchedule(timezone(TIMEZONE).localize(datetime.now()), daystoevent)
        for item in result:
            # NOTE(review): the return value `msg` is unused -- presumably
            # adaptNotifyOwnerEmailTemplate renders the template in place and
            # sendEmail reads it from contentfolder + notificationemail;
            # confirm against the email client/template implementations.
            msg = emailtemplate.adaptNotifyOwnerEmailTemplate(item['ownername'], item['unitaddress'], item['eventdescription'], item['eventstart'], item['eventendhour'], item['setscheduleurl'])
            emailclient.sendEmail([item['owneremail']], self.contentfolder + self.notificationemail, self.supportemail, self.notificationemailsubject)
            # TODO: add checks for sendEmail (e.g. error messages means email not sent) - probably by reading our mailbox and scanning for Google Delivery Status Notification (DSN) emails, see http://stackoverflow.com/questions/5298285/detecting-if-an-email-is-a-delivery-status-notification-and-extract-informatio
        return result
| StarcoderdataPython |
3283108 | """
Common database model definitions.
These models are 'generic' and do not fit a particular business logic object.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.exceptions import ValidationError
class InvenTreeSetting(models.Model):
    """
    An InvenTreeSetting object is a key:value pair used for storing
    single values (e.g. one-off settings values).
    The class provides a way of retrieving the value for a particular key,
    even if that key does not exist.
    """
    class Meta:
        verbose_name = "InvenTree Setting"
        verbose_name_plural = "InvenTree Settings"

    @classmethod
    def get_setting(cls, key, backup_value=None):
        """
        Get the value of a particular setting.
        If it does not exist, return the backup value (default = None)
        """
        try:
            # Key lookup is case-insensitive, matching validate_unique below.
            setting = InvenTreeSetting.objects.get(key__iexact=key)
            return setting.value
        except InvenTreeSetting.DoesNotExist:
            return backup_value

    @classmethod
    def set_setting(cls, key, value, user, create=True):
        """
        Set the value of a particular setting.
        If it does not exist, option to create it.
        Args:
            key: settings key
            value: New value
            user: User object (must be staff member to update a core setting)
            create: If True, create a new setting if the specified key does not exist.
        """
        # Silently ignore updates from non-staff users.
        if not user.is_staff:
            return
        try:
            setting = InvenTreeSetting.objects.get(key__iexact=key)
        except InvenTreeSetting.DoesNotExist:
            if create:
                setting = InvenTreeSetting(key=key)
            else:
                return
        setting.value = value
        setting.save()

    # NOTE(review): the help_text string below is missing its closing
    # parenthesis ("...case insensitive") -- cosmetic, fix separately since
    # changing it touches a translatable string.
    key = models.CharField(max_length=50, blank=False, unique=True, help_text=_('Settings key (must be unique - case insensitive'))
    value = models.CharField(max_length=200, blank=True, unique=False, help_text=_('Settings value'))
    description = models.CharField(max_length=200, blank=True, unique=False, help_text=_('Settings description'))

    def validate_unique(self, exclude=None):
        """ Ensure that the key:value pair is unique.
        In addition to the base validators, this ensures that the 'key'
        is unique, using a case-insensitive comparison.
        """
        super().validate_unique(exclude)
        try:
            setting = InvenTreeSetting.objects.exclude(id=self.id).filter(key__iexact=self.key)
            if setting.exists():
                raise ValidationError({'key': _('Key string must be unique')})
        # NOTE(review): QuerySet.filter()/.exists() never raise DoesNotExist,
        # so this except block appears unreachable -- confirm and remove.
        except InvenTreeSetting.DoesNotExist:
            pass
class Currency(models.Model):
    """
    A Currency object represents a particular unit of currency.
    Each Currency has a scaling factor which relates it to the base currency.
    There must be one (and only one) currency which is selected as the base currency,
    and each other currency is calculated relative to it.
    Attributes:
        symbol: Currency symbol e.g. $
        suffix: Currency suffix e.g. AUD
        description: Long-form description e.g. "Australian Dollars"
        value: The value of this currency compared to the base currency.
        base: True if this currency is the base currency
    """
    symbol = models.CharField(max_length=10, blank=False, unique=False, help_text=_('Currency Symbol e.g. $'))
    suffix = models.CharField(max_length=10, blank=False, unique=True, help_text=_('Currency Suffix e.g. AUD'))
    description = models.CharField(max_length=100, blank=False, help_text=_('Currency Description'))
    value = models.DecimalField(default=1.0, max_digits=10, decimal_places=5, validators=[MinValueValidator(0.00001), MaxValueValidator(100000)], help_text=_('Currency Value'))
    base = models.BooleanField(default=False, help_text=_('Use this currency as the base currency'))

    class Meta:
        verbose_name_plural = 'Currencies'

    def __str__(self):
        """ Format string for currency representation """
        s = "{sym} {suf} - {desc}".format(
            sym=self.symbol,
            suf=self.suffix,
            desc=self.description
        )
        if self.base:
            s += " (Base)"
        else:
            s += " = {v}".format(v=self.value)
        return s

    def save(self, *args, **kwargs):
        """ Validate the model before saving
        - Ensure that there is only one base currency!
        NOTE(review): the demote/promote logic below is not wrapped in a
        transaction, so concurrent saves could briefly leave zero or two base
        currencies -- confirm whether this needs transaction.atomic().
        """
        # If this currency is set as the base currency, ensure no others are
        if self.base:
            for cur in Currency.objects.filter(base=True).exclude(pk=self.pk):
                cur.base = False
                cur.save()
        # If there are no currencies set as the base currency, set this as base
        if not Currency.objects.exclude(pk=self.pk).filter(base=True).exists():
            self.base = True
        # If this is the base currency, ensure value is set to unity
        if self.base:
            self.value = 1.0
        super().save(*args, **kwargs)
| StarcoderdataPython |
357128 | <filename>auction/bid/models.py
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from auction.bid.managers import AuctionManager, BidManager
import pendulum
class AuctionItem(models.Model):
    """A single item up for auction, owned by a User."""
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True, null=True)
    image_url = models.TextField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    # Running highest bid.  NOTE(review): defaults to 100 regardless of
    # starting_amount -- confirm this is intended.
    current_highest_bid = models.FloatField(default=100)
    starting_amount = models.FloatField()
    # pendulum callables are passed uninvoked so the default is evaluated
    # per row at insert time, not once at import time.
    start_day = models.DateField(default=pendulum.today)
    end_day = models.DateField(default=pendulum.tomorrow)
    active = models.BooleanField(default=True)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    bid_count = models.IntegerField(default=0)
    # Default manager plus a domain-specific manager with auction queries.
    objects = models.Manager()
    AuctionManager = AuctionManager()
class Bid(models.Model):
    """One bid placed by a user on an AuctionItem."""
    auction = models.ForeignKey(AuctionItem, on_delete=models.CASCADE)
    bid_amount = models.FloatField()
    bidder = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    # Track whether (and when) the notification e-mail for this bid was sent.
    email_sent = models.BooleanField(default=False)
    email_sent_at = models.DateTimeField(blank=True, null=True)
    # Default manager plus a domain-specific manager with bid queries.
    objects = models.Manager()
    BidManager = BidManager()
| StarcoderdataPython |
152027 | <gh_stars>10-100
from textgenrnn import textgenrnn
textgen = textgenrnn(name="my.poem") # give the model a name, e.g. `poem`; all generated model files use it as a filename prefix
textgen.reset() # reset the model weights
textgen.train_from_file( # train the model from a data file
    file_path = '../datasets/cn/5_chars_poem_2600.txt', # path to the training corpus
    new_model = True, # train a brand-new model
    num_epochs = 1, # number of training epochs
    word_level = False, # True: word-level, False: character-level
    rnn_bidirectional = True, # whether to use a bidirectional LSTM
    max_length = 25, # maximum length of one training sample
)
| StarcoderdataPython |
9783249 | import http.client
import requests
import random
import string
import threading
import time
import ssl
from bs4 import BeautifulSoup
from datetime import datetime
withdraw = True
getLoggedinAddress = True
withdrawCompletely = False
unregisteredLogin = False
def getSession():
length_of_string = 40
letters_and_digits = string.ascii_lowercase + string.digits
random_string = ""
for _ in range(length_of_string):
random_string += random.choice(letters_and_digits)
print(random_string)
ci_session = "ci_session=" + random_string
return ci_session
def getAddress():
if(unregisteredLogin):
URL = "http://192.168.3.11:3000/get-single-address"
elif(withdrawCompletely):
URL = "http://192.168.3.11:3000/linked-address/Withdraw"
elif(getLoggedinAddress == True):
URL = "http://192.168.3.11:3000/linked-address/LoggedIn"
else:
URL = "http://192.168.3.11:3000/linked-address/Pending"
r = requests.get(url = URL)
data = r.json()
if(data['message']=="SUCCESS"):
address = data['data']
return address.strip()
else:
return ""
def main(args):
while(True):
f = open("status.txt", "r")
if(f.read().strip()=="0"):
f.close()
break
f.close()
start_at = datetime.now()
address = getAddress()
if(not address):
print("No Data!")
break
ci_session = getSession()
print("address: "+address)
print("ci_session: "+ci_session)
print("----------------")
conn = http.client.HTTPSConnection("byteminer.live")
# conn = http.client.HTTPSConnection("byteminer.live", context = ssl._create_unverified_context())
payload = "username="+address+"&password=<PASSWORD>&reference_user_id="
headers = {
'authority': 'byteminer.live',
'accept': '*/*',
'x-requested-with': 'XMLHttpRequest',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'origin': 'https://byteminer.live',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://byteminer.live/z',
'accept-language': 'en-US,en;q=0.9',
'cookie': ci_session
}
conn.request("POST", "/ajax_auth", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
if('success' in str(data)):
print(".")
elif('Duplicate' in str(data)):
URL = "http://192.168.3.11:3000/update-address/"+address+"/BadAddress"
requests.get(url = URL)
print("Duplicate Error, dont use this address again")
continue
else:
URL = "http://20.198.178.250:3000/update-address/"+address+"/LoginFailed"
requests.get(url = URL)
print("Login Failed")
continue
URL = "http://192.168.3.11:3000/update-address/"+address+"/LoggedIn"
requests.get(url = URL)
print("Login Success")
rememberCode = res.headers["Set-Cookie"].split(";")[0]
if(withdraw==False):
print("----------------")
continue
payload = ''
headers = {
'authority': 'byteminer.live',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://byteminer.live/dashboard',
'accept-language': 'en-US,en;q=0.9',
'cookie': rememberCode + "; " + ci_session
}
conn.request("GET", "/withdrawal", payload, headers)
res = conn.getresponse()
data = res.read()
html = data.decode("utf-8")
soup = BeautifulSoup(html, 'html.parser')
current_user_address = "NO ADDRESS"
for li in soup.select('#transfer input'):
current_user_address = li.get("value")
for li in soup.select('#transfer h1'):
balance = li.text.split(":")[1].strip()
payload = 'amount=' + balance
headers = {
'authority': 'byteminer.live',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://byteminer.live',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://byteminer.live/withdrawal',
'accept-language': 'en-US,en;q=0.9',
'cookie': rememberCode + "; " + ci_session
}
conn.request("POST", "/withdrawal", payload, headers)
res = conn.getresponse()
data = res.read()
if(withdrawCompletely):
URL = "http://20.198.178.250:3000/update-address/"+address+"/Complete"
requests.get(url = URL)
else:
URL = "http://20.198.178.250:3000/update-address/"+address+"/Withdraw"
requests.get(url = URL)
print("---")
print("Address : " + address)
print("Current User : " + current_user_address)
print("Valid : " + str(address == current_user_address))
print("Withdraw Success : " + balance)
print("---")
end_at = datetime.now()
diff = end_at- start_at
print("Finish in " + (str(diff.microseconds/1000)) + "ms")
print("################")
def run_main_thread(args):
    """Run main() as a worker-thread entry point, exiting the thread on failure.

    Catching ``Exception`` (instead of the original bare ``except:``) lets
    KeyboardInterrupt and SystemExit propagate normally, and the traceback is
    printed so failures are no longer silent.
    """
    try:
        main(args)
    except Exception:
        import sys
        import traceback
        traceback.print_exc()  # surface the failure instead of dying silently
        sys.exit(1)
# Mark the bot as running so external monitors can read the status file.
# A context manager guarantees the handle is closed even if the write fails
# (the original open/write/close leaked the handle on error).
with open("status.txt", "w") as status_file:
    status_file.write("1")

threadCount = 1
if threadCount > 1:
    # Fan out one worker thread per requested slot.
    for worker_index in range(threadCount):
        worker = threading.Thread(target=main, args=(worker_index,))
        worker.start()
else:
    # Single-worker mode: run inline on the main thread.
    main(0)
9745071 | <gh_stars>0
"""
<NAME>
Wed Mar 25 19:23:07 2020
Python 2 - DAT-129 - Spring 2020
Lecture Notes
"""
#PLAN
import urllib
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
def getSearchURL(term, number):
    """Build a goodreads.com search URL for *term*, page *number*.

    The search term is percent-encoded with urllib.parse.quote so terms
    containing spaces or other special characters produce a valid URL
    (alphanumeric terms are unchanged, keeping prior behavior).
    """
    encoded_term = urllib.parse.quote(str(term))
    return 'https://www.goodreads.com/search?page=%d&qid=gI6LAXCK22&query=%s' % (number, encoded_term)
def getPageText(url):
    """Fetch and return the raw HTML bytes served at *url*."""
    request = urllib.request.Request(url)
    response = urllib.request.urlopen(request)
    try:
        return response.read()
    finally:
        # Always release the network connection, mirroring the original
        # context-manager behavior.
        response.close()
def main():
    """Scrape five pages of goodreads search results and report how many
    titles contain a subtitle (heuristic: a colon in the title).

    Fixes over the original:
    - guards against ZeroDivisionError when no titles are found,
    - skips titles whose <span> has no plain string (``title`` is None),
    - uses a for-loop instead of a manually incremented ``while`` counter.
    """
    term = 'pokemon'
    totaltitles = 0
    subtitles = 0
    for page in range(1, 6):
        print('Page:', page, '...', end='')
        url = getSearchURL(term, page)
        pageText = getPageText(url)
        soup = BeautifulSoup(pageText, 'html.parser')
        for book in soup.find_all('a', 'bookTitle'):
            title = book.find('span').string
            totaltitles += 1
            # ``title`` can be None when the span has nested markup.
            if title and ":" in title:
                subtitles += 1
        print(' Done')
    print('\nTotal titles: ', totaltitles)
    if totaltitles:
        subts = int(subtitles / totaltitles * 100)
        print(subtitles, 'books contain a subtitle')
        print(subts, '% of books have subtitles', sep='')
    else:
        print('No titles found; cannot compute subtitle percentage')
# Run the scraper only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
386798 | <gh_stars>100-1000
"""NVR that setups all components for a camera."""
from __future__ import annotations
import logging
from queue import Empty, Queue
from threading import Thread
from typing import TYPE_CHECKING, Dict, List, Union
import cv2
import viseron.mqtt
from viseron import helpers
from viseron.camera import FFMPEGCamera
from viseron.camera.frame import Frame
from viseron.const import (
THREAD_STORE_CATEGORY_NVR,
TOPIC_FRAME_PROCESSED_OBJECT,
TOPIC_FRAME_SCAN_POSTPROC,
)
from viseron.data_stream import DataStream
from viseron.helpers.filter import Filter
from viseron.motion import MotionDetection
from viseron.mqtt.binary_sensor import MQTTBinarySensor
from viseron.mqtt.camera import MQTTCamera
from viseron.mqtt.sensor import MQTTSensor
from viseron.mqtt.switch import MQTTSwitch
from viseron.post_processors import PostProcessorFrame
from viseron.recorder import FFMPEGRecorder
from viseron.watchdog.thread_watchdog import RestartableThread
from viseron.zones import Zone
if TYPE_CHECKING:
from viseron.detector.detected_object import DetectedObject
# Module-level logger; per-camera NVR instances create their own named loggers.
LOGGER = logging.getLogger(__name__)
class MQTTInterface:
    """Handles MQTT connection.

    Wraps all MQTT devices (binary sensors, switch, camera image topic and a
    status sensor) published for a single camera, and keeps the status sensor
    in sync via the internal data stream.
    """

    def __init__(self, config):
        self.config = config
        # Backing field for the ``status_state`` property.
        self._status_state = None
        self.status_attributes = {}
        self.devices = {}
        # Only create MQTT devices when an MQTT client is actually configured;
        # the status setter below publishes to devices["sensor"], so the
        # subscription is also only registered in that case.
        if viseron.mqtt.MQTT.client:
            self.devices["motion_detected"] = MQTTBinarySensor(
                config, "motion_detected"
            )
            self.devices["object_detected"] = MQTTBinarySensor(
                config, "object_detected"
            )
            # One binary sensor per tracked object label.
            for label in config.object_detection.labels:
                self.devices[label.label] = MQTTBinarySensor(
                    config,
                    f"object_detected {label.label}",
                )
            self.devices["switch"] = MQTTSwitch(config)
            self.devices["camera"] = MQTTCamera(config)
            self.devices["sensor"] = MQTTSensor(config, "status")
            DataStream.subscribe_data(
                f"{config.camera.name_slug}/status", self.status_state_callback
            )

    def publish_image(self, object_frame, motion_frame, zones, resolution):
        """Publish image to MQTT.

        Draws masks, motion contours, zones and detected objects onto the
        frame, then publishes it as a JPEG. The object frame takes precedence
        over the motion frame when both are supplied.
        """
        if viseron.mqtt.MQTT.client:
            # Draw on the object frame if it is supplied
            frame = object_frame if object_frame else motion_frame
            if self.config.motion_detection.mask:
                helpers.draw_motion_mask(
                    frame.decoded_frame_mat_rgb,
                    self.config.motion_detection.mask,
                )
            if self.config.object_detection.mask:
                helpers.draw_object_mask(
                    frame.decoded_frame_mat_rgb,
                    self.config.object_detection.mask,
                )
            if motion_frame and frame.motion_contours:
                helpers.draw_contours(
                    frame.decoded_frame_mat_rgb,
                    frame.motion_contours,
                    resolution,
                    self.config.motion_detection.area,
                )
            helpers.draw_zones(frame.decoded_frame_mat_rgb, zones)
            helpers.draw_objects(
                frame.decoded_frame_mat_rgb,
                frame.objects,
                resolution,
            )
            # Write a low quality image to save bandwidth
            ret, jpg = cv2.imencode(
                ".jpg", frame.decoded_frame_mat_rgb, [int(cv2.IMWRITE_JPEG_QUALITY), 75]
            )
            if ret:
                self.devices["camera"].publish(jpg.tobytes())

    def status_state_callback(self, state):
        """Update status state."""
        self.status_state = state

    @property
    def status_state(self):
        """Return status state."""
        return self._status_state

    @status_state.setter
    def status_state(self, state):
        # Setting the state also publishes it (with the current attributes)
        # to the MQTT status sensor.
        self._status_state = state
        self.devices["sensor"].publish(state, attributes=self.status_attributes)

    def on_connect(self):
        """On established MQTT connection."""
        # Re-announce every device so Home Assistant discovery etc. is redone
        # after a reconnect.
        for device in self.devices.values():
            device.on_connect()
class FFMPEGNVR:
    """Performs setup of all needed components for recording.

    Controls starting/stopping of motion detection, object detection, camera,
    recording. Also handles publishing to MQTT.

    One instance runs per camera; the main loop in :meth:`run` executes in a
    restartable background thread and coordinates the recorder with the
    object/motion detector results delivered over the data stream.
    """

    # Registry of all running NVR instances, keyed by camera name slug.
    nvr_list: Dict[str, object] = {}

    def __init__(self, config, detector):
        self.setup_loggers(config)
        self._logger.debug("Initializing NVR thread")
        # Use FFMPEG to read from camera. Used for reading/recording
        self.camera = FFMPEGCamera(config, detector)
        self._mqtt = MQTTInterface(config)
        self.config = config
        self.kill_received = False
        self.camera_grabber = None
        self._objects_in_fov = []
        self._labels_in_fov = []
        self._reported_label_count = {}
        # Processed object-detection frames arrive on this queue via DataStream.
        self._object_return_queue = Queue(maxsize=10)
        self._object_filters = {}
        self._object_decoder = f"{config.camera.name_slug}.object_detection"
        DataStream.subscribe_data(
            f"{config.camera.name_slug}/{TOPIC_FRAME_PROCESSED_OBJECT}",
            self._object_return_queue,
        )
        # One Filter per configured label for the whole field of view.
        for object_filter in config.object_detection.labels:
            self._object_filters[object_filter.label] = Filter(
                config, self.camera.resolution, object_filter
            )
        self.zones: List[Zone] = []
        for zone in config.camera.zones:
            self.zones.append(
                Zone(
                    zone,
                    self.camera.resolution,
                    config,
                )
            )
        self._motion_frames = 0
        self._motion_detected = False
        self._motion_only_frames = 0
        self._motion_max_timeout_reached = False
        self._motion_return_queue = Queue(maxsize=5)
        self._motion_decoder = f"{config.camera.name_slug}.motion_detection"
        # Motion detection is only instantiated when it can actually affect
        # behavior (keeping recordings alive, or gating the object detector).
        if config.motion_detection.timeout or config.motion_detection.trigger_detector:
            self.motion_detector = MotionDetection(config, self.camera)
            DataStream.subscribe_data(
                self.motion_detector.topic_processed_motion, self._motion_return_queue
            )
        # Initial scanner state: either motion gates the object detector, or
        # the object detector runs continuously and motion scanning is off.
        if config.motion_detection.trigger_detector:
            self.camera.stream.decoders[self._motion_decoder].scan.set()
            if config.object_detection.enable:
                self.camera.stream.decoders[self._object_decoder].scan.clear()
        else:
            if config.object_detection.enable:
                self.camera.stream.decoders[self._object_decoder].scan.set()
            self.camera.stream.decoders[self._motion_decoder].scan.clear()
        self.idle_frames = 0
        self._post_processor_topic = (
            f"{config.camera.name_slug}/{TOPIC_FRAME_SCAN_POSTPROC}"
        )
        self.start_camera()
        # Initialize recorder
        self._start_recorder = False
        self.recorder = FFMPEGRecorder(config)
        self.nvr_list[config.camera.name_slug] = self
        # The main event loop runs in a watchdog-supervised thread.
        RestartableThread(
            name=str(self),
            target=self.run,
            stop_target=self.stop,
            thread_store_category=THREAD_STORE_CATEGORY_NVR,
            daemon=False,
            register=True,
        ).start()
        if viseron.mqtt.MQTT.client:
            self.setup_mqtt()
        self._logger.debug("NVR thread initialized")

    def __repr__(self):
        """Insert name_slug in name."""
        return __name__ + "." + self.config.camera.name_slug

    def setup_loggers(self, config):
        """Set up custom log names and levels.

        Per-camera loggers for the NVR itself, motion and object detection;
        each falls back to the camera-wide log level when no specific level
        is configured.
        """
        self._logger = logging.getLogger(__name__ + "." + config.camera.name_slug)
        if getattr(config.camera.logging, "level", None):
            self._logger.setLevel(config.camera.logging.level)
        self._motion_logger = logging.getLogger(
            __name__ + "." + config.camera.name_slug + ".motion"
        )
        if getattr(config.motion_detection.logging, "level", None):
            self._motion_logger.setLevel(config.motion_detection.logging.level)
        elif getattr(config.camera.logging, "level", None):
            self._motion_logger.setLevel(config.camera.logging.level)
        self._object_logger = logging.getLogger(
            __name__ + "." + config.camera.name_slug + ".object"
        )
        if getattr(config.object_detection.logging, "level", None):
            self._object_logger.setLevel(config.object_detection.logging.level)
        elif getattr(config.camera.logging, "level", None):
            self._object_logger.setLevel(config.camera.logging.level)

    def setup_mqtt(self):
        """Set up various MQTT elements."""
        self._mqtt.on_connect()
        self._mqtt.status_state = "connecting"
        self.recorder.on_connect()
        for zone in self.zones:
            zone.on_connect()
        # We subscribe to the switch topic to toggle camera on/off
        viseron.mqtt.MQTT.subscribe(
            viseron.mqtt.SubscribeTopic(
                self._mqtt.devices["switch"].command_topic, self.toggle_camera
            )
        )

    def toggle_camera(self, message):
        """Toggle reading from camera on/off."""
        if message.payload.decode() == "ON":
            self.start_camera()
        elif message.payload.decode() == "OFF":
            self.stop_camera()

    def start_camera(self):
        """Start reading from camera."""
        # Idempotent: only spawn a grabber if none is alive.
        if not self.camera_grabber or not self.camera_grabber.is_alive():
            self._logger.debug("Starting camera")
            self.camera_grabber = RestartableThread(
                name="viseron.camera." + self.config.camera.name_slug,
                target=self.camera.capture_pipe,
                poll_timer=self.camera.poll_timer,
                poll_timeout=self.config.camera.frame_timeout,
                poll_target=self.camera.release,
                daemon=True,
                register=True,
            )
            self.camera_grabber.start()

    def stop_camera(self):
        """Stop reading from camera."""
        self._logger.debug("Stopping camera")
        self.camera.release()
        self.camera_grabber.stop()
        self.camera_grabber.join()
        # An in-progress recording cannot continue without frames.
        if self.recorder.is_recording:
            self.recorder.stop_recording()

    def event_over_check_motion(
        self, obj: DetectedObject, object_filters: Dict[str, Filter]
    ):
        """Check if motion should stop the recorder.

        Returns False (event NOT over) while motion is detected for objects
        whose filter requires motion, or for any object whose label has no
        motion requirement; resets the motion-timeout counters in both cases.
        """
        if object_filters.get(obj.label) and object_filters[obj.label].require_motion:
            if self.motion_detected:
                self._motion_max_timeout_reached = False
                self._motion_only_frames = 0
                return False
        else:
            self._motion_max_timeout_reached = False
            self._motion_only_frames = 0
            return False
        return True

    def event_over_check_object(
        self, obj: DetectedObject, object_filters: Dict[str, Filter]
    ):
        """Check if object should stop the recorder."""
        if obj.trigger_recorder:
            if not self.event_over_check_motion(obj, object_filters):
                return False
        return True

    def event_over(self):
        """Return if ongoing motion and/or object detection is over."""
        for obj in self.objects_in_fov:
            if not self.event_over_check_object(obj, self._object_filters):
                return False
        for zone in self.zones:
            for obj in zone.objects_in_zone:
                if not self.event_over_check_object(obj, zone.object_filters):
                    return False
        if self.config.motion_detection.timeout and self.motion_detected:
            # Only allow motion to keep event active for a specified period of time
            if self._motion_only_frames >= (
                self.camera.stream.output_fps * self.config.motion_detection.max_timeout
            ):
                if not self._motion_max_timeout_reached:
                    self._motion_max_timeout_reached = True
                    self._logger.debug(
                        "Motion has stalled recorder for longer than max_timeout, "
                        "event considered over anyway"
                    )
                return True
            self._motion_only_frames += 1
            return False
        return True

    def start_recording(self, frame):
        """Start recorder.

        Runs in a separate thread so the NVR loop is not blocked, and turns
        on the motion scanner so motion can keep the recording alive.
        """
        recorder_thread = Thread(
            target=self.recorder.start_recording,
            args=(frame, self.objects_in_fov, self.camera.resolution),
        )
        recorder_thread.start()
        if (
            self.config.motion_detection.timeout
            and not self.camera.stream.decoders[self._motion_decoder].scan.is_set()
        ):
            self.camera.stream.decoders[self._motion_decoder].scan.set()
            self._logger.info("Starting motion detector")

    def stop_recording(self):
        """Stop recorder.

        Counts down ``recorder.timeout`` seconds worth of idle frames before
        actually stopping, logging the remaining time once per second.
        """
        if self.idle_frames % self.camera.stream.output_fps == 0:
            self._logger.info(
                "Stopping recording in: {}".format(
                    int(
                        self.config.recorder.timeout
                        - (self.idle_frames / self.camera.stream.output_fps)
                    )
                )
            )
        if self.idle_frames >= (
            self.camera.stream.output_fps * self.config.recorder.timeout
        ):
            # Keep the motion scanner running if it gates the object detector.
            if not self.config.motion_detection.trigger_detector:
                self.camera.stream.decoders[self._motion_decoder].scan.clear()
                self._logger.info("Pausing motion detector")
            self.recorder.stop_recording()

    def get_processed_object_frame(self) -> Union[None, Frame]:
        """Return a frame along with detections from the object detector."""
        try:
            return self._object_return_queue.get_nowait().frame
        except Empty:
            # No new detection result this iteration.
            return None

    def filter_fov(self, frame):
        """Filter field of view.

        Applies the per-label filters to all detections, marks relevant
        objects, flags recorder triggers and forwards matching objects to
        their configured post processors.
        """
        objects_in_fov = []
        labels_in_fov = []
        for obj in frame.objects:
            if self._object_filters.get(obj.label) and self._object_filters[
                obj.label
            ].filter_object(obj):
                obj.relevant = True
                objects_in_fov.append(obj)
                labels_in_fov.append(obj.label)
                if self._object_filters[obj.label].trigger_recorder:
                    obj.trigger_recorder = True
                if self._object_filters[obj.label].post_processor:
                    DataStream.publish_data(
                        (
                            f"{self._post_processor_topic}/"
                            f"{self._object_filters[obj.label].post_processor}"
                        ),
                        PostProcessorFrame(self.config, frame, obj),
                    )
        self.objects_in_fov = objects_in_fov
        self.labels_in_fov = labels_in_fov

    @property
    def objects_in_fov(self):
        """Return all objects in field of view."""
        return self._objects_in_fov

    @objects_in_fov.setter
    def objects_in_fov(self, objects):
        # Only publish over MQTT when the set of objects actually changed.
        if objects == self._objects_in_fov:
            return
        if viseron.mqtt.MQTT.client:
            attributes = {}
            attributes["objects"] = [obj.formatted for obj in objects]
            self._mqtt.devices["object_detected"].publish(bool(objects), attributes)
        self._objects_in_fov = objects

    @property
    def labels_in_fov(self):
        """Return all labels in field of view."""
        return self._labels_in_fov

    @labels_in_fov.setter
    def labels_in_fov(self, labels):
        # helpers.report_labels publishes the per-label MQTT sensors and
        # returns the updated bookkeeping state.
        self._labels_in_fov, self._reported_label_count = helpers.report_labels(
            labels,
            self._labels_in_fov,
            self._reported_label_count,
            self._mqtt.devices,
        )

    def filter_zones(self, frame):
        """Filter all zones."""
        for zone in self.zones:
            zone.filter_zone(frame)

    def get_processed_motion_frame(self) -> Union[None, Frame]:
        """Return a frame along with motion contours from the motion detector."""
        try:
            return self._motion_return_queue.get_nowait().frame
        except Empty:
            # No new motion result this iteration.
            return None

    def filter_motion(self, motion_contours):
        """Filter motion.

        Requires ``motion_detection.frames`` consecutive frames above the
        configured area before motion is considered detected; resets as soon
        as one frame falls below the threshold.
        """
        _motion_found = bool(
            motion_contours.max_area > self.config.motion_detection.area
        )
        if _motion_found:
            self._motion_frames += 1
            self._motion_logger.debug(
                "Consecutive frames with motion: {}, "
                "max area size: {}".format(
                    self._motion_frames, motion_contours.max_area
                )
            )
            if self._motion_frames >= self.config.motion_detection.frames:
                if not self.motion_detected:
                    self.motion_detected = True
                return
        else:
            self._motion_frames = 0
        if self.motion_detected:
            self.motion_detected = False

    @property
    def motion_detected(self):
        """Return if motion is detected."""
        return self._motion_detected

    @motion_detected.setter
    def motion_detected(self, motion_detected):
        self._motion_detected = motion_detected
        self._motion_logger.debug(
            "Motion detected" if motion_detected else "Motion stopped"
        )
        if viseron.mqtt.MQTT.client:
            self._mqtt.devices["motion_detected"].publish(motion_detected)

    def trigger_recorder(self, obj: DetectedObject, object_filters: Dict[str, Filter]):
        """Check if object should start the recorder."""
        # Discard object if it requires motion but motion is not detected
        if (
            obj.trigger_recorder
            and object_filters.get(obj.label)
            and object_filters.get(obj.label).require_motion  # type: ignore
            and not self.motion_detected
        ):
            return False
        if obj.trigger_recorder:
            return True
        return False

    def process_object_event(self):
        """Process any detected objects to see if recorder should start."""
        if not self.recorder.is_recording:
            for obj in self.objects_in_fov:
                if self.trigger_recorder(obj, self._object_filters):
                    self._start_recorder = True
                    return
            for zone in self.zones:
                for obj in zone.objects_in_zone:
                    if self.trigger_recorder(obj, zone.object_filters):
                        self._start_recorder = True
                        return

    def process_motion_event(self):
        """Process motion to see if it has started or stopped."""
        if self.motion_detected:
            # Motion can wake the object detector and/or start a recording.
            if (
                self.config.motion_detection.trigger_detector
                and self.config.object_detection.enable
                and not self.camera.stream.decoders[self._object_decoder].scan.is_set()
            ):
                self.camera.stream.decoders[self._object_decoder].scan.set()
                self._logger.debug("Starting object detector")
            if (
                not self.recorder.is_recording
                and self.config.motion_detection.trigger_recorder
            ):
                self._start_recorder = True
        elif (
            self.config.object_detection.enable
            and self.camera.stream.decoders[self._object_decoder].scan.is_set()
            and not self.recorder.is_recording
            and self.config.motion_detection.trigger_detector
        ):
            self._logger.debug("Not recording, pausing object detector")
            self.camera.stream.decoders[self._object_decoder].scan.clear()

    def update_status_sensor(self):
        """Update MQTT status sensor."""
        if not viseron.mqtt.MQTT.client:
            return
        status = "unknown"
        if self.recorder.is_recording:
            status = "recording"
        elif (
            self.config.object_detection.enable
            and self.camera.stream.decoders[self._object_decoder].scan.is_set()
        ):
            status = "scanning_for_objects"
        elif self.camera.stream.decoders[self._motion_decoder].scan.is_set():
            status = "scanning_for_motion"
        attributes = {}
        attributes["last_recording_start"] = self.recorder.last_recording_start
        attributes["last_recording_end"] = self.recorder.last_recording_end
        # Publish only on change to avoid flooding the broker.
        if (
            status != self._mqtt.status_state
            or attributes != self._mqtt.status_attributes
        ):
            self._mqtt.status_attributes = attributes
            self._mqtt.status_state = status

    def run(self):
        """
        Collect information from detectors and stop/start recordings.
        Main thread for the NVR.
        Handles:
        - Filter motion/object detections
        - Starting/stopping of recordings
        - Publishes status information to MQTT.
        Speed is determined by FPS
        """
        self._logger.debug("Waiting for first frame")
        self.camera.frame_ready.wait()
        self._logger.debug("First frame received")
        self.idle_frames = 0
        while not self.kill_received:
            self.update_status_sensor()
            # Paced by the camera: one loop iteration per decoded frame.
            self.camera.frame_ready.wait()
            # Filter returned objects
            processed_object_frame = self.get_processed_object_frame()
            if processed_object_frame:
                # Filter objects in the FoV
                self.filter_fov(processed_object_frame)
                # Filter objects in each zone
                self.filter_zones(processed_object_frame)
                if self.config.object_detection.log_all_objects:
                    self._object_logger.debug(
                        "All objects: %s",
                        [obj.formatted for obj in processed_object_frame.objects],
                    )
                else:
                    self._object_logger.debug(
                        "Objects: %s", [obj.formatted for obj in self.objects_in_fov]
                    )
            # Filter returned motion contours
            processed_motion_frame = self.get_processed_motion_frame()
            if processed_motion_frame:
                # self._logger.debug(processed_motion_frame.motion_contours)
                self.filter_motion(processed_motion_frame.motion_contours)
            self.process_object_event()
            self.process_motion_event()
            if (
                processed_object_frame or processed_motion_frame
            ) and self.config.camera.publish_image:
                self._mqtt.publish_image(
                    processed_object_frame,
                    processed_motion_frame,
                    self.zones,
                    self.camera.resolution,
                )
            # If we are recording and no object is detected
            if self._start_recorder:
                self._start_recorder = False
                self.start_recording(processed_object_frame)
            elif self.recorder.is_recording and self.event_over():
                self.idle_frames += 1
                self.stop_recording()
                continue
            # Any non-idle iteration resets the stop-recording countdown.
            self.idle_frames = 0
        self._logger.info("Exiting NVR thread")

    def stop(self):
        """Stop processing of events."""
        self._logger.info("Stopping NVR thread")
        self.kill_received = True
        # Stop frame grabber
        self.camera.release()
        self.camera_grabber.join()
        # Stop potential recording
        if self.recorder.is_recording:
            self.recorder.stop_recording()
| StarcoderdataPython |
3329589 | <filename>podcast/admin.py
from django.contrib import admin
# Register your models here.
from .models import Podcast, Category, Series, Advertisement
# Expose every podcast model in the Django admin.
for model in (Podcast, Category, Series, Advertisement):
    admin.site.register(model)
| StarcoderdataPython |
3254583 | import argparse
import datetime
import logging
import multiprocessing
import os
import sys
from multicrypto.ellipticcurve import secp256k1
from multicrypto.address import convert_public_key_to_address, convert_private_key_to_wif_format, \
validate_pattern
from multicrypto.coins import coins
from multicrypto.scripts import validate_hex_script, convert_script_to_p2sh_address
from multicrypto.utils import get_qrcode_image
from multicrypto.validators import check_coin_symbol
# Module-level logger for the vanity-address generator.
logger = logging.getLogger(__name__)
N = secp256k1.n  # order of the curve
G = secp256k1.G  # generator point
def save_qr_code(out_dir, address, wif_private_key):
    """Write QR-code PNGs for *address* (and, when given, its WIF private key)
    into *out_dir*. Does nothing when *out_dir* is falsy.
    """
    if not out_dir:
        return
    get_qrcode_image(address, error_correct='low').save(
        os.path.join(out_dir, address + '.png'))
    if wif_private_key:
        # The private key uses the highest error-correction level since a
        # misread there is unrecoverable.
        get_qrcode_image(wif_private_key, error_correct='high').save(
            os.path.join(out_dir, address + '_private_key.png'))
    print('QR codes were saved in directory {}'.format(out_dir))
def generate_address(worker_num, coin_settings, pattern, compressed, segwit, out_dir, found, quit):
    """Brute-force search for an address starting with *pattern*.

    Starts from a random private key ``seed`` and walks the curve by adding G
    each iteration, so after ``counter`` steps the current public point equals
    ``(seed + counter) * G``. On a match the events *found* and *quit* are set
    (stopping sibling workers) and ``(address, wif_private_key)`` is returned.
    """
    if segwit:
        prefix_bytes = coin_settings['script_prefix_bytes']
    else:
        prefix_bytes = coin_settings['address_prefix_bytes']
    secret_prefix_bytes = coin_settings['secret_prefix_bytes']
    seed = secp256k1.gen_private_key()
    point = seed * G
    counter = 0
    start_time = datetime.datetime.now()
    while not quit.is_set():
        address = convert_public_key_to_address(point, prefix_bytes, compressed, segwit)
        if address.startswith(pattern):
            # point == (seed + counter) * G, so the matching private key is
            # seed + counter reduced modulo the curve order.
            private_key = (seed + counter) % N
            wif_private_key = convert_private_key_to_wif_format(
                private_key, secret_prefix_bytes, compressed)
            print('Address: {}\nPrivate key: {}'.format(address, wif_private_key))
            save_qr_code(out_dir, address, wif_private_key)
            found.set()
            quit.set()
            return address, wif_private_key
        point += G
        counter += 1
        if counter % 10000000 == 0:
            # Use total_seconds() rather than timedelta.seconds: the latter
            # ignores whole days (wrong rate on long runs) and is 0 within
            # the first second, which raised ZeroDivisionError.
            elapsed = max((datetime.datetime.now() - start_time).total_seconds(), 1)
            print('worker: {}, checked {}M addresses ({}/sec)'.format(
                worker_num, counter / 1000000, int(counter / elapsed)))
            sys.stdout.flush()
def get_args():
    """Parse and return the command-line arguments for the vanity generator."""
    parser = argparse.ArgumentParser(description='Multi coin vanity generation script')
    parser.add_argument('-p', '--pattern', type=str, required=False, default='',
                        help='Pattern which generated address should contain')
    parser.add_argument('-s', '--symbol', type=check_coin_symbol, required=True,
                        help='Symbol of the coin i.e. BTC')
    parser.add_argument('-i', '--input_script', type=str, required=False,
                        help='Generate address based on input script for P2SH transactions')
    parser.add_argument('-c', '--cores', type=int, required=False, default=1,
                        help='How many cores we would like to use. Default 1 core.')
    parser.add_argument('-u', '--uncompressed', action='store_true',
                        help='Generate address based on uncompressed wif private key format')
    parser.add_argument('-w', '--segwit', action='store_true',
                        help='Generate segwit (P2SH-P2WPKH) address')
    parser.add_argument('-d', '--output_dir', type=str, required=False,
                        help='Directory where QR codes with address and private key will be stored')
    return parser.parse_args()
def start_workers(args):
    """Validate the request and fan out vanity-search worker processes.

    For a P2SH ``input_script`` no search is needed: the address is derived
    directly and the function returns. Otherwise one ``generate_address``
    process per requested core is started and this function blocks until a
    worker signals success via the shared ``found`` event.
    """
    coin_symbol = args.symbol
    pattern = args.pattern
    workers = args.cores
    compressed = not args.uncompressed
    segwit = args.segwit
    output_dir = args.output_dir
    input_script = args.input_script
    # Segwit (P2SH-P2WPKH) addresses are defined over compressed pubkeys only.
    if segwit and not compressed:
        raise Exception('Segwit addresses must used compressed public key representation')
    jobs = []
    try:
        validate_pattern(pattern, coin_symbol, segwit)
        if input_script:
            validate_hex_script(input_script)
            address = convert_script_to_p2sh_address(
                input_script, coins[coin_symbol]['script_prefix_bytes'])
            save_qr_code(output_dir, address, None)
            print(address)
            return
    except Exception as e:
        # Best-effort CLI: report the validation problem and bail out.
        logger.error(e)
        return
    print('Looking for pattern {} for {} using {} workers'.format(
        pattern, coins[coin_symbol]['name'], workers))
    # Shared events: the winning worker sets ``quit`` so siblings stop.
    quit = multiprocessing.Event()
    found = multiprocessing.Event()
    for i in range(workers):
        p = multiprocessing.Process(
            target=generate_address,
            args=(i, coins[coin_symbol], pattern, compressed, segwit, output_dir, found, quit))
        jobs.append(p)
        p.start()
    found.wait()
def main():
    """Entry point: parse CLI arguments and launch the worker processes."""
    start_workers(get_args())
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1629399 | from itertools import product
from typing import Dict
import pandas as pd
from tpcp import HyperParameter, OptimizableParameter, PureParameter, cf, make_optimize_safe
from tpcp._dataset import Dataset
from tpcp._pipeline import OptimizablePipeline, Pipeline
class DummyPipeline(Pipeline):
    """Minimal concrete Pipeline fixture that only stores its parameters."""
    def __init__(self, para_1=None, para_2=None, optimized=False):
        self.para_1 = para_1
        self.para_2 = para_2
        self.optimized = optimized
class DummyOptimizablePipeline(OptimizablePipeline):
    """Optimizable pipeline fixture: "optimization" copies para_2 into optimized."""
    # Parameter-role annotations consumed by tpcp's optimization machinery.
    optimized: OptimizableParameter[bool]
    para_1: PureParameter
    para_2: HyperParameter
    def __init__(self, para_1=None, para_2=None, optimized=False):
        self.para_1 = para_1
        self.para_2 = para_2
        self.optimized = optimized
    @make_optimize_safe
    def self_optimize(self, dataset: Dataset, **kwargs):
        """Set ``optimized`` to the value of ``para_2``; the dataset is ignored."""
        self.optimized = self.para_2
        return self
class MutableCustomClass:
    """Mutable placeholder object used to test mutable-default parameter handling."""
    # Attribute set during "optimization" by MutableParaPipeline.self_optimize.
    test: str
class MutableParaPipeline(OptimizablePipeline):
    """Pipeline fixture whose optimization mutates a (cf-wrapped) mutable parameter."""
    optimized: OptimizableParameter[bool]
    para_mutable: OptimizableParameter[bool]
    def __init__(self, para_normal=3, para_mutable: Dict = cf(MutableCustomClass()), optimized=False):
        # ``cf`` marks the mutable default so tpcp clones it per instance.
        self.para_normal = para_normal
        self.para_mutable = para_mutable
        self.optimized = optimized
    @make_optimize_safe
    def self_optimize(self, dataset: Dataset, **kwargs):
        """Flag the pipeline as optimized and mutate the nested parameter object."""
        self.optimized = True
        self.para_mutable.test = True
        return self
class DummyDataset(Dataset):
    """Flat dataset fixture with five data points."""
    def create_index(self) -> pd.DataFrame:
        """Return the index: a single ``value`` column holding 0..4."""
        return pd.DataFrame({"value": list(range(5))})
class DummyGroupedDataset(Dataset):
    """Grouped dataset fixture: the cartesian product of 'abc' and 0..4 (15 rows)."""
    def create_index(self) -> pd.DataFrame:
        """Return the index with group column ``v1`` ('a'/'b'/'c') and ``v2`` (0..4)."""
        return pd.DataFrame(list(product("abc", range(5))), columns=["v1", "v2"])
def dummy_single_score_func(pipeline, data_point):
    """Score a data point by the first element of its group labels (pipeline unused)."""
    first_group, *_ = data_point.groups
    return first_group
def create_dummy_score_func(name):
    """Build a scorer that returns attribute *name* of the pipeline (dataset unused)."""
    def _score(pipeline, dataset):
        return getattr(pipeline, name)
    return _score
def create_dummy_multi_score_func(names):
    """Build a scorer returning a dict with the first two attributes in *names*."""
    def _score(pipeline, dataset):
        return {
            "score_1": getattr(pipeline, names[0]),
            "score_2": getattr(pipeline, names[1]),
        }
    return _score
def dummy_multi_score_func(pipeline, data_point):
    """Return two scores derived from the data point's first group label."""
    group = data_point.groups[0]
    return {"score_1": group, "score_2": group + 1}
def dummy_error_score_func(pipeline, data_point):
    """Score by the first group label, raising ValueError for labels 0, 2 and 4."""
    group = data_point.groups[0]
    if group in (0, 2, 4):
        raise ValueError("Dummy Error for {}".format(group))
    return group
def dummy_error_score_func_multi(pipeline, data_point):
    """Like dummy_error_score_func, but report the value under two score keys."""
    score = dummy_error_score_func(pipeline, data_point)
    return {"score_1": score, "score_2": score}
| StarcoderdataPython |
1879064 | #!/usr/bin/env python
'''
Contains all file-reading code and mapping code to generate results used for HRI17 and RSS17 papers.
'''
import sys
import os
import time
sys.path.append('../src')
from entity import Entity
from mapper import *
from file_io import *
from object_defs import *
if __name__ == "__main__":
    # Define problem elements
    # (renamed from ``dir``/``filename``, which shadowed the builtin and
    # mislabeled a directory path)
    base_dir = os.path.dirname(__file__)
    data_path = os.path.join(base_dir, '../data/example/')
    source_objects_filename = data_path + "source_objects.txt"
    target_objects_filename = data_path + "target_objects.txt"
    object_order = ["small_orange", "large_orange", "small_green", "large_green", "small_blue", "large_blue"]
    problem = MappingProblem(source_objects_filename, target_objects_filename, object_order)
    mapper = Mapper(problem)
    # List of hints to be provided, one per source object (same order)
    hints = ["red_2", "red_1", "green_1", "green_2", "blue_1", "blue_2"]
    # Initialize hypothesis space and feature set space
    hs, fss = mapper.init()
    # Feed each (source object, target object) hint pair to the mapper.
    # zip() also fixes the original bounds check, which compared an int to a
    # list ("if i < object_order") instead of using len(object_order).
    for srcObj, tgtObj in zip(object_order, hints):
        # Update prediction
        prediction, hs, fss = mapper.update(srcObj, tgtObj, hs, fss)
        if prediction is not None:
            print("Prediction: " + str(IO.readablePrediction(mapper.src, prediction)) + "\n")
| StarcoderdataPython |
5095826 | <gh_stars>1-10
import os
import csv
import shutil
import pathlib
from urllib.request import urlopen
from io import BytesIO
from zipfile import ZipFile, BadZipFile
from django.db import transaction
from django.core.management import BaseCommand
from django.contrib.gis.utils import LayerMapping
import districts
from districts.models import District
from offices.models import Office, Area
# it's fine if the public can see this URL -- all it provides is the ability to *view* the spreadsheet,
# but nobody can edit it. Worth noting that all the information in that sheet is about public officials,
# public offices, and other public information.
GOOGLE_SHEET_CSV_DOWNLOAD_URL = 'https://docs.google.com/spreadsheets/d/1oj1fc_CODd0waEYQFqluQhl8zZP0qjMNJHN0DlFfHr8/export?format=csv&id=1oj1fc_CODd0waEYQFqluQhl8zZP0qjMNJHN0DlFfHr8&gid=1344433296'
# Filesystem root of the ``districts`` Django app (derived from its module path).
DISTRICTS_APP_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(districts.__file__)))
OFFICE_TO_SHAPEFILE_URLS_CSV_FILENAME = 'Bay Area Elected Offices - District Boundary File Links.csv'
# Local path the downloaded spreadsheet CSV is written to / read from.
CSV_WITH_POSITIONS_AND_DISTRICTS = f'{DISTRICTS_APP_DIRECTORY}/source_csv/{OFFICE_TO_SHAPEFILE_URLS_CSV_FILENAME}'
# Maps the Area model field to the geometry type LayerMapping reads from shapefiles.
DJANGO_MODEL_TO_SHAPEFILE_KEY = {
    'mpoly': 'MULTIPOLYGON',
}
class Command(BaseCommand):
help = "idempotently imports all shapefiles linked to by voter_info/districts/{OFFICE_TO_SHAPEFILE_URLS_CSV_FILENAME}"
    @transaction.atomic
    def handle(self, *args, **options):
        # Run the whole import in one transaction so a partial failure leaves
        # the District/Office/Area tables untouched.
        self.download_district_boundary_csv()
        self.download_shapefiles_from_urls_in_csv()
def download_district_boundary_csv(self):
response = urlopen(GOOGLE_SHEET_CSV_DOWNLOAD_URL)
csv_contents = response.read()
# 'wb' mode is the mode to write bytes to the file. the urlopen response.read() object returns
# bytes, not a string. We just write the bytes directly to not futz with unicode conversion at all
with open(CSV_WITH_POSITIONS_AND_DISTRICTS, 'wb') as csv_with_positions_and_districts:
csv_with_positions_and_districts.write(csv_contents)
def download_shapefiles_from_urls_in_csv(self):
"""
The TEC spreadsheet lists all offices and URL download links to appropriate shapefiles.
This method reads the CSV (in OFFICE_TO_SHAPEFILE_URLS_CSV_FILENAME), and downloads
the zipfiles at the linked URLs. The urls are then extracted into the districts/shape_files
folder.
After zipfile extraction, the shapefiles are loaded into the postgres database as Districts and Offices
"""
self.already_connected_area_ids = set()
with open(CSV_WITH_POSITIONS_AND_DISTRICTS) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
district_name = row['Jurisdiction Level']
office_name = row['Office']
office_description = row['Office Description']
shapefile_archive_download_link = row['linkurl']
print(f"Importing shapefiles for {district_name} - {office_name}")
try:
response = urlopen(shapefile_archive_download_link)
except Exception as e:
print(f" error downloading from {shapefile_archive_download_link}: {e}")
# make a directory in voter_info/districts/shape_files/$district/$offfice/
try:
zipfile = ZipFile(BytesIO(response.read()))
except BadZipFile as e:
print(f" *******************************************************************************")
print(f" ERROR: error with file from link: {shapefile_archive_download_link}: {e}")
print(f" *******************************************************************************")
continue
# extract into a jurisidction/office tree folder structure.
relative_path = f'shape_files/{district_name}/{office_name}/'
target_extraction_path = f'{DISTRICTS_APP_DIRECTORY}/{relative_path}'
# empty out any old shapefiles in there.
try:
shutil.rmtree(target_extraction_path)
print(f" cleaned old shapefiles in {relative_path}")
except FileNotFoundError:
# it's ok if we tried to delete an old shapefile directory but there wasn't one
# there yet -- just means this is the first time we downloaded this shapefile
pass
print(f" extracting to {relative_path}")
zipfile.extractall(target_extraction_path)
self.extract_shapefile_into_database(target_extraction_path, district_name,
office_name, office_description)
def extract_shapefile_into_database(self, extracted_folder_path, district_name, office_name, office_description):
    """Load the areas from an extracted shapefile archive into the database.

    Ensures a District row exists for ``district_name``, upserts the Office
    belonging to it, then imports every area found under
    ``extracted_folder_path`` and attaches it to that office.
    """
    district, _created = District.objects.get_or_create(name=district_name)
    target_office = self.upsert_office_for_district(district, office_name, office_description)
    self.upsert_areas_and_office(target_office, extracted_folder_path)
def upsert_areas_and_office(self, office, path_to_shapefile_zip_extraction):
    """
    For the provided Office record, find the extracted shapefile(s), import
    their areas into the database, and tie every newly imported Area to that
    office.
    """
    print(f'    importing areas for office: "{office.name}"')
    # each zipfile can have different folder structure to the shapefile, so we
    # use pathlib to search down until it finds a '.shp' (shapefile)
    paths_to_shapefiles = pathlib.Path(path_to_shapefile_zip_extraction).glob('**/*.shp')
    paths = [path for path in paths_to_shapefiles]
    if len(paths) > 1:
        print(f"    More than one shapefile found for {path_to_shapefile_zip_extraction}")
    if len(paths) == 0:
        print(f"    *******************************************************************")
        print(f"    no shapefile found after extracting into {path_to_shapefile_zip_extraction}")
        print(f"    *******************************************************************")
    for path in paths:
        # Record which shapefile this office's areas came from.
        shapefile_path = '/'.join(path.parts)
        office.shape_file_name = shapefile_path
        office.save()
        # this instantiation and saving of the LayerMapping creats all the Area db rows
        # and saves them,
        layer_mapping = LayerMapping(Area, shapefile_path, DJANGO_MODEL_TO_SHAPEFILE_KEY,
                                     transform=False, encoding='iso-8859-1')
        layer_mapping.save(strict=True)
        # LayerMapping saved the new Area rows without an office; attach every
        # Area not already claimed during a previous call, and remember its id
        # so later offices in this run don't steal it.
        areas_to_save_to_district = Area.objects.exclude(id__in=self.already_connected_area_ids)
        for area in areas_to_save_to_district.all():
            area.office = office
            area.save()
            self.already_connected_area_ids.add(area.id)
    # Sanity check: after import, no Area may be left without an office.
    assert(Area.objects.filter(office_id__isnull=True).count() == 0)
def upsert_office_for_district(self, district, office_name, office_description):
    """Insert or update an Office in the given district.

    The (district, office name) pair acts as the unique identifier; the
    description may change in the source spreadsheet without creating a new
    Office row.
    """
    office, created = Office.objects.get_or_create(name=office_name, district=district)
    if office.description != office_description:
        office.description = office_description
        office.save()
    verb = "created" if created else "updated"
    print(f'    {verb} office: {office_name} for district: {district.name}')
    return office
| StarcoderdataPython |
1889321 | #!/usr/bin/env python3
# File : atbash.py
# Author : <NAME>
# Email : <EMAIL>
# Created Time : 2021/10/8 23:29
# Description :
def atbash_encode(plaintext):
    """Apply the Atbash substitution cipher to *plaintext*.

    Each ASCII letter is mapped to its mirror in the alphabet (A<->Z, B<->Y,
    ...), preserving case; every other character passes through unchanged.
    The cipher is an involution, so encoding twice returns the original text.
    """
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Build the mirror mapping once and translate in a single C-level pass
    # (replaces the original per-character find() loop with quadratic +=).
    table = str.maketrans(
        alphabet + alphabet.lower(),
        alphabet[::-1] + alphabet.lower()[::-1],
    )
    return plaintext.translate(table)
def module_test_api():
    """Smoke-test the cipher: print an encoded sample, then its round-trip."""
    plaintext = "flag{ok_atbash_flag}"
    ciphertext = atbash_encode(plaintext)
    print(ciphertext)
    # Atbash is an involution, so this prints the original plaintext again.
    print(atbash_encode(ciphertext))
# Run the round-trip self-test when executed directly.
if __name__ == "__main__":
    module_test_api()
| StarcoderdataPython |
6679699 | <reponame>NeolithEra/afg
from .scenarios import Supervisor
| StarcoderdataPython |
101517 | """
LRGAN
-----
Implements the latent regressor GAN well described in the BicycleGAN paper[1].
It introduces an encoder network which maps the generator output back to the latent
input space. This should help to prevent mode collapse and improve image variety.
Losses:
- Generator: Binary cross-entropy + L1-latent-loss (Mean Absolute Error)
- Discriminator: Binary cross-entropy
- Encoder: L1-latent-loss (Mean Absolute Error)
Default optimizer:
- torch.optim.Adam
Custom parameter:
- lambda_z: Weight for the reconstruction loss for the latent z dimensions.
References
----------
.. [1] https://arxiv.org/pdf/1711.11586.pdf
"""
import torch
from torch.nn import L1Loss
from vegans.utils.networks import Generator, Adversary, Encoder
from vegans.models.unconditional.AbstractGANGAE import AbstractGANGAE
class LRGAN(AbstractGANGAE):
    """Latent regressor GAN (BicycleGAN-style).

    On top of the usual generator/adversary pair, an encoder maps generated
    samples back to the latent space; an L1 penalty between the recovered and
    the original latent code discourages mode collapse and improves variety.

    Parameters
    ----------
    generator: nn.Module
        Generator architecture. Produces output in the real space.
    adversary: nn.Module
        Adversary architecture. Produces predictions for real and fake samples
        to differentiate them.
    encoder: nn.Module
        Encoder architecture. Produces predictions in the latent space.
    x_dim : list, tuple
        Output dimensions of the generator / input dimension of the
        discriminator or critic. For images: [nr_channels, height, width].
    z_dim : int, list, tuple
        Latent dimensions for the generator input. Might have dimensions of an
        image.
    optim : dict or torch.optim
        Optimizer used for each network; either a torch.optim optimizer or a
        dict keyed by network name, e.g. {"Generator": torch.optim.Adam}.
    optim_kwargs : dict
        Optimizer keyword arguments per network, e.g. {"Generator": {"lr": 0.0001}}.
    lambda_z: float
        Weight for the reconstruction loss for the latent z dimensions.
    adv_type: "Discriminator", "Critic" or "Autoencoder"
        Which adversarial architecture will be used.
    feature_layer : torch.nn.*
        Output layer used to compute the feature loss (from the discriminator
        or critic). If not None, the original generator loss is replaced by a
        feature loss (https://arxiv.org/abs/1606.03498v1).
    fixed_noise_size : int
        Number of images shown when logging (folder/images, tensorboard, and
        get_training_results() samples).
    device : string
        Either "cpu" or "cuda".
    ngpu : int
        Number of gpus used during training if device == "cuda".
    folder : string
        Output folder for summary, images, models and tensorboard files;
        existing folders are never overwritten (a timestamp is appended).
    """

    #########################################################################
    # Actions before training
    #########################################################################
    def __init__(
            self,
            generator,
            adversary,
            encoder,
            x_dim,
            z_dim,
            optim=None,
            optim_kwargs=None,
            lambda_z=10,
            adv_type="Discriminator",
            feature_layer=None,
            fixed_noise_size=32,
            device=None,
            ngpu=0,
            folder="./veganModels/LRGAN",
            secure=True):
        super().__init__(
            generator=generator, adversary=adversary, encoder=encoder,
            x_dim=x_dim, z_dim=z_dim, optim=optim, optim_kwargs=optim_kwargs,
            adv_type=adv_type, feature_layer=feature_layer,
            fixed_noise_size=fixed_noise_size, device=device, ngpu=ngpu,
            folder=folder, secure=secure
        )
        self.lambda_z = lambda_z
        self.hyperparameters["lambda_z"] = lambda_z
        if self.secure:
            assert self.encoder.output_size == self.z_dim, (
                "Encoder output shape must be equal to z_dim. {} vs. {}.".format(self.encoder.output_size, self.z_dim)
            )

    def _define_loss(self):
        # Extend the parent losses with the L1 latent reconstruction term.
        losses = super()._define_loss()
        losses["L1"] = L1Loss()
        return losses

    #########################################################################
    # Actions during training
    #########################################################################
    def _calculate_generator_loss(self, X_batch, Z_batch, fake_images=None):
        """Adversarial (or feature) loss plus the weighted latent L1 term."""
        generated = self.generate(z=Z_batch) if fake_images is None else fake_images
        recovered_z = self.encode(x=generated)
        if self.feature_layer is not None:
            adversarial_part = self._calculate_feature_loss(X_real=X_batch, X_fake=generated)
        else:
            predictions = self.predict(x=generated)
            target = torch.ones_like(predictions, requires_grad=False)
            adversarial_part = self.loss_functions["Generator"](predictions, target)
        latent_part = self.lambda_z * self.loss_functions["L1"](recovered_z, Z_batch)
        return {
            "Generator": adversarial_part + latent_part,
            "Generator_Original": adversarial_part,
            "Generator_L1": latent_part
        }

    def _calculate_encoder_loss(self, X_batch, Z_batch, fake_images=None):
        """L1 distance between the encoder's recovered z and the true z."""
        generated = self.generate(z=Z_batch).detach() if fake_images is None else fake_images
        recovered_z = self.encode(x=generated)
        return {
            "Encoder": self.loss_functions["L1"](recovered_z, Z_batch)
        }

    def _calculate_adversary_loss(self, X_batch, Z_batch, fake_images=None):
        """Adversary loss on fake (target 0) and real (target 1) samples, averaged."""
        generated = self.generate(z=Z_batch).detach() if fake_images is None else fake_images
        fake_scores = self.predict(x=generated)
        real_scores = self.predict(x=X_batch)
        loss_on_fake = self.loss_functions["Adversary"](
            fake_scores, torch.zeros_like(fake_scores, requires_grad=False)
        )
        loss_on_real = self.loss_functions["Adversary"](
            real_scores, torch.ones_like(real_scores, requires_grad=False)
        )
        return {
            "Adversary": 0.5*(loss_on_fake + loss_on_real),
            "Adversary_fake": loss_on_fake,
            "Adversary_real": loss_on_real,
            "RealFakeRatio": loss_on_real / loss_on_fake
        }
| StarcoderdataPython |
6633415 | <gh_stars>1-10
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from functools import partial
import cv2
from utils.dataset import parse_fn
from utils.losses import generator_loss, discriminator_loss, gradient_penalty
from utils.models import Generator, Discriminator
import os

# Pin training to the second GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

dataset = 'celeb_a'  # 'cifar10', 'fashion_mnist', 'mnist'
log_dirs = 'logs_wgan_2'
batch_size = 64
# learning rate
lr = 0.0002
# Random vector noise size
z_dim = 128
# Critic updates per generator update
n_dis = 5
# Gradient penalty weight
gradient_penalty_weight = 10.0

# Load datasets and setting
AUTOTUNE = tf.data.experimental.AUTOTUNE  # let tf.data auto-tune parallelism
# Train on the union of all splits (unconditional generation needs no held-out set).
combine_split = tfds.Split.TRAIN + tfds.Split.VALIDATION + tfds.Split.TEST
train_data, info = tfds.load(dataset, split=combine_split, data_dir='/home/share/dataset/tensorflow-datasets', with_info=True)
train_data = train_data.shuffle(1000)
train_data = train_data.map(parse_fn, num_parallel_calls=AUTOTUNE)
train_data = train_data.batch(batch_size, drop_remainder=True)  # drop the final batch if smaller than batch_size
train_data = train_data.prefetch(buffer_size=AUTOTUNE)

# Create networks
generator = Generator((1, 1, z_dim))
discriminator = Discriminator((64, 64, 3))
generator.summary()
discriminator.summary()

# Create optimizers
g_optimizer = tf.keras.optimizers.Adam(lr, beta_1=0.5)
d_optimizer = tf.keras.optimizers.Adam(lr, beta_1=0.5)
@tf.function
def train_generator():
    """One generator update: push the critic's score on generated images up."""
    with tf.GradientTape() as tape:
        latent = tf.random.normal(shape=(batch_size, 1, 1, z_dim))
        generated = generator(latent, training=True)
        critic_out = discriminator(generated, training=True)
        g_loss = generator_loss(critic_out)
    grads = tape.gradient(g_loss, generator.trainable_variables)
    g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))
    return g_loss
@tf.function
def train_discriminator(real_img):
    """One critic update on a real batch: WGAN loss plus gradient penalty."""
    with tf.GradientTape() as tape:
        latent = tf.random.normal(shape=(batch_size, 1, 1, z_dim))
        generated = generator(latent, training=True)
        real_score = discriminator(real_img, training=True)
        fake_score = discriminator(generated, training=True)
        real_loss, fake_loss = discriminator_loss(real_score, fake_score)
        penalty = gradient_penalty(partial(discriminator, training=True), real_img, generated)
        total_loss = (real_loss + fake_loss) + penalty * gradient_penalty_weight
    grads = tape.gradient(total_loss, discriminator.trainable_variables)
    d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))
    return real_loss + fake_loss, penalty
def combine_images(images, col=10, row=10):
    """Tile the first col*row images into one (h*col, w*row, 3) grid in [0, 1].

    Pixel values are rescaled from [-1, 1] to [0, 1]. Generalized: accepts
    either a TensorFlow eager tensor or a plain NumPy array (the original
    required an object with a .numpy() method and crashed on ndarrays).
    """
    # np.asarray converts eager TF tensors (via __array__) and is a no-op for ndarrays.
    images = np.asarray(images)
    images = (images + 1) / 2
    b, h, w, _ = images.shape
    images_combine = np.zeros(shape=(h * col, w * row, 3))
    for y in range(col):
        for x in range(row):
            images_combine[y * h:(y + 1) * h, x * w:(x + 1) * w] = images[x + y * row]
    return images_combine
def train_wgan():
    """Main WGAN-GP training loop: n_dis critic steps per generator step,
    with tensorboard logging, periodic sample grids, and per-epoch weights."""
    # Create tensorboard logs
    model_dir = log_dirs + '/models/'
    os.makedirs(model_dir, exist_ok=True)
    summary_writer = tf.summary.create_file_writer(log_dirs)
    # Create fixed noise for sampling (same codes each time, so image quality
    # is comparable across training).
    sample_noise = tf.random.normal((100, 1, 1, z_dim))
    for epoch in range(25):
        for step, real_img in enumerate(train_data):
            # training discriminator
            d_loss, gp = train_discriminator(real_img)
            # save discriminator loss
            with summary_writer.as_default():
                tf.summary.scalar('discriminator_loss', d_loss, d_optimizer.iterations)
                tf.summary.scalar('gradient_penalty', gp, d_optimizer.iterations)
            # training generator: only every n_dis critic updates (WGAN schedule)
            if d_optimizer.iterations.numpy() % n_dis == 0:
                g_loss = train_generator()
                # save generator loss
                with summary_writer.as_default():
                    tf.summary.scalar('generator_loss', g_loss, g_optimizer.iterations)
                print('G Loss: {:.2f}\tD loss: {:.2f}\tGP Loss {:.2f}'.format(g_loss, d_loss, gp))
                # save sample grid every 100 generator updates
                if g_optimizer.iterations.numpy() % 100 == 0:
                    x_fake = generator(sample_noise, training=False)
                    save_img = combine_images(x_fake)
                    # save fake images
                    with summary_writer.as_default():
                        tf.summary.image(dataset, [save_img], step=g_optimizer.iterations)
        # save model weights once per epoch (skipping epoch 0)
        if epoch != 0:
            generator.save_weights(model_dir + "generator-epochs-{}.h5".format(epoch))
if __name__ == '__main__':
    train_wgan()
3343999 | <reponame>RohanDalton/sygaldry
import os
__author__ = "<NAME>"
class EnvironmentSingleton(type):
    """Metaclass that turns any class using it into a singleton.

    The first instantiation creates and caches the instance; every later call
    returns the cached instance and ignores its arguments.

    NOTE: ``cls._instance = ...`` writes onto the concrete class itself, so
    each class using this metaclass gets its own independent singleton.
    """

    _instance = None

    def __call__(cls, *args, **kwargs):
        # Create once; afterwards always hand back the cached instance.
        # (The original had a dead `else: pass` branch here.)
        if cls._instance is None:
            cls._instance = super(EnvironmentSingleton, cls).__call__(*args, **kwargs)
        return cls._instance
class Environment(metaclass=EnvironmentSingleton):
    """Singleton, read-only mapping over the process environment variables.

    The environment is used to determine parts of your config.
    """

    def __init__(self):
        # Reference to os.environ, so lookups reflect the live environment.
        self._config = os.environ

    def __getitem__(self, item):
        # Missing keys yield None instead of raising KeyError.
        return self._config.get(item)

    def __setitem__(self, item, value):
        # The environment is read-only through this wrapper.
        raise NotImplementedError

    def keys(self):
        return self._config.keys()
if __name__ == "__main__":
    # Importing this module is enough; nothing to run as a script.
    pass
| StarcoderdataPython |
5000697 |
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class FcoeNic(Base):
    """Operations that can be performed on FCOE-capable CNA devices."""

    moid = 'ha-cli-handler-fcoe-nic'

    def disable(self, nicname):
        """Disable rediscovery of FCOE storage on behalf of an FCOE-capable CNA upon next boot.

        :param nicname: string, The CNA adapter name (vmnicX)
        :returns: string
        """
        return execute_soap(
            self._client, self._host, self.moid,
            'vim.EsxCLI.fcoe.nic.Disable',
            nicname=nicname,
        )

    def list(self):
        """List FCOE-capable CNA devices.

        :returns: vim.EsxCLI.fcoe.nic.list.NicDevice[]
        """
        return execute_soap(
            self._client, self._host, self.moid,
            'vim.EsxCLI.fcoe.nic.List',
        )

    def discover(self, nicname):
        """Initiate FCOE adapter discovery on behalf of an FCOE-capable CNA.

        :param nicname: string, The CNA adapter name (vmnicX)
        :returns: string
        """
        return execute_soap(
            self._client, self._host, self.moid,
            'vim.EsxCLI.fcoe.nic.Discover',
            nicname=nicname,
        )
4854571 | <gh_stars>0
from elasticsearch_dsl import analyzer, Date, Document, Index, Text, Integer, Keyword, Double
class Listing(Document):
    """Elasticsearch document mapping for a scraped rental listing."""
    # Identity / provenance
    id = Integer()
    listing_url = Text()
    scrape_id = Integer()
    last_scraped = Keyword()
    crawled_date = Date()
    name = Text(analyzer='snowball')
    # Host attributes
    host_id = Integer()
    host_is_superhost = Keyword()
    host_identity_verified = Text(fields={'raw': Keyword()})
    # Unit / booking attributes ('raw' sub-fields keep an unanalyzed copy for
    # exact-match aggregations alongside the analyzed text)
    room_type = Text(fields={'raw': Keyword()})
    accommodates = Integer()
    guests_included = Integer()
    minimum_nights = Integer()
    maximum_nights = Integer()
    calendar_updated = Text(fields={'raw': Keyword()})
    instant_bookable = Keyword()
    is_business_travel_ready = Keyword()
    cancellation_policy = Text(fields={'raw': Keyword()})
    price = Integer()
    # Availability windows — presumably days available over the next 30/60/90/365
    # days; confirm against the scraper's schema.
    availability_30 = Integer()
    availability_60 = Integer()
    availability_90 = Integer()
    availability_365 = Integer()
    # Review statistics
    number_of_reviews = Integer()
    first_review = Text(fields={'raw': Keyword()})
    last_review = Text(fields={'raw': Keyword()})
    review_scores_rating = Integer()
    review_scores_accuracy = Integer()
    review_scores_cleanliness = Integer()
    review_scores_checkin = Integer()
    review_scores_communication = Integer()
    review_scores_location = Integer()
    review_scores_value = Integer()
    overall_rating = Double()
1852024 | <filename>server/intrinsic/management/commands/intrinsic_update_citations.py
from django.core.management.base import BaseCommand
from common.models import PaperCitation
from intrinsic.models import IntrinsicImagesAlgorithm
class Command(BaseCommand):
    """Attach the correct PaperCitation to each intrinsic-images algorithm."""
    args = ''
    help = 'Fix intrinsic images citations'

    def handle(self, *args, **options):
        """Upsert the known citations, then point each algorithm slug at its citation.

        Prints the number of algorithm rows updated per slug, matching the
        output of the original Python 2 `print` statements (which were a
        SyntaxError under Python 3; `print(...)` works on both).
        """
        bell2014_densecrf, _ = PaperCitation.objects.get_or_create(
            slug='bell2014_densecrf',
            authors='<NAME>, <NAME>, <NAME>',
            title='Intrinsic Images in the Wild',
            journal='ACM Transactions on Graphics (SIGGRAPH 2014)',
            inline_citation='[Bell et al. 2014]',
            url='http://intrinsic.cs.cornell.edu',
        )

        zhao2012_nonlocal, _ = PaperCitation.objects.get_or_create(
            slug='zhao2012_nonlocal',
            authors='<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>',
            title='A Closed-form Solution to Retinex with Non-local Texture Constraints',
            journal='IEEE Transaction on Pattern Analysis and Machine Intelligence (TPAMI)',
            inline_citation='[Zhao et al. 2012]',
            url='http://www.ece.nus.edu.sg/stfpage/eletp/Papers/pami12_intrinsic.pdf',
        )

        garces2012_clustering, _ = PaperCitation.objects.get_or_create(
            slug='garces2012_clustering',
            authors='<NAME>, <NAME>, <NAME>, <NAME>',
            title='Intrinsic Images by Clustering',
            journal='Computer Graphics Forum (Eurographics Symposium on Rendering)',
            inline_citation='[Garces et al. 2012]',
            url='http://www-sop.inria.fr/reves/Basilic/2012/GMLG12/',
        )

        grosse2009_retinex, _ = PaperCitation.objects.get_or_create(
            slug='grosse2009_retinex',
            authors='<NAME>, <NAME>, <NAME>, <NAME>',
            title='Ground truth dataset and baseline evaluations for intrinsic image algorithms',
            journal='Proceedings of the International Conference on Computer Vision (ICCV)',
            inline_citation='[Grosse et al. 2009]',
            url='http://www.cs.toronto.edu/~rgrosse/intrinsic/',
        )

        shen2011_optimization, _ = PaperCitation.objects.get_or_create(
            slug='shen2011_optimization',
            authors='<NAME>, <NAME>, <NAME>, <NAME>',
            title='Intrinsic Images Using Optimization',
            journal='Proceedings of Computer Vision and Pattern Recognition (CVPR)',
            inline_citation='[Shen et al. 2011]',
            url='http://cs.bit.edu.cn/~shenjianbing/cvpr11.htm',
        )

        # Algorithm slug -> citation; both retinex variants share one citation.
        slug_to_citation = {
            'bell2014_densecrf': bell2014_densecrf,
            'zhao2012_nonlocal': zhao2012_nonlocal,
            'garces2012_clustering': garces2012_clustering,
            'grosse2009_color_retinex': grosse2009_retinex,
            'grosse2009_grayscale_retinex': grosse2009_retinex,
            'shen2011_optimization': shen2011_optimization,
        }
        for slug, citation in slug_to_citation.items():
            # QuerySet.update() returns the number of rows changed.
            print(IntrinsicImagesAlgorithm.objects.filter(slug=slug).update(citation=citation))
| StarcoderdataPython |
5152503 | <reponame>MessireToaster/CoEvolution
#!/usr/bin/env python

"""
Make plots out of dictionaries of test results, loaded from pickled files.
"""

import pickle
import re

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# path to pickled results
paths = ["../Results/FinalResults.pickle"]

bins = range(9)  # Bins for solvability histogram
group = True  # Group executions with the same radical followed by a number

# Rename dictionary keys
rename = {
    "NNSGA_f": "NNSGA Global PATA-EC (5)",
    "NSGA2_f": "NSGA2 Random Environments (5)",
    "NNSGA_3f": "NSGA2 Local + Global PATA-EC (5)",
    "NNSGA_4f": "NNSGA Global PATA-EC + Unique (1)",
    "POET_": "POET (5)"
}

# Fill this list to keep only the following keys
keep = [
    "POET_",
    "NNSGA_f",
    "NSGA2_f",
    "NNSGA_3f",
    "NNSGA_4f"
]

# Fill this list to remove the following keys
pop = [
]

# ----------------------------------------------------------------------------------------------------------------------

# Merge every pickled results dict; earlier paths win on duplicate keys.
res = dict()
for p in paths:
    with open(p, "rb") as f:
        res = {**pickle.load(f), **res}

# Number of test environments, read off the first entry.
nb_env = len(res[list(res.keys())[0]])

if group:
    # Strip the trailing run number so repeated runs of one algorithm merge.
    batches = dict()
    for key in res.keys():
        alg = re.sub("\d+$", "", str(key))
        print(alg)
        if alg in batches.keys():
            batches[alg] += res[key].copy()
        else:
            batches[alg] = res[key].copy()
else:
    batches = res.copy()

# KEEP: drop every key not matching one of the `keep` prefixes.
all_keys = list(batches.keys())
if len(keep) > 0:
    for p in all_keys:
        delete = True
        for s in keep:
            if re.match(s, p):
                delete = False
                break
        if delete and p in batches.keys():
            batches.pop(p)

# POP: drop every key matching one of the `pop` prefixes.
all_keys = list(batches.keys())
for p in all_keys:
    delete = False
    for s in pop:
        if re.match(s, p):
            delete = True
            break
    if delete and p in batches.keys():
        batches.pop(p)

# RENAME: swap raw keys for the display names in `rename`.
algorithm_batches = dict()
for k in batches.keys():
    if k in rename.keys():
        algorithm_batches[rename[k]] = batches[k].copy()
    else:
        algorithm_batches[k] = batches[k].copy()

print("--- Mean solvability ---")
sns.set_style('whitegrid')
for key in algorithm_batches.keys():
    # One solvability value per evaluation: the best score over environments.
    solvabilty = list()
    for ev in algorithm_batches[key]:
        scores = list()
        for i in range(len(ev)):
            scores.append(ev[i][0])
        solvabilty.append(np.array(scores).max())
    print("\t", key, ":", np.array(solvabilty).mean())
    points = np.histogram(solvabilty, bins=bins, density=True)[0]
    plt.title(f"Solvability score distribution - CollectBall ({nb_env} Test Environments)")
    plt.plot(points, label=f"{key}")
plt.xlabel("Maximum Fitness")
plt.ylabel("Test Environment proportion")
plt.legend()
plt.show()

labels = [" "]
count = 1

# Best generalization score per (grouped) run key: transpose to environments x
# runs, average each environment row, and keep the maximum mean.
solv0 = dict()
for key in res.keys():
    solvabilty = list()
    scores = np.zeros((len(res[key]), len(res[key][0])))
    for i in range(len(res[key])):
        sc = list()
        for j in range(len(res[key][i])):
            sc.append(res[key][i][j][0])
        scores[i] = np.array(sc)
    scores = scores.T
    gen = list()
    for i in range(len(scores)):
        gen.append(scores[i].mean())
    if group:
        alg = re.sub("\d+$", "", str(key))
        if alg in solv0.keys():
            solv0[alg] = max(solv0[alg], np.array(gen).max())
        else:
            solv0[alg] = np.array(gen).max()
    else:
        solv0[key] = np.array(gen).max()

# Apply display names to the generalization scores as well.
solv = dict()
for k in solv0.keys():
    if k in rename.keys():
        solv[rename[k]] = solv0[k]
    else:
        solv[k] = solv0[k]

# Regroup renamed keys by radical for the scatter plot.
gen = dict()
for key in algorithm_batches.keys():
    radical = re.sub("\d+$", "", str(key))
    if radical in gen.keys():
        gen[radical].append(solv[key])
    else:
        gen[radical] = [solv[key]]

for key in gen.keys():
    plt.title(f"Generalization score of the best agent - CollectBall ({nb_env} Test Environments)")
    for s in gen[key]:
        plt.plot(count, s, "ob", label=key)
        plt.text(count+0.05, s, round(s, 2))
        # NOTE(review): incrementing per point means xticks/labels below only
        # line up when each key has exactly one score — confirm whether this
        # should instead increment once per key.
        count += 1
    if key in rename.keys():
        labels.append(rename[key])
    else:
        labels.append(key)

sns.set_style('whitegrid')
plt.ylabel("Mean fitness over environments")
plt.xticks(np.arange(count), labels)  # Set text labels.
plt.xlim(0.8, count-0.8)
plt.show()
| StarcoderdataPython |
85603 | import logging
__version__ = "2.0.6"

# Library convention (see the logging HOWTO): attach a NullHandler so that
# applications which don't configure logging see no "no handlers" warnings.
logging.getLogger(__name__).addHandler(logging.NullHandler())
| StarcoderdataPython |
279567 | # The main script
from downloadsprites import downloadSprites
from compresssprites import compressMain

# Fetch the sprite assets first, then run the compression pass over them.
downloadSprites()
compressMain()
| StarcoderdataPython |
3408468 | <reponame>nestor-san/cooperation-fit
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
    """Manager for the email-keyed custom User model."""

    def create_user(self, email, password=None, **extra_fields):
        """Creates and saves a new user"""
        if not email:
            # Fixed typos in the user-facing message ("havee", "adress").
            raise ValueError('User must have a valid email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Creates and saves a new superuser"""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    email = models.EmailField(max_length=255, unique=True)  # login identifier
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)  # grants admin-site access

    # All object creation goes through the email-aware manager above.
    objects = UserManager()

    # Authenticate with the email field instead of a username.
    USERNAME_FIELD = 'email'
class Organization(models.Model):
    """Organization that will be able to create projects"""
    name = models.CharField(max_length=255, unique=True)
    # Owning account; deleting the user removes the organization too.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    description = models.TextField(blank=True)
    website = models.URLField(max_length=255, blank=True)
    address = models.CharField(max_length=255, blank=True)
    country = models.CharField(max_length=255)

    def __str__(self):
        return self.name
class CooperatorProfile(models.Model):
    """Profile of a cooperator"""
    name = models.CharField(max_length=255)
    # Exactly one profile per user; the user also serves as the primary key.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True
    )
    description = models.TextField()
    skills = models.TextField(blank=True)
    website = models.URLField(max_length=255, blank=True)

    def __str__(self):
        return self.name
class PortfolioItem(models.Model):
    """Portfolio items of a cooperator"""
    # A user may have many portfolio items; they vanish with the account.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    link = models.URLField(max_length=255, blank=True)

    def __str__(self):
        return self.name
class Project(models.Model):
    """Cooperation project"""
    name = models.CharField(max_length=255)
    # Both the creating user and the organization cascade-delete the project.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    organization = models.ForeignKey(
        Organization,
        on_delete=models.CASCADE
    )
    description = models.TextField(blank=True)
    ref_link = models.URLField(max_length=255, blank=True)

    def __str__(self):
        return self.name
class Cooperation(models.Model):
    """Actual cooperation between an organization and a volunteer"""
    name = models.CharField(max_length=255)
    project = models.ForeignKey(
        Project,
        on_delete=models.CASCADE)
    # presumably the organization-side participant — confirm; kept (as NULL)
    # if the account is deleted so the cooperation record survives.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
        related_name='user'
    )
    # The volunteer doing the work; likewise survives account deletion.
    voluntary = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
        related_name='voluntary'
    )
    start_date = models.DateField(auto_now_add=True)  # set once on creation
    end_date = models.DateField(null=True)  # NULL while ongoing
    is_private = models.BooleanField(default=False)

    def __str__(self):
        return self.name
class Review(models.Model):
    """A review of a cooperation"""
    name = models.CharField(max_length=255)
    cooperation = models.ForeignKey(
        Cooperation,
        on_delete=models.CASCADE)
    # Author of the review; kept (as NULL) if the account is deleted.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
        related_name='reviewer'
    )
    # The user being reviewed.
    reviewed = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
        related_name='reviewed'
    )
    review = models.TextField()

    def __str__(self):
        return self.name
class Message(models.Model):
    """A message between users"""
    # Sender; deleting either party deletes the message.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='sender'
    )
    recipient = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='recipient'
    )
    message = models.TextField()
    date = models.DateTimeField(auto_now_add=True)  # set once at send time

    def __str__(self):
        # NOTE(review): returns the full message body, which may be long.
        return self.message
| StarcoderdataPython |
161812 | <filename>train_ae.py<gh_stars>1-10
import torch
import chess
import fire
from random import random, uniform
from utils import create_path
from torch.nn import functional as F
from models.role.ae import AE
from models.random_player import RandomPlayer
from pathlib import Path
from utils.features import board_to_feat
from torch.utils.data import TensorDataset, DataLoader
def l1_loss(submodules, x):
    """Sum of mean absolute activations while feeding x through each submodule
    (ReLU applied after every layer); used as a sparsity regularizer."""
    total = 0
    activation = x
    for layer in submodules:
        activation = F.relu(layer(activation))
        total = total + activation.abs().mean()
    return total
def train(model, submodules, loader, optimizer, N, device):
    """Run one optimization epoch; return the dataset-averaged loss (BCE
    reconstruction plus the L1 activation penalty)."""
    model.train()
    running = 0.0
    for batch, *_ in loader:
        batch = batch.to(device)
        recon = model(batch)
        loss = F.binary_cross_entropy(recon, batch) + l1_loss(submodules, batch)
        running += loss.item() * len(batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return running / N
@torch.no_grad()
def eval(model, submodules, loader, N, device):
    """Compute the dataset-averaged loss without gradient tracking.

    NOTE: shadows the builtin ``eval``; the name is kept because ``main``
    calls it as ``eval(...)``.
    """
    model.eval()
    running = 0.0
    for batch, *_ in loader:
        batch = batch.to(device)
        recon = model(batch)
        loss = F.binary_cross_entropy(recon, batch) + l1_loss(submodules, batch)
        running += loss.item() * len(batch)
    return running / N
def main(batch_size: int = 128,
         num_kernels: int = 16,
         num_hidden: int = 1024,
         num_epochs: int = 100,
         lr: float = 1e-4,
         decay: float = 1e-4,
         z_dim: int = 4096,
         save_dir: Path = Path('./data')):
    """Train the autoencoder on saved board positions and checkpoint the best model.

    Loads ``save_dir/board_positions.pth``, holds out 10% for validation, and
    saves ``save_dir/ae.pth`` whenever validation loss improves.
    """
    data = torch.load(save_dir / 'board_positions.pth')
    num_channels = data.shape[1]

    # 90/10 train/validation split (tail of the tensor is validation).
    N = len(data)
    len_val = N // 10
    len_tr = N - len_val
    tr_data = data[:len_tr]
    vl_data = data[len_tr:]
    tr_set = TensorDataset(tr_data)
    vl_set = TensorDataset(vl_data)
    tr_loader = DataLoader(tr_set,
                           batch_size=batch_size,
                           shuffle=True)
    vl_loader = DataLoader(vl_set,
                           batch_size=batch_size,
                           shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AE(in_dim=(8, 8),
               num_channels=num_channels,
               num_kernels=num_kernels,
               kernel_size=(3, 3),
               num_hidden=num_hidden,
               z_dim=z_dim,)
    # Child modules feed the L1 activation penalty in train()/eval().
    submodules = list(model.children())
    model.to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=decay)
    # Bug fix: the original started at 100, so no checkpoint was ever written
    # while validation loss stayed above 100.
    best = float("inf")
    for epoch in range(num_epochs):
        tr_loss = train(model, submodules, tr_loader, optimizer, len_tr, device)
        vl_loss = eval(model, submodules, vl_loader, len_val, device)
        if vl_loss < best:
            print("BEST")
            best = vl_loss
            torch.save(model, save_dir / "ae.pth")
        print(f'Epoch {epoch:4d}> tr_loss: {tr_loss:.4f}, vl_loss: {vl_loss:.4f}')
def collect_data(data_len: int,
                 expert_path: Path = Path("./stockfish"),
                 save_dir: Path = Path('./data')):
    """Generate up to data_len board-position feature tensors by playing a mix
    of Stockfish and random moves, deduplicated by occupancy bitboard, and
    save them to save_dir/board_positions.pth."""
    create_path(save_dir)
    stockfish = chess.engine.SimpleEngine.popen_uci(expert_path)
    random_player = RandomPlayer()
    data = []

    def go(board, rand_t):
        # With probability rand_t play a random move, otherwise ask Stockfish
        # (with a tiny random time budget); mutates board in place.
        player = (stockfish if random() > rand_t else random_player)
        m = player.play(board, chess.engine.Limit(time=uniform(1e-6, 1e-4))).move
        board.push(m)
        return board_to_feat(board), board.occupied

    positions = []
    board = chess.Board()
    data.append(board_to_feat(board))
    positions.append(board.occupied)
    for i in range(data_len):
        if i < data_len // 25:
            rand_t = 0.25
        # NOTE(review): data_len // 60 <= data_len // 25, so this branch is
        # unreachable and rand_t jumps from 0.25 straight to 0.8 — the two
        # thresholds look swapped; confirm the intended schedule.
        elif i < data_len // 60:
            rand_t = 0.5
        else:
            rand_t = 0.8
        feat, occupied = go(board, rand_t)
        # Skip positions already seen (occupancy bitboard used as a cheap key).
        if board.occupied in positions:
            continue
        positions.append(occupied)
        data.append(feat)
        if board.is_game_over():
            board = chess.Board()
    stockfish.quit()
    data = torch.stack(data)
    torch.save(data, save_dir / "board_positions.pth")
if __name__ == '__main__':
    # Expose collect_data as a command-line interface via python-fire.
    fire.Fire(collect_data)
| StarcoderdataPython |
9601978 | default_app_config = 'kungfucms.apps.dashboard.apps.DashboardConfig'
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.