seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25815068031 | import csv
from utils import DateCounter
def pad(filename: str):
#---------------read_in---------------
data = []
with open(filename, mode='r') as file:
reader = csv.reader(file)
for row in reader:
data.append(row)
#---------------padding---------------
newdata = []
newdata.append(data[0])
for n in range(1, len(data)-1):
counter = DateCounter(data[n][1])
counter.count()
# print(counter.current_date())
if (data[n][0] != data[n+1][0]) or (counter.current_date() == data[n+1][1]): # 用户变更,或日起连续,不需要补齐
newdata.append(data[n])
else:
newdata.append(data[n])
while counter.current_date() != data[n+1][1]:
newdata.append(data[n].copy())
newdata[-1][1] = counter.current_date()
counter.count()
#--------------write_back--------------
with open(filename, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerows(newdata) | UX404/Financial-data-processing | data_process/padding.py | padding.py | py | 1,068 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.reader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "utils.DateCounter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 28,
"usage_type": "call"
}
] |
12100401616 | '''
Aoccdrnig to a rscheearch at Cmabrigde Uinervtisy, it deosn't mttaer in waht oredr the ltteers in a wrod are, the olny iprmoatnt tihng is taht the frist and lsat ltteers be at the rghit pclae. The rset can be a toatl mses and you can sitll raed it wouthit porbelm. Tihs is bcuseae the huamn mnid deos not raed ervey lteter by istlef, but the wrod as a wlohe
input: String arr[words in a dictionary]
output: all sets of words that are 'equivalent'
sample input: ['word', 'string', 'field', 'flied', 'fried', 'array', 'fired']
'''
'''
Idea:
Let's use a hashtable:
Keys = a touple containing the first and last characters of that word
Values = a touple: a set of inner characters and a list of words.
'''
from collections import Counter
def print_equivalent(words):
    """Print groups of 'equivalent' words (Cambridge scrambled-word effect).

    Two words are equivalent when they share the same first letter, the same
    last letter, and the same multiset of inner letters. Only groups with
    more than one word are printed, one space-joined group per line.
    """
    ht = dict()
    for word in words:
        # BUG FIX: word.strip("'s") strips ANY leading/trailing "'" or "s"
        # characters (e.g. 'strings' -> 'tring'); only drop a possessive 's.
        if word.endswith("'s"):
            word = word[:-2]
        key = (word[0], word[-1])  # (first letter, last letter)
        inner_letters = Counter(word[1:-1])
        if key not in ht:
            ht[key] = [(inner_letters, set([word]))]
        else:
            matched = False
            for entry in ht[key]:
                if entry[0] == inner_letters:
                    entry[1].add(word)
                    matched = True
            if not matched:
                new_entry = (inner_letters, set([word]))
                ht[key].append(new_entry)
    for key in ht.keys():
        for entry in ht[key]:
            if len(entry[1]) > 1:
                print(' '.join(entry[1]))
if __name__ == '__main__':
    # Demo run on a small hand-picked list; the commented line instead loads a
    # full dictionary file (requires PYTHONPATH to point at the data dir).
    import file_utils, os
    #print_equivalent([word for word in file_utils.read_file(os.path.join(os.environ['PYTHONPATH'], 'dic.txt')).splitlines() if len(word) > 2])
    print_equivalent(['word', 'fired', 'fried', 'flied', 'field', 'felid'])
| Shaywei/MyDevTools | Python/misc/mock_iterview_quinn.py | mock_iterview_quinn.py | py | 1,757 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 29,
"usage_type": "call"
}
] |
38870006486 | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import imageio
import os
#
# conda activate tf
# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/
# conda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0
# o pip install tensorflow
# pip install tensorflow-gpu
# o pip install gym
# o pip install gym[atari]
# o pip install autorom[accept-rom-license]
# pip install keras
# pip install keras-rl2
# o pip install imageio
# https://adventuresinmachinelearning.com/atari-space-invaders-dueling-q/
#
# uses PERSI to make training more efficient (take this out?)
# Fix for weird thing
# https://stackoverflow.com/questions/68614547/tensorflow-libdevice-not-found-why-is-it-not-found-in-the-searched-path
# export XLA_FLAGS=--xla_gpu_cuda_data_dir=/home/kali/.local/lib/python3.8/site-packages/jaxlib/cuda
#Use xming X server for windows
#run using
#echo "export DISPLAY=localhost:0.0" >> ~/.bashrc
#. ~/.bashrc
# export DISPLAY=[IP]:0.0
STORE_PATH = "tensorboard" # Path to where tensorboard logs are stored
MAX_EPSILON = 1 # Maximum probability of choosing a random action in epsilon-greedy algorithm
MIN_EPSILON = 0.1 # Minimum probability of choosing a random action in epsilon-greedy algorithm
EPSILON_MIN_ITER = 500000 # Number of iterations after which epsilon will have decreased from MAX_EPSILON to MIN_EPSILON
GAMMA = 0.99 # Discount factor in reinforcement learning
BATCH_SIZE = 32 # Number of samples used in each iteration of training
TAU = 0.08 # Hyperparameter for soft updating of the target network
POST_PROCESS_IMAGE_SIZE = (105, 80, 1) # Size of processed images used as input to the neural network
DELAY_TRAINING = 50000 # Number of time steps to wait before starting training
BETA_DECAY_ITERS = 500000 # Number of iterations after which beta will have decayed from MAX_BETA to MIN_BETA
MIN_BETA = 0.4 # Minimum value of beta parameter
MAX_BETA = 1.0 # Maximum value of beta parameter
NUM_FRAMES = 4 # Number of frames stacked together as input to the neural network
GIF_RECORDING_FREQ = 100 # Frequency with which GIFs are recorded during training
MODEL_SAVE_FREQ = 100 # Frequency with which the trained model is saved
# Create an environment for the Space Invaders game, using the RGB array render mode
env = gym.make("SpaceInvaders-v0", render_mode="rgb_array")
# Get the number of possible actions in the game
num_actions = env.action_space.n
class DQModel(keras.Model):
    """(Optionally dueling) deep-Q network over stacked greyscale frames.

    Two conv layers feed a dense 'advantage' head; when *dueling* is True a
    parallel 'value' head is combined with the mean-centred advantages.
    """

    def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
        # Initialize the model using the parent's constructor
        super(DQModel, self).__init__()
        # Save whether the model uses the dueling architecture
        self.dueling = dueling
        # First convolutional layer: 16 filters of size 8x8, stride 4
        self.conv1 = keras.layers.Conv2D(16, (8, 8), (4, 4), activation='relu')
        # Second convolutional layer: 32 filters of size 4x4, stride 2
        self.conv2 = keras.layers.Conv2D(32, (4, 4), (2, 2), activation='relu')
        # Flatten the conv output before the dense heads
        self.flatten = keras.layers.Flatten()
        # Advantage head: hidden dense layer (He normal init)
        self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
                                            kernel_initializer=keras.initializers.he_normal())
        # Advantage output: one value per action
        self.adv_out = keras.layers.Dense(num_actions,
                                          kernel_initializer=keras.initializers.he_normal())
        if dueling:
            # Value head: hidden dense layer (He normal init)
            self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
                                              kernel_initializer=keras.initializers.he_normal())
            # Value output: a single state-value estimate
            self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
            # Lambda layer subtracting the mean from the advantage outputs
            self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
            # Add layer combining the value and centred-advantage outputs
            self.combine = keras.layers.Add()

    def call(self, input):
        """Forward pass: return Q values (V + centred A if dueling, else A)."""
        x = self.conv1(input)
        x = self.conv2(x)
        x = self.flatten(x)
        adv = self.adv_dense(x)
        adv = self.adv_out(adv)
        if self.dueling:
            v = self.v_dense(x)
            v = self.v_out(v)
            # centre the advantages so V and A are identifiable
            norm_adv = self.lambda_layer(adv)
            combined = self.combine([v, norm_adv])
            return combined
        # non-dueling variant: the advantage head IS the Q output
        return adv
def huber_loss(loss):
    """Huber loss: quadratic for |loss| < 1, linear beyond (outlier-robust)."""
    magnitude = abs(loss)
    if magnitude < 1.0:
        return 0.5 * loss ** 2
    return magnitude - 0.5
# The Huber loss function is a loss function that is more robust
# than the mean squared error loss function. It is defined as the
# mean squared error loss function for small values of the error,
# but becomes a mean absolute error loss function for larger values of the error.
# This makes it more resilient to the effects of outliers, since the loss for these points
# is not squared and therefore not disproportionately large compared to the rest of the data.
# Was experimenting with this, but tf.keras.losses.Huber() is more efficient.
# Build the online/target network pair; each model has 256 hidden units.
primary_network = DQModel(256, num_actions, True)
target_network = DQModel(256, num_actions, True)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
# make target_network = primary_network (copy weights variable by variable)
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
    t.assign(e)
class Node:
    """A sum-tree node: an internal node's value is the sum of its children.

    Leaves carry an ``idx`` into the backing replay buffer; parent links are
    kept so priority changes can be propagated up to the root.
    """

    def __init__(self, left, right, is_leaf: bool = False, idx=None):
        self.left = left
        self.right = right
        self.is_leaf = is_leaf
        # sum of whichever children exist (0 for a bare leaf before set)
        self.value = sum(child.value for child in (left, right) if child is not None)
        self.parent = None
        self.idx = idx  # only meaningful for leaf nodes
        for child in (left, right):
            if child is not None:
                child.parent = self

    @classmethod
    def create_leaf(cls, value, idx):
        """Build a leaf holding *value* for buffer slot *idx*."""
        node = cls(None, None, is_leaf=True, idx=idx)
        node.value = value
        return node


def create_tree(input: list):
    """Build a sum tree over *input* (length assumed a power of two).

    Returns (root, leaf_nodes) where leaf_nodes[i] holds input[i].
    """
    layer = [Node.create_leaf(v, i) for i, v in enumerate(input)]
    leaf_nodes = layer
    while len(layer) > 1:
        pairs = iter(layer)
        layer = [Node(a, b) for a, b in zip(pairs, pairs)]
    return layer[0], leaf_nodes


def retrieve(value: float, node: Node):
    """Descend to the leaf whose cumulative-priority range contains *value*."""
    while not node.is_leaf:
        if node.left.value >= value:
            node = node.left
        else:
            value -= node.left.value
            node = node.right
    return node


def update(node: Node, new_value: float):
    """Set a leaf's value and propagate the delta up toward the root."""
    delta = new_value - node.value
    node.value = new_value
    propagate_changes(delta, node.parent)


def propagate_changes(change: float, node: Node):
    """Add *change* to *node* and to every ancestor above it."""
    node.value += change
    if node.parent is not None:
        propagate_changes(change, node.parent)
class Memory(object):
    """Prioritized experience replay (PER) buffer backed by a sum tree.

    Stores (frame, action, reward, terminal) tuples in a ring buffer;
    sampling probability is proportional to priority**alpha, and
    importance-sampling weights are corrected by the beta exponent.
    """

    def __init__(self, size: int):
        self.size = size
        self.curr_write_idx = 0      # next ring-buffer slot to overwrite
        self.available_samples = 0   # number of valid entries so far
        self.buffer = [(np.zeros((POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1]), dtype=np.float32), 0.0, 0.0, 0.0) for i in range(self.size)]
        self.base_node, self.leaf_nodes = create_tree([0 for i in range(self.size)])
        # field positions within a buffer entry tuple
        self.frame_idx = 0
        self.action_idx = 1
        self.reward_idx = 2
        self.terminal_idx = 3
        self.beta = 0.4          # IS-weight exponent (annealed externally)
        self.alpha = 0.6         # priority shaping exponent
        self.min_priority = 0.01 # floor so no transition gets zero probability

    def append(self, experience: tuple, priority: float):
        """Write an experience at the ring position and record its priority."""
        self.buffer[self.curr_write_idx] = experience
        self.update(self.curr_write_idx, priority)
        self.curr_write_idx += 1
        # reset the current writer position index if greater than the allowed size
        if self.curr_write_idx >= self.size:
            self.curr_write_idx = 0
        # max out available samples at the memory buffer size
        # NOTE(review): this caps at size-1 rather than size — presumably to
        # keep a one-slot margin for next-state lookups; confirm intent.
        if self.available_samples + 1 < self.size:
            self.available_samples += 1
        else:
            self.available_samples = self.size - 1

    def update(self, idx: int, priority: float):
        # write the shaped priority into the sum-tree leaf for slot idx
        update(self.leaf_nodes[idx], self.adjust_priority(priority))

    def adjust_priority(self, priority: float):
        # standard PER shaping: (p + epsilon) ** alpha
        return np.power(priority + self.min_priority, self.alpha)

    def sample(self, num_samples: int):
        """Draw a prioritized batch.

        Returns (states, actions, rewards, next_states, terminal,
        sampled_idxs, is_weights); state tensors are NUM_FRAMES-deep stacks
        rebuilt from consecutive buffer entries around each sampled index.
        """
        sampled_idxs = []
        is_weights = []
        sample_no = 0
        while sample_no < num_samples:
            # uniform draw over total priority mass -> prioritized leaf pick
            sample_val = np.random.uniform(0, self.base_node.value)
            samp_node = retrieve(sample_val, self.base_node)
            # only accept indices with NUM_FRAMES-1 frames of history behind
            # them and at least one frame ahead (for the next-state stack)
            if NUM_FRAMES - 1 < samp_node.idx < self.available_samples - 1:
                sampled_idxs.append(samp_node.idx)
                p = samp_node.value / self.base_node.value
                is_weights.append((self.available_samples + 1) * p)
                sample_no += 1
        # apply the beta factor and normalise so that the maximum is_weight < 1
        is_weights = np.array(is_weights)
        is_weights = np.power(is_weights, -self.beta)
        is_weights = is_weights / np.max(is_weights)
        # now load up the state and next state variables according to sampled idxs
        states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
                          dtype=np.float32)
        next_states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
                               dtype=np.float32)
        actions, rewards, terminal = [], [], []
        for i, idx in enumerate(sampled_idxs):
            for j in range(NUM_FRAMES):
                # rebuild the frame stacks from consecutive buffer entries
                states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 1][self.frame_idx][:, :, 0]
                next_states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 2][self.frame_idx][:, :, 0]
            actions.append(self.buffer[idx][self.action_idx])
            rewards.append(self.buffer[idx][self.reward_idx])
            terminal.append(self.buffer[idx][self.terminal_idx])
        return states, np.array(actions), np.array(rewards), next_states, np.array(terminal), sampled_idxs, is_weights
# The Memory class is used to store past experiences from the environment in a replay buffer,
# which is then used to train the reinforcement learning model. The Memory class uses a priority
# queue implemented as a sum tree data structure to prioritize experiences in the replay buffer
# according to their importance, with more important experiences being more likely to be sampled for training.
memory = Memory(200000)
# preprocesses an image to be inputted into the network
def image_preprocess(image, new_size=(105, 80)):
    """Convert an RGB frame to a resized, [0, 1]-normalized greyscale tensor."""
    grey = tf.image.rgb_to_grayscale(image)
    resized = tf.image.resize(grey, new_size)
    return resized / 255
# chooses an action (epsilon greedy function)
def choose_action(state, primary_network, eps, step):
    """Epsilon-greedy action pick (purely random until training begins)."""
    # short-circuit keeps RNG consumption identical to the original branches
    if step < DELAY_TRAINING or random.random() < eps:
        return random.randint(0, num_actions - 1)
    stacked = tf.reshape(state, (1, POST_PROCESS_IMAGE_SIZE[0],
                                 POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)).numpy()
    return np.argmax(primary_network(stacked))
# Updates from primary network
def update_network(primary_network, target_network):
    """Soft-update the target network: t <- (1 - TAU) * t + TAU * e."""
    variable_pairs = zip(target_network.trainable_variables,
                         primary_network.trainable_variables)
    for target_var, online_var in variable_pairs:
        target_var.assign(target_var * (1 - TAU) + online_var * TAU)
# Processes the state stack.
def process_state_stack(state_stack, state):
    """Shift the frame stack left by one slot and append the newest frame.

    ``state_stack`` is a 3-D tf.Variable whose last axis indexes frames and
    ``state`` is the latest preprocessed frame (last axis of size >= 1).
    Mutates the variable in place via ``assign`` and returns it.
    """
    for i in range(1, state_stack.shape[-1]):
        state_stack[:, :, i - 1].assign(state_stack[:, :, i])
    state_stack[:, :, -1].assign(state[:, :, 0])
    return state_stack
# Records a gif replay of the entire game using imageio.
def record_gif(frame_list, episode, reward, fps=50):
    """Save an episode's frames as a GIF (skipped for very short episodes).

    BUG FIX: the old signature was (frame_list, episode, fps=50) while the
    caller passes (frame_list, i, tot_reward), so the reward silently became
    the frame rate and the filename read a lucky global ``reward``. The path
    also hard-coded a Windows '\\' separator; os.path.join is portable.
    """
    if len(frame_list) > 50:
        imageio.mimsave(
            os.path.join(STORE_PATH, "SPACE_INVADERS_EPISODE-eps{}-r{}.gif".format(episode, reward)),
            frame_list, fps=fps)
def get_per_error(states, actions, rewards, next_states, terminal, primary_network, target_network):
    """Compute double-DQN training targets and per-sample TD errors.

    Returns (target_q, error): target_q equals the online network's Q values
    with each row's taken action overwritten by the bootstrapped target, and
    error holds the Huber-weighted TD error used as the PER priority.
    """
    # predict Q(s,a) given the batch of states
    prim_qt = primary_network(states)
    # predict Q(s',a') from the evaluation network
    prim_qtp1 = primary_network(next_states)
    # copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
    target_q = prim_qt.numpy()
    # the action selection from the primary / online network
    prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
    # the q value for the prim_action_tp1 from the target network
    q_from_target = target_network(next_states)
    # BUG FIX: `[:, prim_action_tp1]` and `[:, actions]` select a
    # (batch, batch) sub-matrix; each row must index its OWN action, so pair
    # row indices with the per-row action indices.
    batch_idxs = np.arange(states.shape[0])
    updates = rewards + (1 - terminal) * GAMMA * q_from_target.numpy()[batch_idxs, prim_action_tp1]
    target_q[batch_idxs, actions] = updates
    # calculate the loss / error to update priorities
    error = [huber_loss(target_q[i, actions[i]] - prim_qt.numpy()[i, actions[i]]) for i in range(states.shape[0])]
    return target_q, error
def train(primary_network, memory, target_network):
    """Sample a prioritized batch, refresh its priorities, fit the online net."""
    batch = memory.sample(BATCH_SIZE)
    states, actions, rewards, next_states, terminal, idxs, is_weights = batch
    target_q, error = get_per_error(states, actions, rewards, next_states,
                                    terminal, primary_network, target_network)
    # push the freshly computed TD errors back as the new priorities
    for idx, err in zip(idxs, error):
        memory.update(idx, err)
    return primary_network.train_on_batch(states, target_q, is_weights)
num_episodes = 1501
# In practice, model weights are saved as multiples of 100. Therefore, set num_episodes to be a multiple of 100 + 1 (0 counts as an episode)
eps = MAX_EPSILON
render = False  # If true, will show bot working in real time. Set false to save on graphics power.
train_writer = tf.summary.create_file_writer(STORE_PATH + "/DuelingQPERSI_{}".format(dt.datetime.now().strftime('%d%m%Y%H%M')))
steps = 0
# Main training loop: one outer iteration per episode.
for i in range(num_episodes):
    state = env.reset()
    state = image_preprocess(state[0])
    # seed the frame stack with NUM_FRAMES copies of the first frame
    state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
                                                                            POST_PROCESS_IMAGE_SIZE[1],
                                                                            NUM_FRAMES)))
    cnt = 1
    avg_loss = 0
    tot_reward = 0
    if i % GIF_RECORDING_FREQ == 0:
        frame_list = []
    while True:
        if render:
            env.render()
        action = choose_action(state_stack, primary_network, eps, steps)
        next_state, reward, terminated, truncated, info = env.step(action)
        done = terminated or truncated
        tot_reward += reward
        if i % GIF_RECORDING_FREQ == 0:
            frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
        next_state = image_preprocess(next_state)
        old_state_stack = state_stack
        state_stack = process_state_stack(state_stack, next_state)
        if steps > DELAY_TRAINING:
            loss = train(primary_network, memory, target_network)
            update_network(primary_network, target_network)
            # compute this transition's PER priority before storing it
            _, error = get_per_error(tf.reshape(old_state_stack, (1, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)), np.array([action]), np.array([reward]), tf.reshape(state_stack, (1, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)), np.array([done]), primary_network, target_network)
            # store in memory
            memory.append((next_state, action, reward, done), error[0])
        else:
            loss = -1
            # store in memory - default the priority to the reward
            memory.append((next_state, action, reward, done), reward)
        avg_loss += loss
        # linearly decay the eps and PER beta values
        if steps > DELAY_TRAINING:
            eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
                  (MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
                  MIN_EPSILON
            beta = MIN_BETA + ((steps - DELAY_TRAINING) / BETA_DECAY_ITERS) * \
                   (MAX_BETA - MIN_BETA) if steps < BETA_DECAY_ITERS else \
                   MAX_BETA
            memory.beta = beta
        steps += 1
        if done:
            if steps > DELAY_TRAINING:
                avg_loss /= cnt
                print("Episode: {}, Reward: {}, avg loss: {:.5f}, eps: {:.3f}".format(i, tot_reward, avg_loss, eps))
                with train_writer.as_default():
                    tf.summary.scalar('reward', tot_reward, step=i)
                    tf.summary.scalar('avg loss', avg_loss, step=i)
            else:
                print("Pre-training...Episode: {}".format(i))
            if i % GIF_RECORDING_FREQ == 0:
                record_gif(frame_list, i, tot_reward)
            break
        cnt += 1
    # periodically checkpoint both networks
    if i % MODEL_SAVE_FREQ == 0:  # and i != 0:
        primary_network.save_weights(STORE_PATH + "/checkpoints/cp_primary_network_episode_{}.ckpt".format(i))
        target_network.save_weights(STORE_PATH + "/checkpoints/cp_target_network_episode_{}.ckpt".format(i))
#primary_network
#target_network
#primary_network = DQModel(256, num_actions, True)
#target_network = DQModel(256, num_actions, True)
# primary_network.load_weights(STORE_PATH + "/checkpoints/cp_primary_network_episode_1000.ckpt")
# target_network.load_weights(STORE_PATH + "/checkpoints/cp_target_network_episode_1000.ckpt")
# env = gym.make("SpaceInvaders-v0", render_mode="human")
# render = True
# for i in range(1):
# state = env.reset()
# state = image_preprocess(state[0])
# state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
# POST_PROCESS_IMAGE_SIZE[1],
# NUM_FRAMES)))
# cnt = 1
# avg_loss = 0
# tot_reward = 0
# if i % GIF_RECORDING_FREQ == 0:
# frame_list = []
# while True:
# if render:
# env.render()
# action = choose_action(state_stack, primary_network, 0, 51000) # guarantees primary network is chosen
# next_state, reward, terminated, truncated, info = env.step(action)
# done = terminated or truncated
# tot_reward += reward
# #if i % GIF_RECORDING_FREQ == 0:
# # frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
# next_state = image_preprocess(next_state)
# old_state_stack = state_stack
# state_stack = process_state_stack(state_stack, next_state) | nhovadia/CSCI4830_final_project | SpaceInvaders_Training.py | SpaceInvaders_Training.py | py | 21,331 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gym.make",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.Model",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "tensorflow.kera... |
37158413403 | from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# Load iris and rescale two feature columns so the raw features end up on
# very different ranges (presumably cm -> inch for col 0; /100 for col 1),
# which motivates standardizing before PCA below.
data = load_iris()
X = data.data
X[:, 0] /= 2.54
X[:, 1] /= 100
def scikit_pca(X):
    """Standardize X, project onto its first two principal components, plot."""
    standardized = StandardScaler().fit_transform(X)
    pca_model = PCA(n_components=2)
    projected = pca_model.fit_transform(standardized)
    plt.figure(figsize=(11, 11))
    plt.scatter(projected[:, 0], projected[:, 1], s=600, color='#8383c4', alpha=0.56)
    plt.title('PCA via scikit-learn (using SVD)', fontsize=20)
    plt.xlabel('Petal Width', fontsize=15)
    plt.ylabel('Sepal Length', fontsize=15)
    plt.show()
scikit_pca(X) | QiliWu/Python-datavis | datavis/PCA.py | PCA.py | py | 668 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 13,
"usage_type": "call"
},
... |
20913974107 | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view: serves the OpenAPI spec for the swagger/redoc UIs below.
schema_view = get_schema_view(
    openapi.Info(
        title="Book API",
        default_version='v1',
        description="API for books and authors",
        terms_of_service="https://www.yourapp.com/terms/",
        contact=openapi.Contact(email="contact@yourapp.com"),
        license=openapi.License(name="Your License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),  # docs are world-readable
)

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('book.urls')),
    path('api/', include('api.urls')),
    path('user/', include('users.urls')),
    # interactive API documentation
    path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    # JWT auth endpoints (simplejwt)
    path('api/token-auth/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token-refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('api-auth/vote', include('rest_framework.urls')),
]

# serve static/media files directly only in development (DEBUG=True)
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| almazuulu/BookCatalogue | bookcatalogue/bookcatalogue/urls.py | urls.py | py | 1,502 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "drf_yasg.views.get_schema_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": ... |
16627798046 | from flask import Flask, request
import json
app = Flask(__name__)

# create python dictionary to hold user data
# NOTE(review): module-level in-memory storage -- data is lost on restart and
# is not shared/safe across multiple server processes.
user_account = {
    1:
        {"first_name": 'betty',
         "last_name": 'joy',
         "phone_number": '0493827405',
         "email_address": 'bettyjoy@accounts.com'
         }
}

# highest user id handed out so far; doubles as the id generator
user_count = 1
@app.route('/get_user/<id>', methods=["GET"])
def get_user(id):
    """Return the stored record for user *id* (KeyError if absent)."""
    user_id = int(id)
    return user_account[user_id]
@app.route('/create_user', methods=["POST"])
def create_user():
    """Store the JSON request body as a new user; return the new id as text."""
    global user_count
    user_count += 1
    payload = json.loads(request.data)
    user_account[user_count] = payload
    return str(user_count)
@app.route('/update_user/<id>', methods=["PUT"])
def update_user(id):
    """Replace user *id*'s record with the JSON request body; echo it back."""
    user_id = int(id)
    user_account[user_id] = json.loads(request.data)
    return user_account[user_id]
@app.route('/delete_user/<id>', methods=["DELETE"])
def delete_user(id):
    """Remove user *id* from the store.

    BUG FIX: user_count is no longer decremented here. It is the id
    generator, so decrementing it after deleting an old user made
    create_user re-issue an id that may still be in use, silently
    overwriting an existing record.
    """
    user_account.pop(int(id))
    return "user deleted"
| rikiapst/cephaloPy | user.py | user.py | py | 995 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"li... |
21952104219 | import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from autoslug.utils import slugify
from rainboard.models import (
Forge,
Namespace,
Project,
Robotpkg,
update_github,
update_gitlab,
)
class Command(BaseCommand):
    """Management command: locate a project on GitHub/GitLab/robotpkg and
    register it (plus its robotpkg packages and images) in the database."""

    help = "run project creation stuff"  # noqa: A003

    def add_arguments(self, parser):
        # positional args: the namespace slug and the project name
        parser.add_argument("org")
        parser.add_argument("project")

    def handle(self, org, project, *args, **options):  # noqa: C901
        path = settings.RAINBOARD_RPKG
        logger = logging.getLogger("rainboard.management.project")
        org = Namespace.objects.get(slug=org)
        slug = slugify(project)
        logger.warning("looking for %s / %s", org, slug)
        project = Project.objects.filter(slug=slug)
        if project.exists():
            logger.warning("found %s", project.get())
        else:
            # unknown project: scan the forges until a matching repo is found
            logger.warning("not found. let's get it from github & gitlab")
            github = Forge.objects.get(slug="github")
            for data in github.api_list(f"/orgs/{org.slug}/repos"):
                if slugify(data["name"]) == slug:
                    logger.warning("found on github / %s", org)
                    update_github(github, org, data)
                    break
            # also look under individual users' repositories
            for user in Namespace.objects.filter(group=False):
                for data in github.api_list(f"/users/{user.slug}/repos"):
                    if slugify(data["name"]) == slug:
                        logger.warning("found on github / %s", user)
                        update_github(github, user, data)
                        break
            gitlab = Forge.objects.get(slug="gitlab")
            for data in gitlab.api_list("/projects"):
                if slugify(data["name"]) == slug:
                    logger.warning("found on gitlab / %s", data["namespace"]["name"])
                    update_gitlab(gitlab, data)
        # re-fetch as a single model instance (filter() above gave a queryset)
        project = Project.objects.get(slug=slug)
        # look for matching robotpkg packages under both '_' and '-' spellings
        for slug in [project.slug, project.slug.replace("_", "-")]:
            for pkg in itertools.chain(
                path.glob(f"*/{slug}{project.suffix}"),
                path.glob(f"*/py-{slug}{project.suffix}"),
            ):
                obj, created = Robotpkg.objects.get_or_create(
                    name=pkg.name,
                    category=pkg.parent.name,
                    project=project,
                )
                if created:
                    logger.warning("found on robotpkg %s", obj)
                obj.update(pull=False)
        for rpkg in project.robotpkg_set.all():
            logger.warning("updating images for %s", rpkg)
            rpkg.update_images()
        logger.warning("Done")
| Gepetto/dashboard | rainboard/management/commands/project.py | project.py | py | 2,763 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.RAINBOARD_RPKG",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 27,
"usage_type": ... |
29462931299 | from evdev import ecodes
from datetime import datetime, timedelta
from four_button_pages import StaticMenu, TokenEntryMenu, PAYGStatusMenu, ServiceMenu
class FourButtonUserInterface(object):
BACKLIGHT_TIMEOUT = 300
def __init__(self, lcd, conn, kbd, static_pages):
self.conn = conn
self.disp = lcd
self.kbd = kbd
self.static_pages = static_pages
self.last_key_pressed = datetime.now()
self.selected_menu = None
self.current_menu = None
self.index = 0
self.last_index = 1
self.last_menu_number = 0
self.menus = [
('PAYG Status', PAYGStatusMenu(self.conn)),
('Enter Token', TokenEntryMenu(self.conn)),
('LAN Status', StaticMenu(self.static_pages[16])),
('WiFi Status', StaticMenu(self.static_pages[17])),
('General Status', StaticMenu(self.static_pages[0])),
('Solar Status', StaticMenu(self.static_pages[12])),
('Battery Status', StaticMenu(self.static_pages[18])),
('Solar History', StaticMenu(self.static_pages[14])),
('Service Menu', ServiceMenu(self.conn)),
]
self.alarm_menus = [
StaticMenu(self.static_pages[2]), # VE Bus error
StaticMenu(self.static_pages[3]), # VE Bus alarm
StaticMenu(self.static_pages[13]), # Solar error
]
def start(self):
self.disp.clear()
self.update_menu_list()
def key_pressed(self):
self.last_key_pressed = datetime.now()
for event in self.kbd.read():
if event.type == ecodes.EV_KEY and event.value == 1:
self.update_current_menu(event.code)
def tick(self):
self.display_alarms()
self.update_current_menu(None)
self.update_backlight_status()
def update_backlight_status(self):
if self.last_key_pressed + timedelta(seconds=self.BACKLIGHT_TIMEOUT) < datetime.now():
self.disp.on = False
else:
self.disp.on = True
def display_alarms(self):
for alarm in self.alarm_menus:
alarm.enter(self.conn, self.disp) # It will only display if the menu actually exists
def get_available_menus(self):
menus = []
for menu in self.menus:
if menu[1].is_available(self.conn):
menus.append(menu)
return menus
def update_menu_list(self):
menus = self.get_available_menus()
number_menus = len(menus)
if number_menus < self.last_menu_number:
self.index = 0
self.last_menu_number = number_menus
if number_menus == 0:
top_string = ' Victron Energy '
bottom_string = ' '.ljust(16, ' ')
elif number_menus == 1:
top_string = menus[0][0].ljust(15, ' ') + '>'
bottom_string = ' '.ljust(16, ' ')
else:
if self.index < self.last_index:
top_string = menus[self.index][0].ljust(15, ' ') + '>'
bottom_string = menus[self.index + 1][0].ljust(15, ' ') + ' '
else:
top_string = menus[self.index - 1][0].ljust(15, ' ') + ' '
bottom_string = menus[self.index][0].ljust(15, ' ') + '>'
self.disp.display_string(top_string, 1)
self.disp.display_string(bottom_string, 2)
self.selected_menu = menus[self.index][1]
def menu_list_loop(self, key_pressed):
number_of_menus = len(self.get_available_menus())
if key_pressed == ecodes.KEY_UP:
if self.index > 0:
self.last_index = self.index
self.index -= 1
self.update_menu_list()
if key_pressed == ecodes.KEY_DOWN:
if self.index < number_of_menus - 1:
self.last_index = self.index
self.index += 1
self.update_menu_list()
if key_pressed == ecodes.KEY_RIGHT:
self.current_menu = self.selected_menu
self.current_menu.enter(self.conn, self.disp)
else:
self.update_menu_list()
def update_current_menu(self, key_pressed):
        """Forward a key press to the active submenu, or to the menu list.

        A submenu's update() returning falsy means the user left it; the
        display is then cleared and control returns to the menu list view.
        """
        if self.current_menu is not None and not self.current_menu.update(self.conn, self.disp, key_pressed):
            # Submenu exited: reset state and fall through to the list view.
            self.current_menu = None
            key_pressed = None
            self.disp.clear()
            self.update_menu_list()
        if self.current_menu is None:
            self.menu_list_loop(key_pressed)
| victronenergy/dbus-characterdisplay | four_button_ui.py | four_button_ui.py | py | 4,511 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "four_button_pages.PAYGStatusMenu",
"line_number": 22,
"usage_type": "call"
},
{
"api_name... |
29224887814 | """ Plot cross-section profile: Multiple cross-sections
"""
import sys
sys.path.append('/home/zhouyj/software/seis_view')
import numpy as np
import matplotlib.pyplot as plt
from reader import read_ctlg, slice_ctlg
import warnings
warnings.filterwarnings("ignore")
# i/o paths
fctlg = 'input/catalog_example.csv'
title = 'Example Cross-Section View: Multiple Cross-Sections'
fout = 'output/example_prof_multi-cross-sec.pdf'
# catalog info
lon_rng = [102.23, 102.32]
lat_rng = [29.14, 29.25]
dep_rng = [5, 15]
dep_corr = 0
mag_corr = 1.
ref_pnts = np.array(\
[[102.26,29.235],[102.285,29.165],
[102.25,29.2],[102.29,29.21],
[102.255,29.18],[102.295,29.19]]) # [lon,lat]
pnt_names = ["A","A'","B","B'","C","C'"]
prof_wids = [1.5,1,1] # km
# fig params
fig_size = (10*0.8, 10*0.8)
subplots = [212,221,222]
mark_size = 5.
alpha=0.8
color = 'tab:blue'
fsize_label = 12
fsize_title = 14
xlabel = 'Along-Profile Distance (km)'
ylabel = 'Depth (km)'
subplot_rect = {'left':0.08, 'right':0.96, 'bottom':0.08, 'top':0.95, 'wspace':0.1, 'hspace':0.1}
# read catalog
events = read_ctlg(fctlg)
events = slice_ctlg(events, lat_rng=lat_rng, lon_rng=lon_rng, dep_rng=dep_rng)
lat = np.array(list(events['lat']))
lon = np.array(list(events['lon']))
dep = np.array(list(events['dep'])) + dep_corr
mag = (np.array(list(events['mag'])) + mag_corr) * mark_size
num_events = len(events)
# calc along profile dist
def calc_prof(ref_pnt):
    """Project catalog events onto the profile line A->B.

    ref_pnt holds the two [lon, lat] endpoints.  Reads the module-level
    lon/lat/dep/mag arrays, num_events and prof_wid (km); events farther
    than prof_wid from the line, or projecting outside the A-B segment,
    are dropped.  Uses a flat-earth approximation (1 deg ~ 111 km,
    longitude scaled by cos(latitude of A)).

    Returns (along-profile distances in km, depths, marker sizes,
    profile length in km).
    """
    prof_dist, prof_dep, prof_mag = [], [], []
    cos_lat = np.cos(ref_pnt[0][1]*np.pi/180)
    vec_ab = ref_pnt[1] - ref_pnt[0]
    vec_ab[0] *= cos_lat
    abs_ab = np.linalg.norm(vec_ab)
    for i in range(num_events):
        loc_c = np.array([lon[i], lat[i]])
        vec_ac = loc_c - ref_pnt[0]
        vec_ac[0] *= cos_lat
        abs_ac = np.linalg.norm(vec_ac)
        cos = vec_ac.dot(vec_ab) / abs_ab / abs_ac
        # Perpendicular distance test: reject events beyond the half-width.
        if abs_ac * (1-cos**2)**0.5 > prof_wid/111.: continue
        # Keep only events whose projection falls between A and B.
        if cos<0 or abs_ac*cos>abs_ab: continue
        prof_dist.append(abs_ac * cos * 111)
        prof_dep.append(dep[i])
        prof_mag.append(mag[i])
    return prof_dist, prof_dep, prof_mag, abs_ab*111
def plot_label(xlabel=None, ylabel=None, yvisible=True):
    """Apply axis labels and tick-label font sizes to the current subplot.

    Relies on the module-level `ax` and `fsize_label`; pass yvisible=False
    to hide y tick labels on subplots that share a y-axis.
    """
    if xlabel: plt.xlabel(xlabel, fontsize=fsize_label)
    if ylabel: plt.ylabel(ylabel, fontsize=fsize_label)
    plt.setp(ax.xaxis.get_majorticklabels(), fontsize=fsize_label)
    plt.setp(ax.yaxis.get_majorticklabels(), fontsize=fsize_label, visible=yvisible)
# start plot
plt.figure(figsize=fig_size)
for i,subplot in enumerate(subplots):
# get specific params
prof_wid = prof_wids[i]
# plot subplot
plt.subplot(subplot)
ax = plt.gca()
ax.invert_yaxis()
# proj to proile
prof_dist, prof_dep, prof_mag, abs_ab = calc_prof(ref_pnts[2*i:2*i+2])
plt.scatter(prof_dist, prof_dep, prof_mag, color=color, edgecolor='none', alpha=alpha)
# plot ref pnt
plt.annotate(pnt_names[2*i], (0,dep_rng[0]), fontsize=fsize_label, va='top', ha='center')
plt.annotate(pnt_names[2*i+1], (abs_ab,dep_rng[0]), fontsize=fsize_label, va='top', ha='center')
# fill edge
edgex = [0,0,abs_ab,abs_ab]
edgey = [dep_rng[0],dep_rng[1],dep_rng[0],dep_rng[1]]
plt.scatter(edgex, edgey, alpha=0)
if i==0: plot_label(xlabel,ylabel)
elif i==1: plot_label(ylabel=ylabel)
else: plot_label(yvisible=False)
plt.suptitle(title, fontsize=fsize_title)
plt.subplots_adjust(**subplot_rect)
plt.savefig(fout)
plt.show()
| ali4413/Seismicity-Visualization | Python/plot_prof_multi-cross-sec.py | plot_prof_multi-cross-sec.py | py | 3,432 | python | en | code | null | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
38710107620 | from discord_slash import cog_ext
from discord.ext import commands
from txns import get_wallet
import asyncio
from embeds import *
import discord
import pytz
from datetime import datetime
import random
from client import client
from txns import *
from whitelist import ghosts, ghostsIcons, fo_rank1, fo_rank2, fo_rank3, fo_rank4, fo_rank5
sign_ups = []
def get_guild():
    """Return the bot's Discord guild (hard-coded Fallen Order guild id)."""
    guild = client.get_guild(936698039941345370)
    return guild
guild = get_guild()
class DripCog(commands.Cog):
@cog_ext.cog_slash(name="drip", description="Drips Out 1-5 $EXP Every 6 Hours!")
@commands.cooldown(1, 10, commands.BucketType.guild)
async def drip_claim(self, ctx):
if ctx.channel.id == 937750181154279485 or ctx.channel.id == 936801867340582982:
await ctx.send(embed=embedWrongChannelDrip, hidden=True)
return
else:
userid = str(ctx.author.id)
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(userid)
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
else:
balance = await get_balance(wallet, 811721471)
if balance == -1:
await ctx.send(embed=embedNoOptEXP)
return
else:
utc = pytz.timezone('UTC')
lastdrip_datetime = datetime.strptime(lastdrip, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=utc)
now = datetime.now(utc)
time_diff = now - lastdrip_datetime
total_seconds = time_diff.total_seconds()
if total_seconds < 6 * 60 * 60:
next_claim = ((60*60*6) - total_seconds)
timer = ((datetime.utcfromtimestamp(next_claim)).strftime('%HH %MM %SS')).lstrip('0')
if timer.startswith("H "):
dt = timer[2:]
else:
dt = timer
embedNoDrip = discord.Embed(
title=f"You have already made a drip claim less than 6 hours ago!",
description=f"Please come back when your timer resets...",
color=0xFF1C0A,
)
embedNoDrip.set_footer(text=f"Next Claim In {dt} ⏱️")
await ctx.send(embed=embedNoDrip, hidden=True)
return
else:
exp = [1, 2, 3, 4, 5]
random_exp = random.choice(exp)
new_exp = int(drip_exp + random_exp)
current_time = (datetime.now(utc)).strftime('%Y-%m-%dT%H:%M:%SZ')
txnid = await send_assets("Angels Of Ares", fallen_order_main, wallet, 811721471, "EXP", random_exp)
embedDrip.add_field(name=f"Dripped out {random_exp} $EXP to <@{ctx.author.id}>!", value=f"[Txn Link](https://algoexplorer.io/tx/{txnid})", inline=True)
await ctx.send(embed=embedDrip)
embedDrip.clear_fields()
await add_drip(wallet, current_time, new_exp)
@drip_claim.error
async def drip_claim_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(embed=embedCD, hidden=True)
class WingRevenueCog(commands.Cog):
@cog_ext.cog_slash(name="wing-revenue", description="Admin Use Only!")
async def wing_count(self, ctx):
await ctx.defer()
if ctx.author.id != 805453439499894815:
await ctx.send(embed=embedAdminOnly)
return
else:
totalwings = 0
send_data = []
wallets = await get_all_wallets()
for wallet in wallets:
wingscount = 0
if wallet["address"] != "AOAZMP5WTCCHOPKZZICV5KEZ7IH6BRFIDI47ONQU42QNOTTAW4ACZVXDHA":
account_info = algod_client.account_info(wallet['address'])
assets = account_info.get("assets", [])
for asset in assets:
if asset["amount"] > 0 and asset["asset-id"] in angel_wings:
wingscount = asset["amount"]
if wingscount != 0:
send_data.append([wallet["address"], wingscount, wallet["userid"]])
totalwings += wingscount
current_time = (datetime.now()).strftime('%Y-%m-%dT%H:%M:%SZ')
await update_wings(wallet["address"], current_time, wingscount)
totalwings_with_angel = int(totalwings*1.33333)
payment_per_wing = round(350/totalwings_with_angel, 3)
embedAW.set_footer(text=f"All Algorand Drops Are Successful! 🧙♂️")
await send_revenue(send_data, payment_per_wing)
embedAW.add_field(name=f"View Revenue Wallet Below:", value=f"[AlgoExplorer Link](https://algoexplorer.io/address/{angel_wings_wallet})", inline=False)
embedAW.add_field(name=f"-----------------------------------------------", value="", inline=False)
embedAW.add_field(name=f"Total Staked Angel Wings", value=f"{totalwings_with_angel}", inline=False)
embedAW.add_field(name=f"Payment Sent Per Angel Wing", value=f"{payment_per_wing}A", inline=False)
await ctx.send(embed=embedAW)
embedAW.clear_fields()
send_data = []
class StakingCog(commands.Cog):
@cog_ext.cog_slash(name="admin-staking-drop", description="Admin Use Only!")
async def send_staking(self, ctx):
if ctx.author.id != 805453439499894815:
await ctx.send(embed=embedAdminOnly)
return
else:
embedStaking = discord.Embed(
title="Staking Rewards Drop Commencing...",
color=0xFF1C0A,
)
embedStaking.set_footer(text=f"Please wait while I gather The Order and The Ghosts Of Algo 🧙♂️")
message = await ctx.send(embed=embedStaking)
send_data = []
wallets = await get_all_wallets()
total_staked = 0
total_staked_ghosts = 0
total_order_sent = 0
total_exp_sent = 0
for wallet in wallets:
if wallet["address"] != "AOAZMP5WTCCHOPKZZICV5KEZ7IH6BRFIDI47ONQU42QNOTTAW4ACZVXDHA":
account_info = algod_client.account_info(wallet['address'])
assets = account_info.get("assets", [])
ghostcount = 0
ghosticoncount = 0
fo_1count = 0
fo_2count = 0
fo_3count = 0
fo_4count = 0
fo_5count = 0
for asset in assets:
if asset["amount"] > 0 and asset["asset-id"] in ghosts:
ghostcount += 1
if asset["amount"] > 0 and asset["asset-id"] in ghostsIcons:
ghosticoncount += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank1:
fo_1count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank2:
fo_2count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank3:
fo_3count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank4:
fo_4count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank5:
fo_5count += 1
count = fo_1count + fo_2count + fo_3count + fo_4count + fo_5count
ghosts_final_count = ghostcount + ghosticoncount
total_exp = ghostcount + (ghosticoncount*5) + (fo_1count*3) + (fo_2count*5) + (fo_3count*8) + (fo_4count*12) + (fo_5count*25)
total_order = count
send_data.append([wallet["address"], count, ghosts_final_count, total_order, total_exp])
total_staked += count
total_staked_ghosts += ghosts_final_count
total_order_sent += total_order
total_exp_sent += total_exp
await staking_rewards(send_data)
embedStaking = discord.Embed(
title="Staking Rewards Drop Complete!",
color=0xFF1C0A,
)
embedStaking.add_field(name=f"Staked Fallen Order", value=f"{total_staked}", inline=False)
embedStaking.add_field(name=f"Staked Ghosts Of Algo", value=f"{total_staked_ghosts}", inline=False)
embedStaking.add_field(name=f"Total Staking Rewards Sent", value=f"{total_order_sent} $ORDER | {total_exp_sent} $EXP", inline=False)
embedStaking.set_footer(text=f"Play some games and upgrade your characters! 🧙♂️")
embedStaking.set_image(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
await message.edit(embed=embedStaking)
send_data = []
class BuyTicketsCog(commands.Cog):
    """Slash command for buying $RAFFLE tickets with $ORDER or $EXP."""
    @cog_ext.cog_slash(name="tickets", description="Buy $RAFFLE Tickets With ORDER/EXP", options=[
        {
            "name": "payment",
            "description": "Payment Currency",
            "type": 3,
            "required": True,
            "choices": [
                {
                    "name": "ORDER",
                    "value": "ORDER"
                },
                {
                    "name": "EXP",
                    "value": "EXP"
                }
            ]
        },
        {
            "name": "amount",
            "description": "Amount Of Tickets To Buy",
            "type": 4,
            "required": True
        }
    ])
    async def buy_tickets(self, ctx, payment, amount):
        """Sell `amount` raffle tickets at 5 $ORDER or 50 $EXP apiece.

        Charges the caller's registered wallet and sends back $RAFFLE.
        """
        # Map the payment choice (restricted by the slash-command options)
        # to its ASA id and compute the total price in that token.
        if payment == "ORDER":
            token_id = 811718424
            cost = amount * 5
        elif payment == "EXP":
            token_id = 811721471
            cost = amount * 50
        # Consistency fix: sibling cogs look wallets up by the *string* user
        # id; passing the raw int here risked a failed lookup.
        sender = str(ctx.author.id)
        sender_name = ctx.author.name
        wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(sender)
        if wallet == '':
            embedNoReg = discord.Embed(
                title="Click Here To Register!",
                url="https://app.fallenorder.xyz",
                description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
                color=0xFF1C0A,
            )
            await ctx.send(embed=embedNoReg)
        else:
            sender_balance = await get_balance(wallet, token_id)
            sender_balance_raffle = await get_balance(wallet, 815766197)
            if sender_balance == 0:
                await ctx.send(embed=embedErr, hidden=True)
            # BUG FIX: the balance must cover the total COST (5/50 per
            # ticket), not merely the ticket count.
            elif sender_balance < cost:
                await ctx.send(embed=embedErr, hidden=True)
            else:
                # Take payment, then deliver the tickets from the main wallet.
                txnid = await send_assets(sender_name, wallet, fallen_order_main, token_id, payment, cost)
                txnid2 = await send_assets("Fallen Order Raffles", fallen_order_main, wallet, 815766197, "RAFFLE", amount)
                new_sender_bal = sender_balance - cost
                new_sender_bal_raffle = sender_balance_raffle + amount
                embedPurchased = discord.Embed(
                    title=f"I have transformed {cost} ${payment} into {amount} $RAFFLE Tickets for <@{sender}>",
                    description=f"[Payment Txn](https://algoexplorer.io/tx/{txnid}) | [Receipt Txn](https://algoexplorer.io/tx/{txnid2})",
                    color=0xFFFB0A
                )
                embedPurchased.set_footer(text=f"New ${payment} Balance: {new_sender_bal}\nNew $RAFFLE Balance: {new_sender_bal_raffle}")
                embedPurchased.set_image(url="https://nft-media.algoexplorerapi.io/images/bafkreiabe7amkqwuz6kip7xnx6c5bx7v73bw2qofuaoqhu23nufrwfnn4e")
                await ctx.send(embed=embedPurchased)
                return
class BuyEXPCog(commands.Cog):
    """Slash command that swaps $ORDER for $EXP at a fixed 1:10 rate."""
    @cog_ext.cog_slash(name="orderexp", description="Swap $ORDER for $EXP", options=[
        {
            "name": "amount",
            "description": "Amount Of ORDER To Swap",
            "type": 4,
            "required": True
        }
    ])
    async def buy_tickets(self, ctx, amount):
        # NOTE(review): this handler shares the name `buy_tickets` with the
        # one in BuyTicketsCog; they live on different cogs so it works, but
        # a distinct name (e.g. swap_order_exp) would be clearer.
        """Swap `amount` $ORDER from the caller's wallet for 10x $EXP."""
        exp_amount = amount * 10
        sender = str(ctx.author.id)
        sender_name = ctx.author.name
        wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(sender)
        if wallet == '':
            # Unregistered user: point them at the verification site.
            embedNoReg = discord.Embed(
                title="Click Here To Register!",
                url="https://app.fallenorder.xyz",
                description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
                color=0xFF1C0A,
            )
            await ctx.send(embed=embedNoReg)
        else:
            sender_balance_exp = await get_balance(wallet, 811721471)
            sender_balance_order = await get_balance(wallet, 811718424)
            if sender_balance_order == 0:
                await ctx.send(embed=embedErr, hidden=True)
            elif sender_balance_order < amount:
                await ctx.send(embed=embedErr, hidden=True)
            else:
                # Collect the ORDER, then pay out the EXP from the main wallet.
                txnid = await send_assets(sender_name, wallet, fallen_order_main, 811718424, "ORDER", amount)
                txnid2 = await send_assets("Token Swap. The Order", fallen_order_main, wallet, 811721471, "EXP", exp_amount)
                new_sender_bal_order = sender_balance_order - amount
                new_sender_bal_exp = sender_balance_exp + exp_amount
                embedSwapped = discord.Embed(
                    title=f"I have swapped {amount} $ORDER to {exp_amount} $EXP on <@{sender}>'s behalf",
                    description=f"[Payment Txn](https://algoexplorer.io/tx/{txnid}) | [Receipt Txn](https://algoexplorer.io/tx/{txnid2})",
                    color=0xFFFB0A
                )
                embedSwapped.set_footer(text=f"New $ORDER Balance: {new_sender_bal_order}\nNew $EXP Balance: {new_sender_bal_exp}")
                embedSwapped.set_thumbnail(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
                await ctx.send(embed=embedSwapped)
class SendTokensCog(commands.Cog):
@cog_ext.cog_slash(name="send", description="Send EXP/ORDER/RAFFLE/Logs to other users", options=[
{
"name": "user",
"description": "Receiving User",
"type": 6,
"required": True
},
{
"name": "token",
"description": "Token To Send",
"type": 3,
"required": True,
"choices": [
{
"name": "EXP",
"value": "EXP"
},
{
"name": "RAFFLE",
"value": "RAFFLE"
},
{
"name": "ORDER",
"value": "ORDER"
},
{
"name": "Oak Logs",
"value": "Oak Logs"
}
]
},
{
"name": "amount",
"description": "Amount To Send",
"type": 4,
"required": True
}
])
async def send(self, ctx, user, token, amount):
if token == "ORDER":
token_id = 811718424
elif token == "EXP":
token_id = 811721471
elif token == "RAFFLE":
token_id = 815766197
elif token == "Oak Logs":
token_id = 1064863037
sender = str(ctx.author.id)
receiver = str(user.id)
receiver_name = user.name
sender_name = ctx.author.name
wallet1, name1, won1, lost1, expwon1, explost1, lastdrip1, drip_exp1 = await get_wallet(sender)
wallet2, name2, won2, lost2, expwon2, explost2, lastdrip1, drip_exp1 = await get_wallet(receiver)
if wallet1 == '' or wallet2 == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
else:
sender_balance = await get_balance(wallet1, token_id)
receiver_balance = await get_balance(wallet2, token_id)
if sender_balance == -1 or receiver_balance == -1:
if token == "ORDER":
await ctx.send(embed=embedNoOptORDER)
if token == "EXP":
await ctx.send(embed=embedNoOptEXP)
if token == "RAFFLE":
await ctx.send(embed=embedNoOptRAFFLE)
if token == "Oak Logs":
embedNoOpt = discord.Embed(
title=f"You are not opted into Oak Logs!",
description=f"Please [click here](https://www.randgallery.com/algo-collection/?address=1064863037) to opt in and try again...",
color=0xFF0000
)
embedNoOpt.set_thumbnail(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
await ctx.send(embed=embedNoOpt)
elif sender_balance == 0:
await ctx.send(embed=embedErr, hidden=True)
elif sender_balance < amount:
await ctx.send(embed=embedErr, hidden=True)
else:
if token == "Oak Logs":
txnid = await trade_logs(sender_name, wallet1, wallet2, 1064863037, amount)
else:
txnid = await send_assets(sender_name, wallet1, wallet2, token_id, token, amount)
new_sender_bal = sender_balance - amount
new_receiver_bal = receiver_balance + amount
embedSent = discord.Embed(
title=f"I have bestowed {amount} ${token} upon <@{receiver}>",
description=f"Sent By: <@{sender}> 💛 [Txn Link](https://algoexplorer.io/tx/{txnid})",
color=0xFFFB0A
)
embedSent.set_footer(text=f"{sender_name}'s New Balance: {new_sender_bal} ${token}\n{receiver_name}'s New Balance: {new_receiver_bal} ${token}")
await ctx.send(embed=embedSent)
class AdminSendCog(commands.Cog):
@cog_ext.cog_slash(name="admin-send", description="ADMIN ONLY! Send EXP/ORDER/RAFFLE to other users", options=[
{
"name": "sender",
"description": "Receiving Address",
"type": 3,
"required": True
},
{
"name": "receiver",
"description": "Receiving Address",
"type": 3,
"required": True
},
{
"name": "token",
"description": "Token To Send",
"type": 3,
"required": True,
"choices": [
{
"name": "EXP",
"value": "EXP"
},
{
"name": "RAFFLE",
"value": "RAFFLE"
},
{
"name": "ORDER",
"value": "ORDER"
}
]
},
{
"name": "amount",
"description": "Amount To Send",
"type": 4,
"required": True
}
])
async def admin_clawback(self, ctx, sender, receiver, token, amount):
if ctx.author.id != 805453439499894815:
await ctx.send(embed=embedAdminOnly)
return
else:
if token == "ORDER":
token_id = 811718424
elif token == "EXP":
token_id = 811721471
elif token == "RAFFLE":
token_id = 815766197
sender_balance = await get_balance(sender, token_id)
receiver_balance = await get_balance(receiver, token_id)
sender_short = sender[:5] + "..." + sender[-5:]
receiver_short = receiver[:5] + "..." + receiver[-5:]
if sender_balance == -1 or receiver_balance == -1:
if token == "ORDER":
await ctx.send(embed=embedNoOptORDER)
if token == "EXP":
await ctx.send(embed=embedNoOptEXP)
elif sender_balance == 0:
await ctx.send(embed=embedErr)
elif sender_balance < amount:
await ctx.send(embed=embedErr)
else:
new_sender_bal = sender_balance - amount
new_receiver_bal = receiver_balance + amount
txnid = await send_assets(sender_short, sender, receiver, token_id, token, amount)
embedSent = discord.Embed(
title=f"I have bestowed {amount} ${token} upon {receiver_short}",
description=f"Sent By: {sender_short} 💛 [Txn Link](https://algoexplorer.io/tx/{txnid})",
color=0xFFFB0A
)
embedSent.set_footer(text=f"{sender_short}'s New Balance: {new_sender_bal} ${token}\n{receiver_short}'s New Balance: {new_receiver_bal} ${token}")
await ctx.send(embed=embedSent)
class ManualSendTokensCog(commands.Cog):
@cog_ext.cog_slash(name="manual-send", description="Send EXP/ORDER/RAFFLE to a specific address!", options=[
{
"name": "address",
"description": "Receiving Wallet Address",
"type": 3,
"required": True
},
{
"name": "token",
"description": "Token To Send",
"type": 3,
"required": True,
"choices": [
{
"name": "EXP",
"value": "EXP"
},
{
"name": "RAFFLE",
"value": "RAFFLE"
},
{
"name": "ORDER",
"value": "ORDER"
}
]
},
{
"name": "amount",
"description": "Amount To Send",
"type": 4,
"required": True
}
])
async def manual_send(self, ctx, address, token, amount):
if token == "ORDER":
token_id = 811718424
elif token == "EXP":
token_id = 811721471
elif token == "RAFFLE":
token_id = 815766197
sender = str(ctx.author.id)
sender_name = ctx.author.name
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(sender)
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
else:
sender_balance = await get_balance(wallet, token_id)
receiver_balance = await get_balance(address, token_id)
if sender_balance == -1 or receiver_balance == -1:
if token == "ORDER":
await ctx.send(embed=embedNoOptORDER)
elif token == "EXP":
await ctx.send(embed=embedNoOptEXP)
else:
await ctx.send(embed=embedNoOptRAFFLE)
elif sender_balance == 0:
await ctx.send(embed=embedErr, hidden=True)
elif sender_balance < amount:
await ctx.send(embed=embedErr, hidden=True)
else:
txnid = await send_assets(sender_name, wallet, address, token_id, token, amount)
new_sender_bal = sender_balance - amount
embedSent = discord.Embed(
title=f"I have bestowed {amount} ${token} upon {address}",
description=f"Sent By: <@{sender}> 💛 [Txn Link](https://algoexplorer.io/tx/{txnid})",
color=0xFFFB0A
)
embedSent.set_footer(text=f"{sender_name}'s New Balance: {new_sender_bal} ${token}")
await ctx.send(embed=embedSent)
return
class BalanceCog(commands.Cog):
    """Slash command reporting a user's on-chain token and NFT holdings."""
    @cog_ext.cog_slash(name="balance", description="Check Your On Chain Balances!")
    async def get_all_balances(self, ctx):
        """Look up the caller's registered wallet and display its balances."""
        await ctx.defer()
        wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(str(ctx.author.id))
        if wallet == '':
            embedNoReg = discord.Embed(
                title="Click Here To Register!",
                url="https://app.fallenorder.xyz",
                description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
                color=0xFF1C0A,
            )
            await ctx.send(embed=embedNoReg)
            # BUG FIX: removed `game_active -= 1` — `game_active` is never
            # defined in this handler, so this path raised a NameError
            # before returning (copy-paste leftover from a game cog).
            return
        account_info = algod_client.account_info(wallet)
        assets = account_info.get("assets", [])
        # Holding counters, filled from the wallet's asset list below.
        ghostcount = 0
        ghosticoncount = 0
        wingcount = 0
        aoa = 0
        order = 0
        exp = 0
        raffle = 0
        ghost = 0
        fo_1count = 0
        fo_2count = 0
        fo_3count = 0
        fo_4count = 0
        fo_5count = 0
        for asset in assets:
            if asset["amount"] > 0 and asset["asset-id"] in angel_wings:
                wingcount = asset["amount"]
            if asset["asset-id"] == balance_list[0]:
                aoa = asset["amount"]
            if asset["asset-id"] == balance_list[1]:
                order = asset["amount"]
            if asset["asset-id"] == balance_list[2]:
                exp = asset["amount"]
            if asset["asset-id"] == balance_list[3]:
                raffle = asset["amount"]
            if asset["asset-id"] == balance_list[4]:
                # GHOST uses 4 decimals on-chain; convert to whole tokens.
                ghost = asset["amount"]/10000
            if asset["amount"] > 0 and asset["asset-id"] in fo_rank1:
                fo_1count += 1
            if asset["amount"] > 0 and asset["asset-id"] in fo_rank2:
                fo_2count += 1
            if asset["amount"] > 0 and asset["asset-id"] in fo_rank3:
                fo_3count += 1
            if asset["amount"] > 0 and asset["asset-id"] in fo_rank4:
                fo_4count += 1
            if asset["amount"] > 0 and asset["asset-id"] in fo_rank5:
                fo_5count += 1
            if asset["amount"] > 0 and asset["asset-id"] in ghosts:
                ghostcount += 1
            if asset["amount"] > 0 and asset["asset-id"] in ghostsIcons:
                ghosticoncount += 1
        balances = [aoa, order, exp, raffle, ghost]
        balances_formatted = []
        # Human-readable suffix formatting.  BUG FIX: the original chain had a
        # dead duplicate `>= 1000000000` branch that divided by 1e6 with an
        # "M" suffix; it could never be reached and has been removed.
        for balance in balances:
            if balance >= 1000000000:
                formatted_bal = f"{balance / 1000000000:.3f}B"
            elif balance >= 1000000:
                formatted_bal = f"{balance / 1000000:.3f}M"
            elif balance >= 1000:
                formatted_bal = f"{balance / 1000:.3f}K"
            else:
                formatted_bal = str(balance)
            balances_formatted.append(formatted_bal)
        embedBalances = discord.Embed(
            title=f"Current Holdings - {ctx.author.name}",
            url=f"https://algoexplorer.io/address/{wallet}",
            color=0xFCE303
        )
        embedBalances.add_field(name=f"AoA", value=f"{balances_formatted[0]}", inline=False)
        embedBalances.add_field(name=f"ORDER", value=f"{balances_formatted[1]}", inline=False)
        embedBalances.add_field(name=f"EXP", value=f"{balances_formatted[2]}", inline=False)
        embedBalances.add_field(name=f"RAFFLE", value=f"{balances_formatted[3]} Tickets", inline=False)
        embedBalances.add_field(name=f"GHOST", value=f"{balances_formatted[4]}", inline=False)
        embedBalances.add_field(name=f"Angel Wings", value=f"{wingcount}", inline=False)
        embedBalances.add_field(name=f"Fallen Order", value=f"{fo_1count} Angel | {fo_2count} Celestial | {fo_3count} Ethereal | {fo_4count} Empyreal | {fo_5count} Immortal ", inline=False)
        embedBalances.add_field(name=f"Ghosts Of Algo", value=f"{ghostcount} Ghosties | {ghosticoncount} Icon", inline=False)
        embedBalances.set_thumbnail(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
        embedBalances.set_footer(text=f"*Holdings displayed are on chain and real time*", icon_url="https://s3.amazonaws.com/algorand-wallet-mainnet-thumbnails/prism-images/media/asset_verification_requests_logo_png/2022/06/22/d2c56a8e61244bd78017e38180d15c91.png--resize--w__200--q__70.webp")
        await ctx.send(embed=embedBalances)
def setup(client: client):
    """Register every cog in this module on the given bot client.

    NOTE(review): the annotation `client: client` annotates the parameter
    with the bot *instance* imported above, not a class — presumably
    discord.Client (or commands.Bot) was intended; confirm before changing.
    """
    client.add_cog(DripCog(client))
    client.add_cog(WingRevenueCog(client))
    client.add_cog(StakingCog(client))
    client.add_cog(BuyTicketsCog(client))
    client.add_cog(BuyEXPCog(client))
    client.add_cog(SendTokensCog(client))
    client.add_cog(AdminSendCog(client))
    client.add_cog(ManualSendTokensCog(client))
    client.add_cog(BalanceCog(client))
| AngelsOfAres/Fallen-Order-Keepers | c_heimdall/transfers.py | transfers.py | py | 32,658 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "client.client.get_guild",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "client.client",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "d... |
26833816694 | # https://pypi.org/project/firebirdsql/
# pip install firebirdsql
import firebirdsql
from decouple import config
# pip install mysql-connector-python
import mysql.connector
import re
import os
try:
    # MySQL connection for the production site; credentials come from the
    # environment via python-decouple.
    con_mysql = mysql.connector.connect(
        host=config("host_"),
        user=config("user_"),
        password=config("password_"),
        database=config("database_"))
    print("Database connection Mysql made!")
    cursor_mysql = con_mysql.cursor()
    cursor_mysql.execute("""SELECT cpf_cnpj, data_uso
                             FROM core_cliente""")
    t_cli = cursor_mysql.fetchall()
    dt_new = input('Por favor digite a data de vencimento(aaaa-mm-dd): ')  # aaaa-mm-dd
    for cpf_cnpj, dtus in t_cli:
        print(cpf_cnpj, "Data Uso: ", dtus)
        print('Atualizando: data_uso')
        # BUG FIX: parameterized query instead of f-string interpolation —
        # the previous version was open to SQL injection and quoting bugs.
        cursor_mysql.execute(
            "UPDATE core_cliente SET data_uso = %s WHERE cpf_cnpj = %s",
            (dt_new, cpf_cnpj))
    con_mysql.commit()
    con_mysql.close()
    # Original behavior kept: terminate the process once the update is done.
    os._exit(1)
except mysql.connector.Error:
    # BUG FIX: the previous `except ValueError` could never catch connector
    # failures, and its `else:` branch (unreachable after os._exit) closed
    # the connection again and referenced an undefined `con_fire`.
    print('Error database')
| sistemadevsys/db_firebird_mysql | update_clientes.py | update_clientes.py | py | 1,549 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 20,
"usage_type": "name"
},
{
"... |
19663543751 | import tensorflow as tf, numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from baselines.bench.monitor import load_results
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo/train'
df_train = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo/test'
df_test = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/train/cpo_d_is_1'
df_train_d_is_1 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/test/cpo_d_is_1'
df_test_d_is_1 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/train/cpo_d_is_2'
df_train_d_is_2 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/test/cpo_d_is_2'
df_test_d_is_2 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/sp/sp_returns.txt'
f_sp = np.loadtxt(logger_dir)
xmax = 3000000
rolling_window = 365*1
rolling_reward = pd.Series(df_train["r"]).rolling(rolling_window)
rolling_reward = rolling_reward.mean().values[rolling_window-1:]
rolling_safety = pd.Series(df_train["s"]).rolling(rolling_window)
rolling_safety = rolling_safety.mean().values[rolling_window-1:]
linestyle_str = [
('solid', 'solid'), # Same as (0, ()) or '-'
('dotted', 'dotted'), # Same as (0, (1, 1)) or '.'
('dashed', 'dashed'), # Same as '--'
('dashdot', 'dashdot')] # Same as '-.'
linestyle_tuple = dict([
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
d = np.sum(f_sp)
cpo_r = "{0:.2f}%".format(np.mean((d+df_test["r"].sum())/d) * 100)
cpo_r_d_is_1 = "{0:.2f}%".format(np.mean((d+df_test_d_is_1["r"].sum())/d) * 100)
cpo_r_d_is_2 = "{0:.2f}%".format(np.mean((d+df_test_d_is_2["r"].sum())/d) * 100)
fig = plt.figure(figsize=(10,7))
ax = plt.subplot(111)
plt.plot(np.cumsum(f_sp), label='SP', linewidth=3.0, marker='*', markersize=10, markevery=20, color='#ff7f0e')
plt.plot(np.cumsum(-df_test["r"]), label=r'$d=0.1$', linewidth=3.0)
plt.plot(np.cumsum(-df_test_d_is_1["r"]), label=r'$d=1$', marker='v', markersize=7, markevery=20, linewidth=3.0, color='#2ca02c')
plt.plot(np.cumsum(-df_test_d_is_2["r"]), label=r'$d=2$', linewidth=3.0, linestyle='dashed', marker='X', markersize=8, markevery=20, color='#9467bd')
plt.xlim(0, 365)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('Day', fontsize=20)
plt.ylabel('Cumulative Costs ($)', fontsize=20)
plt.legend(fontsize=20)
ax.text(368, np.around(np.sum(-df_test["r"])+3,2), cpo_r, style='italic', fontsize='x-large',
bbox={'facecolor': '#1f77b4', 'alpha': 0.5, 'pad': 5})
ax.text(368, np.around(np.sum(-df_test_d_is_1["r"]),2), cpo_r_d_is_1, style='italic', fontsize='x-large',
bbox={'facecolor': '#2ca02c', 'alpha': 0.5, 'pad': 5})
ax.text(368, np.around(np.sum(-df_test_d_is_2["r"])-6,2), cpo_r_d_is_2, style='italic', fontsize='x-large',
bbox={'facecolor': '#9467bd', 'alpha': 0.5, 'pad': 5})
axh = ax.axhline(y=np.sum(-df_test["r"]))
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
axh = ax.axhline(y=np.sum(-df_test_d_is_1["r"]))
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
axh = ax.axhline(y=np.sum(-df_test_d_is_2["r"]))
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
ax.yaxis.set_label_coords(-0.11,0.5)
plt.tight_layout(rect=(0,0,1,1))
plt.show(block=False)
d = 0.1
cpo_v_01 = "{0:.2f}%".format(np.mean(np.maximum(0, df_test["s"].values-d)/d) * 100)
d = 1.0
cpo_v_1 = "{0:.2f}%".format(np.mean(np.maximum(0, df_test_d_is_1["s"].values-d)/d) * 100)
d = 2.0
cpo_v_2 = "{0:.2f}%".format(np.mean(np.maximum(0, df_test_d_is_2["s"].values-d)/d) * 100)
fig = plt.figure(figsize=(10,7))
ax = plt.subplot(111)
plt.plot(np.cumsum(df_test["s"]), label=r'$d=0.1$', linewidth=3.0)
plt.plot(np.cumsum(df_test_d_is_1["s"]), label=r'$d=1$', marker='v', markersize=7, markevery=20, linewidth=3.0, color='#2ca02c')
plt.plot(np.cumsum(df_test_d_is_2["s"]), label=r'$d=2$', marker='X', markersize=8, markevery=20, linewidth=3.0, linestyle='dashed', color='#9467bd')
plt.xlim(0, 365)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('Day', fontsize=20)
plt.ylabel('Cumulative Constraint Values (kWh)', fontsize=20)
ax.text(370, 70, cpo_v_01, style='italic', fontsize='x-large',
bbox={'facecolor': '#1f77b4', 'alpha': 0.5, 'pad': 7})
ax.text(370, 420, cpo_v_1, style='italic', fontsize='x-large',
bbox={'facecolor': '#2ca02c', 'alpha': 0.5, 'pad': 7})
ax.text(370, 780, cpo_v_2, style='italic', fontsize='x-large',
bbox={'facecolor': '#9467bd', 'alpha': 0.5, 'pad': 7})
axh = ax.axhline(y=399)
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
axh1 = ax.axhline(y=780)
axh1.set_linestyle('--')
axh1.set_color('#7f7f7f')
plt.legend(fontsize=20)
plt.tight_layout(rect=(0,0,1,1))
plt.show(block=True) | liudading/tmp | ev/plot_d.py | plot_d.py | py | 5,409 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "baselines.bench.monitor.load_results",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "baselines.bench.monitor.load_results",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "baselines.bench.monitor.load_results",
"line_number": 14,
"usage... |
40095183195 | from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime
from Tecnocasa.tecnocasa_main import scrape_tecnocasa_url
# Default task arguments shared by every task in this DAG.
default_args = {
    'start_date': datetime(2023, 1, 1),
    'retries': 1,
}
# schedule_interval=None: the DAG runs only when triggered manually.
dag = DAG('AIHOME', default_args=default_args, schedule_interval=None)
# No-op entry point anchoring the task graph.
start = DummyOperator(task_id='start', dag=dag)
# Runs the Tecnocasa scraper imported from Tecnocasa.tecnocasa_main.
Tecnocasa_scrapper = PythonOperator(
    task_id='Scrapper_tecnocasa',
    python_callable=scrape_tecnocasa_url,
    dag=dag,
)
# start must finish before the scraper task runs.
start >> Tecnocasa_scrapper
# Allows running this file directly to access Airflow's DAG CLI for debugging.
if __name__ == "__main__":
    dag.cli()
| pasqualepescina/AIHome | dags/dag.py | dag.py | py | 630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "airflow.operators.dummy_operator.DummyOperator",
"line_number": 14,
"usage_type": "call"
},
{
"api_n... |
8757517027 | from urllib3.exceptions import ProtocolError, ReadTimeoutError
import tweepy
import dataset
import json
from tweepy import StreamListener
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
from models import *
import pandas as pd
import numpy as np
from config import *
engine = create_engine('postgresql://paulinazheng:@localhost5432/flu')
Session = sessionmaker(bind=engine)
session = Session()
analyser = SentimentIntensityAnalyzer()
def sentiment_score(text):
    """Return VADER polarity scores for *text*.

    Returns the analyzer's dict with 'neg'/'neu'/'pos'/'compound' keys,
    using the module-level SentimentIntensityAnalyzer instance.
    """
    return analyser.polarity_scores(text)
api = tweepy.API(auth_r)
def calculate_centroid(box):
    """Return the (lat, long) midpoint of a Twitter place bounding box.

    *box* is a list of [long, lat] corner pairs, as produced by
    status.place.bounding_box.coordinates[0].
    """
    lat_mid = (box[0][1] + box[1][1]) / 2
    long_mid = (box[1][0] + box[2][0]) / 2
    return lat_mid, long_mid
LOCATION = [-164.639405, 18.776344, -66.947028, 71.76871]
cities = [('New York', 40.7127837, -74.00594129999999),
('Los Angeles', 34.0522342, -118.24368490000002),
('Chicago', 41.8781136, -87.62979820000001),
('Houston', 29.7604267, -95.36980279999999),
('Philadelphia', 39.9525839, -75.1652215),
('Phoenix', 33.4483771, -112.07403729999999),
('San Antonio', 29.4241219, -98.4936282),
('San Diego', 32.715738, -117.1610838),
('Dallas', 32.7766642, -96.7969879),
('San Jose', 37.338208200000004, -121.88632859999998),
('Austin', 30.267153000000004, -97.7430608),
('Indianapolis', 39.768403, -86.158068),
('Jacksonville', 30.3321838, -81.65565099999998),
('San Francisco', 37.7749295, -122.4194155),
('Columbus', 39.9611755, -82.99879419999998),
('Charlotte', 35.2270869, -80.8431267),
('Fort Worth', 32.7554883, -97.3307658),
('Detroit', 42.331427000000005, -83.0457538),
('El Paso', 31.7775757, -106.44245590000001),
('Memphis', 35.1495343, -90.0489801),
('Seattle', 47.6062095, -122.33207079999998),
('Denver', 39.739235799999996, -104.990251),
('Washington', 38.9071923, -77.03687070000001),
('Boston', 42.360082500000004, -71.0588801),
('Nashville-Davidson', 36.1626638, -86.78160159999999),
('Baltimore', 39.2903848, -76.6121893),
('Oklahoma City', 35.4675602, -97.5164276),
('Louisville/Jefferson County', 38.252664700000004, -85.7584557),
('Portland', 45.523062200000005, -122.67648159999999),
('Las Vegas', 36.169941200000004, -115.13982959999998)]
def add_item(item):
    """Persist a model instance: add it to the session and commit.

    NOTE(review): this uses a module-level `db` (presumably star-imported
    from models); the `session` created above from sessionmaker appears
    unused here - confirm which session is intended.
    """
    db.session.add(item)
    db.session.commit()
def find_closest_city(centroid_lat, centroid_long, cities=cities):
    """Return the (name, lat, long) tuple from *cities* nearest the point.

    Distance is plain Euclidean distance in degree space (no great-circle
    correction), matching the original heuristic; ties keep the earlier
    city in the list, as before.

    Bug fix: the original returned an unbound local (`closest`) whenever
    no city fell under a hard-coded distance of 10000, raising
    UnboundLocalError. The arbitrary threshold is dropped and an empty
    list now fails with an explicit error instead.
    """
    if not cities:
        raise ValueError("cities must be a non-empty sequence")
    return min(
        cities,
        key=lambda c: np.hypot(c[1] - centroid_lat, c[2] - centroid_long),
    )
def get_city_id(lat, long):
    """Return the DB id of the major city closest to (lat, long).

    Creates the City row on first sight, otherwise looks it up by name.
    NOTE(review): `city.id` is read immediately after add_item(); this
    presumes the commit inside add_item populates the primary key - verify.
    """
    closest = find_closest_city(lat, long, cities=cities)
    # Membership test loads every city name; fine for the fixed 30-city list.
    if closest[0] not in [city.name for city in City.query.all()]:
        city = City(name=closest[0], lat=closest[1], long=closest[2])
        add_item(city)
        city_id = city.id
    else:
        city = City.query.filter_by(name = closest[0]).all()
        city_id=city[0].id
    return city_id
def get_or_create_user(user_id, location):
    """Fetch the User row for *user_id*, creating it if absent.

    *location* is the free-form profile location string; it is only used
    when a new row is created (an existing row is returned unchanged).
    """
    user = User.query.filter_by(user_id=user_id).first()
    if user:
        return user
    else:
        user = User(user_id=user_id, location=location)
        add_item(user)
        return user
def get_or_create_tweet(user_id, location, twitter_id, created, centroid_lat, centroid_long, text, city_id):
    """Fetch the Tweet row for *twitter_id*, creating it if absent.

    On creation this also resolves/creates the owning User, computes
    VADER sentiment components (pos/neg/compound) and TextBlob polarity
    from *text*, and stores everything rounded to 4 decimals.
    """
    tweet = Tweet.query.filter_by(twitter_id=twitter_id).first()
    if tweet:
        return tweet
    else:
        user = get_or_create_user(user_id, location)
        sentiment = sentiment_score(text)
        positivity = round(sentiment['pos'], 4)
        negativity = round(sentiment['neg'], 4)
        compound = round(sentiment['compound'], 4)
        polarity = round((TextBlob(text)).sentiment.polarity, 4)
        tweet = Tweet(twitter_id=twitter_id, text=text, created=created, centroid_lat=centroid_lat,
        centroid_long=centroid_long, positivity=positivity, negativity=negativity, compound=compound,
        polarity=polarity, user_id=user.id, city_id=city_id)
        add_item(tweet)
        return tweet
class StreamListener(tweepy.StreamListener):
    """Streams tweets and persists English, geolocated ones to the DB.

    NOTE(review): this class shadows the `StreamListener` imported from
    tweepy at the top of the file; that import looks unused after this
    definition.
    """
    def on_connect(self):
        """Called once the stream connection is established."""
        print("Now we're saving from Twitter!")
    def on_status(self, status):
        """Persist an incoming tweet if it is English and has a place."""
        #avoids retweets, non-geolocated
        if status.retweeted:
            return
        if not status.place:
            return
        if status.lang == 'en':
            # Truncated tweets carry the full body in extended_tweet;
            # cap at 320 chars (presumably a DB column width - verify).
            if status.truncated == True:
                text = status.extended_tweet['full_text']
                if len(text) > 320:
                    text = text[:320]
            else:
                text=status.text
            id_str = status.id_str
            created = status.created_at
            # Approximate the tweet location by the bounding-box centroid.
            box = status.place.bounding_box.coordinates[0]
            centroid_lat, centroid_long = calculate_centroid(box)
            coords = status.coordinates
            if coords is not None:
                coords = json.dumps(coords)
            loc = status.user.location
            user_id = str(status.user.id)
            # Sentiment computed here is unused: get_or_create_tweet
            # recomputes the same values before storing.
            sentiment = sentiment_score(text)
            positivity = round(sentiment['pos'], 4)
            negativity = round(sentiment['neg'], 4)
            compound = round(sentiment['compound'], 4)
            polarity = round((TextBlob(text)).sentiment.polarity, 4)
            city_id = get_city_id(centroid_lat, centroid_long)
            get_or_create_tweet(user_id, loc, id_str, created, centroid_lat, centroid_long, text, city_id)
    def on_exception(self, exception):
        """Log stream exceptions without killing the listener."""
        print(exception)
        return
    def on_error(self, status_code):
        """Disconnect (return False) on HTTP 420 rate limiting."""
        if status_code == 420:
            return False
flu = ['flu', 'influenza', 'cough', 'fever', 'sore throat', 'headache',
'phlegm', 'runny nose', 'stuffy nose', 'Robitussin',
'dayquil', 'nyquil', 'tamiflu', 'vomit', 'body ache', 'mucinex',
'pneumonia', 'vomit', 'bodyache', 'medicine']
stream_listener = StreamListener()
stream = tweepy.Stream(auth=api.auth, listener=stream_listener)
while True:
try:
stream.filter(track=flu)
except (ProtocolError, AttributeError, ReadTimeoutError):
continue
test = stream.filter(track=flu)
| paulinaczheng/twitter_flu_tracking | twitter_package/tweet-stream.py | tweet-stream.py | py | 6,168 | python | en | code | 11 | github-code | 6 | [
{
"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 70,
"usage_type": "call"
},
{
"ap... |
32802746456 |
import gensim
import gensim.downloader as api
from gensim.models import Word2Vec as w2v
import inspect
import logging
import warnings
import numpy as np
from sklearn import *
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import pairwise_distances
import os
import pandas as pd
import matplotlib.pyplot as plt
from elasticsearch import Elasticsearch, helpers
import json
import time
''' To query '''
matching_query = { "query_string": {
"query" : None
}
}
def main():
global matching_query
muser = None #o user gia ton opoio 8a exw to id tou
#sundesh
es = Elasticsearch(host = "localhost", port = 9200)
#results["hits"]["hits"][0]["_score"] to score ka8e document
#results[0]["_source"] periexontai ta kleidia ths plhroforias
while 1:
#pairnw eisodo
mvar = str(input("Give a string : "))
res_sz = str(input(" *** \n(1 < #results && 10 000 > #results)n\ *** \nNumber of results : "))
mykappa = int(input("Number of clusters for kmeans : "))
maxIterations = int(input("Number of iterations for kmeans : "))
matching_query["query_string"]["query"] = str(mvar)
#searching ...
results = es.search(index="bx_books_2",query=matching_query,size = int(res_sz))
mcounter = 0 #gia na apari8mhsw to plh8os
results = results["hits"]["hits"]#ta apotelesmata moy
#pairnw ta kleidia
try :
lst = list(results[0]["_source"].keys())
except IndexError : #an paizei index error den exw parei apotelesmata, afou prospa8w na parw to 0 ke den to vriskw eimai se empty list
print("No results.\nSearch again.")
summaries = []
for res in results :
summaries.append(res["_source"]["summary"])
print(str(summaries))
warnings.filterwarnings('ignore')
#ratings_df = pd.read_csv('BX-Book-Ratings.csv')
#ratings_df = ratings_df.loc[ratings_df['uid']==uid]
#ratings_df['isbn'] = ratings_df['isbn'].map(lambda x: x.strip())
# print(len(ratings_df.isbn))
#books_df = pd.read_csv('BX-Books.csv')
# print(books_df.columns)
#books_df = books_df.drop(['book_author', 'year_of_publication', 'publisher', 'category'], axis='columns')
#books_df['isbn'] = books_df['isbn'].map(lambda x: x.strip())
mtuple= []
'''logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
corpus = api.load('text8')
print(inspect.getsource(corpus.__class__))
print(inspect.getfile(corpus.__class__))
model = w2v(corpus)
model.save('readyvocab.model')'''
#myratings = []
#i = 0
#for isbn in ratings_df.isbn:
# try:
# i+=1
# summaries.append(list2string(books_df[books_df['isbn']==str(isbn)].summary.values))
# if summaries[len(summaries)-1] == "":
# continue
# else:
# mtuple.append( (isbn , summaries[len(summaries)-1] ))
# myratings.append( ratings_df[ratings_df['isbn']==str(isbn)].rating.values[0])
# except:
# ratings_df.pop(i)
model = w2v.load('readyvocab.model')
processed_sentences = []
for sentence in summaries:
processed_sentences.append(gensim.utils.simple_preprocess(sentence))
# print(*processed_sentences, sep='\n')
vectors = {}
i = 0
for v in processed_sentences:
vectors[str(i)] = []
for k in v:
try:
vectors[str(i)].append(model.wv[k].mean())
except:
vectors[str(i)].append(np.nan)
i+=1
df_input = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in vectors.items() ]))
for i in range(0,len(vectors)):
df_input.fillna(value=0.0,inplace=True)
df_input[str(i)].replace(to_replace=0,value=df_input[str(i)].mean(),inplace=True )
processed = my_kmeans(df=df_input,k=mykappa,maxIterations=maxIterations)
#np.any(np.isnan(df_input))
#np.all(np.isfinite(df_input))
#X_train, X_test, y_train, y_test = train_test_split(X, y,shuffle=False)
'''pairnw tous titlous'''
titles = []
for res in results:
try:
titles.append(res["_source"]["book_title"])
#titles.append(list2string(books_df[books_df['isbn']==str(isbn)].book_title.values))
except:
pass#ratings_df.pop(i)
'''print tis klaseis'''
for myint in range(0,mykappa):#poses klaseis exw
mcounter = -1
print('\n'+5*"*"+" Klash : "+str(myint+1)+ " "+5*"*"+'\n')
for j in processed[1]:
mcounter+=1
if myint == j:#einai sthn idia klash
print(titles[mcounter])
else:
pass
def my_kmeans(df,k = 2,maxIterations=10):
#arxikopoihsh kentrweidwn
always_centroid = []
c1 = None
c2 = None
choose = np.random.randint(df.shape[1] , size=k)
my_centroids = []
for i in range(0,k):
my_centroids.append( df[str(choose[i])].values.tolist() )
always_centroid.append( df[str(choose[i])] )
#ta exw kanei lista
i = 0
to_centroid = []
for i in range(0,df.shape[1]):
if i in choose:
pass
else:
similarities = []
for j in range(0,len(my_centroids)):
#vazw tis omoiothtes se lista ke pairnw thn megaluterh apoluth timh
similarities.append( my_cosine_similarity(np.squeeze( np.asarray(my_centroids[j] ) ) ,np.squeeze( np.asarray(df[str(i)].values.tolist() ) ) ) )
#dialegw to megalutero similarity
best = 0
for j in range(0,len(similarities)):
if abs(similarities[j]) > best:
best = similarities[j]
#prepei na kanw ke ena pop
if len(to_centroid)-1 == i:#to plh8os twn stoixeiwn einai iso me to i panta!1 kentroeides gia ka8e perilhpsh
to_centroid.pop(len(to_centroid) -1)
#to dianusma 8a paei sto kentroeides tade
to_centroid.append(j)
iterations = -1
while iterations < maxIterations:
c1 = always_centroid#prin allaksei to kentroeides
iterations+=1
kappa = 0
#update centroids
for i in range(0,len(my_centroids)):#gia ka8e kedroeides
for j in range(0,len(to_centroid)):
#an eimai sto katallhlo kanw summ
if to_centroid[j] == i:
#kane sum
always_centroid[i] = always_centroid[i]+df[str(j)]
else:
pass
#sto telos pollaplasiazw ola ta stoixeia
always_centroid[i] = always_centroid[i]*(1/len(always_centroid[i]))
#ksanakanw thn diadikasia ?
my_centroids = []
for i in range(0,k):
my_centroids.append( always_centroid[i].values.tolist() )
#ta exw kanei lista
i = 0
to_centroid = []
for i in range(0,df.shape[1]):
if i in choose:
pass
else:
similarities = []
for j in range(0,len(my_centroids)):
#vazw tis omoiothtes se lista ke pairnw thn megaluterh apoluth timh
similarities.append( my_cosine_similarity(np.squeeze( np.asarray(my_centroids[j] ) ) ,np.squeeze( np.asarray(df[str(i)].values.tolist() ) ) ) )
#dialegw to megalutero similarity
best = 0
for j in range(0,len(similarities)):
if abs(similarities[j]) > best:
best = similarities[j]
#prepei na kanw ke ena pop
if len(to_centroid)-1 == i:#to plh8os twn stoixeiwn einai iso me to i panta!1 kentroeides gia ka8e perilhpsh
to_centroid.pop(len(to_centroid) - 1)
#to dianusma 8a paei sto kentroeides tade
#print(csimilarity)
to_centroid.append(j)
c2 = my_centroids
#an ta kedroeidh idia tote break
p = True
for i in range(0,k):
#print(str(c1[i]))
#print(str(c2[i]))
print("Finished in : "+ str(iterations) +" iterations .")
if c1[i].equals(c2[i]):
pass
else:
p = False
return (choose, to_centroid)
def my_cosine_similarity(arr1, arr2):
    """Return the cosine similarity between two numeric vectors.

    The dot product runs over the shorter of the two inputs while each
    magnitude uses its full vector, mirroring the call sites that always
    pass equal-length vectors.  Raises ZeroDivisionError for a zero vector.
    """
    inner = sum(u * v for u, v in zip(arr1, arr2))
    mag1 = sum(u * u for u in arr1) ** 0.5
    mag2 = sum(v * v for v in arr2) ** 0.5
    return inner / (mag1 * mag2)
def list2string(s):
    """Concatenate an iterable of strings into one string.

    Uses str.join instead of the original repeated `+=`, which is
    worst-case O(n^2); behavior is unchanged for iterables of str.
    """
    return "".join(s)
if __name__ == "__main__":
main()
| d4g10ur0s/InformationRetrieval_21_22 | paradotea/erwthma_4.py | erwthma_4.py | py | 9,321 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 98,
"usage_type": "call"
},
{
"a... |
17961034725 | from flask import render_template, redirect, url_for, request
from surv import app
from .storage import c, new_game, save_game, load_game
@app.route('/')
def home():
return redirect(url_for('new'))
@app.route('/new/')
def new():
new_game()
return redirect(url_for('list'))
@app.route('/list/')
def list():
return render_template('list.html',title='List',game=load_game())
@app.route('/schedule/')
def schedule():
return render_template('schedule.html',title="schedule",game=load_game())
@app.route('/event/<eventid>/')
def event(eventid):
g = load_game()
this_event = g.get_event(int(eventid))
return render_template('event.html',title='event',game=g,event=this_event)
@app.route('/run/<eventid>/')
def run(eventid):
g = load_game()
this_event = g.get_event(int(eventid))
if this_event.complete == False:
this_event.run(g)
save_game(g)
return render_template('event.html',title='event',game=g,event=this_event)
else:
return redirect(url_for('event',eventid=eventid))
@app.route('/tribe/<tribeid>/')
def tribe(tribeid):
g = load_game()
id = int(tribeid)
this_tribe = [x for x in g.tribes if x.id == id][0]
return render_template('tribe.html',title='tribe',game=g,tribe=this_tribe)
@app.route('/player/<playerid>/')
def player(playerid):
    """Render the page for a single player, looked up by integer id."""
    # Bug fix: `g` was referenced without ever loading the game state,
    # so this route raised NameError on every request.  Load it the same
    # way the sibling tribe() route does.
    g = load_game()
    id = int(playerid)
    this_player = [x for x in g.players if x.id == id][0]
    # NOTE(review): title='tribe' looks copy-pasted from tribe(); confirm
    # whether the template expects 'player' here.
    return render_template('player.html',title='tribe',game=g,player=this_player)
@app.route('/next/')
def next():
g = load_game()
g.run_next()
save_game(g)
old_url = request.referrer
return redirect(old_url)
@app.route('/story/')
def story():
g = load_game()
return render_template('story.html',title='Story',game=g)
@app.route('/sim/')
def sim():
g = load_game()
g.run_all()
save_game(g)
return redirect(url_for('story'))
| pkugelmass/survivor | surv/routes.py | routes.py | py | 1,892 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.redirect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "surv.app.route",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "surv.app",
"line_number"... |
34528771620 |
# https://medium.com/javarevisited/the-ultimate-guide-to-binary-trees-47112269e6fc
# There are two ways check both
#udacity course way
class Node(object):
    """A single binary-tree node: a stored value plus two child links."""

    def __init__(self, value):
        # Children start empty; the tree wires them up explicitly.
        self.left = None
        self.right = None
        self.value = value
class BinaryTree(object):
    """Binary tree supporting pre-order search and pre-order printing."""

    def __init__(self, root):
        self.root = Node(root)

    def search(self, find_val):
        """Return True if the value is in the tree, False otherwise."""
        # Bug fix: the original recursed from the module-level global
        # `tree` instead of this instance's own root, so any instance
        # other than the global demo one searched the wrong tree.
        return self.preorder_search(self.root, find_val)

    def print_tree(self):
        """Return all node values in pre-order, joined by '-'."""
        # Same fix as search(): start from self.root, not the global.
        return self.preorder_print(self.root, "")[:-1]

    def preorder_search(self, start, find_val):
        """Recursive pre-order search helper (node, then left, then right)."""
        if start:
            if start.value == find_val:
                return True
            else:
                return self.preorder_search(start.left, find_val) or self.preorder_search(start.right, find_val)
        return False

    def preorder_print(self, start, traversal):
        """Recursive pre-order print helper; accumulates 'val-' tokens."""
        if start:
            traversal += (str(start.value) + "-")
            traversal = self.preorder_print(start.left, traversal)
            traversal = self.preorder_print(start.right, traversal)
        return traversal
return traversal
# Set up tree
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
# Test search
# Should be True
print( tree.search(4))
# Should be False
print( tree.search(6))
# Test print(_tree
# Should be 1-2-4-5-3
print( tree.print_tree())
############## Grokking teh coding interview method #######################
from collections import deque
class TreeNode():
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
def traverse(root):
    """Level-order (BFS) traversal returning a list of per-level value lists.

    Runs in O(N) time and O(N) space for a tree of N nodes; an empty
    tree yields an empty list.
    """
    levels = []
    if root is None:
        return levels
    frontier = deque([root])
    while frontier:
        level_vals = []
        # Drain exactly the nodes that were queued for this level.
        for _ in range(len(frontier)):
            node = frontier.popleft()
            level_vals.append(node.val)
            if node.left:
                frontier.append(node.left)
            if node.right:
                frontier.append(node.right)
        levels.append(level_vals)
    return levels
# Time complexity #
# The time complexity of the above algorithm is O(N)O(N), where ‘N’ is the total number of nodes in the tree. This is due to the fact that we traverse each node once.
# Space complexity #
# The space complexity of the above algorithm will be O(N)O(N) as we need to return a list containing the level order traversal. We will also need O(N)O(N) space for the queue. Since we can have a maximum of N/2N/2 nodes at any level (this could happen only at the lowest level), therefore we will need O(N)O(N) space to store them in the queue.
if __name__ == "__main__":
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(8)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(11)
print("Level order traversal of binary tree is :\n", str(traverse(root))) | ved93/PythonPractice | data-strutures/binary_tree.py | binary_tree.py | py | 3,507 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 87,
"usage_type": "call"
}
] |
20831019932 | from django.urls import path
from posts.views import *
# Route table for the posts app; all views are star-imported from posts.views.
urlpatterns = [
    path("", MainPage.as_view(), name="main"),  # landing page / main feed
    path("follow_post/", FollowPost.as_view(), name="follow_post"),  # posts from followed users
    path("posts/<int:user_id>/", PostByUserId.as_view(), name="post"),  # posts by one user
    path("posts/view_post/<int:post_id>", ViewPost.as_view(), name="view_post"),  # post detail
    path("posts/view_post/<int:post_id>/upvote", upvote, name="upvote_post"),  # function-based vote view
    path("posts/view_post/<int:post_id>/downvote", downvote, name="downvote_post"),  # function-based vote view
    path("profile/add_post/", CreatePost.as_view(), name="create_post"),  # new-post form
    path("user_tags/", tag_format_json, name="tags"),  # presumably a JSON tag list - verify view
]
| YevheniiMorozov/social | gramm/posts/urls.py | urls.py | py | 632 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
24529853326 | import numpy as np
import torch
import torch.nn as nn
from lib.datasets.utils import class2angle
from utils import box_ops
import math
padsize = np.array([28.,11.],dtype=np.float32)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def affine_transform(pt, t):
    """Apply a 2x3 affine matrix *t* to the 2-D point *pt*.

    The point is lifted to homogeneous coordinates [x, y, 1] before the
    matrix product; the first two components of the result are returned.
    """
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
    projected = np.dot(t, homogeneous)
    return projected[:2]
def softget(weighted_depth, outputs_coord):
    """Bilinearly sample *weighted_depth* at a fractional (x, y) location.

    Args:
        weighted_depth: array indexable as [..., y, x] (H x W last dims).
        outputs_coord: (x, y) pair in pixel units, possibly fractional.

    Returns:
        The bilinear interpolation of the four surrounding pixels.

    Bug fix vs. the original: the high indices were taken with ceil(),
    so at integral coordinates (floor == ceil) each pair of weights
    evaluated to 1 and the sampled value was doubled; the same
    double-counting happened after clipping at the borders.  The high
    index is now floor+1 (clipped) and the fractional weights are
    clamped to [0, 1], so the weights always sum to exactly 1.  For
    interior fractional coordinates the result is unchanged.
    """
    h, w = weighted_depth.shape[-2], weighted_depth.shape[-1]
    pts_x = outputs_coord[0]
    pts_y = outputs_coord[1]

    x_low = int(np.clip(math.floor(pts_x), 0, w - 1))
    y_low = int(np.clip(math.floor(pts_y), 0, h - 1))
    x_high = min(x_low + 1, w - 1)
    y_high = min(y_low + 1, h - 1)

    # Fractional offsets, clamped so out-of-range coords never extrapolate.
    wx = min(max(pts_x - x_low, 0.0), 1.0)
    wy = min(max(pts_y - y_low, 0.0), 1.0)

    top = (1.0 - wx) * weighted_depth[..., y_low, x_low] + wx * weighted_depth[..., y_low, x_high]
    bottom = (1.0 - wx) * weighted_depth[..., y_high, x_low] + wx * weighted_depth[..., y_high, x_high]
    return (1.0 - wy) * top + wy * bottom
def decode_detections(dets, info, calibs, cls_mean_size, threshold,trans_inv,depthmaps,pre_denorm,weighted_depth):
'''
NOTE: THIS IS A NUMPY FUNCTION
input: dets, numpy array, shape in [batch x max_dets x dim]
input: img_info, dict, necessary information of input images
input: calibs, corresponding calibs for the input batch
output:
'''
results = {}
for i in range(dets.shape[0]): # batch
preds = []
for j in range(dets.shape[1]): # max_dets
cls_id = int(dets[i, j, 0])
score = dets[i, j, 1]
if score < threshold:
continue
# 2d bboxs decoding
x = dets[i, j, 2] * 928
y = dets[i, j, 3] * 512
w = dets[i, j, 4] * 928
h = dets[i, j, 5] * 512
#x = dets[i, j, 2] * info['img_size'][i][0]
#y = dets[i, j, 3] * info['img_size'][i][1]
#w = dets[i, j, 4] * info['img_size'][i][0]
#h = dets[i, j, 5] * info['img_size'][i][1]
bbox = np.array([x-w/2, y-h/2, x+w/2, y+h/2])
bbox[:2] = bbox[:2]-padsize
bbox[2:] =bbox[2:] -padsize
bbox[:2] = bbox[:2]*2.2#affine_transform(bbox[:2], trans_inv[i])
bbox[2:] = bbox[2:]*2.2#affine_transform(bbox[2:], trans_inv[i])
# 3d bboxs decoding
# depth decoding
depth_p = dets[i, j, 6]
# dimensions decoding
dimensions = dets[i, j, 31:34]
dimensions += cls_mean_size[int(cls_id)]
# positions decoding
#x3d = dets[i, j, 34]
#y3d = dets[i, j, 35]
size = np.array([928,512])
#size = torch.tensor(size).to(device)
pad = np.array([28,11])
#pad =torch.tensor(pad).to( device)
pad2 = np.array([2,1])
#pad2 =torch.tensor(pad2).to(device)
#coord =(dets[i, j, 34:36]*size-pad)/16+[2,1]
coord =(dets[i, j, 34:36]*size-pad)/16 # 本来图像的除以35.2 x,y
pts = np.array(coord)
w = 56
h = 31
pts_x = pts[0]
pts_x = np.clip(pts_x, a_min=0,a_max =w-1)
pts_y = pts[1]
pts_y = np.clip(pts_y , a_min=0,a_max = h-1)
denorm = pre_denorm[i] # 32,58,4
P = calibs[i].P2
#coord = np.array([i+2,j+1])
'''
d = denorm[int(pts_y)+1,int(pts_x)+2] #[16.69273 5.326345]
W =torch.tensor([[P[0,0]/35.2,0,P[0,2]/35.2-coord[0]],[0,P[1,1]/35.2,P[1,2]/35.2-coord[1]],[d[0],d[1],d[2]]])
result = torch.tensor([0,0,-d[3]]).reshape(-1,1)
W_inv = torch.inverse(W)
vvxyz = torch.mm(W_inv,result)
depth=vvxyz[2,0]
'''
'''
#print(coord.shape)
#weighteddepth = depthmaps[i]
coord = np.array([pts_x+2,pts_y+1])
weighteddepth = weighted_depth[i]
#weighteddepth = weighteddepth.transpose(1,0) #
weighteddepth = weighted_depth.cpu().numpy() # 32,58
depth = softget(weighteddepth,coord)
'''
x3d = dets[i, j, 34] * 928
y3d = dets[i, j, 35] * 512
x3d = x3d - padsize[0] # -28
y3d = y3d - padsize[1] #-11
xy = np.array([x3d, y3d])
xy= xy *2.2 #affine_transform(xy , trans_inv[i])
#xy= affine_transform(xy , trans_inv[i])
x3d = xy[0]
y3d = xy[1]
#x3d = dets[i, j, 34] * info['img_size'][i][0]
#y3d = dets[i, j, 35] * info['img_size'][i][1]
#locations = calibs[i].img_to_rect(x3d, y3d, depth).reshape(-1)
locations = calibs[i].img_to_rect(x3d, y3d, depth_p).reshape(-1)
#locations[1] += dimensions[0] / 2
# heading angle decoding
alpha = get_heading_angle(dets[i, j, 7:31])
ry = calibs[i].alpha2ry(alpha, x)
score = score * dets[i, j, -1]
preds.append([cls_id, alpha] + bbox.tolist() + dimensions.tolist() + locations.tolist() + [ry, score])
results[info['img_id'][i]] = preds
return results
def extract_dets_from_outputs(outputs, K=50, topk=50):
# get src outputs
# b, q, c
out_logits = outputs['pred_logits']
out_bbox = outputs['pred_boxes']
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), topk, dim=1)
# final scores
scores = topk_values
# final indexes
topk_boxes = (topk_indexes // out_logits.shape[2]).unsqueeze(-1)
# final labels
labels = topk_indexes % out_logits.shape[2]
heading = outputs['pred_angle']
size_3d = outputs['pred_3d_dim']
depth = outputs['pred_depth'][:, :, 0: 1]
sigma = outputs['pred_depth'][:, :, 1: 2]
sigma = torch.exp(-sigma)
# decode
boxes = torch.gather(out_bbox, 1, topk_boxes.repeat(1, 1, 6)) # b, q', 4
xs3d = boxes[:, :, 0: 1]
ys3d = boxes[:, :, 1: 2]
heading = torch.gather(heading, 1, topk_boxes.repeat(1, 1, 24))
depth = torch.gather(depth, 1, topk_boxes)
sigma = torch.gather(sigma, 1, topk_boxes)
size_3d = torch.gather(size_3d, 1, topk_boxes.repeat(1, 1, 3))
corner_2d = box_ops.box_cxcylrtb_to_xyxy(boxes)
xywh_2d = box_ops.box_xyxy_to_cxcywh(corner_2d)
size_2d = xywh_2d[:, :, 2: 4]
xs2d = xywh_2d[:, :, 0: 1]
ys2d = xywh_2d[:, :, 1: 2]
batch = out_logits.shape[0]
labels = labels.view(batch, -1, 1)
scores = scores.view(batch, -1, 1)
xs2d = xs2d.view(batch, -1, 1)
ys2d = ys2d.view(batch, -1, 1)
xs3d = xs3d.view(batch, -1, 1)
ys3d = ys3d.view(batch, -1, 1)
detections = torch.cat([labels.float(), scores, xs2d, ys2d, size_2d, depth, heading, size_3d, xs3d, ys3d, sigma], dim=2)
#detections = torch.cat([labels.float(), scores, xs2d, ys2d, size_2d, heading, size_3d, xs3d, ys3d ], dim=2)
return detections
############### auxiliary function ############
def _nms(heatmap, kernel=3):
    """Suppress non-peak heatmap cells.

    A cell survives only if it equals the maximum of its kernel x kernel
    neighborhood (padded max-pool keeps the spatial size); everything
    else is zeroed out.
    """
    pad = (kernel - 1) // 2
    local_max = nn.functional.max_pool2d(
        heatmap, (kernel, kernel), stride=1, padding=pad)
    peak_mask = (local_max == heatmap).float()
    return heatmap * peak_mask
def _topk(heatmap, K=50):
    """Pick the K highest-scoring peaks across a B x C x H x W heatmap.

    Returns (scores, flat_indices, class_ids, xs, ys), each shaped B x K.
    Per-class top-K is taken first, then a global top-K over the B x (C*K)
    candidates; class id is recovered as candidate_index // K.
    NOTE(review): `/` on integer tensors followed by .int() relies on
    truncating division semantics - confirm against the torch version in use.
    """
    batch, cat, height, width = heatmap.size()
    # batch * cls_ids * 50
    topk_scores, topk_inds = torch.topk(heatmap.view(batch, cat, -1), K)
    # Reduce flat per-class indices to positions within one H*W plane.
    topk_inds = topk_inds % (height * width)
    topk_ys = (topk_inds / width).int().float()
    topk_xs = (topk_inds % width).int().float()
    # batch * cls_ids * 50
    topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    topk_cls_ids = (topk_ind / K).int()
    # Re-gather coordinates for the globally selected candidates.
    topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
    return topk_score, topk_inds, topk_cls_ids, topk_xs, topk_ys
def _gather_feat(feat, ind, mask=None):
    """Select rows of a flattened feature map.

    Args:
        feat: tensor shaped B x (H*W) x C
        ind: tensor shaped B x K with indices into the (H*W) axis
        mask: optional B x K boolean tensor; when given, the gathered
            rows are filtered down to sum(mask) rows

    Returns:
        B x K x C tensor, or sum(mask) x C when mask is given.
    """
    channels = feat.size(2)
    expanded = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), channels)
    gathered = feat.gather(1, expanded)
    if mask is None:
        return gathered
    mask = mask.unsqueeze(2).expand_as(gathered)
    return gathered[mask].view(-1, channels)
def _transpose_and_gather_feat(feat, ind):
    """Gather per-index feature vectors from a B x C x H x W map.

    The map is reshaped to B x (H*W) x C so _gather_feat can select the
    rows addressed by *ind* (shape B x K), yielding B x K x C.
    """
    channels_last = feat.permute(0, 2, 3, 1).contiguous()
    flattened = channels_last.view(channels_last.size(0), -1, channels_last.size(3))
    return _gather_feat(flattened, ind)
def get_heading_angle(heading):
    """Decode a 24-dim heading vector (12 bin scores + 12 residuals).

    The highest-scoring bin is selected and converted to an angle via
    class2angle together with its matching residual.
    """
    bin_scores = heading[0:12]
    residuals = heading[12:24]
    best_bin = np.argmax(bin_scores)
    return class2angle(best_bin, residuals[best_bin], to_label_format=True)
| HIYYJX/MonoGAE | lib/helpers/decode_helper.py | decode_helper.py | py | 10,233 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
... |
8561287023 | import requests
import json
from typing import Dict, List
from config.config_secret import secret_file_path
class Auth:
@classmethod
def get_secret(cls, secret_types: List[str]) -> Dict[str,str]:
"""json 파일로부터 요구되는 시크릿키 목록을 읽어 반환한다.
Args:
secret_types : 요구되는 시크릿 키 이름들을 담은 배열
Returns:
요구되는 시크릿 키 정보를 담은 딕셔너리
example:
{
"access_token" : "86289e71b93e7d9f67f4dcfbe69bc44d"
"client_id" : "86289e71b93e7d9f67f4dcfbe6123w4d"
}
"""
with open(secret_file_path, "r") as json_file:
secret_info = json.load(json_file)
return dict(filter(
lambda secret: True if secret[0] in secret_types else False, secret_info.items()))
@classmethod
def save_token(cls, access_token: str) -> None:
"""액세스 토큰 정보를 받아 json 파일에 저장한다.
Args:
access_token (str) : 발급한 액세스 토큰
"""
with open(secret_file_path, "r") as json_file:
secret_info = json.load(json_file)
secret_info["access_token"] = access_token
with open(secret_file_path, "w") as outfile:
json.dump(secret_info, outfile, indent=4)
@classmethod
def get_access_token_info(cls) -> Dict[str, str]:
"""액세스 토큰의 만료여부, 유효기간 등 정보를 확인한다.
Returns:
엑세스 토큰 관련 정보를 담은 딕셔너리
example:
{
'id': 2110957569,
'expiresInMillis': 14132012,
'expires_in': 14132,
'app_id': 701835,
'appId': 701835
}
"""
access_token = cls.get_access_token()
url = "https://kapi.kakao.com/v1/user/access_token_info"
headers = {
"Authorization": f"Bearer {access_token}"
}
response = requests.get(url=url, headers=headers)
if response.status_code == 200:
return response.json()
@classmethod
def update_token(cls) -> None:
"""액세스 토큰과 리프레시 토큰을 갱신한다.
"""
secret = cls.get_secret(['client_id','refresh_token'])
url = "https://kauth.kakao.com/oauth/token"
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
data = {
"grant_type": "refresh_token",
"client_id": f"{secret['client_id']}",
"refresh_token": f"{secret['refresh_token']}"
}
response = requests.post(url=url, headers=headers, data=data)
if response.status_code == 200:
token_info = response.json()
# 리프레시 토큰 값은 만료 기간이 1개월 미만으로 남았을 때 갱신되어 전달되기 때문에 응답에 리프레시 토큰이 있는지 확인한다.
# https://developers.kakao.com/docs/latest/ko/kakaologin/rest-api 참고
if "refresh_token" in token_info:
cls.save_token(token_info['refresh_token'])
cls.save_token(token_info['access_token'])
else:
print(f"request failed with status: {response.status_code}")
    @classmethod
    def get_tokens(cls) -> Dict[str, str]:
        """Exchange the stored (one-time) authorization code for tokens.

        The authorization code must be re-issued through a web browser before
        re-running this.
        See https://developers.kakao.com/docs/latest/ko/kakaologin/rest-api#request-code

        Returns:
            Dict[str,str] : e.g.
            {
                'access_token': 'zmQou5uWoCpFNkfuu4N2-R5eZAUpMYTVqHHi_Qopb9UAAAF-97vVNg',
                'token_type': 'bearer',
                'refresh_token': 'QhBJVrzDpsZU3mteae0xikZR5ob1bQ1CQ8_YAwopb9UAAAF-97vVNQ',
                'expires_in': 21599,
                'scope': 'account_email profile_image talk_message profile_nickname',
                'refresh_token_expires_in': 5183999
            }
        """
        url = "https://kauth.kakao.com/oauth/token"
        secret = cls.get_secret(["code"])
        headers = {
            "Content-type": "application/x-www-form-urlencoded;charset=utf-8"
        }
        data = {
            "grant_type": "authorization_code",
            # NOTE(review): hardcoded client_id -- consider reading it from
            # the secrets file like the other methods do.
            "client_id": "86289e71b93e7d9f67f4dcfbe69bc44d",
            "redirect_uri": "http://localhost:3000",
            # One-time authorization code
            "code": f"{secret['code']}"
        }
        response = requests.post(url=url, headers=headers, data=data)
        return response.json()
# 직접 실행시 토큰 정보 확인
if __name__ == "__main__":
print(Auth.get_tokens())
| Ywoosang/Dossa-Notification | app/auth/auth.py | auth.py | py | 4,905 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "config.config_secret.secret_file_path",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "json.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "typing... |
14958626079 | # -*- coding: utf-8 -*-
from datetime import datetime, date
import logging
import re
ERROR_PARSING_DATE = "Error parsing date"
def from_iso_format(string):
    """Build a datetime.date from a 'YYYY-MM-DD' string.

    Raises ValueError when the string does not have exactly three
    dash-separated numeric fields.
    """
    # Unpacking raises ValueError for anything other than three fields,
    # and int() raises ValueError for non-numeric fields.
    year, month, day = (int(field) for field in string.split("-"))
    return date(year, month, day)
def datetime_with_microsecond(string):
    """Parse a timestamp with a fractional-seconds field into a datetime.

    Python2 does not have a way of parsing date formats, so the string is
    split on every non-digit run instead. Deprecate this once Python2
    support is dropped.

    Returns None (after logging a warning, unless the root logger has
    propagation disabled) when the string has fewer than seven numeric
    fields or any field fails to parse.
    """
    fields = re.split("[^0-9]", string)
    if len(fields) < 7:
        if logging.getLogger().propagate:
            logging.warning(ERROR_PARSING_DATE)
        return None
    try:
        # Fraction of a second, rounded to whole microseconds.
        microsecond = int(round(float("0." + fields[6]) * 1e6))
        year, month, day, hour, minute, second = (int(f) for f in fields[:6])
        return datetime(year, month, day, hour, minute, second, microsecond)
    except ValueError:
        if logging.getLogger().propagate:
            logging.warning(ERROR_PARSING_DATE)
        return None
| getyoti/yoti-python-sdk | yoti_python_sdk/date_parser.py | date_parser.py | py | 1,157 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_... |
22648817021 | import matplotlib.pyplot as plt
from scipy.io import loadmat
import numpy as np
import pandas
from os.path import join,exists
from os import mkdir
import cv2
import math
import os
rho = 1
actions = ['No-Action', 'sweeping', 'gargling', 'opening cupboard', 'washing hands', 'eating', 'writing', 'wiping',
'drinking','opening microwave oven', 'Throwing trash']
def get_skeleton(skeleton):
    # Visualize a single 25-joint skeleton frame as a 3-D stick figure.
    # NOTE: the x column of `skeleton` is negated IN PLACE (mirrors the pose).
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    skeleton[:,0]=-skeleton[:,0]
    # Bone connectivity: pairs of joint indices to join with grey segments.
    lines=[[0,1],[1,2],[2,3],[3,4],[21,22],[22,23],[23,24],[21,4],[4,20],[20,11],[11,13],[13,12],[10,11],[14,11]
    ,[10,9],[9,8],[8,7],[7,5],[5,6],[14,15],[15,16],[16,17],[17,19],[18,19]]
    for a,b in lines:
        ax.plot3D([skeleton[a][0],skeleton[b][0]], [skeleton[a][1],skeleton[b][1]], [skeleton[a][2],skeleton[b][2]], 'gray')
    ax.scatter3D(skeleton[:,0], skeleton[:,1], skeleton[:,2] ,c=skeleton[:,2])
    ax = plt.gca()
    # Pad the axis limits slightly around the joint extents.
    xmin,xmax=min(skeleton[:,0])-0.25,max(skeleton[:,0])+0.25
    ymin,ymax=min(skeleton[:,1])-0.25,max(skeleton[:,1])+0.25
    zmin,zmax=min(skeleton[:,2])-0.25,max(skeleton[:,2])+0.25
    ax.set_xlim([xmin, xmax])
    ax.set_ylim([ymin, ymax])
    ax.set_zlim([zmin, zmax])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    # Camera angle chosen so the figure appears upright.
    ax.view_init(elev=-75, azim=90)
    ax.set_axis_off()
    plt.tight_layout()
    plt.show()
# Index of each named joint inside the raw 25-joint file layout
# (raw order: spine_base, spine_mid, neck, head, shoulder_l, elbow_l, wrist_l,
#  hand_l, shoulder_r, elbow_r, wrist_r, hand_r, hip_l, knee_l, ankle_l,
#  foot_l, hip_r, knee_r, ankle_r, foot_r, spine_shoulder, handtip_l,
#  thumb_l, handtip_r, thumb_r).
_HUMAN_ORDER = [3, 2, 20, 4, 8, 5, 9, 6, 10, 22, 24, 7, 11, 21, 23,
                1, 0, 12, 16, 13, 17, 14, 18, 15, 19]
_FOOT_TO_FOOT_ORDER = [15, 14, 13, 12, 0, 21, 22, 7, 6, 5, 4, 20, 3, 2,
                       8, 9, 10, 11, 24, 23, 1, 16, 17, 18, 19]
def get_data(file, type='foot_to_foot'):
    """Read one skeleton frame (75 floats) from a text file.

    Args:
        file: path to a whitespace-separated file of 25 * 3 floats.
        type: joint ordering of the returned array -- 'no_order' keeps the
            raw file order, 'human' uses a head-to-foot ordering, anything
            else (the default) uses the left-foot-to-right-foot ordering.

    Returns:
        A (25, 3) float array, or None when the file is missing/malformed
        (the original best-effort behavior is preserved: the error is
        printed and None is returned).
    """
    try:
        # Use a context manager so the handle is closed (the previous
        # version leaked it via open(...).read()).
        with open(file, 'r') as handle:
            values = [float(x) for x in handle.read().split()]
        # reshape raises ValueError unless exactly 75 values were read.
        data = np.asarray(values).reshape((25, 3))
        if type == 'no_order':
            return data
        if type == 'human':
            return data[_HUMAN_ORDER]
        return data[_FOOT_TO_FOOT_ORDER]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; behavior otherwise unchanged.
        print('Ex', file)
        return None
def normalize(array):
    """Min-max scale each column of *array* into [0, 1]."""
    lo, hi = np.min(array, 0), np.max(array, 0)
    return (array - lo) / (hi - lo)
def get_sequence_energy(sequence):
    # Per-joint "motion energy" weights for a window of skeleton frames.
    # energy[i][k] = displacement of joint k around frame i (one-sided at the
    # window edges, centered average elsewhere).
    energy = np.zeros((len(sequence),25))
    for i in range(len(sequence)):
        for k in range(25):
            if i == 0:
                energy[i][k] = np.linalg.norm(sequence[i][k] - sequence[i + 1][k])
            elif i == len(sequence)-1:
                energy[i][k] = np.linalg.norm(sequence[i][k] - sequence[i - 1][k])
            else:
                energy[i][k] = (np.linalg.norm(sequence[i][k] - sequence[i + 1][k])+np.linalg.norm(sequence[i][k] - sequence[i - 1][k]))/2
    # Column-wise min-max scaling, then blend with a constant floor controlled
    # by the module-level `rho` (rho=1 keeps the pure normalized energy).
    E = normalize(energy)
    w = rho*E + (1-rho)
    return w
def get_labels(file):
    """Parse an OAD label file into (start_frames, end_frames, action_names).

    The file alternates an action-name line (letters/spaces only) with one
    or more "<start> <end>" frame-range lines; each range is tagged with the
    most recently seen action name.
    """
    starts = []
    ends = []
    action_names = []
    current_action = None
    with open(file, 'r') as handle:
        content = handle.read().splitlines()
    for line in content:
        # A line made of letters (ignoring spaces) names the next action.
        if line.replace(' ', '').isalpha():
            current_action = line.strip()
        else:
            fields = line.split(' ')
            starts.append(int(fields[0]))
            ends.append(int(fields[1]))
            action_names.append(current_action)
    return (starts, ends, action_names)
def get_image_label(start, end, labels):
    """Label for the window [start, end]: the action covering its middle frame.

    `labels` is the (starts, ends, names) triple from get_labels(). Returns
    'No-Action' when no annotated range contains the midpoint.
    """
    midpoint = (start + end) // 2
    for range_start, range_end, name in set(zip(labels[0], labels[1], labels[2])):
        if range_start <= midpoint <= range_end:
            return name
    return 'No-Action'
def to_ludl(data_path,labels,window_length=30,type='no_order'):
    # Build sliding-window "images" of skeleton frames plus per-window labels.
    # The window range extends half a window beyond the annotated frames.
    start_frame = min(labels[0]) - window_length//2
    end_frame = max(labels[1]) + window_length //2
    data = []
    for i in range(start_frame,end_frame+1):
        data.append(get_data(data_path+'/'+str(i)+'.txt',type))
    # One window per consecutive run of window_length frames.
    images = [data[i:i + window_length] for i in range(len(data) - window_length + 1)]
    lab = [get_image_label(i,i+window_length,labels) for i in range(start_frame,end_frame -window_length+2)]
    # Drop windows whose label is None (while-loop because we delete in place).
    i=0
    while i <len(lab):
        if lab[i] is None:
            del lab[i]
            del images[i]
        else:
            i+=1
    # Drop windows containing any unreadable/malformed frame.
    i = 0
    while i < len(images):
        for x in images[i]:
            if x is None or not x.shape==(25,3):
                del lab[i]
                del images[i]
                break
        else:
            i += 1
    return np.asarray(images),lab
def transform_image_ludl(image,path,name,weights):
    # Encode one window of skeleton frames as an RGB image (LUDL-style
    # encoding): each channel is min-max scaled to 0..255, then blended
    # toward white where the per-joint energy weight is low, and finally
    # transposed so frames run along one image axis and joints the other.
    RGB = image
    height = image.shape[1]
    width = image.shape[0]
    X = np.arange(height)
    Y = np.arange(width)
    RGB = np.squeeze(RGB)
    # weights = np.expand_dims(weights,0)
    white = np.ones((width,height))*255
    for i in range(3):
        # Scale channel i into 0..255, then weight by motion energy.
        RGB[:,:,i] = np.floor(255 * (RGB[:,:,i] - np.amin(RGB[:,:,i])) / (np.amax(RGB[:,:,i]) - np.amin(RGB[:,:,i])))
        RGB[:, :, i] = RGB[:, :, i]*weights+(1-weights)*white
    # w = np.expand_dims(w,1)
    # print(w[:10])
    # print(sequence[0][:10])
    # # w = np.concatenate([w,w,w],axis=1)
    # print(w.shape)
    # for i in range(len(sequence)):
    #     sequence[i]=sequence[i]*w + np.asarray([255,255,255])*(1-w)
    # sequence = np.asarray(sequence)
    # print(sequence[0][:10])
    # print(sequence.shape,w.shape)
    # print(sequence*w)
    #
    #
    # Element-wise transpose into a uint8 image and write it to disk.
    img = np.zeros((height, width, 3), dtype=np.uint8)
    for i in X:
        for j in Y:
            img[i,j]=RGB[j,i]
    # img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(join(path,name+'_.png'),img)
    return img
def to_nassim(data_path,labels,window_length=40,type_='foot_to_foot'):
    # Variant of to_ludl: the frame range is taken from the files actually
    # present in data_path, 'No-Action' windows are dropped, and the
    # per-window energy weights are returned alongside images and labels.
    # start_frame = min(labels[0]) - window_length//2
    # end_frame = max(labels[1]) + window_length //2
    subdirs = [x[2] for x in os.walk(data_path)][0]
    frames = [int(x[:-4]) for x in subdirs]
    start_frame = min(frames)
    end_frame = max(frames)
    data = []
    for i in range(start_frame,end_frame+1):
        data.append(get_data(data_path+'/'+str(i)+'.txt',type_))
    images = [data[i:i + window_length] for i in range(len(data) - window_length + 1)]
    lab = [get_image_label(i,i+window_length,labels) for i in range(start_frame,end_frame - window_length+2)]
    # Drop unlabeled and 'No-Action' windows (delete-in-place loop).
    i=0
    No_action_count = 100
    while i <len(lab):
        if lab[i] is None:
            del lab[i]
            del images[i]
        elif lab[i] == 'No-Action':
            # if No_action_count <= 0:
            del lab[i]
            del images[i]
            # else:
            #     No_action_count -= 1
            #     i+=1
        else:
            i+=1
    # Drop windows with malformed frames; also build a mirrored (z-negated)
    # copy of each valid window (augmentation -- currently not appended).
    i = 0
    images_aug=[]
    while i < len(images):
        jump = False
        new_image=[]
        for x in images[i]:
            if x is None or not x.shape==(25,3):
                del lab[i]
                del images[i]
                jump = True
                break
            else:
                new_image.append(x * [1,1,-1])
        if not jump:
            i += 1
            images_aug.append(new_image)
            # lab.append(lab[i])
    # images.extend(images_aug)
    return np.asarray(images),np.asarray(lab),[get_sequence_energy(x) for x in images]
def transform_nassim(data_path,label_path,out_path):
    # Convert one sequence directory into encoded PNG images (one per
    # window), saved under out_path/<action name>/, and return the encoded
    # arrays plus integer class indices (index into the module-level
    # `actions` list). Note: type_='foot' falls through to the default
    # foot-to-foot joint ordering inside get_data().
    images, labels, weights = to_nassim(data_path, get_labels(label_path), window_length=10,type_='foot')
    data = []
    lab = []
    for i in range(len(images)):
        path = join(out_path,labels[i])
        if not exists(path):
            mkdir(path)
        data.append(transform_image_ludl(images[i],path,str(i),weights[i]))
        lab.append(actions.index(labels[i]))
    data = np.asarray(data)
    labels = np.asarray(lab)
    return data , labels
# --- Script entry: encode all 59 OAD sequences into train/test image sets ---
data_path='data'
train_path = 'Train_OAD_40_base'
test_path = 'Test_OAD_40_base'
if not exists(train_path):
    mkdir(train_path)
if not exists(test_path):
    mkdir(test_path)
# Fixed subject split (sequence indices) for train vs. test.
train_sub = [1, 2, 3, 4, 7, 8, 9, 14, 15, 16, 18, 19, 20, 22, 23, 24, 25, 32, 33, 34, 35, 37, 38, 39, 49, 50, 51, 54, 57, 58]
test_sub = [0, 10, 13, 17, 21, 26, 27, 28, 29, 36, 40, 41, 42, 43, 44, 45, 52, 53, 55, 56]
train = None
train_label = None
test = None
test_label = None
# Encode every sequence and accumulate arrays into the matching split.
for i in range(59):
    path = join(data_path, str(i))
    label_path = join(path,'label','label.txt')
    image_path = join(path,'skeleton')
    print('Processing sequence num ===========>',i)
    data, label = transform_nassim(image_path, label_path, train_path if i in train_sub else test_path)
    if i in train_sub:
        if train_sub.index(i)==0:
            train = data
            train_label = label
        else:
            train = np.concatenate([train, data])
            train_label = np.concatenate([train_label, label])
    elif i in test_sub:
        if test_sub.index(i)==0:
            test = data
            test_label = label
        else:
            test = np.concatenate([test,data])
            test_label = np.concatenate([test_label,label])
#
# from keras.utils.np_utils import to_categorical
# test_label = to_categorical(test_label)
# train_label = to_categorical(train_label)
# test_label=test_label[:,1:]
# train_label=train_label[:,1:]
# np.save('train_x_{}_base_one_by_one.npy'.format(rho),train)
# np.save('test_x_{}_base_one_by_one.npy'.format(rho),test)
# np.save('train_y_{}_base_one_by_one.npy'.format(rho),train_label)
# np.save('test_y_{}_base_one_by_one.npy'.format(rho),test_label)
# Print per-class sample counts for both splits.
# NOTE(review): np.argmax(..., axis=1) assumes one-hot labels, but the
# to_categorical conversion above is commented out -- confirm.
Y = np.argmax(train_label,axis=1)
print(Y.shape)
unique, counts = np.unique(Y, return_counts=True)
print(dict(zip(unique, counts)))
Y = np.argmax(test_label,axis=1)
print(Y.shape)
unique, counts = np.unique(Y, return_counts=True)
print(dict(zip(unique, counts)))
print(train.shape,train_label.shape,test.shape,test_label.shape)
#29126,)
# (23912,)
| Vincent-Fer/activity-recognition-prediction-online | encodage/encodage.py | encodage.py | py | 11,449 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matp... |
19024426652 | from enum import Enum
from typing import Union
from typing import NamedTuple
from typing import Callable
class TaskType(Enum):
    """Type of machine learning task.

    Attributes
    ----------
    MULTI_CLASS_CLASSIFICATION
        multi-class classification (exactly one class per sample)
    MULTI_LABEL_CLASSIFICATION
        multi-label classification (any number of labels per sample)
    REGRESSION
        regression
    REPRESENTATION_LEARNING
        representation learning
    """

    MULTI_CLASS_CLASSIFICATION = 0
    MULTI_LABEL_CLASSIFICATION = 1
    REGRESSION = 2
    REPRESENTATION_LEARNING = 3
class TaskOutput(Enum):
    """Expected model output granularity.

    Attributes
    ----------
    SEQUENCE
        A sequence of vectors is expected (one per frame).
    VECTOR
        A single vector is expected (one per chunk).
    """

    SEQUENCE = 0
    VECTOR = 1
class Task(NamedTuple):
    """A machine learning task, described by its type and output granularity."""

    type: TaskType
    output: TaskOutput

    @classmethod
    def from_str(cls, representation: str):
        """Build a Task from its string form, e.g. "frame-wise regression".

        The first word selects the output granularity; the remainder selects
        the task type. Raises NotImplementedError for unknown words.
        """
        output_name, type_name = representation.split(" ", 1)
        output_by_name = {
            "frame-wise": TaskOutput.SEQUENCE,
            "chunk-wise": TaskOutput.VECTOR,
        }
        if output_name not in output_by_name:
            msg = f'"{output_name}" task output is not supported.'
            raise NotImplementedError(msg)
        type_by_name = {
            "multi-class classification": TaskType.MULTI_CLASS_CLASSIFICATION,
            "multi-label classification": TaskType.MULTI_LABEL_CLASSIFICATION,
            "regression": TaskType.REGRESSION,
            "representation learning": TaskType.REPRESENTATION_LEARNING,
        }
        if type_name not in type_by_name:
            msg = f'"{type_name}" task type is not supported.'
            raise NotImplementedError(msg)
        return cls(type=type_by_name[type_name], output=output_by_name[output_name])

    def __str__(self) -> str:
        """String representation, inverse of from_str()."""
        if self.returns_sequence:
            prefix = "frame-wise"
        elif self.returns_vector:
            prefix = "chunk-wise"
        else:
            msg = (
                "string representation (__str__) is not implemented "
                "for this task output."
            )
            raise NotImplementedError(msg)
        suffix_by_type = {
            TaskType.MULTI_CLASS_CLASSIFICATION: "multi-class classification",
            TaskType.MULTI_LABEL_CLASSIFICATION: "multi-label classification",
            TaskType.REGRESSION: "regression",
            TaskType.REPRESENTATION_LEARNING: "representation learning",
        }
        if self.type not in suffix_by_type:
            msg = (
                "string representation (__str__) is not implemented "
                "for this type of task."
            )
            raise NotImplementedError(msg)
        return f"{prefix} {suffix_by_type[self.type]}"

    @property
    def returns_sequence(self) -> bool:
        """`True` if the task output is a sequence, `False` otherwise."""
        return self.output == TaskOutput.SEQUENCE

    @property
    def returns_vector(self) -> bool:
        """`True` if the task output is a single vector, `False` otherwise."""
        return self.output == TaskOutput.VECTOR

    @property
    def is_multiclass_classification(self) -> bool:
        """`True` if the task is multi-class classification."""
        return self.type == TaskType.MULTI_CLASS_CLASSIFICATION

    @property
    def is_multilabel_classification(self) -> bool:
        """`True` if the task is multi-label classification."""
        return self.type == TaskType.MULTI_LABEL_CLASSIFICATION

    @property
    def is_regression(self) -> bool:
        """`True` if the task is regression."""
        return self.type == TaskType.REGRESSION

    @property
    def is_representation_learning(self) -> bool:
        """`True` if the task is representation learning."""
        return self.type == TaskType.REPRESENTATION_LEARNING

    @property
    def default_activation(self):
        """Default final activation for this task.

        Returns
        -------
        `torch.nn.LogSoftmax(dim=-1)` for multi-class classification,
        `torch.nn.Sigmoid()` for multi-label classification,
        `torch.nn.Identity()` for regression.

        Raises
        ------
        NotImplementedError
            If the default activation cannot be guessed
            (e.g. representation learning).
        """
        # Imported lazily so the module does not require torch at import time.
        import torch.nn

        if self.is_multiclass_classification:
            return torch.nn.LogSoftmax(dim=-1)
        if self.is_multilabel_classification:
            return torch.nn.Sigmoid()
        if self.is_regression:
            return torch.nn.Identity()
        msg = f"Unknown default activation for {self} task."
        raise NotImplementedError(msg)
| DanRuta/xva-trainer | lib/_dev/pyannote/audio/train/task.py | task.py | py | 5,402 | python | en | code | 78 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.nn.LogSoftmax",
"li... |
37273642245 | import rosbag
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import csv
import glob
import os
from tf.transformations import euler_from_quaternion
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation as R
import numpy as np
# Pick the bag file: explicit name from argv, else the most recent ../bag file.
if (len(sys.argv) > 1):
    filename = "../bag/" + sys.argv[1] + ".bag"
else:
    list_of_files = glob.glob('../bag/*')
    latest_file = max(list_of_files, key=os.path.getctime)
    filename = latest_file
def get_rpy(msg):
    """Extract (roll, pitch, yaw) in radians from an Odometry-style message's
    pose quaternion."""
    roll, pitch, yaw = euler_from_quaternion([msg.pose.pose.orientation.x,
                                              msg.pose.pose.orientation.y,
                                              msg.pose.pose.orientation.z,
                                              msg.pose.pose.orientation.w])
    return roll, pitch, yaw
def diff_percent(a, b):
    """Percentage difference between *a* and a reference *b*.

    Returns (a-b)/b * 100 when b is nonzero; when b is zero it falls back
    to (b-a)/a * 100 (the original asymmetric convention, preserved for
    compatibility), and returns 0.0 when both are zero (previously this
    raised ZeroDivisionError).
    """
    if b != 0:
        return (a - b) / b * 100
    if a != 0:
        return (b - a) / a * 100
    return 0.0  # both zero: no difference
class topic_data:
    """Time series of one odometry-style ROS topic read from a bag file.

    All pose/twist fields are appended to parallel lists in ``self.data``,
    keyed by quantity name ("time", "r_x", ..., "omega_z").
    """
    def __init__(self, topic_name, filename):
        self.topic_name = topic_name
        # Parallel per-quantity time series, filled from the bag below.
        self.data = {
            "time": [],
            "r_x": [],
            "r_y": [],
            "r_z": [],
            "q_x": [],
            "q_y": [],
            "q_z": [],
            "q_w": [],
            "roll": [],
            "pitch": [],
            "yaw": [],
            "v_x": [],
            "v_y": [],
            "v_z": [],
            "omega_x": [],
            "omega_y": [],
            "omega_z": []
        }
        self.get_data_from_bag(filename)
    def get_data_from_bag(self, filename):
        """Append every message of self.topic_name in the bag to self.data."""
        with rosbag.Bag(filename) as bag:
            for topic, msg, t in bag.read_messages(topics=[self.topic_name]):
                # Header stamp (seconds + nanoseconds) as a float.
                self.data["time"].append(
                    msg.header.stamp.secs + msg.header.stamp.nsecs/1e9)
                self.data["r_x"].append(msg.pose.pose.position.x)
                self.data["r_y"].append(msg.pose.pose.position.y)
                self.data["r_z"].append(msg.pose.pose.position.z)
                self.data["q_x"].append(msg.pose.pose.orientation.x)
                self.data["q_y"].append(msg.pose.pose.orientation.y)
                self.data["q_z"].append(msg.pose.pose.orientation.z)
                self.data["q_w"].append(msg.pose.pose.orientation.w)
                self.data["roll"].append(get_rpy(msg)[0])
                self.data["pitch"].append(get_rpy(msg)[1])
                self.data["yaw"].append(get_rpy(msg)[2])
                self.data["v_x"].append(msg.twist.twist.linear.x)
                self.data["v_y"].append(msg.twist.twist.linear.y)
                self.data["v_z"].append(msg.twist.twist.linear.z)
                self.data["omega_x"].append(msg.twist.twist.angular.x)
                self.data["omega_y"].append(msg.twist.twist.angular.y)
                self.data["omega_z"].append(msg.twist.twist.angular.z)
    def calculate_velocities(self):
        """Replace the recorded twist data with finite-difference velocities
        computed from positions and roll/pitch/yaw (first sample set to 0)."""
        self.data["v_x"] = []
        self.data["v_y"] = []
        self.data["v_z"] = []
        self.data["omega_x"] = []
        self.data["omega_y"] = []
        self.data["omega_z"] = []
        self.data["v_x"].append(0)
        self.data["v_y"].append(0)
        self.data["v_z"].append(0)
        self.data["omega_x"].append(0)
        self.data["omega_y"].append(0)
        self.data["omega_z"].append(0)
        for i in range(1, len(self.data["time"])):
            self.data["v_x"].append((self.data["r_x"][i] - self.data["r_x"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
            self.data["v_y"].append((self.data["r_y"][i] - self.data["r_y"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
            self.data["v_z"].append((self.data["r_z"][i] - self.data["r_z"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
            # p = R.from_quat([self.data["q_x"][i]-1, self.data["q_y"][i-1], self.data["q_z"][i-1], self.data["q_w"][i-1]])
            # q = R.from_quat([self.data["q_x"][i], self.data["q_y"][i], self.data["q_z"][i], self.data["q_w"][i]])
            # r = np.matmul(p.as_dcm().transpose(), q.as_dcm())
            # r = p.as_dcm()-q.as_dcm()
            # print(r)
            # NOTE(review): differencing Euler angles ignores angle wrap-around
            # at +/-pi -- confirm the motion stays away from the wrap.
            self.data["omega_x"].append((self.data["roll"][i] - self.data["roll"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
            self.data["omega_y"].append((self.data["pitch"][i] - self.data["pitch"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
            self.data["omega_z"].append((self.data["yaw"][i] - self.data["yaw"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
if __name__ == "__main__":
truth = topic_data('/tag_box_pose_ground_truth', filename)
detected = topic_data('/detected_object_state', filename)
predicted = topic_data('/predicted_object_state', filename)
ekf = topic_data('/updated_object_state', filename)
detected.calculate_velocities()
start_time = 3.12
simulation_time = 0.79
truth.data['time'] = [x - start_time for x in truth.data['time']]
detected.data['time'] = [x - start_time for x in detected.data['time']]
predicted.data['time'] = [x - start_time for x in predicted.data['time']]
ekf.data['time'] = [x - start_time for x in ekf.data['time']]
plot_trajectory = 0
plot_error = 0
plot_all = 0
plot_group = 1
plot_seperate = 0
save_csv = 0
plot_latency = 0
    def compare_plot(attr, include_detected):
        """Plot one quantity from all four topics on a single figure and save
        it as '<attr>.png'.

        NOTE(review): plt.savefig is called after plt.show; with interactive
        backends the figure may already be cleared -- confirm the saved file
        is not blank.
        """
        plt.plot(truth.data['time'], truth.data[attr], 'r.-', label='truth')
        if include_detected:
            plt.plot(detected.data['time'],
                     detected.data[attr], 'g.', label='detected')
        plt.plot(predicted.data['time'],
                 predicted.data[attr], 'y.', label='predicted')
        plt.plot(ekf.data['time'], ekf.data[attr], 'b.', label='ekf')
        plt.title(attr)
        plt.legend()
        plt.xlim(2, 5)
        plt.show()
        plt.savefig(attr, dpi=400)
        plt.clf()
    def generate_plot(ax, attr, is_detection_estimation, y1=None, y2=None, title=None):
        """Draw one quantity from all four topics onto the given axes.

        `is_detection_estimation` only changes the legend wording for the
        detected series; the y label is picked from the attribute name.
        """
        ax.plot(truth.data['time'], truth.data[attr], 'r.-', label='ground truth', ms=3, lw=1)
        if is_detection_estimation:
            ax.plot(detected.data['time'], detected.data[attr], 'g.-', label='detected estimation')
        else:
            ax.plot(detected.data['time'], detected.data[attr], 'g.-', label='detected state')
        ax.plot(predicted.data['time'], predicted.data[attr], 'y.-', label='predicted state')
        ax.plot(ekf.data['time'], ekf.data[attr], 'b.-', label='EKF estimation')
        ax.set_title(title if title else attr)
        ax.legend()
        ax.set_xlim(0, simulation_time)
        ax.set_ylim(y1, y2)
        ax.set_xlabel("time [s]")
        # Axis label depends on which family of quantities is plotted.
        if attr == "r_x" or attr == "r_y" or attr == "r_z":
            ax.set_ylabel("position [m]")
        if attr == "roll" or attr == "pitch" or attr == "yaw":
            ax.set_ylabel("rotation [rad]")
        if attr == "v_x" or attr == "v_y" or attr == "v_z":
            ax.set_ylabel("linear velocity [m/s]")
        if attr == "omega_x" or attr == "omega_y" or attr == "omega_z":
            ax.set_ylabel("angular velocity [rad/s]")
    def error_plot(ax, attr):
        """Plot percentage errors of the detected and EKF series against the
        ground truth, interpolated onto each series' own timestamps."""
        f = interp1d(truth.data['time'], truth.data[attr])
        ax.plot(ekf.data['time'], f(ekf.data['time']))
        ax.plot(ekf.data['time'], ekf.data[attr])
        err_det = []
        err_ekf = []
        # Same index range is used for both series; assumes the detected
        # series has at least as many samples as the EKF series.
        for i in range(len(ekf.data['time'])):
            err_det.append(diff_percent(
                detected.data[attr][i], f(detected.data['time'][i])))
            err_ekf.append(diff_percent(ekf.data[attr][i], f(ekf.data['time'][i])))
        ax.plot(detected.data['time'], err_det, 'g.-', label='detected')
        ax.plot(ekf.data['time'], err_ekf, 'b.-', label='ekf')
        ax.set_title(attr)
        ax.legend()
        # ax.set_xlim()
    # --- 3-D trajectory comparison (truth vs detected vs EKF) ---
    if plot_trajectory:
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        # NOTE(review): label 'gournd truth trajectory' has a typo; it is a
        # runtime string so it is left untouched here.
        ax.plot(truth.data["r_x"][300:390], truth.data["r_y"][300:390], truth.data["r_z"]
        [300:390], 'r.-', ms=6, lw=2, label='gournd truth trajectory')
        # ax.plot(predicted.data["r_x"][:-5], predicted.data["r_y"][:-5], predicted.data["r_z"][:-5],'y.-', ms=12, lw= 2,label='predicted trajectory')
        ax.plot(detected.data["r_x"][1:-5], detected.data["r_y"][1:-5], detected.data["r_z"][1:-5],'g.-', ms=12, lw= 2,label='detected trajectory')
        ax.plot(ekf.data["r_x"][:-5], ekf.data["r_y"][:-5], ekf.data["r_z"][:-5], 'b.-', ms=12, lw=2, label='ekf trajectory')
        ax.legend()
        ax.set_xlabel('X [m]')
        ax.set_ylabel('Y [m]')
        ax.set_zlabel('Z [m]')
        ax.set_xlim(ekf.data['r_x'][-1], ekf.data['r_x'][0])
        ax.set_ylim()
        ax.set_zlim()
        plt.figure(dpi=400)
        plt.show()
    # --- 4x3 grid of percentage-error plots for every state variable ---
    if plot_error:
        plt.clf()
        fig, axs = plt.subplots(4, 3)
        error_plot(axs[0][0], 'r_x')
        error_plot(axs[0][1], 'r_y')
        error_plot(axs[0][2], 'r_z')
        error_plot(axs[1][0], 'roll')
        error_plot(axs[1][1], 'pitch')
        error_plot(axs[1][2], 'yaw')
        error_plot(axs[2][0], 'v_x')
        error_plot(axs[2][1], 'v_y')
        error_plot(axs[2][2], 'v_z')
        error_plot(axs[3][0], 'omega_x')
        error_plot(axs[3][1], 'omega_y')
        error_plot(axs[3][2], 'omega_z')
        plt.show()
    # --- 4x3 grid of raw state comparisons ---
    if plot_all:
        plt.clf()
        fig, axs = plt.subplots(4, 3)
        generate_plot(axs[0][0], 'r_x', False)
        generate_plot(axs[0][1], 'r_y', False)
        generate_plot(axs[0][2], 'r_z', False)
        generate_plot(axs[1][0], 'roll', False)
        generate_plot(axs[1][1], 'pitch', False)
        generate_plot(axs[1][2], 'yaw', False)
        generate_plot(axs[2][0], 'v_x', True)
        generate_plot(axs[2][1], 'v_y', True)
        generate_plot(axs[2][2], 'v_z', True)
        generate_plot(axs[3][0], 'omega_x', True)
        generate_plot(axs[3][1], 'omega_y', True)
        generate_plot(axs[3][2], 'omega_z', True)
        plt.show()
    # --- Report figures: 1x3 group per quantity family (only omega active) ---
    if plot_group:
        # fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
        # generate_plot(axs[0], 'r_x', False, title=r'$r_x$', y1=1.5, y2=5.5)
        # generate_plot(axs[1], 'r_y', False, title=r'$r_y$', y1=-2, y2=2)
        # generate_plot(axs[2], 'r_z', False, title=r'$r_z$', y1=-1, y2=3)
        # plt.savefig('/home/ziyou/Desktop/report/figures/s2_r_plot.png', dpi=400)
        # plt.show()
        # plt.clf()
        # fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
        # generate_plot(axs[0], 'roll', False, title=r'$roll$', y1=-0.5, y2=1)
        # generate_plot(axs[1], 'pitch', False, title=r'$pitch$', y1=-0.4, y2=1.1)
        # generate_plot(axs[2], 'yaw', False, title=r'$yaw$', y1=-0.75, y2=0.75)
        # plt.savefig('/home/ziyou/Desktop/report/figures/s2_q_plot.png', dpi=400)
        # plt.show()
        # plt.clf()
        # fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
        # generate_plot(axs[0], 'v_x', True, title=r'$v_x$', y1=-6, y2=4)
        # generate_plot(axs[1], 'v_y', True, title=r'$v_y$', y1=-4.5, y2=5.5)
        # generate_plot(axs[2], 'v_z', True, title=r'$v_z$', y1=-5, y2=5)
        # plt.savefig('/home/ziyou/Desktop/report/figures/s2_v_plot.png', dpi=400)
        # plt.show()
        # plt.clf()
        fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
        generate_plot(axs[0], 'omega_x', True, title=r'$\omega_x$', y1=-15, y2=15)
        generate_plot(axs[1], 'omega_y', True, title=r'$\omega_y$', y1=-15, y2=15)
        generate_plot(axs[2], 'omega_z', True, title=r'$\omega_z$', y1=-15, y2=15)
        plt.savefig('/home/ziyou/Desktop/report/figures/s2_omega_plot.png', dpi=400)
        plt.show()
        plt.clf()
    # --- One standalone figure per quantity ---
    if plot_seperate:
        compare_plot('r_x', True)
        compare_plot('r_y', True)
        compare_plot('r_z', True)
        compare_plot('q_x', True)
        compare_plot('q_y', True)
        compare_plot('q_z', True)
        compare_plot('q_w', True)
        compare_plot('roll', True)
        compare_plot('pitch', True)
        compare_plot('yaw', True)
        compare_plot('v_x', False)
        compare_plot('v_y', False)
        compare_plot('v_z', False)
        compare_plot('omega_x', False)
        compare_plot('omega_y', False)
        compare_plot('omega_z', False)
    # --- CSV export (row list currently empty / commented out) ---
    if save_csv:
        # rows = [truth.time,
        #         truth.r_z,
        #         truth.roll,
        #         truth.v_z,
        #         truth.omega_x,
        #         predicted.time,
        #         predicted.r_z,
        #         predicted.roll,
        #         predicted.v_z,
        #         predicted.omega_x,
        #         detected.time,
        #         detected.r_z,
        #         detected.roll,
        #         detected.v_z,
        #         detected.omega_x,
        #         ekf.time,
        #         ekf.r_z,
        #         ekf.roll,
        #         ekf.v_z,
        #         ekf.omega_x]
        rows = []
        # NOTE(review): on Python 3, csv.writer requires a text-mode file
        # (newline=''), not 'wb' -- this branch would raise TypeError if the
        # row list were non-empty.
        with open('analysis.csv', 'wb') as f:
            writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_ALL)
            for row in rows:
                writer.writerow(row)
    # --- Bar chart of hand-measured detection latencies, plus their mean ---
    if plot_latency:
        latency = [20, 20, 20, 10, 20, 20, 20, 10, 20, 20, 30, 20, 20, 20, 20, 10, 10]
        seq = range(1, len(latency)+1)
        plt.clf()
        plt.bar(seq, latency)
        plt.xlabel("Image processing sequence")
        plt.ylabel("detected latency [ms]")
        plt.xticks(np.arange(1, len(latency)+1, step=1))
        plt.show()
        print(sum(latency) / len(latency))
| ZiyouZhang/rotors_datmo | scripts/bag_analysis.py | bag_analysis.py | py | 13,599 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18... |
22388129127 | import pygame
# Initialize pygame
pygame.init()
# Set up window
size = (400, 400)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Circle Line")
# Set up colors
# NOTE(review): WHITE is defined as (0, 0, 0), which is black -- the
# background fill below therefore renders black; rename or fix the value.
WHITE = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
click_sound = pygame.mixer.Sound("clicked_sound.mp3")
# Set up circles: two circles a fixed distance apart, centered vertically.
circle_radius = 10
circle_distance = 100
circle_x1 = (size[0] // 2) - (circle_distance // 2) - circle_radius
circle_x2 = (size[0] // 2) + (circle_distance // 2) - circle_radius
circle_y = size[1] // 2
circle_color1 = RED
circle_color2 = RED
circle1_active = False
circle2_active = False
# Set up the connecting line (drawn once both circles are activated).
line_thickness = 5
line_color = GREEN
line_rect = pygame.Rect(0, 0, 0, line_thickness)
line_active = False
# Main game loop
done = False
while not done:
    # Handle events
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_pos = pygame.mouse.get_pos()
            # Toggle circle 1 on when clicked (square hit box around center).
            if not circle1_active and abs(mouse_pos[0] - (circle_x1 + circle_radius)) <= circle_radius and abs(mouse_pos[1] - (circle_y + circle_radius)) <= circle_radius:
                circle1_active = True
                circle_color1 = GREEN
            # Toggle circle 2 on and draw the line if circle 1 is already on.
            elif not circle2_active and abs(mouse_pos[0] - (circle_x2 + circle_radius)) <= circle_radius and abs(mouse_pos[1] - (circle_y + circle_radius)) <= circle_radius:
                circle2_active = True
                circle_color2 = GREEN
                if circle1_active:
                    line_rect = pygame.Rect(circle_x1 + circle_radius, circle_y + (line_thickness // 2), circle_distance, line_thickness)
                    line_active = True
                click_sound.play()
    # Draw everything
    screen.fill(WHITE)
    pygame.draw.circle(screen, circle_color1, (circle_x1 + circle_radius, circle_y + circle_radius), circle_radius)
    pygame.draw.circle(screen, circle_color2, (circle_x2 + circle_radius, circle_y + circle_radius), circle_radius)
    if line_active:
        pygame.draw.rect(screen, line_color, line_rect)
    pygame.display.update()
# Quit pygame properly
pygame.quit()
| Vormamim/boxes | matrix_stage3.py | matrix_stage3.py | py | 2,268 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.s... |
30859953212 | import configparser
import pandas as pd
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format
import pyspark.sql.functions as F
from pyspark.sql.types import StructType as R, StructField as Fld, DoubleType as Dbl, StringType as Str, IntegerType as Int, DateType as Date, TimestampType as Ti, LongType as Lo, TimestampType as T
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']
# Schema for songs data
# Explicit schema: avoids an expensive/unreliable schema-inference pass over S3.
# NOTE(review): num_songs is declared Dbl although it looks like a count --
# confirm against the raw JSON before tightening it to Int.
song_table_schema = R([
    Fld('artist_id',Str()),
    Fld('artist_latitude',Dbl()),
    Fld('artist_location',Str()),
    Fld('artist_longitude',Dbl()),
    Fld('artist_name',Str()),
    Fld('duration',Dbl()),
    Fld('num_songs',Dbl()),
    Fld('song_id',Str()),
    Fld('title',Str()),
    Fld('year',Int()),
])
#Create Spark Session
def create_spark_session():
    """
    Create (or reuse) a SparkSession configured with the hadoop-aws package
    so s3a:// paths can be read and written.

    :return: the active SparkSession
    """
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
        .getOrCreate()
    return spark
# Song Data Process using Spark
def process_song_data(spark, input_data, output_data):
"""
Read song data and process it and save to provided output location
:param spark: Spark session
:param input_data: Input url
:param output_data: Output location
"""
# File path for song data
song_data = os.path.join(input_data,"song_data/*/*/*/*.json")
# Path to write the data back to S3
output_songs_data = os.path.join(output_data,"songs_table")
output_artists_data = os.path.join(output_data,"artists_table")
# Read song data file
df_song_data = spark.read.json(song_data,schema=song_table_schema).dropDuplicates()
df_song_data.printSchema()
df_song_data.show(5)
# Extract columns for songs table
songs_table = df_song_data.selectExpr("song_id","title","artist_name","artist_id","year","duration")
songs_table.printSchema()
songs_table.show(5)
# Write songs table back to S3
songs_table.partitionBy("year", "artist_id").write.parquet(output_songs_data, mode="overwrite")
# Extract columns for artists table
artists_table = df_song_data.selectExpr("artist_id",
"artist_name as name",
"artist_location as location",
"artist_latitude as latitude",
"artist_longitude as longitude")
artists_table.printSchema()
artists_table.show(5)
# Write artists table back to S3
artists_table.write.parquet(output_artists_data, mode="overwrite")
# Log Data Process Using Spark
def process_log_data(spark, input_data, output_data):
"""
Read log data and process it and sand create songplays table to
using both the log data and song data
Store the songplays data to specified output location
:param spark: Spark session
:param input_data: Input url
:param output_data: Output location
"""
output_users_data = os.path.join(output_data,"users_table")
output_time_data = os.path.join(output_data,"time_table")
output_songs_data = os.path.join(output_data,"songs_table")
output_songplays_data = os.path.join(output_data,"songplays_table")
# get filepath to log data file
log_data =os.path.join(input_data,"log_data/*/*/*.json")
print(log_data)
# read log data file
df_log_data = spark.read.json(log_data).dropDuplicates()
df_log_data.printSchema()
df_log_data.show(5)
# filter by actions for song plays
df_log_data = df_log_data.filter("page=='NextSong'")
df_log_data.show(5)
# extract columns for users table
users_table = df_log_data.selectExpr("userId as user_id",
"firstName as first_name",
"lastName as last_name",
"gender",
"level")
users_table.printSchema()
users_table.show(5)
# write users table to parquet files
users_table.write.parquet(output_users_data, mode="overwrite")
# create timestamp column from original timestamp column
get_timestamp = F.udf(lambda x : datetime.fromtimestamp(x),T())
df_log_data = df_log_data.withColumn("start_time",get_timestamp(df_log_data['ts']/1000))
df_log_data.printSchema()
df_log_data.show(5)
# create datetime column from original timestamp column
get_datetime = F.udf(lambda x : datetime.fromtimestamp(x),T())
df_log_data = df_log_data.withColumn("year",year(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("month",month(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("day",dayofmonth(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("hour",hour(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("week",weekofyear(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("weekday",date_format(df_log_data['start_time'],'EEEE'))
df_log_data.printSchema()
df_log_data.show(5)
# extract columns to create time table
time_table = df_log_data.selectExpr("start_time","hour","day","week","month","year","weekday")
time_table.printSchema()
time_table.show(5)
# write time table to parquet files partitioned by year and month
time_table.partitionBy('year','month').write.parquet(output_time_data, mode="overwrite")
# read in song data to use for songplays table
song_df = spark.read.parquet(output_songs_data).dropDuplicates()
song_df.printSchema()
song_df.show(5)
song_df.createOrReplaceTempView("songView")
df_log_data.createOrReplaceTempView("logView")
# extract columns from joined song and log datasets to create songplays table
songplays_table = spark.sql("""
SELECT l.start_time,
l.userId as user_id,
l.level,s.song_id,
s.artist_id,
l.sessionId as session_id,
l.location,
l.userAgent as user_agent,
l.year,
l.month
FROM songView s
JOIN logView l
ON (s.artist_name == l.artist)
""")
songplays_table.printSchema()
songplays_table.show(5)
# write songplays table to parquet files partitioned by year and month
songplays_table.partitionBy("year","month").write.parquet(output_songplays_data, mode="overwrite")
def main():
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://udacity-datalake-project/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
| yashth/Data_Lake | etl.py | etl.py | py | 7,406 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql... |
3323951129 | #!/usr/bin/python
import os
import re
import math
import matplotlib.pyplot as plt
import numpy
from numpy import sin, pi, arange
import astropy.io
from astropy.io import fits
from PIL import Image
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script contrasts photon index and luminosity for various observations. #
# In order to reduce error, the data is binned together #
# #
# Usage: The script was strictly developed for the 13 galaxies avaliable at #
# the time of development, and thus is more of a script to be used for #
# reference.
# #
# Author: April Walker #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def main():
fits_file = []
#set to the directory containing all data you wish to plot
fits_dir = os.listdir(os.environ["RESEARCH_DATA_PATH"] + "/Final-fits/")
#specify the path each data file
for i in range(len(fits_dir)):
fits_file.append(os.environ["RESEARCH_DATA_PATH"] + "/Final-fits/" + fits_dir[i])
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(111)
x = []
x_error = []
x_error_mean = []
y = []
for iterator, value in enumerate(fits_file):
hdu_list = fits.open(value)
#our data is now put into columns
data = hdu_list[1].data
cols = hdu_list[1].columns
hdu_list.close()
#take the values you need. If you need a new field, use print(cols) and use
gamma = data.field('gamma')
gamma_error = data.field('gamma_err')
lh8 = data.field('lh8')
#gamma of value 1.7 is an error as is NaN, so we only append indexes wihtout
#those values
gamma_indices = numpy.where(gamma != 1.7)
for i, value in enumerate(gamma_indices[0]):
if (gamma[value] == gamma[value] and lh8[value] == lh8[value] and gamma_error[value] > 0):
x.append(gamma[value])
x_error.append(gamma_error[value])
y.append(lh8[value])
#this guy holds our data set sorted by luminosity
data_set = []
for i in range(len(x)):
temp = [y[i],x[i],x_error[i]]
data_set.append(temp)
#sort it
data_set.sort(key=lambda x: x[0])
Ay = []
Ax = []
Ax_error = []
for i in range(len(x)):
Ay.append(data_set[i][0])
Ax.append(data_set[i][1])
Ax_error.append(data_set[i][2])
bin_minimum = Ay[0]
#Set this in case you're binning too many values
if iterator == 4:
bin_upper_limit = 16
bin_lower_limit = 8
if iterator == 5 or 6 or 7 or 10:
bin_upper_limit = 14
bin_lower_limit = 6
if iterator == 9:
bin_lower_limit = 4
bin_upper_limit = 10
if iterator == 8:
bin_upper_limit = 25
bin_lower_limit = 10
else:
bin_upper_limit = 15
bin_lower_limit = 10
y_binned = []
x_binned = []
x_error_binned = []
y_unbinned = []
x_unbinned = []
x_error_unbinned = []
counter = []
# THE BELOW CODE AUTOBINS NORMALLY #
j = 0
for i in range(len(Ay)):
# if(Ax_error[i] < 0.17):
# y_unbinned.append(Ay[i])
# x_unbinned.append(Ax[i])
# x_error_unbinned.append(Ax_error[i])
if(j == len(x_error_binned) - 1):
if((numpy.sqrt(x_error_binned[j])/(counter[j]) < 0.13) and (counter[j] >= bin_lower_limit)):
j += 1
elif(counter[j] >= bin_upper_limit):
j += 1
else:
counter[j] += 1
y_binned[j] += Ay[i]
x_binned[j] += Ax[i]
x_error_binned[j] += Ax_error[i]**2
else:
y_binned.append(0)
x_binned.append(0)
x_error_binned.append(0)
counter.append(0)
counter[j] += 1
y_binned[j] += Ay[i]
x_binned[j] += Ax[i]
x_error_binned[j] += Ax_error[i]**2
#calculates the mean error as sqrt(sum(errors^2))/sqrt(n)
for j in range(len(y_binned)):
if value == value:
y_binned[j] = y_binned[j]/counter[j]
x_binned[j] = x_binned[j]/counter[j]
x_error_binned[j] = numpy.sqrt(x_error_binned[j])/(counter[j])
no = '_nolegend_'
if iterator == 0 or iterator == 5 or iterator == 10:
if iterator == 0:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#231193', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 5:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#231193', fmt = 'o', marker = "o", zorder = 4)
elif iterator == 8:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#231193', fmt = 'o', marker = "^", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#3219cd', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#aabaff', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC4382')
# legend_labels = ["NGC4382"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M63')
# legend_labels = ["M63"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC3184')
# legend_labels = ["NGC3184"]
# The following ignores this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 1 or iterator == 6 or iterator == 11:
if iterator == 1:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#096612', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 6:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#096612', fmt = 'o', marker = "o", zorder = 4)
elif iterator == 11:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#096612', fmt = 'o', marker = "^", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#0c8718', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#90e097', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC0628')
# legend_labels.append(["NGC0628"])
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M94')
# legend_labels = ["M94"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC3198')
# legend_labels = ["NGC3198"]
# The following ignores this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 2 or iterator == 7 or iterator == 12:
if iterator == 2:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#6a0a7a', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 7:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#6a0a7a', fmt = 'o', marker = "o", zorder = 4)
elif iterator == 12:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#6a0a7a', fmt = 'o', marker = "^", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#880d9d', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#edddff', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC2403')
# legend_labels = ["NGC2403"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M95')
# legend_labels = ["M95"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC4559')
# legend_labels = ["NGC4559"]
# The following lines ignore this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 3 or iterator == 8:
if iterator == 3:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#b50636', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 8:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#b50636', fmt = 'o', marker = "o", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#ec0040', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#f2bcc5', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC6946')
# legend_labels = ["NGC6946"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M100')
# legend_labels = ["M100"]
# The following lines ignore this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 4 or iterator == 9:
if iterator == 4:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#bb9407', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 9:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#bb9407', fmt = 'o', marker = "*", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#f1bc00', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#f1d9ac', zorder = 2, label = no, s=5)
# legend_labels = ["NGC7793"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC7793')
# legend_labels = ["NGC2841"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC2841')
# The following lines ignore this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
plt.yscale('log');
ax1.set_xlabel("Photon Index")
ax1.set_ylabel("Luminosity")
ax1.set_title('Comparitive Chart')
legend_labels = []
for i in range(len(fits_dir)):
legend_labels.append("".join(fits_dir[i].rsplit("-final-sample.fits")))
ax1.legend(legend_labels)
plt.grid(True)
plt.draw()
ax1.apply_aspect()
fig.savefig('comparitive_auto.eps', dpi=fig.dpi)
Image.open('comparitive_auto.eps').save('comparitive_auto.png','png')
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# if (legend_labels == ["NGC4382", "NGC0628", "NGC2403", "NGC6946", "NGC7793", "M63", "M94", "M95", "M100", "NGC2841", "NGC3184", "NGC3198", "NGC4559"]):
# fig.savefig('comparitive_auto_update.eps', dpi=fig.dpi)
# Image.open('comparitive_auto_update.eps').save('comparitive_auto_update.png','png')
#
# elif (legend_labels == ["NGC4382"]):
# fig.savefig('NGC4382.eps', dpi=fig.dpi)
# Image.open('NGC4382.eps').save('NGC4382.png','png')
#
# elif (legend_labels == ["NGC0628"]):
# fig.savefig('NGC0628.eps', dpi=fig.dpi)
# Image.open('NGC0628.eps').save('NGC0628.png','png')
#
# elif (legend_labels == ["NGC2403"]):
# fig.savefig('NGC2403.eps', dpi=fig.dpi)
# Image.open('NGC2403.eps').save('NGC2403.png','png')
#
# elif (legend_labels == ["NGC6946"]):
# fig.savefig('NGC6946.eps', dpi=fig.dpi)
# Image.open('NGC6946.eps').save('NGC6946.png','png')
#
# elif (legend_labels == ["NGC7793"]):
# fig.savefig('NGC7793.eps', dpi=fig.dpi)
# Image.open('NGC7793.eps').save('NGC7793.png','png')
#
# if (legend_labels == ["M63"]):
# fig.savefig('M63.eps', dpi=fig.dpi)
# Image.open('M63.eps').save('M63.png','png')
#
# elif (legend_labels == ["M94"]):
# fig.savefig('M94.eps', dpi=fig.dpi)
# Image.open('M94.eps').save('M94.png','png')
#
# elif (legend_labels == ["M95"]):
# fig.savefig('M95.eps', dpi=fig.dpi)
# Image.open('M95.eps').save('M95.png','png')
#
# elif (legend_labels == ["M100"]):
# fig.savefig('M100.eps', dpi=fig.dpi)
# Image.open('M100.eps').save('M100.png','png')
#
# elif (legend_labels == ["NGC2841"]):
# fig.savefig('NGC2841.eps', dpi=fig.dpi)
# Image.open('NGC2841.eps').save('NGC2841.png','png')
#
# elif (legend_labels == ["NGC3184"]):
# fig.savefig('NGC3184.eps', dpi=fig.dpi)
# Image.open('NGC3184.eps').save('NGC3184.png','png')
#
# elif (legend_labels == ["NGC3198"]):
# fig.savefig('NGC3198.eps', dpi=fig.dpi)
# Image.open('NGC3198.eps').save('NGC3198.png','png')
#
# elif (legend_labels == ["NGC4559"]):
# fig.savefig('NGC4559.eps', dpi=fig.dpi)
# Image.open('NGC4559.eps').save('NGC4559.png','png')
plt.show()
return 0
main()
| aprilcotwut/photon_index_binning | compare_bin.py | compare_bin.py | py | 15,988 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",... |
20146094706 | from datetime import datetime, timedelta
from tqdm import tqdm
import aiohttp
import asyncio
from pymongo import MongoClient
from private import royale_token
client = MongoClient('mongodb://localhost:27017/')
MAX_TROPHIES = "Соперник(и) с наибольшим количеством кубков:"
MAX_BEST_TROPHIES = "Соперник(и) с наибольшим рекордом по кубкам:"
MAX_CLAN_WAR_WINS = "Соперник(и) с наибольшим количеством побед в кв:"
MIN_CARD_LEVEL = "Самая непрокачанная карта в колоде в кв:"
MIN_MEAN_CARDS_LEVEL = "Самая непрокачанная колода в кв:"
class Player:
def __init__(self, tag, name, trophies, best_trophies, war_day_wins):
self.tag = tag
self.name = name
self.trophies = trophies
self.best_trophies = best_trophies
self.war_day_wins = war_day_wins
self.cards = []
@property
def min_card_level(self):
min_level = 13
for card in self.cards:
level = 13 - card["maxLevel"] + card["level"]
if min_level > level:
min_level = level
return min_level
@property
def mean_level(self):
s = 0
for card in self.cards:
s += 13 - card["maxLevel"] + card["level"]
return s / len(self.cards)
async def load_player(session, player_tag: str) -> Player:
player_tag = player_tag.replace("#", "")
url = f"https://api.clashroyale.com/v1/players/%23{player_tag}"
params = dict(
authorization=royale_token
)
async with session.get(url, params=params) as response:
p = await response.json()
if "name" not in p:
return None
player = Player(player_tag, p["name"], p["trophies"], p["bestTrophies"], p["warDayWins"])
return player
async def fetch_current_war(session, clan_tag: str):
clan_tag = clan_tag.replace("#", "")
url = f"https://api.clashroyale.com/v1/clans/%23{clan_tag}/currentwar"
params = dict(
authorization=royale_token
)
async with session.get(url, params=params) as response:
if response.status != 200:
return None
war = await response.json()
return war
def filter_battles_by_clan(battles, clan_tag):
filtered_battles = []
for battle in battles:
for player in battle["team"]:
if player["clan"]["tag"] == clan_tag:
filtered_battles.append(battle)
break
return filtered_battles
def filter_battles_by_date(battles, start_date, end_date):
filtered_battles = []
for battle in battles:
date = battle["battleTime"]
battle_time = datetime.strptime(date, "%Y%m%dT%H%M%S.%fZ")
if start_date <= battle_time <= end_date:
filtered_battles.append(battle)
return filtered_battles
def filter_battles_by_win(battles):
filtered_battles = []
for battle in battles:
player_crowns = int(battle["team"][0]["crowns"])
opponent_crowns = int(battle["opponent"][0]["crowns"])
if player_crowns > opponent_crowns:
filtered_battles.append(battle)
return filtered_battles
def load_collection_day_battles(start_date, end_date, battle_log, clan_tag):
battles = battle_log.find({"type": "clanWarCollectionDay"})
current_war_battles = filter_battles_by_date(battles, start_date, end_date)
current_war_battles_by_clan = filter_battles_by_clan(current_war_battles, clan_tag)
# current_war_battles_by_clan = filter_battles_by_win(current_war_battles_by_clan)
return current_war_battles_by_clan
def load_war_day_battles(start_date, end_date, battle_log, clan_tag):
battles = battle_log.find({"type": "clanWarWarDay"})
current_war_battles = filter_battles_by_date(battles, start_date, end_date)
current_war_battles_by_clan = filter_battles_by_clan(current_war_battles, clan_tag)
# current_war_battles_by_clan = filter_battles_by_win(current_war_battles_by_clan)
return current_war_battles_by_clan
async def load_opponents(session, battles):
players = []
for battle in tqdm(battles):
player = await load_player(session, battle["team"][0]["tag"])
if player is None:
continue
player.trophies = battle["team"][0]["startingTrophies"]
player.cards = battle["team"][0]["cards"]
player_crowns = int(battle["team"][0]["crowns"])
opponent = await load_player(session, battle["opponent"][0]["tag"])
if opponent is None:
continue
opponent.trophies = battle["opponent"][0]["startingTrophies"]
opponent.cards = battle["opponent"][0]["cards"]
opponent_crowns = int(battle["opponent"][0]["crowns"])
players.append((player, opponent, player_crowns, opponent_crowns))
return players
async def collection_day_results(session, clan_tag: str):
db = client["clashroyale"]
war_log = db["warlog"]
war = next(war_log.find({}).sort("createdDate", -1))
date = war["createdDate"]
end_date = datetime.utcnow()
start_date = datetime.strptime(date, "%Y%m%dT%H%M%S.%fZ")
current_war_battles = load_collection_day_battles(start_date, end_date, db["battlelog"], clan_tag)
players = await load_opponents(session, current_war_battles)
text = ""
text += find_best(players, lambda x: x[1].trophies, True, MAX_TROPHIES)
text += find_best(players, lambda x: x[1].best_trophies, True, MAX_BEST_TROPHIES, 7000)
text += find_best(players, lambda x: x[1].war_day_wins, True, MAX_CLAN_WAR_WINS)
return text
async def war_day_results(session, clan_tag: str):
db = client["clashroyale"]
war_log = db["warlog"]
war = next(war_log.find({}).sort("createdDate", -1))
date = war["createdDate"]
end_date = datetime.strptime(date, "%Y%m%dT%H%M%S.%fZ")
start_date = end_date + timedelta(days=-1)
current_war_battles = load_war_day_battles(start_date, end_date, db["battlelog"], clan_tag)
players = await load_opponents(session, current_war_battles)
text = ""
text += find_best(players, lambda x: x[1].trophies, True, MAX_TROPHIES)
text += find_best(players, lambda x: x[1].best_trophies, True, MAX_BEST_TROPHIES, 7000)
text += find_best(players, lambda x: x[1].war_day_wins, True, MAX_CLAN_WAR_WINS)
text += find_best(players, lambda x: x[0].min_card_level, False, MIN_CARD_LEVEL, 9)
text += find_best(players, lambda x: x[0].mean_level, False, MIN_MEAN_CARDS_LEVEL)
return text
def find_best(values, key, reverse, name, threshold=None):
values = sorted(values, key=key, reverse=reverse)
threshold = threshold or key(values[0])
if (reverse and key(values[0]) < threshold) or (not reverse and key(values[0]) > threshold):
threshold = key(values[0])
result = f"{name}\n"
for value in values:
if reverse:
if key(value) < threshold:
break
else:
if key(value) > threshold:
break
if reverse:
result += f"{value[0].name} {value[2]}-{value[3]} {value[1].name} ({key(value)})\n"
else:
result += f"{value[0].name} (уровень: {key(value)}) {value[2]}-{value[3]} {value[1].name}\n"
result += "\n"
return result
async def main():
clan_tag = "#2UJ2GJ"
async with aiohttp.ClientSession() as session:
current_war = await fetch_current_war(session, clan_tag)
if current_war is not None:
state = current_war["state"]
if state == "collectionDay" or state == "notInWar":
text = await war_day_results(session, clan_tag)
elif state == "warDay":
text = collection_day_results(session, clan_tag)
else:
text = "Current war is unavailable or unknown state."
print(text)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
except asyncio.CancelledError:
pass
client.close()
| dfomin/clashroyaledata | data_analyzer.py | data_analyzer.py | py | 8,146 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "private.royale_token",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "private.royale_token",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "datetim... |
24958905519 | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SwitchNode(Node, ArmLogicTreeNode):
'''Switch node'''
bl_idname = 'LNSwitchNode'
bl_label = 'Switch'
bl_icon = 'CURVE_PATH'
min_inputs = 1
min_outputs = 1
def __init__(self):
array_nodes[str(id(self))] = self
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('NodeSocketShader', 'Value')
self.outputs.new('ArmNodeSocketAction', 'Default')
def draw_buttons(self, context, layout):
row = layout.row(align=True)
op = row.operator('arm.node_add_input_output', text='New', icon='PLUS', emboss=True)
op.node_index = str(id(self))
op.in_socket_type = 'NodeSocketShader'
op.out_socket_type = 'ArmNodeSocketAction'
op.in_name_format = 'Case {0}'
op.out_name_format = 'Case {0}'
op.in_index_name_offset = -1
op2 = row.operator('arm.node_remove_input_output', text='', icon='X', emboss=True)
op2.node_index = str(id(self))
add_node(SwitchNode, category='Logic')
| phillipmacon/armory-3d-engine | blender/arm/logicnode/logic_switch.py | logic_switch.py | py | 1,162 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bpy.types.Node",
"line_number": 6,
"usage_type": "name"
}
] |
26486257932 | import dcos.config
import dcos.http
import dcos.package
import json
import logging
import os
import re
import requests
import s3
import shakedown
import subprocess
import urllib
def _init_logging():
logging.basicConfig(level=logging.INFO)
logging.getLogger('dcos').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
_init_logging()
LOGGER = logging.getLogger(__name__)
DEFAULT_HDFS_TASK_COUNT=10
HDFS_PACKAGE_NAME='beta-hdfs'
HDFS_SERVICE_NAME='hdfs'
SPARK_PACKAGE_NAME='spark'
def hdfs_enabled():
return os.environ.get("HDFS_ENABLED") != "false"
def is_strict():
return os.environ.get('SECURITY') == 'strict'
def require_hdfs():
LOGGER.info("Ensuring HDFS is installed.")
_require_package(HDFS_PACKAGE_NAME, _get_hdfs_options())
_wait_for_hdfs()
def require_spark(options={}, service_name=None):
LOGGER.info("Ensuring Spark is installed.")
_require_package(SPARK_PACKAGE_NAME, service_name, _get_spark_options(options))
_wait_for_spark(service_name)
_require_spark_cli()
# This should be in shakedown (DCOS_OSS-679)
def _require_package(pkg_name, service_name=None, options={}):
pkg_manager = dcos.package.get_package_manager()
installed_pkgs = dcos.package.installed_packages(
pkg_manager,
None,
None,
False)
pkg = next((pkg for pkg in installed_pkgs if pkg['name'] == pkg_name), None)
if (pkg is not None) and (service_name is None):
LOGGER.info("Package {} is already installed.".format(pkg_name))
elif (pkg is not None) and (service_name in pkg['apps']):
LOGGER.info("Package {} with app_id={} is already installed.".format(
pkg_name,
service_name))
else:
LOGGER.info("Installing package {}".format(pkg_name))
shakedown.install_package(
pkg_name,
options_json=options,
wait_for_completion=True)
def _wait_for_spark(service_name=None):
def pred():
dcos_url = dcos.config.get_config_val("core.dcos_url")
path = "/service{}".format(service_name) if service_name else "service/spark"
spark_url = urllib.parse.urljoin(dcos_url, path)
status_code = dcos.http.get(spark_url).status_code
return status_code == 200
shakedown.wait_for(pred)
def _require_spark_cli():
LOGGER.info("Ensuring Spark CLI is installed.")
installed_subcommands = dcos.package.installed_subcommands()
if any(sub.name == SPARK_PACKAGE_NAME for sub in installed_subcommands):
LOGGER.info("Spark CLI already installed.")
else:
LOGGER.info("Installing Spark CLI.")
shakedown.run_dcos_command('package install --cli {}'.format(
SPARK_PACKAGE_NAME))
def _get_hdfs_options():
if is_strict():
options = {'service': {'principal': 'service-acct', 'secret_name': 'secret'}}
else:
options = {"service": {}}
options["service"]["beta-optin"] = True
return options
def _wait_for_hdfs():
shakedown.wait_for(_is_hdfs_ready, ignore_exceptions=False, timeout_seconds=25 * 60)
def _is_hdfs_ready(expected_tasks = DEFAULT_HDFS_TASK_COUNT):
return is_service_ready(HDFS_SERVICE_NAME, expected_tasks)
def is_service_ready(service_name, expected_tasks):
running_tasks = [t for t in shakedown.get_service_tasks(service_name) \
if t['state'] == 'TASK_RUNNING']
LOGGER.info("Waiting for {n} tasks got {m} for service {s}".format(n=expected_tasks,
m=len(running_tasks),
s=service_name))
return len(running_tasks) >= expected_tasks
def no_spark_jobs(service_name):
driver_ips = shakedown.get_service_ips(service_name)
LOGGER.info("Waiting for drivers to finish or be killed, still seeing {}".format(len(driver_ips)))
return len(driver_ips) == 0
def _get_spark_options(options = None):
if options is None:
options = {}
if hdfs_enabled():
options["hdfs"] = options.get("hdfs", {})
options["hdfs"]["config-url"] = "http://api.hdfs.marathon.l4lb.thisdcos.directory/v1/endpoints"
if is_strict():
options["service"] = options.get("service", {})
options["service"]["principal"] = "service-acct"
options["security"] = options.get("security", {})
options["security"]["mesos"] = options["security"].get("mesos", {})
options["security"]["mesos"]["authentication"] = options["security"]["mesos"].get("authentication", {})
options["security"]["mesos"]["authentication"]["secret_name"] = "secret"
return options
def run_tests(app_url, app_args, expected_output, app_name, args=[]):
task_id = submit_job(app_url=app_url,
app_args=app_args,
app_name=app_name,
args=args)
check_job_output(task_id, expected_output)
def check_job_output(task_id, expected_output):
LOGGER.info('Waiting for task id={} to complete'.format(task_id))
shakedown.wait_for_task_completion(task_id)
stdout = _task_log(task_id)
if expected_output not in stdout:
stderr = _task_log(task_id, "stderr")
LOGGER.error("task stdout: {}".format(stdout))
LOGGER.error("task stderr: {}".format(stderr))
raise Exception("{} not found in stdout".format(expected_output))
class SecretHandler():
    """Creates and deletes a single DC/OS secret through the secrets HTTP API."""

    def __init__(self, path, value):
        self.payload = json.dumps({"value": value})
        cluster_url = dcos.config.get_config_val("core.dcos_url")
        self.api_url = urllib.parse.urljoin(cluster_url, "secrets/v1/secret/default/{}".format(path))
        self.token = dcos.config.get_config_val("core.dcos_acs_token")
        self.headers = {"Content-Type": "application/json", "Authorization": "token={}".format(self.token)}

    def create_secret(self):
        """PUT the secret value; returns the requests Response."""
        # verify=False disables TLS certificate validation — presumably because
        # test clusters use self-signed certs; confirm before production use.
        return requests.put(self.api_url, data=self.payload, headers=self.headers, verify=False)

    def delete_secret(self):
        """DELETE the secret; returns the requests Response."""
        return requests.delete(self.api_url, headers=self.headers, verify=False)
def upload_file(file_path):
    """Upload *file_path* to the configured S3 bucket/prefix and return its HTTP URL."""
    LOGGER.info("Uploading {} to s3://{}/{}".format(
        file_path,
        os.environ['S3_BUCKET'],
        os.environ['S3_PREFIX']))
    s3.upload_file(file_path)
    basename = os.path.basename(file_path)
    return s3.http_url(basename)
def submit_job(app_url, app_args, app_name="/spark", args=[]):
    """Submit a Spark job through the DC/OS CLI and return its submission id.

    :param app_url: URL of the application artifact
    :param app_args: arguments for the application
    :param app_name: Spark dispatcher service name
    :param args: extra ``--conf``-style CLI arguments (not mutated)
    :raises Exception: when no submission id can be found in the CLI output
    """
    # Fix: work on a copy — the original did ``args += [...]`` which extended
    # the caller's list (and the shared ``args=[]`` default) in place, so
    # strict-mode flags accumulated across calls.
    args = list(args)
    if is_strict():
        args += ["--conf", 'spark.mesos.driverEnv.MESOS_MODULES=file:///opt/mesosphere/etc/mesos-scheduler-modules/dcos_authenticatee_module.json']
        args += ["--conf", 'spark.mesos.driverEnv.MESOS_AUTHENTICATEE=com_mesosphere_dcos_ClassicRPCAuthenticatee']
        args += ["--conf", 'spark.mesos.principal=service-acct']
    args_str = ' '.join(args + ["--conf", "spark.driver.memory=2g"])
    submit_args = ' '.join([args_str, app_url, app_args])
    cmd = 'dcos spark --name={app_name} run --verbose --submit-args="{args}"'.format(app_name=app_name, args=submit_args)
    LOGGER.info("Running {}".format(cmd))
    stdout = subprocess.check_output(cmd, shell=True).decode('utf-8')
    LOGGER.info("stdout: {}".format(stdout))
    match = re.search(r"Submission id: (\S+)", stdout)
    if match is None:
        # Fix: fail with a clear message instead of AttributeError on None.
        raise Exception("Submission id not found in dcos spark output: {}".format(stdout))
    return match.group(1)
def wait_for_executors_running(framework_name, num_executors, wait_time=600):
    """Block until *framework_name* has *num_executors* tasks RUNNING (timeout in seconds)."""
    LOGGER.info("Waiting for executor task to be RUNNING...")
    shakedown.wait_for(lambda: is_service_ready(framework_name, num_executors),
                       ignore_exceptions=False,
                       timeout_seconds=wait_time)
def kill_driver(driver_id, app_name):
    """Kill Spark driver *driver_id* via the DC/OS CLI and return the CLI's stdout."""
    LOGGER.info("Killing {}".format(driver_id))
    cmd = "dcos spark --name={app_name} kill {driver_id}".format(app_name=app_name, driver_id=driver_id)
    out = subprocess.check_output(cmd, shell=True).decode("utf-8")
    return out
def _task_log(task_id, filename=None):
    """Fetch up to 1000 lines of a (possibly completed) task's log via the DC/OS CLI.

    :param filename: optional log file name (e.g. "stderr"); default is stdout.
    """
    cmd = "dcos task log --completed --lines=1000 {}".format(task_id)
    if filename is not None:
        cmd = "{} {}".format(cmd, filename)
    LOGGER.info("Running {}".format(cmd))
    return subprocess.check_output(cmd, shell=True).decode('utf-8')
def is_framework_completed(fw_name):
    """Return True when *fw_name* is neither an Active nor an Inactive framework.

    shakedown.get_service(..., True) returns None once the framework is gone.
    """
    # The framework is not Active or Inactive
    return shakedown.get_service(fw_name, True) is None
| KoddiDev/spark-streaming-mesos | tests/utils.py | utils.py | py | 8,430 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.WARNI... |
41814241194 | from django.shortcuts import render, get_object_or_404
from django.http import Http404
from .models import Question
# Create your views here.
def index(request):
    """Render the index page with the five most recently published questions."""
    # Fix: ``order_by()`` returns a lazy QuerySet and never raises
    # ``Question.DoesNotExist`` (only ``get()`` does), so the old try/except
    # was dead code.  '-pub_date' (descending) is what actually yields the
    # *latest* questions that the variable name promises.
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    context = {
        'latest_question_list': latest_question_list,
    }
    return render(request, 'pools/index.html', context)
def detail(request, question_id):
    """Render the detail page for one question; raises Http404 for unknown ids."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'pools/detail.html', {'question': question})
def results(request, question_id):
    """Render the results page for *question_id*; raises Http404 for unknown ids."""
    question = get_object_or_404(Question, pk=question_id)
    context = {
        'question': question
    }
    return render(request, 'pools/results.html', context)
def vote(request, question_id):
    """Render the vote page for *question_id*; raises Http404 for unknown ids.

    NOTE(review): this view only renders a template — it has no POST handling
    and records no vote; confirm whether voting is implemented elsewhere.
    """
    question = get_object_or_404(Question, pk=question_id)
    context = {
        'question': question
    }
    return render(request, 'pools/vote.html', context)
| Vladimir-vut/django_mysite | mysite/pools/views.py | views.py | py | 1,074 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Question.objects.order_by",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 12,
"usage_type": "name"
},
{
"api... |
11342650070 | # -*- encoding: utf-8 -*-
import requests
from scdl import CLIENT_ID
class Client():
    """Minimal SoundCloud API client that pages through collection endpoints."""

    def get_collection(self, url, token, maxpage):
        """Collect items starting at *url*, following ``next_href`` pagination
        for at most *maxpage* pages.

        NOTE(review): ``params`` (client_id / oauth_token) is built but never
        passed to ``requests.get`` — behaviour preserved here; confirm whether
        it was meant to be sent as query parameters.
        """
        params = {
            'client_id': CLIENT_ID,
            'linked_partitioning': '1',
        }
        if token:
            params['oauth_token'] = token
        collected = []
        page_count = 0
        next_url = url
        while next_url and page_count < maxpage:
            # Browser-like headers so the API accepts the request.
            request_headers = {
                "Sec-Fetch-Mode":"cors",
                "Origin": "https://soundcloud.com",
                "Authorization": "OAuth {}".format(token),
                "Content-Type": "application/json",
                "Accept": "application/json, text/javascript, */*; q=0.1",
                "Referer": "https://soundcloud.com/",
                "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36",
                "DNT": "1",
            }
            response = requests.get(next_url, headers=request_headers)
            response.raise_for_status()
            json_data = response.json()
            # Paginated responses wrap items in 'collection'; plain list otherwise.
            if 'collection' in json_data:
                collected.extend(json_data['collection'])
            else:
                collected.extend(json_data)
            if 'next_href' in json_data:
                next_url = json_data['next_href']
                page_count += 1
            else:
                next_url = None
        return collected
| jz1/scdl | scdl/client.py | client.py | py | 1,399 | python | en | code | null | github-code | 6 | [
{
"api_name": "scdl.CLIENT_ID",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
}
] |
36947412448 | from functools import partial
from random import shuffle
from kivy.clock import Clock
from kivy.app import App
from kivy.lang import Builder
import ani_property
ani_property.install()
KV_CODE = r'''
GridLayout:
rows: 4
cols: 4
padding: 20
spacing: 20
'''
def shuffle_children(widget, dt):
    """Detach all of *widget*'s children and re-attach them in random order.

    ``dt`` is the elapsed-time argument supplied by kivy's Clock; it is unused.
    """
    reordered = list(widget.children)
    shuffle(reordered)
    widget.clear_widgets()
    for child in reordered:
        widget.add_widget(child)
class SampleApp(App):
    """Demo app: a 4x4 grid of buttons whose order is re-shuffled every 3 seconds."""
    def build(self):
        return Builder.load_string(KV_CODE)
    def on_start(self):
        # Local imports keep module import light until the app actually starts.
        from kivy.uix.button import Button
        from ani_property import AniMagnet
        grid = self.root
        for i in range(grid.rows * grid.cols):
            label = Button(text=str(i), font_size=50, opacity=0.5)
            # Wrap each button in an AniMagnet — presumably so position changes
            # animate smoothly (see ani_property); confirm against its docs.
            magnet = AniMagnet()
            magnet.add_widget(label)
            grid.add_widget(magnet)
        # Re-shuffle the grid's children every 3 seconds.
        Clock.schedule_interval(partial(shuffle_children, grid), 3)
if __name__ == '__main__':
SampleApp().run()
| gottadiveintopython/ani-property | examples/magnet.py | magnet.py | py | 1,011 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ani_property.install",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "kivy.app.App",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "kivy.lang.Builder.loa... |
36010827079 | import json
import serial
import numpy as np
class CameraMLX90640(serial.Serial):
    """Serial-port interface to the camera_mlx90640 firmware.

    Exchanges newline-terminated JSON messages with the device and converts
    thermal frames into (FRAME_HEIGHT, FRAME_WIDTH) numpy arrays.
    """
    FRAME_HEIGHT = 24  # rows in one MLX90640 thermal frame
    FRAME_WIDTH = 32   # columns in one MLX90640 thermal frame

    def __init__(self, port):
        self.port_param = {'port': port, 'baudrate': 115200, 'timeout': 2.0}
        super().__init__(**self.port_param)
        self.num_throw_away = 10
        self.throw_away_lines()

    def throw_away_lines(self):
        """
        Throw away first few lines. Deals with case where user has updated the
        firmware which writes a bunch text to the serial port.
        """
        # Drain with a short timeout so we don't block ~2s per discarded line.
        self.timeout = 0.1
        for _ in range(self.num_throw_away):
            self.readline()
        self.timeout = self.port_param['timeout']

    def send_and_receive(self, msg_dict):
        """
        Send *msg_dict* as a single JSON line and return the device's JSON
        reply as a dict; returns an empty dict when the reply cannot be parsed.
        """
        msg_json = f'{json.dumps(msg_dict)}\n'
        self.write(msg_json.encode())
        rsp_json = self.read_until().strip()
        rsp_dict = {}
        try:
            rsp_dict = json.loads(rsp_json.decode('utf-8'))
        except json.decoder.JSONDecodeError as e:
            print(f'Error decoding json message: {e}')
        return rsp_dict

    def grab_frame(self):
        """
        Grab a frame from the camera and convert it to a numpy array.

        Returns (ok, frame): ok is False (with an all-zero frame) when the
        device reply carried no frame data.
        """
        rsp = self.send_and_receive({'cmd': 'frame'})
        try:
            frame = np.array(rsp['frame'])
        except KeyError:
            return False, np.zeros((self.FRAME_HEIGHT, self.FRAME_WIDTH))
        # (Removed a redundant second np.array() conversion here.)
        frame = np.reshape(frame, (self.FRAME_HEIGHT, self.FRAME_WIDTH))
        # Flip both axes — presumably corrects sensor mounting orientation;
        # confirm against the firmware/hardware docs.
        frame = np.flipud(frame)
        frame = np.fliplr(frame)
        return True, frame
| willdickson/camera_mlx90640 | camera_mlx90640/camera_mlx90640.py | camera_mlx90640.py | py | 1,876 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.decoder",
"line_numb... |
40899444822 | """DeviceManagement URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url,patterns
from django.contrib import admin
from Dmanage import views
# NOTE(review): ``patterns`` is imported but never used (and was removed in
# Django 1.10) — safe to drop once confirmed nothing else relies on it.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^borrowDeviceForm/$',views.borrowDeviceForm,name='borrowDeviceForm'),
    url(r'^return_device/$',views.return_device,name='returnDevice'),
    # Device list plus per-device history pages (device_sn_slug = serial number).
    url(r'^list/$',views.list,name='list'),
    url(r'^list/(?P<device_sn_slug>\w+)/history/$',views.device_history,name='device_history'),
    # JSON/data endpoints backing the pages above.
    url(r'^list/data$',views.list_data,name='list_data'),
    url(r'^list/(?P<device_sn_slug>\w+)/history/data$',views.device_history_data,name='device_history_data'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^logout/$', views.user_logout, name='logout'),
]
| BensonXiong/DeviceManagement | DeviceManagement/DeviceManagement/urls.py | urls.py | py | 1,371 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_... |
34253272584 | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
# Fix: ``re`` is Python's standard regex module; ``from flask import re``
# raises ImportError because flask does not export it.
import re

# Pre-compiled pattern used to validate e-mail addresses.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class User:
    """Model wrapper for rows of the ``dojo_survey`` table (one survey response)."""
    def __init__( self , data ):
        # *data* is a dict-like DB row from connectToMySQL.query_db().
        self.id = data['id']
        self.name = data['name']
        self.location = data['location']
        self.language = data['language']
        self.comment = data['comment']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
    # Class methods below run queries against the dojo_survey_schema database.
    @classmethod
    def get_all(cls):
        """Return every survey row as a list of User instances."""
        query = "SELECT * FROM dojo_survey;"
        # make sure to call the connectToMySQL function with the schema you are targeting.
        results = connectToMySQL('dojo_survey_schema').query_db(query)
        # Build one User instance per returned row.
        users = []
        for user in results:
            users.append( cls(user) )
        return users
    # GET A SPECIFIC USER
    @classmethod
    def get_user_by_id(cls, data):
        """Return the raw row dict for ``data['id']``, or False when not found."""
        query = "SELECT * FROM dojo_survey WHERE dojo_survey.id = %(id)s;"
        results = connectToMySQL('dojo_survey_schema').query_db(query, data)
        if results:
            return results[0]
        return False
    # CREATE
    @classmethod
    def save(cls, data ):
        """Insert a new survey row; returns the new row id from query_db."""
        query = "INSERT INTO dojo_survey ( name , location , language , comment, created_at, updated_at ) VALUES ( %(name)s , %(location)s , %(language)s ,%(comment)s , NOW() , NOW() );"
        # data is a dictionary that will be passed into the save method from server.py
        return connectToMySQL('dojo_survey_schema').query_db( query, data )
    @staticmethod
    def validate_user(user):
        """Flash an error for every invalid field in *user*; return overall validity."""
        is_valid = True # assume valid until a check fails
        if len(user['name']) < 3:
            flash("Name must be at least 3 characters.", "firstNameRegister")
            is_valid = False
        if not user.get('location'):
            flash("You must select a location.", "locationRegister")
            is_valid = False
        if not user.get('language'):
            flash("You must select a language.", "languageRegister")
            is_valid = False
        if len(user['comment']) < 3:
            flash("Comment must be at least 3 characters.", "commentRegister")
            is_valid = False
        # NOTE(review): the model has no email attribute, and user['email']
        # raises KeyError if the form omits it — confirm the form sends 'email'.
        if not EMAIL_REGEX.match(user['email']):
            flash("Invalid email address!")
            is_valid = False
        return is_valid
{
"api_name": "flask.re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.re",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_number": 23,
"usage_type": "call"
},
{
"api_name"... |
30056444826 | import re
from collections import defaultdict
def parse_rule_container(rule):
    """Split a rule line into (container colour, contents description)."""
    container, contents = re.match(r'(.*) bags contain(.*)', rule).groups()
    return container, contents
def parse_contained_rule(rule):
    """Extract every (count, colour) string pair from a rule's contents part."""
    pairs = []
    # findall yields (full match, count, colour) per contained-bag clause.
    for _full, count, colour in re.findall(r'((\d+) ([a-z ]+) bag)+', rule):
        pairs.append((count, colour))
    return pairs
def contains_bag(contained_bags_arr, expected_color):
    """Return True if any (count, colour) pair carries *expected_color*."""
    return any(color == expected_color for _count, color in contained_bags_arr)
def main():
    """AoC 2020 day 7 part 1: count bag colours that can transitively contain 'shiny gold'."""
    # Fix: use a context manager so the input file handle is closed
    # (the original opened it and never closed it).
    with open("input.txt", "r") as f:
        inp = f.read().split('\n')
    inp = [inp_line for inp_line in inp if len(inp_line) != 0]
    rules = [parse_rule_container(inp_line) for inp_line in inp]
    fwd_map = defaultdict(list)
    rev_map = defaultdict(list)
    for container, contained_rule in rules:
        contained_bags_arr = parse_contained_rule(contained_rule)
        fwd_map[container] = contained_bags_arr
        # Reverse edges: colour -> bags that directly contain that colour.
        for _count, contained_bags_color in contained_bags_arr:
            rev_map[contained_bags_color].append(container)
    # Breadth-first walk of the reverse-containment graph from 'shiny gold'.
    ans = set()
    possible_container = set(rev_map['shiny gold'])
    while possible_container:
        for possible_bag in possible_container.copy():
            possible_container.remove(possible_bag)
            if possible_bag in ans:
                continue
            ans.add(possible_bag)
            for bag in rev_map[possible_bag]:
                possible_container.add(bag)
    print(ans)
    print(len(ans))
if __name__ == '__main__':
main()
| nithish-thomas/adventOfCode2020 | aoc20/day7/day7_1.py | day7_1.py | py | 1,700 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict"... |
10812461830 | # -*- coding: utf-8 -*-
import os
from sys import path
from django.conf.global_settings import (TEMPLATE_CONTEXT_PROCESSORS,
                                         STATICFILES_FINDERS)
# Project root (two levels above this settings module); apps live in ./apps.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
path.append(os.path.join(BASE_DIR, 'apps'))
# NOTE(review): secret key is committed to source control — rotate it and
# load from an environment variable instead.
SECRET_KEY = '_c#^&2xzwd@xt@i2b5kftn+*-9$t&l+bg9&zb3@^jq)&^s38*d'
DEBUG = False
TEMPLATE_DEBUG = True
# Application definition
DJANGO_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
)
THIRD_PARTY_APPS = (
    'compressor',
    # 'south',
    'typogrify',
    'bourbon',
    'meta',
)
LOCAL_APPS = (
    'usrs',
    'wknd',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Extend Django's defaults rather than replacing them.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)
ROOT_URLCONF = 'wknd_project.urls'
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Adelaide'
USE_I18N = False
USE_L10N = True
USE_TZ = False
# Static files are collected one level above the project dir.
STATIC_URL = '/s/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static_root')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS += (
    'compressor.finders.CompressorFinder',
)
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
# Django compressor
COMPRESS_PRECOMPILERS = (
    ('text/x-scss', 'django_libsass.SassCompiler'),
)
# Django extensions.
GRAPH_MODELS = {
    'all_applications': True,
    'group_models': True,
}
# WKND defaults
#AUTH_PROFILE_MODULE = 'usrs.Profile'
APPLICATION_PER_DAY_LIMIT = 2
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# Django Meta
META_SITE_PROTOCOL = 'http'
META_SITE_DOMAIN = 'wkndcrew.com'
META_SITE_TYPE = 'website'
META_DEFAULT_KEYWORDS = META_INCLUDE_KEYWORDS = ['events', 'South Australia', 'Adelaide', 'WKND crew']
META_USE_OG_PROPERTIES = True
META_USE_TWITTER_PROPERTIES = True
| andreyshipilov/wknd_django | wknd_project/settings/common.py | common.py | py | 2,378 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
19593396615 | import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import roc_auc_score, roc_curve
import numpy as np
from tensorflow.keras import datasets, layers, models
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from ocnn import OneClassNeuralNetwork
def main():
    """Train a one-class neural network on MNIST and plot its training loss.

    NOTE(review): several values below look like leftovers from a KDD-Cup'99
    experiment (``data``, ``feature_index_to_name``, the ``X`` transpose) and
    are never used; ``num_features = 32`` also does not obviously match
    MNIST's 28x28 images — confirm what OneClassNeuralNetwork expects.
    """
    data = h5py.File('Data/http.mat', 'r')
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
    # Unused: transposed float copy of the training set.
    X = np.array(x_train, dtype=np.float32).T
    """
    Mapping derived from http://odds.cs.stonybrook.edu/smtp-kddcup99-dataset/ and http://odds.cs.stonybrook.edu/http-kddcup99-dataset/
    """
    feature_index_to_name = {0: "duration",
                             1: "src_bytes",
                             2: "dst_bytes"}
    # One-class network hyper-parameters.
    num_features = 32
    num_hidden = 32
    r = 1.0
    epochs = 10
    nu = 0.001
    oc_nn = OneClassNeuralNetwork(num_features, num_hidden, r)
    model, history = oc_nn.train_model(x_train, epochs=epochs, nu=nu)
    # Plot the loss curves recorded during training.
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(history.epoch, history.history["loss"], label="train_loss")
    plt.plot(history.epoch, history.history["quantile_loss"], label="quantile_loss")
    plt.plot(history.epoch, history.history["r"], label="r")
    plt.title("OCNN Training Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend(loc="upper right")
    y_pred = model.predict(x_test)
    # NOTE(review): the AUC result is computed but discarded — print or return it?
    roc_auc_score(y_test, y_pred)
if __name__ == "__main__":
main()
exit()
| Yoichiro-Y/oc-nn-ensemble | .history/mnist_20221216201506.py | mnist_20221216201506.py | py | 1,596 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "h5py.File",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.datasets.mnist.load_data",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.datasets.mnist",
"line_number": 19,
"usage_type": "attribute"
},
... |
39869333421 | from django import forms
from .models import Order, ProductReview
PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]
class CustomSelectWidget(forms.Select):
    """Select widget rendered with a project-specific template."""
    template_name = 'store/tst.html'
class OrderForm(forms.Form):
    '''Order form for a single product.

    The single_product view passes the product instance to the form so the
    size choices can be built from that product's related sizes.
    '''
    def __init__(self, instance, *args, **kwargs):
        super(OrderForm, self).__init__(*args, **kwargs)
        self.instance = instance
        # Size choices come from the product's related ``sizes`` queryset.
        self.fields['size'] = forms.ModelChoiceField(
            queryset=self.instance.sizes.all(),
            widget=forms.RadioSelect())
        self.fields['quantity'] = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control'}), initial=1)
class ReviewForm(forms.ModelForm):
    """Form for submitting a ProductReview; field labels are replaced by placeholders."""
    def __init__(self, *args, **kwargs):
        super(ReviewForm, self).__init__(*args, **kwargs)
        # Blank out every field's label — placeholders (below) carry the
        # prompts instead.  (Old comment said "help text" but it is the
        # label that is cleared here.)
        for field in self.fields:
            self.fields[field].label = ''
    class Meta:
        model = ProductReview
        fields = ['name', 'email', 'comment',]
        widgets = {'name': forms.widgets.TextInput(attrs={
            'placeholder': 'Name'}),
            'email': forms.widgets.TextInput(attrs={
                "placeholder": 'Email'}),
            'comment': forms.widgets.Textarea(attrs={
                "placeholder": 'Review', "rows": "5"}),
        }
class OrderAddressForm(forms.ModelForm):
    """Collects the shipping/contact details stored on an Order."""
    class Meta:
        model = Order
        fields = ['first_name', 'last_name', 'country', 'city', 'address', 'postal_code']
{
"api_name": "django.forms.Select",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.for... |
24370174876 | import unittest
import matplotlib.pyplot as plt
class TestCreateParticles(unittest.TestCase):
    """Exercises the grid/flow -> particle-location pipeline on a shock test case.

    NOTE(review): this test has no assertions — it only checks that the
    pipeline runs — and the final ``plt.show()`` blocks until the plot window
    is closed, which will hang non-interactive CI runs; confirm intent.
    """
    def test_create_particles(self):
        from src.dataio import GridIO, FlowIO
        from src.create_particles import Particle, LaserSheet, CreateParticles
        # Read-in the grid and flow file
        grid = GridIO('../data/shocks/shock_test.sb.sp.x')
        grid.read_grid()
        grid.compute_metrics()
        flow = FlowIO('../data/shocks/shock_test.sb.sp.q')
        flow.read_flow()
        # Set particle data (diameters in metres, density in kg/m^3 — presumably; confirm)
        p = Particle()
        p.min_dia = 144e-9
        p.max_dia = 573e-9
        p.mean_dia = 281e-9
        p.std_dia = 97e-9
        p.density = 810
        p.n_concentration = 5000
        p.compute_distribution()
        # print(p.particle_field)
        # Read-in the laser sheet
        laser = LaserSheet(grid)
        laser.position = 0.0009
        laser.thickness = 0.0001  # Adjusted based on grid thickness
        laser.pulse_time = 1e-7
        laser.compute_bounds()
        # print(laser.width)
        # Create particle locations array
        ia_bounds = [None, None, None, None]
        loc = CreateParticles(grid, flow, p, laser, ia_bounds)
        loc.ia_bounds = [0, 0.003, 0, 0.001]
        loc.in_plane = 90
        loc.compute_locations()
        loc.compute_locations2()
        # Sample code to plot particle locations and relative diameters
        _in_plane = int(p.n_concentration * loc.in_plane * 0.01)
        # plot in-plane particle locations (marker size scaled by diameter)
        plt.scatter(loc.locations[:_in_plane, 0], loc.locations[:_in_plane, 1],
                    s=10*loc.locations[:_in_plane, 3]/p.min_dia, c='g')
        # plot out-of-plane locations
        plt.scatter(loc.locations[_in_plane:, 0], loc.locations[_in_plane:, 1],
                    s=10*loc.locations[_in_plane:, 3]/p.min_dia, c='r')
        plt.xlim([-0.0001, 0.004])
        plt.ylim([0, 0.0019])
        # plt.show()
        # Second figure: same plots for the compute_locations2() variant.
        plt.figure()
        plt.scatter(loc.locations2[:_in_plane, 0], loc.locations2[:_in_plane, 1],
                    s=10 * loc.locations2[:_in_plane, 3] / p.min_dia, c='g')
        # plot out-of-plane locations
        plt.scatter(loc.locations2[_in_plane:, 0], loc.locations2[_in_plane:, 1],
                    s=10 * loc.locations2[_in_plane:, 3] / p.min_dia, c='r')
        plt.xlim([-0.0001, 0.004])
        plt.ylim([0, 0.0019])
        plt.show()
if __name__ == '__main__':
unittest.main()
| kalagotla/syPIV | test/test_create_paritcles.py | test_create_paritcles.py | py | 2,496 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "src.dataio.GridIO",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "src.dataio.FlowIO",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "src.create_... |
2310558186 | import requests
import os
from requests_oauthlib import OAuth2Session
from dotenv import load_dotenv
load_dotenv()
from config import twitter_scopes
from mongo_db import mongo
redirect_uri = os.environ.get("REDIRECT_URI")
client_id = os.environ.get("CLIENT_ID")
class Token_Manager():
    """Refreshes Twitter OAuth2 tokens and persists them to MongoDB."""

    def __init__(self):
        self.client_id = os.environ.get("CLIENT_ID")
        # Fix: env-var name was misspelled "CLIENT_SECselfRET" (a botched
        # find/replace), which made the client secret always None.
        self.client_secret = os.environ.get("CLIENT_SECRET")
        self.token_url = "https://api.twitter.com/2/oauth2/token"

    def get_userid(self, token):
        """Look up the authenticated user's Twitter id with *token*; caches it on the instance."""
        url = "https://api.twitter.com/2/users/me"
        response = requests.request(
            "GET",
            url,
            headers={
                "Authorization": "Bearer {}".format(token["access_token"]),
                "Content-Type": "application/json",
            }
        )
        # Parse the body once instead of calling .json() repeatedly.
        payload = response.json()
        print(payload)
        print()
        self.user_id = payload['data']['id']
        return self.user_id

    def save_token(self, refreshed_token):
        """Store *refreshed_token* in the 'user_tokens' collection keyed by user id."""
        collection_name = 'user_tokens'
        user_id = self.get_userid(refreshed_token)
        new_token_entry = {"user_id": user_id, "token": refreshed_token}
        mongo.save_to_collection(new_token_entry, collection_name, user_id)

    def refresh_token(self, token):
        """Exchange *token*'s refresh token for a new token, persist it, and return it."""
        twitter = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=twitter_scopes)
        refreshed_token = twitter.refresh_token(
            client_id=self.client_id,
            client_secret=self.client_secret,
            token_url=self.token_url,
            refresh_token=token["refresh_token"],
        )
        self.save_token(refreshed_token)
        return refreshed_token
tm = Token_Manager()
if __name__ == "__main__":
print("Executing TokenManager as main file") | tyrovirtuosso/Twitter_Bookmark-Manager | token_manager.py | token_manager.py | py | 1,780 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
39269918095 | import copy
import os
import shlex
import sys
import textwrap
from functools import wraps
def bash_quote(w, quote):
    '''
    Quote word *w* with quote character *quote* which may be empty, single quote or double quote.

    Returns the quoted word wrapped in *quote* (empty string means backslash
    escaping only).  Handles bash specials that cannot be backslash-escaped
    by splitting the word and wrapping those characters in single quotes.
    '''
    assert quote in ('', '"', "'")
    if quote == "'":
        # Inside single quotes nothing is special except ' itself, which is
        # spelled by closing the quote, emitting "'", and reopening: '"'"'
        w = w.replace("'", quote + '"\'"' + quote)
    else:
        # some characters are special and cannot be escaped unless we use a single quote:
        # ! - get rid of history expansion
        # \x01 - breaks escaping in bash: echo "\\$" -> \\$
        # \n - when only using escapes
        special_characters = '!\x01'
        if quote == '':
            special_characters += '\n'
        for special in special_characters:
            if special in w:
                # Split on the special char, quote each piece recursively, and
                # join with the special char wrapped in single quotes.
                return ("'" + special + "'").join(bash_quote(s, quote) for s in w.split(special))
        # escape characters
        escaped_chars = set()
        if quote == '':
            # Unquoted words must also escape the shell's word-break characters.
            escaped_chars |= set(os.environ.get("COMP_WORDBREAKS", " \t\"'@><=;|&(:."))
            escaped_chars |= set("`$\"'\t ~&;?|#()*{><[")
        elif quote == '"':
            escaped_chars |= set("`$\"")
        escaped = ''
        last = ''
        for i, c in enumerate(w):
            # Double a preceding backslash when it would otherwise combine
            # with this character into an escape sequence.
            if last == '\\' and (c in escaped_chars | set('\n\\') or quote == ''):
                escaped += '\\'
            # Escape special characters, and a backslash in final position
            # (it would otherwise escape the closing quote).
            if (c == '\\' and i == len(w) - 1) or (c in escaped_chars):
                escaped += '\\'
            escaped += c
            last = c
        w = escaped
    return quote + w + quote
class Namespace(dict):
    """A dict whose entries can also be read and written as attributes."""

    def __getattr__(self, name):
        # Attribute reads fall through to item lookup (KeyError if absent).
        return self[name]

    def __setattr__(self, name, value):
        # Attribute writes become item assignments rather than instance attrs.
        self[name] = value

    def __deepcopy__(self, memo):
        # Deep-copy as a plain dict (note: the copy is a dict, not a Namespace).
        return copy.deepcopy(dict(self), memo)
class Parser(object):
    """Recursive-descent command-line parser with optional shell completion.

    Tracks a token cursor (``pos``) over ``tokens``; options/arguments write
    parsed values into ``values`` (a Namespace) and record problems in
    ``errors``.  When ``complete_token`` is set, matching completion
    candidates are accumulated in ``_completions`` instead of erroring.
    """
    def __init__(self, tokens, complete_token=None):
        '''
        :param complete_token: the token to be completed; `None` disables completion
        '''
        self.tokens = tokens
        self.complete_token = complete_token
        # internal state
        self.long = {}
        self.short = {}
        self.pos = 0
        # results
        self._completions = []
        self.values = Namespace()
        self.errors = []
        self.subcommands = []
    def get_state(self):
        """Snapshot the mutable parser state (shallow-copied, values deep-copied)."""
        return dict([(attr, copy.copy(getattr(self, attr)))
                     for attr in ('long', 'short', 'pos', '_completions', 'errors', 'subcommands')] +
                    [('values', Namespace(copy.deepcopy(self.values)))])
    def set_state(self, state):
        """Restore a snapshot previously produced by get_state()."""
        for attr, val in state.items():
            setattr(self, attr, val)
    def add_options(self, options):
        """Register Option objects under their short/long names."""
        for opt in options:
            if opt.short:
                self.short[opt.short] = opt
            if opt.long:
                self.long[opt.long] = opt
    def error(self, error):
        """Record a parse error message (parsing may continue or bail at call sites)."""
        self.errors.append(error)
    def __repr__(self):
        return "<Parser values=%r, errors=%r, subcommands=%r>" % (self.values, self.errors, self.subcommands)
    @property
    def token(self):
        # Current token, or None once the cursor runs past the end.
        return self.tokens[self.pos] if self.pos < len(self.pos) else None if False else (self.tokens[self.pos] if self.pos < len(self.tokens) else None)
    @property
    def last_token(self):
        # Token just before the cursor, or None at the start.
        return self.tokens[self.pos - 1] if self.pos - 1 >= 0 else None
    def token_is_option(self):
        return self.token.startswith('-')
    def eat_token(self):
        """Consume and return the current token, advancing the cursor."""
        token = self.token
        self.pos += 1
        return token
    def barf_token(self):
        """Push the last consumed token back (move the cursor one step left)."""
        self.pos -= 1
    def parse_options(self):
        """Consume consecutive option tokens, dispatching to each Option's parse()."""
        while self.token and self.token_is_option():
            option = None
            token = self.eat_token()
            if token.startswith('--'):
                if token[2:] in self.long:
                    option = self.long[token[2:]]
            elif token[1:] in self.short:
                option = self.short[token[1:]]
            if option is None:
                self.error('Unknown option %s' % token)
                return
            else:
                option.parse(self)
        if self._completing_option:
            self._add_completions('-' + k for k in list(self.short.keys()))
            self._add_completions('--' + k for k in list(self.long.keys()))
    def parse_arguments(self, arguments):
        """Parse positional *arguments* in order, honouring each one's nargs.

        nargs semantics: None = exactly one, '?' = at most one, '*' = any
        number, '+' = at least one.  ``stop_at`` rewinds to the last snapshot
        taken at a stop token to resolve grammar ambiguity.
        """
        for arg in arguments:
            if arg.nargs not in (None, '?', '*', '+'):
                raise Exception('Invalid nargs %s' % arg.nargs)
            self._add_arg_completions(arg)
            self.parse_options()
            if arg.nargs in (None, '+'):
                arg.parse(self)
                self.parse_options()
            if arg.nargs in ('?', '*', '+'):
                rewind_state = None
                while self.token and (not arg.choices or self.token in arg.choices):
                    if type(arg.stop_at) != list and self.token == arg.stop_at:
                        rewind_state = self.get_state()
                    elif type(arg.stop_at) == list and self.token in arg.stop_at:
                        rewind_state = self.get_state()
                    arg.parse(self)
                    self.parse_options()
                    if arg.nargs == '?':
                        break
                if rewind_state:
                    self.set_state(rewind_state)
            if arg.nargs in ('*', '+'):
                # Even if the token doesn't match the set of choices, it
                # might still yield valid completions for the current arg
                self._add_arg_completions(arg)
            if self.errors:
                return
        self.parse_options()
    @property
    def completing(self):
        # True when all tokens parsed cleanly and a completion was requested.
        return not self.errors and self.token is None and self.complete_token is not None
    @property
    def _completing_option(self):
        return self.completing and len(self.complete_token) > 0 and self.complete_token[0] == '-'
    @property
    def _completing_argument(self):
        return self.completing and (len(self.complete_token) == 0 or self.complete_token[0] != '-')
    def _add_completions(self, completions):
        self._completions.extend(c for c in completions if c.startswith(self.complete_token))
    def _add_arg_completions(self, arg):
        if self._completing_argument:
            self._add_completions(arg.completions(self.complete_token, self))
class Option(object):
    """A named flag (-x / --long) parsed by Parser.parse_options()."""
    def __init__(self, short, long, action='store_true', dest=None, help=None, default=None):
        '''
        The number of additional tokens needed for an Option is determined by
        *action*:
        - ``store_true`` requires 0 tokens and stores True in *dest*
        - ``store`` requires 1 token and stores it in *dest*
        '''
        self.short = short
        self.long = long
        # Values land in parser.values under *dest* (defaults to the long name).
        self.dest = dest if dest else long
        self.help = help
        self.action = action
        self.default = default
    def __repr__(self):
        return '-%s/--%s' % (self.short, self.long)
    def set_default(self, parser):
        """Seed parser.values[dest] with this option's default."""
        parser.values[self.dest] = self.default
    def parse(self, parser):
        """Consume this option's value tokens (if any) and store the result."""
        if self.action == 'store_true':
            parser.values[self.dest] = True
        elif self.action == 'store':
            # 'store' needs a following non-option token as the value.
            if parser.token is None or parser.token_is_option():
                parser.error("%s expects an argument" % parser.last_token)
            else:
                value = parser.eat_token()
                parser.values[self.dest] = value
class ArgMixin(object):
    """Shared interface for positional-argument types: usage rendering plus parse hooks."""

    # Usage template per nargs value; None means "exactly one occurrence".
    _USAGE_TEMPLATES = {None: '%s', '?': '[%s]', '*': '[%s]...', '+': '%s...'}

    def usage(self):
        """Render this argument for a usage line according to its nargs."""
        try:
            template = self._USAGE_TEMPLATES[self.nargs]
        except (KeyError, TypeError):
            raise Exception('Invalid nargs %s' % self.nargs)
        return template % self.metavar

    def __repr__(self):
        return self.metavar

    def set_default(self, parser):
        '''
        Sets the default value for the curent argument. Called as soon as the argument's command is seen.
        '''

    def completions(self, complete_token, parser):
        '''
        Returns the completions matching `complete_token` for the current state from `parser`.
        '''

    def parse(self, parser):
        '''
        Uses the state from `parser` to consume the tokens for the current arg
        (only one instance, even if nargs says otherwise). Called only if at
        least a token is required for the current argument.
        '''
class Argument(ArgMixin):
    '''A positional command-line argument.

    *action* controls how parsed tokens land in parser.values[dest]:
    'store' keeps a single value, 'append'/'append_unique' accumulate a list
    (the latter rejecting duplicates), and None validates but discards.
    '''
    def __init__(self, name, dest=None, metavar=None, nargs=None, action='store', choices=None,
                 default=None, completions=None, stop_at=None):
        self.name = name
        self.dest = dest if dest else name
        # Display-name precedence: explicit metavar > joined choices > NAME.
        if metavar:
            self.metavar = metavar
        elif choices:
            self.metavar = '|'.join(choices)
        else:
            self.metavar = name.upper()
        self.nargs = nargs
        self.action = action
        self.choices = choices
        self.completion_fn = completions
        self.default = default
        # stop_at is an ugly hack to resolve grammar ambiguity
        # The parser will revert to the state for the last instance of this token
        self.stop_at = stop_at
    def set_default(self, parser):
        # setdefault (rather than assignment) preserves any value an outer
        # command already placed under the same dest.
        if self.action in ('append', 'append_unique') or self.nargs in ('*', '+'):
            parser.values.setdefault(self.dest, [])
        elif self.action == 'store':
            parser.values.setdefault(self.dest, self.default)
        else:
            pass
    def completions(self, complete_token, parser):
        '''Return candidate completions: static choices, the user-supplied
        callback's output, or nothing.'''
        if self.choices:
            if self.action == 'append_unique':
                # Don't offer choices the user has already consumed.
                return set(self.choices) - set(parser.values[self.dest])
            else:
                return self.choices
        elif hasattr(self, 'completion_fn') and callable(self.completion_fn):
            comps = self.completion_fn(complete_token, parser)
            if self.action == 'append_unique':
                return set(comps) - set(parser.values[self.dest])
            return comps
        else:
            return []
    def parse(self, parser):
        '''Consume one token and store it according to *action*; parse errors
        are reported through parser.error().'''
        token = parser.eat_token()
        if token is None:
            parser.error("A value is required for %s" % self.metavar)
            return
        if self.choices and token not in self.choices:
            parser.error("%s must be one of: %s" % (self.metavar, ' '.join(self.choices)))
            return
        if self.action == 'append' or self.nargs in ('*', '+'):
            parser.values[self.dest].append(token)
        elif self.action == 'store':
            parser.values[self.dest] = token
        elif self.action == 'append_unique':
            pv = parser.values[self.dest]
            if token in pv:
                parser.error('%s cannot be specified twice' % token)
            else:
                pv.append(token)
        elif self.action is None:
            # Token validated but intentionally not stored (e.g. Token).
            pass
        else:
            raise Exception('Invalid action %s' % self.action)
class Token(Argument):
    '''A fixed literal keyword in the grammar: the only accepted value is
    *name* itself. Purely syntactic unless *dest* is supplied.'''
    def __init__(self, name, dest=None, nargs=None, action=None):
        super(Token, self).__init__(name, metavar=name, choices=(name, ), action=action, nargs=nargs)
        # Argument.__init__ defaults dest to the name; undo that so a plain
        # Token stores nothing.
        if dest is None:
            self.dest = None
class Group(ArgMixin):
    '''
    A sequence of arguments parsed as a single unit.

    If the group has nargs='?' or nargs='*' and it's not followed by eof it must
    start with a static set of choices (otherwise the grammar would be
    ambiguous).
    '''
    def __init__(self, *args, **kwargs):
        self.nargs = kwargs.pop('nargs', None)
        self.stop_at = kwargs.pop('stop_at', None)
        self.arguments = args
    @property
    def metavar(self):
        # Usage string is the members' usage strings joined with spaces.
        return ' '.join(a.usage() for a in self.arguments)
    @property
    def choices(self):
        # Only the first member can begin the group (see class docstring).
        return self.arguments[0].choices
    def completions(self, complete_token, parser):
        # Likewise, completion at the start of a group is the first member's.
        return self.arguments[0].completions(complete_token, parser)
    def parse(self, parser):
        parser.parse_arguments(self.arguments)
    def set_default(self, parser):
        for arg in self.arguments:
            arg.set_default(parser)
class Command(object):
    '''A named (sub)command holding options, positional arguments and nested
    subcommands. Instances form a tree; parse()/complete() walk the tree
    driven by a Parser over the token stream.'''
    def __init__(self, name, *args, **kwargs):
        self.name = name
        self.options = []
        self.subcommands = []
        self.arguments = []
        # Positional args are routed by type: Option, nested Command, or
        # anything else (Argument/Token/Group).
        for o in args:
            if isinstance(o, Option):
                self.options.append(o)
            elif isinstance(o, Command):
                self.subcommands.append(o)
            else:
                self.arguments.append(o)
        self.help = kwargs.pop('help', None)
        self.description = kwargs.pop('description', None)
        self.defaults = kwargs.pop('defaults', {})
        self.default_subcommand = kwargs.pop('default_subcommand', None)
        # Any keyword left over at this point is a programming error.
        assert not kwargs
    def register(self, *args, **kwargs):
        '''Decorator registering the wrapped function as the handler for the
        subcommand at path args[0]; remaining args become the new command's
        options/arguments.'''
        def decorator(func):
            cmd, path = self._get_scmd_path(args[0])
            # Default the command description to the handler's docstring.
            if 'description' not in kwargs and func.__doc__:
                kwargs['description'] = textwrap.dedent(func.__doc__).strip()
            # The handler is stored under the 'run' default value.
            kwargs.setdefault('defaults', {}).setdefault('run', func)
            cmd.subcommands.append(Command(path[-1], *(args[1:]), **kwargs))
            @wraps(func)
            def wrapper(*wargs, **wkwargs):
                func(*wargs, **wkwargs)
            return wrapper
        return decorator
    def alias(self, source_path, dest_path):
        '''Register a shallow copy of the command at *source_path* under the
        name/location given by *dest_path*.'''
        scmd, spath = self._get_scmd_path(source_path)
        dcmd, dpath = self._get_scmd_path(dest_path)
        dest_cmd = copy.copy(scmd._get_subcommand(spath[-1]))
        dest_cmd.name = dpath[-1]
        dcmd.subcommands.append(dest_cmd)
    def set_default(self, parser):
        '''Seed parser.values with this command's defaults and those of its
        arguments and options.'''
        parser.values.update(self.defaults)
        for arg in self.arguments:
            arg.set_default(parser)
        for opt in self.options:
            opt.set_default(parser)
    def parse(self, tokens):
        '''Fully parse *tokens*; returns the Parser carrying the results.'''
        parser = Parser(tokens)
        self._parse_command(parser)
        if parser.token:
            parser.error('Unparsed tokens: %s' % ' '.join(parser.tokens[parser.pos:]))
        return parser
    def complete(self, line, point):
        '''Compute shell completion candidates for *line* with the cursor at
        offset *point*. Returns a set of bash-quoted strings.'''
        # ignore everything after point
        line = line[:point]
        # if the line ends in an incomplete escape sequence skip it
        # NOTE(review): indexing line[-1]/line[-2] assumes len(line) >= 2 --
        # confirm callers never pass a shorter line.
        if line[-1] == '\\' and line[-2] != '\\':
            line = line[:-1]
        quote_char = ''
        # Two attempts: the second one runs with any unbalanced quote closed.
        for attempt in range(2):
            try:
                lex = shlex.shlex(line, posix=True)
                lex.whitespace_split = True
                tokens = list(lex)
            except ValueError:
                if attempt == 0:
                    # close the quotes and try again
                    quote_char = lex.state
                    line += quote_char
                else:
                    raise
        tokens = tokens[1:] # skip the program name
        # A trailing (possibly escaped-space) fragment is the token being
        # completed; a trailing space means completion starts a new token.
        if tokens and (line[-1] != ' ' or line[-2:] == '\ '):
            complete_token = tokens.pop()
        else:
            complete_token = ''
        parser = Parser(tokens, complete_token)
        self._parse_command(parser)
        return set(bash_quote(c, quote_char) for c in parser._completions)
    def handle_shell_completion(self):
        '''If invoked by bash completion (COMP_LINE present in the
        environment), print the candidates and exit the process.'''
        if 'COMP_LINE' in os.environ:
            for c in self.complete(os.environ['COMP_LINE'], int(os.environ['COMP_POINT'])):
                print(c)
            sys.exit()
    def usage(self):
        # One-line usage fragment: command name followed by its arguments.
        return ' '.join([self.name] + [a.usage() for a in self.arguments])
    def chain_usage(self, chain):
        # Usage for a chain of nested commands, outermost first.
        return ' '.join(c.usage() for c in chain)
    def print_help(self, subcommands):
        '''Only works for the top-level command. Prints usage, description,
        subcommand list and per-level options for the command identified by
        the *subcommands* name path.'''
        last = self
        chain = [self]
        for cmd_name in subcommands:
            last = last._get_subcommand(cmd_name)
            if last is None:
                print("Unknown subcommand: %s" % cmd_name)
                return
            chain.append(last)
        usage = self.chain_usage(chain)
        if last.subcommands:
            if last.default_subcommand:
                usage += ' [<subcommand>]'
            else:
                usage += ' <subcommand>'
        print("Usage: {}".format(usage))
        if last.description or last.help:
            print("\n", last.description or last.help)
        def _cmd_chains(cmd, stop_on_args=False):
            '''Follows subcommand chains until an argument can be specified'''
            if not cmd.subcommands or (cmd.arguments and stop_on_args):
                return {'': cmd}
            else:
                return dict(((s.name + ' ' + name).strip(), cmd)
                            for s in cmd.subcommands
                            for name, cmd in _cmd_chains(s, True).items())
        if last.subcommands:
            print("\nSubcommands:")
            # The default subcommand (if any) is shown first, in brackets.
            if last.default_subcommand:
                cmd = last._get_subcommand(last.default_subcommand)
                print(" %-20s %s" % ('[%s]' % cmd.name, cmd.help or cmd.name))
            for name, cmd in sorted(_cmd_chains(last).items()):
                if not last.default_subcommand or last.default_subcommand != name:
                    print(" %-20s %s" % (name, cmd.help or name))
        # Options are printed innermost-command-first, one section per level.
        for i, cmd in enumerate(reversed(chain)):
            if cmd.options:
                print("\nOptions for %s:" % ' '.join(c.name for c in chain[:len(chain) - i]))
                wrapper = textwrap.TextWrapper(width=80,
                                               initial_indent=' ' * 26,
                                               subsequent_indent=' ' * 26)
                for opt in sorted(cmd.options, key=lambda x: x.long or x.short):
                    print(" %-2s %-20s %s" % ('-' + opt.short if opt.short else '',
                                              '--' + opt.long if opt.long else '',
                                              wrapper.fill(opt.help or '').lstrip()))
    def _get_subcommand(self, subcommand):
        # Linear scan by exact name; None when nothing matches.
        for cmd in self.subcommands:
            if cmd.name == subcommand:
                return cmd
        else:
            return None
    def _get_scmd_path(self, path_string):
        '''Resolve a space-separated command path; returns the command owning
        the final path element plus the split path list.'''
        path = path_string.split()
        cmd = self
        for cname in path[:-1]:
            cmd = cmd._get_subcommand(cname)
            if cmd is None:
                raise Exception('Invalid command path: %s (%s not found)' % (path_string, cname))
        return cmd, path
    def _parse_command(self, parser):
        '''Recursive descent over the command tree: defaults, options,
        positional arguments, then (optionally) one subcommand.'''
        self.set_default(parser)
        parser.add_options(self.options)
        parser.parse_arguments(self.arguments)
        if self.subcommands:
            if parser._completing_argument:
                parser._add_completions(s.name for s in self.subcommands)
            token = parser.eat_token()
            if token is None:
                if self.default_subcommand:
                    self._get_subcommand(self.default_subcommand).set_default(parser)
                else:
                    parser.error("Subcommand expected")
            else:
                # Subcommand names are matched case-insensitively.
                cmd = self._get_subcommand(token.lower())
                if cmd:
                    parser.subcommands.append(cmd.name)
                    cmd._parse_command(parser)
                elif self.default_subcommand:
                    # Not a subcommand name: put the token back and let the
                    # default subcommand consume it.
                    parser.barf_token()
                    cmd = self._get_subcommand(self.default_subcommand)
                    cmd._parse_command(parser)
                else:
                    parser.error("Invalid subcommand %s" % token)
| 1and1/dim | ndcli/dimcli/cliparse.py | cliparse.py | py | 19,558 | python | en | code | 39 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_nu... |
25262905175 | import datetime
import logging
from concurrent import futures
from google.appengine.ext import ndb
from upvote.gae.bigquery import tables
from upvote.gae.datastore.models import exemption as exemption_models
from upvote.gae.datastore.models import host as host_models
from upvote.gae.datastore.models import policy as policy_models
from upvote.gae.lib.bit9 import api as bit9_api
from upvote.gae.lib.bit9 import utils as bit9_utils
from upvote.gae.lib.exemption import checks
from upvote.gae.lib.exemption import notify
from upvote.gae.lib.exemption import monitoring
from upvote.gae.utils import env_utils
from upvote.gae.utils import mail_utils
from upvote.gae.utils import template_utils
from upvote.shared import constants
# Shorthand alias: exemption lifecycle states are referenced throughout below.
_STATE = constants.EXEMPTION_STATE
class Error(Exception):
  """Base error class for this module; all module errors derive from it."""
class UnknownHostError(Error):
  """Raised when a particular host cannot be found in Datastore."""
class InvalidEnforcementLevelError(Error):
  """Raised when an invalid Bit9 enforcement level is provided."""
class UnknownPolicyError(Error):
  """Raised if a Bit9 host has an unknown (unmapped) policy."""
class InvalidClientModeError(Error):
  """Raised when an invalid Santa client mode is provided."""
class UnsupportedPlatformError(Error):
  """Raised if an Exemption with an unsupported platform is encountered."""
class UnsupportedClientError(Error):
  """Raised when attempting an action against an unsupported client."""
class InvalidStateChangeError(Error):
  """Raised when attempting to change an Exemption to an invalid state."""
class InvalidReasonError(Error):
  """Raised when an invalid EXEMPTION_REASON is provided."""
class InvalidDurationError(Error):
  """Raised when an invalid EXEMPTION_DURATION is provided."""
class InvalidRenewalError(Error):
  """Raised when trying to renew an Exemption in a non-renewable state."""
# Per-platform policy checks run by Process() against a requested Exemption.
# NOTE(review): both lists are empty, so Process() currently fail-opens
# (auto-approves) on both platforms -- confirm this is the intended policy.
_POLICY_CHECKS = {
    constants.PLATFORM.MACOS: [],
    constants.PLATFORM.WINDOWS: [],
}
def _ChangeEnforcementInBit9(host_id, new_enforcement_level):
  """Changes enforcement level for a Bit9Host.

  Updates the Computer's policy in Bit9 via the REST API, mirrors the new
  policy Key onto the Bit9Host entity, and logs the change to BigQuery.

  Args:
    host_id: The ID of the Bit9Host.
    new_enforcement_level: The new enforcement level to set for the Bit9Host.

  Raises:
    UnknownHostError: if the host cannot be found in Datastore.
    InvalidEnforcementLevelError: if the provided enforcement level is invalid.
    UnknownPolicyError: if the host's Bit9 policy is unknown.
  """
  # Verify the host_id corresponds to an actual Bit9Host.
  if not host_models.Bit9Host.get_by_id(host_id):
    monitoring.enforcement_errors.Increment()
    raise UnknownHostError('Host %s is unknown' % host_id)
  # Verify the specified enforcement level is valid.
  if new_enforcement_level not in constants.BIT9_ENFORCEMENT_LEVEL.SET_ALL:
    monitoring.enforcement_errors.Increment()
    raise InvalidEnforcementLevelError(
        'Invalid Bit9 enforcement level: %s' % new_enforcement_level)
  # Retrieve the current Computer policy from Bit9.
  computer = bit9_api.Computer.get(int(host_id), bit9_utils.CONTEXT)
  current_policy_id = computer.policy_id
  # Determine the appropriate policy for the new enforcement level.
  policy_map = constants.BIT9_ENFORCEMENT_LEVEL.MAP_TO_POLICY_ID
  new_policy_id = policy_map.get(new_enforcement_level)
  # If there's not a valid policy, bail.
  if not new_policy_id:
    monitoring.enforcement_errors.Increment()
    raise UnknownPolicyError(
        'Host %s has an unknown policy ID: %s' % (host_id, current_policy_id))
  logging.info(
      'Changing policy from %s to %s', current_policy_id, new_policy_id)
  # Write the new policy back to Bit9.
  # NOTE(review): this REST write cannot be rolled back if the Datastore
  # updates below fail inside the caller's transaction -- confirm acceptable.
  computer.policy_id = new_policy_id
  computer.put(bit9_utils.CONTEXT)
  # Change the policy Key on the entity itself.
  new_policy_key = ndb.Key(policy_models.Bit9Policy, new_policy_id)
  host_models.Bit9Host.ChangePolicyKey(host_id, new_policy_key)
  # Insert a row into BigQuery reflecting the change.
  host = host_models.Bit9Host.get_by_id(host_id)
  tables.HOST.InsertRow(
      device_id=host_id,
      timestamp=datetime.datetime.utcnow(),
      action=constants.HOST_ACTION.MODE_CHANGE,
      hostname=host.hostname,
      platform=constants.PLATFORM.WINDOWS,
      users=host.users,
      mode=new_enforcement_level)
def _ChangeEnforcementInSanta(host_id, new_client_mode):
  """Toggles between MONITOR and LOCKDOWN for a SantaHost.

  Also disables transitive whitelisting when dropping to MONITOR, and logs
  the mode change to BigQuery.

  Args:
    host_id: The ID of the SantaHost.
    new_client_mode: The new client mode to set for the SantaHost.

  Raises:
    UnknownHostError: if the host cannot be found in Datastore.
    InvalidClientModeError: if the provided client mode is invalid.
  """
  # Verify the host_id corresponds to an actual SantaHost.
  host = host_models.SantaHost.get_by_id(host_id)
  if not host:
    monitoring.enforcement_errors.Increment()
    raise UnknownHostError('Host %s is unknown' % host_id)
  # Verify the specified client mode is valid.
  if new_client_mode not in constants.CLIENT_MODE.SET_ALL:
    monitoring.enforcement_errors.Increment()
    raise InvalidClientModeError(
        'Invalid Santa client mode: %s' % new_client_mode)
  host_models.SantaHost.ChangeClientMode(host_id, new_client_mode)
  # If changing to MONITOR mode and transitive whitelisting is enabled, disable
  # it.
  if (new_client_mode == constants.CLIENT_MODE.MONITOR and
      host.transitive_whitelisting_enabled):
    ChangeTransitiveWhitelisting(host_id, False)
  # Re-fetch so the BigQuery row reflects the post-change client mode.
  host = host_models.Host.get_by_id(host_id)
  tables.HOST.InsertRow(
      device_id=host_id,
      timestamp=datetime.datetime.utcnow(),
      action=constants.HOST_ACTION.MODE_CHANGE,
      hostname=host.hostname,
      platform=constants.PLATFORM.MACOS,
      users=[host.primary_user],
      mode=host.client_mode)
def _EnableLockdown(exm_key):
  """Returns the host tied to the given Exemption to LOCKDOWN enforcement.

  Args:
    exm_key: The Key of the Exemption we're enabling LOCKDOWN for.

  Raises:
    UnsupportedPlatformError: if the platform of the corresponding Host is
        unsupported.
  """
  host_id = exm_key.parent().id()
  platform = exemption_models.Exemption.GetPlatform(exm_key)
  logging.info('Enabling LOCKDOWN mode for Host %s', host_id)
  # Dispatch on platform with early returns.
  if platform == constants.PLATFORM.WINDOWS:
    _ChangeEnforcementInBit9(host_id, constants.BIT9_ENFORCEMENT_LEVEL.LOCKDOWN)
    return
  if platform == constants.PLATFORM.MACOS:
    _ChangeEnforcementInSanta(host_id, constants.CLIENT_MODE.LOCKDOWN)
    return
  # Any other platform is a bug: record it and refuse.
  monitoring.enforcement_errors.Increment()
  raise UnsupportedPlatformError(
      'Host %s has an unsupported platform: %s' % (host_id, platform))
def _DisableLockdown(exm_key):
  """Moves the host tied to the given Exemption to MONITOR enforcement.

  Args:
    exm_key: The Key of the Exemption we're disabling LOCKDOWN for.

  Raises:
    UnsupportedPlatformError: if the platform of the corresponding Host is
        unsupported.
  """
  host_id = exm_key.parent().id()
  platform = exemption_models.Exemption.GetPlatform(exm_key)
  logging.info('Disabling LOCKDOWN mode for Host %s', host_id)
  # Dispatch on platform with early returns.
  if platform == constants.PLATFORM.WINDOWS:
    _ChangeEnforcementInBit9(host_id, constants.BIT9_ENFORCEMENT_LEVEL.MONITOR)
    return
  if platform == constants.PLATFORM.MACOS:
    _ChangeEnforcementInSanta(host_id, constants.CLIENT_MODE.MONITOR)
    return
  # Any other platform is a bug: record it and refuse.
  monitoring.enforcement_errors.Increment()
  raise UnsupportedPlatformError(
      'Host %s has an unsupported platform: %s' % (host_id, platform))
@ndb.transactional
def Request(host_id, reason, other_text, duration):
  """Creates a new Exemption, or reuses an existing one.

  If no corresponding Exemption exists, creates a new one in the REQUESTED
  state. Otherwise, if one exists in a terminal state
  (CANCELLED/REVOKED/EXPIRED), sets it back to REQUESTED with the new
  deactivation date.

  Args:
    host_id: (str) Host ID
    reason: (str) The reason for requesting an Exemption. Must be one of
        constants.EXEMPTION_REASON.
    other_text: (str) Additional text if the reason is OTHER
    duration: (str) The requested duration of the Exemption. Must be one of
        constants.EXEMPTION_DURATION.

  Raises:
    InvalidReasonError: if the provided reason is invalid.
    InvalidDurationError: if the provided duration is invalid.
    InvalidRenewalError: if the Exemption cannot currently be renewed.
  """
  logging.info('Requesting Exemption for host %s', host_id)
  # Validate the reason.
  if reason not in constants.EXEMPTION_REASON.SET_ALL:
    message = 'Invalid reason provided: %s' % reason
    logging.error(message)
    raise InvalidReasonError(message)
  # Validate the duration.
  if duration not in constants.EXEMPTION_DURATION.SET_ALL:
    message = 'Invalid exemption duration: %s' % duration
    logging.error(message)
    raise InvalidDurationError(message)
  # Convert the symbolic duration into a concrete deactivation datetime.
  duration_delta = datetime.timedelta(
      days=constants.EXEMPTION_DURATION.MAP_TO_DAYS[duration])
  deactivation_dt = datetime.datetime.utcnow() + duration_delta
  exm = exemption_models.Exemption.Get(host_id)
  # If an Exemption has never existed for this host_id, just create one.
  if exm is None:
    exm_key = exemption_models.Exemption.Insert(
        host_id, deactivation_dt, reason, other_text=other_text)
    notify.DeferUpdateEmail(exm_key, _STATE.REQUESTED, transactional=True)
    return
  # If we're dealing with an existing Exemption which can state change back to
  # REQUESTED, then make the change.
  if exm.CanChangeToState(_STATE.REQUESTED):
    exm_key = exemption_models.Exemption.CreateKey(host_id)
    details = [reason, other_text] if other_text else [reason]
    exemption_models.Exemption.ChangeState(
        exm_key, _STATE.REQUESTED, details=details)
    exm.deactivation_dt = deactivation_dt
    exm.put()
    notify.DeferUpdateEmail(exm_key, _STATE.REQUESTED, transactional=True)
  # Otherwise, we've received a request for an invalid renewal.
  else:
    message = 'Host %s already has a(n) %s Exemption' % (host_id, exm.state)
    logging.error(message)
    raise InvalidRenewalError(message)
def Process(exm_key):
  """Checks if a REQUESTED Exemption is compatible with all policies.

  Moves the Exemption to PENDING, runs the platform's policy checks in a
  thread pool, and resolves to APPROVED, ESCALATED or DENIED. Unexpected
  failures revert the Exemption to REQUESTED so it can be retried.

  Args:
    exm_key: The NDB Key of the Exemption entity.
  """
  host_id = exm_key.parent().id()
  logging.info('Processing Exemption for host %s', host_id)
  # Change state from REQUESTED to PENDING.
  try:
    exemption_models.Exemption.ChangeState(exm_key, _STATE.PENDING)
  # Process() shouldn't be transactional due to all the potential calls out made
  # below. Because of this, it's entirely possible that the calls to Process()
  # in RequestExemptionHandler and ProcessExemptions could both end up trying to
  # transition this Exemption to PENDING at the same time. It's a benign race
  # condition, so we should just note it and move on.
  except exemption_models.InvalidStateChangeError:
    logging.warning(
        'Error encountered while processing Exemption for host %s', host_id)
    return
  # Any other Exceptions should make noise.
  except Exception: # pylint: disable=broad-except
    monitoring.processing_errors.Increment()
    logging.exception(
        'Error encountered while processing Exemption for host %s', host_id)
    return
  try:
    # If no platform can be determined, auto-deny, because it means there's a
    # bug. Otherwise this request will just endlessly bounce between REQUESTED
    # and PENDING.
    try:
      platform = exemption_models.Exemption.GetPlatform(exm_key)
    except exemption_models.UnknownPlatformError:
      message = 'Host %s has an unknown platform' % host_id
      logging.error(message)
      monitoring.processing_errors.Increment()
      Deny(exm_key, details=[message])
      return
    # If no policy has been defined for the platform, auto-deny, because it
    # means there's a bug. Otherwise this request will just endlessly bounce
    # between REQUESTED and PENDING.
    if platform not in _POLICY_CHECKS:
      message = 'Platform "%s" is unsupported' % platform
      logging.error(message)
      monitoring.processing_errors.Increment()
      Deny(exm_key, details=[message])
      return
    # An empty policy should fail open, otherwise it would require a no-op check
    # which always returns APPROVED. An empty policy that fails closed would be
    # better suited by simply disabling the exemption system altogether.
    policy_checks = _POLICY_CHECKS[platform]
    if not policy_checks:
      logging.info('Empty policy defined for platform "%s"', platform)
      Approve(exm_key)
      return
    # Create a ThreadPoolExecutor and run the individual policy checks.
    logging.info(
        'Executing %d policy check(s) against host %s', len(policy_checks),
        host_id)
    with futures.ThreadPoolExecutor(max_workers=len(policy_checks)) as executor:
      running_futures = [
          executor.submit(check, exm_key) for check in policy_checks]
      done_futures = futures.wait(running_futures).done
      results = [done_future.result() for done_future in done_futures]
    # If any of the checks return a non-'outcome' state, auto-deny, because it
    # means there's a bug. Otherwise this request will just endlessly bounce
    # between REQUESTED and PENDING.
    for result in results:
      if result.state not in _STATE.SET_OUTCOME:
        message = '%s returned an invalid state: %s' % (
            result.name, result.state)
        logging.error(message)
        monitoring.processing_errors.Increment()
        Deny(exm_key, details=[message])
        return
    details = [result.detail for result in results if result.detail]
    # Outcome precedence is: any(DENIED) > any(ESCALATED) > any(APPROVED).
    if any(result.state == _STATE.DENIED for result in results):
      Deny(exm_key, details=details)
    elif any(result.state == _STATE.ESCALATED for result in results):
      Escalate(exm_key, details=details)
    else:
      Approve(exm_key, details=details)
  except Exception as e: # pylint: disable=broad-except
    logging.exception(
        'Error encountered while processing Exemption for host %s', host_id)
    monitoring.processing_errors.Increment()
    # If something breaks, revert back to REQUESTED so the cron can retry.
    exemption_models.Exemption.ChangeState(
        exm_key, _STATE.REQUESTED,
        details=['Error while processing: ' + str(e)])
@ndb.transactional(xg=True)  # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Approve(exm_key, details=None):
  """Moves an Exemption into the APPROVED state and lifts LOCKDOWN.

  Args:
    exm_key: The NDB Key of the Exemption entity.
    details: Optional list of strings describing the rationale.

  Raises:
    InvalidStateChangeError: If the Exemption cannot currently transition to
        APPROVED from its present state.
  """
  target_state = _STATE.APPROVED
  host_id = exemption_models.Exemption.GetHostId(exm_key)
  logging.info('Approving Exemption for Host %s', host_id)
  # Re-read inside the transaction and confirm the transition is still legal.
  exm = exm_key.get()
  if not exm.CanChangeToState(target_state):
    raise InvalidStateChangeError('%s to %s' % (exm.state, target_state))
  # Lift enforcement first, then record the state change and notify the user.
  _DisableLockdown(exm_key)
  exemption_models.Exemption.ChangeState(
      exm_key, target_state, details=details)
  notify.DeferUpdateEmail(
      exm_key, target_state, details=details, transactional=True)
@ndb.transactional
def Deny(exm_key, details=None):
  """Moves an Exemption into the DENIED state and notifies the user.

  Args:
    exm_key: The NDB Key of the Exemption entity.
    details: Optional list of strings describing the rationale.

  Raises:
    InvalidStateChangeError: If the Exemption cannot currently transition to
        DENIED from its present state.
  """
  target_state = _STATE.DENIED
  host_id = exemption_models.Exemption.GetHostId(exm_key)
  logging.info('Denying Exemption for Host %s', host_id)
  # Re-read inside the transaction and confirm the transition is still legal.
  exm = exm_key.get()
  if not exm.CanChangeToState(target_state):
    raise InvalidStateChangeError('%s to %s' % (exm.state, target_state))
  # No enforcement change needed: a denied request never left LOCKDOWN.
  exemption_models.Exemption.ChangeState(
      exm_key, target_state, details=details)
  notify.DeferUpdateEmail(
      exm_key, target_state, details=details, transactional=True)
@ndb.transactional
def Escalate(exm_key, details=None):
  """Moves an Exemption into the ESCALATED state.

  Args:
    exm_key: The NDB Key of the Exemption entity.
    details: Optional list of strings describing the rationale.

  Raises:
    InvalidStateChangeError: If the Exemption cannot currently transition to
        ESCALATED from its present state.
  """
  target_state = _STATE.ESCALATED
  host_id = exemption_models.Exemption.GetHostId(exm_key)
  logging.info('Escalating Exemption for Host %s', host_id)
  # Re-read inside the transaction and confirm the transition is still legal.
  exm = exm_key.get()
  if not exm.CanChangeToState(target_state):
    raise InvalidStateChangeError('%s to %s' % (exm.state, target_state))
  # Unlike the other transitions, no enforcement change and no user email
  # happen here.
  exemption_models.Exemption.ChangeState(
      exm_key, target_state, details=details)
@ndb.transactional(xg=True)  # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Expire(exm_key):
  """Moves an Exemption into the EXPIRED state and re-enables LOCKDOWN.

  Args:
    exm_key: The NDB Key of the Exemption entity.

  Raises:
    InvalidStateChangeError: If the Exemption cannot currently transition to
        EXPIRED from its present state.
  """
  target_state = _STATE.EXPIRED
  host_id = exemption_models.Exemption.GetHostId(exm_key)
  logging.info('Expiring Exemption for Host %s', host_id)
  # Re-read inside the transaction and confirm the transition is still legal.
  exm = exm_key.get()
  if not exm.CanChangeToState(target_state):
    raise InvalidStateChangeError('%s to %s' % (exm.state, target_state))
  # Lock the host back down before recording the state change and notifying.
  _EnableLockdown(exm_key)
  exemption_models.Exemption.ChangeState(exm_key, target_state)
  notify.DeferUpdateEmail(exm_key, target_state, transactional=True)
@ndb.transactional(xg=True)  # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Revoke(exm_key, details):
  """Moves an Exemption into the REVOKED state and re-enables LOCKDOWN.

  Args:
    exm_key: The NDB Key of the Exemption entity.
    details: List of strings describing the rationale.

  Raises:
    InvalidStateChangeError: If the Exemption cannot currently transition to
        REVOKED from its present state.
  """
  target_state = _STATE.REVOKED
  host_id = exemption_models.Exemption.GetHostId(exm_key)
  logging.info('Revoking Exemption for Host %s', host_id)
  # Re-read inside the transaction and confirm the transition is still legal.
  exm = exm_key.get()
  if not exm.CanChangeToState(target_state):
    raise InvalidStateChangeError('%s to %s' % (exm.state, target_state))
  # Lock the host back down before recording the state change and notifying.
  _EnableLockdown(exm_key)
  exemption_models.Exemption.ChangeState(
      exm_key, target_state, details=details)
  notify.DeferUpdateEmail(
      exm_key, target_state, details=details, transactional=True)
@ndb.transactional(xg=True)  # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Cancel(exm_key):
  """Moves an Exemption into the CANCELLED state and re-enables LOCKDOWN.

  Args:
    exm_key: The NDB Key of the Exemption entity.

  Raises:
    InvalidStateChangeError: If the Exemption cannot currently transition to
        CANCELLED from its present state.
  """
  target_state = _STATE.CANCELLED
  host_id = exemption_models.Exemption.GetHostId(exm_key)
  logging.info('Cancelling Exemption for Host %s', host_id)
  # Re-read inside the transaction and confirm the transition is still legal.
  exm = exm_key.get()
  if not exm.CanChangeToState(target_state):
    raise InvalidStateChangeError('%s to %s' % (exm.state, target_state))
  # Lock the host back down before recording the state change and notifying.
  _EnableLockdown(exm_key)
  exemption_models.Exemption.ChangeState(exm_key, target_state)
  notify.DeferUpdateEmail(exm_key, target_state, transactional=True)
@ndb.transactional
def ChangeTransitiveWhitelisting(host_id, enable):
  """Changes the transitive whitelisting state for a SantaHost.

  Cancels any APPROVED Exemption when enabling, emails the primary user, and
  records a comment row in BigQuery.

  Args:
    host_id: The ID of the SantaHost.
    enable: Whether to enable or disable transitive whitelisting.

  Raises:
    UnsupportedClientError: if called against anything other than a SantaHost.
  """
  # Only Santa clients are supported.
  host = host_models.Host.get_by_id(host_models.Host.NormalizeId(host_id))
  if host.GetClientName() != constants.CLIENT.SANTA:
    raise UnsupportedClientError(
        'Only Santa clients support transitive whitelisting')
  # If this is a no-op, just bail now.
  if host.transitive_whitelisting_enabled == enable:
    logging.warning(
        'Transitive whitelisting is already %s for %s',
        'enabled' if enable else 'disabled', host.hostname)
    return
  # Make the change.
  host.transitive_whitelisting_enabled = enable
  host.put()
  modification = 'enabled' if enable else 'disabled'
  logging.info('Transitive whitelisting %s for %s', modification, host.hostname)
  # If enabling transitive whitelisting and the SantaHost has an APPROVED
  # Exemption, cancel it.
  exm_key = exemption_models.Exemption.CreateKey(host_id)
  exm = exm_key.get()
  if enable and exm and exm.state == constants.EXEMPTION_STATE.APPROVED:
    Cancel(exm_key)
  # Notify the user of the mode change.
  body = template_utils.RenderEmailTemplate(
      'transitive_modified.html', modification=modification,
      device_hostname=host.hostname, upvote_hostname=env_utils.ENV.HOSTNAME)
  subject = 'Developer mode changed: %s' % host.hostname
  mail_utils.Send(subject, body, to=[host.primary_user], html=True)
  # Note the state change in BigQuery.
  comment = 'Transitive whitelisting %s' % modification
  tables.HOST.InsertRow(
      device_id=host_id,
      timestamp=datetime.datetime.utcnow(),
      action=constants.HOST_ACTION.COMMENT,
      hostname=host.hostname,
      platform=constants.PLATFORM.MACOS,
      users=[host.primary_user],
      mode=host.client_mode,
      comment=comment)
| google/upvote_py2 | upvote/gae/lib/exemption/api.py | api.py | py | 21,555 | python | en | code | 449 | github-code | 6 | [
{
"api_name": "upvote.shared.constants.EXEMPTION_STATE",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "upvote.shared.constants",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "upvote.shared.constants.PLATFORM",
"line_number": 72,
"usage_type": ... |
4143583368 | import torch
import torchvision
from torchvision.io import read_image
import os
from torch.utils.data import Dataset
"""
This part of the script is an easy API to load and get the datasets
"""
def get_dataset(dataset: str, c_angle=30, new_size=None, batch_size=300):
    """
    Build train/test DataLoaders for one of the supported datasets.

    :param dataset: dataset name; one of 'FASHION_MNIST',
        'Rotate FASHION_MNIST', 'LFW' or 'LFW_resize'
    :param c_angle: rotation angle in degrees ('Rotate FASHION_MNIST' only)
    :param new_size: target [height, width] for 'LFW_resize'
        (defaults to [32, 32])
    :param batch_size: batch size used by both loaders
    :return: train loader, test loader, input size and batch size
    :raises ValueError: if *dataset* is not a supported name
    """
    # Avoid a mutable default argument; [32, 32] is the effective default.
    if new_size is None:
        new_size = [32, 32]
    if dataset == 'FASHION_MNIST':
        train_set = FASHION_MNIST('./data/' + dataset + '/', download=True, train=True,
                                  transform=torchvision.transforms.ToTensor())
        test_set = FASHION_MNIST('./data/' + dataset + '/', download=True, train=False,
                                 transform=torchvision.transforms.ToTensor())
        input_size = (28, 28, 1)
    elif dataset == 'Rotate FASHION_MNIST':
        rotate_tran_fun = lambda x: rotate_tran(x, angle=c_angle)
        train_set = FASHION_MNIST('./data/' + dataset + f'_Rotate_{c_angle}/', download=True, train=True,
                                  transform=rotate_tran_fun)
        test_set = FASHION_MNIST('./data/' + dataset + f'_Rotate_{c_angle}/', download=True, train=False,
                                 transform=rotate_tran_fun)
        input_size = (28, 28, 1)
    elif dataset == 'LFW':
        train_set = LFW('./data/' + dataset + '/', split='train',
                        transform=torchvision.transforms.ToTensor(), download=True)
        test_set = LFW('./data/' + dataset + '/', split='test',
                       transform=torchvision.transforms.ToTensor(), download=True)
        input_size = (250, 250)
    elif dataset == 'LFW_resize':
        resize_tran_fun = lambda x: resize_tran(x, new_size=new_size)
        train_set = LFW('./data/' + dataset + f'_{new_size}/', split='train', transform=resize_tran_fun, download=True)
        test_set = LFW('./data/' + dataset + f'_{new_size}/', split='test', transform=resize_tran_fun, download=True)
        input_size = (new_size[0], new_size[1], 3)
    else:
        # Previously an unknown name crashed later with UnboundLocalError;
        # fail fast with a clear message instead.
        raise ValueError('Unknown dataset: %r' % (dataset,))
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last=True)
    return train_loader, test_loader, input_size, batch_size
def rotate_tran(img, angle):
    """Convert *img* (a PIL image) to a tensor and rotate it by *angle* degrees."""
    # ToTensor is a transform class: instantiate it, then apply it to the
    # image. (The original passed the image to the constructor, which raises
    # TypeError because ToTensor() takes no arguments.)
    tensor_img = torchvision.transforms.ToTensor()(img)
    return torchvision.transforms.functional.rotate(img=tensor_img, angle=angle)
def resize_tran(img, new_size=[32, 32]):
    """Convert *img* (a PIL image) to a tensor and resize it to *new_size*."""
    # ToTensor is a transform class: instantiate it, then apply it to the
    # image. (The original passed the image to the constructor, which raises
    # TypeError because ToTensor() takes no arguments.)
    tensor_img = torchvision.transforms.ToTensor()(img)
    return torchvision.transforms.functional.resize(img=tensor_img, size=new_size)
class FASHION_MNIST(torchvision.datasets.FashionMNIST):
    """Unsupervised wrapper: __getitem__ drops the class label and yields only
    the (transformed) image."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def __getitem__(self, index):
        # Parent returns (image, target); keep just the image.
        return super().__getitem__(index)[0]
class LFW(torchvision.datasets.LFWPeople):
    """Unsupervised wrapper: __getitem__ drops the identity label and yields
    only the (transformed) image."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def __getitem__(self, index):
        # Parent returns (image, target); keep just the image.
        return super().__getitem__(index)[0]
class Paining(Dataset):
    """Custom image-folder dataset (name is presumably a typo for 'Painting').

    NOTE(review): ``self.img_labels`` is read by __len__ and __getitem__ but
    is never assigned (no annotations file is loaded in __init__), so using
    this dataset will raise AttributeError. Confirm whether an
    ``annotations_file`` parameter was dropped from __init__.
    """
    def __init__(self, img_dir, transform=None, target_transform=None):
        self.img_dir = img_dir
        self.transform = transform
        self.target_transform = target_transform
    def __len__(self):
        return len(self.img_labels)
    def __getitem__(self, idx):
        img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
        image = read_image(img_path)
        label = self.img_labels.iloc[idx, 1]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        # NOTE(review): `label` is computed and transformed but not returned
        # -- confirm whether this should be `return image, label`.
        return image
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 24,
"usage_type": "call"
}... |
33975836628 | from flask import Flask, g
from flask.ext.login import LoginManager
import user_model
# Server configuration constants.
DEBUG = True
PORT = 8000
HOST = '0.0.0.0'
app = Flask(__name__)
# NOTE(review): hard-coded session secret — anyone reading the source can
# forge sessions; load from an environment variable before deploying.
app.secret_key = 'randomstuff'
login_manager = LoginManager()
login_manager.init_app(app)
# Endpoint users are redirected to when login is required.
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: return the User with primary key `userid`, or None.

    Bug fix: the user_loader callback is invoked with the user id as its
    single argument; previously the function took no parameters and
    referenced an undefined name `userid`, raising NameError (and a
    TypeError from the framework call) on every session lookup.
    """
    try:
        return user_model.User.get(user_model.User.id == userid)
    except user_model.DoesNotExist:
        return None
@app.before_request
def before_request():
    """Connect to the database before each request."""
    # Expose the shared peewee database handle on flask.g for the duration
    # of this request; after_request() closes it.
    g.db = user_model.DATABASE
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the database connection after each request and pass the
    response through unchanged."""
    g.db.close()
    return response
if __name__ == '__main__':
    user_model.initialize()
    # NOTE(review): this seeds a fixed admin user (with a plaintext password
    # in source) on every start; if usernames must be unique it will likely
    # raise on the second run — confirm create_user's duplicate handling.
    user_model.User.create_user(username = "Kaka", email = "random@gmail.com", password = "password", admin = True)
    app.run(debug = DEBUG, port = PORT, host = HOST)
| kaka21garuda/FlaskSocial | app.py | app.py | py | 972 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.LoginManager",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "user_model.User.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "user_mod... |
42663326199 | import os
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.ioff()
import seaborn
def compare_abs_auroc_differences(results_dir,
model_a_path, model_a_descriptor,
model_b_path, model_b_descriptor):
"""Take the results of eval_by_scan_attr.calculate_model_perf_by_scan_attr()
for two different models and make comparison plots.
<results_dir> is the path to the directory in which to save the results
<model_a_path> is the path to the directory in which the results of
eval_by_scan_attr.py are stored for Model A
<model_b_path> is the path to the directory in which the results of
eval_by_scan_attr.py are stored for Model B
<model_a_descriptor> and <model_b_descriptor> are descriptive strings
that will be used in generating the plots
For each scan attribute, a df will be loaded that has the following format:
the columns are different scan attribute options. For example if the
attribute is StationName, then the options (the columns) can
include 'DMPRAD3FORCE', 'DMP_CT1', 'DMP_CT2', 'CCCT3Revo',
'IPCT1', 'CaryCT1',...,'CTC1'.
the rows are different abnormalities, such as 'lung_nodule',
'heart_cardiomegaly', and 'h_great_vessel_atherosclerosis'
the values are AUROCs calculated for that particular scan attribute
option and abnormality."""
if not os.path.exists(results_dir):
os.mkdir(results_dir)
#Create plots for all the attributes
for attribute in ['SliceThickness','PatientAgeYears',
'orig_square','orig_numslices','orig_slope','orig_inter',
'orig_yxspacing','orig_zdiff',
'Manufacturer','ManufacturerModelName','InstitutionName',
'StationName','SoftwareVersions','ConvolutionKernel',
'PatientSex','EthnicGroup','IterativeReconAnnotation',
'IterativeReconConfiguration','IterativeReconLevel',
'ProtocolName','ReconAlgo','ReconAlgoManuf']:
model_a_df = pd.read_csv(os.path.join(model_a_path, attribute+'_AUROC.csv'),header=0,index_col=0)
model_b_df = pd.read_csv(os.path.join(model_b_path,attribute+'_AUROC.csv'),header=0,index_col=0)
model_a_df_w_diff = add_diff_column(model_a_df,model_a_descriptor)
model_b_df_w_diff = add_diff_column(model_b_df,model_b_descriptor)
#Combine the dataframes
combined = pd.concat([model_a_df_w_diff, model_b_df_w_diff],axis=0, ignore_index=True)
#sort by model and then by difference
combined = combined.sort_values(by=['Model','Max AUROC Difference'],ascending=[False,True])
#make plots
make_bar_plot_per_abnormality(combined, attribute, results_dir)
make_boxplot_agg_abnormalities(combined, attribute, results_dir)
make_boxplot_agg_abnormality_groups(combined, attribute, results_dir)
def add_diff_column(df, model_descriptor):
    """Reduce a per-attribute AUROC table to one spread value per abnormality.

    For each abnormality (row), compute the spread between the best and the
    worst AUROC across the attribute options (columns).  A large spread is
    bad: it means performance varies a lot with that scan attribute, hinting
    the model may exploit scanner information rather than anatomy.

    The 'Count' row (sample counts, not AUROCs) is dropped, and the result
    is returned in tidy form for seaborn plotting with columns
    ['Abnormality', 'Max AUROC Difference', 'Model'].
    """
    df['Maximum'] = df.max(axis=1)
    df['Minimum'] = df.min(axis=1)
    df['Max AUROC Difference'] = df['Maximum'] - df['Minimum']
    # 'Count' holds per-option sample counts rather than AUROCs.
    df = df.drop(index='Count')
    # Tag every row with the model so seaborn can hue by it.
    df['Model'] = model_descriptor
    tidy = df.reset_index().rename(columns={'index': 'Abnormality'})
    return tidy[['Abnormality', 'Max AUROC Difference', 'Model']]
def make_bar_plot_per_abnormality(combined, attribute, results_dir):
    """Make bar plot where each abnormality has two bars, one bar for Model A
    and one bar for Model B. The y axis shows the Max AUROC Difference, so
    lower is better."""
    fig, ax = plt.subplots(figsize=(16,8))
    # NOTE(review): hue_order is hard-coded to ['Base','Mask'] — this assumes
    # the model descriptors passed upstream are exactly those strings; confirm.
    seaborn.barplot(x = 'Abnormality', y = 'Max AUROC Difference', data = combined,
                hue = 'Model', hue_order = ['Base','Mask'], ax = ax)
    # Abnormality names are long; rotate so they stay legible.
    plt.xticks(rotation=90, fontsize='x-small')
    plt.savefig(os.path.join(results_dir,attribute+'_BarPerAbn.png'))
    plt.close()
def make_boxplot_agg_abnormalities(combined, attribute, results_dir):
    """Boxplot where different abnormalities are aggregated for each model,
    and the y axis shows the Max AUROC Difference, so a lower overall
    boxplot is better"""
    fig, ax = plt.subplots(figsize=(6,6))
    # NOTE(review): order is hard-coded to ['Base','Mask'] — assumes those
    # are the model descriptors used upstream; confirm.
    seaborn.boxplot(x = 'Model', y = 'Max AUROC Difference', data = combined, ax = ax, order=['Base','Mask'])
    plt.title('Max AUROC Difference \nAcross Abnormalities',fontsize='xx-large')
    increase_label_sizes(plt)
    plt.savefig(os.path.join(results_dir,attribute+'_BoxAggAbns.png'))
    plt.close()
def make_boxplot_agg_abnormality_groups(combined, attribute, results_dir):
    """Grouped boxplot where different abnormalities are aggregated for each
    model, but abnormalities are split up according to their organ: lung,
    heart, great_vessel, or mediastinum. The y axis shows the Max AUROC
    Difference, so a lower overall boxplot is better."""
    #Assign an organ to each abnormality
    combined['Organ']=''
    for idx in combined.index.values.tolist():
        abnormality = combined.at[idx,'Abnormality']
        # Abnormality names embed their organ as a substring,
        # e.g. 'lung_nodule', 'heart_cardiomegaly' (see the assert below).
        if 'lung' in abnormality:
            combined.at[idx,'Organ'] = 'lung'
        elif 'heart' in abnormality:
            combined.at[idx,'Organ'] = 'heart'
        elif 'vessel' in abnormality:
            combined.at[idx,'Organ'] = 'great_vessel'
        elif 'mediastinum' in abnormality:
            combined.at[idx,'Organ'] = 'mediastinum'
    #Sanity check: make sure every abnormality has an organ assigned
    assert combined[combined['Organ']==''].shape[0]==0
    #Make plot
    fig, ax = plt.subplots(figsize=(8,8))
    # NOTE(review): order hard-coded to ['Base','Mask']; see note in the
    # other plotting helpers.
    seaborn.boxplot(x = 'Model', y = 'Max AUROC Difference', order = ['Base','Mask'],
                    hue = 'Organ', data = combined, ax = ax, palette = 'mako')
    plt.title('Max AUROC Difference\nAcross Grouped Abnormalities',fontsize='xx-large')
    increase_label_sizes(plt)
    plt.savefig(os.path.join(results_dir,attribute+'_BoxAggAbnsByOrgan.png'))
    plt.close()
def increase_label_sizes(plt):
    """Bump axis-label and tick-label font sizes on the current figure.

    `plt` is the pyplot module, deliberately passed in by callers so this
    helper works with whatever pyplot instance they hold.
    """
    for axis in plt.gcf().axes:
        axis.set_xlabel(axis.get_xlabel(), fontsize='x-large')
        axis.set_ylabel(axis.get_ylabel(), fontsize='x-large')
    plt.xticks(fontsize='x-large')
    plt.yticks(fontsize='x-large')
{
"api_name": "matplotlib.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ioff",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.exists",... |
71964995709 | from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
import os
import dill as dpickle
import numpy as np
import pandas as pd
import logging
class MLPWrapper:
    """Wrapper for Multi-Layer Perceptron classifier"""

    def __init__(self,
                 clf,
                 model_file="model.dpkl",
                 precision_threshold=0.7,
                 recall_threshold=0.5,
                 load_from_model=False):
        """Initialize parameters of the MLP classifier
        Args:
            clf: a sklearn.neural_network.MLPClassifier object
            model_file: the local path to save or load model
            precision_threshold: the threshold that the precision of one label must meet in order to be predicted
            recall_threshold: the threshold that the recall of one label must meet in order to be predicted
            load_from_model: load classifier from model file or not
        """
        if clf:
            self.clf = clf
        elif load_from_model:
            self.load_model(model_file=model_file)
        else:
            raise Exception("You need to pass a MLPClassifier object to the wrapper")
        self.model_file = model_file
        self.precision_threshold = precision_threshold
        self.recall_threshold = recall_threshold
        # precisions/probability_thresholds/recalls are dict
        # {label_index: number or None}
        self.precisions = None
        self.probability_thresholds = None
        self.recalls = None
        # count of labels
        self.total_labels_count = None

    def fit(self, X, y):
        """Train the classifier
        Args:
            X: features, numpy.array
            y: labels, numpy.array
        """
        self.clf.fit(X, y)

    def predict_probabilities(self, X):
        """Predict probabilities of all labels for data
        Args:
            X: features, numpy.array
        Return: a list, shape (n_samples, n_classes)
        """
        return self.clf.predict_proba(X)

    def find_probability_thresholds(self, X, y, test_size=0.3):
        """Split the dataset into training and testing to find probability thresholds for all labels
        Args:
            X: features, numpy.array
            y: labels, numpy.array
        """
        # split data
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1234)
        self.fit(X_train, y_train)
        y_pred = self.predict_probabilities(X_test)
        self.probability_thresholds = {}
        self.precisions = {}
        self.recalls = {}
        self.total_labels_count = len(y_test[0])
        for label in range(self.total_labels_count):
            # find the probability for each label
            best_precision, best_recall, best_threshold = 0.0, 0.0, None
            precision, recall, threshold = precision_recall_curve(np.array(y_test)[:, label], y_pred[:, label])
            for prec, reca, thre in zip(precision[:-1], recall[:-1], threshold):
                # precision, recall must meet two thresholds respectively
                if prec >= self.precision_threshold and reca >= self.recall_threshold:
                    # choose the threshold with the higher precision
                    if prec > best_precision:
                        best_precision = prec
                        best_recall = reca
                        best_threshold = thre
            # self.probability_thresholds is a dict {label_index: probability_threshold}
            # If probability_thresholds[label] is None, do not predict this label always, which
            # means this label is in the excluded list because it does not satisfy
            # both of the precision and recall thresholds
            self.probability_thresholds[label] = best_threshold
            self.precisions[label] = best_precision
            self.recalls[label] = best_recall

    def grid_search(self, params=None, cv=5, n_jobs=-1):
        """Grid search to find the parameters for the best classifier
        Args:
            params: parameter settings to try
                    a dict with param names as keys and lists of settings as values
            cv: cross-validation splitting strategy, int
            n_jobs: number of jobs to run in parallel, int or None
        """
        if not params:
            # default parameters to try
            params = {'hidden_layer_sizes': [(100,), (200,), (400, ), (50, 50), (100, 100), (200, 200)],
                      'alpha': [.001, .01, .1, 1, 10],
                      'learning_rate': ['constant', 'adaptive'],
                      'learning_rate_init': [.001, .01, .1]}
        self.clf = GridSearchCV(self.clf, params, cv=cv, n_jobs=n_jobs)

    def save_model(self, model_file=None):
        """Save the model to the local path
        Args:
            model_file: The local path to save the model, str or None
                        if None, use the property of this class.
        """
        if model_file:
            self.model_file = model_file
        with open(self.model_file, 'wb') as f:
            dpickle.dump(self.clf, f)

    def load_model(self, model_file=None):
        """Load the model from the local path
        Args:
            model_file: The local path to load the model, str or None
                        if None, use the property of this class.
        """
        if model_file:
            self.model_file = model_file
        if not os.path.exists(self.model_file):
            # Bug fix: the message was a plain string, so the path was never
            # interpolated ("{self.model_file}" appeared literally).
            raise Exception(f"Model path {self.model_file} does not exist")
        with open(self.model_file, 'rb') as f:
            self.clf = dpickle.load(f)
def calculate_auc(predictions, y_holdout, label_columns):
    """Calculate per-label AUC and the support-weighted average AUC.
    Args:
        Predictions: num_samples x num_features array
        y_holdout: Labels "one" hot encoded; num_samples x num_labels
        label_columns: List of labels
    Returns:
        (df, weightedavg_auc): DataFrame with columns ['label', 'auc',
        'count'] and the weighted-average AUC as a float.
    """
    auc_scores = []
    for i, l in enumerate(label_columns):
        y_hat = predictions[:, i]
        y = y_holdout[:, i]
        auc_scores.append(roc_auc_score(y_true=y, y_score=y_hat))
    # Positive-sample count per label; loop-invariant, so compute it once
    # (previously recomputed on every iteration).
    counts = y_holdout.sum(axis=0)
    df = pd.DataFrame({'label': label_columns, 'auc': auc_scores, 'count': counts})
    # Bug fix: `display` is IPython-only and raised NameError in plain
    # scripts; print works everywhere.
    print(df)
    weightedavg_auc = (df['auc'] * df['count']).sum() / df['count'].sum()
    print(f'Weighted Average AUC: {weightedavg_auc}')
    return df, weightedavg_auc
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 83,
"usage_type": "call"
}... |
23555067153 | #!/usr/bin/python3
import numpy, tqdm, json, os, random
import scipy.signal
import util
import speech_features
RATE = 8000
NUMCEP = 16
CLASSES = 45
LENGTH = 4000
DELTA = 2000
LABEL_SCALE = 100
LABEL_SAVE_JSON = "switchboard-labels.json"
OUT_FILE = "melspecs-switchboard.npy"
EMPTY = "<EMPTY>"
SIL = "SIL"
SIL_DROPOUT = 0.5
def load(specf):
    """Load the fragmented spectrogram dataset stored under directory `specf`."""
    path = os.path.join(specf, OUT_FILE)
    return util.FragmentedFile(path).load()
def view(specf):
SAMPLES = 5
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot
data = load(specf)
fig, axes = pyplot.subplots(nrows=2, ncols=SAMPLES)
fig.set_size_inches(18, 6)
kmap, imap = load_label_map(os.path.join(specf, LABEL_SAVE_JSON))
size = len(kmap)
for i, (x, y, l) in zip(range(SAMPLES), data):
axes[0, i].imshow(x.T, cmap="hot", interpolation="bicubic", aspect="auto")
y = util.onehot(y, size).T
for j in range(len(y)):
axes[1, i].plot(y[j])
title = [imap[i] for i in l if i]
axes[0, i].set_title(", ".join(title))
pyplot.savefig("switchboard-mfcc-samples.png", bbox_inches="tight")
def create_spectrograms(dataf):
data = list(_load(dataf))
Xa = []
Xb = []
ya = []
yb = []
la = []
lb = []
all_labels = set()
for num, rate, waveA, waveB, pA, pB, sA, sB in tqdm.tqdm(data, desc="Processing data", ncols=80):
assert rate == RATE
waveA = remove_noise(waveA)
waveB = remove_noise(waveB)
yA = match_labels(waveA, pA)
yB = match_labels(waveB, pB)
for wavA, slcA, slcy in slice_step(waveA, yA, pA, LENGTH, DELTA):
if keep_slice(slcA):
melA = convert_spectrogram(wavA)
Xa.append(melA)
ya.append(slcA)
la.append(slcy)
all_labels.update(slcA)
for wavB, slcB, slcy in slice_step(waveB, yB, pB, LENGTH, DELTA):
if keep_slice(slcB):
melB = convert_spectrogram(wavB)
Xb.append(melB)
yb.append(slcB)
lb.append(slcy)
all_labels.update(slcB)
print('''
***
Skipped %d files because they were shorter than 1 second.
***
''' % SKIPPED)
all_labels = sorted(all_labels)
assert all_labels[0] == EMPTY
assert len(all_labels) == CLASSES + 1
DATA_DIR = os.path.dirname(dataf)
label_file = os.path.join(DATA_DIR, LABEL_SAVE_JSON)
save_label_map(all_labels, label_file)
keymap, idxmap = load_label_map(label_file)
ya = convert_key2idx(keymap, ya)
yb = convert_key2idx(keymap, yb)
la = convert_key2idx(keymap, la)
lb = convert_key2idx(keymap, lb)
assert len(Xa) == len(ya) == len(la)
assert len(Xb) == len(yb) == len(lb)
out_file = os.path.join(DATA_DIR, OUT_FILE)
X = Xa + Xb
Y = ya + yb
L = la + lb
assert len(X) == len(Y) == len(L)
L = pad_fitlargest(L)
fragfile = util.FragmentedFile(out_file)
fragfile.dump(len(X), zip(X, Y, L))
# === PRIVATE ===
SKIPPED = 0
def pad_fitlargest(labels):
    """Right-pad every label sequence with zeros to the length of the longest
    one, returning int32 numpy arrays.  Class 0 is EMPTY, so zero padding is
    semantically a no-label region."""
    longest = max(len(seq) for seq in labels)
    print("Labels padded to length: %d" % longest)
    padded = []
    for seq in labels:
        buf = numpy.zeros(longest).astype(numpy.int32)
        buf[:len(seq)] = seq
        padded.append(buf)
    return padded
def keep_slice(slc):
    """Keep every slice containing speech; drop all-silence slices with
    probability SIL_DROPOUT."""
    if any(label != SIL for label in slc):
        return True
    return random.random() > SIL_DROPOUT
def slice_step(wav, lab, phns, length, step):
    """Yield (wave_window, downsampled_label_window, phoneme_names) tuples of
    `length` samples taken every `step` samples across `wav`.

    Label windows are subsampled by the global LABEL_SCALE; a final
    right-aligned window covers any remainder.  Waves shorter than `length`
    (or whose length differs from `lab`) are skipped and counted in the
    global SKIPPED.
    """
    if len(wav) == len(lab) and len(wav) > length:
        def locate_phns(i):
            # Names of phoneme intervals overlapping [i, i+length).
            out = []
            for name, start, end, pid in phns:
                if start > end:
                    # Normalize reversed intervals.
                    start, end = end, start
                if start >= i+length:
                    # NOTE(review): early break assumes phns is sorted by
                    # start time — confirm against the annotation loader.
                    break
                elif end < i:
                    continue
                else:
                    out.append(name)
            return out
        d, r = divmod(len(wav)-length, step)
        for i in range(0, d*step, step):
            yield wav[i:i+length], lab[i:i+length][::LABEL_SCALE], locate_phns(i)
        if r:
            # Remainder: one last window aligned to the end of the wave.
            yield wav[-length:], lab[-length:][::LABEL_SCALE], locate_phns(len(wav)-length)
    else:
        global SKIPPED
        SKIPPED += 1
def remove_noise(data):
    """High-pass filter the waveform at 40 Hz (2nd-order Butterworth, 8 kHz
    sample rate) to strip low-frequency rumble."""
    numerator, denominator = scipy.signal.butter(2, 40/(8000/2), btype="highpass")
    return scipy.signal.lfilter(numerator, denominator, data)
def convert_key2idx(keymap, y):
    """Map every label string in each sequence of `y` to its integer index
    via `keymap`, returning one numpy array per sequence (with a progress bar)."""
    progress = tqdm.tqdm(y, desc="Converting labels to ints", ncols=80)
    return [numpy.array([keymap[label] for label in seq]) for seq in progress]
def save_label_map(labels, fname):
    """Serialize the ordered label list to `fname` as JSON."""
    with open(fname, "w") as handle:
        json.dump(labels, handle)
def load_label_map(fname):
    """Load the JSON label list and return (keymap, idxmap).

    keymap maps label -> index; idxmap maps index -> label.
    """
    with open(fname, "r") as handle:
        labels = json.load(handle)
    print("Classes: %d" % (len(labels) - 1))
    # The first entry is the EMPTY padding class, hence CLASSES + 1 total.
    assert CLASSES + 1 == len(labels)
    idxmap = {index: label for index, label in enumerate(labels)}
    keymap = {label: index for index, label in idxmap.items()}
    return keymap, idxmap
def _load(dataf):
    """Generator: yield the arrays that were appended to `dataf` with
    successive numpy.save calls, showing a progress bar."""
    with open(dataf, "rb") as f:
        with tqdm.tqdm(desc="Loading %s" % dataf, ncols=80) as bar:
            while True:
                try:
                    yield numpy.load(f)
                    bar.update()
                except OSError:
                    # numpy.load raises OSError at end-of-file; used here as
                    # the stop condition.
                    break
def convert_spectrogram(wav):
    """Compute NUMCEP MFCC features for a RATE-Hz waveform slice."""
    return speech_features.mfcc(wav, samplerate=RATE, numcep=NUMCEP)
def match_labels(wav, phns):
    """Build a per-sample label list for `wav`: samples inside a phoneme
    interval get that phoneme's name, everything else stays EMPTY."""
    labels = [EMPTY] * len(wav)
    for name, start, end, pid in phns:
        # Normalize reversed intervals before slicing.
        lo, hi = (end, start) if start > end else (start, end)
        labels[lo:hi] = [name] * (hi - lo)
    return labels
@util.main(__name__)
def main(fname, sample=0):
    """CLI entry point: with sample != 0 render preview plots of the saved
    spectrograms; otherwise build the spectrogram dataset from `fname`."""
    # Command-line arguments arrive as strings.
    sample = int(sample)
    if sample:
        view(fname)
    else:
        create_spectrograms(fname)
| ychnlgy/Switchboard2.0 | src/load.py | load.py | py | 5,989 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "util.FragmentedFile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.use",
"l... |
71592748667 | import datetime
import json
import math
import threading
import traceback
import asyncio
import pandas as pd
from constants import (
MAP_MARKET_ID_TO_NAME as MARKET_MAP,
AVAILABLE_MARKETS,
ALL_MARKET_LABEL,
QUERY_INTERVAL,
MINT_DIVISOR,
CONTRACT_ADDRESS
)
from prometheus_metrics import metrics
from blockchain.client import ResourceClient as BlockchainClient
from subgraph.client import ResourceClient as SubgraphClient
# Contract addresses
# NOTE(review): self-assignment only re-binds the imported constant under
# this module's namespace; redundant but harmless.
CONTRACT_ADDRESS = CONTRACT_ADDRESS
def write_to_json(data, filename):
    """Write `data` to `filename` as pretty-printed JSON wrapped under a
    top-level "data" key."""
    payload = {"data": data}
    with open(filename, 'w') as handle:
        json.dump(payload, handle, indent=4)
async def process_live_positions(blockchain_client, live_positions):
    """
    Asynchronously process live positions data.
    Args:
        blockchain_client: client used to query position values on-chain.
        live_positions (list): List of live position data, where each element is a dictionary
            containing information about a live position.
    Returns:
        pandas.DataFrame: DataFrame containing processed live position information
        with added 'value', 'upnl' and 'upnl_pct' columns.
    This asynchronous function processes live position data by filtering out positions not in available markets,
    retrieving their current values, and calculating UPNL (Unrealized Profit and Loss) metrics.
    Note:
    - `AVAILABLE_MARKETS` and `MINT_DIVISOR` are module-level constants.
    - This function utilizes asynchronous operations for improved performance.
    """
    live_positions_df = pd.DataFrame(live_positions)
    # Keep only positions that belong to monitored markets.
    live_positions_df.drop(
        live_positions_df[~live_positions_df['market'].isin(AVAILABLE_MARKETS)].index,
        inplace = True
    )
    # values = await get_current_value_of_live_positions(blockchain_client, live_positions_df)
    positions = live_positions_df[['market', 'owner.id', 'position_id']].values.tolist()
    values = await blockchain_client.get_value_of_positions(positions)
    # Convert raw on-chain integers to token units.
    values = [v / MINT_DIVISOR for v in values]
    live_positions_df['value'] = values
    live_positions_df['upnl'] = live_positions_df['value'] - live_positions_df['collateral_rem']
    live_positions_df['upnl_pct'] = live_positions_df['upnl'] / live_positions_df['collateral_rem']
    return live_positions_df
def set_metrics_to_nan():
    """
    Set metrics values to NaN to indicate a query error.
    This function updates the upnl/collateral/upnl_pct gauges for the
    aggregate label and for every available market, setting their values to
    NaN so dashboards can distinguish "query failed" from a real zero.
    Note:
        - `metrics`, `AVAILABLE_MARKETS`, `MARKET_MAP` and `ALL_MARKET_LABEL`
          are module-level globals.
    Returns:
        None
    """
    # Set metric to NaN to indicate that something went wrong with the query
    metrics['upnl_gauge'].labels(market=ALL_MARKET_LABEL).set(math.nan)
    metrics['collateral_rem_gauge'].labels(market=ALL_MARKET_LABEL).set(math.nan)
    metrics['upnl_pct_gauge'].labels(market=ALL_MARKET_LABEL).set(math.nan)
    # Repeat for each individual market label.
    for market in AVAILABLE_MARKETS:
        metrics['upnl_gauge'].labels(market=MARKET_MAP[market]).set(math.nan)
        metrics['collateral_rem_gauge'].labels(market=MARKET_MAP[market]).set(math.nan)
        metrics['upnl_pct_gauge'].labels(market=MARKET_MAP[market]).set(math.nan)
def set_metrics(live_positions_df_with_curr_values):
"""
Set metrics based on processed live positions data.
Args:
live_positions_df_with_curr_values (pandas.DataFrame): DataFrame containing processed live position information.
Returns:
None
This function sets various metrics based on the processed live position data, including UPNL (Unrealized Profit and Loss),
collateral, and UPNL percentage metrics.
Args Details:
- `live_positions_df_with_curr_values`: DataFrame containing processed live position information.
Note:
- `set_metrics_to_nan`, `metrics`, `AVAILABLE_MARKETS`, `MARKET_MAP`, and `ALL_MARKET_LABEL` are assumed to be defined.
- This function updates metrics based on the provided live position data.
"""
if not len(live_positions_df_with_curr_values):
set_metrics_to_nan()
return
# Calculate current value of each live position
live_positions_df = live_positions_df_with_curr_values
# Set initial value of upnl metric so far
upnl_total = live_positions_df['upnl'].sum()
upnl_total_per_market_df = live_positions_df.groupby(by='market')['upnl'].sum().reset_index()
upnl_total_per_market = dict(zip(upnl_total_per_market_df['market'], upnl_total_per_market_df['upnl']))
metrics['upnl_gauge'].labels(market=ALL_MARKET_LABEL).set(upnl_total)
for market_id in upnl_total_per_market:
metrics['upnl_gauge'].labels(market=MARKET_MAP[market_id]).set(upnl_total_per_market[market_id])
# Set initial value for collateral metric so far
collateral_total = live_positions_df['collateral_rem'].sum()
collateral_total_per_market_df = live_positions_df.groupby(by='market')['collateral_rem'].sum().reset_index()
collateral_total_per_market = dict(zip(collateral_total_per_market_df['market'], collateral_total_per_market_df['collateral_rem']))
metrics['collateral_rem_gauge'].labels(market=ALL_MARKET_LABEL).set(collateral_total)
for market_id in collateral_total_per_market:
metrics['collateral_rem_gauge'].labels(market=MARKET_MAP[market_id]).set(collateral_total_per_market[market_id])
metrics['upnl_pct_gauge'].labels(market=MARKET_MAP[market_id]).set(
upnl_total_per_market[market_id] / collateral_total_per_market[market_id]
)
# live_positions_df['upnl_pct'] = live_positions_df['upnl'] / live_positions_df['collateral_rem']
metrics['upnl_pct_gauge'].labels(market=ALL_MARKET_LABEL).set(upnl_total / collateral_total)
async def query_upnl(subgraph_client, blockchain_client, stop_at_iteration=math.inf):
"""
Asynchronously query unrealized profit and loss (UPNL) metrics from the subgraph.
Args:
subgraph_client: An instance of the subgraph client used for querying data.
blockchain_client: An instance of the blockchain client used for querying data.
stop_at_iteration (int, optional): The maximum number of iterations to run the query. Default is math.inf.
Returns:
None
This asynchronous function queries UPNL metrics from the provided subgraph client, connects to the Arbitrum network,
and handles exceptions.
It performs the following steps:
1. Connects to the Arbitrum network.
2. Initializes metrics and sets them to NaN.
3. Fetches live positions from the subgraph and calculates current values.
4. Sets UPNL metrics based on the live positions and current values.
5. Runs iterations to update UPNL metrics.
6. Handles exceptions and resets metrics if an error occurs.
Note:
- `process_live_positions`, `set_metrics`, and `set_metrics_to_nan` are defined functions.
- `QUERY_INTERVAL` is a global variable.
- `network` is a global object representing network connectivity.
"""
print('[upnl] Starting query...')
blockchain_client.connect_to_network()
set_metrics_to_nan()
try:
iteration = 0
# Fetch all live positions so far from the subgraph
print('[upnl] Getting live positions from subgraph...')
live_positions = subgraph_client.get_all_live_positions()
print('live_positions', len(live_positions))
# write_to_json(live_positions, 'live_positions.json')
print('[upnl] Getting live positions current value from blockchain...')
live_positions_df_with_curr_values = await process_live_positions(blockchain_client, live_positions)
# write_to_json(live_positions_df_with_curr_values.to_dict(orient="records"), 'live_positions_with_current_values.json')
print('[upnl] Calculating upnl metrics...')
set_metrics(live_positions_df_with_curr_values)
await asyncio.sleep(QUERY_INTERVAL)
while iteration < stop_at_iteration:
try:
print('===================================')
print(f'[upnl] Running iteration #{iteration}...')
timestamp_start = math.ceil(datetime.datetime.now().timestamp())
print('[upnl] timestamp_start', datetime.datetime.utcfromtimestamp(timestamp_start).strftime('%Y-%m-%d %H:%M:%S'))
# Fetch all live positions so far from the subgraph
live_positions = subgraph_client.get_all_live_positions()
live_positions_df_with_curr_values = await process_live_positions(blockchain_client, live_positions)
set_metrics(live_positions_df_with_curr_values)
# Increment iteration
iteration += 1
# Wait for the next iteration
await asyncio.sleep(QUERY_INTERVAL)
# if iteration == 10:
# 1 / 0
except Exception as e:
print(
f"[upnl] An error occurred on iteration "
f"{iteration} timestamp_start "
f"{datetime.datetime.utcfromtimestamp(timestamp_start).strftime('%Y-%m-%d %H:%M:%S')}:", e)
traceback.print_exc()
except Exception as e:
print(f"[upnl] An error occurred:", e)
traceback.print_exc()
set_metrics_to_nan()
# Shared client instances, used both by the background thread and __main__.
subgraph_client = SubgraphClient()
blockchain_client = BlockchainClient()
# Background worker for importers; call thread.start() to launch it.
# Bug fix: the coroutine is now created lazily inside the thread target —
# previously it was instantiated eagerly at import time and never awaited.
thread = threading.Thread(
    target=lambda: asyncio.run(query_upnl(subgraph_client, blockchain_client)))
if __name__ == '__main__':
    # Bug fix: asyncio.run() requires a coroutine object; passing the bare
    # function `query_upnl` raised "a coroutine was expected".
    asyncio.run(query_upnl(subgraph_client, blockchain_client))
| overlay-market/ChainMonitoring | metrics/upnl.py | upnl.py | py | 9,876 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "constants.CONTRACT_ADDRESS",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "constants.AVAIL... |
29906192183 | # ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# Blender add-on registration metadata, read by Blender's add-on manager.
bl_info = {
    "name": "Surface Heat Diffuse Skinning",
    "author": "mesh online",
    "version": (3, 4, 2),
    # Minimum Blender version this add-on targets (2.80 API).
    "blender": (2, 80, 0),
    "location": "View3D > UI > Mesh Online",
    "description": "Surface Heat Diffuse Skinning",
    "warning": "",
    "wiki_url": "http://www.mesh-online.net/vhd.html",
    "category": "Object"
}
import bpy
import sys
import os
import time
import platform
from subprocess import PIPE, Popen
from threading import Thread
from bpy.props import *
from queue import Queue, Empty
class SFC_OT_ModalTimerOperator(bpy.types.Operator):
    """Modal operator that exports the selection, runs the external "shd"
    heat-diffuse solver in a background process, polls its stdout via a
    timer, and applies the resulting vertex weights when it finishes."""
    bl_idname = "wm.surface_heat_diffuse"
    bl_label = "Surface Heat Diffuse Skinning"
    bl_options = {'REGISTER', 'UNDO'}

    # Modal state (class-level; the operator is not re-entrant).
    _timer = None                          # window-manager timer handle
    _pid = None                            # Popen handle of the background solver
    _queue = None                          # queue of stdout lines from the solver
    _objs = []                             # meshes being skinned
    _permulation = []                      # (global index, local index, obj) per vertex
    _selected_indices = []                 # per-object selected vertex indices (protection)
    _selected_group_index_weights = []     # saved (group, vert, weight) for protected verts
    _start_time = None                     # wall-clock start, for the completion report

    def write_bone_data(self, obj, filepath):
        """Write deform bones of armature `obj` to `filepath` in world space.

        Format: one "b,name,hx,hy,hz,tx,ty,tz" line per deform bone; commas
        in bone names are escaped as "\\;".
        """
        f = open(filepath, 'w', encoding='utf-8')
        f.write("# surface heat diffuse bone export.\n")
        amt = obj.data
        # edit_bones are only accessible in EDIT mode.
        bpy.ops.object.mode_set(mode='EDIT')
        for bone in amt.edit_bones:
            if bone.use_deform:
                world_bone_head = obj.matrix_world @ bone.head
                world_bone_tail = obj.matrix_world @ bone.tail
                f.write("b,{},{},{},{},{},{},{}\n".format(
                    bone.name.replace(",", "\\;"), world_bone_head[0], world_bone_head[1], world_bone_head[2],
                    world_bone_tail[0], world_bone_tail[1], world_bone_tail[2]))
        bpy.ops.object.mode_set(mode='OBJECT')
        f.close()

    def write_mesh_data(self, objs, filepath):
        """Write vertices and polygons of all meshes in `objs` to `filepath`.

        Vertices are written in world space; face indices are offset so all
        meshes share one global vertex numbering.
        """
        f = open(filepath, 'w', encoding='utf-8')
        f.write("# surface heat diffuse mesh export.\n")
        vertex_offset = 0
        for obj in objs:
            for v in obj.data.vertices:
                world_v_co = obj.matrix_world @ v.co
                f.write("v,{},{},{}\n".format(world_v_co[0], world_v_co[1], world_v_co[2]))
            for poly in obj.data.polygons:
                f.write("f");
                for loop_ind in poly.loop_indices:
                    vert_ind = obj.data.loops[loop_ind].vertex_index
                    f.write(",{}".format(vertex_offset + vert_ind))
                f.write("\n")
            vertex_offset += len(obj.data.vertices)
        f.close()

    def read_weight_data(self, objs, filepath):
        """Read solver output from `filepath` and assign vertex-group weights.

        "b" lines declare bones (vertex groups are (re)created per mesh);
        "w" lines carry (global vertex index, bone index, weight) triples.
        When surface_protect is on, weights of selected vertices are saved
        first and restored afterwards.
        """
        # make permulation for all vertices
        vertex_offset = 0;
        for obj in objs:
            for index in range(len(obj.data.vertices)):
                self._permulation.append((vertex_offset + index, index, obj))
            vertex_offset += len(obj.data.vertices)
        if bpy.context.scene.surface_protect:
            for index in range(len(objs)):
                obj = objs[index]
                # get selected vertex indices
                self._selected_indices.append([i.index for i in obj.data.vertices if i.select])
                self._selected_group_index_weights.append([])
                # push protected vertices weight
                for vert_ind in self._selected_indices[index]:
                    for g in obj.data.vertices[vert_ind].groups:
                        self._selected_group_index_weights[index].append((obj.vertex_groups[g.group].name, vert_ind, g.weight))
        f = open(filepath, 'r', encoding='utf-8')
        bones = []
        for line in f:
            if len(line) == 0:
                continue
            tokens = line.strip("\r\n").split(",")
            if tokens[0] == "b":
                # un-escape commas in bone names (see write_bone_data)
                group_name = tokens[1].replace("\\;", ",")
                bones.append(group_name)
                for obj in objs:
                    #check for existing group with the same name
                    if None != obj.vertex_groups.get(group_name):
                        group = obj.vertex_groups[group_name]
                        obj.vertex_groups.remove(group)
                    obj.vertex_groups.new(name = group_name)
            if tokens[0] == "w":
                group_name = bones[int(tokens[2])]
                index = int(tokens[1])
                vert_ind = self._permulation[index][1]
                weight = float(tokens[3])
                obj = self._permulation[index][2]
                # protect vertices weight
                if bpy.context.scene.surface_protect and vert_ind in self._selected_indices[objs.index(obj)]:
                    continue
                obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
        f.close()
        if bpy.context.scene.surface_protect:
            for index in range(len(objs)):
                obj = objs[index]
                # pop protected vertices weight
                for (group_name, vert_ind, weight) in self._selected_group_index_weights[index]:
                    obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')

    def modal(self, context, event):
        """Timer callback: relay solver output, or finish when it exits.

        ESC terminates the solver and cancels. On normal completion the
        weights are read back and the meshes are parented to the armature.
        """
        if event.type == 'ESC':
            self._pid.terminate()
            return self.cancel(context)
        if event.type == 'TIMER':
            # background task is still running
            if None == self._pid.poll():
                # read line without blocking
                try: rawline = self._queue.get_nowait()
                except Empty:
                    pass
                else:
                    line = rawline.decode().strip("\r\n")
                    self.report({'INFO'}, line)
            else:
                # background task finished running
                self.read_weight_data(self._objs, os.path.join(os.path.dirname(__file__), "data", "untitled-weight.txt"))
                running_time = time.time() - self._start_time
                self.report({'INFO'}, "".join(("Complete, ", "running time: ", \
                    str(int(running_time / 60))," minutes ", str(int(running_time % 60)), " seconds")))
                # bind meshes to the armature
                bpy.ops.object.parent_set(type='ARMATURE')
                return self.cancel(context)
        return {'RUNNING_MODAL'}

    def execute(self, context):
        """Validate the selection, export mesh/bone data, launch the external
        solver and start the polling timer."""
        arm_count = 0
        obj_count = 0
        for ob in bpy.context.selected_objects:
            if 'ARMATURE' == ob.type:
                arm_count += 1
            if 'MESH' == ob.type:
                obj_count += 1
        if not (context.mode == 'OBJECT' and arm_count == 1 and obj_count >= 1):
            self.report({'ERROR'}, "Please select one armature and at least one mesh in 'OBJECT' mode, then try again.")
            return {'CANCELLED'}
        # reset modal state from any previous run
        self._objs = []
        self._permulation = []
        self._selected_indices = []
        self._selected_group_index_weights = []
        arm = None
        objs = []
        # get armature and mesh
        for ob in bpy.context.selected_objects:
            if 'ARMATURE' == ob.type:
                arm = ob
            if 'MESH' == ob.type:
                objs.append(ob)
        # sort meshes by name
        objs.sort(key=lambda obj:obj.name);
        # save the reference for later use
        self._objs = objs
        for obj in objs:
            # focus on the mesh
            bpy.context.view_layer.objects.active = obj
            # synchronize data
            bpy.ops.object.mode_set(mode='OBJECT')
        # write mesh data
        self.write_mesh_data(objs, os.path.join(os.path.dirname(__file__), "data", "untitled-mesh.txt"))
        # we must focus on the armature before we can write bone data
        bpy.context.view_layer.objects.active = arm
        # synchronize data
        bpy.ops.object.mode_set(mode='OBJECT')
        # write bone data
        self.write_bone_data(arm, os.path.join(os.path.dirname(__file__), "data", "untitled-bone.txt"))
        # do voxel skinning in background
        ON_POSIX = 'posix' in sys.builtin_module_names
        # chmod so the bundled solver binary is executable
        if ON_POSIX:
            os.chmod(os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd"), 0o755)
        def enqueue_output(out, queue):
            # reader-thread body: forward solver stdout lines to the queue
            for line in iter(out.readline, b''):
                queue.put(line)
            out.close()
        executable_path = None
        if platform.system() == 'Windows':
            if platform.machine().endswith('64'):
                executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x64", "shd")
            else:
                executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x86", "shd")
        else:
            executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd")
        self._pid = Popen([executable_path,
                           "untitled-mesh.txt",
                           "untitled-bone.txt",
                           "untitled-weight.txt",
                           str(context.scene.surface_resolution),
                           str(context.scene.surface_loops),
                           str(context.scene.surface_samples),
                           str(context.scene.surface_influence),
                           str(context.scene.surface_falloff),
                           context.scene.surface_sharpness,
                           "y" if context.scene.detect_surface_solidify else "n"],
                          cwd = os.path.join(os.path.dirname(__file__), "data"),
                          stdout = PIPE,
                          bufsize = 1,
                          close_fds = ON_POSIX)
        self._queue = Queue()
        # daemon reader thread so Blender can exit even if the solver hangs
        t = Thread(target=enqueue_output, args=(self._pid.stdout, self._queue))
        t.daemon = True
        t.start()
        self._start_time = time.time()
        # start timer to poll data
        self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        """Stop the polling timer and clear all modal state."""
        # remove timer
        context.window_manager.event_timer_remove(self._timer)
        self._objs = []
        self._permulation = []
        self._selected_indices = []
        self._selected_group_index_weights = []
        return {'CANCELLED'}
def init_properties():
    """Register the add-on's configurable settings on bpy.types.Scene.

    These properties back the UI panel and are passed as command-line
    arguments to the external "shd" solver. clear_properties() removes them.
    """
    bpy.types.Scene.surface_resolution = IntProperty(
        name = "Voxel Resolution",
        description = "Maximum voxel grid size",
        default = 128,
        min = 32,
        max = 1024)
    bpy.types.Scene.surface_loops = IntProperty(
        name = "Diffuse Loops",
        description = "Heat diffuse pass = Voxel Resolution * Diffuse Loops",
        default = 5,
        min = 1,
        max = 9)
    bpy.types.Scene.surface_samples = IntProperty(
        name = "Sample Rays",
        description = "Ray samples count",
        default = 64,
        min = 32,
        max = 128)
    bpy.types.Scene.surface_influence = IntProperty(
        name = "Influence Bones",
        description = "Max influence bones per vertex, please decrease the value (such as 4) for mobile devices",
        default = 8,
        min = 1,
        max = 128)
    bpy.types.Scene.surface_falloff = FloatProperty(
        name = "Diffuse Falloff",
        description = "Heat diffuse falloff",
        default = 0.2,
        min = 0.01,
        max = 0.99)
    bpy.types.Scene.surface_protect = BoolProperty(
        name = "Protect Selected Vertex Weight",
        description = "Protect selected vertex weight",
        default = False)
    bpy.types.Scene.surface_sharpness = EnumProperty(
        name = "Edges",
        description = "Edges",
        items = [
            ('1','Soft','Soft Curvature'),
            ('2','Normal','Normal Curvature'),
            ('3','Sharp','Sharp Curvature'),
            ('4','Sharpest','Sharpest Curvature')],
        default = '3')
    bpy.types.Scene.detect_surface_solidify = BoolProperty(
        name = "Detect Solidify",
        description = "Detect solidified clothes, if you enable this option, make sure that all bones are in the charecter's volume, otherwise, the result may be wrong",
        default = False)
def clear_properties():
    """Remove every Scene property registered by init_properties().

    Bug fix: the original list omitted "surface_sharpness" and
    "detect_surface_solidify", leaking those properties on unregister.
    Also uses delattr() instead of building a "del ..." string for exec().
    """
    props = ["surface_resolution",
             "surface_samples",
             "surface_falloff",
             "surface_loops",
             "surface_influence",
             "surface_protect",
             "surface_sharpness",
             "detect_surface_solidify"]
    for p in props:
        # only delete properties that are actually registered
        if p in bpy.types.Scene.bl_rna.properties:
            delattr(bpy.types.Scene, p)
class SFC_PT_SurfaceHeatDiffuseSkinningPanel(bpy.types.Panel):
    """N-panel UI exposing the skinning settings and the run button
    (View3D > UI > Mesh Online)."""
    bl_label = "Surface Heat Diffuse Skinning"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Mesh Online'
    @classmethod
    def poll(self, context):
        # Always visible; selection validity is checked in the operator.
        return True
    def draw(self, context):
        """Draw one widget per Scene property, then the operator button."""
        layout = self.layout
        layout.prop(context.scene, 'surface_resolution', icon='BLENDER', toggle=True)
        layout.prop(context.scene, 'surface_loops', icon='BLENDER', toggle=True)
        layout.prop(context.scene, 'surface_samples', icon='BLENDER', toggle=True)
        layout.prop(context.scene, 'surface_influence', icon='BLENDER', toggle=True)
        layout.prop(context.scene, 'surface_falloff', icon='BLENDER', toggle=True)
        layout.prop(context.scene, 'surface_sharpness')
        layout.prop(context.scene, 'surface_protect')
        layout.prop(context.scene, 'detect_surface_solidify')
        row = layout.row()
        row.operator("wm.surface_heat_diffuse")
def register():
    """Add-on entry point: register UI panel, operator and Scene properties."""
    bpy.utils.register_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
    bpy.utils.register_class(SFC_OT_ModalTimerOperator)
    init_properties()
def unregister():
    """Add-on exit point: unregister classes and remove Scene properties."""
    bpy.utils.unregister_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
    bpy.utils.unregister_class(SFC_OT_ModalTimerOperator)
    clear_properties()
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
| meshonline/Surface-Heat-Diffuse-Skinning | addon/surface_heat_diffuse_skinning/__init__.py | __init__.py | py | 14,549 | python | en | code | 170 | github-code | 6 | [
{
"api_name": "bpy.types",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.mode_set",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.mo... |
35243133405 | import sys
import os
import json
if "test_script" not in sys.modules:
from pyakaikkr.CompareCifKkr import CompareCifKkr
def get_kkr_struc_from_cif(ciffilepath: str, specx: str, displc: bool,
                           use_bravais=True, remove_temperaryfiles=True,
                           fmt="cif"):
    """Get KKR structure parameters from a cif file.

    Note: `displc` should be False for the akaikkr program and True for
    akaikkr_cnd (see the caller).
    If use_bravais is True, the bravais lattice names (bcc, fcc, ...) and
    a, c/a, b/a, alpha, beta, gamma are used.
    If use_bravais is False, aux and explicit lattice vectors are used.
    rmt, lmax, ..., displc are set to their default values.

    On conversion failure this function prints diagnostics and terminates
    the process via sys.exit(10).

    Args:
        ciffilepath (str): cif file path
        specx (str): path to the specx executable
        displc (bool): whether the displc parameter is added.
        use_bravais (bool, optional): use bravais lattice. Defaults to True.
        remove_temperaryfiles (bool, optional): delete temporary files on exit. Defaults to True.
        fmt (str, optional): input file format. Defaults to "cif".

    Returns:
        dict: kkr structure parameters on success.
    """
    comp = CompareCifKkr(ciffilepath, specx, displc=displc, fmt=fmt)
    result = comp.convert_and_compare(use_bravais=use_bravais)
    struc_param = None
    if result == comp.SUCCESS:
        struc_param = comp.get_structure_param(
            remove_temperaryfiles=remove_temperaryfiles)
    else:
        print("failed to convert the cif file")
        print("msg=", comp.msg)
        print("result=", result)
        sys.exit(10)
    try:
        os.rmdir(comp.parent_directory)
    except OSError:
        # ignore errors on removing output directory
        pass
    return struc_param
if __name__ == "__main__":
    def main(path_prefix, ciffile_path, akaikkr_type="akaikkr"):
        """Convert a cif file to the PyAkaiKKR dict format and print it.

        The output is printed to stdout as JSON.

        Args:
            path_prefix (str): path prefix to AkaiKKR
            ciffile_path (str): the cif file name.
            akaikkr_type (str, optional): "akaikkr" or "akaikkr_cnd".
                Defaults to "akaikkr".

        Raises:
            ValueError: unknown akaikkr_type.
        """
        if akaikkr_type == "akaikkr":
            displc = False
        elif akaikkr_type == "akaikkr_cnd":
            displc = True
        else:
            raise ValueError("unknown akaikkr_type={}".format(akaikkr_type))
        specx = os.path.join(path_prefix, akaikkr_type, "specx")
        use_bravais = True
        struc_param = get_kkr_struc_from_cif(ciffile_path, specx,
                                             use_bravais=use_bravais,
                                             displc=displc, fmt="cif")
        print()
        # fixed typo in the original message ("sturc_param")
        print("struc_param")
        if False:
            for key, value in struc_param.items():
                print(key, value)
        else:
            print(json.dumps(struc_param))

    def define_and_get_parse():
        """Define and parse the command-line options."""
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument("--akaikkr", default="kino/kit/AkaiKKRprogram.current.gfortran")
        parser.add_argument("--prefix", default="kino/kit/MaterialsLibrary")
        # bug fix: the original offered the choice "akaikkr_and", which main()
        # rejects with ValueError; the value it actually accepts is "akaikkr_cnd".
        parser.add_argument("--akaikkr_type", choices=["akaikkr", "akaikkr_cnd"], default="akaikkr")
        parser.add_argument("structure_file")
        # e.g. ="kino/kit/MaterialsLibrary/MaterialsLibrary/AtomWorkData/small_sites/made_by_kino/Co_P63mmc.cif"
        args = parser.parse_args()
        return args

    homedir = os.path.expanduser("~")
    args = define_and_get_parse()
    main(os.path.join(homedir, args.akaikkr),
         os.path.join(homedir, args.prefix, args.structure_file),
         args.akaikkr_type)
| AkaiKKRteam/AkaiKKRPythonUtil | util/cif2kkr_test_script/cif2kkr_convert_to_akaikkrparam_sample.py | cif2kkr_convert_to_akaikkrparam_sample.py | py | 3,825 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "sys.modules",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyakaikkr.CompareCifKkr.CompareCifKkr",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.rmd... |
7577852611 | from django import forms
from models import Attachment
class AttachmentForm(forms.ModelForm):
    """Upload form for Attachment that stamps the saved instance with the
    uploading user and an `actived` flag supplied at construction time."""

    class Meta:
        model = Attachment
        fields = ('file',)

    def __init__(self, *args, **kwargs):
        """Capture 'user' and 'actived' kwargs before delegating to ModelForm."""
        self.user = kwargs.pop('user', None)
        self.actived = kwargs.pop('actived', False)
        super(AttachmentForm, self).__init__(*args, **kwargs)

    def save(self):
        """Persist and return the attachment with user/actived applied."""
        instance = super(AttachmentForm, self).save(commit=False)
        instance.user = self.user
        instance.actived = self.actived
        instance.save()
        return instance
| vicalloy/django-lb-attachments | attachments/forms.py | forms.py | py | 616 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Attachment",
"line_number": 20,
"usage_type": "name"
}
] |
38316017126 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
#imports routes
from .routes import home_blueprint
# from .database.model import *
def create_app():
    """Application factory: build, configure and return the Flask app."""
    application = Flask(__name__)
    # load configuration object
    application.config.from_object("project.config.Config")
    # mount route blueprints
    application.register_blueprint(home_blueprint, url_prefix='/api/v1/home')
    # bind the shared SQLAlchemy instance to this app
    db.init_app(application)
    return application
| vipin733/flask_boilerplate | services/web/project/__init__.py | __init__.py | py | 427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "routes.home_blueprint",
"line_number": 18,
"usage_type": "argument"
}
] |
37842291992 | # ----------------------------------------------------------------------------------------------------------------------
# Implementation of k-Means Machine learning algorithm, tested using synthetic data created in script
#
# Sean Taylor Thomas
# 9/2021
# stth223@uky.edu
# ----------------------------------------------------------------------------------------------------------------------
import math
import random
import sys
import matplotlib.pyplot as plt
# Fixed seed so the synthetic dataset (and the clustering) is reproducible.
random.seed(1)
# Generate a synthetic 2-D dataset of up to `num_elements` random points.
dataset = []
dimensions = 2
num_elements = 1000
for x in range(num_elements):
    rand1 = random.randint(0, 250)
    rand2 = random.randint(0, 250)
    # exclude points lying exactly on the line y = 2x + 45
    if not rand2 == rand1 * 2 + 45:
        dataset.append([rand1, rand2])
def compute_centroid(element, centroids):
    """Return the index of the centroid closest (squared Euclidean) to element.

    Improvements over the original:
    - distances are computed over all coordinates of `element` (via zip),
      removing the dependency on the module-level `dimensions` global and
      generalizing to any dimensionality;
    - uses enumerate() instead of centroids.index(centroid), which was O(n)
      per centroid and returned the wrong index when `centroids` contained
      duplicates (possible, since seeds are drawn with random.choices).
    """
    which_centroid = 0
    min_dist = float('inf')
    for idx, centroid in enumerate(centroids):
        dist = sum((e - c) ** 2 for e, c in zip(element, centroid))
        if dist < min_dist:  # new minimum distance found
            which_centroid = idx
            min_dist = dist
    return which_centroid  # index of the closest centroid
def compute_cluster_mean(cluster):
    """Return the component-wise mean of the points in `cluster`.

    Bug fix: the original seeded the accumulator with a copy of cluster[0]
    and then added every element (including cluster[0]) again, so the first
    point was counted twice and the returned "mean" was wrong. The
    dimensionality is now taken from the data instead of the module-level
    `dimensions` global.
    """
    dims = len(cluster[0])
    totals = [0.0] * dims
    for element in cluster:
        for dim in range(dims):
            totals[dim] += element[dim]
    # divide each component sum by the number of points
    return [total / len(cluster) for total in totals]
max_iterations = 200
# Choose k initial centroids from the dataset at random.
# NOTE(review): random.choices samples WITH replacement, so duplicate
# seed centroids are possible — consider random.sample instead; confirm.
k = 5
centroids = []
centroids = random.choices(dataset, k=5)
iterations = 0  # number of loop iterations performed
isSame = 0  # flag: 1 once the centroids stop changing (convergence)
while iterations < max_iterations and not isSame:
    iterations += 1
    # Initialize a list of k empty clusters for this iteration.
    clusters = []
    iterator = 0
    for x in range(k):
        clusters.append(list())  # list representing each of the k clusters
        iterator += 1
    # Assignment step: put every point into the cluster of its nearest centroid.
    for element in dataset:
        closest_centroid_index = compute_centroid(element, centroids)  # index of centroid closest to element
        clusters[closest_centroid_index].append(element)  # group each point into a cluster
    same_centroids = 0  # counts centroids that did not move this iteration
    # Update step: recompute the centroid of each cluster (k-medoids style:
    # the new centroid is the cluster member nearest the literal mean).
    for cluster_k in clusters:
        average_of_cluster = compute_cluster_mean(cluster_k)  # literal average, not necessarily an element in cluster
        new_centroid = cluster_k[compute_centroid(average_of_cluster, cluster_k)]  # member closest to the mean
        # add one for each centroid that hasn't changed
        if new_centroid == centroids[clusters.index(cluster_k)]:
            same_centroids += 1
        centroids[clusters.index(cluster_k)] = new_centroid
    # converged when no centroid moved
    if same_centroids == k:
        isSame = 1
# Plot elements of each cluster as small stars -- up to 11 distinct colors.
clr = ["blue", "red", "green", "purple", "orange", "black", "brown", "cyan", "white", "yellow", "magenta"]
color_indx = 0
for cluster in clusters:
    x = []
    y = []
    for i in cluster:
        x.append(i[0])
        y.append(i[1])
    plt.scatter(x, y, label="Cluster " + str(color_indx), color=clr[color_indx%11], marker="*",
                s=30)
    color_indx += 1
# Plot the centroids as large stars.
color_indx = 0
for centroid in centroids:
    x = []
    y = []
    x.append(centroid[0])
    y.append(centroid[1])
    plt.scatter(x, y, label="Centroid " + str(color_indx), color=clr[color_indx%11], marker="*",
                s=450)
    color_indx += 1
plt.ylabel('y-axis')
plt.title("K-Means Clustering")
plt.legend()
plt.show()
# Compute the within-cluster sum of squares (WCSS) as a clustering quality metric.
total_cluster_sum =0
for cluster_k in range(len(clusters)):
    WCSS = 0
    for element in clusters[cluster_k]:
        for dim in range(dimensions):
            WCSS += abs(element[dim] - centroids[cluster_k][dim]) ** 2
    total_cluster_sum += WCSS
print("Average WCSS:", total_cluster_sum/k)
print("Number of Iterations: ", iterations)
# NOTE(review): the block below duplicates the plotting code above and
# produces a second identical figure — likely a copy/paste left-over.
# Plot elements of each cluster as small stars -- up to 11 distinct colors.
clr = ["blue", "red", "green", "purple", "orange", "black", "brown", "cyan","white","yellow","magenta"]
color_indx = 0
for cluster in clusters:
    x = []
    y = []
    for i in cluster:
        x.append(i[0])
        y.append(i[1])
    plt.scatter(x, y,label="Cluster "+str(color_indx), color=clr[color_indx%11], marker="*",
                s=30)
    color_indx += 1
# Plot the centroids as large stars.
color_indx=0
for centroid in centroids:
    x = []
    y = []
    x.append(centroid[0])
    y.append(centroid[1])
    plt.scatter(x, y, label="Centroid "+str(color_indx), color=clr[color_indx%11], marker="*",
                s=450)
    color_indx += 1
plt.ylabel('y-axis')
plt.title("K-Means Clustering")
plt.legend()
plt.show()
| STaylorT/Machine-Learning | K-Means.py | K-Means.py | py | 5,400 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_num... |
36387303386 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
from matplotlib.colors import LinearSegmentedColormap
from sklearn.preprocessing import LabelEncoder
def prep_data(data):
    """Normalize pixel columns, convert to tensors and split 80/20 train/test.

    Expects `data` to be a DataFrame whose last column is 'target' and whose
    remaining columns are 0-255 pixel values.

    Returns:
        (X_train, y_train, X_test, y_test, num_classes)
    """
    labels = data['target'].values
    # scale raw pixel values into [0, 1]
    features = data.iloc[:, :-1].values / 255.0
    X_tensor = torch.tensor(features, dtype=torch.float32)
    y_tensor = torch.tensor(labels, dtype=torch.long)
    # fixed random_state keeps the split reproducible
    X_train_tensor, X_test_tensor, y_train_tensor, y_test_tensor = train_test_split(
        X_tensor, y_tensor, test_size=0.2, random_state=42)
    num_classes = len(set(labels))
    print("Unique classes in target labels:", num_classes)
    return X_train_tensor, y_train_tensor, X_test_tensor, y_test_tensor, num_classes
def create_model(MaxoutNetworkWithSoftmax):
    """Instantiate the given network class for MNIST-sized input on CPU.

    Args:
        MaxoutNetworkWithSoftmax: network class taking (input_size, num_classes).

    Returns:
        (model, criterion, optimizer) — Adam(lr=0.001) and cross-entropy loss.
    """
    input_size, num_classes = 784, 10
    device = torch.device("cpu")
    net = MaxoutNetworkWithSoftmax(input_size, num_classes).to(device)
    opt = optim.Adam(net.parameters(), lr=0.001)
    loss_fn = nn.CrossEntropyLoss()
    return net, loss_fn, opt
def create_model_2(DeepMaxoutNetwork,X_train_tensor):
    """Build a deep maxout network plus loss, optimizer and loop settings.

    Returns (model, criterion, optimizer, num_epochs, Batch_size).
    Note: both an SGD and an Adam optimizer are constructed; only the Adam
    instance is returned (the `sgd`/`adam` locals act as a manual toggle).
    `X_train_tensor` is only used to read the input size (value unused).
    """
    # Hyperparameters
    num_epochs = 100
    Batch_size = 100
    # Create an instance of the DeepMaxoutNetwork
    deep_maxout_model = DeepMaxoutNetwork(input_dim=784, hidden_dim=100, output_dim=10, num_units=2, num_layers=3)
    input_size = X_train_tensor.shape[1]
    criterion = nn.CrossEntropyLoss()
    sgd = optim.SGD(deep_maxout_model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
    adam = optim.Adam(deep_maxout_model.parameters(), lr=0.01,weight_decay=1e-2)
    optimizer = adam
    return deep_maxout_model,criterion,optimizer, num_epochs, Batch_size
def create_model_3(ShallowRBF, X_train_tensor, centers):
    """Build a shallow RBF network plus its loss, optimizer and loop settings.

    Args:
        ShallowRBF: network class taking (input_dim, num_classes, num_centers).
        X_train_tensor: training features; kept for interface symmetry with
            the other create_model_* helpers (the input size is fixed at 784).
        centers: tensor of RBF centers; only its first dimension is used.

    Returns:
        (model, criterion, optimizer, num_epochs, batch_size)

    Cleanup: removed dead locals (learning_rate, momentum, num_classes,
    input_size, n_channels, num_units) and an SGD optimizer that was
    constructed but never used; behavior is unchanged.
    """
    num_epochs = 100
    Batch_size = 100
    RBF_model = ShallowRBF(input_dim=784, num_classes=10, num_centers=centers.shape[0])
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(RBF_model.parameters(), lr=0.01)
    return RBF_model, criterion, optimizer, num_epochs, Batch_size
def create_model_4(ShallowSoftmaxNetBN, X_train_tensor):
    """Build a shallow softmax+batch-norm network plus loss and optimizer.

    Args:
        ShallowSoftmaxNetBN: network class taking (input_dim, output_dim).
        X_train_tensor: training features; kept for interface symmetry with
            the other create_model_* helpers (the input size is fixed at 784).

    Returns:
        (model, criterion, optimizer, num_epochs, batch_size)

    Cleanup: removed dead locals (learning_rate, momentum, num_classes,
    input_size, n_channels, num_units) and an SGD optimizer that was
    constructed but never used; the returned Adam optimizer keeps the
    original settings (lr=0.01, weight_decay=1e-4).
    """
    num_epochs = 100
    Batch_size = 100
    ShallowSoftmax_model = ShallowSoftmaxNetBN(input_dim=784, output_dim=10)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(ShallowSoftmax_model.parameters(), lr=0.01, weight_decay=1e-4)
    return ShallowSoftmax_model, criterion, optimizer, num_epochs, Batch_size
def test_eps(test_Maxout,model,device,test_dataloader):
# Run test for each epsilon
accuracies = []
args = []
epsilons = [-15,-14,-13,-12,-11, -10,-9,-8,-7,-6, -5,-4,-3,-2,-1, 0,1,2,3,4, 5,6,7,8,9, 10,11,12,13,14, 15]
for eps in epsilons:
accuracy, arg = test_Maxout(model, device, test_dataloader, eps)
accuracies.append(accuracy)
args.append(arg)
return args, epsilons
def plot_eps(args,epsilons):
    """Plot per-class log-softmax outputs averaged over the batch vs epsilon.

    NOTE: mutates `args` in place — each entry is detached, converted to
    numpy, turned into log-softmax values, then averaged over the batch axis.
    Assumes each entry is a 2-D (batch, classes) tensor of raw logits with
    10 classes — TODO confirm against test_eps callers.
    """
    for i in range(len(args)):
        args[i] = args[i].detach().numpy()
        # numerically naive log-softmax: log(x) - log(sum(exp(x)))
        args[i] = np.log(args[i]) - np.log(np.sum(np.exp(args[i]), axis=1, keepdims=True))
        args[i] = args[i].mean(axis=0)
    # Plot the average values as a function of epsilon
    #plt.figure(figsize=(6, 18)) # Increase the height by a factor of 3
    plt.plot(epsilons, [i[:] for i in args])
    plt.xlabel('Epsilon')
    plt.ylabel('softmax output')
    plt.title('softmax output for each class vs Epsilon')
    plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
    plt.show()
def training_loop(optimizer, model, criterion, X_train_tensor, y_train_tensor, num_epochs=200, batch_size=128):
    """Mini-batch training loop.

    Shuffles the data each epoch, runs forward/backward/step per batch, and
    records the LAST batch's loss for each epoch (matching the original).

    Returns:
        (train_losses, model): one loss value per epoch and the trained model.
    """
    loader = DataLoader(TensorDataset(X_train_tensor, y_train_tensor), batch_size, shuffle=True)
    train_losses = []
    for epoch in range(num_epochs):
        for inputs, targets in loader:
            predictions = model(inputs)
            loss = criterion(predictions, targets)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # only the final batch's loss of this epoch is kept
        train_losses.append(loss.item())
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
    return train_losses, model
def plot_losses(train_losses):
    """Plot the per-epoch training losses returned by training_loop()."""
    # local import keeps matplotlib optional for callers that never plot
    import matplotlib.pyplot as plt
    plt.plot(train_losses, label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Curve')
    plt.legend()
    plt.show()
def eval_train(X_train_tensor, y_train_tensor, model):
    """Compute and print the model's accuracy on the training set.

    Runs in eval mode without gradients; prints the accuracy as a percentage
    and returns None.
    """
    model.eval()  # evaluation mode (disables dropout/batch-norm updates)
    with torch.no_grad():
        logits = model(X_train_tensor)
        _, predictions = torch.max(logits, 1)
        n_correct = (predictions == y_train_tensor).sum().item()
        n_total = y_train_tensor.size(0)
        accuracy = n_correct / n_total
        print(f'Training Accuracy: {accuracy * 100:.2f}%')
def visualize_weights_and_signs(model):
    """Show each output class's weight vector and its signs as 28x28 images.

    Assumes `model` has a `linear` layer whose weight rows reshape to 28x28
    (i.e. 784 inputs) — TODO confirm against the model definitions in use.
    """
    # Get the weights from the first layer (assuming it's the layer you're interested in)
    weights = model.linear.weight.data
    # Extract the signs of the weights (-1, 0, +1 per element)
    weight_signs = torch.sign(weights)
    # Check if the weights are 1-dimensional
    if weights.dim() == 1:
        # Reshape the weights to be a 2D tensor with one row
        weights = weights.view(1, -1)
    # Reshape the weights to match the original image dimensions (assuming 28x28)
    weight_images = weights.view(-1, 28, 28)
    # Reshape the weight signs to match the original image dimensions (assuming 28x28)
    # NOTE(review): weight_signs is not reshaped by the 1-D guard above, so a
    # 1-D weight vector would fail here — confirm whether that case occurs.
    weight_sign_images = weight_signs.view(-1, 28, 28)
    # Plot each set of weights and weight signs in a separate subplot
    num_classes = weight_images.size(0)
    fig, axes = plt.subplots(num_classes, 2, figsize=(16, 8 * num_classes))
    for i in range(num_classes):
        # Plot weights
        axes[i, 0].imshow(weight_images[i].cpu().numpy(), cmap='gray')
        axes[i, 0].set_title(f'Class {i} - Weight')
        axes[i, 0].axis('off')
        # Plot weight signs
        axes[i, 1].imshow(weight_sign_images[i].cpu().numpy(), cmap='gray', vmin=-1, vmax=1)
        axes[i, 1].set_title(f'Class {i} - Sign')
        axes[i, 1].axis('off')
    # Show the plot
    plt.show()
def eval_test(X_test_tensor, y_test_tensor, model):
    """Evaluate `model` on the test set and report accuracy and confidence.

    Prints overall accuracy, the mean softmax confidence over all examples,
    and the mean confidence over misclassified examples.

    Returns:
        (wrong_predictions, correct_predictions, all_predictions): predicted
        class labels for misclassified, correctly classified, and all
        examples respectively.
    """
    # Convert test data to DataLoader for batching
    test_dataset = TensorDataset(X_test_tensor, y_test_tensor)
    test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
    model.eval()  # Set the model to evaluation mode
    total = 0
    correct = 0
    total_mean_confidence = 0.0
    total_samples = 0
    incorrect_mean_confidence = 0.0
    incorrect_samples = 0
    wrong_predictions = []
    correct_predictions = []
    all_predictions = []  # List to store all predictions
    with torch.no_grad():
        for batch_X, batch_y in test_loader:
            outputs = model(batch_X)
            _, predicted = torch.max(outputs.data, 1)
            total += batch_y.size(0)
            correct += (predicted == batch_y).sum().item()
            # Store all predictions
            all_predictions.extend(predicted.tolist())
            # Confidence = max softmax probability of each prediction
            probabilities = nn.functional.softmax(outputs, dim=1)
            confidences, _ = torch.max(probabilities, dim=1)
            total_mean_confidence += confidences.sum().item()
            total_samples += batch_y.size(0)
            # Accumulate confidence over incorrect predictions only
            incorrect_mask = predicted != batch_y
            if incorrect_mask.sum().item() > 0:
                incorrect_mean_confidence += confidences[incorrect_mask].sum().item()
                incorrect_samples += incorrect_mask.sum().item()
                # Store the wrong predictions
                wrong_predictions.extend(predicted[incorrect_mask].tolist())
            # Store the correct predictions
            correct_predictions.extend(predicted[~incorrect_mask].tolist())
    # Convert the accumulated confidence sums into means (guard against /0)
    if total_samples > 0:
        total_mean_confidence /= total_samples
    if incorrect_samples > 0:
        incorrect_mean_confidence /= incorrect_samples
    accuracy = correct / total
    print(f'Accuracy: {accuracy * 100:.2f}%')
    print(f'Mean Confidence for All Examples: {total_mean_confidence:.4f}')
    print(f'Mean Confidence for Incorrect Predictions: {incorrect_mean_confidence:.4f}')
    return wrong_predictions, correct_predictions, all_predictions  # Return all predictions
{
"api_name": "torch.tensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_n... |
30419861071 | """
An AI agent that will explore its environment and perform certain tasks (mining, smelting, forging, and buying/selling items)
"""
import sys
from time import sleep
import traceback
import cv2
import pyautogui
from game_map import GameMap
import utilities as utils
from user_interface import UserInterface
from player import Player
# Set defaults: the task may be overridden by the first CLI argument
# (e.g. "mine", "smelt" — must match a Player.TASKS member name).
task = Player.TASKS.MINE
if len(sys.argv) > 1:
    task = Player.TASKS[sys.argv[1].upper()]
# Initialize classes
game_map = GameMap()
player = Player(game_map, task)
user_interface = UserInterface()
utils.log("INIT", "====================================================")
utils.log("INIT", "Initializing...")
utils.log("INIT", F"Default task set to {task}")
# Find blocking window in screenshot via template matching
screenshot = utils.take_screenshot(False)
result = cv2.matchTemplate(screenshot, user_interface.templates['sponsored'], cv2.TM_CCORR_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)
# Found the blocking window with high confidence
if max_val > 0.9:
    # offset from the template's top-left corner to its close button
    click_at = (max_loc[0] + 428, max_loc[1] + 144)
    utils.log("INIT", "Closed blocking window")
    pyautogui.moveTo(click_at[0], click_at[1], 0.15)
    pyautogui.click()
    sleep(5)
# Bring game to foreground
utils.bring_game_to_foreground()
# Detect environment: locate the player and map the visible field of view
screenshot = utils.take_screenshot()
game_map.update_player_position(screenshot)
utils.log("INIT", F"Player location initialized")
game_map.update_map()
utils.log("INIT", "Field of view mapped")
utils.log("INIT", "Initialization complete")
utils.log("INIT", "====================================================")
# Main loop: keep performing the task while the game window stays focusable.
try:
    while utils.bring_game_to_foreground():
        player.perform_task()
except Exception as exception:
    utils.log("SEVERE", exception)
    utils.log("SEVERE", traceback.format_exc())
    utils.quit_game()
| jeffaustin32/game_ai | main.py | main.py | py | 1,821 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "player.Player.TASKS",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "player.Player",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "player.Player.TA... |
44408183986 | #!/usr/bin/env python
# coding: utf-8
# ## This Jupyter notebook will show you to perform basic calculations and plots with 2 dimensional data (matrices, )
#
# ## We will compare two images:
# ### * MODIS-AQUA - 31st August 2005
# ### * MODIS-AQUA - 16th Feburary 2017
#
# Now, we will need to import several packages/toolboxes that are essential for nearly every scientific work in Python.
# In[1]:
import os #change folders
import numpy as np # perform calculations and basic math
import matplotlib.pyplot as plt # plot data
import pandas as pd # work with dataframes,tables, spreadsheets, etc.
import netCDF4 as nc4 # work with netcdf files, the standard file for satellite 2D and 3D data
# ## Now, lets load each image using the netCDF4 module.
# In[2]:
# Let's open the first image (31st August 2005)
file = 'A2005243140500.L2_LAC_OC.x 2.hdf' #write the name of the file
modis_31august2005 = nc4.Dataset(file, mode='r') #open the file in python
print(modis_31august2005) #print full details of the image
# In[3]:
# You can also use fh.variables to read information only on the variables
print(modis_31august2005.variables)
# ## Notice that you have the following variables:
# ### * Time information
# * Year
# * Day of the Year
# * Milliseconds of Day
# ### * Scan line information
# * Tilt angle for scan line
# * Scan start-pixel longitude
# * Scan center-pixel longitude
# * Scan end-pixel longitude
# * Scan start-pixel latitude
# * Scan center-pixel latitude
# * Scan end-pixel latitude
# * (...)
# ### * Remote Sensing Reflectances
# ## * **Latitude**
# ## * **Longitude**
# ## * **Chl-a (OC3 algorithm)**
# ### * Aerosol optical thickness
# ### * CDOM
# ### * PAR
# ### * Particulate Organic Carbon
# In[5]:
# Extracting variables
longitude = np.array(modis_31august2005['longitude'])
print(longitude)
# In[ ]:
# Extracting variables
longitude = np.array(modis_31august2005['longitude'])
latitude = np.array(modis_31august2005['latitude'])
mld = np.array(fh['mlotst'])
mld[mld == 32767] = np.nan
mld = np.swapaxes(np.swapaxes(mld, 0, 2), 0, 1)
time = np.array(fh['time'])
pixel1 = pd.read_csv('pixel1_monthly.csv')
pixel2 = pd.read_csv('pixel2_monthly.csv')
pixel3 = pd.read_csv('pixel3_monthly.csv')
# Let's print one of the datasets to check the structure
# In[ ]:
print(pixel1)
# You will notice the data corresponds to monthly-averaged Chl-a concentrations.
#
# Let's extract the data from each dataset and calculate the mean, min, max, standard deviation
# In[ ]:
pixel1_chla = pixel1['Chl-a'].values
pixel2_chla = pixel2['Chl-a'].values
pixel3_chla = pixel3['Chl-a'].values
# Pixel 1
pixel1_mean = np.nanmean(pixel1_chla)
pixel1_min = np.nanmin(pixel1_chla)
pixel1_max = np.nanmax(pixel1_chla)
pixel1_stdev = np.nanstd(pixel1_chla)
# Pixel 2
pixel2_mean = np.nanmean(pixel2_chla)
pixel2_min = np.nanmin(pixel2_chla)
pixel2_max = np.nanmax(pixel2_chla)
pixel2_stdev = np.nanstd(pixel2_chla)
# Pixel 3
pixel3_mean = np.nanmean(pixel3_chla)
pixel3_min = np.nanmin(pixel3_chla)
pixel3_max = np.nanmax(pixel3_chla)
pixel3_stdev = np.nanstd(pixel3_chla)
print('The Chl-a dataset of pixel 1 has:',
'mean = {:.2f} mg.m/3, minimum = {:.2f} mg.m/3, maximum = {:.2f} mg.m/3 and standard deviation = {:.2f} mg.m/3 \n'.format(pixel1_mean, pixel1_min, pixel1_max, pixel1_stdev))
print('The Chl-a dataset of pixel 2 has:',
'mean = {:.2f} mg.m/3, minimum = {:.2f} mg.m/3, maximum = {:.2f} mg.m/3 and standard deviation = {:.2f} mg.m/3 \n'.format(pixel2_mean, pixel2_min, pixel2_max, pixel2_stdev))
print('The Chl-a dataset of pixel 3 has:',
'mean = {:.2f} mg.m/3, minimum = {:.2f} mg.m/3, maximum = {:.2f} mg.m/3 and standard deviation = {:.2f} mg.m/3 \n'.format(pixel3_mean, pixel3_min, pixel3_max, pixel3_stdev))
# ## Other simple to calculate and useful calculations using numpy are:
# ``` python
# np.ptp(array) # Calculates range (maximum - minimum)
# np.percentile(array) # Calculates the q-th percentile
# np.quantile(array) # Calculates the q-th quantile
# np.median(array) # Calculates the median
# ```
# ## Now say we want to plot each dataset
# In[ ]:
print('Pixel 1 Plot')
plt.plot(pixel1_chla)
# In[ ]:
print('Pixel 2 Plot')
plt.plot(pixel2_chla)
# In[ ]:
print('Pixel 3 Plot')
plt.plot(pixel3_chla)
# They all seem different but let's compare put them in the same plot for comparison.
# In[ ]:
plt.plot(pixel1_chla)
plt.plot(pixel2_chla)
plt.plot(pixel3_chla)
# We can use matplotlib options to improve our plot.
# In[ ]:
plt.figure(figsize=(12,6))
plt.plot(pixel1_chla, c='r', label='Pixel 1')
plt.plot(pixel2_chla, c='b', linestyle='--', label='Pixel 2')
plt.plot(pixel3_chla, c='k', linestyle=':', label='Pixel 3')
plt.xlabel('Years', fontsize=14)
plt.ylabel('Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.xticks(ticks=np.arange(0, len(pixel1_chla), 12), labels=np.arange(1998, 2021))
plt.xlim(0,len(pixel1_chla))
plt.ylim(0, 2)
plt.title('Pixel Chl-$\it{a}$ comparison', fontsize=18)
plt.legend(loc=0, fontsize=14)
#plt.tight_layout()
# ## Other types of plots you can do to compare one dimensional datasets!
# * Scatter plots
# * Histograms
# * Boxplots
# * etc.
# In[ ]:
plt.figure()
plt.scatter(pixel1_chla, pixel2_chla, s=10)
plt.xlabel('Pixel 1 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('Pixel 2 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Scatter Plot - Pixel 1 vs. Pixel 2', fontsize=18)
plt.figure()
plt.scatter(pixel1_chla, pixel3_chla, s=10, c='grey')
plt.xlabel('Pixel 1 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('Pixel 3 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Scatter Plot - Pixel 1 vs. Pixel 3', fontsize=18)
# In[ ]:
plt.figure()
plt.hist(pixel1_chla, color='r')
plt.xlabel('Pixel 1 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('N', fontsize=14)
plt.title('Histogram - Pixel 1', fontsize=18)
plt.xlim(0,2)
plt.ylim(0,150)
plt.figure()
plt.hist(pixel2_chla, color='b')
plt.xlabel('Pixel 2 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('N', fontsize=14)
plt.title('Histogram - Pixel 2', fontsize=18)
plt.xlim(0,2)
plt.ylim(0,150)
plt.figure()
plt.hist(pixel3_chla, color='b')
plt.xlabel('Pixel 3 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('N', fontsize=14)
plt.title('Histogram - Pixel 3', fontsize=18)
plt.xlim(0,2)
plt.ylim(0,150)
# In[ ]:
pixel1_chla_nonans = pixel1_chla[~np.isnan(pixel1_chla)] # Remove missing values
plt.figure()
bplot = plt.boxplot([pixel1_chla_nonans, pixel2_chla, pixel3_chla], notch = True, patch_artist=True, vert=True)
# fill with colors
colors = ['r', 'b', 'k']
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for patch, color in zip(bplot['medians'], colors):
patch.set_color('w')
patch.set_linewidth(2)
plt.xlabel('Pixels', fontsize=14)
plt.ylabel('Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Boxplot Comparison', fontsize=18)
# ## Last but not least, how to save an image.
#
# Let's use the boxplots image as an example
# In[ ]:
pixel1_chla_nonans = pixel1_chla[~np.isnan(pixel1_chla)] # Remove missing values
plt.figure()
bplot = plt.boxplot([pixel1_chla_nonans, pixel2_chla, pixel3_chla], notch = True, patch_artist=True, vert=True)
# fill with colors
colors = ['r', 'b', 'k']
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for patch, color in zip(bplot['medians'], colors):
patch.set_color('w')
patch.set_linewidth(2)
plt.xlabel('Pixels', fontsize=14)
plt.ylabel('Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Boxplot Comparison', fontsize=18)
#plt.show()
# Save in .png
plt.savefig('boxplots_TP4.png',format = 'png', bbox_inches = 'tight', dpi = 100)
# Save in .jpeg
plt.savefig('boxplots_TP4.jpeg',format = 'jpeg', bbox_inches = 'tight', dpi = 100)
# Save in .pdf
plt.savefig('boxplots_TP4.pdf',format = 'pdf', bbox_inches = 'tight', dpi = 100)
| afonsomferreira/ppm_jupyter | 2D_oceancolour_plots.py | 2D_oceancolour_plots.py | py | 7,930 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_numbe... |
18478316296 | from block import Block
from transaction import Transaction
class ConverterToObj():
    """Static helpers that convert dict-based blockchain data into
    Block / Transaction objects."""
    @staticmethod
    def chain_to_obj(blockchain):
        """
        Receives a blockchain of dictionaries and converts the blocks
        into Block objects and the transactions into Transaction objects.
        Returns an updated blockchain (list) of Block objects.
        """
        updated_blockchain = []
        for block in blockchain:
            # Rebuild this block's transactions as objects first, since
            # Block is constructed with Transaction instances.
            converted_tx = [Transaction(
                tx['sender'], tx['receiver'], tx['signature'], tx['amount']) for tx in block['transactions']]
            updated_block = Block(
                block['index'], block['previous_hash'], converted_tx, block['proof'], block['timestamp'])
            updated_blockchain.append(updated_block)
        return updated_blockchain
    @staticmethod
    def transaction_dict_to_obj(transactions):
        """
        Converts a set of transaction dictionaries to Transaction objects.
        Arguments:
            - transactions: an iterable of transaction dicts
        Returns a list of Transaction objects.
        """
        # Same comprehension style as chain_to_obj for consistency.
        return [Transaction(tx['sender'], tx['receiver'], tx['signature'], tx['amount'])
                for tx in transactions]
| salvescoding/bockchain_cryptocurrency | app/helpers/converter_to_obj.py | converter_to_obj.py | py | 1,249 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "transaction.Transaction",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "block.Block",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "transaction.Transaction",
"line_number": 31,
"usage_type": "call"
}
] |
8287227022 | # encoding: utf-8
from django.test import TestCase
from django.db import IntegrityError
from subscription.models import Subscription
class SubscriptionModelTest(TestCase):
def test_create_new_subscription(self):
s = Subscription.objects.create(
name='Henrique Bastos',
cpf='05633165780',
email='henrique@bastos.net',
phone='21-9618-6180'
)
self.assertEquals(s.id, 1)
class SubscriptionModelUniqueTest(TestCase):
    """Uniqueness constraints on Subscription (cpf and email).

    Presumably the 'subscription.json' fixture already contains a row
    with this cpf and email — verify against the fixture file; saving a
    duplicate must then raise IntegrityError.
    """
    fixtures = ['subscription.json']
    def test_cpf_must_be_unique(self):
        # Duplicate cpf (email also duplicated, but cpf alone must fail).
        s = Subscription(
            name='Henrique Bastos',
            cpf='05633165780',
            email='henrique@bastos.net',
            phone='21-9618-6180'
        )
        self.assertRaises(IntegrityError, s.save)
    def test_email_must_be_unique(self):
        # Different cpf, duplicate email.
        s = Subscription(
            name='Henrique Bastos',
            cpf='38067528772',
            email='henrique@bastos.net',
            phone='21-9618-6180')
self.assertRaises(IntegrityError, s.save) | rosenclever/Eventex | subscription/tests/test_models.py | test_models.py | py | 1,055 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "subscription.models.Subscription.objects.create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subscription.models.Subscription.objects",
"line_number": 9,
"usage_ty... |
30086424921 | import logging
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
from utils import build_norm_layer, build_conv_layer, Sequential
class BasicBlock(fluid.dygraph.Layer):
    """Basic residual block (two 3x3 convs) used by ResNet-18/34."""
    expansion = 1
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super(BasicBlock, self).__init__()
        # Norm layers are registered under generated names so the
        # norm1/norm2 properties below can look them up again.
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias_attr=False)
        self.add_sublayer(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias_attr=False)
        self.add_sublayer(self.norm2_name, norm2)
        self.relu = fluid.layers.relu
        # Optional projection applied to the identity branch when the
        # spatial size or channel count changes.
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
    @property
    def norm1(self):
        # First registered norm sublayer.
        return getattr(self, self.norm1_name)
    @property
    def norm2(self):
        # Second registered norm sublayer.
        return getattr(self, self.norm2_name)
    def forward(self, x):
        """conv-norm-relu -> conv-norm, add the (possibly projected)
        identity, then a final relu."""
        identity = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.norm2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out = fluid.layers.elementwise_add(out, identity)
        out = self.relu(out)
        return out
class Bottleneck(fluid.dygraph.Layer):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_cfg=None,
norm_cfg=dict(type='BN')):
"""Bottleneck block for ResNet.
the stride-two layer is the 3x3 conv layer,.
"""
super(Bottleneck, self).__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv1_stride = 1
self.conv2_stride = stride
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
1,
stride=1,
bias_attr=False)
self.add_sublayer(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias_attr=False)
self.add_sublayer(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
1,
bias_attr=False)
self.add_sublayer(self.norm3_name, norm3)
self.relu = fluid.layers.relu
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
@property
def norm3(self):
return getattr(self, self.norm3_name)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = fluid.layers.elementwise_add(out, identity)
out = self.relu(out)
return out
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN')):
    """Stack *blocks* residual blocks of type *block* into one stage.

    Only the first block applies *stride* (and, when the resolution or
    channel count changes, a 1x1 conv + norm projection on the identity
    branch); the remaining blocks use stride 1.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        # Identity branch must be projected to match the main branch.
        downsample = Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                1,
                stride=stride,
                bias_attr=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1]
        )
    layers = []
    # First block carries the stride/downsample for the whole stage.
    layers.append(
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                1,
                dilation,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg))
    return Sequential(*layers)
class ResNet(fluid.dygraph.Layer):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
norm_eval=True,
zero_init_residual=True):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
self.depth = depth
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = 64
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_sublayer(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = self.block.expansion * 64 * 2**(
len(self.stage_blocks) - 1)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _make_stem_layer(self):
self.conv1 = build_conv_layer(
self.conv_cfg,
3,
64,
7,
stride=2,
padding=3,
bias_attr=False)
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.add_sublayer(self.norm1_name, norm1)
self.relu = fluid.layers.relu
self.maxpool = nn.Pool2D(pool_size=3, pool_stride=2, pool_padding=1)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.norm1.eval()
for layer in [self.conv1, self.norm1]:
layer.eval()
for param in layer.parameters():
param.stop_gradient = True
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, 'layer{}'.format(i))
layer.eval()
for param in layer.parameters():
param.stop_gradient = True
def init_weights(self, pretrained=None):
logger = logging.getLogger()
if isinstance(pretrained, str):
logger.info('Loading pretrained model from {}'.format(pretrained))
self.set_dict(fluid.dygraph.load_dygraph(pretrained)[0])
elif pretrained is None:
logger.warning('No pretrained model for Resnet')
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
outs.append(x) # add for encoder
x = self.maxpool(x)
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self):
super(ResNet, self).train()
self._freeze_stages()
if self.norm_eval:
for layer in self.sublayers():
# trick: eval have effect on BatchNorm only
if isinstance(layer, nn.BatchNorm):
layer.eval()
| VIS-VAR/LGSC-for-FAS | models/resnet.py | resnet.py | py | 10,152 | python | en | code | 223 | github-code | 6 | [
{
"api_name": "paddle.fluid.dygraph",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "paddle.fluid",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "utils.build_norm_layer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.bui... |
34268869156 | import pandas as pd
import numpy as np
import numpy as np
import pandas as pd
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from numpy import concatenate
from flask import Flask, request, jsonify
import json
app = Flask(__name__)
@app.route('/', methods=['GET'])
def getMill():
raw_ts = pd.read_pickle('./time_series.pk1')
raw_ts=raw_ts.reset_index(drop=True)
raw_ts.drop(['var52(t-3)','var52(t-2)','var52(t-1)'],axis='columns', inplace=True)
raw_ts = raw_ts.sort_values(by=['var2(t)','var3(t)'])
raw_ts=raw_ts.reset_index(drop=True)
raw_val = raw_ts.values
scaler = MinMaxScaler(feature_range=(0, 1))
raw_scaled = scaler.fit_transform(raw_val)
raw_eval = raw_scaled[57193:,:]
raw_train_test = raw_scaled[:57193,:]
raw_train_test_x = raw_train_test[:, :-1]
raw_train_test_y = raw_train_test[:, -1]
x_train= raw_train_test_x[:42588, :]
x_test = raw_train_test_x[42588:, :]
y_train=raw_train_test_y[:42588]
y_test= raw_train_test_y[42588:]
x_train = x_train.reshape((x_train.shape[0], 1, x_train.shape[1]))
x_test = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))
raw_eval_x = raw_eval[:, :-1]
x_eval= raw_eval_x.reshape((raw_eval_x.shape[0], 1, raw_eval_x.shape[1]))
raw_est = pd.read_csv("RVESTfull.csv")
extract_columns = [154,155,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204]
col_names = ['SEASON','SUGARMONTH','Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu','ErrorRV']
dummie_cols = ['Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu']
y_pred_1 = model.predict(x_test)
x_test_1 = x_test.reshape((x_test.shape[0], x_test.shape[2]))
test_1 = concatenate((x_test_1,y_pred_1), axis=1 )
test_1_scaled = scaler.inverse_transform(test_1)
y_test = y_test.reshape(x_test.shape[0],1)
test_1_actual = concatenate((x_test_1,y_test), axis=1 )
test_1_actual_scaled = scaler.inverse_transform(test_1_actual)
y_test_pred = test_1_scaled[:, -1]
y_test_actual = test_1_actual_scaled[:, -1]
df_test_actual = pd.DataFrame(test_1_actual_scaled)
df_test_pred = pd.DataFrame(test_1_scaled)
mill_season_month_error_actual_test = df_test_actual[df_test_actual[155]>5][extract_columns]
mill_season_month_error_actual_test.columns = col_names
mill_col = mill_season_month_error_actual_test[dummie_cols].idxmax(axis=1)
mill_season_month_error_actual_test['mill'] = mill_col
mill_season_month_error_actual_test.drop(dummie_cols,axis='columns', inplace=True)
mill_season_month_error_actual_test
mill_season_month_error_pred_test = df_test_pred[df_test_pred[155]>5][extract_columns]
mill_season_month_error_pred_test.columns = col_names
mill_season_month_error_actual_test['pred_ErrorRv']=mill_season_month_error_pred_test['ErrorRV']
eval_1 = mill_season_month_error_actual_test
eval_1['SUGARMONTH'] = eval_1['SUGARMONTH'].round()
ev_1 = eval_1[eval_1['SUGARMONTH']<9.5].groupby(by=['mill','SUGARMONTH'])[['pred_ErrorRv','ErrorRV']].mean()
ev_1 = ev_1.reset_index(drop=False)
final_op_test = pd.merge(left= raw_est[(raw_est['SUGARMONTH']>6.5)&(raw_est['fa_SEASON']==2020)], right=ev_1[['mill','SUGARMONTH','pred_ErrorRv']], how='left', left_on=['cf_mill','SUGARMONTH'], right_on=['mill','SUGARMONTH'])
final_op_test['pred_rv'] = final_op_test['FCFORECAST'] + final_op_test['pred_ErrorRv']
final_op_test = final_op_test.dropna(how='any')
final_op_test.columns= ['SUGARMONTH', 'FC_FORECAST', 'Actual_RV', 'ErrorRV', 'cf_mill', 'fa_SEASON','mill', 'pred_ErrorRv', 'Prediction']
test_op = final_op_test[['fa_SEASON','SUGARMONTH','FC_FORECAST','Actual_RV','Prediction','mill']]
extract_columns = [154,155,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204]
col_names = ['SEASON','SUGARMONTH','Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu','ErrorRV']
dummie_cols = ['Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu']
y_pred_2 = model.predict(x_train)
x_train_2 = x_train.reshape((x_train.shape[0], x_train.shape[2]))
train_2 = concatenate((x_train_2,y_pred_2), axis=1 )
train_1_scaled = scaler.inverse_transform(train_2)
y_train = y_train.reshape(y_train.shape[0],1)
train_1_actual = concatenate((x_train_2,y_train), axis=1 )
train_1_actual_scaled = scaler.inverse_transform(train_1_actual)
y_train_pred = train_1_scaled[:, -1]
y_train_actual = train_1_actual_scaled[:, -1]
df_train_actual = pd.DataFrame(train_1_actual_scaled)
df_train_pred = pd.DataFrame(train_1_scaled)
mill_season_month_error_actual_train = df_train_actual[extract_columns].copy()
mill_season_month_error_actual_train.columns = col_names
mill_col = mill_season_month_error_actual_train[dummie_cols].idxmax(axis=1)
mill_season_month_error_actual_train['mill'] = mill_col
mill_season_month_error_actual_train.drop(dummie_cols,axis='columns', inplace=True)
mill_season_month_error_actual_train
mill_season_month_error_pred_train = df_train_pred[extract_columns]
mill_season_month_error_pred_train.columns = col_names
mill_season_month_error_actual_train['pred_ErrorRv']=mill_season_month_error_pred_train['ErrorRV']
eval_2 = mill_season_month_error_actual_train
eval_2['SUGARMONTH'] = eval_2['SUGARMONTH'].round()
ev_2 = eval_2[eval_2['SUGARMONTH']<9.5].groupby(by=["SEASON",'mill','SUGARMONTH'])[['pred_ErrorRv','ErrorRV']].mean()
ev_2 = ev_2.reset_index(drop=False)
ev_2
final_op_train = pd.merge(left= raw_est, right=ev_2[['mill','SEASON','SUGARMONTH','pred_ErrorRv']], how='left', left_on=['cf_mill','fa_SEASON','SUGARMONTH'], right_on=['mill','SEASON','SUGARMONTH'])
final_op_train = final_op_train.dropna(how='any')
final_op_train['pred_rv'] = final_op_train['FCFORECAST'] + final_op_train['pred_ErrorRv']
final_op_train.drop(['SEASON'],axis='columns', inplace=True)
final_op_train.columns= ['SUGARMONTH', 'FC_FORECAST', 'Actual_RV', 'ErrorRV', 'cf_mill', 'fa_SEASON','mill', 'pred_ErrorRv', 'Prediction']
train_op = final_op_train[['fa_SEASON','SUGARMONTH','FC_FORECAST','Actual_RV','Prediction','mill']]
extract_columns = [154,155,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204]
col_names = ['SEASON','SUGARMONTH','Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu','pred_ErrorRv']
dummie_cols = ['Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu']
y_pred_eval = model.predict(x_eval)
x_eval_1 = x_eval.reshape((x_eval.shape[0], x_eval.shape[2]))
eval = concatenate((x_eval_1,y_pred_eval), axis=1 )
eval_scaled = scaler.inverse_transform(eval)
eval_pred = eval_scaled[:, -1]
df_eval = pd.DataFrame(eval_scaled)
mill_season_month_error_actual_eval = df_eval[extract_columns].copy()
mill_season_month_error_actual_eval.columns = col_names
mill_col = mill_season_month_error_actual_eval[dummie_cols].idxmax(axis=1)
mill_season_month_error_actual_eval['mill'] = mill_col
mill_season_month_error_actual_eval.drop(dummie_cols,axis='columns', inplace=True)
eval_3 = mill_season_month_error_actual_eval
eval_3['SUGARMONTH'] = eval_3['SUGARMONTH'].round()
ev_3 = eval_3[eval_3['SUGARMONTH']<9.5].groupby(by=["SEASON",'mill','SUGARMONTH'])[['pred_ErrorRv']].mean()
ev_3 = ev_3.reset_index(drop=False)
ev_3
final_op_eval = pd.merge(left= raw_est[raw_est['fa_SEASON']==2021], right=ev_3[['mill','SEASON','SUGARMONTH','pred_ErrorRv']], how='left', left_on=['cf_mill','fa_SEASON','SUGARMONTH'], right_on=['mill','SEASON','SUGARMONTH'])
final_op_eval.drop(['SEASON','SRV','ErrorRV'],axis='columns', inplace=True)
final_op_eval = final_op_eval.dropna(how='any')
final_op_eval['pred_rv'] = final_op_eval['FCFORECAST'] + final_op_eval['pred_ErrorRv']
final_op_eval.columns= ['SUGARMONTH', 'FC_FORECAST', 'cf_mill', 'fa_SEASON','mill', 'pred_ErrorRv', 'Prediction']
eval_op = final_op_eval[['fa_SEASON','SUGARMONTH','FC_FORECAST','Prediction','mill']]
print(test_op.shape)
print(train_op.shape)
print(eval_op.shape)
mill = request.args.get('mill', type = str)
print(mill)
response = jsonify({'train': json.loads(train_op[train_op['mill']==mill].to_json(orient='index')) , 'test': json.loads(test_op[test_op['mill']==mill].to_json(orient='index')) ,'eval': json.loads(eval_op[eval_op['mill']==mill].to_json(orient='index'))})
response.headers.add("Access-Control-Allow-Origin", "*")
return response
if __name__ == '__main__':
model = load_model('./Model3')
app.run(debug=True, host='0.0.0.0')
| Francis-Walker/AI_api | api_deploy/model_api.py | model_api.py | py | 9,209 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pan... |
14761591721 | import os
from twisted.internet import reactor, defer
def read_mail(mailitems):
    """Print the mail contents and return a shredder-notice string."""
    contents = mailitems
    print(contents)
    # Runtime string kept byte-identical to the original notice.
    return "Junk Mail... Sending to shredder: {}".format(contents)
def shred_mail(mailitems):
    """Shred *mailitems*: print a shredder noise, delete the 'mail'
    file from the working directory, and stop the reactor (ends the demo)."""
    print('buzzzzz: ' + mailitems)
    os.remove('mail')
    reactor.stop()
def create_mail(msg):
    """Write *msg* into a file named 'mail' in the working directory."""
    mailbox = open("mail", "w")
    try:
        mailbox.write(msg)
    finally:
        mailbox.close()
def wait_for_mail(d=None):
    """Poll once per second for a file named 'mail'.

    Returns a Deferred that fires with the first line of the file once
    it appears; until then the function re-schedules itself with the
    same Deferred via reactor.callLater.
    """
    if not d:
        d = defer.Deferred()
    if not os.path.isfile('mail'):
        # Not there yet: check again in one second, same Deferred.
        reactor.callLater(1, wait_for_mail, d)
    else:
        with open("mail") as f:
            contents = f.readlines()
        # Fire the Deferred with the first line of the mail file.
        d.callback(contents[0])
    return d
deferred = wait_for_mail()
deferred.addCallback(read_mail)
deferred.addCallback(shred_mail)
reactor.callLater(2, create_mail, "Look at this new letter!")
reactor.callLater(20, reactor.stop)
reactor.run()
| mina32/APT_Black | mailExample/mailPolling.py | mailPolling.py | py | 826 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.remove",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor.stop",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "twisted.internet.reactor",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "twis... |
33386199461 | """
File input and output functions
"""
import ujson as json
from dev_funcs import printline, Recorded_Time
from comms import Appointment
#class to store data imported from local json config file
class FileIO:
    def __init__(self):
        """Load device configuration from the local JSON file and print
        a short device summary."""
        #build all of the variables from data.json file
        self.load_local_vars()
        #print the device data after import
        self.print_dev_data()
    def load_local_vars(self):
        """Read the local JSON config file and cache its fields as
        instance attributes: dev_id, server_pass, firm_version,
        wifi_networks, appointments, last_known_time, quiet_hours."""
        #read in unparsed json data
        unparsed_data = self.read_in_file()
        #parse json data into dict objects
        pdata = json.loads(unparsed_data)
        #assign parsed json data to local variables
        self.dev_id = pdata["device_info"]["dev_id"]
        self.server_pass = pdata["device_info"]["server_pass"]
        self.firm_version = pdata["device_info"]["firm_version"]
        self.wifi_networks = pdata["wifi_params"]
        self.appointments = pdata["appointments"]
        self.last_known_time = pdata["device_info"]["last_known_time"]
        self.quiet_hours = pdata["device_info"]["quiet_hours"]
#function to print basic device info
def print_dev_data(self):
#construct a string with all the device info to be displayed
ts = "Device " + str(self.dev_id) + " | Firmware version: " + \
str(self.firm_version)
#print constructed string
print(ts)
#function to update time in json file with current time
#takes a Recorded_Time instance (preferred) or a string (not as good)
#no formatting, if time is rewritten incorrectly it could cause a failure
    def update_last_known_time(self, current_time):
        """Persist *current_time* as device_info.last_known_time in the
        JSON file, then reload local variables.

        Accepts a Recorded_Time (preferred) or a pre-formatted string.
        Strings are written as-is with no validation, so a badly
        formatted value can break later time handling.
        """
        #check if current_time is a Recorded_Time object
        if isinstance(current_time, Recorded_Time):
            #get the time as a datetime formatted string
            new_time = current_time.get_datetime_string()
        else:
            #otherwise write new_time with current_time object or string
            #this is where failure could happen, use cautiously
            new_time = current_time
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #rewrite last_known_time
        read_in_data["device_info"]["last_known_time"] = new_time
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
    def update_quiet_hours(self, start=None, end=None):
        """Write new quiet hours into the JSON file and reload local
        variables. Passing None for start/end clears that bound."""
        #define new quiet hours json
        quiet_hours = {
            "start_time": start,
            "end_time": end
        }
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #rewrite old unmodified quiet hours entry (preserves all data)
        read_in_data["device_info"]["quiet_hours"] = quiet_hours
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#function takes an Appointment object and adds appointment to appointments object
    def add_appointment(self, new_appt):
        """Append *new_appt* (an Appointment object) to the JSON file.

        The appointment is stored as a dict with an empty answers list
        and cancelled=False; the file is rewritten and local variables
        are reloaded.
        """
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the appointment data
        appointments = read_in_data["appointments"]
        #create new JSON of new appt to add
        appt_to_add = {
            "appointment_id": int(new_appt.appointment_id),
            "appointment_date_time": new_appt.appointment_date_time,
            "answers" : [],
            "cancelled" : False
        }
        #append new appointment onto appointment JSON obj
        appointments.append(appt_to_add)
        #rewrite old unmodified appointment entry (preserves all data)
        read_in_data["appointments"] = appointments
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#function to remove an appointment from the json file
#takes an appointment id as an arg, does not return anything
    def remove_appointment(self, appointment_id):
        """Delete the appointment with the given id from data.json.

        Silently rewrites the file unchanged when the id is not found.
        Does not return anything.
        """
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the appointment data
        appointments = read_in_data["appointments"]
        #make empty dict of appointments that can be filled by loop
        remaining_appts = []
        #search through appointments for matching id
        for appt in appointments:
            if appt["appointment_id"] != appointment_id:
                remaining_appts.append(appt)
        #rewrite old unmodified appointment entry (preserves all other data)
        read_in_data["appointments"] = remaining_appts
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#function to get appoint data stored in data.json
#returns None (if no appts) or an array of Appointment objects
    def get_appointments(self, appt_id=None):
        """Return stored appointments as Appointment objects.

        With appt_id: the matching Appointment, or None when absent.
        Without appt_id: a list of all appointments (possibly empty).
        """
        # NOTE(review): a falsy appt_id (e.g. 0) falls through to the
        # "return all" branch — confirm ids start at 1
        if appt_id:
            for appt in self.appointments:
                if appt["appointment_id"] == appt_id:
                    return Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
            return None
        else:
            #create new array for resulting objects
            appts_arr = []
            #go through appointments json
            for appt in self.appointments:
                #create new appointment with json data
                new_appt = Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
                #add newly created Appointment obj to list to return
                appts_arr.append(new_appt)
            #return the array
            return appts_arr
def get_cancelled_appointments(self):
#create new array for resulting objects
appts_arr = []
#go through appointments json
for appt in self.appointments:
if appt.cancelled:
#create new appointment with json data
new_appt = Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
#add newly created Appointment obj to list to return
appts_arr.append(new_appt)
#return the array
return appts_arr
    def get_unsent_appointment_answers(self):
        """Return [Appointment, highest_answer_number] pairs for unsent answers.

        NOTE(review): an appointment with several unsent answers is appended
        once per unsent answer, so duplicate pairs are possible — confirm
        callers expect that.
        """
        appts_arr = []
        #go through appointments json
        for appt in self.appointments:
            for answer in appt["answers"]:
                if answer["sent"] == False:
                    #create new appointment with json data
                    new_appt = Appointment(appt["appointment_id"],appt["answers"],appt["appointment_date_time"], appt["cancelled"])
                    # track the largest answer "number" across all answers
                    highest_answer = 0
                    for i in appt["answers"]:
                        if i["number"] > highest_answer:
                            highest_answer = i["number"]
                    #add newly created Appointment obj to list to return
                    appts_arr.append([new_appt,highest_answer])
        #return the array
        return appts_arr
#function adds an appointment answer to the specified appt
#takes an appt id (int), an answer (True,False,None), and a Recorded_Time object
    def new_appointment_answer(self, appointment_id, answer, currtime, answer_number):
        """Append an answer record to the appointment with the given id.

        answer: the answer value (True/False/None).
        currtime: Recorded_Time-like object; refreshed before its datetime
        string is stored.  answer_number: sequence number for this answer.
        The new record starts with sent=False.
        """
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the appointment data
        appointments = read_in_data["appointments"]
        #search through appointments for matching id
        for appt in appointments:
            if appt["appointment_id"] == appointment_id:
                currtime.update_time()
                new_answer = {
                    "answer": answer,
                    "time_answered": currtime.get_datetime_string(),
                    "number": answer_number,
                    "sent": False
                }
                appt["answers"].append(new_answer)
        #rewrite old unmodified appointment entry (preserves all other data)
        read_in_data["appointments"] = appointments
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
    def cancel_appointment(self, appointment_id):
        """Mark the appointment with the given id as cancelled in data.json."""
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the appointment data
        appointments = read_in_data["appointments"]
        #search through appointments for matching id
        for appt in appointments:
            if appt["appointment_id"] == appointment_id:
                appt["cancelled"] = True
        #rewrite old unmodified appointment entry (preserves all other data)
        read_in_data["appointments"] = appointments
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
    def remove_appointment_answer(self, appointment_id):
        """Clear all stored answers for the appointment with the given id."""
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the appointment data
        appointments = read_in_data["appointments"]
        #search through appointments for matching id
        for appt in appointments:
            if appt["appointment_id"] == appointment_id:
                appt["answers"] = []
        #rewrite old unmodified appointment entry (preserves all other data)
        read_in_data["appointments"] = appointments
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#updates answer status (change sent status from false to true)
    def update_appointment_answer_status(self, appointment_id, status, number):
        """Set the "sent" flag of one answer (matched by number) on the
        appointment with the given id."""
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the appointment data
        appointments = read_in_data["appointments"]
        #search through appointments for matching id
        for appt in appointments:
            if appt["appointment_id"] == appointment_id:
                for answer in appt["answers"]:
                    if number == answer["number"]:
                        answer["sent"] = status
        #rewrite old unmodified appointment entry (preserves all other data)
        read_in_data["appointments"] = appointments
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#function takes an ssid, password, adds wifi network to wifi params
    def add_wifi_network(self, ssid, password):
        """Append an {ssid, password} entry to "wifi_params" in data.json.

        NOTE(review): the password is stored in plain text — confirm this
        is acceptable for the device's threat model.
        """
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the "wifi_params" section of json data
        wifi_networks = read_in_data["wifi_params"]
        #create new JSON of new wifi network to add
        network_to_add ={
            "ssid": ssid,
            "password" : password
        }
        #append new network onto wifi_networks JSON obj
        wifi_networks.append(network_to_add)
        #rewrite old unmodified wifi_params entry (preserves all other data)
        read_in_data["wifi_params"] = wifi_networks
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#function to remove a wifi network entry from the json file
#takes a wifi ssid an arg, does not return anything
    def remove_wifi_network(self, ssid):
        """Remove every wifi entry whose ssid matches from data.json.

        Does not return anything; a missing ssid is a silent no-op.
        """
        #read in data from file
        read_in_data = json.loads(self.read_in_file())
        #isolate the "wifi_params" section of json data
        wifi_networks = read_in_data["wifi_params"]
        #make empty dict of remaining networks that can be filled by loop
        remaining_networks = []
        #search through wifi_networks for matching ssid
        for wifi_network in wifi_networks:
            if wifi_network["ssid"] != ssid:
                remaining_networks.append(wifi_network)
        #rewrite old unmodified appointment entry (preserves all data)
        read_in_data["wifi_params"] = remaining_networks
        #dump the json data to the file saver func, reload local vars from json file
        self.write_to_file(json.dumps(read_in_data))
        self.load_local_vars()
#function reads in data.json file and returns unmodified string
def read_in_file(self):
#create file object pointing to json config file
loc_file = open('data.json', 'r')
#read in unparsed json data, close file
unparsed_data = loc_file.read()
loc_file.close()
#return resulting unparsed data
return unparsed_data
#function to rewrite json file
#WILL OVERWRITE ALL JSON DATA, READ DATA, MODIFY, THEN WRITE
def write_to_file(self, new_file_text):
#create file object pointing to json config file
loc_file = open('data.json', 'w')
#write data to file
loc_file.write(new_file_text)
#close file
loc_file.close() | TotalJTM/DoccoLink-Device-Firmware-V1 | file_funcs.py | file_funcs.py | py | 11,601 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ujson.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dev_funcs.Recorded_Time",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "ujson.loads",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "ujson.dumps",
... |
15018763785 | from tkinter import Widget
import customtkinter as ctk
from customtkinter import ThemeManager
from View.GUI.Windows.GraphWindow.ButtonBar import ButtonBar
from View.GUI.Windows.GraphWindow.GraphCanvas import GraphCanvas
from View.GUI.Windows.WindowInterface import WindowInterface, Position
class GraphWindow(WindowInterface, ctk.CTkFrame):
    """Dockable window that hosts the network graph canvas plus its button bar."""
    @staticmethod
    def get_title() -> str:
        """Title shown on the window tab."""
        return "Graph"
    @staticmethod
    def get_start_position() -> Position:
        """Initial docking position for this window."""
        return Position.Center
    @staticmethod
    def get_importance():
        """Relative ordering weight used by the window manager."""
        return 5
    def __init__(self, parent, controller, network, move_to_center=True):
        WindowInterface.__init__(self, parent, controller, network)
        # colours come from the active customtkinter theme
        bg_color = ThemeManager.theme["color_scale"]["outer"]
        fg_color = ThemeManager.theme["color_scale"]["inner"]
        ctk.CTkFrame.__init__(self, parent, fg_color=fg_color, bg_color=bg_color)
        # single grid cell: the canvas fills it, the button bar floats on top
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.graph_canvas = GraphCanvas(self, controller, network, move_to_center=move_to_center)
        self.graph_canvas.grid(column=0, row=0, sticky="news", padx=3, pady=3)
        self.button_bar = ButtonBar(self, self.graph_canvas)
        self.button_bar.grid(column=0, row=0, sticky="n", pady=5)
        # canvas needs a back-reference to the bar before finishing setup
        self.graph_canvas.button_bar = self.button_bar
        self.graph_canvas.initial_setup()
    def clone(self, new_parent: Widget) -> 'WindowInterface':
        """Create a copy of this window under new_parent, preserving the
        current zoom level and the model point at the viewport centre."""
        new_window = GraphWindow(new_parent, self.controller, self.network, move_to_center=False)
        new_window.graph_canvas.zoom_to(self.graph_canvas.scale_factor)
        # model coordinates currently at the centre of this window's viewport
        old_x_middle = self.graph_canvas.canvasx(self.graph_canvas.winfo_width() / 2)
        old_y_middle = self.graph_canvas.canvasy(self.graph_canvas.winfo_height() / 2)
        old_x_model, old_y_model = self.graph_canvas.coords_canvas_to_model(old_x_middle, old_y_middle)
        # estimate screen mid as canvas is not yet drawn with correct width / height
        estimated_mid_x = int(new_window.graph_canvas.canvasx(new_parent.winfo_width() / 2))
        estimated_mid_y = int(new_window.graph_canvas.canvasy(new_parent.winfo_height() / 2))
        new_window.graph_canvas.move_canvas_to(old_x_model, old_y_model, estimated_mid_x, estimated_mid_y)
        return new_window
| Moni5656/npba | View/GUI/Windows/GraphWindow/GraphWindow.py | GraphWindow.py | py | 2,319 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "View.GUI.Windows.WindowInterface.WindowInterface",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "customtkinter.CTkFrame",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "View.GUI.Windows.WindowInterface.Position.Center",
"line_number"... |
9352031238 | """Cleaning Functions
These functions define standard text processing functions for cleaning.
"""
from html import unescape
import re
import emoji
def clean_text(text):
    """Cleans single data entry of text.

    Unescapes HTML entities, replaces mentions/URLs/emoji with the special
    tokens [USER]/[URL]/[EMOJI], strips reddit artifacts and collapses all
    whitespace runs into single spaces.

    Args:
        text (str): input text for cleaning.

    Returns:
        str: output cleaned text.
    """
    # convert HTML codes
    text = unescape(text)
    # replace mentions, URLs and emojis with special token
    text = re.sub(r"@[A-Za-z0-9_-]+",'[USER]',text)
    text = re.sub(r"http\S+",'[URL]',text)
    # NOTE(review): emoji.UNICODE_EMOJI was removed in emoji>=2.0 — confirm
    # the pinned emoji version still exposes this mapping
    text = ''.join(' [EMOJI] ' if (char in emoji.UNICODE_EMOJI) else char for char in text).strip()
    # in Samory dataset there are mentions e.g. MENTION3851 --> convert to USER tokens
    text = re.sub("MENTION[0-9]*", '[USER]', text)
    # remove newline and tab characters
    text = text.replace('\n',' ')
    text = text.replace('\t',' ')
    # remove leading ">" (reddit artifact)
    text = text.lstrip('>')
    # collapse whitespace into single whitespace
    text = re.sub(r'\s+', ' ', text)
    # remove leading and trailing whitespaces
    text = text.strip()
    return text
def drop_nans(input_df, subset_col='text', verbose=False):
    """Drop rows whose value in `subset_col` is NaN (in place).

    Args:
        input_df (pd.DataFrame): dataframe to filter; modified in place.
        subset_col (str, optional): column checked for NaN. Defaults to 'text'.
        verbose (bool, optional): print drop statistics. Defaults to False.

    Returns:
        pd.DataFrame: the filtered dataframe.
    """
    orig_len = len(input_df)
    # remove NANs in place
    input_df.dropna(subset=[subset_col], inplace=True)
    new_len = len(input_df)
    if verbose is True:
        print(f"""\nOrig len: {orig_len},
              Num of dropped values: {orig_len - new_len},
              New len: {new_len}""")
    return input_df
def drop_duplicates(input_df, subset_col='clean_text', verbose=False):
    """Drop rows duplicated in `subset_col` (keep first). Run *after* cleaning.

    Args:
        input_df (pd.DataFrame): dataframe to de-duplicate; modified in place.
        subset_col (str, optional): column used for de-duplication. Defaults to 'clean_text'.
        verbose (bool, optional): print drop statistics. Defaults to False.

    Returns:
        pd.DataFrame: the de-duplicated dataframe.
    """
    orig_len = len(input_df)
    # remove duplicates in place
    input_df.drop_duplicates(subset=[subset_col], inplace=True)
    new_len = len(input_df)
    if verbose is True:
        print(f"""\nOrig len: {orig_len},
              Num of dropped values: {orig_len - new_len},
              New len: {new_len}""")
    return input_df
def drop_empty_text(input_df, subset_col='clean_text', verbose=False):
    """Drop rows whose `subset_col` is the empty string. Run *after* cleaning.

    Args:
        input_df (pd.DataFrame): input dataframe (a filtered copy is returned).
        subset_col (str, optional): column checked for empty text. Defaults to 'clean_text'.
        verbose (bool, optional): print drop statistics. Defaults to False.

    Returns:
        pd.DataFrame: dataframe without empty-text rows.
    """
    orig_len = len(input_df)
    # keep only rows with non-empty text (boolean mask, returns a new frame)
    input_df = input_df[input_df[subset_col].values!=""]
    new_len = len(input_df)
    if verbose is True:
        print(f"""\nOrig len: {orig_len},
              Num of dropped values: {orig_len - new_len},
              New len: {new_len}""")
    return input_df
def drop_url_emoji(input_df, subset_col='clean_text', verbose=False):
    """Drop rows that consist solely of the [URL] or [EMOJI] token.

    Run *after* text cleaning (the tokens are produced by clean_text).

    Args:
        input_df (pd.DataFrame): input dataframe (a filtered copy is returned).
        subset_col (str, optional): column to inspect. Defaults to 'clean_text'.
        verbose (bool, optional): print drop statistics. Defaults to False.

    Returns:
        pd.DataFrame: dataframe without token-only rows.
    """
    orig_len = len(input_df)
    # keep rows whose text is something other than a bare special token
    input_df = input_df[(input_df[subset_col]!="[URL]") & (input_df[subset_col]!="[EMOJI]")]
    new_len = len(input_df)
    if verbose is True:
        print(f"""\nOrig len: {orig_len},
              Num of dropped values: {orig_len - new_len},
              New len: {new_len}""")
    return input_df
| HannahKirk/ActiveTransformers-for-AbusiveLanguage | scripts/0_data_prep/cleaning_functions.py | cleaning_functions.py | py | 4,543 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "html.unescape",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "emoji.UNICODE_EMOJI",
"line_number": ... |
18552105619 | # # Napisać program który wyświetla wykres funkcji kwadratowej o podanych współczynnikach.
# # Tworząc wykres należy tak dobrać zakres wyświetlanej osi X aby znalazły się w nim:
# # współrzędna wierzchołka oraz miejsca zerowe z marginesem ok 10%
# # (dla przykładu: jeżeli miejsca zerowe wynoszą np x1=2 i x2=10 to oś X powinna zawierać punkty od 1.8 do 11).
# # Jeżeli parabola nie ma miejsc zerowych, lub ma podwójne miejsce zerowe, wykres powinien zawierać wierzchołek paraboli oraz margines ok 20%
# # (dla przykładu jeżeli wsp. wierzchołka wynosi x0=5 to oś X powinna zawierać punkty od 4 do 6).
import math
import matplotlib.pyplot as plot
import numpy as np
def liczenie_delty(a, b, c):
    """Compute, print and return the discriminant of a*x^2 + b*x + c."""
    discriminant = b ** 2 - 4 * a * c
    print('delta =', discriminant)
    return discriminant
def wykres(delta, a, b, c):
    """Plot f(x) = a*x^2 + b*x + c with the vertex and real roots marked.

    Axis range follows the assignment spec: with two distinct roots the X
    axis spans them widened by ~10% of each root's coordinate; with a double
    root or no real roots it spans the vertex widened by ~20%.

    Fixes two defects of the original: an UnboundLocalError when delta < 0
    (x0/x1/x2 were never assigned before being read), and the margin being
    computed from x1 for both axis ends.
    """
    x0 = x1 = x2 = None
    if delta == 0:
        print('Równanie ma jedno rozwiązanie')
        x0 = (-b - math.sqrt(delta)) / (2 * a)
        print('x0 =', x0)
    elif delta > 0:
        print('Równanie ma dwa rozwiązanie')
        x1 = (-b - math.sqrt(delta)) / (2 * a)
        x2 = (-b + math.sqrt(delta)) / (2 * a)
        print('x1 =', x1)
        print('x2 =', x2)
    else:
        print('Równanie nie ma rozwiązań')
    print("f(x)={0}x^2+{1}x+{2}".format(a, b, c))
    # vertex of the parabola
    p = (-b) / (2 * a)
    q = (-delta) / (4 * a)
    print('p', p, 'q', q)
    if x1 is not None:
        # two distinct roots: span them, widened by ~10% of each coordinate
        lo, hi = min(x1, x2), max(x1, x2)
        x = np.linspace(lo - 0.1 * abs(lo), hi + 0.1 * abs(hi), 1000)
    else:
        # double root or no real roots: centre on the vertex with ~20% margin
        # (fallback span of 1.0 when the vertex sits at x = 0)
        half_span = 0.2 * abs(p) or 1.0
        x = np.linspace(p - half_span, p + half_span, 1000)
    y = a * x ** 2 + b * x + c
    fig, ax = plot.subplots()
    ax.set_title("Wykres funkcji kwadratowej")
    plot.grid(True)
    ax.plot(x, y)
    ax.hlines(y=0, xmin=min(x), xmax=max(x), colors='r', linestyles='--', lw=1)
    plot.scatter(p, q, color='red', label='Wierzchołek')
    if x1 is not None:
        plot.scatter(x1, 0, color='green', label='Miejsce zerowe')
        plot.scatter(x2, 0, color='green', label='Miejsce zerowe')
    plot.show()
# read coefficient a; '0' is rejected because f would not be quadratic
print('Podaj liczbę a:')
a=input()
while a == '0':
    print('a musi być liczbą całkowitą ani być równe zero. Podaj liczbę a jeszcze raz:')
    a=input()
a = int(a)
# read coefficients b and c
# NOTE(review): non-numeric input raises ValueError here — no validation
print('Podaj liczbę b:')
b=input()
b = int(b)
print('Podaj liczbę c:')
c=input()
c = int(c)
# compute the discriminant, then draw the plot
delta = liczenie_delty(a,b,c)
wykres(delta,a, b, c)
| TomaszWs/Python-training | UG-training/wykres-funkcji.py | wykres-funkcji.py | py | 3,391 | python | pl | code | 0 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 36,... |
33680361650 | from django.urls import path, include
from rest_framework import routers
from .views import (
IndexView,
DetailView,
ResultsView,
vote,
QuestionViewSet,
ChoiceViewSet,
)
# DRF router auto-generates the REST routes for the two viewsets
router = routers.DefaultRouter()
router.register(r"questions", QuestionViewSet)
router.register(r"choices", ChoiceViewSet)
# namespace used by {% url %} / reverse() lookups, e.g. "polls:detail"
app_name = "polls"
urlpatterns = [
    path("", IndexView.as_view(), name="index"),
    path("<int:pk>/", DetailView.as_view(), name="detail"),
    path("<int:pk>/results", ResultsView.as_view(), name="results"),
    path("<int:question_id>/vote", vote, name="vote"),
    # REST API mounted under /api/ (routes generated by the router above)
    path("api/", include(router.urls)),
]
| orvisevans/django-vue-site | backend/polls/urls.py | urls.py | py | 630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "views.QuestionViewSet",
"line_number": 13,
"usage_type": "argument"
},
{
... |
787431703 | """
Tests for `nameko_cachetools` module.
"""
import time
import pytest
from mock import Mock, patch
import random
import eventlet
from nameko.rpc import rpc
from nameko.standalone.rpc import ServiceRpcProxy
from nameko_cachetools import CachedRpcProxy, CacheFirstRpcProxy
from nameko.testing.services import (entrypoint_hook, entrypoint_waiter,
get_extension)
@pytest.fixture
def container(container_factory, rabbit_config):
    """Start a service container exposing one RPC entrypoint per proxy type:
    'cached' routes through CachedRpcProxy (1s failover), 'cache_first'
    through CacheFirstRpcProxy; both target 'some_other_service'."""
    class Service(object):
        name = "service"
        cached_service = CachedRpcProxy('some_other_service', failover_timeout=1)
        cache_first_service = CacheFirstRpcProxy('some_other_service')
        @rpc
        def cached(self, *args, **kwargs):
            return self.cached_service.some_method(*args, **kwargs)
        @rpc
        def cache_first(self, *args, **kwargs):
            return self.cache_first_service.some_method(*args, **kwargs)
    container = container_factory(Service, rabbit_config)
    container.start()
    return container
def test_cached_response(container):
    """A successful call populates the cache; later failures with the same
    arguments are served from it, while uncached arguments propagate the
    underlying error."""
    cached_rpc = get_extension(container, CachedRpcProxy)
    def fake_some_method(*args, **kwargs):
        return 'hi'
    # first call succeeds and is cached under args ('test',)
    with patch('nameko.rpc.MethodProxy.__call__', fake_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook('test') == 'hi'
    def broken_some_method(*args, **kwargs):
        raise Exception('hmm')
    # same args + broken backend -> served from cache
    with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook('test') == 'hi'
    # different args were never cached -> the error propagates
    with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            with pytest.raises(Exception):
                hook('unknown')
    # clearing the cache removes the fallback for previously cached args
    cached_rpc.cache = {}
    with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            with pytest.raises(Exception):
                hook('test')
def test_cached_response_on_timeout(container):
    """When the backend is slower than failover_timeout (1s), the cached
    value is returned early; with an empty cache the call waits it out."""
    cached_rpc = get_extension(container, CachedRpcProxy)
    def fake_some_method(*args, **kwargs):
        return 'hi'
    # prime the cache with a fast successful call
    with patch('nameko.rpc.MethodProxy.__call__', fake_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook() == 'hi'
    def slow_response(*args, **kwargs):
        eventlet.sleep(3)
        return 'hi'
    # cached: served before the 3s backend finishes (failover after ~1s)
    start = time.time()
    with patch('nameko.rpc.MethodProxy.__call__', slow_response):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook() == 'hi'
    assert time.time() - start < 2
    # cache cleared: the call must wait the full 3s for the real response
    cached_rpc.cache = {}
    start = time.time()
    with patch('nameko.rpc.MethodProxy.__call__', slow_response):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook() == 'hi'
    assert time.time() - start >= 3
def test_cached_rich_args_rich_response(container):
    """Large, nested argument and response structures must round-trip
    through the cache (cache keys derive from complex args)."""
    response = {}
    request = {}
    # build ~400-entry nested structures with mixed container types
    for i in range(400):
        response[random.randint(1, 1000)] = ['a', (2, 3), {'b': 4.3}]
        request[random.randint(1, 1000)] = ['b', [4, 6], {'c': 8.9}]
    def fake_some_method(*args, **kwargs):
        return response
    # first call caches the rich response
    with patch('nameko.rpc.MethodProxy.__call__', fake_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook(request) == response
    def broken_some_method(*args, **kwargs):
        raise Exception('hmm')
    # backend broken: the rich response is replayed from cache
    with patch('nameko.rpc.MethodProxy.__call__', broken_some_method):
        with entrypoint_hook(container, 'cached') as hook:
            assert hook(request) == response
def test_cache_first(container):
    """CacheFirstRpcProxy only hits the backend on a cache miss."""
    mock = Mock()
    # first call: cache miss -> backend invoked once
    with patch('nameko.rpc.MethodProxy.__call__', mock):
        with entrypoint_hook(container, 'cache_first') as hook:
            hook('ho')
            mock.assert_called_once_with('ho')
    mock.reset_mock()
    # second call, same args: served from cache, backend untouched
    with patch('nameko.rpc.MethodProxy.__call__', mock):
        with entrypoint_hook(container, 'cache_first') as hook:
            hook('ho')
            mock.assert_not_called()
    # clearing the cache forces a fresh backend call
    cache_first_rpc = get_extension(container, CacheFirstRpcProxy)
    cache_first_rpc.cache = {}
    with patch('nameko.rpc.MethodProxy.__call__', mock):
        with entrypoint_hook(container, 'cache_first') as hook:
            hook('ho')
            mock.assert_called_once_with('ho')
| santiycr/nameko-cachetools | test/test_nameko_cachetools.py | test_nameko_cachetools.py | py | 4,402 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "nameko_cachetools.CachedRpcProxy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nameko_cachetools.CacheFirstRpcProxy",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "nameko.rpc.rpc",
"line_number": 25,
"usage_type": "name"
},
{
... |
8267999016 | from __future__ import annotations
from unittest import mock
from kombu.utils.objects import cached_property
class test_cached_property:
    """Tests for kombu.utils.objects.cached_property."""
    def test_deleting(self):
        """The deleter hook receives the cached value (if any) on `del`."""
        class X:
            xx = False
            @cached_property
            def foo(self):
                return 42
            @foo.deleter
            def foo(self, value):
                self.xx = value
        x = X()
        # nothing cached yet: the deleter gets no stored value, xx stays falsy
        del x.foo
        assert not x.xx
        # with a cached value present, the deleter receives it
        x.__dict__['foo'] = 'here'
        del x.foo
        assert x.xx == 'here'
    def test_when_access_from_class(self):
        """Class-level access returns the descriptor itself; instance
        assignment routes through the setter hook."""
        class X:
            xx = None
            @cached_property
            def foo(self):
                return 42
            @foo.setter
            def foo(self, value):
                self.xx = 10
        desc = X.__dict__['foo']
        # accessed without an instance, every protocol method yields the descriptor
        assert X.foo is desc
        assert desc.__get__(None) is desc
        assert desc.__set__(None, 1) is desc
        assert desc.__delete__(None) is desc
        assert desc.setter(1)
        x = X()
        x.foo = 30
        assert x.xx == 10
        del x.foo
    def test_locks_on_access(self):
        """Get, set and delete-then-recompute all take the property's lock."""
        class X:
            @cached_property
            def foo(self):
                return 42
        x = X()
        # Getting the value acquires the lock, and may do so recursively
        # on Python < 3.12 because the superclass acquires it.
        with mock.patch.object(X.foo, 'lock') as mock_lock:
            assert x.foo == 42
            mock_lock.__enter__.assert_called()
            mock_lock.__exit__.assert_called()
        # Setting a value also acquires the lock.
        with mock.patch.object(X.foo, 'lock') as mock_lock:
            x.foo = 314
            assert x.foo == 314
            mock_lock.__enter__.assert_called_once()
            mock_lock.__exit__.assert_called_once()
        # .. as does clearing the cached value to recompute it.
        with mock.patch.object(X.foo, 'lock') as mock_lock:
            del x.foo
            assert x.foo == 42
            mock_lock.__enter__.assert_called_once()
            mock_lock.__exit__.assert_called_once()
| celery/kombu | t/unit/utils/test_objects.py | test_objects.py | py | 2,091 | python | en | code | 2,643 | github-code | 6 | [
{
"api_name": "kombu.utils.objects.cached_property",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "kombu.utils.objects.cached_property",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "kombu.utils.objects.cached_property",
"line_number": 60,
"usage_t... |
10649216487 | """
ProjectManager
Description:
"""
import pygame,sys
pygame.init()
# Defining Image Width
get_width = int(input("Image Width: (px)"))
get_height = int(input("Image Height: (px)"))
get_name = str(input("Project Name: "))
win_size = (get_width,get_height)
# Creating Project Script: write a class skeleton that draw calls are later appended to
file = get_name + '.txt'
with open(file,'w') as f:
    f.write("class " + get_name + ":\n")
    f.write("    def __init__(self,bg_color,pos=(0,0)):\n")
    f.write("        self.pos = list(pos)\n")
    f.write("        self.img = pygame.Surface(" + str([win_size[0],win_size[1]]) + ")\n")
    f.write("        self.img.fill(bg_color)\n\n")
    f.write("        # Drawing Code Goes Here")
# Editing Current Shape
currentPolygon = False
# Window
w,h = (win_size[0],win_size[1])
win = pygame.display.set_mode([w,h])
# Variables
image_panel = []  # committed polygons (one list of points per shape)
pt_list = []  # vertices of the polygon currently being drawn
color_list = []  # fill color of each committed polygon
# Idea for Saving Data?
save_data = {
    "item1": "color_data"
}
# Color Tuples
BACKGROUND = (255,255,255)
color = (0,0,0)
# Shaping Functions
def update_polygons(point_list):
    """Draw the in-progress polygon: a red handle at each vertex plus the
    filled shape in the current pen color."""
    global image_panel
    for i in range(len(point_list)):
        pygame.draw.circle(win,(255,0,0),(point_list[i][0],point_list[i][1]),4)
        # NOTE(review): the polygon is redrawn once per vertex — likely only
        # needed once after the loop
        pygame.draw.polygon(win, color,point_list)
def polygon_tool():
    """Toggle handler for the polygon tool: on toggle-off, commit the
    in-progress vertices (and the current pen color) to the image panel."""
    global pt_list,currentPolygon
    if not currentPolygon:
        image_panel.append(pt_list)
        color_list.append(color)
        pt_list = []
        print("Current Tool: None")
    else:
        print("Current Tool: Polygon Shape Tool")
def undo_move():
    """Remove the most recently committed polygon and redraw the canvas.

    Also pops the matching color_list entry so saved colors stay in sync
    with image_panel, and redraws each remaining shape in the color it was
    committed with — the previous version used the *current* pen color for
    every shape and let color_list drift out of sync.
    """
    global image_panel,pt_list,color
    if image_panel:
        image_panel.pop()
        if color_list:
            color_list.pop()
    win.fill(BACKGROUND)
    for shape, shape_color in zip(image_panel, color_list):
        pygame.draw.polygon(win, shape_color, shape)
def save_image(image_panel):
    """Append one pygame.draw.polygon(...) line per committed shape to the
    generated project script (file is opened in append mode)."""
    with open (file, 'a') as f:
        for i in range(len(image_panel)):
            f.write('\n        pygame.draw.polygon(self.img,' + str(color_list[i]) + "," + str(image_panel[i]) + ')')
    print("Image Saved! You can now close the application...")
# Window Loop: poll events, handle tool hotkeys, redraw, repeat
while True:
    x, y = pygame.mouse.get_pos()
    key = pygame.key.get_pressed()
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            sys.exit()
        # clicking adds a vertex while the polygon tool is active
        if e.type == pygame.MOUSEBUTTONDOWN:
            if currentPolygon:
                pt_list += [(x,y)]
        if e.type == pygame.KEYUP:
            if key[pygame.K_p]:
                print("Current Tool: Pen Tool")
            if key[pygame.K_r]:
                currentPolygon = not currentPolygon
                polygon_tool()
            if key[pygame.K_f]:
                print("Current Tool: Bucket Fill Tool")
            if key[pygame.K_LEFT]: # Undo Move
                undo_move()
            if key[pygame.K_RIGHT]: # Redo Move (not implemented)
                pass
            if key[pygame.K_c]: # Change Color
                # NOTE(review): eval on raw user input is unsafe and crashes
                # on malformed input — consider ast.literal_eval
                new_color = input("Enter New Color: (tuple) | ")
                color = tuple(eval(new_color))
            if key[pygame.K_s]: # Saving
                print("Saving Image...")
                save_image(image_panel)
    update_polygons(pt_list)
    pygame.display.flip()
| LandenTy/GeometricEngine | CustomTexturer/main.py | main.py | py | 3,267 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.ci... |
1584185600 | import cv2
def draw_boxes(im, boxes, class_names=None, scores=None, colors=None):
    """Draw every box (with optional label, score and color) onto `im`.

    Missing per-box lists default to None entries. Returns the image.
    """
    n = len(boxes)
    if class_names is None:
        class_names = [None] * n
    if scores is None:
        scores = [None] * n
    if colors is None:
        colors = [None] * n
    for box, name, score, col in zip(boxes, class_names, scores, colors):
        _draw_box(im, box, name, score, col)
    return im
def _draw_box(im, box, class_name=None, score=None, color=None):
    """Draw one (x1, y1, x2, y2) box on `im`; when a class name is given,
    add a filled banner with the capitalized label (plus the score as a
    percentage, if supplied). Returns the image."""
    x1, y1, x2, y2 = box
    if color is None:
        color = (0, 255, 0)
    msg = None
    if class_name:
        msg = class_name.capitalize()
        if score is not None:
            msg += f' [{int(score * 100)}]'
    cv2.rectangle(im, (x1, y1), (x2, y2), color=color, thickness=2)
    if msg is not None:
        # filled label banner above the box, then the text in black
        cv2.rectangle(im, (x1 - 1, y1 - 20), (x2 + 1, y1), color, -1)
        cv2.putText(im, msg, (x1 + 10, y1 - 8), cv2.FONT_HERSHEY_SIMPLEX,
                    .5, (0, 0, 0), 2, cv2.LINE_AA)
    return im
| Guillem96/ssd-pytorch | ssd/viz.py | viz.py | py | 991 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.rectangle",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
... |
5558606800 | import os
from dotenv import load_dotenv
from configparser import ConfigParser
# parse model settings from model.conf and load env-var overrides from .env
conf = ConfigParser()
conf.read('model.conf')
load_dotenv('.env')
def _getenv(key, default): return type(default)(os.getenv(key)) if os.getenv(key) else default
SERVER_IP = _getenv('SERVER_IP', '0.0.0.0')  # Service IP
SERVER_PORT = _getenv('SERVER_PORT', '6002')  # Service port (kept as a string)
REGISTER = _getenv('REGISTER', 0)  # register to the management service (0/1)
MANAGER_IP = _getenv('MANAGER_IP', '127.0.0.1')  # Management server address
MANAGER_PORT = _getenv('MANAGER_PORT', 5005)  # Management server port
MANAGER_INTERFACE_REGISTER = _getenv('MANAGER_INTERFACE_REGISTER', '/model/register')  # registration endpoint path
MANAGER_INTERFACE_CANCEL = _getenv('MANAGER_INTERFACE_CANCEL', '/model/cancel')  # cancellation endpoint path
MODEL_TYPE = _getenv('MODEL_TYPE', conf.get('model', 'model_type', fallback=''))  # Service type
MODEL_VERSION = _getenv('MODEL_VERSION', 1)  # Service version number
ENGINE_FILE_PATH = _getenv('ENGINE_FILE_PATH', conf.get('model', 'engine_file_path', fallback=''))  # inference engine file
CLASS_NUM = _getenv('CLASS_NUM', int(conf.get('model', 'class_num', fallback='0')))  # number of model classes
# NOTE(review): conf.get('model', 'class_names') has no fallback and raises
# when the key is missing from model.conf — confirm fail-fast is intended
CLASS_NAMES = [name.strip() for name in _getenv('CLASS_NAMES', conf.get('model', 'class_names')).split(',')]
KEY = _getenv('KEY', 'LONGYUAN')  # shared authentication key
{
"api_name": "configparser.ConfigParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
}
] |
36132885755 | from random import choice
from time import sleep
from colorama import init, Fore
init()  # initialise colorama so ANSI colours work on Windows consoles
# build a 52-card deck: four copies of each rank (suits are irrelevant here)
deck_preset = ("A", *range(2, 11), "J", "Q", "K")
deck = [item for item in deck_preset for i in range(4)]
del deck_preset
class Card:
    """A single playing card: face cards count 10, an ace starts at 11
    (and may later be demoted to 1), number cards keep their pip value."""

    special_names = ["A", "J", "Q", "K"]

    def __init__(self, name):
        self.name = str(name)
        if name == "A":
            self.value = 11  # aces start high; change_aces() may drop this to 1
        elif name in Card.special_names:
            self.value = 10  # J, Q, K
        else:
            self.value = name  # numeric card: keep the integer rank

    def __repr__(self):
        # face cards show their value, e.g. "Q(10)"; number cards just the rank
        if self.name in Card.special_names:
            return f"{self.name}({self.value})"
        return f"{self.name}"
def calculate_scores(player):
    """Total the blackjack values of every card in the hand."""
    total = 0
    for card in player:
        total += card.value
    return total
def validate_score(player):
    """Return True when the hand is bust (score over 21), else False.

    The original returned None implicitly for non-bust hands; an explicit
    bool documents the contract and stays falsy for existing callers.
    """
    return calculate_scores(player) > 21
def print_cards(player, method="spread", hide_last=False):
    """Render a hand as text.

    method='spread' -> comma-separated card reprs; method='sum' -> the
    total value as a string. hide_last=True omits the final (face-down)
    card in either mode. Any other method returns None.
    """
    visible = player[:-1] if hide_last else player
    if method == "spread":
        return ', '.join(str(card) for card in visible)
    elif method == "sum":
        if hide_last:
            return str(sum(card.value for card in visible))
        return str(calculate_scores(player))
def print_scores(player, dealer, hide_dealer=True):
    """Print both hands with their totals; while hide_dealer is True the
    dealer's last card is shown as (?) and excluded from the shown total."""
    print(f"\nYour cards: {Fore.CYAN + print_cards(player) + Fore.WHITE} "
          f"[{Fore.MAGENTA + str(calculate_scores(player)) + Fore.WHITE}]")
    if hide_dealer:
        print(f"Dealer cards: {Fore.CYAN + print_cards(dealer, 'spread', hide_dealer) + Fore.WHITE}, (?)"
              f"[{Fore.MAGENTA + print_cards(dealer, 'sum', hide_dealer) + Fore.WHITE}]")
    else:
        print(f"Dealer cards: {Fore.CYAN + print_cards(dealer, 'spread', hide_dealer) + Fore.WHITE} "
              f"[{Fore.MAGENTA + print_cards(dealer, 'sum', hide_dealer) + Fore.WHITE}]")
def draw_cards(n=1):
    """Draw n random cards, removing them from the shared module deck.

    Returns a list of freshly constructed Card objects.
    """
    drawn = []
    for _ in range(n):
        pick = choice(deck)
        deck.remove(pick)
        drawn.append(Card(pick))
    return drawn
def change_aces(player):
    """Downgrade soft aces (value 11) to 1, one at a time, until the
    hand is no longer bust or no soft aces remain.

    Mutates the Card objects in place and returns None.

    Bug fix: the previous version popped from the index list while
    iterating over it, which skipped every other ace — e.g. a hand of
    three aces plus a king stayed at 23 instead of dropping to 13.
    It also relied on list.index(), which is fragile with duplicates.
    """
    if calculate_scores(player) <= 21:
        return
    for card in player:
        if card.name == "A" and card.value == 11:
            card.value = 1  # soft ace becomes hard ace
            if calculate_scores(player) <= 21:
                break
def check_scores(player1, player2, check_draw=False):
    """With check_draw, report whether both hands have equal totals;
    otherwise report whether the first hand totals exactly 21."""
    first_total = calculate_scores(player1)
    second_total = calculate_scores(player2)
    if check_draw:
        return first_total == second_total
    return first_total == 21
def compare_scores(player, dealer):
    """Decide the round outcome after the player stands.

    Returns True when the dealer must keep drawing (dealer trails, or
    scores tie with the dealer at 18 or below); otherwise prints the
    result and terminates the program via quit().
    """
    player_score = calculate_scores(player)
    dealer_score = calculate_scores(dealer)
    # Dealer trails the player: signal end_game() to draw another card.
    if dealer_score < player_score:
        return True
    # Both hands are exactly 21: immediate draw.
    if check_scores(player, dealer) and check_scores(dealer, player):
        print(Fore.YELLOW + "\n----------Draw!----------")
        quit()
    elif check_scores(player, dealer, True):
        # Equal totals: call it a draw once the dealer is above 18,
        # otherwise let the dealer draw again.
        if calculate_scores(dealer) > 18:
            print(Fore.YELLOW + "\n----------Draw!----------")
            quit()
        else:
            return True
    # NOTE(review): this branch is unreachable — the early return above
    # already handled every dealer_score < player_score case.
    elif 21 >= player_score > dealer_score:
        print(Fore.GREEN + "\n----------You win!----------")
        quit()
    elif 21 >= dealer_score > player_score:
        print(Fore.RED + "\n----------Dealer wins!----------")
        quit()
    else:
        print(Fore.BLUE + "Unexpected situation:", player_score, dealer_score)
        quit()
def end_game(player, dealer):
    """Resolve the round after the player stands.

    Settles soft aces on both hands, then lets the dealer draw while
    compare_scores() says it must; compare_scores() prints the result
    and exits the program when the round is decided.
    """
    change_aces(player)
    change_aces(dealer)
    print_scores(player, dealer, False)
    while compare_scores(player, dealer):
        dealer.extend(draw_cards())
        change_aces(dealer)
        sleep(1)  # small pause so the dealer's draws read naturally
        print_scores(player, dealer, False)
        if validate_score(dealer):
            # Dealer went bust while drawing.
            print(Fore.GREEN + "\n----------You win!----------")
            quit()
def game():
    """Run one interactive round of blackjack.

    Deals two cards to the player and the dealer, then repeatedly
    prompts the player to draw ('d') or stand ('s'). The round always
    ends via quit() inside the helpers, so the loop never exits normally.
    """
    in_game = True
    player = draw_cards(2)
    change_aces(player)
    dealer = draw_cards(2)
    print_scores(player, dealer)
    while in_game:
        button_draw = Fore.GREEN + "'d'" + Fore.WHITE
        button_stand = Fore.GREEN + "'s'" + Fore.WHITE
        print(f"Type {button_draw} to draw a card or {button_stand} to stand: ", end='')
        # NOTE(review): pressing Enter on an empty line raises IndexError
        # on user_choice[0] — confirm whether that is acceptable.
        user_choice = input().lower().strip()
        if user_choice[0] == "d":
            player.extend(draw_cards())
            change_aces(player)
            print_scores(player, dealer)
            if validate_score(player):
                # Player went bust.
                print(Fore.RED + "\n----------Dealer wins!----------")
                quit()
        elif user_choice[0] == "s":
            # end_game() never returns; it exits via quit().
            end_game(player, dealer)
        else:
            print(Fore.YELLOW + "\n----------Invalid choice.----------" + Fore.WHITE)
print("""
.------. _ _ _ _ _
|A_ _ |. | | | | | | (_) | |
|( \/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __
| \ /|K /\ | | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
| \/ | / \ | | |_) | | (_| | (__| <| | (_| | (__| <
`-----| \ / | |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
| \/ K| _/ |
`------' |__/
""")
game()
| Rikaisan/100-days-of-code | python-files/11_blackjack.py | 11_blackjack.py | py | 5,613 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "colorama.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "colorama.Fore.CYAN",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.WHIT... |
import numpy as np
import pandas as pd
import tensorflow as tf

from sklearn.metrics import multilabel_confusion_matrix, confusion_matrix, precision_score, recall_score, f1_score
def calculate_output(model, actual_classes, session, feed_dict):
    """Run the TF1 graph and return (actual, predicted) class indices.

    Both tensors are reduced with argmax over axis 1, i.e. one-hot or
    probability rows become integer labels.
    """
    actuals = tf.argmax(actual_classes, 1)
    predictions = tf.argmax(model, 1)
    actuals = session.run(actuals, feed_dict)
    predictions = session.run(predictions, feed_dict)
    return actuals, predictions
def tf_confusion_metrics(model, actual_classes, session, feed_dict):
    """Compute macro-averaged confusion-matrix statistics for the model.

    Returns a pandas DataFrame of Measure/Percentage rows (PR, RC, F1,
    ACC, FPR, FNR plus the averaged raw tp/tn/fp/fn counts). If the
    arithmetic fails (e.g. division by zero), the percentage rows are
    reported as 'Err', matching the original behavior.

    Cleanups: numpy is now imported at module level, the unused
    confusion_matrix() call was dropped, and the local result no longer
    shadows sklearn's imported f1_score function.
    """
    cat = 5  # number of classes; NOTE(review): hard-coded — confirm against the model
    actuals, predictions = calculate_output(model, actual_classes, session, feed_dict)
    lbls = [*range(cat)]
    # One 2x2 confusion matrix per label; average each cell type across labels.
    mcm = multilabel_confusion_matrix(actuals, predictions, labels=lbls)
    tp = np.mean(mcm[:, 1, 1])
    tn = np.mean(mcm[:, 0, 0])
    fn = np.mean(mcm[:, 1, 0])
    fp = np.mean(mcm[:, 0, 1])
    try:
        recall = float(tp) / (float(tp) + float(fn))
        accuracy = (float(tp) + float(tn)) / (float(tp) + float(fp) + float(fn) + float(tn))
        if (fp + tp) != 0:
            precision = float(tp) / (float(tp) + float(fp))
            # Named "f1" to avoid shadowing sklearn.metrics.f1_score.
            f1 = (2 * (precision * recall)) / (precision + recall)
        else:
            precision = 0
            f1 = 0
        fp_rate = float(fp) / (float(fp) + float(tn))
        fn_rate = float(fn) / (float(fn) + float(tp))
        data_pd = [
            ['PR', str(round(precision * 100, 2))],
            ['RC', str(round(recall * 100, 2))],
            ['F1', str(round(f1 * 100, 2))],
            ['ACC', str(round(accuracy * 100, 2))],
            ['FPR', str(round(fp_rate * 100, 2))],
            ['FNR', str(round(fn_rate * 100, 2))],
            ['tp', tp], ['tn', tn], ['fp', fp], ['fn', fn],
        ]
        df = pd.DataFrame(data_pd, columns=['Measure', 'Percentage'])
    except Exception as e:
        print(e)
        data_pd = [['PR', 'Err'], ['RC', 'Err'], ['F1', 'Err'], ['ACC', 'Err'], ['FPR', 'Err'], ['FNR', 'Err']]
        df = pd.DataFrame(data_pd, columns=['Measure', 'Percentage'])
    return df
def tf_confusion_metrics_2(model, actual_classes, session, feed_dict):
    """Print a header and return the raw TF1 confusion matrix as an array."""
    actuals, predictions = calculate_output(model, actual_classes, session, feed_dict)
    cm = tf.confusion_matrix(actuals, predictions)
    print("Confusion Matrix")
    return session.run(cm, feed_dict)
def Macro_calculate_measures_tf(y_true, y_pred, session, feed_dict):
    """Return macro-averaged (precision, recall, f1) for the model outputs.

    :param y_true: tensor of one-hot ground-truth labels.
    :param y_pred: model output tensor.
    :param session: TF1 session used to evaluate both tensors.
    :param feed_dict: feed dict for the session run.
    """
    y_true, y_pred = calculate_output(y_pred, y_true, session, feed_dict)
    pr = precision_score(y_true, y_pred, average='macro')
    rc = recall_score(y_true, y_pred, average='macro')
    f1 = f1_score(y_true, y_pred, average='macro')
    # Bug fix: the old log line concatenated the three floats with no
    # separators, producing an unreadable string like "0.50.40.3".
    print(f"pr, rc, f1: {pr}, {rc}, {f1}")
    return pr, rc, f1
| Sam-Mah/PLDNN | tensorflow_confusion_metrics.py | tensorflow_confusion_metrics.py | py | 2,817 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "tensorflow.argmax",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.argmax",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.multilabel_confusion_matrix",
"line_number": 19,
"usage_type": "call"
},
{
"api... |
38514794793 | import gc
import os
from pathlib import Path
from typing import Any, Dict, cast
import mlflow
import numpy as np
import onnx
import torch
import transformers
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from transformers.modeling_utils import PreTrainedModel
from transformers.onnx import FeaturesManager, export, validate_model_outputs
from crypto_sentiment_demo_app.models.train.base import IModelTrain, TrainRegistry
from .dataset import build_dataloaders, split_train_val
from .pipeline import MetricTracker, SentimentPipeline
transformers.logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@TrainRegistry.register("bert")
class Bert(IModelTrain):
    """Bert model. Wrapper for hugging face models.

    :param cfg: model config
    """

    def __init__(self, cfg: Dict[str, Any]):
        """Init model.

        Reads the "model" sub-config and the class names, and resolves
        the compute device (falling back to CPU when "gpu" is requested
        but CUDA is unavailable).
        """
        super().__init__(cfg)
        self.model_cfg = self.cfg["model"]
        self.class_names = self.cfg["data"]["class_names"]
        if self.model_cfg["device"] == "gpu" and not torch.cuda.is_available():
            self.device = torch.device("cpu")
        else:
            self.device = torch.device(self.model_cfg["device"])

    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """Fit model.

        :param X: train data
        :param y: train labels
        """
        seed_everything(self.model_cfg["seed"])
        train_data, val_data, train_labels, val_labels = split_train_val(X, y)
        train_dataloader, val_dataloader = build_dataloaders(
            self.model_cfg, train_data, train_labels, val_data, val_labels
        )
        self.model = SentimentPipeline(self.model_cfg)
        metric_tracker = MetricTracker()
        checkpoint_path = Path(self.model_cfg["checkpoint_path"]).parent
        checkpoint_filename = Path(self.model_cfg["checkpoint_path"]).stem
        # Keep only the single checkpoint with the best validation accuracy.
        checkpoint_callback = ModelCheckpoint(
            save_top_k=1,
            monitor="val_acc",
            mode="max",
            dirpath=checkpoint_path,
            filename=checkpoint_filename,
        )
        gpus = 1 if self.device.type == "cuda" and torch.cuda.is_available() else 0
        self.trainer = Trainer(
            max_epochs=self.model_cfg["epochs"],
            gpus=gpus,
            callbacks=[metric_tracker, checkpoint_callback],
            num_sanity_val_steps=0,
            enable_checkpointing=True,
            logger=False,
        )
        self.trainer.fit(
            self.model,
            train_dataloaders=train_dataloader,
            val_dataloaders=val_dataloader,
        )

    def save(self) -> None:
        """Save model.

        Exports the model to ONNX, logs/registers it with MLflow, then
        reloads the best checkpoint and saves tokenizer + weights in the
        Hugging Face pretrained format.

        NOTE(review): the "(unknown)" placeholders below look like a
        redaction artifact — the intent is presumably f"{filename}.pt" /
        f"{filename}.onnx" (the `filename` variable is otherwise unused);
        confirm against the upstream source.
        """
        save_dir = Path(self.model_cfg["path_to_model"]).parent
        filename = Path(self.model_cfg["path_to_model"]).stem
        pt_path = save_dir / f"(unknown).pt"
        onnx_path = save_dir / f"(unknown).onnx"
        self._onnx_export(onnx_path)
        onnx_model = onnx.load_model(onnx_path)
        mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path="bert", registered_model_name="bert")
        # Free the in-memory ONNX graph before reloading the torch checkpoint.
        del onnx_model
        gc.collect()
        self.model = SentimentPipeline.load_from_checkpoint(self.model_cfg["checkpoint_path"], cfg=self.model_cfg)
        cast(PreTrainedModel, self.model.model).eval()
        cast(PreTrainedModel, self.model.tokenizer).save_pretrained(pt_path)
        cast(PreTrainedModel, self.model.model).save_pretrained(pt_path)

    def load(self) -> None:
        """Load model checkpoint."""
        self.model = SentimentPipeline.load_from_checkpoint(self.model_cfg["checkpoint_path"], cfg=self.model_cfg)

    def _onnx_export(self, path: Path):
        """Export the wrapped transformer to ONNX at ``path`` and validate
        that the exported graph reproduces the torch model outputs."""
        model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(
            self.model.model, feature="sequence-classification"
        )
        onnx_config = model_onnx_config(self.model.model.config)
        onnx_inputs, onnx_outputs = export(
            self.model.tokenizer, self.model.model, onnx_config, onnx_config.default_onnx_opset, path
        )
        validate_model_outputs(
            onnx_config, self.model.tokenizer, self.model.model, path, onnx_outputs, onnx_config.atol_for_validation
        )

    def enable_mlflow_logging(self) -> None:
        """Turn on MLflow autologging under the "bert" experiment."""
        mlflow.set_experiment("bert")
        mlflow.pytorch.autolog()
| crypto-sentiment/crypto_sentiment_demo_app | crypto_sentiment_demo_app/models/train/bert/model.py | model.py | py | 4,376 | python | en | code | 25 | github-code | 6 | [
{
"api_name": "transformers.logging.set_verbosity_error",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "transformers.logging",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
... |
26185607454 | """Rotate Image
You are given an n x n 2D matrix representing an image, rotate the image by 90 degrees (clockwise).
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
Output: [[7,4,1],[8,5,2],[9,6,3]]
Input: matrix = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
Output: [[15,13,2,5],[14,3,4,1],[12,6,8,9],[16,7,10,11]]
"""
from typing import List
import unittest
def rotate(matrix: List[List[int]]) -> None:
    """Rotate the n x n matrix 90 degrees clockwise, in place.

    Modifies the given matrix directly and returns None.
    """
    n = len(matrix)
    # Transpose in place, then mirror every row left-to-right: together
    # these two reflections are a clockwise quarter turn.
    for i in range(n):
        for j in range(i + 1, n):
            matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
    for row in matrix:
        row.reverse()
class TestProblems(unittest.TestCase):
    """Unit tests for the in-place rotate()."""

    def test_rotate_image(self):
        # Bug fix: rotate() mutates in place and returns None, so the old
        # assertions compared None against the expected matrix, which makes
        # assertCountEqual raise TypeError. Assert on the matrix itself.
        matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        rotate(matrix)
        self.assertEqual(matrix, [[7, 4, 1], [8, 5, 2], [9, 6, 3]])

        matrix_1 = [[5, 1, 9, 11], [2, 4, 8, 10], [13, 3, 6, 7], [15, 14, 12, 16]]
        rotate(matrix_1)
        self.assertEqual(matrix_1, [[15, 13, 2, 5], [14, 3, 4, 1], [12, 6, 8, 9], [16, 7, 10, 11]])

if __name__ == '__main__':
    unittest.main()
| 01o91939/leetcode | rotateImage.py | rotateImage.py | py | 1,441 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 42,
"usage_type": "call"
}
] |
5809207089 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:biovectors]
# language: python
# name: conda-env-biovectors-py
# ---
# # Statistical Test for Multi-Model Variation
# After confirming that aligning multiple word2vec models is a success [03_multi_model_alignment_check.ipynb](03_multi_model_alignment_check.ipynb), the next step is to construct a metric that accounts for intra and inter year variation.
#
# Typically, the way to compare words words is to use cosine distance, which measures the distance between two vectors by looking at the angle between two vectors.
# A more common name for this would be [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity); however, the difference here is that cosine distance shifts the range from -1 to 1 to 0 to 2 (1 - cosine similarity).
#
# Regarding this project, I'm using cosine distance to see how a word changes across time.
# I based this comparison off of two metrics defined by authors in [this paper](http://arxiv.org/abs/1606.02821).
# - Global distance is defined as the cosine distance between words in year with their second year counterparts
# - Local distance is defined as the cosine distance of a word's similarity to its neighbors across time (no longer used)
# +
# %load_ext autoreload
# %autoreload 2
from collections import Counter
import csv
import copy
import itertools
import math
from pathlib import Path
import random
import re
from gensim.models import Word2Vec, KeyedVectors
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
import plotnine as p9
import plydata as ply
import plydata.tidy as ply_tdy
import scipy.stats as stats
import tqdm
from biovectors_modules.word2vec_analysis_helper import align_word2vec_models
# -
# Method only used for this notebook
def return_global_plot(year_model, tok="are", limits=(0, 1), inter_or_intra="intra"):
    """Build a flipped boxplot of a token's global distance per year.

    Filters the dataframe down to one token and plots the distribution
    of ``global_distance`` for each year on a fixed y-scale so plots for
    different tokens are visually comparable.
    """
    g = (
        p9.ggplot(
            year_model >> ply.query(f"tok=='{tok}'"),
            p9.aes(x="year", y="global_distance"),
        )
        + p9.geom_boxplot()
        + p9.labs(
            title=f"{inter_or_intra.capitalize()} Year global Distance for Token: '{tok}'"
        )
        + p9.coord_flip()
        + p9.scale_y_continuous(limits=limits)
        + p9.theme_seaborn(style="white")
    )
    return g
# # Grab a listing of all word models
word_models = list(Path("output/models").rglob("*model"))
word_models = sorted(word_models, key=lambda x: x.stem)
word_model_filter = list(filter(lambda x: "2021" not in x.stem, word_models))
alignment_base_model = Word2Vec.load(str(word_model_filter[-1]))
temp_output_path = Path("output/aligned_vectors_tmp")
for model_file in tqdm.tqdm(word_model_filter):
if not Path(f"{str(temp_output_path)}/{model_file.stem}.kv").exists():
word_model = Word2Vec.load(str(model_file))
aligned_model = align_word2vec_models(alignment_base_model.wv, word_model.wv)
aligned_model.save(f"{str(temp_output_path)}/{model_file.stem}.kv")
# # Inter and Intra Variation calculation
# Refer to the following scripts in order to perform inter and intra word2vec calculations:
# 1. [pmacs_cluster_running_inter_model_variation.py](pmacs_cluster_running_inter_model_variation.py)
# 2. [pmacs_cluster_running_intra_model_variation.py](pmacs_cluster_running_intra_model_variation.py)
# # Are word2vec models unstable?
# Due to the nature of negative sampling, word2vec models generate weights arbitrarily.
# This is undesired as a token in the year 2000 cannot be compared with a token in 2001.
# A solution is to use orthogonal procrustes to align word2vec models; however, variation could still remain in these word models.
# To measure this variation I trained 10 unique word2vec models on abstracts for each given year and then calculated global and local distances between every word2vec model pair (10 choose 2).
# From there I analyzed variation within each year (termed intra-year variation).
# ## Intra Model Calculations
intra_year_models = []
for idx, file in enumerate(Path("output/intra_models").rglob("*.tsv.xz")):
intra_year_model_df = pd.read_csv(
str(file), sep="\t", na_filter=False
) >> ply_tdy.extract("year_pair", into="year", regex=r"(\d+)_", convert=True)
intra_year_models.append(intra_year_model_df)
if Path(
f"output/averaged_intra_models/average_{str(Path(file.stem).stem)}.tsv"
).exists():
continue
averaged_intra_year_models = dict()
for idx, row in tqdm.tqdm(
intra_year_model_df.iterrows(), desc=f"intra_df: {str(file)}"
):
if (row["tok"], int(row["year"])) not in averaged_intra_year_models:
averaged_intra_year_models[(row["tok"], int(row["year"]))] = dict(
global_distance=[], local_distance=[]
)
averaged_intra_year_models[(row["tok"], int(row["year"]))][
"global_distance"
].append(row["global_distance"])
averaged_intra_year_models[(row["tok"], int(row["year"]))][
"local_distance"
].append(row["local_distance"])
with open(
f"output/averaged_intra_models/average_{str(Path(file.stem).stem)}.tsv", "w"
) as outfile:
fieldnames = [
"average_global_distance",
"average_local_distance",
"var_global_distance",
"var_local_distance",
"tok",
"year",
]
writer = csv.DictWriter(outfile, fieldnames=fieldnames, delimiter="\t")
writer.writeheader()
for tok, year in tqdm.tqdm(
averaged_intra_year_models, desc=f"summary_intra_writer: {str(file.stem)}"
):
writer.writerow(
{
"average_global_distance": np.mean(
averaged_intra_year_models[(tok, year)]["global_distance"]
),
"var_global_distance": np.var(
averaged_intra_year_models[(tok, year)]["global_distance"]
),
"average_local_distance": np.mean(
averaged_intra_year_models[(tok, year)]["local_distance"]
),
"var_local_distance": np.var(
averaged_intra_year_models[(tok, year)]["local_distance"]
),
"tok": tok,
"year": year,
}
)
intra_year_models = pd.concat(intra_year_models)
intra_year_models.year = pd.Categorical(intra_year_models.year.tolist())
intra_year_models.head()
return_global_plot(intra_year_models, limits=(0, 0.1))
return_global_plot(intra_year_models, "privacy", limits=(0, 0.5))
return_global_plot(intra_year_models, "rna", limits=(0, 0.5))
# ## Inter Model Calculations
for idx, file in enumerate(Path("output/inter_models/on_years").rglob("*.tsv.xz")):
average_file_name = f"output/averaged_inter_models/average_{str(Path(file).stem)}"
if Path(average_file_name).exists():
continue
inter_year_model_df = pd.read_csv(
str(file), sep="\t", na_filter=False
) >> ply_tdy.extract(
"year_pair", into=["year1", "year2"], regex=r"(\d+)_\d-(\d+)_\d", convert=True
)
averaged_inter_year_models = dict()
for idx, row in tqdm.tqdm(
inter_year_model_df.iterrows(), desc=f"inter_df {str(Path(file).stem)}"
):
if (
row["tok"],
int(row["year1"]),
int(row["year2"]),
) not in averaged_inter_year_models:
averaged_inter_year_models[
(row["tok"], int(row["year1"]), int(row["year2"]))
] = dict(global_distance=[], local_distance=[])
averaged_inter_year_models[(row["tok"], int(row["year1"]), int(row["year2"]))][
"global_distance"
].append(row["global_distance"])
with open(average_file_name, "w") as outfile:
fieldnames = [
"average_global_distance",
"var_global_distance",
"tok",
"year1",
"year2",
]
writer = csv.DictWriter(outfile, fieldnames=fieldnames, delimiter="\t")
writer.writeheader()
for tok, year1, year2 in tqdm.tqdm(
averaged_inter_year_models, desc="summary_inter_writer"
):
writer.writerow(
{
"average_global_distance": np.mean(
averaged_inter_year_models[(tok, year1, year2)][
"global_distance"
]
),
"var_global_distance": np.var(
averaged_inter_year_models[(tok, year1, year2)][
"global_distance"
]
),
"tok": tok,
"year1": year1,
"year2": year2,
}
)
# # Custom Statistic that accounts for Inter and Intra Variation
# I needed to figure out a metric to take in inter-year (between years) and intra-year(within year variation).
# Turns out population genetics developed a statistic that accounts for genetic variation between populations and with in populations (termed $Q_{st}$).
# This metric is calculated via this equation: $$Q_{st}= \frac{Variation_{between}}{Variation_{between} + 2*Variation_{within}}$$
#
# Translating this equation into my field, population is the same as a group of word2vec models trained on abstracts for a given year.
# Each "year" has it's own variation (intra) along with variation across years (inter), so the idea here is to try and capture this instability.
#
# Using the equation above as inspiration, I devise a custom equation below.
#
# First have to define the distance mapping function:
# Let distance be cosine distance: $$ distance(w_{x}, w_{y}) = cos\_dist(w_{x}, w_{y})$$ where $$ 0 \leq cos\_dist(w_{x}, w_{y}) \leq 2$$
#
# Values close to 2 signify completely opposite word contexts, while values close to 0 signify same word context.
#
# Every publication year has ten models. I took the average distance of every model combination for a given year to calculate the intra year variation for each given word.
# E.g. year 2000 has 10 choose 2 options so for every combination pair I calculated the distance above and then averaged over all years.
# For inter year I just performed the cartesian product of all models between years and then perform the same average approach above.
# Now assume each distance is averaged, we get the following equation:
#
# $$\hat{Distance} = \frac{Distance_{inter\_year(x,y)}}{Distance_{inter\_year(x,y)} + Distance_{intra\_year(x)} + Distance_{intra\_year(y)}}$$
#
# Where x and y are a particular year and $x \neq y$.
# If $x = y$ then this estimate would be 1.
#
# However, I cant use this metric for bayesian changepoint detection as this metric would be completely dominated by
# the frequency ratio metric.
# In other words the above metric is bound between 0 and 1, while the frequency ratio is bounded between 0 and infinity.
# Therefore, the change metric heavily depends on frequency to work. This is bad as there are words that have undergone a semantic change, but have yet to have a change in frequency to detect said change (e.g. increase).
#
# To account for this I'm using the following metric instead:
# $$\hat{Distance} = \frac{Distance_{inter\_year(x,y)}}{Distance_{intra\_year(x)} + Distance_{intra\_year(y)}}$$
intra_year_averaged = pd.concat(
[
pd.read_csv(str(file), sep="\t", na_filter=False)
for file in Path("output/averaged_intra_models").rglob("*.tsv")
]
)
intra_year_averaged.head()
tok_intra_year = dict()
for idx, row in tqdm.tqdm(intra_year_averaged.iterrows()):
tok_intra_year[(row["tok"], row["year"])] = {
"global": row["average_global_distance"],
"local": row["average_local_distance"],
}
inter_model_files = list(Path("output/averaged_inter_models").rglob("*tsv"))
unique_years = set(
list(map(lambda x: int(re.search(r"(\d+)", x.stem).groups()[0]), inter_model_files))
)
len(unique_years)
for year in unique_years:
if Path(
f"output/combined_inter_intra_distances/saved_{year}-{year+1}_distance.tsv"
).exists():
print(f"{year}-{year+1} exists!")
continue
inter_year_models_averaged = pd.concat(
[
pd.read_csv(str(file), sep="\t", na_filter=False)
for file in filter(
lambda x: int(re.search(r"(\d+)", x.stem).group(0)) == year,
Path("output/averaged_inter_models").rglob(f"*{year}*.tsv"),
)
]
)
data = []
already_seen = set()
for idx, row in tqdm.tqdm(inter_year_models_averaged.iterrows()):
# Inter year variation
global_inter_top = row["average_global_distance"]
# local_inter_top = row["average_local_distance"]
if (row["tok"], int(row["year1"])) not in tok_intra_year or (
row["tok"],
int(row["year2"]),
) not in tok_intra_year:
continue
# global intra year variation
global_intra_bottom = (
tok_intra_year[(row["tok"], int(row["year1"]))]["global"]
+ tok_intra_year[(row["tok"], int(row["year2"]))]["global"]
)
global_distance_qst = global_inter_top / (
global_inter_top + global_intra_bottom
)
data.append(
{
"tok": row["tok"],
"original_global_distance": global_inter_top,
"global_distance_qst": global_distance_qst,
"ratio_metric": global_inter_top / global_intra_bottom,
"year_1": row["year1"],
"year_2": row["year2"],
}
)
(
pd.DataFrame.from_records(data)
>> ply.call(
".to_csv",
f"output/combined_inter_intra_distances/saved_{year}-{year+1}_distance.tsv",
sep="\t",
index=False,
)
)
| greenelab/biovectors | multi_model_experiment/04_novel_distance_calculations.py | 04_novel_distance_calculations.py | py | 14,275 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "plotnine.ggplot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "plydata.query",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "plotnine.aes",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "plotnine.geom_boxplot",
... |
46058474656 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.db import IntegrityError, transaction
from .managers import TopicNotificationQuerySet
from spirit.core.conf import settings
class TopicNotification(models.Model):
    """Per-user notification state for a topic.

    One row per (user, topic) pair; the ``comment`` FK points at the
    comment that most recently triggered the notification.
    """

    # Action codes stored in the ``action`` integer field.
    UNDEFINED, MENTION, COMMENT = range(3)
    ACTION_CHOICES = (
        (UNDEFINED, _("Undefined")),
        (MENTION, _("Mention")),
        (COMMENT, _("Comment")))
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='st_topic_notifications',
        on_delete=models.CASCADE)
    topic = models.ForeignKey(
        'spirit_topic.Topic',
        on_delete=models.CASCADE)
    comment = models.ForeignKey(
        'spirit_comment.Comment',
        on_delete=models.CASCADE)
    date = models.DateTimeField(default=timezone.now)
    action = models.IntegerField(choices=ACTION_CHOICES, default=UNDEFINED)
    is_read = models.BooleanField(default=False)
    # is_active marks a live subscription; inactive rows are kept but not updated.
    is_active = models.BooleanField(default=False)
    objects = TopicNotificationQuerySet.as_manager()

    class Meta:
        # At most one notification row per user per topic.
        unique_together = ('user', 'topic')
        ordering = ['-date', '-pk']
        verbose_name = _("topic notification")
        verbose_name_plural = _("topics notification")

    def get_absolute_url(self):
        """Link to the triggering comment, falling back to the topic when
        the comment was moved to another topic (out-of-sync row)."""
        if self.topic_id != self.comment.topic_id:
            # Out of sync
            return self.topic.get_absolute_url()
        return self.comment.get_absolute_url()

    @property
    def text_action(self):
        # Human-readable label for the stored action code.
        return self.ACTION_CHOICES[self.action][1]

    @property
    def is_mention(self):
        return self.action == self.MENTION

    @property
    def is_comment(self):
        return self.action == self.COMMENT

    @classmethod
    def mark_as_read(cls, user, topic):
        """Mark the user's notification for this topic as read (no-op for
        anonymous users)."""
        if not user.is_authenticated:
            return
        (cls.objects
         .filter(user=user, topic=topic)
         .update(is_read=True))

    @classmethod
    def create_maybe(cls, user, comment, is_read=True, action=COMMENT):
        """Get or create the (user, topic) notification row seeded from
        the given comment. Returns the (obj, created) pair."""
        # Create a dummy notification
        return cls.objects.get_or_create(
            user=user,
            topic=comment.topic,
            defaults={
                'comment': comment,
                'action': action,
                'is_read': is_read,
                'is_active': True})

    @classmethod
    def notify_new_comment(cls, comment):
        """Point every active, already-read subscription on the topic at
        the new comment and flag it unread (skipping the author)."""
        (cls.objects
         .filter(topic=comment.topic, is_active=True, is_read=True)
         .exclude(user=comment.user)
         .update(
             comment=comment,
             is_read=False,
             action=cls.COMMENT,
             date=timezone.now()))

    @classmethod
    def notify_new_mentions(cls, comment, mentions):
        """Create (or refresh) MENTION notifications for every mentioned user.

        IntegrityError from the unique (user, topic) constraint means the
        row already exists; the follow-up update then refreshes it.
        """
        if not mentions:
            return
        # TODO: refactor
        for user in mentions.values():
            try:
                # atomic() keeps the expected IntegrityError from aborting
                # the surrounding transaction.
                with transaction.atomic():
                    cls.objects.create(
                        user=user,
                        topic=comment.topic,
                        comment=comment,
                        action=cls.MENTION,
                        is_active=True)
            except IntegrityError:
                pass
        (cls.objects
         .filter(
             user__in=tuple(mentions.values()),
             topic=comment.topic,
             is_read=True)
         .update(
             comment=comment,
             is_read=False,
             action=cls.MENTION,
             date=timezone.now()))

    @classmethod
    def bulk_create(cls, users, comment):
        """Insert active COMMENT notifications for all given users in one query."""
        return cls.objects.bulk_create([
            cls(user=user,
                topic=comment.topic,
                comment=comment,
                action=cls.COMMENT,
                is_active=True)
            for user in users])

    # XXX add tests
    # XXX fix with migration (see issue #237)
    @classmethod
    def sync(cls, comment, topic):
        # Notifications can go out of sync
        # when the comment is no longer
        # within the topic (i.e moved).
        # User is subscribed to the topic,
        # not the comment, so we either update
        # it to a newer comment or set it as undefined
        if comment.topic_id == topic.pk:
            return
        next_comment = (
            topic.comment_set
            .filter(date__gt=comment.date)
            .order_by('date')
            .first())
        if next_comment is None:
            # No newer comment in the topic: neutralize the notification.
            (cls.objects
             .filter(comment=comment, topic=topic)
             .update(is_read=True, action=cls.UNDEFINED))
            return
        (cls.objects
         .filter(comment=comment, topic=topic)
         .update(comment=next_comment, action=cls.COMMENT))
| nitely/Spirit | spirit/topic/notification/models.py | models.py | py | 4,758 | python | en | code | 1,153 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 15,
"usage_type": "call"
},
{
... |
32645650527 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains functions related with Maya tag functionality for ueGear.
"""
from __future__ import print_function, division, absolute_import
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
from mgear.uegear import utils, log
logger = log.uegear_logger
TAG_ASSET_GUID_ATTR_NAME = "ueGearAssetGuid"
TAG_ASSET_TYPE_ATTR_NAME = "ueGearAssetType"
TAG_ASSET_NAME_ATTR_NAME = "ueGearAssetName"
TAG_ASSET_PATH_ATTR_NAME = "ueGearAssetPath"
TAG_ACTOR_NAME_ATTR_NAME = "ueGearActorName"
ALL_TAGS_ATTR_NAMES = [
TAG_ASSET_GUID_ATTR_NAME,
TAG_ASSET_TYPE_ATTR_NAME,
TAG_ASSET_NAME_ATTR_NAME,
TAG_ASSET_PATH_ATTR_NAME,
TAG_ACTOR_NAME_ATTR_NAME,
]
class TagTypes(object):
    """
    Class that holds all available tag types.
    """

    # Values written into the ueGearAssetType attribute so the ueGear
    # exporter can classify each tagged node.
    Skeleton = "skeleton"
    StaticMesh = "staticmesh"
    SkeletalMesh = "skeletalmesh"
    Camera = "camera"
    Alembic = "alembic"
    MetahumanBody = "metahumanbody"
    MetahumanFace = "metahumanface"
    Sequence = "sequence"
def auto_tag(node=None, remove=False):
    """
    Automatically tags given (or current selected nodes) so ueGear exporter can identify how to export the specific
    nodes.

    :param str or list(str) or None node: node/s to tag.
    :param bool remove: if True tag will be removed.
    """

    def _tag(target, tag_type):
        # Apply or strip the asset-type tag depending on the remove flag.
        # (Replaces the conditional-expression-as-statement anti-pattern.)
        if remove:
            remove_tag(target)
        else:
            apply_tag(target, attribute_value=tag_type)

    nodes = utils.force_list(node or cmds.ls(sl=True, long=True))
    for each_node in nodes:
        # Skinned joints are tagged as the skeletal-mesh root.
        found_skin_clusters = utils.get_skin_clusters_for_node(each_node)
        if found_skin_clusters and cmds.objectType(each_node) == "joint":
            _tag(each_node, TagTypes.SkeletalMesh)
            continue
        shapes = cmds.listRelatives(each_node, fullPath=True, shapes=True)
        if not shapes:
            continue
        first_shape = utils.get_first_in_list(shapes)
        if not first_shape:
            continue
        object_type = cmds.objectType(first_shape)
        if object_type == "mesh":
            # Skinned meshes get the skeleton tag; unskinned are static.
            if utils.get_skin_clusters_for_node(first_shape):
                _tag(each_node, TagTypes.Skeleton)
            else:
                _tag(each_node, TagTypes.StaticMesh)
        elif object_type == "camera":
            _tag(each_node, TagTypes.Camera)
def apply_tag(
    node=None, attribute_name=TAG_ASSET_TYPE_ATTR_NAME, attribute_value=""
):
    """
    Creates a new tag attribute with given value into given node/s (or selected nodes).

    :param str or list(str) or None node: nodes to apply tag to.
    :param str attribute_name: tag attribute value to use. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
    :param str attribute_value: value to set tag to.
    """

    nodes = utils.force_list(node or cmds.ls(sl=True))
    attribute_value = str(attribute_value)
    for node in nodes:
        # Create the string attribute on first use; later calls just overwrite it.
        if not cmds.attributeQuery(attribute_name, node=node, exists=True):
            cmds.addAttr(node, longName=attribute_name, dataType="string")
        cmds.setAttr(
            "{}.{}".format(node, attribute_name),
            attribute_value,
            type="string",
        )
        if attribute_value:
            logger.info(
                'Tagged "{}.{}" as {}.'.format(
                    node, attribute_name, attribute_value
                )
            )
        else:
            logger.info(
                'Tagged "{}.{}" as empty.'.format(node, attribute_name)
            )
def remove_tag(node=None, attribute_name=TAG_ASSET_TYPE_ATTR_NAME):
    """
    Removes tag attribute from the given node.

    :param str or list(str) or None node: nodes to remove tag from.
    :param str attribute_name: tag attribute value to remove. By default, TAG_ASSET_TYPE_ATTR_NAME will be used.
    """

    target_nodes = utils.force_list(node or cmds.ls(sl=True))
    for target in target_nodes:
        # Only delete the attribute when it actually exists on the node.
        if cmds.attributeQuery(attribute_name, node=target, exists=True):
            cmds.deleteAttr("{}.{}".format(target, attribute_name))
            logger.info(
                'Removed attribute {} from "{}"'.format(attribute_name, target)
            )
def remove_all_tags(node=None):
    """
    Strips every known ueGear tag from the given node/s (or the current selection).

    :param str or list(str) or None node: nodes to remove tags from.
    """

    target_nodes = utils.force_list(node or cmds.ls(sl=True))
    for known_tag_name in ALL_TAGS_ATTR_NAMES:
        remove_tag(target_nodes, attribute_name=known_tag_name)
def apply_alembic_tag(node=None, remove=False):
    """
    Tags (or untags) the given node/s as Alembic assets.

    :param str or list(str) or None node: node/s to tag.
    :param bool remove: if True tag will be removed.
    """

    if remove:
        remove_tag(node=node)
    else:
        apply_tag(node=node, attribute_value=TagTypes.Alembic)
def find_tagged_nodes(
    tag_name=TAG_ASSET_TYPE_ATTR_NAME, nodes=None, tag_value=None
):
    """
    Collects every node carrying the given tag attribute with a non-empty value.

    :param str tag_name: name of the tag to search. By default, TAG_ATTR_NAME will be used.
    :param str or list(str) or None nodes: list of nodes to find tags of, if not given all nodes in the scene will be
        checked.
    :param str tag_value: if given only tag with given value will be returned.
    :return: list of found tagged nodes.
    :rtype: list(str)
    """

    candidates = utils.force_list(nodes or cmds.ls())
    matches = list()
    for candidate in candidates:
        if not cmds.attributeQuery(tag_name, node=candidate, exists=True):
            continue
        current_value = cmds.getAttr("{}.{}".format(candidate, tag_name))
        # Skip empty tags, and optionally filter to one specific value.
        if not current_value:
            continue
        if tag_value is not None and current_value != tag_value:
            continue
        matches.append(candidate)

    return matches
def find_tagged_selected_nodes(tag_name):
    """
    Returns a list with all selected nodes that are tagged with the given tag name and has a value set.

    :param str tag_name: name of the tag to search. By default, TAG_ATTR_NAME will be used.
    :return: list of found tagged nodes.
    :rtype: list(str)
    """

    # Fix: forward the requested tag name; previously it was silently dropped
    # and the default TAG_ASSET_TYPE_ATTR_NAME was always searched.
    return find_tagged_nodes(tag_name=tag_name, nodes=cmds.ls(sl=True))
def find_tagged_node_attributes(tag_name=TAG_ASSET_TYPE_ATTR_NAME, nodes=None):
    """
    Collects ``node.attribute`` paths for every node tagged with the given tag
    name that also holds a non-empty value.

    :param str tag_name: name of the tag to search. By default, TAG_ATTR_NAME will be used.
    :param str or list(str) or None nodes: list of nodes to find tags of, if not given all nodes in the scene will be
        checked.
    :return: list of found tagged nodes.
    :rtype: list(str)
    """

    tagged_attributes = list()
    for candidate in utils.force_list(nodes or cmds.ls(long=True)):
        if not cmds.attributeQuery(tag_name, node=candidate, exists=True):
            continue
        attribute_path = "{}.{}".format(candidate, tag_name)
        # Only keep attributes that actually hold a value.
        if cmds.getAttr(attribute_path):
            tagged_attributes.append(attribute_path)

    return tagged_attributes
def find_tagged_selected_node_attributes(tag_name):
    """
    Returns a list with all selected node attributes that are tagged with the given tag name and has a value set.

    :param str tag_name: name of the tag to search. By default, TAG_ATTR_NAME will be used.
    :return: list of found tagged nodes.
    :rtype: list(str)
    """

    # Fix: forward the requested tag name; previously it was silently dropped
    # and the default TAG_ASSET_TYPE_ATTR_NAME was always searched.
    return find_tagged_node_attributes(tag_name=tag_name, nodes=cmds.ls(sl=True))
def tag_values(tag_name=TAG_ASSET_TYPE_ATTR_NAME, nodes=None):
    """
    Collects the tag value of every node, keeping positions aligned with the
    input list (untagged nodes contribute ``None``).

    :param str tag_name: name of the tag to search value of.
    :param str or list(str) nodes: list of nodes to find tags of, if not given all nodes in the scene will be checked.
    :return: list of tagged node values.
    :rtype: list(object)
    """

    collected_values = list()
    for candidate in utils.force_list(nodes or cmds.ls(long=True)):
        if cmds.attributeQuery(tag_name, node=candidate, exists=True):
            collected_values.append(
                cmds.getAttr("{}.{}".format(candidate, tag_name))
            )
        else:
            # Placeholder keeps indices aligned with the input nodes.
            collected_values.append(None)

    return collected_values
def tag_match(dag_path, tag_value, tag):
    """
    Validates if the object specified by its dag path, has the same tag and value
    assigned to it.

    :param OpenMaya.DagPath dag_path: The object you want to validate has the
        following tag and data assigned.
    :param str tag_value: value assigned to the tag.
    :param str tag: tag to correlate with.
    :return: True if the object has matching tag and the values are the same.
    :rtype: bool
    """

    dag_fn = OpenMaya.MFnDagNode(dag_path)
    tag_plug = dag_fn.findPlug(dag_fn.attribute(tag), False)
    return tag_plug.asString() == tag_value
| mgear-dev/mgear4 | release/scripts/mgear/uegear/tag.py | tag.py | py | 9,387 | python | en | code | 209 | github-code | 6 | [
{
"api_name": "mgear.uegear.log.uegear_logger",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mgear.uegear.log",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "mgear.uegear.utils.force_list",
"line_number": 55,
"usage_type": "call"
},
{
... |
27519489621 | from django.shortcuts import (render, get_object_or_404,
get_list_or_404, redirect, HttpResponse)
from .models import Total
from .serializer import TotalSerializer, Serializer # , UserSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, logout, login
from .forms import RegisterForm, LoginForm, ProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.auth.decorators import login_required
# from total.decorators import add_get_request
# from django.views.decorators.http import require_http_methods
# Create your views here.
def home(request):
    """Render the landing page."""
    context = {}
    return render(request, 'index.html', context)
def contact(request):
    """
    Handle the contact form.

    On POST, email the visitor's message to the site inbox, flash a success
    message and redirect home; on GET simply render the landing page.
    """
    if request.method == 'POST':
        # 'name' is required in the payload (KeyError on missing field) but is
        # not currently included in the outgoing email body.
        name = request.POST['name']
        email = request.POST['email']
        message = request.POST['message']
        # Fix: removed debug print() that leaked submitted PII to stdout.
        send_mail(subject='API message', message=message,
                  from_email=email, recipient_list=['jobscraper0@gmail.com'])
        messages.success(request, 'Message sent Successfully')
        return redirect('home')
    else:
        return render(request, 'index.html', {})
class endpoints(APIView):
    """Self-describing index: lists every API endpoint and what it returns."""

    def get(self, request):
        # Static documentation payload; no authentication required.
        return Response([
            {"endpoint": 'description',
             "api/v2/": 'endpoints home'},
            {"register": 'Page to register user'},
            {"login": 'Login Page, to get token after login'},
            {"login/username=<username>&password=<password>/":
             '''a GET reqest to this endpoint with a registered users
username & pasword return the token for the user'''},
            {"api/v2/all/token": 'return all data from the beginning of corona virus till today'},
            {"api/v2/today/token": 'return the data for today'},
            {"api/v2/dates/2020-10-1:2020-11-10:2020-12-10/token":
             'return the data for the three dates seperated by :'},
            {"api/v2/from/2020-22-10/token": 'return the datas starting from 2020-22-10', },
            {"api/v2/yesterday/token": 'return the data for yesterday'},
            {"api/v2/date/2020-10-20/token": 'return the data for the specify date'},
        ])
def login_user(request):
    """
    Session login view.

    NOTE(review): ``next`` is read from the query string but never used —
    presumably a post-login redirect was intended; confirm.
    NOTE(review): failed authentication falls through with no error message,
    and the login page is re-rendered even after a successful login.
    """
    # Shadows the builtin ``next``; currently unused (see note above).
    next = request.GET.get('next')
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
    return render(request, 'login.html', {})
def logout_user(request):
    """Terminate the current session and send the visitor back home."""
    logout(request)
    return redirect('home')
@login_required
def profile(request):
    """Render the profile page for the authenticated user."""
    return render(request, 'profile.html', {'user': request.user})
def register_user(request):
    """
    Register a new account.

    On a valid POST, create the user with a properly hashed password,
    provision their DRF API token and redirect to the login page; otherwise
    (re-)render the registration form.
    """
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            password = form.cleaned_data['password1']
            new_user = form.save(commit=False)
            # Hash the password instead of storing the raw form value.
            new_user.set_password(password)
            new_user.save()
            # Provision the token the API endpoints authenticate against.
            Token.objects.create(user=new_user)
            # Fix: corrected "successfull" typo in the flash message.
            messages.info(request, 'registration successful, Login First')
            return redirect('login')
    else:
        form = RegisterForm()
    # Invalid POST falls through here with a bound form carrying errors.
    return render(request, 'register.html', {'form': form})
class LoginView(APIView):
    """
    Token retrieval endpoint.

    NOTE(review): credentials arrive via the URL path, so they will appear in
    server and proxy access logs — consider moving them to the POST body.
    """

    # Open endpoint: no permissions required to request a token.
    permission_classes = ()

    def post(self, request, username, password):
        username = username
        password = password
        # username = request.data.get('username')
        # password = request.data.get('password')
        user = authenticate(username=username, password=password)
        if user:
            # The token is created at registration time (see register_user).
            return Response({"token": user.auth_token.key})
        else:
            return Response({"error": "wrong credentials"})
class TotalListView(APIView):
    '''
    This will return all datas from the commencement of Corona Virus till today
    '''

    # permission_classes = (IsAuthenticated,)

    def get(self, request, token):
        # Manual token authentication: resolve the token to a user or reject.
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            # get_object_or_404 raises Http404 for unknown tokens; treated as
            # "not authenticated" here.
            user = None
        print(user)
        if user:
            obj = Total.objects.all()
            # lookup_field = 'hello'
            data = TotalSerializer(obj, many=True).data
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
class GetDateView(APIView):
    """Return the single record matching the given ``day``."""

    def get(self, request, day, token):
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            # Unknown token -> Http404 -> treated as "not authenticated".
            user = None
        if user:
            obj = get_object_or_404(Total, day=day)
            data = Serializer(obj).data
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
class GetFromDate(APIView):
    """Return every record from the given ``day`` onwards."""

    def get(self, request, day, token):
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            user = None
        if user:
            # Resolve the starting date to its primary key, then take all rows
            # with id >= that key (assumes ids grow with the date).
            q1 = get_object_or_404(Total, day=day).pk
            q = Total.objects.filter(id__gte=q1)
            # obj = get_list_or_404(q)
            data = Serializer(q, many=True).data
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
class GetFirstOccurence(APIView):
    '''
    Will return the day of the first occurence of Covid19 in Nigeria
    '''

    def get(self, request, token):
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            user = None
        if user:
            # Rows whose confirmed count equals exactly 1; the first such row
            # is serialized (IndexError if no row matches).
            obj = Total.objects.all().filter(confirmed=1)
            data = Serializer(obj[0]).data
            # print(obj)
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
class GetToday(APIView):
    """Return today's record (the first row of the default queryset)."""

    def get(self, request, token):
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            user = None
        if user:
            # NOTE(review): relies on the model's default ordering putting the
            # newest row first — confirm Total.Meta.ordering.
            query = Total.objects.all()
            obj = query[0]
            data = Serializer(obj).data
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
class GetYesterday(APIView):
    """Return yesterday's record (second-to-last row when ordered by id)."""

    def get(self, request, token):
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            user = None
        if user:
            # Ascending id order, so the penultimate row is yesterday's entry.
            query = Total.objects.all().order_by('id')
            obj = query[len(query) - 2]
            data = Serializer(obj).data
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
class GetSepDate(APIView):
    """
    Return the records for multiple specific dates.

    ``days`` is a ``:``-separated list of dates, e.g.
    ``2020-10-01:2020-11-10:2020-12-10``. Generalized: any number of dates is
    now accepted (the old code hard-coded exactly three and raised IndexError
    on fewer); leftover debug prints removed.
    """

    def get(self, request, days, token):
        try:
            user = get_object_or_404(User, auth_token=token)
        except Exception as DoesNotExist:
            # Unknown token -> Http404 -> treated as "not authenticated".
            user = None
        if user:
            # Split on ':' and drop empty fragments (e.g. trailing separator).
            requested_days = [day for day in days.split(':') if day]
            obj = Total.objects.filter(day__in=requested_days)
            data = Serializer(obj, many=True).data
            return Response(data)
        else:
            return Response({'error': 'Invalid Token'})
@login_required
def edit_profile(request):
    """Let the authenticated user update their own profile."""
    if request.method != 'POST':
        form = ProfileForm(instance=request.user)
        return render(request, 'edit_profile.html', {'form': form})

    form = ProfileForm(data=request.POST, instance=request.user)
    if form.is_valid():
        form.save()
        return redirect('profile')

    # Invalid submission: warn and re-render with the bound (error) form.
    messages.warning(request, 'Error occured')
    return render(request, 'edit_profile.html', {'form': form})
| Afeez1131/CovidNg-2021 | total/views.py | views.py | py | 8,441 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 36,
"usage_type": "call"
},
{
... |
31463758726 | import logging
import pathlib
import sqlite3
logger = logging.getLogger(__name__)
def is_database_exists(db_path):
    """Tell whether a database file is already present at *db_path*."""
    database_file = pathlib.Path(db_path)
    return database_file.exists()
def open_connection(db_path):
    """
    Open a sqlite3 connection to an existing database.

    :param db_path: path to the database file.
    :raises RuntimeError: when no database file exists at *db_path*.
    """
    # Guard clause instead of if/else nesting; same behavior.
    if not is_database_exists(db_path):
        # Fix: corrected "Databse" typo in the error message.
        raise RuntimeError(f"Database {db_path} doesn't exist")
    logger.debug(f"Connecting to {db_path}")
    try:
        return sqlite3.connect(db_path)
    except Exception:
        logger.exception(f"Failed to connect to {db_path}")
        raise
def close_connection(connection):
    """Close a previously opened sqlite3 connection.

    :param connection: an open sqlite3 connection; must not be None.
    """
    # NOTE(review): assert is stripped under ``python -O``; raise explicitly
    # if this precondition must always hold.
    assert connection is not None
    logger.debug("Closing connection")
    connection.close()
def create_database(db_path):
    """
    Create an empty sqlite database file at *db_path*.

    :param db_path: path where the database should be created.
    :raises RuntimeError: when a database already exists at that path.
    """
    logger.info(f"Creating empty database at {db_path}")
    # Guard clause instead of if/else nesting; same behavior.
    if is_database_exists(db_path):
        raise RuntimeError(f"Database {db_path} already exists")
    try:
        connection = sqlite3.connect(db_path)
    except Exception:
        # Fix: report through the module logger (was the root ``logging``
        # module), consistent with every other message in this file.
        logger.exception("Failed to create database")
        raise
    else:
        close_connection(connection)
class DatabaseIO:
    """Context manager that opens a database connection on entry and
    guarantees it is closed on exit."""

    def __init__(self, db_path):
        self._path = db_path
        self._connection = None

    def __enter__(self):
        self._connection = open_connection(self._path)
        return self

    def __exit__(self, exc_type, exc_val, exc_traceback):
        # Cleanup only; returning False propagates any in-flight exception.
        close_connection(self._connection)
        self._connection = None
        return False
| nemeshnorbert/reveal | src/db/utils.py | utils.py | py | 1,509 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"l... |
28448639940 | # __author__ = 'heyin'
# __date__ = '2019/2/14 16:03'
# google翻译rpc服务端代码
import sys
sys.path.append('../')
import json
import grpc
import time
from concurrent import futures
from rpc_server.fanyi import fanyi_pb2, fanyi_pb2_grpc
from rpc_conf import HOST, PORT, ONE_DAY_IN_SECONDS
from core import google
js = google.Py4Js()
class Translate(fanyi_pb2_grpc.TranslateServicer):
    """gRPC servicer that fulfils translation requests via Google Translate."""

    def DoTranslate(self, request, context):
        """Decode the JSON payload in ``request.text`` and return the translation."""
        args = request.text
        args = json.loads(args)
        src = args.get('src')
        dest = args.get('dest')
        cookies = args.get('cookies')
        # The following performs the actual Google translation call.
        ret = google.translate(js, args.get('content'), src, dest, cookies)
        return fanyi_pb2.Data(text=ret)
def serve():
    """Start the gRPC translation server and block until interrupted."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    fanyi_pb2_grpc.add_TranslateServicer_to_server(Translate(), server)
    server.add_insecure_port(HOST + ':' + PORT)
    server.start()
    try:
        # gRPC serves on daemon threads; keep the main thread alive.
        while True:
            time.sleep(ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
if __name__ == '__main__':
serve()
| hy89/google-translate | rpc_server/server.py | server.py | py | 1,172 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "core.google.Py4Js",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "core.google",
"line... |
42600676142 | import multiprocessing
import operator
from functools import partial
import numpy as np
from core import mathlib
from core.interact import interact as io
from core.leras import nn
from facelib import FaceType, XSegNet
from models import ModelBase
from samplelib import *
class XSegModel(ModelBase):
    """Trainer for the XSeg face-segmentation network (mask prediction)."""

    def __init__(self, *args, **kwargs):
        # Force the saved model class name so weight files stay compatible.
        super().__init__(*args, force_model_class_name='XSeg', **kwargs)

    # Override of the parent hook that gathers the model options.
    #override
    def on_initialize_options(self):
        # Ask whether the existing model settings may be overridden.
        ask_override = self.ask_override()

        # On a non-first run with override, optionally reset the weights and
        # restart training from scratch.
        if not self.is_first_run() and ask_override:
            if io.input_bool(f"是否重新开始训练?", False, help_message="重置模型权重并从头开始训练。"):
                self.set_iter(0)

        # Set defaults, loading previously saved option values when present.
        default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'wf')
        default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)

        # On the first run, ask the user which face type to train for.
        if self.is_first_run():
            self.options['face_type'] = io.input_str("请选择面部类型", default_face_type,
                                                     ['h', 'mf', 'f', 'wf', 'head'],
                                                     help_message="选择半脸/中脸/全脸/整个脸部/头部。选择与您的Deepfake模型相同的类型。").lower()

        # On first run or override, ask for batch size and pretrain mode.
        if self.is_first_run() or ask_override:
            self.ask_batch_size(4, range=[2, 16])
            self.options['pretrain'] = io.input_bool("是否启用预训练模式", default_pretrain)

        # Unless exporting, pretrain mode requires a pretraining data path.
        if not self.is_exporting and (self.options['pretrain'] and self.get_pretraining_data_path() is None):
            raise Exception("未定义pretraining_data_path")

        # Detect the "pretrain was just switched off" transition.
        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)

    # Override of the parent hook that builds the network, the training graph
    # and the sample generators.
    #override
    def on_initialize(self):
        device_config = nn.getCurrentDeviceConfig()
        self.model_data_format = "NCHW" if self.is_exporting or (
            len(device_config.devices) != 0 and not self.is_debug()) else "NHWC"
        nn.initialize(data_format=self.model_data_format)
        tf = nn.tf

        device_config = nn.getCurrentDeviceConfig()
        devices = device_config.devices

        self.resolution = resolution = 256

        # Map the chosen option string to the FaceType value
        # ('h'/'mf'/'f'/'wf'/'head').
        self.face_type = {'h': FaceType.HALF,
                          'mf': FaceType.MID_FULL,
                          'f': FaceType.FULL,
                          'wf': FaceType.WHOLE_FACE,
                          'head': FaceType.HEAD}[self.options['face_type']]

        # With no GPUs available the model lives on the CPU.
        place_model_on_cpu = len(devices) == 0
        models_opt_device = '/CPU:0' if place_model_on_cpu else nn.tf_default_device_name

        # Input image and mask tensor shapes.
        bgr_shape = nn.get4Dshape(resolution, resolution, 3)
        mask_shape = nn.get4Dshape(resolution, resolution, 1)

        # Build the XSeg network itself.
        self.model = XSegNet(name='XSeg',
                             resolution=resolution,
                             load_weights=not self.is_first_run(),
                             weights_file_root=self.get_model_root_path(),
                             training=True,
                             place_model_on_cpu=place_model_on_cpu,
                             optimizer=nn.RMSprop(lr=0.0001, lr_dropout=0.3, name='opt'),
                             data_format=nn.data_format)

        # Apply pretrain mode (resetting the iteration counter when pretrain
        # was just disabled).
        self.pretrain = self.options['pretrain']
        if self.pretrain_just_disabled:
            self.set_iter(0)

        if self.is_training:
            # Spread the batch across all available GPUs.
            gpu_count = max(1, len(devices))
            bs_per_gpu = max(1, self.get_batch_size() // gpu_count)
            self.set_batch_size(gpu_count * bs_per_gpu)

            # Per-GPU predictions, losses and gradients.
            gpu_pred_list = []
            gpu_losses = []
            gpu_loss_gvs = []

            for gpu_id in range(gpu_count):
                with tf.device(f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0'):
                    with tf.device(f'/CPU:0'):
                        # Slice on the CPU, otherwise the whole batch would be
                        # transferred to every GPU first.
                        batch_slice = slice(gpu_id * bs_per_gpu, (gpu_id + 1) * bs_per_gpu)
                        gpu_input_t = self.model.input_t[batch_slice, :, :, :]
                        gpu_target_t = self.model.target_t[batch_slice, :, :, :]

                    # Run the model graph for this GPU's slice.
                    gpu_pred_logits_t, gpu_pred_t = self.model.flow(gpu_input_t, pretrain=self.pretrain)
                    gpu_pred_list.append(gpu_pred_t)

                    if self.pretrain:
                        # Structural (DSSIM) loss at two filter scales.
                        gpu_loss = tf.reduce_mean(
                            5 * nn.dssim(gpu_target_t, gpu_pred_t, max_val=1.0, filter_size=int(resolution / 11.6)),
                            axis=[1])
                        gpu_loss += tf.reduce_mean(
                            5 * nn.dssim(gpu_target_t, gpu_pred_t, max_val=1.0, filter_size=int(resolution / 23.2)),
                            axis=[1])
                        # Pixel loss.
                        gpu_loss += tf.reduce_mean(10 * tf.square(gpu_target_t - gpu_pred_t), axis=[1, 2, 3])
                    else:
                        # Per-pixel sigmoid cross-entropy against the mask.
                        gpu_loss = tf.reduce_mean(
                            tf.nn.sigmoid_cross_entropy_with_logits(labels=gpu_target_t, logits=gpu_pred_logits_t),
                            axis=[1, 2, 3])

                    gpu_losses += [gpu_loss]

                    gpu_loss_gvs += [nn.gradients(gpu_loss, self.model.get_weights())]

            # Average losses and gradients, and create the optimizer update op.
            with tf.device(models_opt_device):
                pred = tf.concat(gpu_pred_list, 0)
                loss = tf.concat(gpu_losses, 0)
                loss_gv_op = self.model.opt.get_update_op(nn.average_gv_list(gpu_loss_gvs))

            # Build the train and view closures.
            # NOTE(review): both branches define an identical ``train`` — the
            # split on ``self.pretrain`` is currently redundant.
            if self.pretrain:
                def train(input_np, target_np):
                    l, _ = nn.tf_sess.run([loss, loss_gv_op], feed_dict={self.model.input_t: input_np, self.model.target_t: target_np})
                    return l
            else:
                def train(input_np, target_np):
                    l, _ = nn.tf_sess.run([loss, loss_gv_op], feed_dict={self.model.input_t: input_np, self.model.target_t: target_np})
                    return l

            self.train = train

            def view(input_np):
                return nn.tf_sess.run([pred], feed_dict={self.model.input_t: input_np})

            self.view = view

            # Initialise the sample generators.
            cpu_count = min(multiprocessing.cpu_count(), 8)
            src_dst_generators_count = cpu_count // 2
            src_generators_count = cpu_count // 2
            dst_generators_count = cpu_count // 2

            if self.pretrain:
                pretrain_gen = SampleGeneratorFace(self.get_pretraining_data_path(), debug=self.is_debug(), batch_size=self.get_batch_size(),
                                                   sample_process_options=SampleProcessor.Options(random_flip=True),
                                                   output_sample_types=[{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': True, 'transform': True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
                                                                        {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': True, 'transform': True, 'channel_type': SampleProcessor.ChannelType.G, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
                                                                        ],
                                                   uniform_yaw_distribution=False,
                                                   generators_count=cpu_count)
                self.set_training_data_generators([pretrain_gen])
            else:
                srcdst_generator = SampleGeneratorFaceXSeg([self.training_data_src_path, self.training_data_dst_path],
                                                           debug=self.is_debug(),
                                                           batch_size=self.get_batch_size(),
                                                           resolution=resolution,
                                                           face_type=self.face_type,
                                                           generators_count=src_dst_generators_count,
                                                           data_format=nn.data_format)

                src_generator = SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                                                    sample_process_options=SampleProcessor.Options(random_flip=False),
                                                    output_sample_types=[{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': False, 'transform': False, 'channel_type': SampleProcessor.ChannelType.BGR, 'border_replicate': False, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
                                                                         ],
                                                    generators_count=src_generators_count,
                                                    raise_on_no_data=False)
                dst_generator = SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                                                    sample_process_options=SampleProcessor.Options(random_flip=False),
                                                    output_sample_types=[{'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp': False, 'transform': False, 'channel_type': SampleProcessor.ChannelType.BGR, 'border_replicate': False, 'face_type': self.face_type, 'data_format': nn.data_format, 'resolution': resolution},
                                                                         ],
                                                    generators_count=dst_generators_count,
                                                    raise_on_no_data=False)

                self.set_training_data_generators([srcdst_generator, src_generator, dst_generator])

    # Override of the parent method returning the model file name list.
    #override
    def get_model_filename_list(self):
        return self.model.model_filename_list

    # Override of the parent hook that persists the model weights on save.
    #override
    def onSave(self):
        self.model.save_weights()

    # Override of the parent hook running one training iteration.
    #override
    def onTrainOneIter(self):
        image_np, target_np = self.generate_next_samples()[0]
        loss = self.train(image_np, target_np)

        return (('loss', np.mean(loss)), )

    # Override of the parent hook producing the preview images.
    #override
    def onGetPreview(self, samples, for_history=False):
        n_samples = min(4, self.get_batch_size(), 800 // self.resolution)

        if self.pretrain:
            srcdst_samples, = samples
            image_np, mask_np = srcdst_samples
        else:
            srcdst_samples, src_samples, dst_samples = samples
            image_np, mask_np = srcdst_samples

        I, M, IM, = [ np.clip(nn.to_data_format(x, "NHWC", self.model_data_format), 0.0, 1.0) for x in ([image_np, mask_np] + self.view(image_np)) ]
        M, IM, = [ np.repeat(x, (3,), -1) for x in [M, IM] ]

        # Solid green background used to visualise the masked-out regions.
        green_bg = np.tile(np.array([0, 1, 0], dtype=np.float32)[None, None, ...], (self.resolution, self.resolution, 1))

        result = []
        st = []
        for i in range(n_samples):
            if self.pretrain:
                ar = I[i], IM[i]
            else:
                ar = I[i] * M[i] + 0.5 * I[i] * (1 - M[i]) + 0.5 * green_bg * (1 - M[i]), IM[i], I[i] * IM[i] + 0.5 * I[i] * (1 - IM[i]) + 0.5 * green_bg * (1 - IM[i])
            st.append(np.concatenate(ar, axis=1))
        result += [('XSeg training faces', np.concatenate(st, axis=0)), ]

        if not self.pretrain and len(src_samples) != 0:
            src_np, = src_samples

            D, DM, = [ np.clip(nn.to_data_format(x, "NHWC", self.model_data_format), 0.0, 1.0) for x in ([src_np] + self.view(src_np)) ]
            DM, = [ np.repeat(x, (3,), -1) for x in [DM] ]

            st = []
            for i in range(n_samples):
                ar = D[i], DM[i], D[i] * DM[i] + 0.5 * D[i] * (1 - DM[i]) + 0.5 * green_bg * (1 - DM[i])
                st.append(np.concatenate(ar, axis=1))

            result += [('XSeg src faces', np.concatenate(st, axis=0)), ]

        if not self.pretrain and len(dst_samples) != 0:
            dst_np, = dst_samples

            D, DM, = [ np.clip(nn.to_data_format(x, "NHWC", self.model_data_format), 0.0, 1.0) for x in ([dst_np] + self.view(dst_np)) ]
            DM, = [ np.repeat(x, (3,), -1) for x in [DM] ]

            st = []
            for i in range(n_samples):
                ar = D[i], DM[i], D[i] * DM[i] + 0.5 * D[i] * (1 - DM[i]) + 0.5 * green_bg * (1 - DM[i])
                st.append(np.concatenate(ar, axis=1))

            result += [('XSeg dst faces', np.concatenate(st, axis=0)), ]

        return result

    # Export the model graph to ONNX format.
    def export_dfm(self):
        output_path = self.get_strpath_storage_for_file(f'model.onnx')
        io.log_info(f'Dumping .onnx to {output_path}')
        tf = nn.tf

        with tf.device(nn.tf_default_device_name):
            # NHWC placeholder transposed to NCHW for the network, and back.
            input_t = tf.placeholder(nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face')
            input_t = tf.transpose(input_t, (0, 3, 1, 2))
            _, pred_t = self.model.flow(input_t)
            pred_t = tf.transpose(pred_t, (0, 2, 3, 1))

        tf.identity(pred_t, name='out_mask')

        # Freeze variables into constants so the graph is self-contained.
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            nn.tf_sess,
            tf.get_default_graph().as_graph_def(),
            ['out_mask']
        )

        import tf2onnx
        with tf.device("/CPU:0"):
            model_proto, _ = tf2onnx.convert._convert_common(
                output_graph_def,
                name='XSeg',
                input_names=['in_face:0'],
                output_names=['out_mask:0'],
                opset=13,
                output_path=output_path)
Model = XSegModel
| ccvvx1/Python_Df | models/Model_XSeg/Model.py | Model.py | py | 15,453 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.ModelBase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "core.interact.interact.input_bool",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "core.interact.interact",
"line_number": 28,
"usage_type": "name"
},
{
"api_nam... |
19053013321 | from functools import wraps
import re
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import humanize
import simplejson as json
from dateutil.tz import tzutc
from flask import Blueprint, g, redirect, request, url_for, current_app, jsonify
from flask_login import current_user, login_required, logout_user
from flask_themes2 import render_theme_template
from sdc.crypto.encrypter import encrypt
from jwcrypto.common import base64url_decode
from structlog import get_logger
from app.globals import get_session_store, get_completeness
from app.data_model.answer_store import Answer, AnswerStore
from app.data_model.app_models import SubmittedResponse
from app.globals import get_answer_store, get_completed_blocks, get_metadata, get_questionnaire_store
from app.helpers.form_helper import post_form_for_location
from app.helpers.path_finder_helper import path_finder, full_routing_path_required
from app.helpers.schema_helpers import with_schema
from app.helpers.session_helpers import with_answer_store, with_metadata
from app.helpers.template_helper import (with_session_timeout, with_metadata_context, with_analytics,
with_questionnaire_url_prefix, with_legal_basis, render_template)
from app.questionnaire.location import Location
from app.questionnaire.navigation import Navigation
from app.questionnaire.path_finder import PathFinder
from app.questionnaire.router import Router
from app.questionnaire.rules import get_answer_ids_on_routing_path
from app.questionnaire.rules import evaluate_skip_conditions
from app.keys import KEY_PURPOSE_SUBMISSION
from app.storage import data_access
from app.storage.storage_encryption import StorageEncryption
from app.submitter.converter import convert_answers
from app.submitter.submission_failed import SubmissionFailedException
from app.templating.metadata_context import build_metadata_context_for_survey_completed
from app.templating.schema_context import build_schema_context
from app.templating.summary_context import build_summary_rendering_context
from app.templating.template_renderer import renderer, TemplateRenderer
from app.templating.view_context import build_view_context
from app.templating.utils import get_question_title
from app.utilities.schema import load_schema_from_session_data
from app.views.errors import MultipleSurveyError
from app.authentication.no_token_exception import NoTokenException
# Block types that terminate the questionnaire flow.
END_BLOCKS = 'Summary', 'Confirmation'

logger = get_logger()

# Views that require an active questionnaire session (URL carries the survey ids).
questionnaire_blueprint = Blueprint(name='questionnaire',
                                    import_name=__name__,
                                    url_prefix='/questionnaire/<eq_id>/<form_type>/<collection_id>/')

# Views served after the questionnaire has been submitted.
post_submission_blueprint = Blueprint(name='post_submission',
                                      import_name=__name__,
                                      url_prefix='/questionnaire/<eq_id>/<form_type>/')
@questionnaire_blueprint.before_request
def before_questionnaire_request():
    """Validate the session and bind logging context before every questionnaire view."""
    metadata = get_metadata(current_user)
    if not metadata:
        # No metadata means there is no valid session/token.
        raise NoTokenException(401)

    logger.bind(tx_id=metadata['tx_id'])

    values = request.view_args
    logger.bind(eq_id=values['eq_id'], form_type=values['form_type'],
                ce_id=values['collection_id'])

    logger.info('questionnaire request', method=request.method, url_path=request.full_path)

    # Reject requests whose URL does not match the survey held in the session.
    _check_same_survey(url_eq_id=values['eq_id'],
                       url_form_type=values['form_type'],
                       url_collection_id=values['collection_id'],
                       session_eq_id=metadata['eq_id'],
                       session_form_type=metadata['form_type'],
                       session_collection_id=metadata['collection_exercise_sid'])

    # Load the questionnaire schema for this session onto flask.g.
    session_data = get_session_store().session_data
    g.schema = load_schema_from_session_data(session_data)
@post_submission_blueprint.before_request
def before_post_submission_request():
    """Validate the session and bind logging context for post-submission views."""
    session = get_session_store()
    if not session or not session.session_data:
        raise NoTokenException(401)

    session_data = session.session_data
    g.schema = load_schema_from_session_data(session_data)

    logger.bind(tx_id=session_data.tx_id)

    values = request.view_args
    logger.bind(eq_id=values['eq_id'], form_type=values['form_type'])

    logger.info('questionnaire request', method=request.method, url_path=request.full_path)

    # Post-submission URLs carry no collection id, so only eq_id/form_type
    # are cross-checked against the session.
    _check_same_survey(url_eq_id=values['eq_id'],
                       url_form_type=values['form_type'],
                       url_collection_id='',
                       session_eq_id=session_data.eq_id,
                       session_form_type=session_data.form_type,
                       session_collection_id='')
@questionnaire_blueprint.after_request
def add_cache_control(response):
    """Mark every questionnaire response as non-cacheable."""
    response.cache_control.no_cache = True
    return response
def save_questionnaire_store(func):
    """Decorator that persists the questionnaire store after the view runs."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)

        # Anonymous requests have no per-user store to persist.
        if not current_user.is_anonymous:
            store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
            store.add_or_update()

        return response

    return wrapper
@questionnaire_blueprint.route('<group_id>/<int:group_instance>/<block_id>', methods=['GET'])
@login_required
@with_answer_store
@with_metadata
@with_schema
@full_routing_path_required
def get_block(routing_path, schema, metadata, answer_store, eq_id, form_type, collection_id, group_id,  # pylint: disable=too-many-locals
              group_instance, block_id):
    """Render a single questionnaire block, redirecting if it is not yet reachable."""
    current_location = Location(group_id, group_instance, block_id)

    # The router decides whether this location may be visited given the
    # current completeness state; otherwise bounce to the next allowed one.
    completeness = get_completeness(current_user)
    router = Router(schema, routing_path, completeness, current_location)
    if not router.can_access_location():
        next_location = router.get_next_location()
        return _redirect_to_location(collection_id, eq_id, form_type, next_location)

    block = _get_block_json(current_location, schema, answer_store, metadata)

    context = _get_context(routing_path, block, current_location, schema)

    return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
@questionnaire_blueprint.route('<group_id>/<int:group_instance>/<block_id>', methods=['POST'])
@login_required
@with_answer_store
@with_metadata
@with_schema
@full_routing_path_required
def post_block(routing_path, schema, metadata, answer_store, eq_id, form_type, collection_id, group_id,  # pylint: disable=too-many-locals
               group_instance, block_id):
    """Handle a posted answer form for a single questionnaire block."""
    current_location = Location(group_id, group_instance, block_id)

    # Reject posts to locations that are not yet reachable.
    completeness = get_completeness(current_user)
    router = Router(schema, routing_path, completeness, current_location)
    if not router.can_access_location():
        next_location = router.get_next_location()
        return _redirect_to_location(collection_id, eq_id, form_type, next_location)

    block = _get_block_json(current_location, schema, answer_store, metadata)

    schema_context = _get_schema_context(routing_path, current_location.group_instance, metadata, answer_store, schema)
    rendered_block = renderer.render(block, **schema_context)

    form = _generate_wtf_form(request.form, rendered_block, current_location, schema)
    # "Save and sign out" bypasses normal validation/submission handling.
    if 'action[save_sign_out]' in request.form:
        return _save_sign_out(routing_path, current_location, form, schema, answer_store, metadata)

    if form.validate():
        _set_started_at_metadata_if_required(form, metadata)
        _update_questionnaire_store(current_location, form, schema)
        next_location = path_finder.get_next_location(current_location=current_location)

        # Reaching Summary/Confirmation means the questionnaire is complete.
        if _is_end_of_questionnaire(block, next_location):
            return submit_answers(routing_path, eq_id, form_type, schema)

        return redirect(_next_location_url(next_location))

    # Validation failed: re-render the page with the bound (error) form.
    context = build_view_context(block['type'], metadata, schema, answer_store, schema_context, rendered_block, current_location, form)

    return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
@questionnaire_blueprint.route('<group_id>/0/household-composition', methods=['POST'])
@login_required
@with_answer_store
@with_metadata
@with_schema
@full_routing_path_required
def post_household_composition(routing_path, schema, metadata, answer_store, **kwargs):
    """Handle the household-composition block (POST).

    Supports add/remove-member actions (which re-render the page), save &
    sign out, and a final submit. If the household answers changed, any
    repeating-group answers derived from them are removed first.
    """
    group_id = kwargs['group_id']
    if _household_answers_changed(answer_store, schema):
        _remove_repeating_on_household_answers(answer_store, schema)
    # add/remove/save actions should not trigger mandatory validation
    disable_mandatory = any(x in request.form for x in ['action[add_answer]', 'action[remove_answer]', 'action[save_sign_out]'])
    current_location = Location(group_id, 0, 'household-composition')
    block = _get_block_json(current_location, schema, answer_store, metadata)
    form = post_form_for_location(schema, block, current_location, answer_store, metadata,
                                  request.form, disable_mandatory=disable_mandatory)
    form.validate()  # call validate here to keep errors in the form object on the context
    context = _get_context(routing_path, block, current_location, schema, form)
    if 'action[add_answer]' in request.form:
        form.household.append_entry()
        return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
    if 'action[remove_answer]' in request.form:
        index_to_remove = int(request.form.get('action[remove_answer]'))
        form.remove_person(index_to_remove)
        return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
    if 'action[save_sign_out]' in request.form:
        response = _save_sign_out(routing_path, current_location, form, schema, answer_store, metadata)
        remove_empty_household_members_from_answer_store(answer_store, schema)
        return response
    if form.validate():
        questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
        update_questionnaire_store_with_answer_data(questionnaire_store, current_location, form.serialise(), schema)
        metadata = get_metadata(current_user)
        next_location = path_finder.get_next_location(current_location=current_location)
        return redirect(next_location.url(metadata))
    return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
@post_submission_blueprint.route('thank-you', methods=['GET'])
@login_required
@with_metadata
@with_schema
def get_thank_you(schema, metadata, eq_id, form_type):  # pylint: disable=unused-argument
    """Render the thank-you page after submission.

    If the session has no submitted_time yet, the questionnaire was not
    submitted, so route the user back to their next incomplete location.
    When 'view submitted response' is enabled and still within its window,
    a link (and its remaining duration) is included on the page.
    """
    session_data = get_session_store().session_data
    completeness = get_completeness(current_user)
    if session_data.submitted_time:
        metadata_context = build_metadata_context_for_survey_completed(session_data)
        view_submission_url = None
        view_submission_duration = 0
        if _is_submission_viewable(schema.json, session_data.submitted_time):
            view_submission_url = url_for('.get_view_submission', eq_id=eq_id, form_type=form_type)
            view_submission_duration = humanize.naturaldelta(timedelta(seconds=schema.json['view_submitted_response']['duration']))
        return render_theme_template(schema.json['theme'],
                                     template_name='thank-you.html',
                                     metadata=metadata_context,
                                     analytics_ua_id=current_app.config['EQ_UA_ID'],
                                     survey_id=schema.json['survey_id'],
                                     survey_title=TemplateRenderer.safe_content(schema.json['title']),
                                     is_view_submitted_response_enabled=is_view_submitted_response_enabled(schema.json),
                                     view_submission_url=view_submission_url,
                                     view_submission_duration=view_submission_duration)
    routing_path = path_finder.get_full_routing_path()
    collection_id = metadata['collection_exercise_sid']
    router = Router(schema, routing_path, completeness)
    next_location = router.get_next_location()
    return _redirect_to_location(collection_id, metadata.get('eq_id'), metadata.get('form_type'), next_location)
@post_submission_blueprint.route('view-submission', methods=['GET'])
@login_required
@with_schema
def get_view_submission(schema, eq_id, form_type):  # pylint: disable=unused-argument
    """Render a read-only summary of a previously submitted questionnaire.

    The stored submission is decrypted, decoded (with a fallback for the
    legacy base64-before-encryption format) and re-rendered as a summary.
    If viewing is disabled or the window has expired, redirect to thank-you.
    """
    session_data = get_session_store().session_data
    if _is_submission_viewable(schema.json, session_data.submitted_time):
        submitted_data = data_access.get_by_key(SubmittedResponse, session_data.tx_id)
        if submitted_data:
            metadata_context = build_metadata_context_for_survey_completed(session_data)
            pepper = current_app.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_ENCRYPTION_USER_PEPPER')
            encrypter = StorageEncryption(current_user.user_id, current_user.user_ik, pepper)
            submitted_data = encrypter.decrypt_data(submitted_data.data)
            # for backwards compatibility
            # submitted data used to be base64 encoded before encryption
            try:
                submitted_data = base64url_decode(submitted_data.decode()).decode()
            except ValueError:
                pass
            submitted_data = json.loads(submitted_data)
            answer_store = AnswerStore(existing_answers=submitted_data.get('answers'))
            metadata = submitted_data.get('metadata')
            routing_path = PathFinder(schema, answer_store, metadata, []).get_full_routing_path()
            schema_context = _get_schema_context(routing_path, 0, metadata, answer_store, schema)
            rendered_schema = renderer.render(schema.json, **schema_context)
            summary_rendered_context = build_summary_rendering_context(schema, rendered_schema['sections'], answer_store, metadata)
            context = {
                'summary': {
                    'groups': summary_rendered_context,
                    'answers_are_editable': False,
                    'is_view_submission_response_enabled': is_view_submitted_response_enabled(schema.json),
                },
                'variables': None,
            }
            return render_theme_template(schema.json['theme'],
                                         template_name='view-submission.html',
                                         metadata=metadata_context,
                                         analytics_ua_id=current_app.config['EQ_UA_ID'],
                                         survey_id=schema.json['survey_id'],
                                         survey_title=TemplateRenderer.safe_content(schema.json['title']),
                                         content=context)
    return redirect(url_for('post_submission.get_thank_you', eq_id=eq_id, form_type=form_type))
def _set_started_at_metadata_if_required(form, metadata):
    """Record a 'started_at' timestamp the first time real answers are stored."""
    store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
    no_answers_yet = not store.answer_store.answers
    has_real_input = len(form.data) > 1
    if not (no_answers_yet and has_real_input):
        return
    started_at = datetime.now(timezone.utc).isoformat()
    logger.info('first answer about to be stored. writing started_at time to metadata',
                started_at=started_at)
    metadata['started_at'] = started_at
def _render_page(block_type, context, current_location, schema, answer_store, metadata, routing_path):
    """Render the block as JSON when the client prefers it, otherwise as HTML."""
    if request_wants_json():
        return jsonify(context)
    return _build_template(
        current_location,
        context,
        block_type,
        schema,
        answer_store,
        metadata,
        routing_path=routing_path)
def _generate_wtf_form(form, block, location, schema):
    """Build the WTForms form for a block from the submitted form data.

    Previously the ``form`` argument was only used for the sign-out check and
    the global ``request.form`` was re-read for the form payload; the function
    now uses the parameter it is given (callers pass ``request.form``), which
    makes it honest about its inputs and usable outside a request context.

    :param form: submitted form data (a MultiDict, e.g. ``request.form``)
    :param block: rendered block JSON the form belongs to
    :param location: the current Location
    :param schema: the questionnaire schema
    :return: a populated WTForms form object
    """
    # saving to sign out must not enforce mandatory answers
    disable_mandatory = 'action[save_sign_out]' in form
    wtf_form = post_form_for_location(
        schema,
        block,
        location,
        get_answer_store(current_user),
        get_metadata(current_user),
        form,
        disable_mandatory)
    return wtf_form
def _next_location_url(location):
    """Return the URL for *location*, resolved with the current user's metadata."""
    metadata = get_metadata(current_user)
    return location.url(metadata)
def _is_end_of_questionnaire(block, next_location):
    """True when *block* is a terminal block type and routing has nowhere left to go."""
    if next_location is not None:
        return False
    return block['type'] in END_BLOCKS
def submit_answers(routing_path, eq_id, form_type, schema):
    """Convert, encrypt and submit the answers, then clear the local store.

    Raises SubmissionFailedException when the message cannot be queued. On
    success the submission time is saved in the session, a viewable copy is
    optionally stored, the questionnaire store is deleted, and the user is
    redirected to the thank-you page.
    """
    metadata = get_metadata(current_user)
    answer_store = get_answer_store(current_user)
    message = json.dumps(convert_answers(
        metadata,
        schema,
        answer_store,
        routing_path,
    ))
    encrypted_message = encrypt(message, current_app.eq['key_store'], KEY_PURPOSE_SUBMISSION)
    sent = current_app.eq['submitter'].send_message(
        encrypted_message,
        current_app.config['EQ_RABBITMQ_QUEUE_NAME'],
        metadata['tx_id'],
    )
    if not sent:
        raise SubmissionFailedException()
    submitted_time = datetime.utcnow()
    _store_submitted_time_in_session(submitted_time)
    if is_view_submitted_response_enabled(schema.json):
        _store_viewable_submission(answer_store.answers, metadata, submitted_time)
    # the answers are submitted; local state must not survive
    get_questionnaire_store(current_user.user_id, current_user.user_ik).delete()
    return redirect(url_for('post_submission.get_thank_you', eq_id=eq_id, form_type=form_type))
def _store_submitted_time_in_session(submitted_time):
    """Persist the submission timestamp (ISO-8601 string) into the session store."""
    session_store = get_session_store()
    session_data = session_store.session_data
    session_data.submitted_time = submitted_time.isoformat()
    session_store.save()
def _store_viewable_submission(answers, metadata, submitted_time):
    """Encrypt and persist a copy of the submission for later viewing.

    The copy expires after the schema-configured view_submitted_response
    duration; valid_until is stored timezone-aware (UTC).
    """
    pepper = current_app.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_ENCRYPTION_USER_PEPPER')
    encrypter = StorageEncryption(current_user.user_id, current_user.user_ik, pepper)
    encrypted_data = encrypter.encrypt_data(
        {
            'answers': answers,
            'metadata': metadata,
        },
    )
    valid_until = submitted_time + timedelta(seconds=g.schema.json['view_submitted_response']['duration'])
    item = SubmittedResponse(
        tx_id=metadata['tx_id'],
        data=encrypted_data,
        valid_until=valid_until.replace(tzinfo=tzutc()),
    )
    data_access.put(item)
def is_view_submitted_response_enabled(schema):
    """Return True when the schema opts in to 'view submitted response'."""
    settings = schema.get('view_submitted_response')
    return settings['enabled'] if settings else False
def _is_submission_viewable(schema, submitted_time):
    """Return True while a stored submission may still be viewed.

    :param schema: the survey schema JSON (dict)
    :param submitted_time: ISO-format timestamp string as written by
        ``_store_submitted_time_in_session``, or None/empty when not submitted
    """
    if is_view_submitted_response_enabled(schema) and submitted_time:
        # datetime.isoformat() omits the '.%f' suffix when microseconds are
        # exactly zero, so a single strptime format raised ValueError for
        # such timestamps; accept both shapes.
        try:
            submitted_time = datetime.strptime(submitted_time, '%Y-%m-%dT%H:%M:%S.%f')
        except ValueError:
            submitted_time = datetime.strptime(submitted_time, '%Y-%m-%dT%H:%M:%S')
        submission_valid_until = submitted_time + timedelta(seconds=schema['view_submitted_response']['duration'])
        return submission_valid_until > datetime.utcnow()
    return False
def _save_sign_out(routing_path, current_location, form, schema, answer_store, metadata):
    """Save the current block's (valid) answers, then sign the user out.

    The block is removed from the completed list so the respondent returns
    to it on resume. An invalid form re-renders the page with errors
    instead of signing out.
    """
    questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
    block = _get_block_json(current_location, schema, answer_store, metadata)
    if form.validate():
        _update_questionnaire_store(current_location, form, schema)
        if current_location in questionnaire_store.completed_blocks:
            questionnaire_store.remove_completed_blocks(location=current_location)
        questionnaire_store.add_or_update()
        logout_user()
        return redirect(url_for('session.get_sign_out'))
    context = _get_context(routing_path, block, current_location, schema, form)
    return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
def _household_answers_changed(answer_store, schema):
    """Return True if the posted household-composition form differs from stored answers.

    Compares the submitted form fields (minus CSRF token and action buttons)
    against the stored household answers, both by count and by value.
    """
    answer_ids = schema.get_answer_ids_for_block('household-composition')
    household_answers = answer_store.filter(answer_ids)
    stripped_form = request.form.copy()
    del stripped_form['csrf_token']  # NOTE(review): assumes the token is always posted — KeyError otherwise; confirm CSRF is always on
    remove = [k for k in stripped_form if 'action[' in k]
    for k in remove:
        del stripped_form[k]
    # a different number of fields means members were added or removed
    if household_answers.count() != len(stripped_form):
        return True
    for answer in request.form:
        answer_id, answer_index = extract_answer_id_and_instance(answer)
        try:
            stored_answer = household_answers.filter(
                answer_ids=[answer_id],
                answer_instance=answer_index)[0]
        except IndexError:
            stored_answer = None
        # a stored None compares equal to an empty form field
        if stored_answer and (stored_answer['value'] or '') != request.form[answer]:
            return True
    return False
def _remove_repeating_on_household_answers(answer_store, schema):
    """Remove household-composition answers plus any answers in groups repeating over them.

    Completed-block flags for the deleted repeating groups are cleared so the
    respondent has to revisit them.
    """
    answer_ids = schema.get_answer_ids_for_block('household-composition')
    answer_store.remove(answer_ids=answer_ids)
    questionnaire_store = get_questionnaire_store(
        current_user.user_id,
        current_user.user_ik,
    )
    for answer in schema.get_answers_that_repeat_in_block('household-composition'):
        groups_to_delete = schema.get_groups_that_repeat_with_answer_id(answer['id'])
        for group in groups_to_delete:
            answer_ids = schema.get_answer_ids_for_group(group['id'])
            answer_store.remove(answer_ids=answer_ids)
            # drop completed flags for blocks in the deleted group
            questionnaire_store.completed_blocks[:] = [b for b in questionnaire_store.completed_blocks if
                                                       b.group_id != group['id']]
def remove_empty_household_members_from_answer_store(answer_store, schema):
    """Delete household-member instances whose first and last names are blank."""
    answer_ids = schema.get_answer_ids_for_block('household-composition')
    names_by_instance = defaultdict(list)
    for ans in answer_store.filter(answer_ids=answer_ids):
        if ans['answer_id'] in ('first-name', 'last-name'):
            names_by_instance[ans['answer_instance']].append(ans['value'])
    empty_instances = [inst for inst, parts in names_by_instance.items()
                       if not ''.join(parts).strip()]
    for inst in empty_instances:
        answer_store.remove(answer_ids=answer_ids, answer_instance=inst)
def _update_questionnaire_store(current_location, form, schema):
    """Write the validated form's data into the questionnaire store.

    Relationship blocks use the form's serialise() representation; all other
    blocks store the plain form data dict.
    """
    questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
    if current_location.block_id in ['relationships', 'household-relationships']:
        update_questionnaire_store_with_answer_data(questionnaire_store, current_location,
                                                    form.serialise(), schema)
    else:
        update_questionnaire_store_with_form_data(questionnaire_store, current_location, form.data, schema)
@save_questionnaire_store
def update_questionnaire_store_with_form_data(questionnaire_store, location, answer_dict, schema):
    """Store a block's form data as answers and mark the block completed.

    Answers left blank fall back to a schema default when one exists;
    answers that end up None are removed from the store. If storing an
    answer changed the store and the answer has schema dependencies, the
    dependent blocks are un-completed so they get revisited.
    """
    survey_answer_ids = schema.get_answer_ids_for_block(location.block_id)
    for answer_id, answer_value in answer_dict.items():
        # If answer is not answered then check for a schema specified default
        if answer_value is None:
            answer_value = schema.get_answer(answer_id).get('default')
        if answer_id in survey_answer_ids or location.block_id == 'household-composition':
            if answer_value is not None:
                answer = Answer(answer_id=answer_id,
                                value=answer_value,
                                group_instance=location.group_instance)
                # hash comparison detects whether add_or_update actually changed anything
                latest_answer_store_hash = questionnaire_store.answer_store.get_hash()
                questionnaire_store.answer_store.add_or_update(answer)
                if latest_answer_store_hash != questionnaire_store.answer_store.get_hash() and schema.dependencies[answer_id]:
                    _remove_dependent_answers_from_completed_blocks(answer_id, location.group_instance, questionnaire_store, schema)
            else:
                _remove_answer_from_questionnaire_store(
                    answer_id,
                    questionnaire_store,
                    group_instance=location.group_instance)
    if location not in questionnaire_store.completed_blocks:
        questionnaire_store.completed_blocks.append(location)
def _remove_dependent_answers_from_completed_blocks(answer_id, group_instance, questionnaire_store, schema):
    """
    Gets a list of answer ids that are dependent on the answer_id passed in.
    Then for each dependent answer it will remove its block from those completed.
    This will therefore force the respondent to revisit that block.
    The dependent answers themselves remain untouched.
    :param answer_id: the answer that has changed
    :param group_instance: group instance of the changed answer
    :param questionnaire_store: holds the completed blocks
    :param schema: the questionnaire schema
    :return: None
    """
    answer_in_repeating_group = schema.answer_is_in_repeating_group(answer_id)
    dependencies = schema.dependencies[answer_id]
    for dependency in dependencies:
        dependency_in_repeating_group = schema.answer_is_in_repeating_group(dependency)
        answer = schema.get_answer(dependency)
        question = schema.get_question(answer['parent_id'])
        block = schema.get_block(question['parent_id'])
        if dependency_in_repeating_group and not answer_in_repeating_group:
            # a non-repeating answer changed: clear every instance of the dependent block
            questionnaire_store.remove_completed_blocks(group_id=block['parent_id'], block_id=block['id'])
        else:
            location = Location(block['parent_id'], group_instance, block['id'])
            if location in questionnaire_store.completed_blocks:
                questionnaire_store.remove_completed_blocks(location=location)
def _remove_answer_from_questionnaire_store(answer_id, questionnaire_store,
                                            group_instance=0):
    """Delete a single answer (answer_instance 0) from the answer store."""
    questionnaire_store.answer_store.remove(answer_ids=[answer_id],
                                            group_instance=group_instance,
                                            answer_instance=0)
@save_questionnaire_store
def update_questionnaire_store_with_answer_data(questionnaire_store, location, answers, schema):
    """Store pre-built Answer objects for *location* and mark the block completed.

    Answers whose ids are not part of the block's schema are ignored.
    """
    survey_answer_ids = schema.get_answer_ids_for_block(location.block_id)
    for answer in [a for a in answers if a.answer_id in survey_answer_ids]:
        questionnaire_store.answer_store.add_or_update(answer)
    if location not in questionnaire_store.completed_blocks:
        questionnaire_store.completed_blocks.append(location)
def _check_same_survey(url_eq_id, url_form_type, url_collection_id, session_eq_id, session_form_type, session_collection_id):
    """Raise MultipleSurveyError when the URL identifiers do not match the session's."""
    url_identity = (url_eq_id, url_form_type, url_collection_id)
    session_identity = (session_eq_id, session_form_type, session_collection_id)
    if url_identity != session_identity:
        raise MultipleSurveyError
def _evaluate_skip_conditions(block_json, location, schema, answer_store, metadata):
    """Mark skipped questions in *block_json* and relax their mandatory answers.

    Mutates (and returns) block_json: each question with skip_conditions gets
    a 'skipped' flag, and mandatory answers of skipped questions are made
    optional so validation never demands input that was never shown.
    """
    for question in schema.get_questions_for_block(block_json):
        if 'skip_conditions' in question:
            skip_question = evaluate_skip_conditions(question['skip_conditions'], schema, metadata, answer_store, location.group_instance)
            question['skipped'] = skip_question
            for answer in question['answers']:
                if answer['mandatory'] and skip_question:
                    answer['mandatory'] = False
    return block_json
def extract_answer_id_and_instance(answer_instance_id):
    """Split a household form field name into (answer_id, instance index).

    Field names shaped like 'household-<n>-<name-part>' yield that name part
    and n; anything else is returned unchanged with index 0.
    """
    pattern = re.compile(r'^household-(\d+)-(first-name|middle-names|last-name)$')
    match = pattern.match(answer_instance_id)
    if match is None:
        return answer_instance_id, 0
    index, answer_id = match.groups()
    return answer_id, int(index)
def _redirect_to_location(collection_id, eq_id, form_type, location):
    """Redirect (302) to the questionnaire GET view for *location*."""
    return redirect(url_for('questionnaire.get_block', eq_id=eq_id, form_type=form_type, collection_id=collection_id,
                            group_id=location.group_id,
                            group_instance=location.group_instance, block_id=location.block_id))
def _get_context(full_routing_path, block, current_location, schema, form=None):
    """Build the view context for *block* at *current_location* (optionally with a form)."""
    metadata = get_metadata(current_user)
    answer_store = get_answer_store(current_user)
    schema_context = _get_schema_context(full_routing_path, current_location.group_instance, metadata, answer_store, schema)
    rendered_block = renderer.render(block, **schema_context)
    return build_view_context(block['type'], metadata, schema, answer_store, schema_context, rendered_block, current_location, form=form)
def _get_block_json(current_location, schema, answer_store, metadata):
    """Fetch the block's schema JSON with skip conditions already applied."""
    block_json = schema.get_block(current_location.block_id)
    return _evaluate_skip_conditions(block_json, current_location, schema, answer_store, metadata)
def _get_schema_context(full_routing_path, group_instance, metadata, answer_store, schema):
    """Build the Jinja rendering context, restricted to answers on the routing path."""
    answer_ids_on_path = get_answer_ids_on_routing_path(schema, full_routing_path)
    return build_schema_context(metadata=metadata,
                                schema=schema,
                                answer_store=answer_store,
                                group_instance=group_instance,
                                answer_ids_on_path=answer_ids_on_path)
def _get_front_end_navigation(answer_store, current_location, metadata, schema, routing_path=None):
    """Build the side navigation, or None for Introduction blocks (which show none)."""
    completed_blocks = get_completed_blocks(current_user)
    navigation = Navigation(schema, answer_store, metadata, completed_blocks,
                            routing_path, get_completeness(current_user))
    block_json = schema.get_block(current_location.block_id)
    if block_json['type'] != 'Introduction':
        return navigation.build_navigation(current_location.group_id, current_location.group_instance)
    return None
def get_page_title_for_location(schema, current_location, metadata, answer_store):
    """Compose the HTML page title for the current block.

    Interstitials use the group title, question blocks use the first
    question's title; everything else falls back to the survey title.
    The result is escaped with TemplateRenderer.safe_content.
    """
    block = schema.get_block(current_location.block_id)
    if block['type'] == 'Interstitial':
        group = schema.get_group(current_location.group_id)
        page_title = '{group_title} - {survey_title}'.format(group_title=group['title'], survey_title=schema.json['title'])
    elif block['type'] == 'Question':
        first_question = next(schema.get_questions_for_block(block))
        question_title = get_question_title(first_question, answer_store, schema, metadata, current_location.group_instance)
        page_title = '{question_title} - {survey_title}'.format(question_title=question_title, survey_title=schema.json['title'])
    else:
        page_title = schema.json['title']
    return TemplateRenderer.safe_content(page_title)
def _build_template(current_location, context, template, schema, answer_store, metadata, routing_path=None):
    """Assemble navigation and the previous-page URL, then render the block template."""
    front_end_navigation = _get_front_end_navigation(answer_store, current_location, metadata, schema, routing_path)
    previous_location = path_finder.get_previous_location(current_location)
    previous_url = previous_location.url(metadata) if previous_location is not None else None
    return _render_template(context, current_location, template, front_end_navigation, previous_url, schema, metadata, answer_store)
@with_session_timeout
@with_questionnaire_url_prefix
@with_metadata_context
@with_analytics
@with_legal_basis
def _render_template(context, current_location, template, front_end_navigation, previous_url, schema, metadata, answer_store, **kwargs):
    """Render *template*; the stacked decorators inject extra kwargs (timeout, url prefix, analytics, ...)."""
    page_title = get_page_title_for_location(schema, current_location, metadata, answer_store)
    return render_template(
        template,
        content=context,
        current_location=current_location,
        navigation=front_end_navigation,
        previous_location=previous_url,
        page_title=page_title,
        metadata=kwargs.pop('metadata_context'),  # `metadata_context` is used as `metadata` in the jinja templates
        **kwargs,
    )
def request_wants_json():
    """True when the client's Accept header prefers JSON over HTML."""
    accepts = request.accept_mimetypes
    best = accepts.best_match(['application/json', 'text/html'])
    if best != 'application/json':
        return False
    return accepts[best] > accepts['text/html']
| ONSdigital/census-survey-runner | app/views/questionnaire.py | questionnaire.py | py | 32,299 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "structlog.get_logger",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "app.globals.get_... |
8337543538 | #! /usr/bin/env python
import sys
import csv
import screed
import random
import argparse
import sourmash
import sequtils # local import
def main():
    """Generate error-prone reads from a genome and record a k-mer detection curve.

    Writes a CSV with, per read: cumulative coverage, the number/fraction of
    the genome's k-mers detected so far, plus FracMinHash detection counts for
    NUM_FRACMINHASH sketches built with distinct murmurhash seeds. Stops once
    fewer than 10% of the genome's k-mers remain undetected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('genome')
    parser.add_argument('-e', '--error-rate', type=float, default=.01)
    parser.add_argument('-r', '--read-length', type=int, default=100,
                        help="Length of reads to generate")
    parser.add_argument("-S", "--seed", dest="seed", help="Random seed", type=int,
                        default=1)
    # BUGFIX: type=int was missing, so a user-supplied -k arrived as a string
    parser.add_argument("-k", "--ksize", type=int, default=31, help="k-mer size")
    parser.add_argument("-o", "--output", required=True,
                        help="CSV output of detection curve")
    args = parser.parse_args()

    READLEN = args.read_length
    ERROR_RATE = args.error_rate
    NUM_FRACMINHASH = 5

    random.seed(args.seed)  # make this reproducible, please.

    records = list(screed.open(args.genome))
    assert len(records) == 1
    record = records[0]
    genome = record.sequence
    len_genome = len(genome)

    # exact (scaled=1) sketch holding every k-mer in the genome
    total_mh = sourmash.MinHash(0, args.ksize, scaled=1)
    total_mh.add_sequence(genome)
    all_hashes = set(total_mh.hashes)

    # make NUM_FRACMINHASH minhashes each with different mmh seeds
    all_hashes_list = []
    scaled_mh_list = []
    for i in range(NUM_FRACMINHASH):
        smh = sourmash.MinHash(0, args.ksize, scaled=1000, seed=i + 42)
        all_hashes_i = smh.copy_and_clear()
        all_hashes_i.add_sequence(genome)
        scaled_mh_list.append(smh)
        all_hashes_list.append(all_hashes_i)

    print('genome size:', len_genome, file=sys.stderr)
    print('readlen:', READLEN, file=sys.stderr)
    print('error rate:', ERROR_RATE, file=sys.stderr)
    # keep diagnostics on stderr so stdout stays clean for the progress line
    print('num k-mers:', len(total_mh), file=sys.stderr)

    reads_mut = 0
    total_mut = 0

    # BUGFIX: these were f-strings containing literal {0}/{1}/{2} placeholders
    # combined with .format(), which printed "genome 0 of length 1 from 2".
    print(f"Read in template genome {record['name']} of length {len_genome} from {args.genome}", file=sys.stderr)
    print(f"Generating reads of length {READLEN} with an error rate of {ERROR_RATE}", file=sys.stderr)

    it = iter(sequtils.generate_mutated_reads(genome, READLEN, ERROR_RATE))

    # 'with' guarantees the CSV is closed/flushed even on error
    with open(args.output, 'w', newline="") as fp:
        csv_w = csv.writer(fp)
        headers = ['num_reads', 'coverage', 'n_detected', 'f_detected']
        for i in range(NUM_FRACMINHASH):
            headers.append(f"smash_count_{i}")
        csv_w.writerow(headers)
        csv_w.writerow([0, 0, 0, 0] + [0] * NUM_FRACMINHASH)

        n_reads = 0
        total_bp_in_reads = 0
        f01 = len(all_hashes) * 0.1  # stop once <10% of k-mers remain undetected
        remaining_hashes = set(all_hashes)
        while len(remaining_hashes) > f01:
            start, read, read_mutations = next(it)
            if read_mutations:
                reads_mut += 1
                total_mut += read_mutations
            n_reads += 1
            total_bp_in_reads += len(read)

            # first, track _all_ hashes for actual k-mer detection
            mh = total_mh.copy_and_clear()
            mh.add_sequence(read)
            remaining_hashes -= set(mh.hashes)

            n_detected = len(all_hashes) - len(remaining_hashes)
            f_detected = n_detected / len(all_hashes)
            coverage = total_bp_in_reads / len_genome

            # now, track sourmash detection & intersect with legit hashes:
            smash_detection = []
            for smh, all_hashes_i in zip(scaled_mh_list, all_hashes_list):
                smh.add_sequence(read)
                smh_hashes = set(smh.hashes)
                smh_hashes.intersection_update(all_hashes_i.hashes)
                smash_detection.append(len(smh_hashes))

            csv_w.writerow([n_reads, f"{coverage:.4f}", n_detected, f"{f_detected:.4f}"] + smash_detection)

            sys.stdout.write(u'\r\033[K')
            # BUGFIX: the progress line printed len(all_hashes) (the constant
            # total) as "missing k-mers"; report the actual remaining count.
            sys.stdout.write(f"...{n_reads} reads, {len(remaining_hashes)} missing k-mers, {total_bp_in_reads / len_genome:.2f} coverage")
            sys.stdout.flush()


if __name__ == '__main__':
    sys.exit(main())
| ctb/2022-sourmash-sens-spec | scripts/make-detection-curve.py | make-detection-curve.py | py | 3,926 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "screed.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sourmash.MinHash",
... |
22558981666 | import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
#Dane początkowe
k1 = 1
m = 1
h = 0.05
x0 = 10
vx0 = 0
w1 = math.sqrt(k1/m)
A1 = math.sqrt((vx0*vx0)/(w1*w1) + (x0*x0))
iloscPunktow = 1000
#oś XY
setXl = 0
setXr = 55
setYl = 49.95
setYr = 50.04
if(vx0 <= 0):
fi1 = math.acos(x0/A1) * 180/math.pi
else:
fi1 = -math.acos(x0/A1) * 180/math.pi
#Wypisanie danych poczatkowych
print("\nk1 = {0}\nm = {1}\nh = {2}\nx0 = {3}\nvx0 = {4}\nw1 = {5}\nA1 = {6}\nfi1 = {7}"
.format(k1, m, h, x0, vx0, w1, A1, fi1))
print("\nIlosć punktów = {0}".format(iloscPunktow))
#Czas
time = []
for i in range(0, iloscPunktow+1):
time.append(round((i*h), 2))
#print(time[i])
#rozwiazanie dokladne listy oraz wartosci poczatkowe
dokladneX = []
dokladneVX = []
dokladneX.append(x0)
dokladneVX.append(vx0)
dokladneE = []
#metoda eulera listy oraz wartosci poczatkowe
eulerX = []
eulerVX = []
eulerX.append(x0)
eulerVX.append(vx0)
eulerE = []
#metoda punktu posredniego listy oraz wartosci poczatkowe
posredniaX = []
posredniaVX = []
posredniaX.append(x0)
posredniaVX.append(vx0)
posredniaE = []
#metoda verleta listy oraz wartosci poczatkowe
verletX = []
verletVX = []
verletX.append(x0)
verletVX.append(vx0)
verletE = []
#metoda beemana listy oraz wartosci poczatkowe
beemanX = []
beemanVX = []
beemanX.append(x0)
beemanVX.append(vx0)
beemanE = []
# fill in the position/velocity lists for every integration method
for i in range(1, iloscPunktow+1):
    # exact (analytic) solution
    dokX = A1 * math.cos(w1 * time[i] + fi1 * math.pi / 180)
    dokVX = -A1 * w1 * math.sin(w1 * time[i] + fi1 * math.pi/180)
    dokladneX.append(dokX)
    dokladneVX.append(dokVX)
    # Euler method
    eulX = eulerX[i - 1] + eulerVX[i - 1] * h
    eulVX = eulerVX[i - 1] - (w1 * w1) * eulerX[i - 1] * h
    eulerX.append(eulX)
    eulerVX.append(eulVX)
    # midpoint method
    posX = posredniaX[i-1] + posredniaVX[i-1] * h - 0.5 * (w1 * w1) * posredniaX[i-1] * (h * h)
    posVX = posredniaVX[i-1] - (w1 * w1) * posredniaX[i-1] * h
    posredniaX.append(posX)
    posredniaVX.append(posVX)
    # Verlet method
    verX = verletX[i - 1] + verletVX[i - 1] * h - 0.5 * (w1 * w1) * verletX[i - 1] * (h * h)
    verletX.append(verX)
    verVX = verletVX[i - 1] - 0.5 * (w1 * w1) * (verletX[i - 1] + verletX[i]) * h
    verletVX.append(verVX)
    # Beeman method
    # first step bootstrapped from the Verlet result
    if(i == 1):
        beemanX.append(verletX[1])
        beemanVX.append(verletVX[1])
    else:
        bemX = beemanX[i - 1] + beemanVX[i - 1] * h + (w1 * w1) * (beemanX[i - 2] - 4 * beemanX[i - 1]) * (h * h)/6
        beemanX.append(bemX)
        bemVX = beemanVX[i - 1] + (w1 * w1) * (beemanX[i - 2] - 5 * beemanX[i - 1] - 2 * beemanX[i]) * h/6
        beemanVX.append(bemVX)
# total energy (kinetic + potential) for each method at every time step
for i in range(0, iloscPunktow+1):
    dokE = 0.5 * k1 * (A1*A1)
    dokladneE.append(dokE)
    eulE = m * (eulerVX[i] * eulerVX[i])/2 + k1 * (eulerX[i] * eulerX[i]/2)
    eulerE.append(eulE)
    posE = m * (posredniaVX[i] * posredniaVX[i])/2 + k1 * (posredniaX[i] * posredniaX[i]/2)
    posredniaE.append(posE)
    verE = m * (verletVX[i] * verletVX[i])/2 + k1 * (verletX[i] * verletX[i]/2)
    verletE.append(verE)
    bemE = m * (beemanVX[i] * beemanVX[i])/2 + k1 * (beemanX[i] * beemanX[i]/2)
    beemanE.append(bemE)
# Animation: data buffers and static plot setup
xdata = []
ydata = []
xdata2 = []
ydata2 = []
xdata3 = []
ydata3 = []
font1 = {'family': 'serif', 'color': 'blue', 'size': 20}
font2 = {'family': 'serif', 'color': 'darkred', 'size': 15}
fig, ax = plt.subplots()
ax.set_xlim(setXl, setXr)
ax.set_ylim(setYl, setYr)
plt.title("Energia całkowita oscylatora", fontdict=font1)
plt.xlabel("t", fontdict = font2)
plt.ylabel("E", fontdict = font2)
line, = ax.plot(0, 0, '.')  # blue
line2, = ax.plot(0, 0, 'r.')  # red
line3, = ax.plot(0, 0, 'g.')  # green
line.set_label('rozwiązanie Dokładne')
line2.set_label('metoda Verleta')
line3.set_label('metoda Beemana')
ax.legend()
def animation_frame(i):
    """FuncAnimation callback: append the i-th sample of each energy series and redraw."""
    xdata.append(time[i])
    ydata.append(dokladneE[i])
    xdata2.append(time[i])
    ydata2.append(verletE[i])
    xdata3.append(time[i])
    ydata3.append(beemanE[i])
    line.set_xdata(xdata)
    line.set_ydata(ydata)
    line2.set_xdata(xdata2)
    line2.set_ydata(ydata2)
    line3.set_xdata(xdata3)
    line3.set_ydata(ydata3)
    return line, line2, line3,
# animate one frame per time sample, 5 ms apart
animation = FuncAnimation(fig, func = animation_frame, frames = np.arange(0, iloscPunktow + 1, 1), interval = 5)
plt.show()
{
"api_name": "math.sqrt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "math.acos",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 26,
"u... |
15156725753 | import os
import math
import numpy as np
from tqdm import tqdm
import pickle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from models import l2norm
## Memory
class Memory(nn.Module):
    """External memory bank of L2-normalised feature vectors.

    Holds `mem_size` slots of dimension `feat_dim`. Slots are updated by
    explicit rules (moving average or age-weighted sum) rather than by
    gradient descent, and a margin-based loss can be computed against the
    bank for training the feature extractor.
    """
    def __init__(self, mem_size=500000, feat_dim=256, margin=1, topk=1000, update_rate=0.1):
        super(Memory, self).__init__()
        self.mem_size = mem_size
        self.feat_dim = feat_dim
        # memory slots and per-slot update counters
        self.Mem = nn.Parameter(torch.zeros(mem_size, feat_dim))
        self.Ages = nn.Parameter(torch.zeros(mem_size, 1))
        self.topk = topk
        self.margin = margin
        self.update_rate = update_rate
        # At this time, we don't train mem by gradient descent
        self.Mem.requires_grad = False
        self.Ages.requires_grad = False
    def update_mem(self, x, labels):
        """Moving-average update of the slots indexed by *labels* with batch features *x*."""
        with torch.no_grad():
            self.Mem[labels] = l2norm(self.update_rate * x.data + (1 - self.update_rate) * self.Mem[labels])
    def update_mem_with_ages(self, x, labels):
        """Age-weighted update: ages are incremented first, then the slot is re-normalised.

        NOTE(review): the old slot is weighted by its age, so frequently
        updated slots change less — confirm this matches the intended scheme.
        """
        with torch.no_grad():
            self.Ages[labels] += 1.
            self.Mem[labels] = l2norm(x.data + self.Mem[labels] * self.Ages[labels])
    def search_l2(self, x, topk):
        """Return the *topk* nearest memory slots for each row of *x*.

        Distances are squared L2 (no sqrt is taken). Returns (distances,
        indices), each of shape (batch_size, topk).
        """
        batch_size = x.size(0)
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.mem_size) + \
                  torch.pow(self.Mem, 2).sum(dim=1, keepdim=True).expand(self.mem_size, batch_size).t()
        # distmat += -2 * x @ Mem.T  => ||x||^2 + ||m||^2 - 2 x.m (squared L2)
        distmat.addmm_(x, self.Mem.t(), beta=1, alpha=-2)
        distances, indices = torch.topk(distmat, topk, largest=False)
        return distances, indices
    def compute_l2loss(self, x, labels):
        """Margin loss on squared L2 distances to the memory bank.

        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size).
        Returns:
            (mean hinge loss, mean positive distance, mean negative distance),
            where the negative term averages the self.topk closest
            non-matching slots per sample.
        """
        batch_size = x.size(0)
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.mem_size) + \
                  torch.pow(self.Mem, 2).sum(dim=1, keepdim=True).expand(self.mem_size, batch_size).t()
        distmat.addmm_(x, self.Mem.t(), beta=1, alpha=-2)
        classes = torch.arange(self.mem_size).long()
        if labels.is_cuda:
            classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.mem_size)
        mask = labels.eq(classes.expand(batch_size, self.mem_size))
        # distance to the matching slot(s); clamp avoids log/grad blow-ups
        dist1 = distmat * mask.float()
        min_loss = dist1.clamp(min=1e-12, max=1e+12).sum(1)
        dist2 = distmat * (1.0 - mask.float())
        # NOTE(review): sums self.topk values but divides by (topk - 1) — confirm intent
        max_loss = torch.topk(dist2, self.topk, dim=1, largest=False)[0].sum(1) / (self.topk - 1)
        loss = F.relu(min_loss - max_loss + self.margin)
        return loss.mean(), min_loss.mean(), max_loss.mean()
| toanhvu/learning-to-remember-beauty-products | memory.py | memory.py | py | 2,793 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
25359325465 | # -*- coding: utf-8 -*-
from __future__ import division
import scrapy
from scrapy import Request
# from street_food.items import StreetFoodItem, StreetFoodDatTimeItem
from street_food.items import StreetFoodDatTimeItem
from street_food.spiders import tools
import json
from urllib import urlopen
# import random
from street_food.tools import basic_tools
class GetFoodOffTheGrid(scrapy.Spider):
    """Scrapy spider that harvests Off The Grid market/event schedules and
    tags each vendor with its Maize (yumbli) kitchen id when the names match.

    NOTE(review): `from urllib import urlopen` at the top of this file
    implies Python 2.
    """
    name = "offthegrid"
    allowed_domains = ["offthegridmarkets.com", "offthegrid.com"]
    # Seed request: markets near San Francisco, sorted by distance.
    start_urls = [
        'https://offthegrid.com/otg-api/passthrough/markets.json/?latitude=37.7749295&longitude=-122.41941550000001&sort-order=distance-asc'
    ]
    custom_settings = {
        "ITEM_PIPELINES": {
            "street_food.pipelines.ApiUploader": 10,
        }
    }
    def parse(self, response):
        ''' Parse the JSON list of markets and schedule one request per market. '''
        markets = json.loads(response.text)
        market_url = "https://offthegrid.com/otg-api/passthrough/markets/{}.json/"
        # Get list of markets in San Francisco.
        for market in [market for market in markets["Markets"]]:
            market = market['Market']
            market_id = market['id']
            yield Request(market_url.format(market_id),
                          callback=self.parse_market)
    def parse_market(self, response):
        ''' Parse one market: yield an item per (event, vendor) pair. '''
        # Load the Maize vendor list and index it by lowercased name.
        maizeresp = urlopen('http://yumbli.herokuapp.com/api/v1/allkitchens/?format=json')
        vendors = json.loads(maizeresp.read().decode('utf8'))
        maizevendors = {}
        for v in vendors:
            maizevendors[v['name'].lower()] = v['id']
        # NOTE(review): a single item instance is mutated and re-yielded for
        # every vendor; consider creating a fresh item per vendor instead.
        item = StreetFoodDatTimeItem()
        market = json.loads(response.text)
        market_detail = market["MarketDetail"]["Market"]["Market"]
        market_events = market["MarketDetail"]["Events"]
        # Market address.
        market_address = market_detail["address"].strip()
        market_city = market_detail["city"].strip()
        full_address = "{} {}".format(market_address, market_city)
        # Market location.
        market_latitude = market_detail['latitude']
        market_longitude = market_detail['longitude']
        # Add data to item.
        item['address'] = full_address
        # Parse market events.
        for event in market_events:
            start_datetime, end_datetime = tools.get_start_end_datetime(event['Event'])
            item['start_datetime'] = start_datetime
            item['end_datetime'] = end_datetime
            # Parse vendors of event.
            for vendor in event['Vendors']:
                vendor_name = vendor['name']
                item['VendorName'] = vendor_name
                # mix_location presumably jitters the raw coordinates (the
                # original commented-out random offsets suggest so) — TODO
                # confirm against street_food.tools.basic_tools.
                item['latitude'] = basic_tools.mix_location(market_latitude)
                item['longitude'] = basic_tools.mix_location(market_longitude)
                if vendor_name and vendor_name.lower() in maizevendors.keys():
                    item['maize_status'] = 'found'
                    item['maize_id'] = maizevendors[vendor_name.lower()]
                else:
                    item['maize_status'] = 'not found'
                    item['maize_id'] = 'n/a'
                yield item
| kirimaks/street-food-scraper | street_food/street_food/spiders/offthegrid.py | offthegrid.py | py | 3,638 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"li... |
33851174074 | '''
boss class
'''
import pygame
class Boss(pygame.sprite.Sprite):
    """Horizontally sweeping boss sprite that periodically drops a laser."""

    def __init__(self, laser):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("images/Boss.gif").convert()
        self.rect = self.image.get_rect()
        # Spawn position at the top of the screen.
        self.rect.x = 500
        self.rect.y = 0
        self.health = 200
        # Shared laser sprite, driven by attack().
        self.laser = laser
        self.lasertimer = 0
        # Sweep direction flags: exactly one is True at any time.
        self.left = False
        self.right = True

    def update(self):
        """Advance one frame: move horizontally, then progress the attack."""
        self.movement()
        self.attack()

    def getHealth(self):
        """Return the boss's current hit points."""
        return self.health

    def setHealth(self):
        """Apply one hit of damage (10 points)."""
        self.health = self.health - 10

    def attack(self):
        """Drive the laser cycle: spawn it under the boss on tick 20,
        let it fall 15 px per frame, and recycle it once offscreen."""
        self.lasertimer += 1
        if self.lasertimer == 20:
            # Fire: place the laser just below the boss.
            self.laser.rect.x = self.rect.x + 50
            self.laser.rect.y = self.rect.y
        if self.lasertimer > 20:
            self.laser.rect.y += 15
        if self.laser.rect.y > 600:
            # Laser left the play field: park it offscreen, restart cycle.
            self.lasertimer = 0
            self.laser.rect.x = -500
            self.laser.rect.y = -500

    def movement(self):
        """Sweep at 10 px/frame, reversing direction at x=50 and x=900."""
        if self.rect.x > 900:
            self.right, self.left = False, True
        elif self.rect.x < 50:
            self.left, self.right = False, True
        step = 0
        if self.left:
            step -= 10
        if self.right:
            step += 10
        self.rect.x += step
| Inviernos/Alien-Lord | boss.py | boss.py | py | 1,580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "py... |
32203126633 | import datetime
import random
import yaml
from requests import get
def compute_median(lst):
    """Return the median of a list of numbers.

    Bug fix: the original indexed the *unsorted* list in the odd-length
    branch, returning whatever value happened to sit at the middle index;
    only the even-length branch sorted.  Both branches now work on a
    sorted copy (the input itself is not modified).

    :param lst: non-empty list of numeric values
    :return: the median value
    """
    ordered = sorted(lst)
    quotient, remainder = divmod(len(ordered), 2)
    if remainder:
        return ordered[quotient]
    return sum(ordered[quotient - 1:quotient + 1]) / 2
def compute_avg(lst):
    """Return the arithmetic mean of the values in ``lst``.

    :param lst: non-empty list of numeric values
    :return: the arithmetic mean
    """
    total = sum(lst)
    count = len(lst)
    return total / count
def usage_type(avg, median):
    """Classify usage by comparing the mean to the median.

    Within 25% of the median -> stable; above -> spikes; otherwise decline.

    :param avg: mean of the metric values
    :param median: median of the metric values
    :return: usage-type label (Russian string)
    """
    upper = 1.25 * median
    lower = 0.75 * median
    if lower < avg < upper:
        return "стабильна"
    if avg > upper:
        return "скачки"
    return "снижения"
def intensity(median):
    """Bucket the median metric value into an intensity label.

    (0, 30] low, (30, 60] moderate, (60, 90] high, anything else
    (including <= 0) extreme.

    :param median: median of the metric values
    :return: intensity label (Russian string)
    """
    if 0 < median <= 30:
        return "низкая"
    if 30 < median <= 60:
        return "умеренная"
    if 60 < median <= 90:
        return "высокая"
    return "запредельная"
def decision(usage, intens):
    """Decide what to do with a resource from its usage type and intensity.

    Returns None for combinations outside the expected label sets
    (mirrors the original fall-through behaviour).

    :param usage: usage-type label from usage_type()
    :param intens: intensity label from intensity()
    :return: decision label (Russian string) or None
    """
    if intens == "низкая":
        return "отказаться"
    if intens == "запредельная":
        return "усилить"
    if intens == "умеренная" and usage in ("стабильна", "скачки"):
        return "отсавить"
    if intens == "высокая" and usage in ("снижения", "стабильна"):
        return "отсавить"
    if usage == "снижения" and intens == "умеренная":
        return "отказаться"
    if usage == "скачки" and intens == "высокая":
        return "усилить"
def obj_creator(data):
    """Build a nested team -> project -> resource mapping from raw rows.

    Each row is (team, project, resource, date, metric); metrics are
    appended as {"time": date, "value": int(metric)} dicts, preserving
    input order.

    :param data: iterable of 5-tuples
    :return: nested dict of metric lists
    """
    final_data = {}
    for team_name, project, resource, due, resource_metric in data:
        entry = {"time": due, "value": int(resource_metric)}
        (final_data
            .setdefault(team_name, {})
            .setdefault(project, {})
            .setdefault(resource, [])
            .append(entry))
    return final_data
def get_data_from_http(url):
    """
    Fetch and parse resource metrics over HTTP.

    The endpoint returns team blocks separated by '$'; each block is
    "<team>|(project,resource,date,metric);(...);...".

    :param url: base address of the metrics web server
    :return: list of (team, project, resource, datetime.date, int) tuples
    """
    # A random page 1..3 is requested on every call — looks like sampling
    # rather than pagination; TODO confirm with the server implementation.
    rnd_seed = random.randint(1, 3)
    team_raw = get(url + f"/monitoring/infrastructure/using/summary/{rnd_seed}").text.split("$")
    final_list = []
    for team_raw_data in team_raw:
        team_name, team_data = team_raw_data.split("|")
        team_data = team_data.split(";")
        for team_data_split_data in team_data:
            # Strip the surrounding parentheses, then split the CSV fields.
            project, resource, due, resource_metric = team_data_split_data[1:-1].split(",")
            # Only the leading YYYY-MM-DD portion of the date field is used.
            yr, mt, dy = due[0:10].split("-")
            date = datetime.date(year=int(yr), month=int(mt), day=int(dy))
            final_list.append((team_name, project, resource, date, int(resource_metric)))
    return final_list
if __name__ == '__main__':
print("start")
full_msg = get_data_from_http("http://127.0.0.1:21122/")
final_data = obj_creator(full_msg)
yaml_price = get("http://127.0.0.1:21122/monitoring/infrastructure/using/prices").text
price_full = yaml.safe_load(yaml_price)["values"]
print("Ресурс|Значение|среднее|медиана|использование|интенсивность|решение|дата последний метрики|цена")
for name, prj in final_data.items():
print(f"команда {name}")
for prj_name, res_values in prj.items():
summ = 0
for res, values in res_values.items():
value_list = []
time = []
for value in values:
value_list.append(value["value"])
time.append(value["time"])
last_time = time[-1] + datetime.timedelta(14)
median = compute_median(value_list)
avg = compute_avg(value_list)
usage = usage_type(avg, median)
intens = intensity(median)
final_decision = decision(usage, intens)
cost = price_full[prj_name]
summ += int(cost[res])
print(f"{prj_name} | {res} | {avg} | {median} | {usage} | {intens} | {final_decision} | {last_time} | {cost[res]}")
print(f"Цена за ресурс = {summ}")
| zombym/devops-tasks | 5.5.1.py | 5.5.1.py | py | 5,538 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_n... |
23184643387 | #!/usr/bin/env python3
#encoding: UTF-8
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import numpy as np
import matplotlib.pyplot as plt
import math
import TrashCan.Mathieson as mat
import C.PyCWrapper as PCWrap
import Util.plot as uPlt
import Util.dataTools as tUtil
def vErf(x):
    """Element-wise error function over a 1-D numpy array.

    :param x: 1-D array of abscissae
    :return: numpy array with erf applied to each element
    """
    return np.array([math.erf(v) for v in x])
def computeGaussian1D(x, mu=0.0, var=1.0):
    """Evaluate the normal pdf N(mu, var) at the points x.

    :param x: array of abscissae
    :param mu: mean of the Gaussian
    :param var: variance of the Gaussian
    :return: array of density values
    """
    sigma = np.sqrt(var)
    z = (x - mu) / sigma
    norm_cst = 1.0 / (sigma * np.sqrt(2.0 * np.pi))
    return norm_cst * np.exp(-0.5 * z * z)
def gaussianIntegral(x):
    """Return erf(x / sqrt(2)) element-wise (i.e. 2*Phi(x) - 1 for the
    standard normal), ranging over [-1, 1].

    The erf loop is inlined here instead of delegating to vErf.

    :param x: 1-D array of abscissae
    :return: numpy array of integral values
    """
    scale = 1.0 / np.sqrt(2.0)
    return np.array([math.erf(v) for v in x * scale])
class TabulatedChargeIntegration:
    """Clamped cubic-spline tabulation of a charge-integration function.

    Spline implementation from the book "Numerical Analysis" - 9th edition,
    Richard L. Burden, J. Douglas Faires, Section 3.5, p. 146.
    Restrictions: requires a regular sampling (dx = cst).
    spline(x) : [-inf, +inf] -> [-1/2, +1/2]
    Error < 7.0e-11 for 1001 samples in [0, 3.0].
    """
    def __init__(self, x, f, dx, lDerivate, rDerivate ):
        """Precompute the piecewise-cubic coefficients a, b, c, d.

        x          -- regularly spaced sample abscissae
        f          -- function values at x
        dx         -- constant sampling step (x[i+1] - x[i])
        lDerivate  -- prescribed derivative at the left boundary
        rDerivate  -- prescribed derivative at the right boundary
        """
        self.nTabulations = x.size
        N = x.size
        # Coefficients of s_i(t) = a[i] + b[i]*t + c[i]*t^2 + d[i]*t^3,
        # with t = x - x[i].
        self.a = np.copy( f )
        self.b = np.zeros(N)
        self.c = np.zeros(N)
        self.d = np.zeros(N)
        self.dx = dx
        # Step 1 (book): h[i] = x[i+1] - x[i].  Regular sampling makes h a
        # scalar rather than an array.
        h = self.dx
        # Step 2 & 3: right-hand side of the tridiagonal system, with the
        # clamped (prescribed derivative) boundary rows.
        alpha = np.zeros(N)
        alpha[0] = 3.0 / h * (f[1] - f[0]) - 3*lDerivate
        alpha[N-1] = 3*rDerivate - 3.0 / h * (f[N-1] - f[N-2])
        for i in range(1, N-1):
            alpha[i] = 3.0/h * (f[i+1] - f[i]) - 3.0/h * (f[i] - f[i-1]);
        # Steps 4-6: solve the tridiagonal linear system.
        # Step 4: first row of the factorization.
        l = np.zeros(N)
        mu = np.zeros(N)
        z = np.zeros(N)
        l[0] = 2 * h
        mu[0] = 0.5
        z[0] = alpha[0] / l[0]
        # Step 5: forward elimination.
        for i in range(1, N-1):
            l[i] = 2 * (x[i+1] - x[i-1]) - h * mu[i-1];
            mu[i] = h / l[i];
            z[i] = (alpha[i] - h*z[i-1]) / l[i];
        # Step 6 & 7: last row, then back substitution for c, and b, d from c.
        l[N-1] = h*(2.0-mu[N-2])
        z[N-1] = (alpha[N-1] - h*z[N-2]) / l[N-1]
        self.c[N-1] = z[N-1]
        for j in range(N-2, -1, -1):
            self.c[j] = z[j] - mu[j] * self.c[j+1]
            self.b[j] = (f[j+1]-f[j]) / h - h/3.0 * (self.c[j+1] + 2*self.c[j])
            self.d[j] = (self.c[j+1]-self.c[j]) / (3 * h)
    def splineAtanTanh( self, x ):
        """Evaluate the tabulated spline at arbitrary points x.

        The tabulated function is odd, so negative abscissae are folded onto
        the positive half and the sign restored on the result; points beyond
        the last knot are clamped to the last tabulated value (h forced to 0).
        NOTE(review): the prints below look like leftover debug output.
        """
        a = self.a
        b = self.b
        c = self.c
        d = self.d
        N = self.nTabulations
        signX = np.where( x >= 0, 1.0, -1.0 )
        # unsigned x
        uX = x * signX
        np.set_printoptions(precision=15)
        cst = 1.0 / self.dx
        # Knot-interval index; the dx*0.1 offset guards against round-off
        # exactly at the knots.
        u = np.trunc( uX * cst + self.dx*0.1)
        idx = np.int32(u)
        # Clamp indexes that fall beyond the table.
        idx = np.where( idx >= N, N-1, idx)
        h = np.where( idx < N-1, uX - idx * self.dx, 0)
        print ("uX  ", uX)
        print ("h  ", h)
        print ("f(x0) ", a[idx])
        print ("df|dx0", h*( b[idx] + h*( c[idx] + h *(d[idx]))))
        print ("f, ", a[idx] + h*( b[idx] + h*( c[idx] + h *(d[idx]))))
        # Horner evaluation of the cubic, sign restored for the odd function.
        f = signX * (a[idx] + h*( b[idx] + h*( c[idx] + h *(d[idx]))))
        return f
if __name__ == "__main__":
#pcWrap = PCWrap.setupPyCWrapper()
#pcWrap.initMathieson()
xPrecision = 1.0e-3
xLimit = 3.0
N = int(xLimit / xPrecision) + 1
x = np.linspace(0.0, xLimit, N)
dxVar = x[1:] - x[0:-1]
print("Verify sampling N, xPrecision, dxMin, dxMax", N, xPrecision, np.min(dxVar), np.max(dxVar))
dx = xPrecision
mat0 = mat.Mathieson( 0, 1.0 )
leftDerivate = 2.0 * mat0.curK4x * mat0.curSqrtK3x * mat0.curK2x * mat0.curInvPitch
print("leftDerivate", leftDerivate)
# leftDerivate = 2.77
y = mat0.computeAtanTanh( x)
tf = TabulatedChargeIntegration(x, y, dx, leftDerivate, 0.0)
"""
m = int( N/2 )
print("N", N, x.size )
print("x ", x[0], x[1], x[2], '...', x[m-1], x[m], x[m+1], "...", x[-3], x[-2], x[-1] )
print("\n")
print("maxErr", np.max(np.abs(f-y)) )
"""
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
# Spline at sampling points
f = tf.splineAtanTanh(x)
ax[0,0].plot( x, y)
ax[0,0].scatter( x, f, marker='x', color="red")
# , markersize=4)
ax[0,0].set_ylabel( "atan(tanh(x0)) and spline(x0) [in red]")
#
ax[0,1].scatter( x, f-y, marker='x')
ax[0,1].set_ylabel( "atan(tanh(x0)) - spline(x0)")
# Far away points
print("--------------------")
x1 = x + 0.0095
y1 = mat0.computeAtanTanh( x1)
f1 = tf.splineAtanTanh(x1)
print("y1", y1)
ax[1,0].scatter( x1, f1-y1, marker='x')
ax[1,0].set_ylabel( "atan(tanh(x1)) - spline(x1)")
print("--------------------")
# RND
xrnd = (np.random.ranf(20*N) * 2 - 1.0) * (xLimit + 1.0)
frnd = tf.splineAtanTanh(xrnd)
yrnd = mat0.computeAtanTanh(xrnd)
#
ax[1,1].scatter( xrnd, frnd-yrnd, marker='x')
ax[1,1].set_ylabel( "atan(tanh(rnd)) - spline(rnd)")
# relative error
# ax[1,1].scatter( x1[1:], (f1[1:]-y1[1:]) / y1[1:] )
#
print("maxErr f1", np.max(np.abs(f1-y1)) )
print( "convergence last point y, dy ", y1[-1], np.max(np.abs(f1[-1]-y1[-1])))
np.set_printoptions(precision=15)
print( "f(x) x=[0, ..,9]", mat0.computeAtanTanh( np.arange(10.0)) - 0.5 )
print("FIRST POINT")
tf.splineAtanTanh( np.array([0.0]) )
print("Function", mat0.computeAtanTanh( np.array([0.0])) )
print("Last POINT")
tf.splineAtanTanh( np.array([2.0]) )
print("Function", mat0.computeAtanTanh( np.array([2.0])) )
print("Outer POINT")
tf.splineAtanTanh( np.array([15.0]) )
print("Function", mat0.computeAtanTanh( np.array([15.0])) )
xx = np.arange(6.0)
print("xx", xx )
print("f(xx) - 0.5", mat0.computeAtanTanh( xx ) - 0.5)
plt.show()
| grasseau/MCHClustering | src/PyTests/spline_t.py | spline_t.py | py | 7,607 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.erf",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 26... |
27529865063 | import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
def load_data(filename):
    """Read whitespace-separated numeric samples via numpy.loadtxt.

    :param filename: path (or file-like object) to read
    :return: numpy array of samples
    """
    return np.loadtxt(filename)
def plot_signal_waveform(data, fs):
    # Draw the raw signal amplitude against time (subplot 1 of 3).
    # data: 1-D sample array; fs: sampling rate in Hz.
    duration = len(data) / fs  # total duration in seconds
    time = np.linspace(0, duration, len(data))
    plt.subplot(3,1,1)
    plt.plot(time, data)
    plt.xlabel("Time (s)")
    plt.ylabel("Amplitude")
    plt.title("Original Signal")
def plot_stft_spectrogram(data, fs, window, nperseg, noverlap):
    # Compute the STFT and draw its magnitude spectrogram (subplot 2 of 3).
    # window/nperseg/noverlap are forwarded to scipy.signal.stft.
    f, t, Zxx = signal.stft(data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap)
    # Draw the time-frequency magnitude map.
    plt.subplot(3,1,2)
    plt.pcolormesh(t, f, np.abs(Zxx), cmap='YlOrBr')
    plt.title('STFT Magnitude')
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
def plot_fft_magnitude(data, fs):
    # Compute the full FFT of the signal (subplot 3 of 3).
    fft_data = np.fft.fft(data)
    freqs = np.fft.fftfreq(len(fft_data), 1/fs)
    # Draw the magnitude spectrum (positive and negative frequencies).
    plt.subplot(3,1,3)
    plt.plot(freqs, np.abs(fft_data))
    plt.title('FFT Magnitude')
    plt.ylabel('Magnitude')
    plt.xlabel('Frequency [Hz]')
if __name__ == '__main__':
filename = 'Software/data/1.csv'
data = load_data(filename)
fs = 1000
window = signal.windows.hann(128) # 窗函数
nperseg = 128 # STFT段长
noverlap = nperseg//2 # STFT重叠长度
plot_signal_waveform(data, fs)
plot_stft_spectrogram(data, fs, window, nperseg, noverlap)
plot_fft_magnitude(data, fs)
# 调整布局
plt.tight_layout()
# 显示图形
plt.show()
| huigang39/TENG | Software/dl/signal_analysis.py | signal_analysis.py | py | 1,602 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.loadtxt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
43968820856 | #!/usr/bin/env python
from Bio import SeqIO
import argparse
import json
import os
from CPT_GFFParser import gffParse, gffWrite
def parse_xmfa(xmfa):
    """Simple XMFA parser until https://github.com/biopython/biopython/pull/544

    Yields one LCB (list of sequence-entry dicts) per '=' separator.  Each
    entry has keys rid/id/start/end/strand/seq/comment.
    NOTE(review): sequence-line accumulation is commented out at the bottom,
    so 'seq' is always left empty.
    """
    current_lcb = []
    current_seq = {}
    for line in xmfa.readlines():
        if line.startswith("#"):
            # Header/comment line: skip.
            continue
        if line.strip() == "=":
            # End of an LCB: flush the pending sequence and emit the block.
            if "id" in current_seq:
                current_lcb.append(current_seq)
                current_seq = {}
            yield current_lcb
            current_lcb = []
        else:
            line = line.strip()
            if line.startswith(">"):
                # New sequence header: flush the previous entry first.
                if "id" in current_seq:
                    current_lcb.append(current_seq)
                    current_seq = {}
                data = line.strip().split()
                # 0 1 2 3 4 5
                # > 1:5986-6406 + CbK.fa # CbK_gp011
                id, loc = data[1].split(":")
                start, end = loc.split("-")
                current_seq = {
                    "rid": "_".join(data[1:]),
                    "id": id,
                    "start": int(start),
                    "end": int(end),
                    "strand": 1 if data[2] == "+" else -1,
                    "seq": "",
                    "comment": "",
                }
                # Anything after the 5th column is a free-form comment.
                if len(data) > 5:
                    current_seq["comment"] = " ".join(data[5:])
            # else:
            #     current_seq['seq'] += line.strip()
def percent_identity(a, b):
    """Calculate % identity between two aligned strings, ignoring gaps
    ('-') in the host sequence `a`.

    :param a: host sequence (gaps skipped)
    :param b: aligned query sequence
    :return: percent identity as a float, 0.0 when nothing is comparable
    """
    pairs = [(ca, cb) for ca, cb in zip(a, b) if ca != "-"]
    if not pairs:
        return 0.0
    matches = sum(1 for ca, cb in pairs if ca == cb)
    return 100 * float(matches) / len(pairs)
def get_fasta_ids(sequences):
    """Returns a list of fasta record ids in the order they appear.

    :param sequences: path or handle accepted by Bio.SeqIO.parse
    :return: list of record id strings
    """
    ids = []
    for seq in SeqIO.parse(sequences, "fasta"):
        ids.append(seq.id)
    return ids
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="parse xmfa file")
parser.add_argument("gff3", type=argparse.FileType("r"), help="Multi-GFF3 File")
parser.add_argument("fasta", type=argparse.FileType("r"), help="Multi-FA file")
parser.add_argument("xmfa", type=argparse.FileType("r"), help="XMFA File")
parser.add_argument("output_dir", type=str, help="output directory")
args = parser.parse_args()
fasta_list = get_fasta_ids(args.fasta)
lcbs = parse_xmfa(args.xmfa)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
output = {"fasta": [], "gff3": [], "xmfa": None}
processed_xmfa = os.path.join(args.output_dir, "regions.json")
with open(processed_xmfa, "w") as handle:
json.dump([lcb for lcb in lcbs if len(lcb) > 1], handle, sort_keys=True)
output["xmfa"] = processed_xmfa
# Have to seek because we already access args.fasta once in id_tn_dict
args.fasta.seek(0)
# Load up sequence(s) for GFF3 data
seq_dict = SeqIO.to_dict(SeqIO.parse(args.fasta, "fasta"))
# Parse GFF3 records
gffs = gffParse(args.gff3, base_dict=seq_dict)
for record in sorted(gffs, key=lambda rec: fasta_list.index(rec.id)):
gff_output = os.path.join(args.output_dir, record.id + ".gff")
with open(gff_output, "w") as handle:
gffWrite([record], handle)
output["gff3"].append(gff_output)
fa_output = os.path.join(args.output_dir, record.id + ".txt")
with open(fa_output, "w") as handle:
handle.write(str(record.seq))
output["fasta"].append(
{"path": fa_output, "length": len(record.seq), "name": record.id}
)
print(json.dumps(output, sort_keys=True))
| TAMU-CPT/galaxy-tools | tools/comparative/xmfa_process.py | xmfa_process.py | py | 3,928 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "Bio.SeqIO.parse",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",... |
11323411187 | import random
import sys
from pyfiglet import Figlet
import requests
import json
import os
from dotenv import load_dotenv
# Setting up TMDB API Key
load_dotenv()
API_KEY = os.getenv('TMDB_API_KEY')
# Retrieve top rated movies in TheMovieDB
pages = {'results': []}
for i in range(5):
page = requests.get(f'https://api.themoviedb.org/3/movie/top_rated?api_key={API_KEY}&language=en-US&page={i+1}').json()
pages['results'].extend(page['results'])
# Create a list that will contain the names of the movies to be guessed by the player
list_of_movies = []
for result in pages['results']:
if result['original_language'] == 'en' and len(result['title']) < 40:
list_of_movies.append(result['title'].strip())
# Setting up header font
figlet = Figlet()
fonts = figlet.getFonts()
figlet.setFont(font='ogre')
def main():
    """Entry point: show the banner, then loop on the start screen.

    's' starts a round with a random top-rated movie, 'e' exits the
    program, anything else re-prompts.
    """
    print(figlet.renderText('Welcome to\n Movie\n Hangman!'))
    while True:
        user_input = input('Press s to start a new game or e to exit: ').strip()
        try:
            start = start_new_game(user_input)
        except ValueError:
            print('Invalid input')
            continue
        else:
            if start:
                movie_to_guess = get_movie(list_of_movies)
                game(movie_to_guess)
            else:
                sys.exit()
# Checks user input on the main screen to start a new game, exit the program or ask for input again if it was not valid
def start_new_game(play):
    """Interpret the start-screen command.

    :param play: raw user input ('s' starts, 'e' exits, case-insensitive)
    :return: True to start a game, False to exit
    :raises ValueError: for any other input
    """
    choice = play.lower()
    if choice == "s":
        print("Good luck!")
        return True
    if choice == "e":
        print("Ok. Goodbye!")
        return False
    raise ValueError('Invalid input')
# Selects a random movie from the list if available movies
def get_movie(list_of_movies):
    """Pick one title uniformly at random from the candidate list."""
    return random.choice(list_of_movies)
# Returns a list containing a '_' for each letter in the movie to guess
def hide_movie(movie):
    """Mask every alphabetic character of the title with '_'.

    Non-letters (spaces, digits, punctuation) are shown as-is.

    :param movie: title string
    :return: list of single-character strings
    """
    masked = []
    for ch in movie:
        masked.append('_' if ch.isalpha() else ch)
    return masked
# Starts up a game of Hangman.
def game(title):
    """Run one interactive round of Hangman for the given movie title.

    The player has 8 wrong guesses; the round ends when the title is fully
    revealed (win) or the guesses run out (loss).
    """
    hidden_movie = hide_movie(title) # a list containing a '_' for each letter in the movie to guess
    movie = title # name of the movie to be guessed as a string
    number_of_guesses = 8 # number of tries that the player has left.
    print(f'Your movie contains {hidden_movie.count("_")} letters.')
    print(' '.join(hidden_movie))
    # The following block will run while the player has guesses left. It will be interrupted if the player
    # guesses the correct word before running out of guesses.
    while number_of_guesses > 0:
        # As long as there are any '_' remaining in hidden_movie , the player will be asked to make a guess.
        if '_' in hidden_movie:
            print(f"You have {number_of_guesses} {'guess' if number_of_guesses == 1 else 'guesses'} left")
            user_guess = input('Enter a letter:').lower().strip()
            result = play_round(user_guess, movie, hidden_movie)
            # None means invalid or repeated input: no guess is consumed.
            if result is None:
                print(' '.join(hidden_movie))
                continue
            elif result:
                # If the player's guess was correct, any '_' in hidden_movie will be replaced with the correct letter
                indices = [i for i, x in enumerate(movie) if x.lower() == user_guess]
                for index in indices:
                    hidden_movie[index] = movie[index]
                print(' '.join(hidden_movie))
            else:
                number_of_guesses -= 1
                print(' '.join(hidden_movie))
        # If there aren't any '_' left in hidden_movie it means that all the letters have been
        # discovered and the player has won.
        else:
            print('You win!')
            break
    # If the player doesn't have any guesses left, a message including the correct word is shown.
    if number_of_guesses == 0:
        print(f"You Lose! The movie was {movie}")
def play_round(guess, title, hidden_title):
    """Validate and score one guess.

    :param guess: the player's input (expected: a single letter)
    :param title: the full movie title being guessed
    :param hidden_title: current masked title as a list of characters
    :return: None for invalid/repeated input, True for a hit, False for a miss
    """
    if len(guess) != 1 or not guess.isalpha():
        print('Invalid input. Please enter a letter')
        return None
    if guess in hidden_title or guess.upper() in hidden_title:
        print('You already guessed this letter. Try a different one')
        return None
    if guess in title.lower():
        print('Correct!')
        return True
    print('Wrong! Try again!')
    return False
if __name__ == '__main__':
main()
| MaCeleste/Movie-Hangman | project.py | project.py | py | 4,490 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyfiglet.Figlet",
"line... |
5361671905 |
import bottle
import json
import random
from . import DatabaseManager
from .product import Product
import recommender.vector.arithmetic
import recommender.rocchio.algorithm
@bottle.route('/product/get/<doc_id:int>')
def product_get(doc_id):
d = product_manager.get_product(doc_id).as_dictionary()
result = {'result': d}
return result
@bottle.route('/product/remove/<doc_id:int>', method='DELETE')
def product_remove(doc_id):
try:
product_manager.remove_document(doc_id)
except:
return {'result': False}
return {'result': True}
@bottle.route('/product/all')
def product_get_all():
l = [ p.as_dictionary() for p in product_manager.get_all_products()]
result = {'result': l}
#bottle.response.content_type = 'application/json'
return result
@bottle.route('/product/random/<count:int>')
def product_random(count):
    """Return `count` distinct products drawn at random, as dictionaries.

    Sampling is without replacement: each chosen product is removed from
    the working pool before the next draw.
    """
    pool = product_manager.get_all_products()
    picked = []
    while len(picked) < count:
        index = random_generator.randint(0, len(pool) - 1)
        picked.append(pool[index].as_dictionary())
        del pool[index]
    return {'result': picked}
@bottle.route('/product/insert', method='POST')
def product_insert():
    """
    Insert a product posted as the JSON form field 'product'.

    Example (the payload must be valid JSON, i.e. double-quoted keys —
    the original example used single quotes, which json.loads rejects):
    curl -X POST -d 'product={"image_name":"img.jpg","terms":{"a":1,"b":3}}'

    Returns {'result': True} on success, {'result': False} on any failure.
    """
    try:
        product_json = bottle.request.forms.get('product')
        product_dict = json.loads(product_json)
        p = Product()
        p.image_name = product_dict['image_name']
        p.terms = product_dict['terms']
        product_manager.add_document(p)
    except:
        # NOTE(review): bare except hides the failure cause; consider
        # catching (TypeError, ValueError, KeyError) and logging.
        return {'result': False}
    return {'result': True}
@bottle.route('/vector/default/<doc_id:int>')
def vector_default(doc_id):
d = (product_vector_manager
.get_vector_for_document_id(doc_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/df')
def vector_df():
d = (
product_vector_manager
.get_document_frequency_vector()
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/idf')
def vector_idf():
d = (
product_vector_manager
.get_inverse_document_frequency_vector()
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/tf/<doc_id:int>')
def vector_tf(doc_id):
d = (
product_vector_manager
.get_term_frequency_vector(doc_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/tfidf/<doc_id:int>')
def vector_tfidf(doc_id):
d = (
product_vector_manager
.get_tfidf_vector(doc_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/user/<user_id:int>')
def vector_user_by_id(user_id):
d = (
user_vector_manager
.get_user_vector_for_id(user_id)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/vector/user/<user_name>')
def vector_user_by_name(user_name):
d = (
user_vector_manager
.get_user_vector_for_name(user_name)
.as_dictionary()
)
result = {'result': d}
return result
@bottle.route('/user/all')
def get_all_users():
user_list = user_vector_manager.get_all_users_by_name()
result = {'result': user_list}
return result
@bottle.route('/user/create/<user_name>')
def create_user_by_name(user_name):
user_vector_manager.create_user(user_name)
return {'result': True}
@bottle.route('/user/exists/<user_name>')
def exists_user_by_name(user_name):
d = {}
d['exists'] = user_vector_manager.has_user_with_name(user_name)
result = {'result': d}
return result
@bottle.route('/user/remove/<user_name>', method='DELETE')
def remove_user_by_name(user_name):
try:
user_id = user_vector_manager.get_user_id_for_name(user_name)
user_vector_manager.remove_user(user_id)
except:
return {'result': False}
return {'result': True}
@bottle.route('/user/createifnotexist/<user_name>')
def create_user_if_not_exists(user_name):
if not user_vector_manager.has_user_with_name(user_name):
create_user_by_name(user_name)
return {'result': True}
@bottle.route('/user/setpreference/<user_name>/<product_id:int>')
def add_preference_to_user(user_name, product_id):
user_id = user_vector_manager.get_user_id_for_name(user_name)
user_vector_manager.set_user_preference(user_id, product_id, True)
return {'result': True}
@bottle.route('/user/setnopreference/<user_name>/<product_id:int>')
def remove_preference_from_user(user_name, product_id):
    """Record that `user_name` dislikes `product_id` (relevance=False).

    Renamed from `add_preference_to_user`: the original redefined the
    function declared just above, silently shadowing it at module level.
    Bottle registers handlers at decoration time, so both routes keep
    working; the rename only removes the name collision.
    """
    user_id = user_vector_manager.get_user_id_for_name(user_name)
    user_vector_manager.set_user_preference(user_id, product_id, False)
    return {'result': True}
@bottle.route('/user/update/<user_name>')
def get_user_update(user_name):
    """Run the Rocchio user-vector update with the default weights.

    NOTE(review): only `recommender.rocchio.algorithm` is imported at the
    top of this file; `recommender.rocchio.default_weights` resolves only
    if the rocchio package exports it — confirm.
    """
    user_id = user_vector_manager.get_user_id_for_name(user_name)
    weights = recommender.rocchio.default_weights()
    update_user(user_id, weights)
    return {'result': True}
@bottle.route('/user/update/<user_name>/<alpha:int>/<beta:int>/<gamma:int>')
def get_user_update_weighted(user_name, alpha, beta, gamma):
    """Run the Rocchio user-vector update with explicit weights.

    alpha/beta/gamma arrive as integer percentages; each is clamped to
    [0, 100] and scaled to the unit interval before being handed to
    update_user.

    Renamed from `get_user_update`: the original redefined the handler
    declared just above, shadowing it at module level; bottle had already
    registered both routes, so routing behavior is unchanged.
    """
    user_id = user_vector_manager.get_user_id_for_name(user_name)
    # Clamp each percentage into [0, 100] (replaces the if/elif ladders).
    alpha = min(max(alpha, 0), 100)
    beta = min(max(beta, 0), 100)
    gamma = min(max(gamma, 0), 100)
    weights = alpha / 100, beta / 100, gamma / 100
    update_user(user_id, weights)
    return {'result': True}
@bottle.route('/user/relevant/<user_name>')
def get_user_preference(user_name):
    """List the products the user has marked as relevant."""
    uid = user_vector_manager.get_user_id_for_name(user_name)
    vectors = user_vector_manager.get_relevant_document_vector_list(uid)
    products = []
    for vec in vectors:
        products.append(product_manager.get_product(vec.document_id).as_dictionary())
    return {'result': products}
@bottle.route('/user/nonrelevant/<user_name>')
def get_user_no_preference(user_name):
    """List the products the user has marked as not relevant."""
    uid = user_vector_manager.get_user_id_for_name(user_name)
    vectors = user_vector_manager.get_non_relevant_document_vector_list(uid)
    products = []
    for vec in vectors:
        products.append(product_manager.get_product(vec.document_id).as_dictionary())
    return {'result': products}
@bottle.route('/recommendations/<user_name>/<k:int>')
def get_recommendation(user_name, k):
    """Return the *k* products whose vectors are nearest to the user's vector."""
    user_vec = user_vector_manager.get_user_vector_for_name(user_name)
    candidates = product_vector_manager.get_all_vectors()
    # Alternative metrics (hamming/euclidean distance) are available in
    # recommender.vector.arithmetic if the default ever needs swapping.
    neighbours = vector_arithmetic.k_nearest_neighbours(k, user_vec, candidates)
    products = [
        product_manager.get_product(neighbour.document_id).as_dictionary()
        for _, neighbour in neighbours
    ]
    return {'result': products}
# Module-level singletons, populated by _init() before the server starts
# serving requests; every route handler above reads from these.
database_manager = None
product_manager = None
product_vector_manager = None
document_manager = None
user_vector_manager = None
term_manager = None
random_generator = None
# Shorthand for the vector maths helpers used by the recommendations route.
vector_arithmetic = recommender.vector.arithmetic
def run(database_path, host, port):
    """Initialise all managers from *database_path* and start the bottle server (debug mode)."""
    _init(database_path)
    bottle.run(host=host, port=port, debug=True)
def _init(database_path):
    """Populate the module-level manager singletons from *database_path*."""
    global database_manager
    global product_manager
    global product_vector_manager
    global document_manager
    global user_vector_manager
    global term_manager
    global random_generator
    # All managers share the one DatabaseManager instance.
    database_manager = DatabaseManager(database_path)
    product_manager = database_manager.get_product_manager()
    product_vector_manager = database_manager.get_product_vector_manager()
    document_manager = database_manager.get_document_manager()
    user_vector_manager = database_manager.get_user_vector_manager()
    term_manager = database_manager.get_term_manager()
    random_generator = random.Random()
def update_user(user_id, weights):
    """Recompute and persist the Rocchio-updated vector for *user_id*.

    *weights* is the (alpha, beta, gamma) triple handed straight to the
    Rocchio algorithm; relevant/non-relevant document vectors come from the
    user's stored feedback.
    """
    user_vector = user_vector_manager.get_user_vector_for_id(user_id)
    relevant = user_vector_manager.get_relevant_document_vector_list(user_id)
    non_relevant = user_vector_manager.get_non_relevant_document_vector_list(user_id)
    uvector = recommender.rocchio.algorithm.calculate(user_vector, relevant, non_relevant, weights)
    # Stray trailing semicolon and dead `pass` removed.
    user_vector_manager.update_user_vector(user_id, uvector)
| dustywind/bachelor-thesis | impl/recommender/webapi.py | webapi.py | py | 8,641 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bottle.route",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bottle.route",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bottle.route",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bottle.route",
"line_numbe... |
import os
import sys

# Warn early (before the heavy devito import) when OpenMP has not been
# enabled for Devito; `x not in y` replaces the unidiomatic `not x in y`.
if "DEVITO_OPENMP" not in os.environ or os.environ["DEVITO_OPENMP"] != "1":
    print("*** WARNING: Devito OpenMP environment variable has not been set ***", file=sys.stderr)

import numpy as np
from sympy import Matrix, Eq, solve
import progressbar
from devito import TimeData, Operator, t, x, y, z, logger as devito_logger, parameters as devito_parameters

from . import sim

devito_logger.set_log_level('WARNING')
def vector_laplacian(u):
    """Component-wise Laplacian of a 3-vector of Devito time functions."""
    return Matrix([component.dx2 + component.dy2 + component.dz2 for component in u])
def vector_gradient(u):
    """Sum of squared first spatial derivatives over every component of *u*."""
    total = 0
    for component in u:
        total = total + component.dx**2 + component.dy**2 + component.dz**2
    return total
def curl(u):
    """Curl of a 3-vector of Devito time functions."""
    ux, uy, uz = u
    return Matrix([uz.dy - uy.dz,
                   ux.dz - uz.dx,
                   uy.dx - ux.dy])
# Cache of solved update expressions keyed by the stringified LLG expression;
# avoids re-running the expensive sympy `solve` for identical parameter sets.
expression_cache = {}
class Sim(sim.Sim):
    """Devito backend for the micromagnetic simulation base class.

    Builds a compiled Devito operator that time-steps the LLG
    (Landau-Lifshitz-Gilbert) equation for the magnetisation field
    (m_x, m_y, m_z) with Zeeman, exchange, anisotropy and DMI terms.
    """
    # Human-readable backend name reported alongside benchmark results.
    framework_name = "Devito"
    @property
    def data_shape(self):
        """Grid shape as a plain tuple of built-in ints."""
        # Devito doesn't like numpy types for the grid dimensions, and it needs to be a tuple, so shape needs to be converted
        return tuple(int(i) for i in self.grid_params.n)
    def data_matrix(self, settings):
        """3-vector (sympy Matrix) of Devito TimeData fields m_x, m_y, m_z."""
        return Matrix([TimeData(name='m_x', **settings),
                       TimeData(name='m_y', **settings),
                       TimeData(name='m_z', **settings)])
    def generate_step_kernel(self):
        """Build and return a ``step(f, t)`` callable advancing m in time.

        Assembles the effective field (Zeeman + exchange + anisotropy + DMI),
        forms the LLG right-hand side (optionally with a norm-preserving
        correction term), solves for the forward time level with sympy,
        applies periodic or zero boundary conditions, and compiles a Devito
        Operator.  Solved expressions are memoised in `expression_cache`.
        """
        settings = {"shape":self.buffer_dims, "space_order":2}
        m = self.data_matrix(settings)
        # c rescales field terms; presumably 2/(mu0*Ms) from the material
        # parameters -- TODO confirm against the sim.Sim base class.
        c = 2 / (self.mu0 * self.sim_params.Ms)
        zeeman = Matrix(self.sim_params.H)
        exchange = self.sim_params.A * c * vector_laplacian(m)
        e = Matrix(self.sim_params.e)
        anisotropy = self.sim_params.K * c * m.dot(e) * e
        dmi = self.sim_params.D * c * curl(m)
        heff = zeeman + exchange + anisotropy + dmi
        crossHeff = m.cross(heff)
        dmdt_rhs = -self.gamma0 / (1 + self.sim_params.alpha**2) * (crossHeff + self.sim_params.alpha * m.cross(crossHeff))
        dmdt_lhs = Matrix([TimeData(name='dmdt_x', **settings),
                           TimeData(name='dmdt_y', **settings),
                           TimeData(name='dmdt_z', **settings)])
        # Correction term pushes |m| back towards 1 (unit magnetisation).
        dmdt_correction = self.correction * dmdt_lhs.dot(dmdt_lhs)**0.5 * (1 - m.dot(m)) * m
        # Memoisation key: the stringified symbolic system.
        string_llg = str(dmdt_rhs) + str(dmdt_correction)
        if string_llg in expression_cache:
            update = expression_cache[string_llg]
        else:
            update = []
            if self.correction > 0:
                # if using correction solve in 2 steps; calculate dmdt, then calculate m[t+1] = dmdt + correction
                for i, dmdti in enumerate(dmdt_lhs):
                    update.append(Eq(dmdti, dmdt_rhs[i]))
                llg_eqn = Matrix([mi.dt for mi in m]) - (dmdt_lhs + dmdt_correction)
            else:
                # if not using correction; m[t+1] = dmdt
                llg_eqn = Matrix([mi.dt for mi in m]) - dmdt_rhs
            print("Solving LLG Sympy expressions ...", file=sys.stderr)
            with progressbar.ProgressBar(max_value=len(m)) as bar:
                for i, mi in enumerate(m):
                    update.append(Eq(mi.forward, solve(llg_eqn[i], mi.forward)[0]))
                    bar.update(i + 1)
            expression_cache[string_llg] = update
        # Boundary conditions: either wrap each axis (periodic) or pin the
        # outermost layer to zero.
        bcs = []
        nx, ny, nz = self.buffer_dims
        if self.periodic_boundary:
            for mi in m:
                bcs += [Eq(mi.indexed[t, x, y, 0], mi.indexed[t, x, y, nz - 2])]
                bcs += [Eq(mi.indexed[t, x, y, nz - 1], mi.indexed[t, x, y, 1])]
                bcs += [Eq(mi.indexed[t, x, 0, z], mi.indexed[t, x, ny - 2, z])]
                bcs += [Eq(mi.indexed[t, x, ny - 1, z], mi.indexed[t, x, 1, z])]
                bcs += [Eq(mi.indexed[t, 0, y, z], mi.indexed[t, nx - 2, y, z])]
                bcs += [Eq(mi.indexed[t, nx - 1, y, z], mi.indexed[t, 1, y, z])]
        else:
            for mi in m:
                bcs += [Eq(mi.indexed[t, x, y, 0], 0.)]
                bcs += [Eq(mi.indexed[t, x, y, nz - 1], 0.)]
                bcs += [Eq(mi.indexed[t, x, 0, z], 0.)]
                bcs += [Eq(mi.indexed[t, x, ny - 1, z], 0.)]
                bcs += [Eq(mi.indexed[t, 0, y, z], 0.)]
                bcs += [Eq(mi.indexed[t, nx - 1, y, z], 0.)]
        dx, dy, dz = self.grid_params.d
        dt = self.time_params.d
        subs = {x.spacing: dx, y.spacing: dy, z.spacing: dz, t.spacing: dt}
        op = Operator(bcs + update, subs=subs)
        # Call op trigger compilation
        op(time=1)
        def step(f, t):
            # Copy caller state in, advance `save_every` steps, copy back out.
            for i, mi in enumerate(m):
                mi.data[(0, ) + self.buffer_slice] = f[i]
            op(time=self.save_every + 1)
            for i, mi in enumerate(m):
                t[i] = mi.data[(self.save_every % 2, ) + self.buffer_slice]
        return step
    # NOTE(review): the triple-quoted block below is disabled energy-kernel
    # code kept for reference; as a bare string expression it never executes.
    """
    def energy_expr(self, m):
        dV = self.grid_params.prod_d
        e = Matrix(self.sim_params.e)
        H = Matrix(self.sim_params.H)
        Kc = dV * -self.sim_params.K
        Ac = dV * self.sim_params.A
        Dc = dV * -self.sim_params.D
        Hc = dV * -self.mu0 * self.sim_params.Ms
        return {"Zeeman":Hc * m.dot(H),
                "Exchange":Ac * vector_gradient(m),
                "Anisotropy":Kc * (m.dot(e))**2,
                "DMI":Dc * m.dot(curl(m))}
    def generate_energy_kernel(self):
        settings = {"shape":self.buffer_dims, "space_order":2}
        m = self.data_matrix(settings)
        energy_expr = self.energy_expr(m)
        E = TimeData(name='E', **settings)
        eqn = Eq(E, sum(energy_expr.values()))
        dx, dy, dz = self.grid_params.d
        subs = {x.spacing: dx, y.spacing: dy, z.spacing: dz}
        # turn dle off because some eqns are 1st and some are 2nd order, requiring different bounds.
        op = Operator(eqn, subs=subs, dle=False)
        # Call op trigger compilation
        op()
        def energy(d):
            for i, mi in enumerate(m):
                mi.data[0] = d[i]
            op(time=1)
            return E.data[0]
        return energy
    def generate_detailed_energy_kernel(self, terms):
        def energy(d):
            settings = {"shape":self.buffer_dims, "space_order":2, "time_dim":len(d), "save":True}
            m = self.data_matrix(settings)
            energy_expr = self.energy_expr(m)
            names = [k for k in terms if k in energy_expr]
            symbols = []
            eqns = []
            for key in names:
                symbol = TimeData(name='E_{}'.format(key), **settings)
                symbols.append(symbol)
                eqns.append(Eq(symbol, energy_expr[key]))
            dx, dy, dz = self.grid_params.d
            subs = {x.spacing: dx, y.spacing: dy, z.spacing: dz}
            # turn dle off because some eqns are 1st and some are 2nd order, requiring different bounds.
            op = Operator(eqns, subs=subs, dle=False)
            for i, mi in enumerate(m):
                for j, dj in enumerate(d):
                    mi.data[j] = dj[i]
            op()
            ret = {}
            for i, name in enumerate(names):
                ret[name] = []
                for dj in symbols[i].data:
                    ret[name].append(dj)
            return ret
        return energy
    """
| gamdow/ACG-feasibility | wrapper_pkg/devito.py | devito.py | py | 7,301 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "devito.logger.set_log_level",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "devito.logg... |
27318807553 | # @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
import time
from sys import platform
import serial
class MoveControllerCommunication:
    """JSON-over-serial bridge to the move controller hardware.

    Exposes high-level driving primitives (turn/drive/backward on two fixed
    circle radii) that are translated into left/right motor speeds by the
    injected ``carModel`` and sent as JSON ``move`` commands; ``sonic``
    commands query the distance sensors.
    """

    def __init__(self, carModel, com=None, baudrate=9600, changeMoveCallback=None):
        # Radii of the two supported driving circles (presumably metres --
        # TODO confirm against carModel.getMotorSpeedFromRadius).
        self.radiusBig = 0.55
        self.radiusSmall = 0.365
        self.error = False
        self.carModel = carModel
        self.changeMoveCallback = changeMoveCallback
        if com is None:
            # Default serial port depends on the host OS.
            com = 'COM7' if platform == "win32" else '/dev/ttyACM0'
        try:
            # BUG FIX: the `baudrate` parameter was previously ignored
            # (this call hard-coded 9600 regardless of the argument).
            self.communication = serial.Serial(com, baudrate=baudrate,
                                               timeout=2.5,
                                               parity=serial.PARITY_NONE,
                                               bytesize=serial.EIGHTBITS,
                                               stopbits=serial.STOPBITS_ONE)
            time.sleep(1)  # let the controller reset after the port opens
            self.communication.reset_input_buffer()
        except Exception:  # narrowed from bare `except:`; port may be missing/busy
            print('Error: No Move controller available over port ' + com)
            self.error = True

    def turnLeft(self):
        """Turn left on the small radius, driving forward."""
        self.driveCircle(self.radiusSmall, True, True)

    def turnRight(self):
        """Turn right on the small radius, driving forward."""
        self.driveCircle(self.radiusSmall, True, False)

    def drive(self):
        """Drive straight ahead (infinite radius)."""
        self.driveCircle(float('inf'), True, False)

    def driveLeft(self):
        """Drive forward on the big left-hand circle."""
        self.driveCircle(self.radiusBig, True, True)

    def driveRight(self):
        """Drive forward on the big right-hand circle."""
        self.driveCircle(self.radiusBig, True, False)

    def backwardLeft(self):
        """Drive backward on the big left-hand circle."""
        self.driveCircle(self.radiusBig, False, True)

    def backwardRight(self):
        """Drive backward on the big right-hand circle."""
        self.driveCircle(self.radiusBig, False, False)

    def backward(self):
        """Drive straight backward (infinite radius)."""
        self.driveCircle(float('inf'), False, False)

    def stop(self):
        """Stop both motors and report zero gear/speed to the callback."""
        if self.changeMoveCallback is not None:
            self.changeMoveCallback(0, 0)
        self.move([0, 0])

    def fullLeft(self):
        """Briefly counter-rotate the wheels for a spot turn, then stop."""
        self.move([-100, 100])
        time.sleep(0.2)
        self.stop()

    def driveCircle(self, radius, forward, left):
        """Drive along a circle of *radius*; direction set by *forward*/*left*."""
        motor, gear, speed = self.carModel.getMotorSpeedFromRadius(radius, forward, left)
        print('l:', round(motor[0]), 'r:', round(motor[1]), 'g:', round(gear, 4), 's:', round(speed, 4))
        if self.changeMoveCallback is not None:
            self.changeMoveCallback(gear, speed)
        self.move(motor)

    def move(self, motor):
        """Send a raw [left, right] motor command; no-op when the port failed."""
        if not self.error:
            command = {"command": "move", "left": int(motor[0]), "right": int(motor[1])}
            self.communication.write(json.dumps(command).encode('ascii'))

    def getSonic(self):
        """Request and return the sonic sensor readings as a dict.

        Always returns a dict: the zeroed default is returned both when the
        port is unavailable (previously this case fell through and returned
        None, crashing callers expecting a dict) and when the reply cannot
        be parsed as JSON.
        """
        if not self.error:
            inputBuffer = self.communication.readline()
            command = {"command": "sonic"}
            self.communication.write(json.dumps(command).encode('ascii'))
            try:
                return json.loads(inputBuffer)
            except (ValueError, UnicodeDecodeError):  # narrowed from bare `except:`; covers JSONDecodeError
                print("exception in get Sonic")
        return {"left": 0, "right": 0, "middle": 0}
| iisys-hof/autonomous-driving | car-controller/src/mainController/Controller/MoveController/MoveControllerCommunication.py | MoveControllerCommunication.py | py | 3,571 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.platform",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "serial.Serial",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "serial.PARITY_NONE",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "serial.EIGHTBITS",... |
72532362749 | import logging
from asyncio import CancelledError, Task, create_task
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager, suppress
from fastapi import FastAPI
from servicelib.logging_utils import log_context
from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT
from ._context import OutputsContext
from ._event_filter import EventFilter
from ._event_handler import EventHandlerObserver
from ._manager import OutputsManager
# Module-level logger, named after this module.
_logger = logging.getLogger(__name__)
class OutputsWatcher:
    """Forwards port-key events from an ``OutputsContext`` to an ``OutputsManager``.

    A background task drains the context's event queue into an
    ``EventFilter``; an ``EventHandlerObserver`` keeps the filesystem
    observer alive.
    """

    def __init__(
        self, *, outputs_manager: OutputsManager, outputs_context: OutputsContext
    ) -> None:
        self.outputs_manager = outputs_manager
        self.outputs_context = outputs_context
        self._task_events_worker: Task | None = None
        self._event_filter = EventFilter(outputs_manager=outputs_manager)
        self._observer_monitor: EventHandlerObserver = EventHandlerObserver(
            outputs_context=self.outputs_context,
            outputs_manager=self.outputs_manager,
            heart_beat_interval_s=DEFAULT_OBSERVER_TIMEOUT,
        )

    async def _worker_events(self) -> None:
        # A ``None`` event is the sentinel that stops this worker.
        while (
            event := await self.outputs_context.port_key_events_queue.coro_get()
        ) is not None:
            await self._event_filter.enqueue(event)

    async def enable_event_propagation(self) -> None:
        """Unmute port-key event propagation."""
        await self.outputs_context.toggle_event_propagation(is_enabled=True)

    async def disable_event_propagation(self) -> None:
        """Mute port-key event propagation."""
        await self.outputs_context.toggle_event_propagation(is_enabled=False)

    async def start(self) -> None:
        """Spawn the queue worker and start the filter and observer monitor."""
        with log_context(_logger, logging.INFO, f"{OutputsWatcher.__name__} start"):
            self._task_events_worker = create_task(
                self._worker_events(), name="outputs_watcher_events_worker"
            )
            await self._event_filter.start()
            await self._observer_monitor.start()

    async def shutdown(self) -> None:
        """cleans up spawned tasks which might be pending"""
        with log_context(_logger, logging.INFO, f"{OutputsWatcher.__name__} shutdown"):
            await self._event_filter.shutdown()
            await self._observer_monitor.stop()
            worker = self._task_events_worker
            if worker is not None:
                worker.cancel()
                with suppress(CancelledError):
                    await worker
def setup_outputs_watcher(app: FastAPI) -> None:
    """Register startup/shutdown handlers managing an OutputsWatcher on ``app.state``."""

    async def on_startup() -> None:
        assert isinstance(app.state.outputs_context, OutputsContext)  # nosec
        outputs_context: OutputsContext = app.state.outputs_context
        outputs_manager: OutputsManager
        outputs_manager = app.state.outputs_manager  # nosec
        watcher = OutputsWatcher(
            outputs_manager=outputs_manager,
            outputs_context=outputs_context,
        )
        app.state.outputs_watcher = watcher
        await watcher.start()
        # Events stay muted until a consumer explicitly enables propagation.
        await disable_event_propagation(app)

    async def on_shutdown() -> None:
        watcher: OutputsWatcher | None = app.state.outputs_watcher
        if watcher is not None:
            await watcher.shutdown()

    app.add_event_handler("startup", on_startup)
    app.add_event_handler("shutdown", on_shutdown)
async def disable_event_propagation(app: FastAPI) -> None:
    """Mute port-key events if a watcher is installed on ``app.state``."""
    watcher: OutputsWatcher | None = app.state.outputs_watcher
    if watcher is not None:
        await watcher.disable_event_propagation()
async def enable_event_propagation(app: FastAPI) -> None:
    """Unmute port-key events if a watcher is installed on ``app.state``."""
    watcher: OutputsWatcher | None = app.state.outputs_watcher
    if watcher is not None:
        await watcher.enable_event_propagation()
@asynccontextmanager
async def event_propagation_disabled(app: FastAPI) -> AsyncGenerator[None, None]:
    """Async context manager: events muted inside the block, re-enabled on exit."""
    try:
        await disable_event_propagation(app)
        yield
    finally:
        # Re-enable even if disabling (or the body) raised.
        await enable_event_propagation(app)
| ITISFoundation/osparc-simcore | services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watcher.py | _watcher.py | py | 4,081 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "_manager.OutputsManager",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "_context.OutputsContext",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "asy... |
8022762471 | #!/usr/bin/env python
# encoding: utf-8
# @Time : 2019-07-31 10:24
__author__ = 'Ted'
from PIL import Image, ImageFont, ImageDraw
# Ad copy ("ad") and avatar filename ("head") for each of the nine grid
# cells, keyed by a zero-padded index ("001".."009"); "back_img" is the
# shared card background.
content={
    "back_img":"pre/paper.jpg",
    "001":{
        "ad":'老板,买10盒月饼呗',
        "head":'001.jpg'
    },
    "002": {
        "ad": '老板,买20盒月饼呗',
        "head": '002.jpg'
    },
    "003": {
        "ad": '老板,生活不易,买50盒月饼呗',
        "head": '003.jpg'
    },
    "004": {
        "ad": '老板,买个80盒月饼,不多',
        "head": '004.jpg'
    },
    "005": {
        "ad": '老板,看面相,你应该买100盒月饼',
        "head": '005.jpg'
    },
    "006": {
        "ad": '老板,恭喜你中奖了,奖品是150盒月饼',
        "head": '006.jpg'
    },
    "007": {
        "ad": '老板,你的员工让我告诉你,他们想吃月饼了',
        "head": '007.jpg'
    },
    "008": {
        "ad": '老板,我卖月饼,买200盒呗',
        "head": '008.jpg'
    },
    "009": {
        "ad": '老板,不整500盒月饼送礼啊',
        "head": '009.jpg'
    }
}
def get_pic(background, head, adcontent, mark, pic_name):
    """Compose one ad card and save it to *pic_name*.

    background -- path of the background image.
    head       -- avatar filename inside the ``head/`` directory.
    adcontent  -- ad text; wrapped onto a new line at every ASCII ","
                  (NOTE(review): the ads in `content` use the fullwidth
                  comma ",", so in practice the split may never fire --
                  confirm the intended delimiter).
    mark       -- large index character drawn mid-card.
    pic_name   -- output filename.
    """
    im = Image.open(background)
    head_img = Image.open(f"head/{head}").resize((150, 150), Image.ANTIALIAS)
    im.paste(head_img, (75, 20))
    draw = ImageDraw.Draw(im)
    fnt = ImageFont.truetype("pre/SimSun.ttf", 20)
    ad_parts = adcontent.split(",")
    y_pos = 180
    for idx, ad_part in enumerate(ad_parts):
        # BUG FIX: the original compared each part against ad_parts[-1] by
        # *value* (`ad_part != ad_parts[-1]`), so a middle line equal to the
        # last line lost its trailing comma; compare positions instead.
        line = ad_part if idx == len(ad_parts) - 1 else ad_part + ","
        ad_w, ad_h = draw.textsize(line, font=fnt)
        draw.text(((300 - ad_w) / 2, y_pos), line, font=fnt, fill=(0, 0, 0))
        y_pos += ad_h + 10
    mark_font = ImageFont.truetype("pre/arial.ttf", 100)
    draw.text((125, 400), mark, font=mark_font, fill=(0, 0, 0))
    haha = Image.open("pre/haha.jpg")
    im.paste(haha, (0, 650))
    qrcode = Image.open("pre/tedxpy.jpg").resize((80, 80), Image.ANTIALIAS)
    im.paste(qrcode, (180, 810))
    sign_font = ImageFont.truetype("pre/SimSun.ttf", 10)
    draw.text((60, 875), "自定义制作图片,请扫码", font=sign_font, fill=(0, 0, 0))
    im.save(pic_name)
if __name__ == "__main__":
    # Generate the nine grid images, one per "00N" entry in `content`.
    for idx in range(1, 10):
        entry = content[f'00{idx}']
        get_pic("pre/paper.jpg", entry['head'], entry['ad'], f"{idx}", f"{idx}.jpg")
    print("九宫格图片生成完毕!")
| pengfexue2/friends_ad | create_pics.py | create_pics.py | py | 2,590 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.