seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41724455675 | from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import Sequential, Model, optimizers
from keras.layers import Dropout, Flatten, Dense, Input
import os

# Run with GPU: build a TF session that can see the GPU and register it with Keras.
import tensorflow as tf
config = tf.compat.v1.ConfigProto(device_count={'GPU': 1, 'CPU': 56})
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)

# Path to the model weights files.
# NOTE: weights_path is kept for reference only -- VGG16(weights='imagenet')
# below downloads its own copy of these weights.
weights_path = './input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
top_model_weights_path = ''

# Dimensions of our images
img_width, img_height = 224, 224
train_data_dir = './input/data/train'
validation_data_dir = './input/data/validation'

# Input tensor
input_tensor = Input(shape=(img_height, img_width, 3))

# Build the VGG16 network (convolutional base only, no classifier head)
model = applications.VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)

# Build a binary classifier model to put on top of the conv base
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
# BUGFIX: was Dropout(0,5), which passes rate=0 (no dropout at all) and
# noise_shape=5.  The intended regularization is a 50% dropout rate.
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

# Load fully-trained top-model weights (disabled: path is empty)
# top_model.load_weights(top_model_weights_path)

# Stack the classifier head on top of the convolutional base
model = Model(inputs = model.input, outputs = top_model(model.output))

# set the first 15 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:15]:
    layer.trainable = False

model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr = 1e-4, momentum=0.9),
              metrics=['accuracy'])
# model.compile(loss='binary_crossentropy',
#               optimizer=optimizers.adam(lr = 1e-4),
#               metrics=['accuracy'])

# prepare data augmentation configuration
batch_size = 15
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

# Train model
# Batches per epoch; the +1 covers a final partial batch.  NOTE(review): when
# n is an exact multiple of batch_size this repeats one batch per epoch -- the
# looping generator makes that harmless, but confirm it is intended.
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size + 1
STEP_SIZE_VALI = validation_generator.n//validation_generator.batch_size + 1
print(validation_generator.n)
history = model.fit_generator(
    train_generator,
    steps_per_epoch=STEP_SIZE_TRAIN,
    epochs=50,
    validation_data=validation_generator,
    validation_steps=STEP_SIZE_VALI,
    verbose=2
)
model.save('model.h5')
import pickle
pickle.dump(history.history, open('log', "wb")) | lucas2298/midtermIMP | vgg16.py | vgg16.py | py | 2,816 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.Session",
"line_number": 11,
"usage_type": "call"
},
{
... |
7419507068 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import discretization
from tensor2tensor.layers import latent_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import t2t_model
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from galflow import convolve
from galaxy2galaxy.layers.image_utils import pack_images
def loglikelihood_fn(xin, yin, features, hparams):
  """Log-likelihood of reconstruction `yin` given target image `xin`.

  Two Gaussian likelihood models are supported, chosen by
  hparams.likelihood_type:
    - 'Fourier': chi^2 computed in Fourier space, with each mode divided by
      sqrt(exp(features['ps'])) (presumably a log power spectrum -- confirm
      against the input pipeline) and a 1/size^2 * (2*pi)^2 normalization.
    - 'Pixel': chi^2 computed directly on pixels of the first channel, with a
      constant noise rms taken from hparams.noise_rms.

  Returns the negated chi^2 (so larger is more likely); raises
  NotImplementedError for any other likelihood_type.
  """
  stamp_size = xin.get_shape().as_list()[1]
  likelihood = hparams.likelihood_type

  if likelihood == 'Fourier':
    # Compute the per-mode normalization once and reuse it for both images.
    whitening = tf.complex(tf.sqrt(tf.exp(features['ps'])), 0.)
    x_fourier = tf.spectral.rfft2d(xin[..., 0]) / whitening / stamp_size**2 * (2 * np.pi)**2
    y_fourier = tf.spectral.rfft2d(yin[..., 0]) / whitening / stamp_size**2 * (2 * np.pi)**2
    chi2 = 0.5 * tf.reduce_sum(tf.abs(x_fourier - y_fourier)**2, axis=[-1, -2])
    return -chi2

  if likelihood == 'Pixel':
    # TODO: include per example noise std
    chi2 = 0.5 * tf.reduce_sum(tf.abs(xin[:, :, :, 0] - yin[..., 0])**2, axis=[-1, -2]) / hparams.noise_rms**2
    return -chi2

  raise NotImplementedError
def image_summary(name, image_logits, max_outputs=1, rows=4, cols=4):
  """Emit a tiled image summary; a safe no-op for non rank-4 tensors (TPU-friendly)."""
  rank = len(image_logits.get_shape())
  if rank != 4:
    tf.logging.info("Not generating image summary, maybe not an image.")
    return
  tiled = pack_images(image_logits, rows, cols)
  return tf.summary.image(name, tiled, max_outputs=max_outputs)
def autoencoder_body(self, features):
  """ Customized body function for autoencoders acting on continuous images.
  This is based on tensor2tensor.models.research.AutoencoderBasic.body
  and should be compatible with most derived classes.

  The original autoencoder class relies on embedding the channels to a discrete
  vocabulary and defines the loss on that vocab. It's cool and all, but here we
  prefer expressing the reconstruction loss as an actual continuous likelihood
  function.
  """
  hparams = self.hparams
  is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN

  output_activation = tf.nn.softplus if hparams.output_activation == 'softplus' else None
  input_shape = [None, ] + common_layers.shape_list(features["inputs"])[1:]

  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    # In predict mode, we also define TensorFlow Hub modules for all pieces of
    # the autoencoder.  NOTE: this branch always returns, so every PREDICT
    # check further down this function is effectively dead code.
    if hparams.encode_psf and 'psf' in features:
      psf_shape = [None, ] + common_layers.shape_list(features["psf"])[1:]
    # First build encoder spec (plain variant, no PSF input)
    def make_model_spec():
      input_layer = tf.placeholder(tf.float32, shape=input_shape)
      x = self.embed(tf.expand_dims(input_layer, -1))
      x, encoder_layers = self.encoder(x)
      b, b_loss = self.bottleneck(x)
      hub.add_signature(inputs=input_layer, outputs=b)

    # Encoder variant that additionally conditions on the PSF.
    def make_model_spec_psf():
      input_layer = tf.placeholder(tf.float32, shape=input_shape)
      psf_layer = tf.placeholder(tf.float32, shape=psf_shape)
      x = self.embed(tf.expand_dims(input_layer, -1))
      # If we have access to the PSF, we add this information to the encoder
      if hparams.encode_psf and 'psf' in features:
        # The PSF arrives in Fourier space (channel 0); go back to image space.
        psf_image = tf.expand_dims(tf.signal.irfft2d(tf.cast(psf_layer[...,0], tf.complex64)), axis=-1)
        # Roll the image to undo the fftshift, assuming x1 zero padding and x2 subsampling
        psf_image = tf.roll(psf_image, shift=[input_shape[1], input_shape[2]], axis=[1,2])
        psf_image = tf.image.resize_with_crop_or_pad(psf_image, input_shape[1], input_shape[2])
        net_psf = tf.layers.conv2d(psf_image,
                                   hparams.hidden_size // 4, 5,
                                   padding='same', name="psf_embed_1")
        net_psf = common_layers.layer_norm(net_psf, name="psf_norm")
        x, encoder_layers = self.encoder(tf.concat([x, net_psf], axis=-1))
      else:
        x, encoder_layers = self.encoder(x)
      b, b_loss = self.bottleneck(x)
      hub.add_signature(inputs={'input':input_layer, 'psf':psf_layer}, outputs=b)

    spec = hub.create_module_spec(make_model_spec_psf if hparams.encode_psf else make_model_spec, drop_collections=['checkpoints'])
    encoder = hub.Module(spec, name="encoder_module")
    hub.register_module_for_export(encoder, "encoder")

    if hparams.encode_psf:
      code = encoder({'input':features["inputs"], 'psf': features['psf']})
    else:
      code = encoder(features["inputs"])
    b_shape = [None, ] + common_layers.shape_list(code)[1:]
    res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
    res_size = min(res_size, hparams.max_hidden_size)

    # Second build decoder spec (intentionally re-binds the name
    # make_model_spec; the encoder spec above has already been consumed).
    def make_model_spec():
      input_layer = tf.placeholder(tf.float32, shape=b_shape)
      x = self.unbottleneck(input_layer, res_size)
      x = self.decoder(x, None)
      reconstr = tf.layers.dense(x, self.num_channels, name="autoencoder_final",
                                 activation=output_activation)
      hub.add_signature(inputs=input_layer, outputs=reconstr)
      # Attach postage-stamp metadata so consumers of the exported module
      # know the image geometry.
      hub.attach_message("stamp_size", tf.train.Int64List(value=[hparams.problem_hparams.img_len]))
      hub.attach_message("pixel_size", tf.train.FloatList(value=[hparams.problem_hparams.pixel_scale]))
    spec = hub.create_module_spec(make_model_spec, drop_collections=['checkpoints'])
    decoder = hub.Module(spec, name="decoder_module")
    hub.register_module_for_export(decoder, "decoder")
    reconstr = decoder(code)
    return reconstr , {"bottleneck_loss": 0.0}

  encoder_layers = None
  self.is1d = hparams.sample_width == 1
  if (hparams.mode != tf.estimator.ModeKeys.PREDICT
      or self._encode_on_predict):
    labels = features["targets_raw"]
    labels_shape = common_layers.shape_list(labels)

    shape = common_layers.shape_list(labels)
    with tf.variable_scope('encoder_module'):
      x = self.embed(tf.expand_dims(labels, -1))

    if shape[2] == 1:
      self.is1d = True

    # Run encoder.
    with tf.variable_scope('encoder_module'):
      # If we have access to the PSF, we add this information to the encoder
      # Note that we only support single band images so far...
      if hparams.encode_psf and 'psf' in features:
        psf_image = tf.expand_dims(tf.signal.irfft2d(tf.cast(features['psf'][...,0], tf.complex64)), axis=-1)
        # Roll the image to undo the fftshift, assuming x1 zero padding and x2 subsampling
        psf_image = tf.roll(psf_image, shift=[input_shape[1], input_shape[2]], axis=[1,2])
        psf_image = tf.image.resize_with_crop_or_pad(psf_image, input_shape[1], input_shape[2])
        net_psf = tf.layers.conv2d(psf_image,
                                   hparams.hidden_size // 4, 5,
                                   padding='same', name="psf_embed_1")
        net_psf = common_layers.layer_norm(net_psf, name="psf_norm")
        x, encoder_layers = self.encoder(tf.concat([x, net_psf], axis=-1))
      else:
        x, encoder_layers = self.encoder(x)

    # Bottleneck.
    with tf.variable_scope('encoder_module'):
      b, b_loss = self.bottleneck(x)
      xb_loss = 0.0
      b_shape = common_layers.shape_list(b)
      self._cur_bottleneck_tensor = b
      res_size = common_layers.shape_list(x)[-1]
    with tf.variable_scope('decoder_module'):
      b = self.unbottleneck(b, res_size)
    if not is_training:
      x = b
    else:
      # During training, anneal from the raw encoder output x toward the
      # bottlenecked representation b.
      l = 2**hparams.num_hidden_layers
      warm_step = int(hparams.bottleneck_warmup_steps * 0.25 * l)
      nomix_p = common_layers.inverse_lin_decay(warm_step) + 0.01
      if common_layers.should_generate_summaries():
        tf.summary.scalar("nomix_p_bottleneck", nomix_p)
      rand = tf.random_uniform(common_layers.shape_list(x))
      # This is the distance between b and x. Having this as loss helps learn
      # the bottleneck function, but if we back-propagated to x it would be
      # minimized by just setting x=0 and b=0 -- so we don't want too much
      # of the influence of this, and we stop-gradient to not zero-out x.
      x_stop = tf.stop_gradient(x)
      xb_loss = tf.reduce_mean(tf.reduce_sum(
          tf.squared_difference(x_stop, b), axis=-1))
      # To prevent this loss from exploding we clip at 1, but anneal clipping.
      clip_max = 1.0 / common_layers.inverse_exp_decay(
          warm_step, min_value=0.001)
      xb_clip = tf.maximum(tf.stop_gradient(xb_loss), clip_max)
      xb_loss *= clip_max / xb_clip
      x = tf.where(tf.less(rand, nomix_p), b, x)
  else:
    # NOTE(review): unreachable in practice -- the PREDICT branch at the top
    # of the function always returns, so the condition above is always true
    # for any mode that reaches this point.  If it ever did run, `labels`
    # would be undefined further down.
    if self._cur_bottleneck_tensor is None:
      b = self.sample()
    else:
      b = self._cur_bottleneck_tensor
    self._cur_bottleneck_tensor = b
    res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
    res_size = min(res_size, hparams.max_hidden_size)
    with tf.variable_scope('decoder_module'):
      x = self.unbottleneck(b, res_size)

  # Run decoder.
  with tf.variable_scope('decoder_module'):
    x = self.decoder(x, encoder_layers)

  # Cut to the right size and mix before returning.
  res = x
  if hparams.mode != tf.estimator.ModeKeys.PREDICT:
    res = x[:, :shape[1], :shape[2], :]

  with tf.variable_scope('decoder_module'):
    reconstr = tf.layers.dense(res, self.num_channels, name="autoencoder_final",
                               activation=output_activation)

  # We apply an optional apodization (Tukey windowing) of the output before
  # computing the loss terms below.
  if hparams.output_apodization > 0:
    nx = reconstr.get_shape().as_list()[1]
    alpha = 2 * hparams.output_apodization / nx
    from scipy.signal.windows import tukey
    # Create a tukey window
    w = tukey(nx, alpha)
    w = np.outer(w,w).reshape((1, nx, nx,1)).astype('float32')
    # And penalize non zero things at the border
    apo_loss = tf.reduce_mean(tf.reduce_sum(((1.- w)*reconstr)**2, axis=[1,2,3]))
  else:
    w = 1.0
    apo_loss = 0.

  # We apply the window
  reconstr = reconstr * w

  # Optionally regularizes further the output
  # Anisotropic TV:
  tv = tf.reduce_mean(tf.image.total_variation(reconstr))
  # Smoothed Isotropic TV:
  #im_dx, im_dy = tf.image.image_gradients(reconstr)
  #tv = tf.reduce_sum(tf.sqrt(im_dx**2 + im_dy**2 + 1e-6), axis=[1,2,3])
  #tv = tf.reduce_mean(tv)

  # Apply channel-wise convolution with the PSF if requested
  # TODO: Handle multiple bands
  if hparams.apply_psf and 'psf' in features:
    if self.num_channels > 1:
      raise NotImplementedError
    reconstr = convolve(reconstr, tf.cast(features['psf'][...,0], tf.complex64),
                        zero_padding_factor=1)

  # Losses.
  losses = {
      "bottleneck_extra": b_loss,
      "bottleneck_l2": hparams.bottleneck_l2_factor * xb_loss,
      "total_variation": hparams.total_variation_loss * tv,
      "apodization_loss": hparams.apodization_loss * apo_loss,
  }

  # Continuous reconstruction likelihood (see loglikelihood_fn above).
  loglik = loglikelihood_fn(labels, reconstr, features, hparams)
  targets_loss = tf.reduce_mean(- loglik)
  tf.summary.scalar("negloglik", targets_loss)
  tf.summary.scalar("bottleneck_loss", b_loss)

  # Compute final loss
  losses["training"] = targets_loss + b_loss + hparams.bottleneck_l2_factor * xb_loss + hparams.total_variation_loss * tv + hparams.apodization_loss * apo_loss
  logits = tf.reshape(reconstr, labels_shape)

  image_summary("ae", reconstr)
  image_summary("input", labels)
return logits, losses
| ml4astro/galaxy2galaxy | galaxy2galaxy/models/autoencoders_utils.py | autoencoders_utils.py | py | 11,475 | python | en | code | 27 | github-code | 1 | [
{
"api_name": "tensorflow.spectral.rfft2d",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.spectral",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.complex",
"line_number": 24,
"usage_type": "call"
},
{
"api_name":... |
70880919075 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup

# Fetch the CSDN front page
r = requests.get('https://www.csdn.net/')
# Fail fast on HTTP errors instead of silently parsing an error page
r.raise_for_status()
# Parse the HTML
soup = BeautifulSoup(r.text, 'html.parser')
content_list = soup.find_all('div', attrs={'class': 'title'})
# Print each headline (the text of the <a> inside each title block's <h2>)
for content in content_list:
    # Some title blocks may lack the expected <h2><a> structure; skip those
    # instead of crashing with AttributeError.
    if content.h2 is not None and content.h2.a is not None:
        print(content.h2.a.text)
| CHOPPERJJ/Python | LearningProject/PythonBasics/Crawler.py | Crawler.py | py | 397 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
}
] |
17599916978 | import threading
import time
from datetime import datetime
class StartOverException(Exception):
    """Control-flow signal: restart the tails-watcher loop from the top."""
class FluffyTailsThread(threading.Thread):
    """Background watcher that expires 'fluffy_tails' effects.

    Repeatedly looks up the earliest-expiring effect in the plugin database,
    counts down until it is due, asks the owning plugin to clear it, and then
    starts over (StartOverException drives the restart on every path).
    """

    def __init__(self, bot_object, bot):
        threading.Thread.__init__(self)
        # self.new_reminder = threading.Event()
        self.bot = bot  # NOTE(review): never read inside this class -- confirm it is needed
        self.bot_object = bot_object  # plugin that owns the fluffy_tails DB entries

    def run(self):
        # Main loop: compute the wait, count it down one second at a time,
        # clear the expired effect, then restart.
        while True:
            try:
                try:
                    time_to_wait, affected_person = self._time_until_expiry()
                except (KeyError, TypeError, IndexError):
                    # TypeError also covers the "nothing to wait for" case:
                    # _time_until_expiry returns None, and unpacking None
                    # raises TypeError.
                    raise StartOverException
                time.sleep(1)
                for second in range(0, time_to_wait + 1):
                    time.sleep(1)
                    if second >= time_to_wait:
                        # Countdown finished: remove the expired effect.
                        self.bot_object._clear_tails_effect(affected_person)
                        raise StartOverException
                # Reached only when time_to_wait was negative (empty range).
                raise StartOverException
            except StartOverException as ex:
                # Brief pause before re-checking the database.
                time.sleep(1)
                continue

    def _time_until_expiry(self):
        """Return (seconds_to_wait, username) for the earliest expiry, or None when there is nothing to wait for."""
        current_time = datetime.now()
        try:
            tail_effects = self.bot_object._Plugin__db_get(['fluffy_tails'])
            affected_person = self._get_earliest_expiry()
            earliest_expiry_time = datetime.strptime(tail_effects[affected_person]['expiration_date'], '%Y-%m-%d %H:%M:%S.%f')
        except (KeyError, IndexError, TypeError):
            # Missing/empty table, or no affected person found.
            return
        if current_time < earliest_expiry_time:
            difference = earliest_expiry_time - current_time
            time_to_wait = int(difference.total_seconds())
        else:
            # Already expired: wait a single second so it gets cleared promptly.
            time_to_wait = 1
        return time_to_wait, affected_person

    def _get_earliest_expiry(self):
        """Return the username whose effect expires first, or None when the table is empty."""
        tail_effects = self.bot_object._Plugin__db_get(['fluffy_tails'])
        # Far-future sentinel.  Timestamps in this fixed '%Y-%m-%d %H:%M:%S.%f'
        # layout compare chronologically as plain strings.
        earliest_expiry = '2200-01-01 00:00:00.000000'
        earliest_username_key = None
        for i, user in enumerate(tail_effects):
            if tail_effects[user]['expiration_date'] < earliest_expiry:
                earliest_username_key = user
                earliest_expiry = tail_effects[user]['expiration_date']
        return earliest_username_key
| Petricpwnz/NyAI | modules/fluffy_tails_thread.py | fluffy_tails_thread.py | py | 2,234 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "threading.Thread",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": ... |
12252245392 | import pandas as pd
import numpy as np
import os
import warnings
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, ReindexMapError
class Endometrial(Dataset):
    """CPTAC endometrial carcinoma (UCEC) dataset loader.

    Reads the downloaded data files into pandas DataFrames stored in
    self._data (name -> DataFrame) and reformats them into the standard
    cptac layout (Sample_ID-indexed, sorted, consistently named tables).
    """

    def __init__(self, version="latest", no_internet=False):
        """Load all of the endometrial dataframes as values in the self._data dict variable, with names as keys, and format them properly.

        Parameters:
        version (str, optional): The version number to load, or the string "latest" to just load the latest building. Default is "latest".
        no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
        """
        # Set some needed variables, and pass them to the parent Dataset class __init__ function
        valid_versions = ["2.1", "2.1.1"] # This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.

        # File manifest for each supported data release.
        data_files = {
            "2.1": [
                "acetylproteomics.cct.gz",
                "clinical.txt",
                "CNA.cct.gz",
                "definitions.txt",
                "miRNA.cct.gz",
                "phosphoproteomics_gene.cct.gz",
                "phosphoproteomics_site.cct.gz",
                "proteomics.cct.gz",
                "somatic_binary.cbt.gz",
                "somatic.maf.gz",
                "transcriptomics_circular.cct.gz",
                "transcriptomics_linear.cct.gz"],
            "2.1.1": [
                "acetylproteomics.cct.gz",
                "clinical.txt",
                "CNA.cct.gz",
                "definitions.txt",
                "miRNA.cct.gz",
                "phosphoproteomics_site.cct.gz",
                "proteomics.cct.gz",
                "somatic_binary.cbt.gz",
                "somatic.maf.gz",
                "transcriptomics_circular.cct.gz",
                "transcriptomics_linear.cct.gz",
                "UCEC_followup_9_12.xlsx"],
        }

        super().__init__(cancer_type="endometrial", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)

        # Load the data files into dataframes in the self._data dict
        loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
        for file_path in self._data_files_paths:

            # Print a loading message. We add a dot every time, so the user knows it's not frozen.
            loading_msg = loading_msg + "."
            print(loading_msg, end='\r')

            path_elements = file_path.split(os.sep) # Get a list of the levels of the path
            file_name = path_elements[-1] # The last element will be the name of the file
            df_name = file_name.split(".")[0] # Dataframe name will be the first section of file name; i.e. proteomics.txt.gz becomes proteomics

            # Load the file, based on what it is
            if file_name == "clinical.txt":
                # Fix for reading error on clinical.txt: ignore undecodable bytes
                with open(file_path, "r", errors="ignore") as clinical_file:
                    df = pd.read_csv(clinical_file, sep="\t", index_col=0)
                df = df.sort_index()
                self._data[df_name] = df # Maps dataframe name to dataframe
            elif file_name == "definitions.txt":
                # Tab-separated term/definition pairs, one per line.
                with open(file_path, "r") as definitions_file:
                    for line in definitions_file.readlines():
                        line = line.strip()
                        line = line.split("\t")
                        term = line[0]
                        definition = line[1]
                        self._definitions[term] = definition
            elif file_name == "somatic.maf.gz":
                # Make changes here:
                # Parse the MAF file into a dataframe
                # Here with the following change, nothing gets removed
                df = pd.read_csv(file_path, sep = "\t")
                ###################################################################################
                self._maf = df # Directly put in the read file, unfiltered
                ###################################################################################
                split_barcode = df["Tumor_Sample_Barcode"].str.split("_", n=1, expand=True) # The first part of the barcode is the patient id, which we want to make the index
                df["Tumor_Sample_Barcode"] = split_barcode[0]
                df = df[["Tumor_Sample_Barcode","Hugo_Symbol","Variant_Classification","HGVSp_Short"]]
                df = df.rename({"Tumor_Sample_Barcode":"Patient_ID","Hugo_Symbol":"Gene","Variant_Classification":"Mutation","HGVSp_Short":"Location"}, axis='columns')
                df = df.sort_values(by=["Patient_ID", "Gene"])
                df = df.set_index("Patient_ID")
                self._data["somatic_mutation"] = df # Maps dataframe name to dataframe
            elif file_name == "acetylproteomics.cct.gz" or file_name == "phosphoproteomics_site.cct.gz":
                df = pd.read_csv(file_path, sep = "\t", index_col=0)
                df.index = df.index.str.rsplit('-', n=1, expand=True) # Separate the index into a multiindex where the 1st level is the gene, and 2nd is the site
                df.index = df.index.set_names(["Name", "Site"]) # Properly name the levels
                df = df.sort_index()
                df = df.transpose()
                self._data[df_name] = df # Maps dataframe name to dataframe
            elif file_name == 'UCEC_followup_9_12.xlsx' and self._version == "2.1.1":
                df = pd.read_excel(file_path)

                # Replace redundant values for 'not reported' with NaN
                nan_equivalents = ['Not Reported/ Unknown', 'Reported/ Unknown', 'Not Applicable', 'na', 'unknown',
                    'Not Performed', 'Unknown tumor status', 'Unknown', 'Unknown Tumor Status', 'Not specified']
                df = df.replace(nan_equivalents, np.nan)

                # Rename, set, and sort index
                df = df.rename(columns={'Case ID': 'Patient_ID'})
                df = df.set_index("Patient_ID")
                df = df.sort_index()
                self._data["followup"] = df
            else:
                # All remaining files are plain feature-by-sample tables;
                # transpose to samples-as-rows.
                df = pd.read_csv(file_path, sep="\t", index_col=0)
                df = df.transpose()
                df = df.sort_index()
                self._data[df_name] = df # Maps dataframe name to dataframe

        print(' ' * len(loading_msg), end='\r') # Erase the loading message
        formatting_msg = "Formatting dataframes..."
        print(formatting_msg, end='\r')

        # Separate out clinical, derived_molecular, and experimental_design dataframes
        all_clinical = self._data["clinical"]

        clinical = all_clinical[[
            'Proteomics_Participant_ID', 'Case_excluded', 'Proteomics_Tumor_Normal', 'Country',
            'Histologic_Grade_FIGO', 'Myometrial_invasion_Specify', 'Histologic_type', 'Treatment_naive', 'Tumor_purity',
            'Path_Stage_Primary_Tumor-pT', 'Path_Stage_Reg_Lymph_Nodes-pN', 'Clin_Stage_Dist_Mets-cM', 'Path_Stage_Dist_Mets-pM',
            'tumor_Stage-Pathological', 'FIGO_stage', 'LVSI', 'BMI', 'Age', 'Diabetes', 'Race', 'Ethnicity', 'Gender', 'Tumor_Site',
            'Tumor_Site_Other', 'Tumor_Focality', 'Tumor_Size_cm', 'Num_full_term_pregnancies']]
        clinical = clinical.rename(columns={"Proteomics_Participant_ID":"Patient_ID"})
        self._data["clinical"] = clinical

        # Everything that is neither clinical nor experimental design goes to derived_molecular.
        derived_molecular = all_clinical.drop(['Proteomics_Participant_ID', 'Case_excluded', 'Proteomics_Tumor_Normal', 'Country',
            'Histologic_Grade_FIGO', 'Myometrial_invasion_Specify', 'Histologic_type', 'Treatment_naive', 'Tumor_purity',
            'Path_Stage_Primary_Tumor-pT', 'Path_Stage_Reg_Lymph_Nodes-pN', 'Clin_Stage_Dist_Mets-cM', 'Path_Stage_Dist_Mets-pM',
            'tumor_Stage-Pathological', 'FIGO_stage', 'LVSI', 'BMI', 'Age', 'Diabetes', 'Race', 'Ethnicity', 'Gender', 'Tumor_Site',
            'Tumor_Site_Other', 'Tumor_Focality', 'Tumor_Size_cm', 'Num_full_term_pregnancies',
            'Proteomics_TMT_batch', 'Proteomics_TMT_plex', 'Proteomics_TMT_channel', 'Proteomics_Parent_Sample_IDs',
            'Proteomics_Aliquot_ID', 'Proteomics_OCT', 'WXS_normal_sample_type', 'WXS_normal_filename', 'WXS_normal_UUID', 'WXS_tumor_sample_type', 'WXS_tumor_filename',
            'WXS_tumor_UUID', 'WGS_normal_sample_type', 'WGS_normal_UUID', 'WGS_tumor_sample_type', 'WGS_tumor_UUID', 'RNAseq_R1_sample_type', 'RNAseq_R1_filename', 'RNAseq_R1_UUID',
            'RNAseq_R2_sample_type', 'RNAseq_R2_filename', 'RNAseq_R2_UUID', 'miRNAseq_sample_type', 'miRNAseq_UUID', 'Methylation_available', 'Methylation_quality'], axis=1)
        derived_molecular = derived_molecular.rename(columns={"JAK1_Mutation":"JAK1_Mutation_status"})
        self._data["derived_molecular"] = derived_molecular

        experimental_design = all_clinical[['Proteomics_TMT_batch', 'Proteomics_TMT_plex', 'Proteomics_TMT_channel', 'Proteomics_Parent_Sample_IDs',
            'Proteomics_Aliquot_ID', 'Proteomics_OCT', 'WXS_normal_sample_type', 'WXS_normal_filename', 'WXS_normal_UUID', 'WXS_tumor_sample_type', 'WXS_tumor_filename',
            'WXS_tumor_UUID', 'WGS_normal_sample_type', 'WGS_normal_UUID', 'WGS_tumor_sample_type', 'WGS_tumor_UUID', 'RNAseq_R1_sample_type', 'RNAseq_R1_filename', 'RNAseq_R1_UUID',
            'RNAseq_R2_sample_type', 'RNAseq_R2_filename', 'RNAseq_R2_UUID', 'miRNAseq_sample_type', 'miRNAseq_UUID', 'Methylation_available', 'Methylation_quality']]
        self._data["experimental_design"] = experimental_design

        # Drop all excluded samples from the dataset. They were excluded due to poor sample quality, etc.
        clinical = self._data["clinical"]
        cases_to_drop = clinical[clinical["Case_excluded"] == "Yes"].index.union(clinical[clinical["Case_excluded"] == "Yes"]["Patient_ID"])

        for name in self._data.keys(): # Loop over the keys so we can alter the values without any issues
            df = self._data[name]
            df_filtered = df.drop(index=cases_to_drop, errors="ignore")
            self._data[name] = df_filtered

        # Drop Case_excluded column from clinical, now that we've dropped all excluded cases in the dataset.
        clinical = self._data["clinical"]
        clinical = clinical.drop(columns=["Case_excluded"])

        # Add a Sample_Tumor_Normal column to the clinical dataframe, with just "Tumor" or "Normal" values (unlike the Proteomics_Tumor_Normal column, which gives the different types of normal samples)
        raw_map = clinical["Proteomics_Tumor_Normal"]
        parsed_map = raw_map.where(raw_map == "Tumor", other="Normal") # Replace various types of normal (Adjacent_normal, Myometrium_normal, etc.) with just "Normal"
        clinical.insert(1, "Sample_Tumor_Normal", parsed_map)

        # Mark the Patient_IDs of the normal samples by appending a ".N" to them
        clinical["Patient_ID"] = clinical["Patient_ID"].where(clinical["Sample_Tumor_Normal"] == "Tumor", other=clinical["Patient_ID"] + ".N")

        # Save our new and improved clinical dataframe!
        self._data["clinical"] = clinical

        # Sort CNV dataframe columns alphabetically
        cna = self._data["CNA"]
        cna_sorted = cna.sort_index(axis=1)
        self._data["CNA"] = cna_sorted

        # Fix dataframe names
        rename_dict = { # Keys are old names, values are new names
            "CNA":"CNV",
            "transcriptomics_linear":"transcriptomics",
            "transcriptomics_circular":"circular_RNA",
            "phosphoproteomics_site":"phosphoproteomics",
            "somatic_binary":"somatic_mutation_binary",
        }
        for old, new in rename_dict.items():
            self._data[new] = self._data[old]
            del self._data[old]

        # Call a function from dataframe_tools.py to reindex all the dataframes with sample IDs instead of patient IDs
        # Skip the followup and somatic_mutation dataframes because they're already indexed with Patient_IDs
        sample_id_to_patient_id_map = self._data["clinical"]["Patient_ID"]
        self._data = reindex_all_sample_id_to_patient_id(self._data, sample_id_to_patient_id_map, skip=["followup", "somatic_mutation"])

        # We no longer need the Patient_ID column in the clinical dataframe, because it's in the index. So we'll remove it.
        clinical = self._data["clinical"]
        clinical = clinical.drop(columns="Patient_ID")
        self._data["clinical"] = clinical

        # Get a union of all dataframes' indices, with duplicates removed
        # Exclude the followup dataframe because it has samples from a different cohort that aren't included anywhere else in the dataset
        master_index = unionize_indices(self._data, exclude="followup")

        # Use the master index to reindex the clinical dataframe, so the clinical dataframe has a record of every sample in the dataset.
        clinical = self._data["clinical"]
        clinical = clinical.reindex(master_index)
        self._data['clinical'] = clinical

        # Call function from dataframe_tools.py to sort all tables first by sample status, and then by the index
        self._data = sort_all_rows(self._data)

        # Call function from dataframe_tools.py to standardize the names of the index and column axes
        self._data = standardize_axes_names(self._data)

        print(" " * len(formatting_msg), end='\r') # Erase the formatting message

    def how_to_cite(self):
        """Return citation information for this dataset."""
        return super().how_to_cite(cancer_type='endometrial carcinoma (uterine)', pmid=32059776)
| noaoch/CPTAC-data-parser | cptac/endometrial.py | endometrial.py | py | 13,971 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dataset.Dataset",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.sep",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"lin... |
5951517574 | import openai
# NOTE: never commit a real key -- load it from the environment in production.
openai.api_key = "Fill your API key here"

# Trip parameters.
trip_days = 4
destination = "Paris"

# Ask the legacy completion endpoint for an itinerary.
completion_kwargs = {
    "engine": "text-davinci-002",
    "prompt": f"Make me a travel plan for {trip_days} days to {destination}",
    "max_tokens": 4000,
    "temperature": 0.6,
    "n": 1,
    "stop": None,
}
response = openai.Completion.create(**completion_kwargs)

# Print the first (and only, since n=1) generated plan.
print(str(response["choices"][0]["text"]))
| snagnik-coder/Travel-Planner | main.py | main.py | py | 344 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openai.api_key",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "openai.Completion.create",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "openai.Completion",
"line_number": 10,
"usage_type": "attribute"
}
] |
35871335694 | from multiprocessing.managers import BaseManager
import random
import time
import queue

# Expose the two queues registered by the server process.
BaseManager.register('get_task_queue')
BaseManager.register('get_result_queue')

server_addr = '127.0.0.1'
# Connect to the task server (must already be running on port 5000).
manager = BaseManager(address=(server_addr, 5000), authkey=b'abc')
manager.connect()
task = manager.get_task_queue()
result = manager.get_result_queue()

# Consume up to 10 tasks: double each value and push it to the result queue.
for i in range(10):
    try:
        temp = task.get(timeout=1)
        ans = temp * 2
        print("%s *2 = %s" % (temp, ans))
        time.sleep(1)
        result.put(ans)
    except queue.Empty:
        # BUGFIX: was `except queue.Queue().empty():`, which compares against
        # the bool False and would raise TypeError the moment an exception
        # actually occurred.  Queue.get(timeout=...) raises queue.Empty.
        print("task is empty")
print("Test Done....")
| PETERMAOSX/Pycharm_Code | Demo_one/page_1/t13.py | t13.py | py | 612 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "multiprocessing.managers.BaseManager.register",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "multiprocessing.managers.BaseManager",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "multiprocessing.managers.BaseManager.register",
"line_number... |
8476706656 | from flask import Flask, jsonify, request
import json
import numpy as np
from datetime import datetime
# Flask application instance for the bounding-box filtering service.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def filtering_api():
    """Bounding-box filtering endpoint.

    GET:  return every saved box record (data.txt holds one JSON object per line).
    POST: expects JSON {"main": box, "input": [box, ...]} where a box has keys
          x, y, width, height.  Candidates overlapping the main box are
          timestamped and appended to data.txt.  Returns 201 on success,
          415 for non-JSON bodies.
    """
    if request.method == 'GET':
        with open('data.txt', 'r') as f:
            data = f.read().splitlines()
        data = [json.loads(item) for item in data]
        return jsonify({"response": data}), 200
    elif request.method == 'POST':
        if request.is_json:
            payload = json.loads(request.data)
            # Renamed from `main`/`input`: `input` shadowed the builtin.
            main_box, candidates = payload.get('main'), payload.get('input')
            # Corners of the main box: (x1, y1) top-left, (x2, y2) bottom-right.
            x1, y1 = main_box["x"], main_box["y"]
            x2, y2 = x1 + main_box["width"], y1 + main_box["height"]
            # `with` guarantees the file is closed even if a candidate record
            # is malformed and raises mid-loop (the original leaked the handle).
            with open("data.txt", "a") as out_file:
                for candid in candidates:
                    x, y, w, h = candid["x"], candid["y"], candid["width"], candid["height"]
                    # Inclusive overlap test on both axes; the +1 makes boxes
                    # that merely touch count as overlapping.
                    if ((1 + np.min([x2, x + w])) - np.max([x1, x]) > 0) and ((1 + np.min([y2, y + h]) - np.max([y1, y])) > 0):
                        candid["time"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        out_file.write(json.dumps(candid) + "\n")
            return jsonify({"status": "saved"}), 201
        else:
            return "Content type is not supported. It must be json.", 415
if __name__ == '__main__':
    # app.run(debug=True)
    # To make the Flask app accessible from outside the container, you need to modify the app to listen on 0.0.0.0.
    # NOTE(review): debug=True enables the interactive debugger -- do not ship
    # this configuration to production.
    app.run(host='0.0.0.0', debug=True)
| elahe-mohammadi/bounding-box-filtering | app.py | app.py | py | 1,534 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "json.loads",
"... |
41244608097 | import logging
import socket
import threading
from src import (
parse_args,
TestObject,
TestCase,
TEST_CASE_TO_TEST,
OBJECTS,
)
def perform_calculations(test_object, test_case):
    """Run the serialization benchmark `test_case` on `test_object` and return its report as a string."""
    test = TEST_CASE_TO_TEST[test_case]()  # instantiate the test class for this format
    test.run(OBJECTS[TestObject(test_object)])
    return str(test.report)
def handle_client_request(args, server_socket, client_address, data):
    """Parse one '/get_result <object> <case...>' request and reply over UDP.

    Only runs the test case matching this server's own format (args.format);
    requests for other formats get an empty reply.  On any validation or
    runtime failure the exception text is sent back to the client.
    """
    try:
        logging.debug(f"Установлено соединение с клиентом {client_address}")
        command = data.split()
        logging.debug(command)
        # validate verb, object name, and each requested test case
        assert "/get_result" == command[0]
        assert command[1] in [obj.value for obj in TestObject]
        for c in command[2:]:
            if c == "all":
                continue
            assert c in [case.value for case in TestCase]
        tests = (
            [item for item in TestCase]
            if "all" in command[2:]
            else [TestCase(item) for item in command[2:]]
        )
        # this server only answers for its own serialization format
        if TestCase(args.format) not in tests:
            server_socket.sendto("".encode("utf-8"), client_address)
            return
        tests = [TestCase(args.format)]
        logging.info(args.format)
        logging.debug(tests)
        result = []
        for test in tests:
            res = perform_calculations(command[1], test)
            result.append(res)
            logging.debug(res)
        server_socket.sendto("\n".join(result).encode("utf-8"), client_address)
    except Exception as e:
        # report the failure to the caller instead of crashing the server loop
        server_socket.sendto(str(e).encode("utf-8"), client_address)
def start_server(args):
    """Run a UDP server (port 2924) named after this format, handling requests forever."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(message)s",
    )
    # the container hostname encodes the format this server serves
    host = f"server-{args.format}"
    server_address = (host, 2924)
    logging.debug(server_address)
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server_socket.bind(server_address)
    logging.debug(f"Сервер запущен на {server_address[0]}:{server_address[1]}")
    while True:
        data, client_address = server_socket.recvfrom(1024)
        data = data.decode()
        logging.debug(client_address)
        handle_client_request(args, server_socket, client_address, data)
    server_socket.close()  # NOTE(review): unreachable — the loop above never exits
| miska924/Serialization | src/server/__init__.py | __init__.py | py | 2,226 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.TEST_CASE_TO_TEST",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "src.OBJECTS",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src.TestObject",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.debug",
... |
28098104562 | #!/bin/env python
# This is a script to use descriptive statistics calculated previously to produce presentation-quality graphics
# and save them as PNG files for use in a powerpoint presentation
# written by Justin Meyer
# last edited 2022-05-04
#
# import required modules
import pandas as pd
import matplotlib.pyplot as plt
import os
os.chdir('c:/Users/justi/OneDrive/Documents/GitHub/final-project-meyer443')
def ReadData( fileName ):
    """Load the comma-separated raw data in *fileName* into a Pandas DataFrame."""
    return pd.read_csv(fileName, sep=',')
def Plot_Temp(DataDF, outFileName):
    """Scatter-plot average daily temperature over time and save it as a PNG.

    DataDF must have 'Date' and 'temp' columns; the figure is written to
    outFileName at 96 dpi.
    """
    plt.scatter(DataDF["Date"], DataDF["temp"], marker=".") # scatter date vs temp
    plt.xlabel('Date')
    plt.xticks([]) # overlapping date labels are unreadable; hide the ticks
    plt.ylabel('Average Daily Temperature (C)')
    plt.savefig(outFileName, dpi=96) # bug fix: honor the requested file name instead of a hard-coded one
    plt.close()
# the following condition checks whether we are running as a script, in which
# case run the test code, otherwise functions are being imported so do not.
# put the main routines from your code after this conditional check.
if __name__ == '__main__':
    # load the precomputed daily averages, then render and save the scatter plot
    fileName = "Daily_Temperature_Averages.csv"
    DataDF = ReadData( fileName )
    # Figure generation
    outTempName = "Daily_Temp.png"
    Plot_Temp( DataDF, outTempName )
| meyer443/ABE65100-Final-Project | Temp_Graphic.py | Temp_Graphic.py | py | 1,507 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.chdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot"... |
32731325683 | """
Driver for PES-Learn
"""
import timeit
import sys
import os
import json
from six.moves import input
from collections import OrderedDict
import peslearn
import numpy as np
import pandas as pd
# read the PES-Learn input deck and parse its keywords
with open('input.dat', 'r') as f:
    input_string = f.read()
input_obj = peslearn.InputProcessor(input_string)
# run mode comes from the input file, or is asked interactively if absent
if input_obj.keywords['mode'] == None:
    text = input("Do you want to 'generate' data, 'parse' data, or 'learn'?")
    text = text.strip()
else:
    text = input_obj.keywords['mode']
start = timeit.default_timer()
# mode 1: generate a grid of geometries and write electronic-structure inputs
if text == 'generate' or text == 'g':
    mol = peslearn.datagen.Molecule(input_obj.zmat_string)
    config = peslearn.datagen.ConfigurationSpace(mol, input_obj)
    template_obj = peslearn.datagen.Template("./template.dat")
    config.generate_PES(template_obj)
    print("Data generation finished in {} seconds".format(round((timeit.default_timer() - start),2)))
# mode 2: harvest energies from completed electronic-structure outputs
if text == 'parse' or text == 'p':
    mol = peslearn.datagen.Molecule(input_obj.zmat_string)
    peslearn.utils.parsing_helper.parse(input_obj, mol)
# mode 3: train the requested ML model (GP or NN), optionally with PIP features
if text == 'learn' or text == 'l':
    if input_obj.keywords['use_pips'] == 'true':
        mol = peslearn.datagen.Molecule(input_obj.zmat_string)
    if input_obj.keywords["ml_model"] == 'gp':
        if input_obj.keywords['use_pips'] == 'true':
            gp = peslearn.ml.GaussianProcess(input_obj.keywords["pes_name"], input_obj, molecule_type=mol.molecule_type)
        else:
            gp = peslearn.ml.GaussianProcess(input_obj.keywords["pes_name"], input_obj)
        gp.optimize_model()
    if input_obj.keywords["ml_model"] == 'nn':
        if input_obj.keywords['use_pips'] == 'true':
            nn = peslearn.ml.NeuralNetwork(input_obj.keywords["pes_name"], input_obj, molecule_type=mol.molecule_type)
        else:
            nn = peslearn.ml.NeuralNetwork(input_obj.keywords["pes_name"], input_obj)
        nn.optimize_model()
stop = timeit.default_timer()
print("Total run time: {} seconds".format(round(stop - start,2)))
| CCQC/PES-Learn | peslearn/driver.py | driver.py | py | 1,993 | python | en | code | 57 | github-code | 1 | [
{
"api_name": "peslearn.InputProcessor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "six.moves.input",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "peslearn... |
22540069696 | import numpy as np
import matplotlib.pyplot as plt
# Color
# https://matplotlib.org/stable/gallery/color/named_colors.html
#https://www.yutaka-note.com/entry/matplotlib_subplots
# demo data: a parabola and a line over [-3, 3]
x = np.linspace(-3,3)
y1 = x**2
y2 = x
# 1x2 grid; squeeze=False keeps ax 2-D so ax[0, col] indexing works
fig, ax = plt.subplots(1, 2, squeeze=False,figsize=(8,3),tight_layout=True)
ax[0,0].plot(x, y1,"SteelBlue", linewidth=2)
ax[0,0].plot(x, y2,"LightSteelBlue")
ax[0,0].set_title("A")
ax[0,0].legend(["test1","test2"])
ax[0,0].set_xlabel("x-label")
ax[0,0].set_ylabel("y-label")
ax[0,1].plot(x, y2)
ax[0,1].set_title("B")
ax[0,1].legend(["test2"])
# custom ticks/limits on the right panel
ax[0,1].set_xticks(list(range(-3,8,2)))
ax[0,1].set_xlim(-3, 10)
fig.autofmt_xdate()
plt.show()
| ken-100/Investment_Python | Basic/Chart/Subplots.py | Subplots.py | py | 663 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotli... |
72358637153 | # -*- coding: utf-8 -*-
from flask import Flask, render_template, request
from webapp.forms import db, ModelPosts, ModelComments, ModelTags
from read_db import *
# импорт семантического анализа
from semantic import semantic_res
def f_toxic_vals(v0, v1, v2):
    """Build per-class chart data for the circular progress widgets in index.html.

    Each value is rounded and paired with the CSS class string that renders it;
    values of 50 or more need the 'over50' modifier class.

    Returns a list of three [value, css_class] pairs (neg, neu, pos).
    """
    over50 = 'progress-circle over50 p'
    under50 = 'progress-circle p'
    def _entry(v):
        # one chart entry: rounded percentage plus its CSS class
        v = round(v)
        return [v, (over50 if v >= 50 else under50) + str(v)]
    return [_entry(v) for v in (v0, v1, v2)]
def create_app(): # Flask application factory
    """Create and configure the Flask app with a single search route."""
    app = Flask(__name__)
    app.config.from_pyfile('config.py')
    db.init_app(app)
    @app.route('/', methods=['POST', 'GET'])
    def index():
        """GET renders an empty form; POST runs the semantic analysis on the query."""
        # placeholder values for the page:
        v0, v1, v2 = 5, 10, 50
        title = 'Введите запрос'
        flag = False
        result = ''
        if request.method == 'POST':
            answer = request.form['text']
            answer = answer.lower()
            # information about answer
            #
            print('#'*100)
            print(type(answer))
            print(answer)
            print('#' * 100)
            result = '----- no answer -----'
            # flag that triggers result rendering in the html page
            flag = True
            try:
                # function for getting data by input value
                # fetch and preprocess the data that is fed to the semantic analyzer
                result = get_data(answer)
            except: # NOTE(review): bare except is discouraged; kept deliberately by the author as a catch-all
                result = '----- no answer -----'
            finally:
                flag = True
            # debug-time printing of results
            # for r in result:
            #     print(type(r))
            #     print(r)
            result_in_digits = semantic_res(result)
            v0 = result_in_digits['neg']
            v1 = result_in_digits['neu']
            v2 = result_in_digits['pos']
            # v0 = 10
            # v1 = 20
            # v2 = 30
        if request.method == 'GET':
            return render_template('index.html', title=title, answer='', flag=flag, toxic_vals=f_toxic_vals(v0, v1, v2))
        return render_template('index.html', title=title, answer=result, flag=flag, toxic_vals=f_toxic_vals(v0, v1, v2))
    return app
############################
if __name__ == '__main__':
create_app().run(port='5000', debug=True)
#export FLASK_APP=webapp_f && export FLASK_ENV=development && flask run
#-h 172.19.65.99 | Leonid-SV/ToxicStackOverflow | webapp/__init__.py | __init__.py | py | 3,469 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "webapp.forms.db.init_app",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "webapp.forms.db",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.request.me... |
72417026914 | from aiogram import Bot, Dispatcher,executor,types
from token_auth import token
from script import get_user_info,get_user_subs,get_friends_user,get_user_photo
import json
def telegram_bot(token):
    """Configure the aiogram bot and start long polling.

    Registers /start and /help command handlers plus a catch-all text handler
    that treats the message as a VK user id and replies with scraped profile
    data as JSON documents.
    NOTE(review): both text handlers are named `echo` and both command
    handlers are named `send_message`; aiogram registers them via decorators
    so this works, but distinct names would be clearer.  The first `echo`
    has its decorator commented out, so it is never registered.
    """
    bot = Bot(token=token,parse_mode=types.ParseMode.HTML)
    dp = Dispatcher(bot=bot)
    #@dp.message_handler(content_types=["text"])
    async def echo(message: types.Message):
        await message.answer("[ 🔧 ] Бот на разработке, но он обязательно оживёт ")
        await message.answer("🧑💻")
        print(f"[+] LOGGING: '{message.text}' от {message.from_user.full_name} ({message.from_user.id})")
    @dp.message_handler(commands=['start'])
    async def send_message(message: types.Message):
        # /start: disclaimer about the bot's data sources
        await message.answer("<u>Важная информация</u>: данный бот находится в разработке, поэтому я жду от Вас фидбэк по работе бота в лс @gifgufgaf")
        await message.answer("Вкратце о боте: данный бот берёт данные исключительно из открытых источников (VK, Instagram(пока не добавлено)), поэтому важно, чтобы цели бота были открытыми профилями.")
        print(f"[+] LOGGING: '{message.text}' от {message.from_user.full_name} ({message.from_user.id})")
    @dp.message_handler(commands=['help'])
    async def send_message(message: types.Message):
        # /help: usage hints
        await message.answer("<b>Вводи в любом формате (числовой или буквенный)</b> \n\nНе нагружай бота страницами с 500> фото, а то ему станет плохо🥹 ")
        print(f"[+] LOGGING: '{message.text}' от {message.from_user.full_name} ({message.from_user.id})")
    @dp.message_handler(content_types=["text"])
    async def echo(message: types.Message):
        # any other text: treat it as a VK user id and fetch public info
        try:
            print(f"[+] LOGGING: '{message.text}' от {message.from_user.full_name} ({message.from_user.id})")
            user_id = message.text
            await message.answer(f"Информация, найденная по айди: {message.text}")
            get_user_info(user_id)  # writes account_info.json
            with open("account_info.json", "r") as read_file:
                data = json.load(read_file)
                first_name = data["first_name"]
                second_name = data["second_name"]
                birth_date = data["birth_date"]
                interests = data["interests"]
                home_town = data["home_town"]
                city = data["city"]
                mobile_phone = data["mobile_phone"]
                read_file.close()  # redundant inside `with`, kept as-is
            await message.answer(f"Вот что удалось найти: \n \t 📋 Имя Фамилия: {first_name} {second_name} \n \t 🎉 День рождения: {birth_date} \n \t 😍 Интересы: {interests} \n \t 🏠 Родной город: {home_town} \n \t 🏙 Город: {city} \n \t 📱 Мобильный телефон: {mobile_phone} \n Подробная информация файлами ниже.")
            await message.answer_document(open('account_info.json', 'rb'))
            get_user_subs(user_id)
            await message.answer_document(open('account_subs.json', 'rb'))
            get_friends_user(user_id)
            await message.answer_document(open('account_friends.json', 'rb'))
            await message.answer(f"Раздел с фотографиями на доработке")
            #get_user_photo(user_id)
            #await message.answer_document(open('account_photos.zip', 'rb'))
            #await message.answer_video(open('success.mp4','rb'))
        except:
            # NOTE(review): bare except swallows everything, including typos;
            # the warning is only printed to the console
            await message.answer("Чёт пошло не так, но это обязательно пофиксится, надо просто подождать, правда ведь? ")
            await message.answer("🥺")
            print(f"[!] WARNING: '{message.text}' от {message.from_user.full_name} ({message.from_user.id})")
    executor.start_polling(dispatcher = dp)
if __name__ == "__main__":
telegram_bot(token)
| antifalcone/myhobby | searchingforwatching.py | searchingforwatching.py | py | 4,285 | python | ru | code | 1 | github-code | 1 | [
{
"api_name": "aiogram.Bot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "token_auth.token",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "aiogram.types.ParseMode",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types"... |
22408996664 | import sys
from collections import deque
input = sys.stdin.readline
# bfs와 이분탐색 활용문제
# 처음에는 다리무게를 활용하여 도착지에 갈 수 있는 최소 무게를 탐색
# 나중에는 이분탐색을 활용해 도달할 수 있는 무게의 최대값을 탐색
def bfs(w):
    """Return True if node E is reachable from S using only edges of capacity >= w.

    Reads the module-level graph G (adjacency lists of (node, capacity)),
    node count N, and endpoints S and E.
    """
    seen = [False] * (N + 1)
    seen[S] = True
    queue = deque([S])
    while queue:
        node = queue.popleft()
        for nxt, cap in G[node]:
            if not seen[nxt] and w <= cap:
                seen[nxt] = True
                queue.append(nxt)
    return True if seen[E] else False
# read the graph: N islands, M bridges with weight limits
N, M = map(int, input().split())
G = [[] for _ in range(N+1)]
for _ in range(M):
    A, B, C = map(int, input().split())
    G[A].append((B, C))
    G[B].append((A, C))
S, E = map(int, input().split())
# binary search on the cargo weight: find the largest w for which
# a path S -> E exists whose every bridge supports at least w
minW = 1
maxW = 1000000000
ans = 0
while minW <= maxW:
    midW = (maxW + minW) // 2
    if bfs(midW):
        ans = midW
        minW = midW + 1
    else:
        maxW = midW - 1
print(ans)
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
70976183713 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 11:41:36 2018
@author: Susan
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# making phi matrix
def base_func(x, j, M, s):
    """Sigmoid basis expansion of the column vector x.

    phi[n, m] = sigmoid((x_n - mu_m) / s) with centers mu_m = 4 * j_m / M,
    giving an (N, M) design matrix.
    """
    centers = np.asarray(4 * j / M).reshape(1, len(j))
    z = (np.tile(x, M) - np.tile(centers, (x.shape[0], 1))) / s
    return 1 / (1 + np.exp(-z))
data = pd.read_csv('E:/Machine Learning/HW2/1_data.csv')
# Bayesian Linear Regression
beta = 1       # noise precision
M = 7          # number of sigmoid basis functions
s = 0.1        # basis width
N = [10,15,30,80]  # training-set sizes to compare
x = np.asarray(data['x']).reshape(-1,1)
t = np.asarray(data['t']).reshape(-1,1)
j = np.arange(M).reshape(-1,1)
I = np.eye(len(j))
S0 = (10**-6)*I  # near-flat prior covariance
m0 = 0
''' Q1-1 find mN & SN '''
# posterior over weights: SN = (S0 + beta * Phi^T Phi)^-1, mN = beta * SN Phi^T t
phi = base_func(x,j,M,s)
SN = np.linalg.inv(S0 + beta*np.matmul(phi.T,phi))
mN = beta * np.matmul(np.matmul(SN,phi.T),t)
''' Q1-2 & 3'''
# con: continuous x in range 0-4 (a dense grid is needed to draw smooth curves)
con = np.arange(0, 4.01, 0.01)
con = base_func(np.asarray(con).reshape(-1,1), j, M, s)
for i in range(4):
    sample = data[:N[i]]
    # transfer sample_x into phi matrix
    sample_x = base_func(np.asarray(sample['x']).reshape(-1,1), j, M, s)
    SN = np.linalg.inv(S0 + beta*np.matmul(sample_x.T,sample_x))
    mN = beta * np.matmul(np.matmul(SN,sample_x.T),sample['t'])
    # sample 5 w
    w = np.random.multivariate_normal(mN.reshape((-1,)),SN,5)
    # make the distribution under sample_x & 5 w
    con_y = con.dot(w.T)
    sample_y = sample_x.dot(w.T)
    ''' Q1-2 plot the distribution with 5 different w'''
    # plot the distribution of what we sampled and set x axis in range(0 - 4,0.01)
    plt.plot(sample['x'],sample['t'],'.')
    plt.plot(np.arange(0, 4.01, 0.01),con_y)
    plt.show()
    ''' Q1-3 plot the distribution with (x,t) & mean curve'''
    # predictive mean and (1/beta + phi^T SN phi) band around it
    mean = mN.reshape(-1,1).T.dot(con.T).reshape((-1,))
    std = 1/beta + np.diag(con.dot(SN).dot(con.T))
    plt.plot(sample['x'],sample['t'],'.')
    plt.plot(np.arange(0, 4.01, 0.01),mean,'b')
    plt.fill_between(np.arange(0, 4.01, 0.01), mean+std ,mean-std,color='lightblue')
    plt.show()
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
''' Q2-1 '''
# load the 3-class training data: first 3 columns are one-hot targets,
# remaining 7 columns are features
train = pd.read_csv('E:/Machine Learning/HW2/train.csv',header=None) # the CSV files have no header row
test = pd.read_csv('E:/Machine Learning/HW2/test.csv',header=None)
x = train.iloc[:,3:].values
t = train.iloc[:,0:3].values
w0 = np.zeros((3,7))  # initial weights: one row per class
ak = x.dot(w0.T)      # class activations
yk = np.exp(ak) / np.sum(np.exp(ak), axis=1).reshape((-1,1))  # initial softmax outputs
def update_w(wo,x,y,t):
    """One Newton-Raphson (IRLS) step for the three class weight vectors.

    wo: current (3, d) weights; x: (n, d) features; y: (n, 3) softmax outputs;
    t: (n, 3) one-hot targets.  Returns the updated weights, the refreshed
    softmax outputs, and the cross-entropy error after the step.
    """
    wn = np.zeros((3,x.shape[1]))
    H1, H2, H3 = H(x,y)             # per-class pseudo-inverse Hessians
    dE1, dE2, dE3 = delta_E(x,y,t)  # per-class gradients
    wn[0] = wo[0] - H1.dot(dE1).T
    wn[1] = wo[1] - H2.dot(dE2).T
    wn[2] = wo[2] - H3.dot(dE3).T
    # recompute activations with the new weights, then renormalize
    y[:,0] = x.dot(wn[0].T)
    y[:,1] = x.dot(wn[1].T)
    y[:,2] = x.dot(wn[2].T)
    y = softmax(y)
    error = cross_entropy(y,t)
    return wn,y,error
def H(x,y):
    """Pseudo-inverse Hessians (x^T R_k x)^+ of the cross-entropy, one per class.

    R_k is the diagonal matrix of y_k * (1 - y_k).  The original hard-coded
    the 180x180 size of the homework's training set; generalized here to any
    n = x.shape[0] rows, which is backward compatible.
    """
    R = y*(1-y)
    n = x.shape[0]  # was hard-coded to 180
    R1, R2, R3 = np.zeros((n,n)),np.zeros((n,n)),np.zeros((n,n))
    np.fill_diagonal(R1, R[:,0])
    np.fill_diagonal(R2, R[:,1])
    np.fill_diagonal(R3, R[:,2])
    Hfunc1 = np.linalg.pinv(x.T.dot(R1).dot(x))
    Hfunc2 = np.linalg.pinv(x.T.dot(R2).dot(x))
    Hfunc3 = np.linalg.pinv(x.T.dot(R3).dot(x))
    return Hfunc1, Hfunc2, Hfunc3
def softmax(y):
    """Row-wise softmax: exponentiate and normalize each row to sum to 1."""
    exp_y = np.exp(y)
    return exp_y / exp_y.sum(axis=1).reshape((-1, 1))
def delta_E(x,y,t):
    """Cross-entropy gradients w.r.t. each class weight vector: x^T (y_k - t_k)."""
    residual = y - t
    grads = tuple(x.T.dot(residual[:, k]) for k in range(3))
    return grads
# calculate the cross entropy
def cross_entropy(y,t):
    """Multiclass cross-entropy loss, -sum(t * log y), summed over all samples."""
    return -(t * np.log(y)).sum()
# calculate the accuracy
def Accuracy(y,t):
    """Accuracy of arg-max one-hot predictions against one-hot targets t.

    Note: mismatched rows contribute two nonzero entries each, so the value
    is 1 - 2 * (wrong rows) / n, matching the original implementation.
    """
    pred = np.zeros_like(y)
    pred[np.arange(len(y)), y.argmax(1)] = 1
    mismatches = np.count_nonzero(pred - t)
    return 1 - mismatches / pred.shape[0]
# Initial y & w — run IRLS until the cross-entropy drops below 0.005,
# recording accuracy and loss each step
y = yk.copy()
wn, y,error = update_w( w0, x, yk, t)
acc = []
err = []
while( error > 0.005):
    wn, y, error = update_w( wn, x, y, t)
    acc.append(Accuracy(y,t))
    err.append(error)
# plot accuracy and loss on twin y-axes
fig,ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(acc,'b')
ax2.plot(err,color='orange')
ax1.set_xlabel('epochs')
ax1.set_ylabel('Accuracy')
ax2.set_ylabel('Loss')
plt.title('Accuracy rate & loss')
fig.tight_layout()
plt.show()
#%%
''' Q2-2 '''
# predict one-hot classes for the test set with the trained weights
test_x = test.values
predict = np.round(softmax(test_x.dot(wn.T)))
#%%
''' Q2-3 '''
# per-feature histograms, colored by class (rows 0-59 / 60-119 / 120-179)
for i in range(x.shape[1]):
    plt.hist(x[:60,i],color = 'salmon',alpha=0.7)
    plt.hist(x[60:120,i],color = 'yellowgreen',alpha=0.7)
    plt.hist(x[120:,i],color = 'lightblue',alpha=0.7)
    plt.show()
#%%
''' Q2-5 '''
# scatter of the first two features, one series per class
plt.plot(x[:60, 0], x[:60, 1], '.')
plt.plot(x[60:120, 0], x[60:120, 1], '.')
plt.plot(x[120:, 0], x[120:, 1], '.')
plt.show()
#%%
''' Q2-6 '''
# Initial y & w — retrain using only the first two features, with an extra
# early stop when the loss improvement falls below 0.001
x2 = x[:,:2]
wn2 = np.zeros((3,2))
y2 = softmax(x2.dot(wn2.T))
error2 = 1
acc2 = []
err2 = []
while(error2>0.005):
    wn2, y2, error2 = update_w( wn2, x2, y2, t)
    acc2.append(Accuracy(y2,t))
    err2.append(error2)
    if (len(err2) > 1 and err2[-2]-error2<0.001):
        break
# plot accuracy and loss on twin y-axes
fig,ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(acc2,'b')
ax2.plot(err2,color='orange')
ax1.set_xlabel('epochs')
ax1.set_ylabel('Accuracy')
ax2.set_ylabel('Loss')
plt.title('Accuracy rate & loss')
fig.tight_layout()
plt.show()
test_x = test.values
# NOTE(review): this uses the full 7-feature weights `wn`, not the
# 2-feature `wn2` trained just above — confirm which was intended
predict2 = np.round(softmax(test_x.dot(wn.T)))
#%%
''' Q2-7 '''
# Fisher LDA: project the 7-D data onto the top-2 discriminant directions
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv('E:/Machine Learning/HW2/train.csv',header=None)
test = pd.read_csv('E:/Machine Learning/HW2/test.csv',header=None)
x = train.iloc[:,3:].values
t = train.iloc[:,0:3].values
# class slices: 60 rows per class
x1 = x[:60]
x2 = x[60:120]
x3 = x[120:]
m = np.mean(x,axis=0)
m1 = np.mean(x1,axis=0)
m2 = np.mean(x2,axis=0)
m3 = np.mean(x3,axis=0)
# NOTE(review): within-class scatter normally sums (x_k - m_k) over each
# class's own rows only; here the full x is used with every class mean —
# confirm this matches the intended formula
SW = (x - m1).T.dot((x - m1)) + (x - m2).T.dot((x - m2)) + (x - m3).T.dot((x - m3))
M1 = (m1 - m).reshape((-1,1))
M2 = (m2 - m).reshape((-1,1))
M3 = (m3 - m).reshape((-1,1))
SB = 60*(M1.dot(M1.T)) + 60*(M2.dot(M2.T)) + 60*(M3.dot(M3.T))
ST = SW +SB
# eigen-decompose SW^-1 SB and keep the two largest-eigenvalue directions
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(SW).dot(SB))
eig_pairs = []
for i in range(len(eig_vals)):
    eig_pairs.append((np.abs(eig_vals[i]), eig_vecs[:,i]))
eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
W = np.hstack((eig_pairs[0][1].reshape(7,1), eig_pairs[1][1].reshape(7,1)))
y = x.dot(W)
plt.plot(y[:60,0],y[:60,1],'.',color = 'salmon')
plt.plot(y[60:120,0],y[60:120,1],'.',color = 'yellowgreen')
plt.plot(y[120:,0],y[120:,1],'.',color = 'lightblue')
plt.grid()
plt.tight_layout  # NOTE(review): missing () — this line is a no-op
plt.show()
#%%
''' Q3-1 '''
def Accuracy_Rate(y,t):
    """Share of predictions equal to their targets: 1 - mismatches / samples."""
    mismatches = np.count_nonzero(t - y)
    return 1 - mismatches / len(y)
def normalize(x):
    """Column-wise z-score: subtract each feature's mean and divide by its std."""
    col_mean = np.mean(x, axis=0)
    col_std = np.std(x, axis=0)
    return (x - col_mean) / col_std
def KNN(test,train,target,K):
    """Classify each test row by majority vote among its K nearest training rows.

    test: (n_test, d), train: (n_train, d), target: (n_train,) integer labels.
    Returns a float array of predicted labels, one per test row.
    The pairwise-distance reshape was hard-coded to the homework's (60, 150)
    split; generalized here to any test/train sizes (backward compatible).
    """
    test_num = test.shape[0]
    train_num = train.shape[0]
    _test = np.repeat(test, train_num, axis=0)
    _train = np.tile(train,( test_num,1))
    # (train_num, test_num) matrix: dist[i, j] = ||train_i - test_j||
    dist = np.sqrt(np.sum(np.square(_test - _train),axis=1)).reshape((test_num, train_num)).T
    arg_sort = np.argsort(dist, axis=0)[:K]   # indices of the K closest rows
    ans = target[arg_sort].astype(np.int32)
    y = np.zeros((test_num, ))
    for i in range(test_num):
        y[i] = np.bincount(ans[:, i]).argmax()  # majority label (ties -> smallest)
    return y
seeds = pd.read_csv('E:/Machine Learning/HW2/seeds.csv').values
# normalize the data: 7 feature columns, last column is the class label
x = seeds[:,:7]
t = seeds[:,-1].reshape((-1,1))
x_nor = normalize(x)
# split data into training data (50 per class) and testing data (20 per class)
data = np.concatenate((x_nor,t),axis=1)
train = np.concatenate((data[:50],data[70:120],data[140:190]),axis=0)
train_x = train[:,:7]
train_y = train[:,-1]
test = np.concatenate((data[50:70],data[120:140],data[190:210]),axis=0)
test_x = test[:,:7]
test_y = test[:,-1]
# evaluate KNN accuracy for K = 1..10
Ans, Acc = [], []
K = np.arange(1,11)
for k in K:
    Ans.append(KNN(test_x,train_x,train_y,k))
    Acc.append(Accuracy_Rate(Ans[k-1],test_y))
import matplotlib.pyplot as plt
plt.plot(Acc)
plt.show()
#%%
''' Q3-2 '''
def KNN_dist(test,train,target,distence):
    """Classify each test row by majority vote among training rows closer than `distence`.

    test: (n_test, d), train: (n_train, d), target: (n_train,) integer labels.
    Returns a float array of predicted labels, one per test row.
    The pairwise-distance reshape was hard-coded to the homework's (60, 150)
    split; generalized here to any test/train sizes (backward compatible).
    """
    test_num = test.shape[0]
    train_num = train.shape[0]
    _test = np.repeat(test, train_num, axis=0)
    _train = np.tile(train,( test_num,1))
    # (train_num, test_num) matrix: dist[i, j] = ||train_i - test_j||
    dist = np.sqrt(np.sum(np.square(_test - _train),axis=1)).reshape((test_num, train_num)).T
    y = np.zeros((test_num, ))
    for i in range(test_num):
        # neighbors inside the radius vote; ties resolve to the smallest label
        pred = target[dist[:, i] < distence].astype(np.int32)
        y[i] = np.bincount(pred).argmax()
    return y
# reuse the Q3-1 split and evaluate radius-based KNN for radii 2..10
train_x = train[:,:7]
train_y = train[:,-1]
test_x = test[:,:7]
test_y = test[:,-1]
Ans, Acc = [], []
V = np.arange(2,11)
for v in V:
    Ans.append(KNN_dist(test_x,train_x,train_y,v))
    Acc.append(Accuracy_Rate(Ans[v-2],test_y))
import matplotlib.pyplot as plt
plt.plot(Acc)
plt.show()
{
"api_name": "numpy.asarray",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number... |
33821375551 | from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import PyPDF2
import os
files=Tk()
files.title('Easy Files')
files.geometry(f'400x600')
files.resizable(False,False)
# Add a frame to set the size of the window
frame= Frame(files, relief= 'sunken')
frame.pack(fill= BOTH, expand= True, padx= 10, pady=0)
def browseFiles():
    """Open a file dialog for a PDF and remember the chosen path.

    Stores the full path in the module-level `filename` (used later by
    convert_pdf) and shows just the base name in the explorer label.
    """
    global filename
    filename = filedialog.askopenfilename(initialdir="/",
                                          title="Select a File",
                                          filetypes=(("Pdf files",
                                                      "*.pdf*"),
                                                     ("all files",
                                                      "*.*")))
    label_file_explorer.config(text=os.path.split(filename)[1])
def convert_pdf():
    """Extract text from the selected PDF page range into <Desktop>/<name>.doc.

    Reads the module-level `filename` chosen in browseFiles(), the 1-based
    inclusive page range from Entry2/Entry3, and the output name from
    name_entry, then shows a success dialog.
    """
    global filename
    global name
    name = name_entry.get()
    start=int(Entry2.get())
    end=int(Entry3.get())
    with open(filename, mode='rb') as f:
        reader = PyPDF2.PdfFileReader(f)
        doc_path = os.path.expanduser("~\Desktop") +'\\'+ name + '.doc'
        print(doc_path)
        # context manager guarantees the output file is closed even if
        # text extraction raises part-way through (was a bare open/close)
        with open(doc_path, 'a') as file_object:
            for i in range(start-1,end):
                page = reader.getPage(i)
                # append this page's text at the end of the file
                file_object.write(page.extractText())
    return messagebox.showinfo('Success', f'File saved in desktop as {name}.doc')
#Print success
# NOTE(review): label_success is only assigned near the bottom of this file,
# so this call raises NameError at import time; it probably belongs after the
# label is created (or inside convert_pdf) — confirm intent
label_success.pack()
#Create heading
label = Label(
    frame,
    font = "Helvetica 40 bold",
    foreground = "grey", text = "Welcome")
label.pack(pady=30)
#Create a File Explorer button
button1 = Button(frame, text="Browse file", command=browseFiles)
button1.pack(pady=1)
# Create a File Explorer label
label_file_explorer = Label(frame,
                            text="Upload pdf",
                            width=100, height=4,
                            fg="grey")
label_file_explorer.pack(pady=1)
#Frame 2: page-range entry row
frame2=Frame(files)
frame2.pack(fill= BOTH, expand= True, padx= 10, pady=0)
#Page Range
Label2 = Label(frame2,width=25)
Label2.configure(text=''' Enter page range ''')
Label2.grid(row=0,column=0)
Entry2 = Entry(frame2,width=3)
Entry2.grid(row=0,column=1)
Entry3 = Entry(frame2,width=3)
Entry3.grid(row=0,column=3)
Label3 = Label(frame2)
Label3.configure(text='''-''')
Label3.grid(row=0,column=2)
#Frame3: output-name entry and convert button
frame3=Frame(files)
frame3.pack(fill= BOTH, expand= True, padx= 10, pady=0)
#Enter name of new file
name_label = Label(frame3, text = 'New file name', font=('calibre',10, 'bold'))
name_label.pack()
name_entry = Entry(frame3, font=('calibre',10,'normal'))
name_entry.pack()
#Create a File Converter button
button2 = Button(frame3, text="Convert File",height=5,bg='grey',fg='white', command= convert_pdf)
button2.pack(pady=50)
# Create a Message label (not packed here; intended to be shown on success)
label_success = Label(frame3,
                      text='Success!!',
                      fg='green',
                      font=("Arial", 15),
                      width=100, height=4)
# close window after 3 seconds
#files.after(3000, lambda: files.destroy())
files.mainloop()
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "o... |
1155368532 | import sqlite3, datetime
from EmployeeEdit import randserial
from Login import clear
from Inventory import unique
conn = sqlite3.connect('MarksHardware.db')
cursor = conn.cursor()
def uniquesales(ID): #ensures unique Ids
    """Return True when ID is not yet used as a SALESID in the SALES table.

    Uses a parameterized single-row lookup instead of scanning every sale
    in Python; behavior is unchanged for callers.
    """
    return conn.execute("SELECT 1 FROM SALES WHERE SALESID = ?", (ID,)).fetchone() is None
def addsales(USERID): #Adds sales uses current date time and Id from who logged on to categorize sales
    """Interactively record one sale for the cashier identified by USERID.

    Generates a fresh sales id, lets the user pick an inventory item and a
    quantity (validated against stock), then inserts the sale and decrements
    the stock.  NOTE(review): SQL is built with f-strings throughout; the
    int() casts limit injection risk, but parameterized queries would be safer.
    """
    clear()
    try:
        cashier = conn.execute(f"SELECT NAME from COMPANY where ID = {USERID}").fetchall()[0][0]
        # keep drawing random 6-digit ids until an unused one is found
        while True:
            SALESID = randserial(6)
            if uniquesales(SALESID):
                break
        # show the catalogue so the user can pick an item id
        for row in conn.execute("SELECT ID, NAME from INVENTORY"):
            print(f'ID: {row[0]} || Name: {row[1]} ')
        while True:
            print('Sales ID:', SALESID)
            print('Cashier:', cashier)
            Sale = int(input('Enter serial #. Enter 0 to cancel: ')) # gets inventory id of item
            if Sale == 0:
                input('Cancel. \nPress ente to continue...')
                return
            if unique(Sale) == False: # decides if the serial number provided is valid
                print("Found!")
                Name = conn.execute(f"SELECT NAME FROM INVENTORY WHERE ID = {Sale}").fetchone()[0] # gets the product name with the id from the inventory
                date = datetime.datetime.now().date() # Gets date of sale
                unitsleft2 = conn.execute(f"SELECT STOCK FROM INVENTORY WHERE ID={Sale}").fetchone()[0] # gets units left in stock of item
                print('There are', unitsleft2, 'left in stock')
                while True: # loop asks for units being sold and if the input is invalid loop it
                    sold = int(input('Input number of units being sold: '))
                    if unitsleft2 >= sold: # checks to see if there is enough in stock to sell requested amount
                        break
                    else:
                        print('Invalid. Not enough stock.')
                        continue
                salval = (SALESID, Sale, Name, sold, date, cashier)
                conn.execute("INSERT INTO SALES VALUES (?,?,?,?,?,?)", salval)
                conn.execute(f"UPDATE INVENTORY SET STOCK = {unitsleft2 - sold} where ID = {Sale}")
                conn.commit()
                input('Success. \nPress enter to continue...')
                return
            else: # if serial Id does not match any in the inventory asks for id again
                input("Serial # does not exist. \nPress enter to continue...")
                continue
    except:
        # NOTE(review): bare except hides which step failed (bad int, db error)
        input('Invalid. \nPress enter to continue...')
def viewsales(): #This function prints out all the past sales
    """Print every recorded sale (id, item, quantity, date, cashier), then wait for Enter."""
    for row in conn.execute("SELECT * FROM SALES").fetchall():
        print(f'Sale ID: {row[0]} || Item ID: {row[1]}\nItem: {row[2]} || # of items sold: {row[3]}\nDate: {row[4]} || Cashier: {row[5]}\n')
    input('Press enter to continue...')
def main(id): #This function gives you choices to choose from to continue
    """Sales menu loop: view sales, add a sale (as cashier `id`), or return."""
    while True:
        clear()
        choice = input("""
        -------Sales information-------
        Please choose:
        1 - View Sales
        2 - Add Sales
        3 - Return
        ----------------------------------
        """)
        if choice == "1":
            viewsales()
        elif choice == "2":
            addsales(id)
        elif choice == "3":
            return
        else:
            input("Invalid.\nPress enter to continue:...")
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "Login.clear",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "EmployeeEdit.randserial",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "Inventory.unique",... |
72582152355 | import pandas as pd
from re import split
from tqdm import tqdm
## COUNT GAMES BY INITIAL PLYS
d = pd.read_csv('../data/csv/caissa_clean.csv', index_col=0)
def f(pgn):
    """Split a PGN movetext string into a flat list of ply tokens.

    Move-number markers like '12.' act as separators; the remaining
    fragments are whitespace-split and concatenated in order.
    (Was a lambda bound to a name; a def keeps the same callable interface
    and adds a docstring/traceback name.)
    """
    return sum([m.strip().split() for m in split(r'\d+\.', pgn) if len(m) > 0], [])
def extract_first_plys(row):
    """Return a dict of Year, Result, and the first 16 plys of row.pgn (pd.NA-padded)."""
    plys = {}
    plys['Year'] = row.Year
    plys['Result'] = row.Result
    pgn = f(row.pgn)  # tokenized movetext via the module-level helper
    for i in range(16):
        if i < len(pgn):
            plys['Ply'+str(i+1)] = pgn[i]
        else:
            # games shorter than 16 plys get missing values
            plys['Ply'+str(i+1)] = pd.NA
    return plys
# build one row of opening plys per game, then write, for each prefix length
# 1..12, a table of (moves-prefix, game count, mean result)
ply_rows = []
for _, row in tqdm(d.iterrows()):
    ply_rows.append(extract_first_plys(row))
d_plys = pd.DataFrame.from_records(ply_rows)
d_plys = d_plys[['Year']+list(d_plys.columns[2:])+['Result']]
for i in tqdm(range(12)):
    # group by Year plus the first i+1 plys; count games via the next ply column
    d_plys_gb = d_plys.groupby(list(d_plys.columns[:i+2])).agg({d_plys.columns[i+2]: 'count', 'Result': 'mean'}).reset_index()
    d_plys_gb.columns = list(d_plys_gb.columns[:-2]) + ['Count', 'Result']
    d_plys_gb['Moves'] = d_plys_gb[d_plys_gb.columns[1:i+2]].agg(' '.join, axis=1)
    d_plys_gb.to_csv('../data/csv/caissa_counts_by_ply'+str(i+1)+'.csv')
## WRITE A TABLE WITH A ROW PER PLAYER IN A GAME (two rows per actual game)
d = pd.read_csv('../data/csv/caissa_clean_only_frequent_players.csv', index_col=0)
# emit two rows per game — one from each player's perspective, with the
# result sign flipped for Black
player_rows = []
for i, row in tqdm(d.iterrows()):
    player_rows.append({'Player': row.White, 'Game':i, 'Elo': row.WhiteElo, 'Country': row.WhiteCountry, 'Result': row.Result, 'ECO': row.ECO[:3], 'Year': row.Year, 'FideId': row.WhiteFideId, 'Title': row.WhiteTitle, 'Pieces': 'White', 'pgn': row.pgn})
    player_rows.append({'Player': row.Black, 'Game':i, 'Elo': row.BlackElo, 'Country': row.BlackCountry, 'Result': -row.Result, 'ECO': row.ECO[:3], 'Year': row.Year, 'FideId': row.BlackFideId, 'Title': row.BlackTitle, 'Pieces': 'Black', 'pgn': row.pgn})
d_players = pd.DataFrame.from_records(player_rows)
d_players.to_csv('../data/csv/caissa_player_games.csv')
| EgorLappo/cultural_transmission_in_chess | data_processing/generate_tables.py | generate_tables.py | py | 1,957 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.NA",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": ... |
17363307259 | import torch.optim as optim
import torchvision.transforms as T
# For list of supported models use timm.list_models
MODEL_NAME = "resnet18"
# NOTE(review): unusual capitalisation ('ClASSES' with a lowercase L) — kept
# unchanged because other modules may import this exact name.
NUM_ClASSES = 10
IN_CHANNELS = 3
USE_TORCHVISION = False  # If you need to use timm models set to False.
# USE_TORCHVISION = True # Should use Torchvision Models or timm models
PRETRAINED = True  # If True -> Fine Tuning else Scratch Training
EPOCHS = 5
TRAIN_BATCH_SIZE = 512  # Training Batch Size
VALID_BATCH_SIZE = 512  # Validation Batch Size
NUM_WORKERS = 4  # Workers for training and validation
EARLY_STOPPING = True  # If you need early stopping for validation loss
SAVE_PATH = "{}.pt".format(MODEL_NAME)  # checkpoint filename derived from the model name
# IMG_WIDTH = 224 # Width of the image
# IMG_HEIGHT = 224 # Height of the image
MOMENTUM = 0.8  # Use only for SGD
LEARNING_RATE = 1e-3  # Learning Rate
SEED = 42
LOG_INTERVAL = 300  # Interval to print between epoch
# Train and validation Transforms which you would like
train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])
valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])
# Classes to be detected.
FASHION_MNIST_CLASSES = (
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle Boot",
)
# Classes to be detected.
CIFAR10_CLASSES = (
    "airplane",
    "automobile",
    "bird",
    "cat",
    "deer",
    "dog",
    "frog",
    "horse",
    "ship",
    "truck",
)
| oke-aditya/pytorch_cnn_trainer | examples/config.py | config.py | py | 1,462 | python | en | code | 26 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 30,
"usage_type": "call"
},
{
... |
10000155956 | import pytest
from rest_framework.test import APIClient
from django.urls import reverse
from website.models import Trip
from website.models.profile import UserDetail
import mock
from rest_framework.response import Response
from website.api.v1.main_page.serializers import TripSerializerRetrieve
from django.shortcuts import get_object_or_404
from website.api.v1.main_page import views
@pytest.fixture
def ApiClient():
    """Fixture: an unauthenticated DRF test client (anonymous user)."""
    # anonymous user
    client = APIClient()
    return client
@pytest.fixture
def CommonBusiness():
    """Fixture: a Trip with pk=10 hidden from search, used by the retrieve tests."""
    trip = Trip.objects.create(
        pk = 10,
        appear_in_search=False,
    )
    return trip
@pytest.mark.django_db
class TestTripRetrieve():
    """Tests for the trip-retrieve endpoint (pk=10 matches the CommonBusiness fixture)."""
    url = reverse(
        'website:main-page:retrieve-trip', kwargs={
            "pk": 10,
        })

    def test_trip_retrieve_response_200(self, ApiClient,CommonBusiness):
        # NOTE(review): retrieve() is mocked and given a canned Response, but
        # the assertion only checks the status code — confirm the mock target
        # is actually hit by the GET below.
        with mock.patch('website.api.v1.main_page.views.TripViewSet.retrieve') as mock_trip_retrieve:
            trip = get_object_or_404(Trip, pk=CommonBusiness.pk, appear_in_search=False)
            serializer = TripSerializerRetrieve(trip)
            mock_trip_retrieve.return_value = Response(serializer.data)
            response = ApiClient.get(self.url)
            assert response.status_code == 200
| mahdi-darvishzadeh/Travelo-BackEnd | core/website/tests/main_page/test_trip_retrieve.py | test_trip_retrieve.py | py | 1,273 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.test.APIClient",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "website.models.Trip.objects.create",
"line_number": 20,
"usage_type": "call"
},
{
... |
72507372834 | from __future__ import division
import cv2
import numpy as np
# global
# HSV 色值:
GREEN = [40, 65, 13, 80, 255, 255] # Green
YELLOW = [20, 103, 80, 40, 255, 255] # Yellow
BLUE = [94, 81, 82, 126, 255, 255] # Blue
RED = [0, 144, 0, 20, 255, 255] # Red
# default value: Blue 蓝色
lowHue = BLUE[0]
lowSat = BLUE[1]
lowVal = BLUE[2]
highHue = BLUE[3]
highSat = BLUE[4]
highVal = BLUE[5]
# 排列:green, yellow, blue, red
defaultColor = [0, 0, 1, 0] # 蓝色
color = "BLUE" # 现在的颜色
blueColor = (255, 0, 0) # 蓝色
greenColor = (0, 255, 0) # 绿色
redColor = (0, 0, 255) # 红色
# 图像色调和追踪
def frame_mask_contour(image):
    """Mask `image` by the active HSV range and annotate detections in place.

    Draws a bounding box for every contour larger than 4000 px², labels the
    first four detections with their coordinates and area, and stamps a
    found / not-found banner at the top.

    Returns:
        (mask, image): the binary HSV mask and the annotated BGR frame.
    """
    frameHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    colorLow = np.array([lowHue, lowSat, lowVal])
    colorHigh = np.array([highHue, highSat, highVal])
    mask = cv2.inRange(frameHSV, colorLow, colorHigh)
    # get contours
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    found = False
    hits = 0  # number of accepted (large-enough) contours so far
    for cnt in contours:
        if cv2.contourArea(cnt) <= 4000:
            continue  # ignore small blobs / noise
        x, y, w, h = cv2.boundingRect(cnt)
        hits += 1
        print(hits)
        cv2.rectangle(image, (x, y), (x + w, y + h), greenColor, 2)
        print(x, y, w, h)
        found = True
        # Label up to four detections; rows sit at y = 100*hits and 100*hits+50
        # (this collapses the former copy-pasted i == 1..4 branch ladder).
        if hits <= 4:
            square = "x={} , y={} , x+w={} , y+h={}".format(x, y, x + w, y + h)
            area_text = "S = {}".format(w * h)
            cv2.putText(image, square, (50, 100 * hits), cv2.FONT_HERSHEY_SIMPLEX, 1, redColor, 2)
            cv2.putText(image, area_text, (50, 100 * hits + 50), cv2.FONT_HERSHEY_SIMPLEX, 1, redColor, 2)
    if found:
        text = f'Found {color} object'
        print(text)
        cv2.putText(image, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, redColor, 2)
    else:
        text = "Not Found!"
        print("Not Found! 没有找到!")
        cv2.putText(image, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, redColor, 2)
    return mask, image
# Green control: make green the tracked colour.
def greenCtrl(val):
    """Trackbar/button callback: switch tracking to GREEN."""
    global defaultColor, color
    color = 'GREEN'
    defaultColor = [1, 0, 0, 0]
    # NOTE(review): assignColor is not defined in the visible code — confirm
    # it exists elsewhere in this module, otherwise this raises NameError.
    assignColor(GREEN)
# Yellow control: make yellow the tracked colour.
def yellowCtrl(val):
    """Trackbar/button callback: switch tracking to YELLOW."""
    global defaultColor, color
    color = 'YELLOW'
    defaultColor = [0, 1, 0, 0]
    assignColor(YELLOW)
# Blue control: make blue the tracked colour.
def blueCtrl(val):
    """Trackbar/button callback: switch tracking to BLUE."""
    global defaultColor, color
    color = 'BLUE'
    defaultColor = [0, 0, 1, 0]
    assignColor(BLUE)
# Red control: make red the tracked colour.
def redCtrl(val):
    """Trackbar/button callback: switch tracking to RED."""
    global defaultColor, color
    color = 'RED'
    defaultColor = [0, 0, 0, 1]
    assignColor(RED)
# --- Main capture loop --------------------------------------------------
vidCapture = cv2.VideoCapture(0)
vidCapture.set(cv2.CAP_PROP_FRAME_WIDTH,800)#FRAME_WIDTH = 800
vidCapture.set(cv2.CAP_PROP_FRAME_HEIGHT,600)#FRAME_HEIGHT = 600
video_win = 'Frame'
cv2.namedWindow(video_win, cv2.WINDOW_AUTOSIZE)
cv2.moveWindow(video_win, 855, 0)
'''
# =====================================
# Window 窗口: Demo
# =====================================
demo_win = 'Demo'
cv2.namedWindow(demo_win)
cv2.moveWindow(demo_win, 205, 0)
'''
while True:
    # Get webcam frame.
    _, frame = vidCapture.read()
    # Show the annotated image.
    demo, frame_contour = frame_mask_contour(frame)
    cv2.imshow(video_win, frame_contour)
    #cv2.imshow(demo_win, demo)
    key = cv2.waitKey(10)
    # BUGFIX: the loop previously never exited, so release() and
    # destroyAllWindows() below were unreachable. Quit on ESC or 'q'.
    if key in (27, ord('q')):
        break
vidCapture.release()  # release the camera
cv2.destroyAllWindows()  # close all image windows
| SPOOKY01/Vanilla | RASPBERRYPI/detected_END.py | detected_END.py | py | 4,752 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.cvtColor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"li... |
72143450595 | import datetime
from typing import Any, Dict, List, Optional, Type, TypeVar, Union, cast
import attr
from dateutil.parser import isoparse
from ..types import UNSET, Unset
# Type variable for the alternate constructor's return type (the concrete subclass).
T = TypeVar("T", bound="PatchedStepInvocation")
@attr.s(auto_attribs=True)
class PatchedStepInvocation:
    """Dynamically removes fields from serializer.
    https://stackoverflow.com/questions/27935558/dynamically-exclude-or-include-a-field-in-django-rest-framework-serializer"""

    # Every field defaults to the UNSET sentinel, which means "absent from the
    # payload" and is distinct from an explicit None/null value.
    url: Union[Unset, str] = UNSET
    id: Union[Unset, int] = UNSET
    step: Union[Unset, str] = UNSET
    release: Union[Unset, str] = UNSET
    invoker: Union[Unset, List[str]] = UNSET
    origin: Union[Unset, Optional[str]] = UNSET
    created: Union[Unset, datetime.datetime] = UNSET
    # Unknown keys from the wire are preserved here (dict-style access below).
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, emitting only fields that were set."""
        url = self.url
        id = self.id
        step = self.step
        release = self.release
        invoker: Union[Unset, List[Any]] = UNSET
        if not isinstance(self.invoker, Unset):
            invoker = self.invoker
        origin = self.origin
        created: Union[Unset, str] = UNSET
        if not isinstance(self.created, Unset):
            # datetimes travel as ISO-8601 strings
            created = self.created.isoformat()
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if url is not UNSET:
            field_dict["url"] = url
        if id is not UNSET:
            field_dict["id"] = id
        if step is not UNSET:
            field_dict["step"] = step
        if release is not UNSET:
            field_dict["release"] = release
        if invoker is not UNSET:
            field_dict["invoker"] = invoker
        if origin is not UNSET:
            field_dict["origin"] = origin
        if created is not UNSET:
            field_dict["created"] = created
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a payload dict; leftover keys are kept as
        additional_properties."""
        d = src_dict.copy()
        url = d.pop("url", UNSET)
        id = d.pop("id", UNSET)
        step = d.pop("step", UNSET)
        release = d.pop("release", UNSET)
        invoker = cast(List[str], d.pop("invoker", UNSET))
        origin = d.pop("origin", UNSET)
        created: Union[Unset, datetime.datetime] = UNSET
        _created = d.pop("created", UNSET)
        if not isinstance(_created, Unset):
            created = isoparse(_created)
        patched_step_invocation = cls(
            url=url,
            id=id,
            step=step,
            release=release,
            invoker=invoker,
            origin=origin,
            created=created,
        )
        patched_step_invocation.additional_properties = d
        return patched_step_invocation

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (unmodeled) keys carried by this instance."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| caltechads/brigid-api-client | brigid_api_client/models/patched_step_invocation.py | patched_step_invocation.py | py | 3,288 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number... |
1401342227 | import pandas as pd
import numpy as np
import pdfplumber
import re
import zipfile
import os
def unzipper(zip):
    """
    unzips zipfile and stores content in the local 'data' folder.
    returns list of paths of the PDF files found in that folder.
    ______
    takes in the zipfile (path or file-like object).
    """
    # NOTE: the parameter shadows the builtin `zip`; kept for interface
    # compatibility with existing callers.
    path = 'data'
    with zipfile.ZipFile(zip, 'r') as zip_ref:
        zip_ref.extractall(path)
    files = os.listdir(path)
    # BUGFIX/generalization: the extension check was case-sensitive
    # ('.pdf'/'.PDF' only), silently skipping names like 'x.Pdf'.
    dirs = [os.path.join(path, file) for file in files if file.lower().endswith('.pdf')]
    return dirs
def pdf_parser(files):
    """
    Returns a df filled with buy and sell information of the input PDF files.
    ________
    Takes in files, a list of directories or bytes files (pdfplumber).
    """
    print('pdf_parser was called.')
    df = pd.DataFrame()
    for file in files:
        with pdfplumber.open(file) as pdf:
            # One text string per PDF page.
            pages = [page.extract_text() for page in pdf.pages]
        # Transaction details start at the first 'Nominale' marker.
        match = [p.start() for p in re.finditer('Nominale', pages[0])]
        trans = pages[0][match[0]:].split('\n')
        # 'Kauf' or 'Verkauf' (buy/sell) from the statement header.
        action = re.findall('Wertpapier Abrechnung (\w+)', pages[0])
        if action:
            # if 'Abrechnung' was found
            val = re.findall(r'Stück (\d+)', trans[1])[0]
            wkn = trans[1].replace(')', '').split('(')[1]
            date = re.findall(r'Datum (\d+.\d+.\d+)', pages[0])[0]
            # assemble df
            data = [val, wkn, date]
            columns = [action[0], 'WKN', 'Datum']
            # scan 2nd page
            if action[0] == 'Verkauf':
                # Sell statements carry the original purchase lots on page 2.
                buy = [p.start() for p in re.finditer('Ber\wcksichtigte Anschaffungsgesch\wfte', pages[1])][0]
                # columns2 = columns + pages[1][buy:].split('\n')[1].split()
                columns2 = columns + ['Geschäft',
                                      'Auftragsnr.',
                                      'Ausführ.-tag',
                                      'Whr./St.',
                                      'Nennwert/Stück',
                                      'AS-Kosten',
                                      'Erlös', 'ant.Ergebnis',
                                      'Land']
                data2 = data + pages[1][buy:].split('\n')[2].split()
                try:
                    new_df = pd.DataFrame([data2], columns=columns2)
                except:
                    # Column count mismatch: fall back to the page-1 columns only.
                    new_df = (pd.DataFrame([data], columns=columns))
                    print(f"couldn't process 2nd page of file: {file}")
            else:
                # Buy statements additionally carry the execution rate.
                rate = re.findall(r'Ausf\whrungskurs(\d+,\d+)', pages[0])[0]
                data.append(rate)
                columns.append('Kurs')
                new_df = pd.DataFrame([data], columns=columns)
            df = pd.concat([df, new_df])
    return df
| moritzgeiger/stockist | stockist/pdfparser.py | pdfparser.py | py | 2,792 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
14205279543 | from django.conf import settings
from django.contrib.auth import (
login as auth_login,
logout as auth_logout,
REDIRECT_FIELD_NAME
)
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.shortcuts import redirect, render, resolve_url, reverse
from django.utils.http import is_safe_url
from django.utils.translation import ugettext_lazy as _
from django.views import View
from clublink.cms.forms import LoginForm
from clublink.clubs.models import Club, Department
from clublink.users.models import User
from clublink.cms.views import CMSView
def login(request):
    """Log a user in to the CMS and redirect to a safe target URL."""
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, '')
    # Ensure the user-originating redirection URL is safe.
    if not is_safe_url(url=redirect_to, host=request.get_host()):
        redirect_to = resolve_url(reverse('home'))
    form = LoginForm()
    if request.user.is_authenticated:
        # Already logged in: avoid a redirect loop back to this login page.
        if redirect_to == request.path:
            return redirect(resolve_url(settings.LOGIN_REDIRECT_URL))
        return redirect(redirect_to)
    elif request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            auth_login(request, form.get_user())
            return redirect(redirect_to)
    # GET, or invalid credentials: re-render the login form.
    return render(request, 'cms/login.jinja', {'form': form})
def logout(request):
    """Log the current user out and send them to the home page."""
    auth_logout(request)
    return redirect(reverse('home'))
class HomeView(CMSView):
    """CMS dashboard landing page."""

    def get(self, request, *args, **kwargs):
        return render(request, 'cms/dashboard/home.jinja')
class ListView(View):
    """Generic paginated list view with bulk actions.

    Subclasses configure `title`, `queryset` (or override `get_queryset`)
    and `list_fields` as (attribute, label) pairs. `actions` maps POSTed
    action names to method names on the view.
    """

    title = ''
    queryset = None
    list_fields = None
    per_page = 10
    actions = (
        ('delete_items', _('Delete selected')),
    )

    def get_queryset(self, request):
        # Hook for subclasses needing request-dependent querysets.
        return self.queryset

    def delete_items(self, request):
        """Bulk-delete the items whose primary keys were POSTed."""
        item_pks = request.POST.get('items', [])
        items = self.get_queryset(request).filter(pk__in=item_pks)
        items.delete()

    def get(self, request):
        per_page = request.GET.get('per_page', self.per_page)
        paginator = Paginator(self.get_queryset(request), per_page)
        page = request.GET.get('page')
        try:
            items = paginator.page(page)
        except PageNotAnInteger:
            items = paginator.page(1)
        except EmptyPage:
            # Past the last page: clamp to the final page.
            page = paginator.num_pages
            items = paginator.page(page)
        return render(request, 'cms/list.jinja', {
            'items': items, 'list_fields': self.list_fields, 'page': page, 'per_page': per_page,
            'title': self.title})

    def post(self, request):
        action = request.POST.get('action')
        for a in self.actions:
            if a[0] == action:
                # BUGFIX: the handler method is the action *name* (a[0]);
                # a[1] is its human-readable label, so getattr(self, a[1])
                # raised AttributeError and bulk actions never ran.
                fn = getattr(self, a[0], None)
                if callable(fn):
                    fn(request)
        return self.get(request)
class UserListView(ListView):
    """Paginated list of user accounts."""

    title = _('User Accounts')
    list_fields = (
        ('username', _('Username')),
        ('first_name', _('First Name')),
        ('last_name', _('Last Name')),
        ('membership_number', _('Membership Number')),
    )
    # Django querysets are lazy, so this is re-evaluated per request.
    queryset = User.objects.all()
class ClubListView(ListView):
    """Paginated list of clubs."""

    title = _('Clubs')
    list_fields = (
        ('name', _('Name')),
        ('slug', _('Slug')),
        ('code', _('Club Number')),
    )
    queryset = Club.objects.all()
class DepartmentListView(ListView):
    """Paginated list of departments."""

    title = _('Departments')
    list_fields = (
        ('name', _('Name')),
        ('number', _('Department Number')),
        ('hidden', _('Hidden')),
    )
    queryset = Department.objects.all()
| reactsuperwizard/clublink_django_certificate | cms/modules/dashboard/views.py | views.py | py | 3,571 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "django.utils.http.is_safe_url",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.resolve_url",
"line_number": 25,
"usage_type":... |
43959272703 | import pandas as pd
import streamlit as st
import plotly.express as px
# Streamlit dashboard for the customer-support ticket dataset.
st.set_page_config(page_title="Tickets Dashboard 📊 ", layout="wide" )

df = pd.read_csv('customer_support_tickets.csv')
print(df)

st.title("📊 Tickets Dashboard")
st.markdown("##")

st.sidebar.header("Please Filter Here:")
# ticket_ID = st.sidebar.multiselect(
#     "select the ticket id:",
#     options = df["Ticket ID"].unique(),
#     default = df["Ticket ID"].unique()
# )
customer_Gender = st.sidebar.multiselect(
    "select the Customer Gender:",
    options = df["Customer Gender"].unique(),
    default = df["Customer Gender"].unique()
)
ticket_Priority = st.sidebar.multiselect(
    "select the Ticket Priority:",
    options = df["Ticket Priority"].unique(),
    default = df["Ticket Priority"].unique()
)
# NOTE(review): the sidebar selections above are never applied — df_selection
# re-reads the full CSV, so filtering currently has no effect. Confirm intent.
df_selection = pd.read_csv("customer_support_tickets.csv")
st.dataframe(df_selection)

# NOTE(review): this sums the Ticket ID values rather than counting rows;
# len(df_selection) is probably the intended "total" — left unchanged here
# to preserve the displayed number.
total_tickets = int(df_selection["Ticket ID"].sum())
Customer_Satisfaction_Rating = round(df_selection["Customer Satisfaction Rating"].mean(), 1)
# BUGFIX: the emoji shortcode is ":star:"; ":star" (missing trailing colon)
# rendered as literal text instead of star icons.
star_rating = ":star:" * int(round(Customer_Satisfaction_Rating, 0))

left_column, right_column = st.columns(2)
with left_column:
    st.subheader("Total Number of Tickets:")
    st.subheader(f"{total_tickets:}")
with right_column:
    st.subheader("Average Customer Rating:")
    st.subheader(f"{Customer_Satisfaction_Rating}{star_rating}")
st.markdown("---")

# Bar chart of how often each ticket type occurs.
Ticket_Type_Count = df_selection["Ticket Type"].value_counts().reset_index()
Ticket_Type_Count.columns = ["Ticket Type", "Count"]
fig_product_sales = px.bar(
    Ticket_Type_Count,
    x="Ticket Type",
    y="Count",
    orientation="v",
    title="<b>Ticket Type Occurrence</b>",
    template="plotly_white",
)
st.plotly_chart(fig_product_sales)
{
"api_name": "streamlit.set_page_config",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.mar... |
10786456409 | import cv2
import numpy as np
import glob
import re
img_array = []
# Sort frames by the first number in each filename so that frame10 comes
# after frame9 (plain lexicographic order would interleave them).
files = sorted(glob.glob('C:\\Users\\Rodrigo\\Documents\\GitHub\\computerVision\\images\\output\\brighter\\*.png'))
files = sorted(files, key=lambda x:float(re.findall("(\d+)",x)[0]))
# NOTE(review): outputPath is never used — 'project.avi' is written to the
# current working directory. Confirm whether it should be joined below.
outputPath = 'C:\\Users\\Rodrigo\\Documents\\GitHub\\computerVision\\images\\output\\videos\\'
for filename in files:
    print(filename)
    img = cv2.imread(filename)
    height, width, layers = img.shape
    size = (width,height)
    img_array.append(img)
# NOTE(review): if no files matched, `size` is undefined here (NameError).
# Assemble the frames into a 60 fps DIVX-encoded AVI.
out = cv2.VideoWriter('project.avi',cv2.VideoWriter_fourcc(*'DIVX'), 60, size)
for i in range(len(img_array)):
    out.write(img_array[i])
out.release()
{
"api_name": "glob.glob",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 19... |
6623708133 | '''This program is run separately and manually to add new entries in the
csv file that is not in the database, to the database.
'''
import csv
import sqlite3

db_connection = sqlite3.connect('events.db')
db_cursor = db_connection.cursor()
# BUGFIX: the table was previously DROPped and recreated on every run, wiping
# all existing rows — which contradicts the module docstring ("add new
# entries ... not in the database") and made the serial check below useless.
# Create the table only if it does not exist yet.
db_cursor.execute('''CREATE TABLE IF NOT EXISTS events (
    serial int NOT NULL PRIMARY KEY,
    eventdate date,
    name varchar,
    event varchar,
    priority int)'''
)
with open('events.csv', newline='') as csvfile:
    csvobj = csv.reader(csvfile)
    # Serial numbers already stored in the database.
    serial_nums = [x[0]
                   for x in db_cursor.execute('SELECT serial FROM events')]
    for item in csvobj:
        # Insert only CSV rows whose serial is not present yet.
        if int(item[0]) not in serial_nums:
            db_cursor.execute(
                "INSERT INTO events VALUES (?, ?, ?, ?, ?)", item)
db_connection.commit()
db_connection.close()
| secondspass/emreminder | addtodb.py | addtodb.py | py | 845 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 20,
"usage_type": "call"
}
] |
8675293214 | import aiohttp
# Hourly-forecast endpoint of the mlogcn weather API.
API_URL = "http://gfapi.mlogcn.com/weather/v001/hour"
API_KEY = ""  # Replace with your API key
class WeatherForecast:
    """Fetches 24-hour forecasts for every station district and stores them.

    `db_manager` is expected to expose an async connection `pool` (aiomysql
    style) and an `insert_weather_forecast(district_code, result)` coroutine.
    """

    def __init__(self, db_manager):
        self.db_manager = db_manager

    async def get_unique_district_codes(self):
        """Return the distinct district codes found in the stations table."""
        async with self.db_manager.pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute("SELECT DISTINCT district_code FROM stations")
                result = await cur.fetchall()
                return [row[0] for row in result]

    async def fetch_weather_data(self, district_code):
        """GET the 24-hour forecast for one district; None on HTTP failure."""
        params = {"areacode": district_code, "hours": 24, "key": API_KEY}
        async with aiohttp.ClientSession() as session:
            async with session.get(API_URL, params=params) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    print(f"Error fetching data for district_code: {district_code}")
                    return None

    async def get_forecasts(self):
        """Fetch forecasts for all districts; returns {district_code: payload}."""
        district_codes = await self.get_unique_district_codes()
        forecasts = {}
        for district_code in district_codes:
            forecast_data = await self.fetch_weather_data(district_code)
            if forecast_data:
                forecasts[district_code] = forecast_data
        return forecasts

    async def store_forecasts(self, forecasts):
        """Persist each valid forecast (API status == 0) via the db manager."""
        for district_code, forecast in forecasts.items():
            if forecast["status"] == 0:  # status 0 means the payload is valid
                await self.db_manager.insert_weather_forecast(
                    str(district_code), forecast["result"]
                )
| hieda-raku/forecast-project | src/forecast_request.py | forecast_request.py | py | 1,755 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aiohttp.ClientSession",
"line_number": 20,
"usage_type": "call"
}
] |
43199315422 | import bpy
from bpy.props import StringProperty, BoolProperty
from ... utils.collection import get_groups_collection, get_scene_collections
class CreateCollection(bpy.types.Operator):
    """Operator: create a new collection under the active layer collection."""
    bl_idname = "machin3.create_collection"
    bl_label = "MACHIN3: Create Collection"
    bl_description = "description"
    bl_options = {'REGISTER', 'UNDO'}

    def update_name(self, context):
        # Flag duplicate names so draw() can warn before the user confirms.
        name = self.name.strip()
        col = bpy.data.collections.get(name)
        if col:
            self.isduplicate = True
        else:
            self.isduplicate = False

    name: StringProperty("Collection Name", default="", update=update_name)
    isduplicate: BoolProperty("is duplicate name")

    def draw(self, context):
        layout = self.layout
        column = layout.column()
        column.prop(self, "name", text="Name")
        if self.isduplicate:
            column.label(text="Collection '%s' exists already" % (self.name.strip()), icon='ERROR')

    def invoke(self, context, event):
        # Pop up the name-entry dialog.
        wm = context.window_manager
        return wm.invoke_props_dialog(self, width=300)

    def execute(self, context):
        name = self.name.strip()
        col = bpy.data.collections.new(name=name)
        # Link the new collection under the currently active one.
        acol = context.view_layer.active_layer_collection.collection
        acol.children.link(col)
        self.name = ''
        return {'FINISHED'}
class RemoveFromCollection(bpy.types.Operator):
    bl_idname = "machin3.remove_from_collection"
    bl_label = "MACHIN3: Remove from Collection"
    bl_description = "Remove Selection from a Collection"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        view = context.space_data
        return view.type == 'VIEW_3D' and context.selected_objects

    def execute(self, context):
        # The builtin operator works from the active object, so make sure
        # the active object is part of the current selection.
        if context.active_object not in context.selected_objects:
            context.view_layer.objects.active = context.selected_objects[0]
        bpy.ops.collection.objects_remove('INVOKE_DEFAULT')
        return {'FINISHED'}
class AddToCollection(bpy.types.Operator):
    bl_idname = "machin3.add_to_collection"
    bl_label = "MACHIN3: Add to Collection"
    bl_description = "Add Selection to a Collection"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        view = context.space_data
        if view.type == 'VIEW_3D' and context.selected_objects:
            # In local view the builtin operator needs an Outliner to override into.
            if view.local_view:
                for area in context.screen.areas:
                    if area.type == 'OUTLINER':
                        return True
            else:
                return True

    def execute(self, context):
        view = context.space_data
        if view.local_view:
            # Run the operator in the Outliner's context (poll guarantees one exists).
            for area in context.screen.areas:
                if area.type == 'OUTLINER':
                    override = {'area': area}
                    break
            bpy.ops.object.link_to_collection(override, 'INVOKE_DEFAULT')
        else:
            bpy.ops.object.link_to_collection('INVOKE_DEFAULT')
        return {'FINISHED'}
class MoveToCollection(bpy.types.Operator):
    # NOTE: mirrors AddToCollection, but moves (relinks) instead of adding.
    bl_idname = "machin3.move_to_collection"
    bl_label = "MACHIN3: Move to Collection"
    bl_description = "Move Selection to a Collection"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        view = context.space_data
        if view.type == 'VIEW_3D' and context.selected_objects:
            # In local view the builtin operator needs an Outliner to override into.
            if view.local_view:
                for area in context.screen.areas:
                    if area.type == 'OUTLINER':
                        return True
            else:
                return True

    def execute(self, context):
        view = context.space_data
        if view.local_view:
            # Run the operator in the Outliner's context (poll guarantees one exists).
            for area in context.screen.areas:
                if area.type == 'OUTLINER':
                    override = {'area': area}
                    break
            bpy.ops.object.move_to_collection(override, 'INVOKE_DEFAULT')
        else:
            bpy.ops.object.move_to_collection('INVOKE_DEFAULT')
        return {'FINISHED'}
class Purge(bpy.types.Operator):
    bl_idname = "machin3.purge_collections"
    bl_label = "MACHIN3: Purge Collections"
    bl_description = "Remove empty Collections"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # A collection is empty when it has neither children nor objects.
        for col in get_scene_collections(context.scene):
            if not any([col.children, col.objects]):
                print("Removing collection '%s'." % (col.name))
                bpy.data.collections.remove(col, do_unlink=True)
        return {'FINISHED'}
class Select(bpy.types.Operator):
    bl_idname = "machin3.select_collection"
    bl_label = "MACHIN3: (De)Select Collection"
    bl_description = "Select Collection Objects\nSHIFT: Select all Collection Objects\nALT: Deselect Collection Objects\nSHIFT+ALT: Deselect all Collection Objects\nCTRL: Toggle Viewport Selection of Collection Objects"
    bl_options = {'REGISTER'}

    name: StringProperty()
    force_all: BoolProperty()

    def invoke(self, context, event):
        # Fall back to the scene's master collection if `name` is unknown.
        col = bpy.data.collections.get(self.name, context.scene.collection)
        # SHIFT (or force_all) includes objects of nested child collections.
        objects = col.all_objects if event.shift or self.force_all else col.objects
        if objects:
            # CTRL toggles based on the first object's current lock state.
            hideselect = objects[0].hide_select
        if col:
            for obj in objects:
                if event.alt:
                    obj.select_set(False)
                elif event.ctrl:
                    # Only toggle selectability of the collection's direct members.
                    if obj.name in col.objects:
                        obj.hide_select = not hideselect
                else:
                    obj.select_set(True)
        self.force_all = False
        return {'FINISHED'}
| AtixCG/Universal-3D-Shortcuts | Blender/With Addons/scripts/addons/MACHIN3tools/ui/operators/collection.py | collection.py | py | 5,714 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "bpy.types",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "bpy.data.collections.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.String... |
72387707234 | from collections import deque
def solution(board: list[str]):
    """Ricochet-robot BFS: minimum number of slides from 'R' to 'G', or -1.

    The robot slides in a straight line until it hits a wall or a 'D' cell.
    `dist` stores 1 + the number of slides used to reach a cell (0 = unvisited).
    """
    rows, cols = len(board), len(board[0])
    moves = ((1, 0), (-1, 0), (0, 1), (0, -1))
    dist = [[0] * cols for _ in range(rows)]
    queue = deque()
    # Seed the search from the robot's starting cell.
    for r in range(rows):
        for c in range(cols):
            if board[r][c] == 'R':
                queue.append((r, c))
                dist[r][c] = 1
    while queue:
        r, c = queue.popleft()
        if board[r][c] == 'G':
            return dist[r][c] - 1
        for dr, dc in moves:
            nr, nc = r, c
            # Slide until leaving the board or hitting an obstacle, then back up.
            while True:
                nr += dr
                nc += dc
                if not (0 <= nr < rows and 0 <= nc < cols) or board[nr][nc] == 'D':
                    nr -= dr
                    nc -= dc
                    break
            if dist[nr][nc] == 0:
                dist[nr][nc] = dist[r][c] + 1
                queue.append((nr, nc))
    return -1
# Demo runs on the official example boards (expected output: 7 and -1).
print(solution(["...D..R", ".D.G...", "....D.D", "D....D.", "..D...."]))
print(solution([".D.R", "....", ".G..", "...D"]))
{
"api_name": "collections.deque",
"line_number": 12,
"usage_type": "call"
}
] |
21520963102 | from jinja2 import Environment, FileSystemLoader
from netaddr import IPAddress, IPNetwork
from yamlreader import yaml_load
from nameko.rpc import rpc, RpcProxy
import ast
from nameko.standalone.rpc import ClusterRpcProxy
# RabbitMQ broker used by the nameko RPC proxies below.
CONFIG = {'AMQP_URI': "amqp://guest:guest@localhost:5672"}
def check_ip_network(ip, network):
    """Return True if `ip` lies inside `network` (CIDR string), else False.

    The literal string 'all' is the intent language's wildcard and is treated
    as "no specific address", so it never matches any network.
    """
    # Stdlib `ipaddress` replaces the netaddr dependency for this check;
    # strict=False mirrors netaddr's tolerance of host bits in the network
    # (e.g. '192.168.1.1/24').
    import ipaddress
    if ip == 'all':
        return False
    return ipaddress.ip_address(ip) in ipaddress.ip_network(network, strict=False)
def check_values(dict_intent):
    """Validate that `dict_intent` carries every parameter its intent type requires.

    Returns True when complete, otherwise an error string naming either the
    unsupported intent type or the first missing parameter.
    """
    required = {
        'acl': ('from', 'to', 'rule', 'traffic', 'apply'),
        'nat_1to1': ('from', 'to', 'protocol', 'apply'),
        'traffic_shaping': ('from', 'to', 'with', 'traffic', 'apply'),
        'dst_route': ('from', 'to', 'apply'),
        'nat_nto1': ('from', 'to', 'apply'),
        'url_filter': ('name', 'from', 'to', 'rule', 'apply'),
    }
    intent_type = dict_intent['intent_type']
    if intent_type not in required:
        return "OPENFLOW TRANSLATOR: Intent type not supported"
    for parameter in required[intent_type]:
        if parameter not in dict_intent:
            return 'OPENFLOW TRANSLATOR: ' + parameter.upper() + ' parameter is missing'
    return True
def define_order(dict_intent):
    """Compute the OpenFlow priority for an intent and sync the rule log file.

    Rules live one-per-line (as repr'd dicts) in rules/openflow_acls or
    rules/openflow_ts; priority = 65535 - line position, so earlier lines win.
    On 'insert' the intent line is added (twice for traffic_shaping); on any
    other apply value the matching line is removed. Returns the priority, 0
    when the before/after anchor name was not found, or an error string for
    unsupported intent types. Side effect: sets dict_intent['order'].
    """
    line = 0
    response = 0
    if dict_intent['intent_type'] == 'acl':
        file = 'rules/openflow_acls'
    elif dict_intent['intent_type'] == 'traffic_shaping':
        file = 'rules/openflow_ts'
    else:
        return "OPENFLOW MODULE: Order not found"
    with open(file) as archive:
        if dict_intent['apply'] == 'insert':
            if 'after' in dict_intent:
                if dict_intent['after'] == 'all-intents':
                    # Append after the last existing rule line.
                    for line_num, l in enumerate(archive, 0):
                        line = line_num
                    response = 65535 - line
                    line = line + 1
                else:
                    # Insert right after the named rule.
                    for line_num, l in enumerate(archive, 0):
                        if "'name': '" + dict_intent['after'] + "'" in l:
                            line = line_num + 1
                            response = 65535 - line_num
            elif 'before' in dict_intent:
                if dict_intent['before'] == 'all-intents':
                    # Insert at the top (line 0 is the log header comment).
                    line = 1
                    response = 65535
                else:
                    # Insert right before the named rule.
                    for line_num, l in enumerate(archive, 0):
                        if "'name': '" + dict_intent['before'] + "'" in l:
                            line = line_num
                            response = 65535 - (line_num - 1)
        else:
            # Removal: locate this intent's own line by name.
            for line_num, l in enumerate(archive, 0):
                if "'name': '" + dict_intent['name'] + "'" in l:
                    line = line_num
                    response = 65535 - line_num
    archive.close()
    if line != 0:
        # Rewrite the rule log with the line inserted or removed.
        archive = open(file)
        lines = archive.readlines()
        archive.close()
        dict_intent['order'] = response
        if dict_intent['apply'] == 'insert':
            lines.insert(line, str(dict_intent) + "\n")
            # traffic_shaping rules occupy two adjacent lines.
            if dict_intent['intent_type'] == 'traffic_shaping':
                lines.insert(line + 1, str(dict_intent) + "\n")
        else:
            lines.pop(line)
            if dict_intent['intent_type'] == 'traffic_shaping':
                lines.pop(line - 1)
        archive = open(file, 'w')
        archive.writelines(lines)
        archive.close()
    return response
def process_acl(dict_intent):
    """Translate an ACL intent into ovs-ofctl flow rules.

    Builds the OpenFlow match arguments in dict_intent['args'], computes the
    rule's priority via define_order(), renders the jinja2 template, and
    rewrites the rule log so every other rule's priority stays consistent.
    Returns the rendered rule for this intent, or an error string if the
    order could not be determined.
    """
    # loading YAML with openflow settings
    config = yaml_load('openflow_config.yml')
    dict_intent['hostname'] = config['hostname']
    dict_intent['args'] = ''
    # from/to: append optional masks, then build nw_src/nw_dst matches
    # ('all' becomes a match-everything 0.0.0.0/0.0.0.0).
    if 'from_mask' in dict_intent:
        dict_intent['from'] = dict_intent['from'] + '/' + dict_intent['from_mask']
    if 'to_mask' in dict_intent:
        dict_intent['to'] = dict_intent['to'] + '/' + dict_intent['to_mask']
    if dict_intent['from'] != 'all':
        dict_intent['args'] = dict_intent['args'] + 'nw_src=' + dict_intent['from'] + ','
    else:
        dict_intent['args'] = dict_intent['args'] + 'nw_src=0.0.0.0/0.0.0.0,'
    if dict_intent['to'] != 'all':
        dict_intent['args'] = dict_intent['args'] + 'nw_dst=' + dict_intent['to'] + ','
    else:
        dict_intent['args'] = dict_intent['args'] + 'nw_dst=0.0.0.0/0.0.0.0,'
    # translate protocol/port ('tcp/80' style) into OpenFlow match fields
    if dict_intent['traffic'] == 'icmp':
        dict_intent['args'] = dict_intent['args'] + 'nw_proto=1,icmp_type=8,'
    elif dict_intent['traffic'] != 'all':
        protocol, port = dict_intent['traffic'].split('/')
        dict_intent['traffic'] = protocol
        dict_intent['traffic_port'] = 'eq ' + port
        if protocol == 'tcp':
            dict_intent['args'] = dict_intent['args'] + 'nw_proto=6,tcp_dst=' + port + ','
        elif protocol == 'udp':
            dict_intent['args'] = dict_intent['args'] + 'nw_proto=17,udp_dst=' + port + ','
    # 'block' -> drop action, anything else -> normal forwarding
    if dict_intent['rule'] == 'block':
        dict_intent['rule'] = 'drop'
    else:
        dict_intent['rule'] = 'normal'
    order = define_order(dict_intent)
    if order != 0:
        dict_intent['order'] = order
    else:
        return 'OPENFLOW TRANSLATOR - ERROR ORDER: It was not possible to determine the order by name in order parameter'
    file_loader = FileSystemLoader('.')
    env = Environment(loader=file_loader)
    template = env.get_template('openflow_template.j2')
    response = template.render(dict_intent)
    # Full switch config: flush flows, then re-render every logged rule.
    output = 'ovs-ofctl del-flows ' + dict_intent['hostname'] + '\n'
    output = output + response + '\n'
    lines = ['# log acl rules \n']
    file = 'rules/openflow_acls'
    with open(file) as archive:
        for line in archive:
            # Skip the header comment and blank lines.
            if '#' not in line[0:5] and line[0:1] != "\n":
                dict_rule = ast.literal_eval(line)
                if int(dict_rule['order']) > int(dict_intent['order']):
                    # Higher-priority rules are unaffected: re-render as-is.
                    tmp = template.render(dict_rule)
                    output = output + tmp + '\n'
                    lines.append(str(dict_rule) + '\n')
                elif int(dict_rule['order']) <= int(dict_intent['order']):
                    if dict_rule['name'] == dict_intent['name']:
                        # This intent's own log line; already rendered above.
                        lines.append(str(dict_rule) + '\n')
                    else:
                        # Shift priorities of rules below the insertion/removal point.
                        if dict_intent['apply'] == 'insert':
                            dict_rule['order'] = int(dict_rule['order']) - 1
                        else:
                            dict_rule['order'] = int(dict_rule['order']) + 1
                        tmp = template.render(dict_rule)
                        output = output + tmp + '\n'
                        lines.append(str(dict_rule) + '\n')
    archive.close()
    archive = open(file, 'w')
    archive.writelines(lines)
    archive.close()
    # Catch-all lowest-priority rule: forward normally.
    output = output + '\novs-ofctl add-flow ' + dict_intent['hostname'] + ' priority=0,action=normal'
    #with ClusterRpcProxy(CONFIG) as rpc_connect:
    #    rpc_connect.linux_connector.apply_config(config['ip_manage'], config['ssh_port'], config['username'], config['password'],
    #                                             config['device_type'], output, 'openflow')
    return response
def process_nat11(dict_intent):
    """Render a one-to-one NAT intent into OpenFlow configuration text.

    Reads the device hostname from the YAML settings, injects it into the
    intent, and returns the jinja2-rendered rule. Remote application over
    RPC/SSH is currently disabled.
    """
    settings = yaml_load('openflow_config.yml')
    dict_intent['hostname'] = settings['hostname']
    # render through the shared jinja2 template in the working directory
    jinja_env = Environment(loader=FileSystemLoader('.'))
    rendered = jinja_env.get_template('openflow_template.j2').render(dict_intent)
    #with ClusterRpcProxy(CONFIG) as rpc_connect:
    #    rpc_connect.linux_connector.apply_config(config['ip_manage'], config['ssh_port'], config['username'], config['password'],
    #                                             config['device_type'], output, 'openflow')
    return rendered
def process_traffic_shaping(dict_intent):
    """Render a traffic-shaping intent into OpenFlow configuration text.

    Scales the requested rate ('with') by 1000 and derives 'burst' as one
    tenth of it (units presumably kbit for OVS -- TODO confirm), then renders
    the jinja2 template. Remote application over RPC/SSH is disabled.
    """
    # converting throughput and rate
    dict_intent['with'] = dict_intent['with'] * 1000
    dict_intent['burst'] = int(dict_intent['with'] / 10)
    settings = yaml_load('openflow_config.yml')
    dict_intent['hostname'] = settings['hostname']
    jinja_env = Environment(loader=FileSystemLoader('.'))
    rendered = jinja_env.get_template('openflow_template.j2').render(dict_intent)
    #with ClusterRpcProxy(CONFIG) as rpc_connect:
    #    rpc_connect.linux_connector.apply_config(config['ip_manage'], config['ssh_port'], config['username'],
    #                                             config['password'],
    #                                             config['device_type'], output, 'openflow')
    return rendered
def process_dst_route(dict_intent):
    """Placeholder: destination-route intents are not translated yet."""
    message = 'OPENFLOW TRANSLATOR: Route is not yet supported'
    return message
def process_natn1(dict_intent):
    """Placeholder: N-to-1 NAT intents are not translated yet."""
    message = 'OPENFLOW TRANSLATOR: NAT Nto1 is not yet supported'
    return message
def process_url_filter(dict_intent):
    """Placeholder: URL-filter intents are not translated yet."""
    message = 'OPENFLOW TRANSLATOR: URL Filter is not yet supported'
    return message
class OpenflowService:
    """
    Openflow Service
    Microservice that translates the information sent by the api to commands applicable in Openflow devices
    Receive: this function receives a python dictionary, with at least the following information for each processing
    Return:
    - The microservice activates the application module via ssh and returns the result. If any incorrect
    information in the dictionary, the error message is returned
    Translations for NAT1toN and Route have not yet been implemented
    """
    # nameko service name under which this translator is registered
    name = "openflow_translator"
    # NOTE(review): attribute name 'zipcode_rpc' looks copy-pasted from another
    # service; presumably a proxy to a sibling translator -- confirm it is used
    zipcode_rpc = RpcProxy('openflow_service_translator')
    @rpc
    def translate_intent(self, dict_intent):
        """Validate the intent dict and dispatch it to the per-type translator.

        Returns the translator output, the validation error string from
        check_values(), or an error message when 'intent_type' is missing.
        """
        if 'intent_type' in dict_intent:
            # check_values() returns True on success, otherwise an error string
            output = check_values(dict_intent)
            if output is True:
                # dispatch on intent type; each handler renders its own config
                if dict_intent['intent_type'] == 'acl':
                    return process_acl(dict_intent)
                elif dict_intent['intent_type'] == 'nat_1to1':
                    return process_nat11(dict_intent)
                elif dict_intent['intent_type'] == 'traffic_shaping':
                    return process_traffic_shaping(dict_intent)
                elif dict_intent['intent_type'] == 'dst_route':
                    return process_dst_route(dict_intent)
                elif dict_intent['intent_type'] == 'nat_nto1':
                    return process_natn1(dict_intent)
                elif dict_intent['intent_type'] == 'url_filter':
                    return process_url_filter(dict_intent)
            else:
                return output
        else:
            return 'OPENFLOW TRANSLATOR: the key "intent_type" is unavailable in the dictionary'
| mmfiorenza/fwunify | services/translators/openflow/openflow.py | openflow.py | py | 10,913 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "netaddr.IPAddress",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "netaddr.IPNetwork",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "yamlreader.yaml_load",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "jinja2.File... |
1026582185 | #!pipenv run python3
from pprint import pprint
from PyInquirer import prompt, Separator
from git import Repo
import os
import sys
def get_git_root():
    """Return the top-level directory of the git repo containing the CWD.

    Exits the process with a message when the current directory is not
    inside a git repository.
    """
    try:
        git_repo = Repo(os.getcwd(), search_parent_directories=True)
        return git_repo.git.rev_parse("--show-toplevel")
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps those propagating as expected
        print("Not currently in a git repo")
        sys.exit()
# Open the repository containing the current directory; get_git_root() exits
# the process when we are not inside one.
repo = Repo(get_git_root())
assert not repo.bare
# PyInquirer checkbox prompt: one entry per local branch, labelled with its
# remote tracking branch (or None when untracked).
questions = [
    {
        'type': 'checkbox',
        # 'qmark': '😃',
        'message': 'Select Branches to Delete',
        'name': 'branches',
        'choices': [
            {
                'name': f"{b} -> {b.tracking_branch()}",
                'value': b
            }
            for b in repo.branches],
        # NOTE(review): 'validate' on checkbox questions is not enforced by all
        # PyInquirer versions -- TODO confirm it actually blocks empty answers
        'validate': lambda answer: 'You must choose at least one topping.' \
        if len(answer) == 0 else True
    }
]
answers = prompt(questions)
print("Deleted branches:")
# answers has no 'branches' key when the prompt is cancelled (e.g. Ctrl+C),
# hence the KeyError guard around the deletion loop.
try:
    for b in answers['branches']:
        b.delete(repo, b)
        print(b)
except KeyError:
    pass
| dmaahs2017/git-utils | gbdm/__main__.py | __main__.py | py | 1,055 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "git.Repo",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "git.Repo",
"line_number": 17,
"us... |
35196253380 | from direct.gui.OnscreenImage import OnscreenImage
from pandac.PandaModules import TransparencyAttrib, VBase3
from direct.showbase.DirectObject import DirectObject
from direct.gui.DirectGui import DirectButton, DGG
from direct.interval.IntervalGlobal import Sequence, LerpHprInterval
from gui.Popup import Popup
from gui.AlertPopup import AlertPopup
from gui.FullscreenPopup import FullscreenPopup
from event.ClientEvent import ServerJoinResponseEvent
import Settings
import SettingsController
import Globals
"""
egg-texture-cards -o Button_Offline.egg -p 500,75 offline.png offline_over.png
egg-texture-cards -o Button_Multiplayer.egg -p 500,75 multiplayer.png multiplayer_over.png
egg-texture-cards -o Button_Options.egg -p 500,75 options.png options_over.png
egg-texture-cards -o Button_Exit.egg -p 500,75 exit.png exit_over.png
"""
class MainMenu(DirectObject):
    """Main menu screen: background, title, four buttons and popup handling.

    Builds its scene graph under aspect2d and reacts to server-join events.
    """
    def __init__(self):
        # all menu nodes live under this one node so the menu can be
        # shown/hidden as a unit
        self.node = aspect2d.attachNewNode('MainMenu')
        self.buttons = []
        self.LoadContent()
        self.SetupEventHandlers()
    def SetupEventHandlers(self):
        """Listen for the server's join response to stop the spinner."""
        self.accept(ServerJoinResponseEvent.EventName, self.OnServerJoinResponseEvent)
    def LoadContent(self):
        """Create background, title, loading spinner and the four menu buttons."""
        bg = OnscreenImage(image = 'Assets/Images/Menus/MainMenu/background.png', scale = (2, 1, 1))
        bg.setTransparency(TransparencyAttrib.MAlpha)
        bg.reparentTo(self.node)
        bg.setBin('fixed', 1)  # draw the background behind everything else
        title = OnscreenImage(image = 'Assets/Images/Menus/MainMenu/title.png')
        title.setTransparency(TransparencyAttrib.MAlpha)
        title.reparentTo(self.node)
        # spinner sits in the bottom-right corner; hidden until a join starts
        self.spinner = OnscreenImage(image = 'Assets/Images/Menus/MainMenu/loadingSpinner.png', pos = (-0.15, 1, 0.15), scale = 128.0/1024.0)
        self.spinner.setTransparency(TransparencyAttrib.MAlpha)
        self.spinner.reparentTo(base.a2dBottomRight)
        self.spinner.setBin('gui-popup', 0)
        self.spinner.hide()
        # button eggs are texture cards generated with egg-texture-cards
        # (see the comment at the top of this file)
        self.LoadButton('Button_Offline', 'offline', 'offline_over', 0, 0.2, self.OnButtonClicked, ['offline'])
        self.LoadButton('Button_Multiplayer', 'multiplayer', 'multiplayer_over', 0, 0, self.OnButtonClicked, ['multiplayer'])
        self.LoadButton('Button_Options', 'options', 'options_over', 0, -0.2, self.OnButtonClicked, ['options'])
        self.LoadButton('Button_Exit', 'exit', 'exit_over', 0, -0.4, self.OnButtonClicked, ['exit'])
    def LoadButton(self, egg, up, over, x, y, cmd, args):
        """Build a DirectButton from an egg texture card and register it.

        egg: egg file name; up/over: texture names for the idle/hover states;
        x, y: position; cmd/args: click callback and its extra arguments.
        """
        maps = loader.loadModel("Assets/Images/Menus/MainMenu/%s" % (egg))
        # geom order is (normal, pressed, rollover, disabled)
        b = DirectButton(geom = (maps.find('**/%s' % (up)),
                                 maps.find('**/%s' % (over)),
                                 maps.find('**/%s' % (over)),
                                 maps.find('**/%s' % (up))),
                         command = cmd,
                         extraArgs = args,
                         pressEffect = 0,
                         relief = None,
                         rolloverSound = None,
                         clickSound = None,
                         pos = (x, 1, y),
                         scale = (1, 1, 75.0/500.0))
        b.reparentTo(self.node)
        self.buttons.append(b)
    def OnButtonClicked(self, buttonText):
        """Dispatch a button click: open the matching libRocket document
        (offline/multiplayer/options) or the exit confirmation popup."""
        if(buttonText == 'multiplayer'):
            Globals.ROCKET_CONTEXT.LoadDocument('Assets/libRocket/multiplayer.rml').Show()
            self.DisableButtons()
            self.acceptOnce('multiplayerPopupClose', self.OnMultiplayerClose)
        elif(buttonText == 'options'):
            Globals.ROCKET_CONTEXT.LoadDocument('Assets/libRocket/options.rml').Show()
            self.DisableButtons()
            self.acceptOnce('optionsPopupClose', self.OnOptionsClose)
        elif(buttonText == 'exit'):
            self.CreateAlertPopup('Exit Game', 'Do you really want to exit?', self.OnExitPopupOkay, self.OnExitPopupCancel)
        elif(buttonText == 'offline'):
            Globals.ROCKET_CONTEXT.LoadDocument('Assets/libRocket/offline.rml').Show()
            self.DisableButtons()
            self.acceptOnce('offlinePopupClose', self.OnOfflineClose)
    def CreatePopup(self, title, fields, values, onOkay, onCancel):
        """Open a generic form popup and disable the menu behind it."""
        p = Popup(title, fields, values, onOkay, onCancel)
        self.OnPopupCreated(p)
    def CreateAlertPopup(self, title, text, onOkay, onCancel):
        """Open a yes/no alert popup and disable the menu behind it."""
        p = AlertPopup(title, text, onOkay, onCancel)
        self.OnPopupCreated(p)
    def CreateFullScreenPopup(self, title, fields, values, onOkay, onCancel):
        """Open a fullscreen form popup and disable the menu behind it."""
        p = FullscreenPopup(title, fields, values, onOkay, onCancel)
        self.OnPopupCreated(p)
    def OnOfflineClose(self, accept):
        """Offline dialog closed; on accept, save settings and start offline play."""
        if(accept):
            SettingsController.SaveClientSettings()
            # deferred one frame so the GUI can tear down before the game starts
            taskMgr.doMethodLater(0.1, messenger.send, 'as', ['startOffline'])
        self.EnableButtons()
    def OnOptionsClose(self, accept):
        """Options dialog closed; persist settings when accepted."""
        if(accept):
            SettingsController.SaveClientSettings()
        self.EnableButtons()
    def OnMultiplayerClose(self, accept):
        """Multiplayer dialog closed; on accept, save settings and start joining."""
        if(accept):
            SettingsController.SaveClientSettings()
            self.StartLoadingSpinner()
            self.DisableButtons()
            taskMgr.doMethodLater(0.1, messenger.send, 'as1', ['mainMenuMulti'])
        else:
            self.EnableButtons()
    def DisableButtons(self):
        """Grey out every menu button (used while a popup is open)."""
        for b in self.buttons:
            b['state'] = DGG.DISABLED
    def EnableButtons(self):
        """Re-enable every menu button."""
        for b in self.buttons:
            b['state'] = DGG.NORMAL
    def OnAlertPopupClose(self, popup):
        self.DestroyPopup(popup)
    def OnExitPopupOkay(self, popup):
        """Exit confirmed: close the popup and broadcast the exit event."""
        self.DestroyPopup(popup)
        messenger.send('mainMenuExit')
    def OnExitPopupCancel(self, popup):
        self.DestroyPopup(popup)
    def OnPopupCreated(self, popup):
        self.DisableButtons()
    def DestroyPopup(self, popup):
        """Tear down a popup and restore the menu buttons."""
        popup.Destroy()
        del popup
        self.EnableButtons()
    def Hide(self):
        self.node.hide()
    def Show(self):
        self.node.show()
    def StartLoadingSpinner(self):
        """Show the spinner and loop a two-phase 360-degree roll animation."""
        self.spinner.show()
        self.spinSequence = Sequence(LerpHprInterval(self.spinner, 2, VBase3(0, 0, 180), VBase3(0, 0, 0)),
                                     LerpHprInterval(self.spinner, 2, VBase3(0, 0, 360), VBase3(0, 0, 180)))
        self.spinSequence.loop()
    def StopLoadingSpinner(self):
        """Finish the spin animation and hide the spinner."""
        self.spinSequence.finish()
        self.spinner.hide()
    def OnServerJoinResponseEvent(self, event):
        """Join attempt resolved: stop the spinner; on failure show the reason."""
        self.StopLoadingSpinner()
        self.EnableButtons()
        if(not event.GetResponse()):
            p = AlertPopup('Join Game Failed', event.GetReason(), self.ClosePopup, self.ClosePopup)
            self.OnPopupCreated(p)
    def ClosePopup(self, popup):
        self.DestroyPopup(popup)
| czorn/Modifire | net/modifire/gui/menus/MainMenu.py | MainMenu.py | py | 7,038 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "direct.showbase.DirectObject.DirectObject",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "event.ClientEvent.ServerJoinResponseEvent.EventName",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "event.ClientEvent.ServerJoinResponseEvent",
... |
73476461794 | import os
from dotenv import load_dotenv
from dataclasses import dataclass, asdict
import os
import openai
from metaphor_python import Metaphor
import html2text
from gtts import gTTS, lang
# set up environment
load_dotenv()
# Class represents information about the articles collected (title, author, published date, content, url)
@dataclass
class Article:
    """One collected article: bibliographic fields plus content and summary."""
    title: str
    author: str
    published_date: str
    content: str
    summary: str
    url: str

    def dict(self) -> dict:
        """Return this article as a plain dictionary."""
        as_mapping = asdict(self)
        return as_mapping
# Class represents the text to speech service (find articles, translation, SSML conversion, audio generation)
class Audicle:
    """Text-to-speech article service.

    Finds articles via Metaphor, summarizes/cleans text with OpenAI chat
    completions, and renders the result to an MP3 with gTTS.
    """
    # System prompts for the OpenAI chat completions used below
    generate_query_message = "You are a helpful assistant that generates search queries based on user questions. Only generate one search query. The query should help find articles related to the users query. If the user uses abbreviations, expand these to full phrases. If the user inputs something you don’t understand, look for the articles closest to their search."
    summarize_text_message = "You are a helpful assistant that summarizes the content of a webpage. Summarize the user's input. Only output the summary."
    text_to_ssml_message = "You are a helpful assistant that converts extracted HTML text to SSML using Google Cloud's SSML reference. Convert the user's input while retaining the semantic meaning. Only output the SSML text."
    clean_message = "You are a helpful assistant that makes sure that sentences make sense logically and grammatically based on user input. Make sure the sentence flows correctly and is grammatically correct. Wherever it says null, ignore these fields and continue."
    # gTTS language table, lower-cased full names (e.g. 'english') for matching
    # the user's typed language; built once at class-creation time
    supported_lang = {k: v.lower() for k, v in lang.tts_langs().items()}
    def __init__(self):
        # API keys come from the environment (.env loaded at module import)
        self.openai_key = os.getenv('OPENAI_API_KEY')
        openai.api_key = self.openai_key
        self.metaphor = Metaphor(os.getenv('METAPHOR_API_KEY'))
    def main(self) -> None:
        """Interactive flow: ask topic + language, fetch one article and three
        similar ones, then produce the audio file and a closing message."""
        # Ask user prelim questions for inputs
        topic = input("Hello! Thanks for using Audicle. What kind of article do you want to read today?\n")
        # Make sure requested language is supported
        listen_lang = input("Great! Is there a specific language you'd like to listen in? Please write out the full name of the language.\n")
        while listen_lang.lower() not in self.supported_lang.values():
            listen_lang = input("Sorry, that language is not supported. Please select a language from this list: https://cloud.google.com/text-to-speech/docs/voices\n")
        # Generate query
        query = self.generate(self.generate_query_message, topic)
        print("Loading your article...")
        # Seach for primary article and extract contents
        article = self.metaphor.search(query, num_results = 1, use_autoprompt=True)
        article_list = []
        contents = article.get_contents().contents
        result = article.results
        for i in range(len(contents)):
            article_list.append(self.scrape(result[i].title, result[i].author, result[i].published_date, contents[i].extract, contents[i].url, contents[i].id))
        # Find similar articles and extract contents
        similar_articles = self.metaphor.find_similar(url = article_list[0].url, num_results = 3)
        contents = similar_articles.get_contents().contents
        result = similar_articles.results
        for i in range(len(contents)):
            article_list.append(self.scrape(result[i].title, result[i].author, result[i].published_date, contents[i].extract, contents[i].url, contents[i].id))
        # Assign name to each article (1 primary + 3 similar expected)
        main, similar1, similar2, similar3 = [i for i in article_list]
        # Give audio file + thanks for listening message
        self.audio_file(main, similar1, similar2, similar3, listen_lang)
        print(self.thanks_for_listening(main, similar1, similar2, similar3))
    def generate(self, message: str, query: str) -> str:
        """Run one chat completion with `message` as the system prompt and
        `query` as the user turn; return the assistant's text."""
        completion = openai.ChatCompletion.create(
            model = "gpt-3.5-turbo",
            messages = [
                {"role": "system", "content": message},
                {"role": "user", "content": query},
            ]
        )
        return completion.choices[0].message.content
    def scrape(self, title: str, author: str, date: str, content: str, url: str, id_num: str) -> Article:
        """Build an Article from Metaphor result fields: fetch full contents,
        convert HTML to plain text, then summarize."""
        article_temp = Article(title = title, author = author, published_date = date, content = content, summary = content, url = url)
        self.get_contents(article_temp, id_num)
        self.html_to_plain(article_temp)
        self.summarize(article_temp)
        return article_temp
    def get_contents(self, article: Article, id_num = str) -> None:
        # NOTE(review): default `id_num = str` looks like a typo for the
        # annotation `id_num: str` -- confirm; callers always pass it anyway
        content = self.metaphor.get_contents(id_num)
        # keeps only the last extract if several are returned
        for content in content.contents:
            article.content = content.extract
    def html_to_plain(self, article: Article) -> None:
        """Replace the article's HTML content with a plain-text rendering."""
        convert = html2text.html2text(article.content)
        article.content = convert
    def summarize(self, article: Article) -> None:
        """Fill article.summary with an LLM summary of its content."""
        summary = self.generate(self.summarize_text_message, article.content)
        article.summary = summary
    def text_to_script(self, main: Article, first: Article, second: Article, third: Article) -> str:
        """Assemble the narration script (main article + pointers to the
        similar ones) and have the LLM smooth its grammar."""
        script = f"Here is {main.title} written by {main.author} on {main.published_date}. {main.content}. For further reading, check out {first.title} by {first.author} which discusses {first.summary}. Additionally, you can check out {second.title} by {second.author} or {third.title} by {third.author}."
        script = self.generate(self.clean_message, script)
        return script
        #return self.generate(self.text_to_ssml_message, script)
    def thanks_for_listening(self, main: Article, first: Article, second: Article, third: Article) -> str:
        """Build the closing message listing the related articles and URLs."""
        message = f"Happy listening! This article discusses {main.summary}. For further reads, check out {first.title} by {first.author} at {first.url}, {second.title} by {second.author} at {second.url}, or {third.title} by {third.author} at {third.url}. If you’d like to listen to any of these articles, run the tool again and type the title into search!"
        return self.generate(self.clean_message, message)
    def audio_file(self, main: Article, first: Article, second: Article, third: Article, language: str) -> None:
        """Synthesize the narration script to '<title>.mp3' in the requested
        language (looked up back to its gTTS language code)."""
        audio = gTTS(self.text_to_script(main, first, second, third), lang = "".join([key for key, value in self.supported_lang.items() if value == language]))
        title = f"{main.title}.mp3"
        audio.save(title)
# Script entry point: build the service and run the interactive flow.
if __name__ == '__main__':
    audicle = Audicle()
    audicle.main()
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dataclasses.asdict",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "gtts.lang... |
37657073356 | import os
import copy
import logging
import numpy as np
import json_tricks
from gym import spaces
import nni
from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward
from .model import Model
from .util import set_global_seeds
from .policy import build_lstm_policy
logger = logging.getLogger('ppo_tuner_AutoML')
def constfn(val):
    """Return a one-argument function that always yields ``val``.

    Used so constant learning rates / clip ranges share the same callable
    interface as scheduled ones.
    """
    def _const(_unused):
        return val
    return _const
class ModelConfig:
    """Hyper-parameter container for the PPO model."""

    def __init__(self):
        # spaces and rollout geometry -- filled in later by the tuner
        self.observation_space = None
        self.action_space = None
        self.num_envs = 0
        self.nsteps = 0
        self.embedding_size = None  # the embedding is for each action

        # optimization coefficients
        self.ent_coef = 0.0
        self.lr = 3e-4
        self.vf_coef = 0.5
        self.max_grad_norm = 0.5

        # advantage estimation / clipping
        self.gamma = 0.99
        self.lam = 0.95
        self.cliprange = 0.2

        # training schedule
        self.noptepochs = 4        # training epochs per update
        self.total_timesteps = 5000  # total actions taken in the environment
        self.nminibatches = 4      # minibatches per update; must divide num_envs
class TrialsInfo:
    """Bookkeeping for one batch of trials produced by a single inference.

    Stores the rollout tensors (observations, actions, values, ...) and hands
    out each trial's action sequence one at a time via get_next().
    """

    def __init__(self, obs, actions, values, neglogpacs, dones, last_value, inf_batch_size):
        self.iter = 0  # index of the next trial to hand out
        self.obs = obs
        self.actions = actions
        self.values = values
        self.neglogpacs = neglogpacs
        self.dones = dones
        self.last_value = last_value
        self.rewards = None
        self.returns = None
        self.inf_batch_size = inf_batch_size
        #self.states = None

    def get_next(self):
        """Return (index, actions) of the next unread trial, or (None, None)
        when all inf_batch_size trials have been handed out."""
        if self.iter >= self.inf_batch_size:
            return None, None
        idx = self.iter
        self.iter = idx + 1
        # one action per step, taken from column `idx` of each step array
        return idx, [step[idx] for step in self.actions]

    def update_rewards(self, rewards, returns):
        """Record rewards/returns once the trials' final results are known."""
        self.rewards = rewards
        self.returns = returns

    def convert_shape(self):
        """Flatten every (nsteps, nenvs, ...) array to (nsteps * nenvs, ...)."""
        def _flatten01(arr):
            # swap and then flatten axes 0 and 1
            shape = arr.shape
            return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])

        for attr in ('obs', 'returns', 'dones', 'actions', 'values', 'neglogpacs'):
            setattr(self, attr, _flatten01(getattr(self, attr)))
class PPOModel:
    """
    PPO Model

    Wraps the LSTM policy/value network (built by build_lstm_policy) and the
    training Model, and drives the inference -> reward -> train cycle used by
    the tuner.
    """
    def __init__(self, model_config, mask):
        self.model_config = model_config
        self.states = None # initial state of lstm in policy/value network
        self.nupdates = None # the number of func train is invoked, used to tune lr and cliprange
        self.cur_update = 1 # record the current update
        self.np_mask = mask # record the mask of each action within one trial
        set_global_seeds(None)
        # constant lr / cliprange are wrapped as callables of the decay fraction
        assert isinstance(self.model_config.lr, float)
        self.lr = constfn(self.model_config.lr)
        assert isinstance(self.model_config.cliprange, float)
        self.cliprange = constfn(self.model_config.cliprange)
        # build lstm policy network, value share the same network
        policy = build_lstm_policy(model_config)
        # Get the nb of env
        nenvs = model_config.num_envs
        # Calculate the batch_size
        self.nbatch = nbatch = nenvs * model_config.nsteps # num of record per update
        nbatch_train = nbatch // model_config.nminibatches # get batch size
        # self.nupdates is used to tune lr and cliprange
        self.nupdates = self.model_config.total_timesteps // self.nbatch
        # Instantiate the model object (that creates act_model and train_model)
        self.model = Model(policy=policy, nbatch_act=nenvs, nbatch_train=nbatch_train,
                           nsteps=model_config.nsteps, ent_coef=model_config.ent_coef, vf_coef=model_config.vf_coef,
                           max_grad_norm=model_config.max_grad_norm, np_mask=self.np_mask)
        self.states = self.model.initial_state
        logger.info('=== finished PPOModel initialization')
    def inference(self, num):
        """
        generate actions along with related info from policy network.
        observation is the action of the last step.

        Parameters:
        ----------
        num: the number of trials to generate

        Returns a 6-tuple (obs, actions, values, neglogpacs, dones, last_values)
        of per-step batches, later wrapped into a TrialsInfo.
        """
        # Here, we init the lists that will contain the mb of experiences
        mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
        # initial observation
        # use the (n+1)th embedding to represent the first step action
        first_step_ob = self.model_config.action_space.n
        obs = [first_step_ob for _ in range(num)]
        dones = [True for _ in range(num)]
        states = self.states
        # For n in range number of steps
        for cur_step in range(self.model_config.nsteps):
            # Given observations, get action value and neglopacs
            # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
            actions, values, states, neglogpacs = self.model.step(cur_step, obs, S=states, M=dones)
            mb_obs.append(obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(dones)
            # Take actions in env and look the results
            # Infos contains a ton of useful informations
            obs[:] = actions  # next observation is the action just taken
            if cur_step == self.model_config.nsteps - 1:
                dones = [True for _ in range(num)]
            else:
                dones = [False for _ in range(num)]
        #batch of steps to batch of rollouts
        np_obs = np.asarray(obs)
        mb_obs = np.asarray(mb_obs, dtype=np_obs.dtype)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # NOTE(review): np.bool is removed in NumPy >= 1.24; this line needs
        # `bool` or np.bool_ on modern NumPy -- left unchanged here
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        last_values = self.model.value(np_obs, S=states, M=dones)
        return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values
    def compute_rewards(self, trials_info, trials_result):
        """
        compute the rewards of the trials in trials_info based on trials_result,
        and update the rewards in trials_info

        Parameters:
        ----------
        trials_info: info of the generated trials
        trials_result: final results (e.g., acc) of the generated trials
        """
        # every step of a trial receives that trial's final result as reward
        mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32)
        # discount/bootstrap off value fn (GAE, walking backwards over steps)
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        last_dones = np.asarray([True for _ in trials_result], dtype=np.bool) # ugly
        for t in reversed(range(self.model_config.nsteps)):
            if t == self.model_config.nsteps - 1:
                nextnonterminal = 1.0 - last_dones
                nextvalues = trials_info.last_value
            else:
                nextnonterminal = 1.0 - trials_info.dones[t+1]
                nextvalues = trials_info.values[t+1]
            delta = mb_rewards[t] + self.model_config.gamma * nextvalues * nextnonterminal - trials_info.values[t]
            mb_advs[t] = lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam
        mb_returns = mb_advs + trials_info.values

        trials_info.update_rewards(mb_rewards, mb_returns)
        trials_info.convert_shape()
    def train(self, trials_info, nenvs):
        """
        train the policy/value network using trials_info

        Parameters:
        ----------
        trials_info: complete info of the generated trials from the previous inference
        nenvs: the batch size of the (previous) inference
        """
        # keep frac decay for future optimization
        if self.cur_update <= self.nupdates:
            frac = 1.0 - (self.cur_update - 1.0) / self.nupdates
        else:
            logger.warning('current update (self.cur_update) %d has exceeded total updates (self.nupdates) %d',
                           self.cur_update, self.nupdates)
            frac = 1.0 - (self.nupdates - 1.0) / self.nupdates
        lrnow = self.lr(frac)
        cliprangenow = self.cliprange(frac)
        self.cur_update += 1
        states = self.states
        assert states is not None # recurrent version
        assert nenvs % self.model_config.nminibatches == 0
        envsperbatch = nenvs // self.model_config.nminibatches
        envinds = np.arange(nenvs)
        flatinds = np.arange(nenvs * self.model_config.nsteps).reshape(nenvs, self.model_config.nsteps)
        # several epochs over shuffled env-contiguous minibatches
        for _ in range(self.model_config.noptepochs):
            np.random.shuffle(envinds)
            for start in range(0, nenvs, envsperbatch):
                end = start + envsperbatch
                mbenvinds = envinds[start:end]
                mbflatinds = flatinds[mbenvinds].ravel()
                slices = (arr[mbflatinds] for arr in (trials_info.obs, trials_info.returns, trials_info.dones,
                                                      trials_info.actions, trials_info.values, trials_info.neglogpacs))
                mbstates = states[mbenvinds]
                self.model.train(lrnow, cliprangenow, *slices, mbstates)
class PPOTuner(Tuner):
"""
PPOTuner
"""
    def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
                 ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2):
        """
        initialization, PPO model is not initialized here as search space is not received yet.

        Parameters:
        ----------
        optimize_mode: maximize or minimize
        trials_per_update: number of trials to have for each model update
        epochs_per_update: number of epochs to run for each model update
        minibatch_size: minibatch size (number of trials) for the update
        ent_coef: policy entropy coefficient in the optimization objective
        lr: learning rate of the model (lstm network), constant
        vf_coef: value function loss coefficient in the optimization objective
        max_grad_norm: gradient norm clipping coefficient
        gamma: discounting factor
        lam: advantage estimation discounting factor (lambda in the paper)
        cliprange: cliprange in the PPO algorithm, constant
        """
        self.optimize_mode = OptimizeMode(optimize_mode)
        self.model_config = ModelConfig()
        self.model = None  # created in update_search_space() once the space is known
        self.search_space = None
        self.running_trials = {} # key: parameter_id, value: actions/states/etc.
        self.inf_batch_size = trials_per_update # number of trials to generate in one inference
        self.first_inf = True # indicate whether it is the first time to inference new trials
        self.trials_result = [None for _ in range(self.inf_batch_size)] # results of finished trials
        self.credit = 0 # record the unsatisfied trial requests
        self.param_ids = []
        self.finished_trials = 0
        self.chosen_arch_template = {}  # skeleton config filled per trial by _actions_to_config()
        # populated by _process_nas_space() when the search space arrives
        self.actions_spaces = None
        self.actions_to_config = None
        self.full_act_space = None
        self.trials_info = None
        self.all_trials = {} # used to dedup the same trial, key: config, value: final result

        self.model_config.num_envs = self.inf_batch_size
        self.model_config.noptepochs = epochs_per_update
        self.model_config.nminibatches = minibatch_size

        self.send_trial_callback = None
        logger.info('=== finished PPOTuner initialization')
    def _process_one_nas_space(self, block_name, block_space):
        """
        process nas space to determine observation space and action space

        Also fills self.chosen_arch_template[block_name] with a skeleton dict
        (None where an action must choose, a fixed value where there is no choice).

        Parameters:
        ----------
        block_name: the name of the mutable block
        block_space: search space of this mutable block

        Returns:
        ----------
        actions_spaces: list of the space of each action
        actions_to_config: the mapping from action to generated configuration
        """
        actions_spaces = []
        actions_to_config = []

        block_arch_temp = {}
        for l_name, layer in block_space.items():
            chosen_layer_temp = {}

            # layer choice becomes an action only when there is more than one option
            if len(layer['layer_choice']) > 1:
                actions_spaces.append(layer['layer_choice'])
                actions_to_config.append((block_name, l_name, 'chosen_layer'))
                chosen_layer_temp['chosen_layer'] = None
            else:
                assert len(layer['layer_choice']) == 1
                chosen_layer_temp['chosen_layer'] = layer['layer_choice'][0]

            # optional inputs: only sizes 0, 1, or [0, 1] are supported
            if layer['optional_input_size'] not in [0, 1, [0, 1]]:
                raise ValueError('Optional_input_size can only be 0, 1, or [0, 1], but the pecified one is %s'
                                 % (layer['optional_input_size']))
            if isinstance(layer['optional_input_size'], list):
                # [0, 1]: "None" is a pseudo-action meaning "choose no input"
                actions_spaces.append(["None", *layer['optional_inputs']])
                actions_to_config.append((block_name, l_name, 'chosen_inputs'))
                chosen_layer_temp['chosen_inputs'] = None
            elif layer['optional_input_size'] == 1:
                actions_spaces.append(layer['optional_inputs'])
                actions_to_config.append((block_name, l_name, 'chosen_inputs'))
                chosen_layer_temp['chosen_inputs'] = None
            elif layer['optional_input_size'] == 0:
                chosen_layer_temp['chosen_inputs'] = []
            else:
                raise ValueError('invalid type and value of optional_input_size')

            block_arch_temp[l_name] = chosen_layer_temp

        self.chosen_arch_template[block_name] = block_arch_temp

        return actions_spaces, actions_to_config
    def _process_nas_space(self, search_space):
        """
        process nas search space to get action/observation space

        Walks every mutable block, concatenates their per-step action spaces,
        and merges all distinct actions into one global action list.

        Returns (actions_spaces, actions_to_config, full_act_space,
        observation_space, nsteps) where observation_space is the size of the
        merged action list and nsteps the number of decision steps per trial.
        """
        actions_spaces = []
        actions_to_config = []
        for b_name, block in search_space.items():
            if block['_type'] != 'mutable_layer':
                raise ValueError('PPOTuner only accept mutable_layer type in search space, but the current one is %s'%(block['_type']))
            block = block['_value']
            act, act_map = self._process_one_nas_space(b_name, block)
            actions_spaces.extend(act)
            actions_to_config.extend(act_map)

        # calculate observation space: dedup actions, preserving first-seen order
        dedup = {}
        for step in actions_spaces:
            for action in step:
                dedup[action] = 1
        full_act_space = [act for act, _ in dedup.items()]
        assert len(full_act_space) == len(dedup)
        observation_space = len(full_act_space)
        nsteps = len(actions_spaces)

        return actions_spaces, actions_to_config, full_act_space, observation_space, nsteps
def _generate_action_mask(self):
"""
different step could have different action space. to deal with this case, we merge all the
possible actions into one action space, and use mask to indicate available actions for each step
"""
two_masks = []
mask = []
for acts in self.actions_spaces:
one_mask = [0 for _ in range(len(self.full_act_space))]
for act in acts:
idx = self.full_act_space.index(act)
one_mask[idx] = 1
mask.append(one_mask)
two_masks.append(mask)
mask = []
for acts in self.actions_spaces:
one_mask = [-np.inf for _ in range(len(self.full_act_space))]
for act in acts:
idx = self.full_act_space.index(act)
one_mask[idx] = 0
mask.append(one_mask)
two_masks.append(mask)
return np.asarray(two_masks, dtype=np.float32)
def update_search_space(self, search_space):
    """
    get search space, currently the space only includes that for NAS

    Parameters:
    ----------
    search_space: search space for NAS

    Returns:
    -------
    no return
    """
    logger.info('=== update search space %s', search_space)
    # The search space may only be set once per tuner instance.
    assert self.search_space is None
    self.search_space = search_space
    assert self.model_config.observation_space is None
    assert self.model_config.action_space is None

    self.actions_spaces, self.actions_to_config, self.full_act_space, obs_space, nsteps = self._process_nas_space(search_space)

    # Observation and action space share the same size: both range over the
    # merged set of distinct actions.
    self.model_config.observation_space = spaces.Discrete(obs_space)
    self.model_config.action_space = spaces.Discrete(obs_space)
    self.model_config.nsteps = nsteps

    # generate mask in numpy
    mask = self._generate_action_mask()

    # The PPO model is created exactly once, after the space is known.
    assert self.model is None
    self.model = PPOModel(self.model_config, mask)
def _actions_to_config(self, actions):
"""
given actions, to generate the corresponding trial configuration
"""
chosen_arch = copy.deepcopy(self.chosen_arch_template)
for cnt, act in enumerate(actions):
act_name = self.full_act_space[act]
(block_name, layer_name, key) = self.actions_to_config[cnt]
if key == 'chosen_inputs':
if act_name == 'None':
chosen_arch[block_name][layer_name][key] = []
else:
chosen_arch[block_name][layer_name][key] = [act_name]
elif key == 'chosen_layer':
chosen_arch[block_name][layer_name][key] = act_name
else:
raise ValueError('unrecognized key: {0}'.format(key))
return chosen_arch
def generate_multiple_parameters(self, parameter_id_list, **kwargs):
    """
    Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.

    Trials for which no configuration is currently available (signalled by
    ``nni.NoMoreTrialError``) are silently skipped here; their parameter ids
    are queued inside ``generate_parameters`` and served later via the
    stored ``st_callback``.
    """
    result = []
    # Keep the callback so queued trials can be submitted after the next
    # inference round.
    self.send_trial_callback = kwargs['st_callback']
    for parameter_id in parameter_id_list:
        had_exception = False
        try:
            logger.debug("generating param for %s", parameter_id)
            res = self.generate_parameters(parameter_id, **kwargs)
        except nni.NoMoreTrialError:
            had_exception = True
        if not had_exception:
            result.append(res)
    return result
def generate_parameters(self, parameter_id, **kwargs):
    """
    Generate parameters. If no trial configuration is available right now,
    increment ``self.credit`` so the config can be sent later, and raise
    ``nni.NoMoreTrialError``.
    """
    # Lazily run the first batch of model inference to populate trials_info.
    if self.first_inf:
        self.trials_result = [None for _ in range(self.inf_batch_size)]
        mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
        self.trials_info = TrialsInfo(mb_obs, mb_actions, mb_values, mb_neglogpacs,
                                      mb_dones, last_values, self.inf_batch_size)
        self.first_inf = False

    trial_info_idx, actions = self.trials_info.get_next()
    if trial_info_idx is None:
        # Batch exhausted: remember the request and serve it after the next
        # inference round (see _next_round_inference).
        self.credit += 1
        self.param_ids.append(parameter_id)
        raise nni.NoMoreTrialError('no more parameters now.')

    self.running_trials[parameter_id] = trial_info_idx
    new_config = self._actions_to_config(actions)
    return new_config
def _next_round_inference(self):
    """
    Train the PPO model on the finished batch, run the next inference
    batch, and submit queued trials (those that accumulated credit while
    no configuration was available).
    """
    self.finished_trials = 0
    self.model.compute_rewards(self.trials_info, self.trials_result)
    self.model.train(self.trials_info, self.inf_batch_size)
    self.running_trials = {}
    # generate new trials
    self.trials_result = [None for _ in range(self.inf_batch_size)]
    mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
    self.trials_info = TrialsInfo(mb_obs, mb_actions, mb_values, mb_neglogpacs,
                                  mb_dones, last_values, self.inf_batch_size)
    # check credit and submit new trials
    for _ in range(self.credit):
        trial_info_idx, actions = self.trials_info.get_next()
        if trial_info_idx is None:
            logger.warning('No enough trial config, trials_per_update is suggested to be larger than trialConcurrency')
            break
        assert self.param_ids
        # Serve the oldest queued parameter request with a fresh config.
        param_id = self.param_ids.pop()
        self.running_trials[param_id] = trial_info_idx
        new_config = self._actions_to_config(actions)
        self.send_trial_callback(param_id, new_config)
        self.credit -= 1
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    receive trial's result. if the number of finished trials equals self.inf_batch_size, start the next update to
    train the model
    """
    trial_info_idx = self.running_trials.pop(parameter_id, None)
    assert trial_info_idx is not None

    value = extract_scalar_reward(value)
    # PPO maximizes reward, so negate when the configured goal is minimize.
    if self.optimize_mode == OptimizeMode.Minimize:
        value = -value

    self.trials_result[trial_info_idx] = value
    self.finished_trials += 1
    # A full inference batch has reported back: train and start a new round.
    if self.finished_trials == self.inf_batch_size:
        self._next_round_inference()
def trial_end(self, parameter_id, success, **kwargs):
    """
    Deal with trial failure.

    A failed trial is assigned the mean reward of all trials that finished
    so far (0 when none finished), so the PPO update still receives a full
    batch of results.

    Parameters
    ----------
    parameter_id : id of the trial that ended
    success : bool; False indicates the trial failed
    """
    if not success:
        if parameter_id not in self.running_trials:
            logger.warning('The trial is failed, but self.running_trial does not have this trial')
            return
        trial_info_idx = self.running_trials.pop(parameter_id, None)
        assert trial_info_idx is not None
        # use mean of finished trials as the result of this failed trial
        values = [val for val in self.trials_result if val is not None]
        # Diagnostic detail only; the previous code logged this at warning
        # level with a leftover developer tag ('zql values: ...').
        logger.debug('values used to fill failed trial: %s', values)
        self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
        self.finished_trials += 1
        # A full batch has now reported (including synthetic results):
        # train the model and start the next round.
        if self.finished_trials == self.inf_batch_size:
            self._next_round_inference()
def import_data(self, data):
    """
    Import additional data for tuning

    Parameters
    ----------
    data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'
    """
    # PPO keeps on-policy state (model rollouts), so externally supplied
    # results cannot be incorporated; warn and ignore.
    logger.warning('PPOTuner cannot leverage imported data.')
| danijimmy19/nni | src/sdk/pynni/nni/ppo_tuner/ppo_tuner.py | ppo_tuner.py | py | 23,509 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "util.set_global_seeds",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "policy.build_lstm_policy",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "mo... |
41332458191 | import json
from django.http import JsonResponse
from django.views import View
from owners.models import Owner, Dogs
class OwnerRegister(View):
    """List every owner (GET) or register a new owner (POST)."""

    def get(self, request):
        # A QuerySet cannot be serialized directly, so build a list of
        # plain dicts before handing it to JsonResponse.
        payload = [
            {
                'id': record.id,
                'owner_name': record.owner_name,
                'owner_email': record.owner_email,
                'owner_age': record.owner_age,
            }
            for record in Owner.objects.all()
        ]
        return JsonResponse({'owner': payload}, status=200)

    def post(self, request):
        body = json.loads(request.body)
        new_owner = Owner(
            owner_name=body['owner_name'],
            owner_email=body['owner_email'],
            owner_age=body['owner_age'])
        new_owner.save()
        return JsonResponse({'message': '등록되었습니다'}, status=201)
class DogRegister(View):
    """List every dog (GET) or register a new dog for an existing owner (POST)."""

    def get(self, request):
        # Serialize the queryset into plain dicts for JsonResponse.
        payload = []
        for record in Dogs.objects.all():
            payload.append({
                'id': record.id,
                'dog_name': record.dog_name,
                'dog_age': record.dog_age,
                # Forward relation: any Owner field (id, owner_name,
                # owner_email, owner_age) is reachable via record.owner.
                'owner_id': record.owner.owner_name,
            })
        return JsonResponse({'dogs': payload}, status=200)

    def post(self, request):
        body = json.loads(request.body)
        # Resolve the owning Owner row by its name.
        owner = Owner.objects.get(owner_name=body['owner_name'])
        new_dog = Dogs(
            dog_name=body['dog_name'],
            dog_age=body['dog_age'],
            owner_id=owner.id)
        new_dog.save()
        return JsonResponse({'message': '등록되었습니다'}, status=201)
class OwnerList(View):
    """Return every owner together with the dogs that reference them."""

    def get(self, request):
        payload = []
        for record in Owner.objects.all():
            # Reverse relation: without related_name on the foreign key,
            # Django exposes the related Dogs rows as <model>_set
            # (here dogs_set). There may be several dogs per owner, so
            # all of them are collected into a list.
            dog_entries = [
                {
                    'dog_name': dog.dog_name,
                    'dog_age': dog.dog_age,
                }
                for dog in record.dogs_set.all()
            ]
            payload.append({
                'id': record.id,
                'owner_name': record.owner_name,
                'owner_email': record.owner_email,
                'owner_age': record.owner_age,
                'owner_id': dog_entries,
            })
        return JsonResponse({'owner_doglist': payload}, status=200)
| nicholas019/crud2_owner | owners/views.py | views.py | py | 3,456 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.views.View",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "owners.models.Owner.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "owners.models.Owner.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
40420058444 | from pathlib import Path
import numpy as np
import re
from bisect import bisect_left
from copy import deepcopy
from itertools import compress
reg = re.compile(r"(y|x)=(\d+), (y|x)=(\d+)\.\.(\d+)")
def main():
    """Solve both parts of the puzzle and print the answers."""
    # Puzzle input is read from ``input.txt`` in the current directory.
    data_folder = Path(".").resolve()
    data = data_folder.joinpath("input.txt").read_text()
    res = Reservoir(data)
    res.flow()
    res.print_ground()
    print(f"The water can reach {res.count_water()} tiles")
    print(f"There are {res.count_rest_water()} tiles left after the spring stops.")
class Reservoir():
    """Simulate water flowing from a spring through a 2-d clay scan.

    The grid ``self.ground`` stores character codes:
    '.' sand, '#' clay, '|' flowing water, '~' settled water, '+' spring.
    """

    def __init__(self,data):
        # Parse the scan: each line gives one coordinate as a scalar and the
        # other as a range, e.g. "x=495, y=2..7".
        data = data.split("\n")
        ax = {'x':1,'y':0}  # coordinate letter -> axis index (0 = row/y, 1 = col/x)
        sep = '..'  # (unused)
        for i,line in enumerate(data):
            m = reg.match(line).group(1,2,3,4,5)
            data[i] = [[],[]]
            data[i][ax[m[0]]] = [int(m[1])]
            data[i][ax[m[2]]] = [int(m[3]),int(m[4])]
        # Bounding box of all clay, as [[ymin,ymax],[xmin,xmax]].
        self.lims = [[0,0],[500,500]]
        # Smallest y that contains clay; tiles above it are excluded from the
        # water counts.
        self.lowest_y = None
        for line in data:
            for j,dim in enumerate(line):
                for point in dim:
                    if point > self.lims[j][1]:
                        self.lims[j][1] = point
                    if point < self.lims[j][0]:
                        self.lims[j][0] = point
                    if (j == 0) and ((self.lowest_y is None) or (point < self.lowest_y)) :
                        self.lowest_y = point
        # Widen the x range by one on each side so water can run off the edges.
        self.lims[1][0] -= 1
        self.lims[1][1] += 1
        self.size = (self.lims[0][1]+1,self.lims[1][1]-self.lims[1][0]+1)
        self.ground = np.full(self.size,ord('.'),dtype=int)
        # Draw the clay veins; x indices are shifted by the grid's left edge.
        for line in data:
            if len(line[1]) == 2:
                self.ground[line[0][0],(line[1][0]-self.lims[1][0]):(line[1][1]+1-self.lims[1][0])] = ord('#')
            else:
                self.ground[line[0][0]:(line[0][1]+1),line[1][0]-self.lims[1][0]] = ord('#')
        # The spring sits at x=500 on the top row.
        self.spring = (0,500-self.lims[1][0])
        self.ground[self.spring[0],self.spring[1]] = ord('+')
        # Tiles from which water may still spread.
        self.flowing = [self.spring]

    def print_ground(self):
        """Render the grid (with row/column labels) to ``output.txt``."""
        s = ""
        col_size = self.lims[1][1]
        col_digits = len(str(col_size))
        row_size = self.lims[0][1]
        row_digits = len(str(row_size))
        # Column labels are written vertically, one digit per header row.
        form_str = "{:" + str(col_digits) + "d}"
        for j in range(col_digits):
            s += " "*row_digits
            for loc in range(self.lims[1][0],self.lims[1][1]+1):
                loc_str = form_str.format(loc)
                s += loc_str[j]
            s += "\n"
        # One output line per grid row, prefixed with the row number.
        form_str = "{:" + str(row_digits) + "d}"
        for y in range(row_size+1):
            s += form_str.format(y)
            for k in self.ground[y]:
                s += chr(k)
            s += "\n"
        data_folder = Path(".").resolve()
        data_folder.joinpath("output.txt").write_text(s)

    def flow_step(self):
        """Advance the simulation one step; return True while anything changed."""
        active = [True]*len(self.flowing)
        active_len = len(active)
        for j,tile in enumerate(self.flowing):
            if active[j]:
                below = (tile[0]+1,tile[1])
                left = (tile[0],tile[1]-1)
                right = (tile[0],tile[1]+1)
                if below[0] >= self.size[0]:
                    # Fell off the bottom of the scan: stop tracking.
                    active[j] = False
                elif self.ground[below] == ord('.'):
                    # Open sand below: water falls straight down.
                    self.ground[below] = ord('|')
                    self.flowing.append(below)
                    active.append(True)
                elif self.ground[below] in [ord('~'),ord('#')]:
                    # Solid footing: scan left and right until a wall or a
                    # gap in the floor is found.
                    i_l = 1
                    left_below = (tile[0]+1,tile[1]-i_l)
                    while (self.ground[left] in [ord('.'),ord('|')]) and (self.ground[left_below] in [ord('~'),ord('#')]):
                        i_l += 1
                        left = (tile[0],tile[1]-i_l)
                        left_below = (tile[0]+1,tile[1]-i_l)
                    i_r = 1
                    right_below = (tile[0]+1,tile[1]+i_r)
                    while (self.ground[right] in [ord('.'),ord('|')]) and (self.ground[right_below] in [ord('~'),ord('#')]):
                        i_r += 1
                        right = (tile[0],tile[1]+i_r)
                        right_below = (tile[0]+1,tile[1]+i_r)
                    if (self.ground[right] == ord('#')) and (self.ground[left] == ord('#')):
                        # Walls on both sides: the whole span settles as '~'
                        # and any flowing tiles inside it become inactive.
                        self.ground[tile[0],(left[1]+1):right[1]] = ord('~')
                        for k,_ in enumerate(active):
                            if (self.flowing[k][0] == tile[0]) and (left[1] < self.flowing[k][1] < right[1]):
                                active[k] = False
                    else:
                        # At least one open end: water spreads sideways as '|'
                        # and continues flowing from the open end(s).
                        closed_left = int(self.ground[left] not in [ord('.'),ord('|')])
                        open_right = int(self.ground[right] in [ord('.'),ord('|')])
                        self.ground[tile[0],(left[1]+closed_left):(right[1]+open_right)] = ord('|')
                        if not closed_left:
                            self.flowing.append(left)
                            active.append(True)
                        if open_right:
                            self.flowing.append(right)
                            active.append(True)
                        active[j] = False
        # Drop tiles that stopped flowing this step.
        self.flowing = list(compress(self.flowing,active))
        return (len(active) != active_len) or (sum(active) != active_len)

    def flow(self):
        """Run flow_step until the water pattern stabilizes."""
        while self.flow_step():
            pass

    def count_water(self):
        """Part 1: tiles reached by water ('|' or '~') at or below the top clay."""
        return np.sum(np.isin(self.ground[self.lowest_y:],[ord('|'),ord('~')]))

    def count_rest_water(self):
        """Part 2: settled water ('~') remaining after the spring stops."""
        return np.sum(self.ground[self.lowest_y:] == ord('~'))
# Run the solver only when executed as a script (allows import without side effects).
if __name__ == "__main__":
    main()
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 8... |
75153371233 | from scipy.io import wavfile as wav
from os import walk
import os, glob, wave
import csv
path = 'C:/Users/Muhammad/Desktop/REU19/Talker_Speaker.csv'
audio_location = 'C:/Users/Muhammad/Desktop/REU19/lombardgrid/audio'
# Build the (file name, duration) columns in ONE pass over the wav files.
# The original walked the directory twice (os.walk for names, glob for
# durations); nothing guarantees the two traversals return files in the same
# order, so names[i] and duration[i] could describe different files.
names = []
duration = []
for soundfile in sorted(glob.glob(os.path.join(audio_location, '*.wav'))):
    (freq, sig) = wav.read(soundfile)
    names.append(os.path.splitext(os.path.basename(soundfile))[0])
    # Frame count divided by sample rate gives the duration in seconds.
    # sig.shape[0] is the frame count; sig.size would over-count
    # multi-channel files (frames * channels) and inflate the duration.
    duration.append(sig.shape[0] / freq)

print(names)
print(duration)
print('Length of f: ', len(names))
print('Length of duration:', len(duration))

# Header row for the output spreadsheet.
fieldnames = ['File_Name',
              'Speaker_1',
              'Speaker_2',
              'Speaker_3',
              'Speaker_4',
              'Speaker_5',
              'Speaker_6',
              'File_Name_Out_S1',
              'File_Name_Out_S2',
              'File_Name_Out_S3',
              'File_Name_Out_S4',
              'File_Name_Out_S5',
              'File_Name_Out_S6',
              'Duration']

# Mode 'w' truncates the file on open, so the original's separate
# "clear the csv" handle (opened while the file was already open in
# append mode) is no longer needed.
with open(path, 'w') as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames, lineterminator='\n')
    writer.writeheader()
    for name, dur in zip(names, duration):
        writer.writerow({'File_Name': name,
                         'Speaker_1': 'N',
                         'Speaker_2': 'N',
                         'Speaker_3': 'N',
                         'Speaker_4': 'N',
                         'Speaker_5': 'N',
                         'Speaker_6': 'N',
                         'File_Name_Out_S1': name + '_speaker_1',
                         'File_Name_Out_S2': name + '_speaker_2',
                         'File_Name_Out_S3': name + '_speaker_3',
                         'File_Name_Out_S4': name + '_speaker_4',
                         'File_Name_Out_S5': name + '_speaker_5',
                         'File_Name_Out_S6': name + '_speaker_6',
                         'Duration': dur})
{
"api_name": "csv.reader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
... |
3710806835 | from django.shortcuts import render, get_object_or_404, redirect
from blog.models import Post
from .models import Reply
from .forms import CommentForm, PostReplyForm
def post_comment(request, post_pk):
    """Attach a comment to a post; re-render the detail page on invalid input."""
    post = get_object_or_404(Post, pk=post_pk)
    if request.method != 'POST':
        # Comments can only be created via POST; any other method just
        # returns to the post's detail page.
        return redirect(post)

    form = CommentForm(request.POST)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.post = post
        comment.user = request.user
        comment.save()
        return redirect(post)

    # Invalid form: show the detail page again with the bound form so the
    # validation errors are visible to the user.
    context = {
        'post': post,
        'form': form,
        'comment_list': post.comment_set.all(),
    }
    return render(request, 'blog/detail.html', context=context)
def post_reply(request, post_pk):
    """Create a reply on a post, optionally nested under an existing reply.

    The optional ``comment_reply`` POST field carries the parent reply's pk;
    when absent or empty the reply is top-level.
    """
    post = get_object_or_404(Post, pk=post_pk)
    if request.method == "POST":
        form = PostReplyForm(request.POST)
        if form.is_valid():
            reply = form.save(commit=False)
            # .get() avoids the KeyError that request.POST['comment_reply']
            # raised whenever the field was missing from the form data.
            parent_pk = request.POST.get('comment_reply')
            if parent_pk:
                # .first() returns None for an unknown pk instead of the
                # IndexError that .filter(...)[0] raised.
                reply.comment_reply = Reply.objects.filter(pk=parent_pk).first()
            reply.user = request.user
            reply.save()
            return redirect(post)
        # Invalid form: re-render the detail page with both forms bound.
        context = {
            'post': post,
            'form': CommentForm(),
            'reply_form': form,
            'comment_list': post.comment_set.all(),
        }
        return render(request, 'blog/detail.html', context=context)
    return redirect(post)
| xiaoming000/blog_django | comments/views.py | views.py | py | 1,803 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "blog.models.Post",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "forms.CommentForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name"... |
17360391205 | """Name hold a name choice for a Request
"""
# from . import db, ma
from marshmallow import fields
from sqlalchemy import event
from sqlalchemy.orm import backref
from sqlalchemy.orm.attributes import get_history
from namex.models import db, ma
class Name(db.Model):
    """A single name choice attached to a name Request."""
    __tablename__ = 'names'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(1024), index=True)
    state = db.Column(db.String(15), default='NE') # NE=Not Examined; R=Rejected; A=Accepted; C=Cond. Accepted
    choice = db.Column(db.Integer)  # position of this name among the request's choices
    designation = db.Column(db.String(50), default=None)
    consumptionDate = db.Column('consumption_date', db.DateTime(timezone=True))
    corpNum = db.Column('corp_num', db.String(10), default=None)
    remoteNameId = db.Column('remote_name_id', db.BigInteger)

    # Decision info
    conflict1 = db.Column(db.String(250), default='') # optional conflict name
    conflict2 = db.Column(db.String(250), default='') # optional conflict name
    conflict3 = db.Column(db.String(250), default='') # optional conflict name
    conflict1_num = db.Column(db.String(250), default='') # optional conflict name - corp or NR number
    conflict2_num = db.Column(db.String(250), default='') # optional conflict name - corp or NR number
    conflict3_num = db.Column(db.String(250), default='') # optional conflict name - corp or NR number
    decision_text = db.Column(db.String(1000), default='')

    nrId = db.Column('nr_id', db.Integer, db.ForeignKey('requests.id'), index=True)
    commentId = db.Column('comment_id', db.Integer, db.ForeignKey('comments.id'))
    # nameRequest = db.relationship('Request')

    # if a comment is added during decision, link it to the name record to be sent back to NRO
    comment = db.relationship("Comment", backref=backref("related_name", uselist=False), foreign_keys=[commentId])

    # Required for name request name analysis
    _name_type_cd = db.Column('name_type_cd', db.String(10))

    # Examination states
    NOT_EXAMINED = 'NE'
    APPROVED = 'APPROVED'
    REJECTED = 'REJECTED'
    CONDITION = 'CONDITION'

    # Needed for name request reservation before completing the nr
    RESERVED = 'RESERVED'
    COND_RESERVE = 'COND-RESERVE'

    # Properties added for Name Request
    @property
    def name_type_cd(self):
        """Property containing the name type which is used by name Request."""
        return self._name_type_cd

    @name_type_cd.setter
    def name_type_cd(self, value: str):
        self._name_type_cd = value

    def as_dict(self):
        """Serialize this record (including its linked comment) as a plain dict."""
        return {
            'id': self.id,
            'name': self.name,
            'name_type_cd': self.name_type_cd,
            'designation': self.designation,
            'choice': self.choice,
            'state': self.state,
            'conflict1': self.conflict1,
            'conflict2': self.conflict2,
            'conflict3': self.conflict3,
            'conflict1_num': self.conflict1_num,
            'conflict2_num': self.conflict2_num,
            'conflict3_num': self.conflict3_num,
            'decision_text': self.decision_text,
            'consumptionDate': self.consumptionDate.isoformat() if self.consumptionDate else None,
            'corpNum': self.corpNum,
            'comment': None if self.comment is None else self.comment.as_dict()
        }

    @classmethod
    def find_by_name(cls, name):
        """Return the first Name row whose name matches exactly, or None."""
        return cls.query.filter_by(name=name).first()

    def save_to_db(self):
        """Persist this record, committing immediately."""
        # force uppercase names
        self.name = self.name.upper()
        db.session.add(self)
        db.session.commit()

    def add_to_db(self):
        """Stage this record in the session without committing."""
        db.session.add(self)

    def delete_from_db(self):
        """Delete this record, committing immediately."""
        db.session.delete(self)
        db.session.commit()
@event.listens_for(Name, 'after_insert')
@event.listens_for(Name, 'after_update')
def update_nr_name_search(mapper, connection, target):
    """Add any changes to the name to the request.nameSearch column and publish name state changes where applicable."""
    # Imported here (not at module level) to avoid circular imports between
    # models and services.
    from flask.globals import current_app
    from namex.models import Event, Request, State
    from namex.services.audit_trail.event_recorder import EventRecorder

    name = target
    nr = Request.find_by_id(name.nrId)
    if nr:
        # get the names associated with the NR
        # NOTE(review): nr.id is an integer primary key read back from the
        # database, so the f-string interpolation below is not reachable by
        # user input; a bound parameter would still be safer.
        names_q = connection.execute(
            f"""
            SELECT names.name from names
            JOIN requests on requests.id = names.nr_id
            WHERE requests.id={nr.id}
            """
        )
        # format the names into a string like: |1<name1>|2<name2>|3<name3>
        names = [x[0] for x in names_q.all()]
        name_search = ''
        for item, index in zip(names, range(len(names))):
            name_search += f'|{index + 1}{item}{index + 1}|'
        # update the name_search field of the nr with the formatted string
        connection.execute(
            """
            UPDATE requests
            SET name_search=%s
            WHERE id=%s
            """,
            ('(' + name_search + ')', nr.id)
        )

        # set nr state to consumed
        # get_history tells us whether consumptionDate changed in this flush.
        name_consume_history = get_history(name, 'consumptionDate')
        current_app.logger\
            .debug('name_consume_history check - nrNum: {}, consumptionDate: {}, corpNum: {}, state: {}'
                   .format(nr.nrNum, name.consumptionDate, name.corpNum, name.state))
        # Note: we cannot just check for a corpNum addition due to some Society change of name NRs coming over from
        # NRO extractor providing a value for the corpNum field.
        if len(name_consume_history.added) \
                and name.consumptionDate \
                and name.corpNum \
                and name.state in ['APPROVED', 'CONDITION']:
            # Adding an after_flush_postexec to avoid connection and transaction closed issue's
            # Creating one time execution event when ever corpNum is added to a name
            # corpNum sets from nro-extractor job
            @event.listens_for(db.session, 'after_flush_postexec', once=True)
            def receive_after_flush_postexec(session, flush_context):
                nr = Request.find_by_id(name.nrId)
                nr.stateCd = State.CONSUMED
                nr.add_to_db()
                current_app.logger.debug('moved to CONSUMED state {}'.format(name.corpNum))
                EventRecorder.record_as_system(Event.UPDATE_FROM_NRO, nr, {
                    'id': nr.id,
                    'nrNum': nr.nrNum,
                    'stateCd': nr.stateCd
                }, True)
                current_app.logger.debug('moved to CONSUMED state event logged {}'.format(nr.nrNum))
class NameSchema(ma.SQLAlchemySchema):
    """Marshmallow schema for serializing/deserializing Name records."""

    class Meta:
        model = Name
        # Restrict (de)serialization to this explicit field list.
        fields = (
            'choice',
            'comment',
            'conflict1',
            'conflict2',
            'conflict3',
            'conflict1_num',
            'conflict2_num',
            'conflict3_num',
            'consumptionDate',
            'corpNum',
            'decision_text',
            'designation',
            'id',
            'name_type_cd',
            'name',
            'state'
        )

    # All fields except ``name`` are optional and may be null.
    conflict1 = fields.String(required=False, allow_none=True)
    conflict2 = fields.String(required=False, allow_none=True)
    conflict3 = fields.String(required=False, allow_none=True)
    conflict1_num = fields.Field(required=False, allow_none=True)
    conflict2_num = fields.Field(required=False, allow_none=True)
    conflict3_num = fields.Field(required=False, allow_none=True)
    decision_text = fields.String(required=False, allow_none=True)
    comment = fields.String(required=False, allow_none=True)
    consumptionDate = fields.DateTime(required=False, allow_none=True)
    corpNum = fields.String(required=False, allow_none=True)
    designation = fields.String(required=False, allow_none=True)
    # The name itself is the only mandatory field.
    name = fields.String(
        required=True,
        error_messages={'required': {'message': 'name is a required field'}}
    )
    name_type_cd = fields.String(required=False, allow_none=True)
| bcgov/namex | api/namex/models/name.py | name.py | py | 8,023 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "namex.models.db.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "namex.models.db",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "namex.models.db.Column",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nam... |
72255698915 | import requests
import json
import jsonpath
import openpyxl
from DataDriven import Library
def test_add_multiple_students():
    """Post one student record per spreadsheet data row (data-driven test)."""
    # API
    API_URL = "http://thetestingworldapi.com/api/studentsDetails"
    # Template request body; ``with`` guarantees the handle is closed
    # (the original leaked the open file).
    with open("/home/afzhal-ahmed-s/PycharmProjects/AddNewStudent.json") as file:
        json_request = json.loads(file.read())
    obj = Library.Common("/home/afzhal-ahmed-s/Downloads/Noduco_Udemy_C-PPA.xlsx", 'Sheet1')
    # (the unused fetch_column_count() call was dropped; presumably a pure
    # getter -- confirm Library.Common has no side effect there)
    row = obj.fetch_row_count()
    keyList = obj.fetch_key_names()
    # Row 1 holds the column headers, so data starts at row 2.
    for i in range(2, row + 1):
        updated_json_request = obj.update_request_with_data(i, json_request, keyList)
        response = requests.post(API_URL, updated_json_request)
        print(response)
| Afzhal-ahmed-s/Noduco_SDET_training | PycharmProjects/PyTest_Learning/DataDriven/TestCase.py | TestCase.py | py | 725 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "DataDriven.Library.Common",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "DataDriven.Library",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "requests.post... |
74952017313 |
"""
Module plotpages
Utilities for taking a set of plot files and creating a set of html and/or
latex/pdf pages displaying the plots.
"""
from __future__ import absolute_import
from __future__ import print_function
import os, time, string, glob
import sys
from functools import wraps
# increase resolution for images in animations:
html_movie_dpi = 100
import matplotlib as mpl
mpl.rcParams['figure.dpi']= html_movie_dpi
#print('+++ backend =', mpl.rcParams['backend'])
# Required for new animation style modified MAY 2013
import numpy as np
from matplotlib import image as Image
from matplotlib import pyplot as plt
import six
from six.moves import range
from clawpack.visclaw import gaugetools
from clawpack.visclaw import animation_tools
# Clawpack logo... not used on plot pages currently.
# Default to "no logo" so the name is always defined: the original only
# assigned ``logo`` inside the if-branch, leaving it undefined (a latent
# NameError for any later use) whenever the CLAW environment variable is
# not set.
logo = None
clawdir = os.getenv('CLAW')
if clawdir is not None:
    logo = os.path.join(clawdir,'doc/images/clawlogo.jpg')
    if not os.path.isfile(logo):
        logo = None
#===========================
class PlotPagesData(object):
#===========================
    """Configuration and driver for producing html/latex pages of plots."""

    def __init__(self):
        self.plotdir = 'plots'       # output directory for all generated pages
        self.overwrite = True        # ok to overwrite an existing plotdir
        self.verbose = True

        self.latex = True                # make latex files for figures
        self.latex_fname = 'plots'       # name of latex file to create
        self.latex_title = 'Plots'       # title on top of latex file
        self.latex_itemsperpage = 'all'  # number of items on each page
        self.latex_itemsperline = 2      # number of items on each line
        self.latex_framesperpage = 'all' # number of frames on each page
        self.latex_framesperline = 2     # number of frames on each line
        self.latex_figsperline = 'all'   # number of figures on each line
        self.latex_makepdf = False       # run pdflatex on latex file
        self.latex_preplots = None       # latex to for top of page before plots

        self.html = True                          # make html files for figures
        self.html_index_fname = '_PlotIndex.html' # name of html index file
        self.html_index_title = 'Plot Index'      # title on top of index file
        self.html_homelink = None                 # link to here from top of index file
        self.html_itemsperline = 2                # number of items on each line
        self.html_preplots = None                 # html to for top of page before plots
        self.html_movie = "JSAnimation"           # make html with java script for movie
        self.html_eagle = False                   # use EagleClaw titles on html pages?

        self.gif_movie = False   # make animated gif movie of frames

        self.timeframes_framenos = 'all'
        self.timeframes_frametimes = {}
        self.timeframes_fignos = 'all'
        self.timeframes_fignames = {}
        self.timeframes_prefix = 'frame'

        self.pageitem_list = []   # PageItem instances, in display order

    def new_pageitem(self):
        """
        Create a new PageItem to be printed on this page
        """
        pageitem = PageItem()
        self.pageitem_list.append(pageitem)
        return pageitem

    def make_html(self):
        """Generate the html pages and print browser pointers to the index."""
        plots2html(self)
        path_to_html_index = os.path.join(os.path.abspath(self.plotdir), \
                                          self.html_index_fname)
        print_html_pointers(path_to_html_index)

    def make_latex(self):
        """Generate the latex file for the page items."""
        plots2latex(self)

    def make_pages(self):
        """Generate latex and/or html output, as enabled by the flags."""
        if self.latex:
            self.make_latex()
        if self.html:
            self.make_html()

    def make_timeframes_latex(self):
        """Generate latex pages for time-frame plots."""
        timeframes2latex(self)

    def make_timeframes_html(self):
        """Generate html pages for time-frame plots and print index pointers."""
        timeframes2html(self)
        path_to_html_index = os.path.join(os.path.abspath(self.plotdir), \
                                          self.html_index_fname)
        print_html_pointers(path_to_html_index)
#=======================
class PageItem(object):
#=======================
    """One figure to be displayed on an html/latex plot page."""

    def __init__(self):
        # Full path to the png (or other) figure file.
        self.fname = ''
        # Text used for this item's link on the html index page.
        self.html_index_entry = 'Untitled figure'
        # Optional raw html inserted just before this item.
        self.html_preitem = None
        # Optional raw latex inserted just before this item.
        self.latex_preitem = None
#=======================
class HtmlIndex(object):
#=======================
    """Incrementally written html index page.

    NOTE(review): the file handle is opened in __init__ and only released
    by an explicit close() call -- callers must remember to call close().
    """

    def __init__(self, fname='_Index.html', title="Index"):
        self.fname = fname
        self.file = open(fname, 'w')
        # Page header; the expires meta tag discourages browser caching.
        self.file.write('<html><meta http-equiv="expires" content="0">')
        self.file.write('\n<title>Index</title>')
        self.file.write('\n<body><center><h1>%s</h1></center>\n' \
                        % title)

    def add(self,text = '', link = None):
        # Append one entry: an anchor when a link is given, plain text otherwise.
        if link:
            self.file.write("""
<p>
<a href="%s">%s</a>
""" % (link,text))
        else:
            self.file.write("""
<p>
%s
""" % text)

    def close(self):
        # Finish the page, release the handle, and print browser pointers.
        self.file.write("\n</body></html>")
        self.file.close()
        path_to_html_index = os.path.join(os.getcwd(), \
                                          self.fname)
        print_html_pointers(path_to_html_index)
#======================================================================
def plots2html(plot_pages_data):
#======================================================================
    """
    Take a sequence of figure files and produce html files to display them.

    Creates the index page, an "all figures" page, and one page per figure
    inside ``plot_pages_data.plotdir``.  The caller's working directory is
    restored on exit, even if an error occurs.
    """
    print('\n-----------------------------------\n')
    print('\nCreating html pages...\n')
    startdir = os.getcwd()
    ppd = plot_pages_data
    numitems = len(ppd.pageitem_list)   # number of page items (separate plots)
    if numitems == 0:
        print('*** Warning: 0 plots to put in html file')
        return

    try:
        cd_with_mkdir(ppd.plotdir, ppd.overwrite, ppd.verbose)
    except:
        print("*** Error, aborting plots2html")
        raise

    try:
        creationtime = current_time()

        # Precompute the per-item html page name and file extension.
        for pageitem in ppd.pageitem_list:
            splitname = os.path.splitext(pageitem.fname)
            pageitem.hname = splitname[0] + '.html'
            pageitem.ext = splitname[1]

        # Create the index page:
        #-----------------------
        html = open(ppd.html_index_fname, 'w')

        if ppd.html_eagle:
            html.write("""
         <html><meta http-equiv="expires" content="0">
         <title>EagleClaw Plot Index</title>
         <head>
         <link type="text/css" rel="stylesheet"
               href="http://localhost:50005/eagleclaw/eagleclaw.css">
         </head>
         <eagle1>EagleClaw -- Plot Index</eagle1>
         <eagle2>Easy Access Graphical Laboratory for Exploring Conservation
         Laws</eagle2>
         <p>
          <center><eagle3>
          <a href="../eaglemenu.html">Main Menu for this run-directory
          </a></eagle3> </center><p>
        """)
        else:
            html.write('<html><meta http-equiv="expires" content="0">')
            html.write('\n<title>%s</title>' % ppd.html_index_title)
            html.write('\n<body><center><h1>%s</h1></center>\n' \
                       % ppd.html_index_title)
            homelink = getattr(ppd, 'html_homelink', None)
            if homelink:
                html.write('<center><a href="%s">Back to %s</a></center>\n' \
                           % (homelink, homelink))
            html.write('<p>\n')
            html.write('<center>Plots created: %s ' % creationtime)
            html.write('</center><p>\n')

        html.write('<p>\n<table border=0 cellpadding=5 cellspacing=5>\n')
        html.write("""<p>\n<tr><td><b>All figures:</b></td>
              <td><a href="allfigures.html">html<a> </td>""")
        if ppd.latex_makepdf:
            html.write('   <td><a href="%s.pdf">%s.pdf</a></td>' \
                       % (ppd.latex_fname, ppd.latex_fname))
        html.write('</tr>\n')

        html.write('<p>\n<tr><td><b>Individual Figures:</b></td> </tr>\n')
        for pageitem in ppd.pageitem_list:
            html.write("""
               <td>%s</td>
               <td><a href="%s">html</a></td>
               <td><a href="%s">%s</a></td><tr>
               """ % (pageitem.html_index_entry, \
                      pageitem.hname, \
                      pageitem.fname, pageitem.fname))
        html.write('</table>\n')
        html.write('</body></html>')
        # Was missing in the original: the index handle leaked when the
        # variable was rebound to the next file below.
        html.close()

        #----------------------------------------------------------------------
        # allfigures.html
        #-------------------
        html = open('allfigures.html', 'w')
        html.write("""
              <html><meta http-equiv="expires" content="0">
              <title>Plots</title>
              <p>
              <h1>All figures</h1>
              <p>
              <h3><a href=%s>Return to Plot Index</a> </h3>
              <p>
              <h3>Click on a figure to enlarge</h3>
              <p>
        """ % ppd.html_index_fname)

        for pageitem in ppd.pageitem_list:
            html.write('  <a href="%s"><img src="%s" width=400></a>\n' \
                       % (pageitem.hname, pageitem.fname))

        html.write('\n<p><h3><a href=%s>Return to Plot Index</a> </h3>' \
                   % ppd.html_index_fname)
        html.write('\n</center></body></html>\n')
        html.close()

        # individual html files for each figure
        #--------------------------------------
        for j in range(len(ppd.pageitem_list)):
            pageitem = ppd.pageitem_list[j]
            html = open(pageitem.hname, 'w')
            html.write("""
              <html><meta http-equiv="expires" content="0">
              <title>%s</title>
              <p>
              <h1>%s</h1>
              <p>
              <p>
            """ % (pageitem.html_index_entry, pageitem.html_index_entry))
            html.write("""
              <p><img src="%s" ><p>
              <h3><a href=%s>Return to Plot Index</a>
            """ % (pageitem.fname, ppd.html_index_fname))
            if j > 0:
                html.write("  ...  <a href=%s>Previous Figure</a> " \
                           % ppd.pageitem_list[j-1].hname)
            # Off-by-one fix: the original tested j < len-2, which dropped the
            # "Next Figure" link on the second-to-last page.
            if j < len(ppd.pageitem_list) - 1:
                html.write("  ...  <a href=%s>Next Figure</a> " \
                           % ppd.pageitem_list[j+1].hname)
            html.write("\n</h3>")
            html.write('\n</center></body></html>\n')
            html.close()
    finally:
        # Always restore the caller's working directory, even on error.
        os.chdir(startdir)
# end of plots2html
#======================================================================
def print_html_pointers(path_to_html_index):
#======================================================================
    """
    Print browser-ready URLs pointing at the html plot index.

    Parameters
    ----------
    path_to_html_index : str
        Absolute filesystem path to the generated html index file.

    If the path looks like a SageMathCloud project path
    (``/projects/<uuid>...``), it is rewritten into the corresponding
    ``https://cloud.sagemath.com`` raw URL; otherwise a ``file://`` URL
    is printed.
    """
    # check if path appears to be in format of SageMathCloud:
    # NOTE: use short-circuiting `and` plus a length guard -- the original
    # `&` form evaluated path_to_html_index[18] unconditionally and raised
    # IndexError for any path shorter than 19 characters.
    smc_user = (path_to_html_index.startswith('/projects')
                and len(path_to_html_index) > 18
                and path_to_html_index[18] == '-')
    if smc_user:
        # For SageMathCloud only: modify the path to be of the form that can
        # be opened in a browser window to view the html files
        s1 = path_to_html_index.replace('/projects','https://cloud.sagemath.com')
        path_to_html_index = s1[:64] + 'raw/' + s1[64:]
    else:
        # make the URL point to a local file:
        path_to_html_index = 'file://' + path_to_html_index
    print("\n--------------------------------------------------------")
    print("\nPoint your browser to:")
    print("      %s" % path_to_html_index)
    clawdir = os.getenv('CLAW','')
    # Removed next message since clawpack server is rarely used...
    #if clawdir in path_to_html_index:
    if False:
        path_to_html_index = path_to_html_index.replace(clawdir,'')
        print("\nOr, if you have the Clawpack server running, point your browser to:")
        print("      http://localhost:50005%s" % path_to_html_index)
#=====================================
def htmlmovie(html_index_fname,pngfile,framenos,figno):
#=====================================
    """
    Return html text for a javascript movie page looping over frames.

    Input:
      html_index_fname: name of the plot index page, linked from the movie.
      pngfile: a dictionary indexed by (frameno,figno) with value the
               corresponding png file for this figure.
      framenos: a list of frame numbers to include in movie
      figno: integer with the figure number for this movie.

    Returns:
      text for an html file that incorporates javascript to loop through the
      plots one after another.

    New 6/7/10: The html page also has buttons for controlling the movie.

    The parameter interval below is the time interval between loading
    successive images and is in milliseconds.

    The img_width and img_height parameters do not seem to have any effect.
    """
    text = """
        <html>
        <head>
        <script language="Javascript">
        <!---
        var num_images = %s; """ % len(framenos)
    text += """
        var img_width = 800;
        var img_height = 600;
        var interval = 300;
        var images = new Array();

        function preload_images()
            {
            t = document.getElementById("progress");
            """
    # Images are stored at 1-based indices 1..num_images.
    for i, frameno in enumerate(framenos, start=1):
        text += """
            t.innerHTML = "Preloading image ";
            images[%s] = new Image(img_width, img_height);
            images[%s].src = "%s";
            """ % (i,i,pngfile[frameno,figno])
    text += """
            t.innerHTML = "";
            }

        function tick()
            {
            frame += 1;
            if (frame > num_images)
                frame = 1;
            document.movie.src = images[frame].src;
            tt = setTimeout("tick()", interval);
            }
        function startup()
            {
            preload_images();
            frame = 1;
            document.movie.src = images[frame].src;
            }
        function rewind()
            {
            frame = 1;
            document.movie.src = images[frame].src;
            }
        function start()
            {
            tt = setTimeout("tick()", interval);
            }
        function pause()
            {
            clearTimeout(tt);
            }
        function restart()
            {
            tt = setTimeout("tick()", interval);
            }
        function slower()
            {
            interval = interval / 0.7;
            }
        function faster()
            {
            interval = interval * 0.7;
            }

        // --->
        </script>
        </head>
        <body onLoad="startup();">

        <form>
        &nbsp;&nbsp;&nbsp;
        <input type="button" value="Start movie" onClick="start()">
        <input type="button" value="Pause" onClick="pause()">
        <input type="button" value="Rewind" onClick="rewind()">
        <input type="button" value="Slower" onClick="slower()">
        <input type="button" value="Faster" onClick="faster()">
        <a href="%s">Plot Index</a>
        </form>

        <p><div ID="progress"></div></p>
          <img src="%s" name="movie"/>
        </body>
        </html>
        """ % (html_index_fname,pngfile[framenos[0],figno])
    return text
    # end of htmlmovie
# end of htmlmovie
#======================================================================
def plots2latex(plot_pages_data):
#======================================================================
    """
    Take a list of figure files and produce latex file to display them.

    So far only works with time frames, not with gauges or other plots.

    Reads layout attributes (latex_itemsperline, latex_itemsperpage, ...)
    from plot_pages_data and writes <latex_fname>.tex into the plot
    directory; optionally runs pdflatex if latex_makepdf is set.
    """
    print('\n-----------------------------------\n')
    print('\nCreating latex file...\n')
    startdir = os.getcwd()
    ppd = plot_pages_data
    plotdir = ppd.plotdir
    numitems = len(ppd.pageitem_list)   # number of page items (separate plots)
    if numitems == 0:
        print('*** Warning: 0 plots to put in latex file')
        print('No latex file generated')
        return
    try:
        cd_with_mkdir(ppd.plotdir, ppd.overwrite, ppd.verbose)
    except:
        print("*** Error, aborting plots2latex")
        raise
    creationtime = current_time()
    # Use a context manager so the .tex file is closed even if writing fails.
    with open(ppd.latex_fname + '.tex', 'w') as latexfile:
        # latex header
        #-------------
        latexfile.write(r"""
          \documentclass[11pt]{article}
          \usepackage{graphicx}
          \setlength{\textwidth}{7.5in}
          \setlength{\oddsidemargin}{-0.5in}
          \setlength{\evensidemargin}{-0.5in}
          \setlength{\textheight}{9.2in}
          \setlength{\voffset}{-1in}
          \setlength{\headsep}{5pt}
          \begin{document}
          \begin{center}{\Large\bf %s}\vskip 5pt
          """ % ppd.latex_title)
        latexfile.write(r"""
          \bf Plots created {\tt %s} in directory:  \vskip 5pt
          \verb+%s+
          \end{center}
          \vskip 5pt
          """ % (creationtime, startdir))
        # latex layout
        #-------------
        itemsperline = ppd.latex_itemsperline
        if itemsperline == 'all': itemsperline = numitems
        itemsperpage = ppd.latex_itemsperpage
        if itemsperpage == 'all': itemsperpage = numitems
        # width each plot must be:
        fwidth = 0.95/itemsperline
        # latex for each item:
        #---------------------
        itemlinecnt = 0
        itempagecnt = 0
        for pageitem in ppd.pageitem_list:
            if itempagecnt >= itemsperpage:
                latexfile.write('\\newpage \n')
                itempagecnt = 0
                itemlinecnt = 0
            elif itemlinecnt >= itemsperline:
                latexfile.write('\\vskip 10pt \n')
                itemlinecnt = 0
            itemlinecnt += 1
            itempagecnt += 1
            if pageitem.latex_preitem:
                latexfile.write(pageitem.latex_preitem)
            latexfile.write('\\includegraphics[width=%s\\textwidth]{%s}\n' \
                            % (fwidth,pageitem.fname))
        #latexfile.write('\\vskip 10pt \n')
        latexfile.write('\\end{document}\n')
    print("\nLatex file created:  ")
    print("  %s/%s.tex" % (plotdir, ppd.latex_fname))
    print("\nUse pdflatex to create pdf file")
    if ppd.latex_makepdf:
        # os.system does not raise on command failure -- it returns the
        # exit status.  The original try/except never caught a pdflatex
        # failure, so "Successfully created" was printed unconditionally.
        status = os.system('pdflatex %s' % ppd.latex_fname)
        if status == 0:
            print("\nSuccessfully created pdf file:  %s/%s.pdf" \
                  % (plotdir, ppd.latex_fname))
        else:
            print('*** pdflatex command failed')
    os.chdir(startdir)
# end of plots2latex
#======================================================================
def plotclaw2kml(plotdata):
#======================================================================
"""
Take a list of figure files and produce kml file to display them.
# Files that get created :
_GoogleEarthfig?.kmz : Zipped file containing all files, including doc.kml
_GoogleEarthfig?.kml : Network links to remote images
gauges.kml : Gauge Placemarks
regions.kml : Region polygons
levels.kml : Patch border polygons
"""
print(" ")
print("KML ===> Creating file %s.kmz" % plotdata.kml_index_fname)
startdir = os.getcwd()
from lxml import etree
from pykml.factory import KML_ElementMaker as KML
from pykml.factory import GX_ElementMaker as GX
from pykml.factory import ATOM_ElementMaker as ATOM
import zipfile
import shutil
from copy import deepcopy
from clawpack.geoclaw import kmltools
if plotdata.format is 'forestclaw':
level_base = 0
else:
level_base = 1
try:
cd_with_mkdir(plotdata.plotdir, plotdata.overwrite, plotdata.verbose)
except:
print("KML ===> Error, aborting plotclaw2kml (cannot create plot directory")
raise
gaugenos = plotdata.gauges_gaugenos
if gaugenos is not None:
if plotdata.gauges_fignos is not None:
plotdata = massage_gauges_data(plotdata)
gauge_pngfile = plotdata._gauge_pngfile
creationtime = current_time()
plotdata = massage_frames_data(plotdata)
framenos = plotdata.timeframes_framenos
frametimes = plotdata.timeframes_frametimes
fignos = plotdata.timeframes_fignos
fignames = plotdata.timeframes_fignames
pngfile = plotdata._pngfile
htmlfile = plotdata._htmlfile
frametimef = plotdata._frametimef
allfigsfile = plotdata._allfigsfile
allframesfile = plotdata._allframesfile
numframes = len(framenos)
numfigs = len(fignos)
creationtime = current_time()
# ------------------- get time span ----------------------
# Collect time spans for use in several places.
TS = []
event_time = plotdata.kml_starttime
tz = plotdata.kml_tz_offset
tscale = plotdata.kml_time_scale
if numframes == 1:
frameno = framenos[0]
t1 = frametimes[frameno]
t2 = t1 + 5 # Add time second so final figure shows up
sbegin, send = kmltools.kml_timespan(t1,t2,event_time,tz,tscale=tscale)
# To be used below
TS.append(KML.TimeSpan(
KML.begin(sbegin),
KML.end(send)))
else:
for i in range(0,numframes):
frameno = framenos[i]
t1 = frametimes[frameno]
if i < numframes-1:
t2 = frametimes[framenos[i+1]]
else:
# We could add 1 second at the end, or more time, depending on what
# effect is desired. In any case, the time span can't be empty or the
# last figure won't show up.
dt = (frametimes[framenos[numframes-1]] - frametimes[framenos[0]])/numframes
t2 = t1 + dt # Add enough time for looping through animations
print("Final time in Google Earth slider set to {:6.2f}".format(t2))
sbegin, send = kmltools.kml_timespan(t1,t2,event_time,tz,tscale=tscale)
TS.append(KML.TimeSpan(
KML.begin(sbegin),
KML.end(send)))
# Top level doc.kml file
doc = KML.kml(
KML.Document(
KML.name(plotdata.kml_name),
KML.open(1)))
# Open main zip file
zip = zipfile.ZipFile(plotdata.kml_index_fname + ".kmz",'w',allowZip64=True)
# --------------------- Set initial view --------------------------
first_found = False
for i,figname in enumerate(plotdata._fignames):
plotfigure = plotdata.plotfigure_dict[figname]
figno = plotfigure.figno
if not figno in fignos:
continue
if not plotfigure.use_for_kml:
continue
# Get a view that is used when GE first loads.
if plotfigure.kml_use_for_initial_view or not first_found:
x1 = plotfigure.kml_xlimits[0]
x2 = plotfigure.kml_xlimits[1]
y1 = plotfigure.kml_ylimits[0]
y2 = plotfigure.kml_ylimits[1]
ulinit = np.array([plotfigure.kml_xlimits[0], plotfigure.kml_ylimits[1]])
urinit = np.array([plotfigure.kml_xlimits[1], plotfigure.kml_ylimits[1]])
lrinit = np.array([plotfigure.kml_xlimits[1], plotfigure.kml_ylimits[0]])
R = 6371.0 # radius of earth
domain_width = R*np.cos(abs(y1+y2)*np.pi/360.0)*(x2-x1)*np.pi/180.0
dist_factor = 2 # factor by which height should exceed width
initial_height = min([1000*dist_factor*domain_width,9656064.0]) # <= 6000 miles
initial_view = KML.LookAt(
KML.longitude((ulinit[0]+urinit[0])/2),
KML.latitude((urinit[1]+lrinit[1])/2),
KML.tilt(0),
KML.range(initial_height)) # in meters?
doc.Document.append(deepcopy(initial_view))
# we found something; any other figures will have to have
# 'kml_use_for_initial_view' set to override this view.
first_found = True
# ------------------- Loop over figures ----------------------
fig_folder = KML.Folder(
KML.name("Figures"),
KML.open(1))
for figname in plotdata._fignames:
plotfigure = plotdata.plotfigure_dict[figname]
figno = plotfigure.figno
if not figno in fignos:
continue
if not plotfigure.use_for_kml:
continue
fig_dir = "fig" + str(figno)
if plotfigure.kml_show_figure:
fig_vis = 1
else:
fig_vis = 0
shutil.rmtree(fig_dir,True)
os.mkdir(fig_dir)
doc_fig = KML.kml(
KML.Document(
KML.name(plotfigure.name),
KML.open(0),
KML.Folder(
KML.name("Frames"))))
# Needed for each figure
ul = np.array([plotfigure.kml_xlimits[0], plotfigure.kml_ylimits[1]])
ur = np.array([plotfigure.kml_xlimits[1], plotfigure.kml_ylimits[1]])
lr = np.array([plotfigure.kml_xlimits[1], plotfigure.kml_ylimits[0]])
# Shift so plots that cross the 180 meridian, rather than the -180 Meridian
if ul[0] < -180:
ul[0] = ul[0] + 360
ur[0] = ur[0] + 360
lr[0] = lr[0] + 360
# ------------------- Loop over frames ----------------------
# This will get created for each figure, but I need it
# for createing the level boxes around each patch
for i in range(0,numframes):
frameno = framenos[i]
fname = 'frame' + str(frameno).rjust(4, '0')
fname_str = fname + 'fig%s' % figno
# ------------------- create subdirs with images ----------------------
if (not plotfigure.kml_tile_images):
print("KML ===> Adding %s.png to %s.kmz" \
" file (no tiling)" % (fname_str,plotdata.kml_index_fname))
# The 'etree'
doc_notile = KML.kml(KML.Document())
c = TS[i].getchildren()
desc = "t = %g\n" % frametimes[frameno] + c[0]
fstr = "%s.png" % fname_str
doc_notile.Document.append(
KML.GroundOverlay(
KML.name(fstr),
KML.Icon(KML.href(fstr)),
KML.LatLonBox(
KML.north(ur[1]),
KML.south(lr[1]),
KML.east(ur[0]),
KML.west(ul[0]))))
# Easier to just move into this directory to construct everything
os.chdir(fig_dir)
shutil.rmtree(fname_str,True) # remove directory and ignore errors
os.mkdir(fname_str)
# PNG file gets moved into subdirectory and will eventually be
# zipped into KMZ file.
if plotdata.html:
shutil.copy(os.path.join("..","%s.png" % fname_str),fname_str)
else:
shutil.move(os.path.join("..","%s.png" % fname_str),fname_str)
# The actual file to be written <framename>/doc.kml
docfile = os.path.join(fname_str,'doc.kml')
docfile_notile = open(os.path.join(fname_str,'doc.kml'),'wt')
docfile_notile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc_notile),
pretty_print=True)
docfile_notile.write(kml_text.decode())
docfile_notile.close()
os.chdir("..")
else:
print(" ")
print("KML ===> Tiling %s.png" % fname_str)
os.chdir(fig_dir)
pngfile = os.path.join("..","%s.png"% fname_str)
shutil.copy(pngfile,".")
im = plt.imread("%s.png" % fname_str)
sx = im.shape[1] # reversed?
sy = im.shape[0]
arg_list = ["gdal_translate", "-of", "VRT", \
"-a_srs", "EPSG:4326", \
"-gcp", "0", "0", "%f"%(ul[0]), "%f"%(ul[1]), \
"-gcp", "%d"%(sx), "0", "%f"%(ur[0]), "%f"%(ur[1]), \
"-gcp", "%d"%(sx), "%d"%(sy), "%f"%(lr[0]), "%f"%(lr[1]), "-90", \
"%s.png"%(fname_str), "%s_tmp.vrt"%(fname_str)]
import subprocess
retval = subprocess.call(arg_list)
arg_list = ["gdalwarp", "-of", "VRT", "-t_srs", "EPSG:4326 ", "-overwrite", \
"%s_tmp.vrt"%(fname_str), "%s.vrt"%(fname_str)]
retval = retval or subprocess.call(arg_list)
arg_list = ["gdal2tiles.py", \
"--profile=geodetic", \
"--force-kml", \
"--resampling=near", \
"%s.vrt" % (fname_str)]
retval = retval or subprocess.call(arg_list)
if retval > 0:
print("KML ===> gdal : something went wrong!\n")
sys.exit(1)
# Change back to top level directory before adding zipped files
os.chdir("..")
# Add the <fname>.vrt file to zipped file. Remove
# figure PNG file
zip.write(os.path.join(fig_dir,"%s.vrt" % fname_str))
# Leave the PNG file in the KMZ file?
# os.remove(os.path.join(fig_dir,"%s.png" % fname_str))
# Clean up files
os.remove(os.path.join(fig_dir,"%s_tmp.vrt" % fname_str))
os.remove(os.path.join(fig_dir,"%s.vrt" % fname_str))
# add Network link to high level doc.kml file. This will referenece either
# tiled files or non-tiled files.
c = TS[i].getchildren()
desc = "Time : t = %g\n" \
"UTC : %s\n"\
"File : %s.png" % (frametimes[frameno],c[0],fname_str)
# Description in Places panel
snippet_str = "<![CDATA[<b><pre>%s</pre></b>]]>" % desc
# Data that shows up in balloon
desc_style = "<b><pre><font style=\"font-size:10pt\">%s</font></pre></b>" % desc
desc_str = "<![CDATA[%s]]>" % desc_style
lstr = os.path.join(fname_str,'doc.kml')
doc_fig.Document.Folder.append(
KML.NetworkLink(
KML.name("Frame %d" % frameno),
KML.Snippet(snippet_str,maxLines="2"),
KML.description(desc_str),
deepcopy(TS[i]),
KML.Link(KML.href(lstr))))
# ----------------- Done with frame loop --------------------
lstr = os.path.join(fig_dir,"doc.kml")
fig_folder.append(
KML.NetworkLink(
KML.name("%s (%d)" % (figname,figno)),
KML.visibility(fig_vis),
KML.Link(
KML.href(lstr))))
# fig_vis = 0 # All figures referenced after the first one will not be shown
# when first loading GE.
# -------------- add colorbar image file -----------------
# Build the colorbar.
if plotfigure.kml_colorbar is not None:
print(" ")
print("KML ===> Building colorbar for figure %s" % plotfigure.name)
cb_img = "images"
cb_dir = os.path.join(fig_dir,cb_img)
shutil.rmtree(cb_dir,True)
os.mkdir(cb_dir)
cb_filename = "colorbarfig%s.png" % figno
try:
plotfigure.kml_colorbar(cb_filename)
shutil.move(cb_filename,cb_dir)
except:
print("KML ===> Warning : Something went wrong when creating colorbar")
# add link to KML file, even if colorbar didn't get created.
cb_str = os.path.join(cb_img,cb_filename)
colorbar = KML.ScreenOverlay(
KML.name("Colorbar"),
KML.Icon(KML.href(cb_str)),
KML.overlayXY(x="0.025", y="0.05", xunits="fraction", yunits="fraction"),
KML.screenXY(x="0.025", y="0.05",xunits="fraction", yunits="fraction"))
doc_fig.Document.append(colorbar)
# ----- Done with colorbar ------
# ------------------ done with fig<N>/doc.kml file ------------------
fig_file = open(os.path.join(fig_dir,"doc.kml"),'wt')
fig_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
# In case we used CDATA in any snippets or descriptions. For some reason
# <tags> get converted to >tags<, which balloons don't translate.
kml_text = etree.tostring(etree.ElementTree(doc_fig),pretty_print=True).decode()
kml_text = kml_text.replace('>','>')
kml_text = kml_text.replace('<','<')
fig_file.write(kml_text)
fig_file.close()
# Done with fig<n>/doc.kml file
# Clean up everything in the figure directory
for dirname, subdirs, files in os.walk(fig_dir):
zip.write(dirname)
for filename in files:
zip.write(os.path.join(dirname, filename))
shutil.rmtree(fig_dir)
# ---------------------- Done with figure loop ------------------
# Add "Figures" folder to doc.kml
doc.Document.append(deepcopy(fig_folder))
# ---------- Create top-level resource subdirectories -----------
kml_dir = 'kml'
shutil.rmtree(kml_dir,True) # remove directory and ignore errors
os.mkdir(kml_dir)
img_dir = 'images'
shutil.rmtree(img_dir,True) # remove directory and ignore errors
os.mkdir(img_dir)
# ------------------ Creating gauges.kml file -------------------------
gauge_kml_file = "gauges.kml"
print(" ")
print("KML ===> Creating file %s" % gauge_kml_file)
has_gauge_data = True
try:
setgauges = gaugetools.read_setgauges(plotdata.outdir)
except:
print(" File gauges.data not found - this should not happen.")
has_gauge_data = False
if has_gauge_data and gaugenos is not None and len(gaugenos) > 0:
gauges = setgauges.gauges
# Location of gauges PNG files (stored under <file>.kmz/images
basehref = "<base href=\"%s\"/>" % os.path.join('..','..','images','') # need trailing "/"
# Format the text in the Placemark balloon.
btext = \
"<style media=\"screen\" type=\"text/css\">" \
"pre {font-weight:bold;font-style:12pt}" + \
"span.title {font-weight:bold;font-size:12pt} " + \
"</style>" + \
"%s" % basehref + \
"<center><span class=\"title\">$[name]</span></center>" + \
"<pre>" + \
"Time : t1 = $[t1], t2 = $[t2]\n" + \
"Location : x1 = $[x1], y1 = $[y1]\n" + \
"</pre>" + \
"<center><img style=\"width:500\" src=\"$[pngfile]\"/></center>" + \
"<pre><b>File : $[pngfile]</pre>"
# the 'text' tag will replace Placemark description
bstyle = KML.text("<![CDATA[%s]]>" % btext)
# Start builing KML document
doc_gauges = KML.kml(KML.Document())
# Only one style for all of the gauges
doc_gauges.Document.append(KML.Style(
KML.BalloonStyle(bstyle),
id="gauge_style"))
# Loop over all gauges
for gnum,gauge in enumerate(gauges):
gaugeno = int(gauge[0])
if plotdata.print_gaugenos != 'all':
if gaugeno not in plotdata.print_gaugenos:
#print('+++ skipping gauge %i, not in print_gaugenos' % gaugeno)
continue # to next gauge
t1,t2 = gauge[3:5]
x1,y1 = gauge[1:3]
if plotdata.kml_map_topo_to_latlong is not None:
x1,y1 = plotdata.kml_map_topo_to_latlong(x1,y1)
# Get proper coordinates, otherwise placemark doesn't show up.
if x1 > 180:
longitude = x1 - 360
elif x1 < -180:
longitude = x1 + 360
else:
longitude = x1
print("Gauge %i: %10.6f %10.6f \n" % (gaugeno,x1,y1) \
+ " t1 = %10.1f, t2 = %10.1f" % (t1,t2))
# plotdata.gauges_fignos
# Not clear how to get the figure number for each gauge. Assume that
# there is only one figure number figno for all gauges
# If user has set 'gaugeno=[]', gauge files will not be added to the KMLfile.
if plotdata.gauges_fignos is not None:
figno = plotdata.gauges_fignos[0] # use just the first
figname = gauge_pngfile[gaugeno,figno]
elev = 0
coords = "%10.4f %10.4f %10.4f" % (longitude,y1,elev)
# Text for 'Places' panel
snippet = "t1 = %g, t2 = %g\n" % (t1,t2) + \
"x1 = %g, y1 = %g\n" % (x1,y1)
snippet_str = "<![CDATA[<pre><b>%s</b></pre>]]>" % snippet
# ExtendedData is used in BalloonStyle.text() fields.
placemark = KML.Placemark(
KML.name("%s %d" % (plotfigure.kml_gauge_name,gaugeno)),
KML.Snippet(snippet_str),
KML.styleUrl(chr(35) + "gauge_style"),
KML.ExtendedData(
KML.Data(KML.value(figname),name="pngfile"),
KML.Data(KML.value("%g" % t1),name="t1"),
KML.Data(KML.value("%g" % t2),name="t2"),
KML.Data(KML.value("%g" % x1),name="x1"),
KML.Data(KML.value("%g" % y1),name="y1")),
KML.Point(
KML.coordinates(coords)))
doc_gauges.Document.append(placemark)
kml_file = open(gauge_kml_file,'wt')
kml_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc_gauges),
pretty_print=True).decode()
kml_text = kml_text.replace('>','>') # Needed for CDATA blocks
kml_text = kml_text.replace('<','<')
kml_file.write(kml_text)
kml_file.close()
# -------------- add gauge image and KML files -----------------
if plotdata.gauges_fignos is None:
gauge_vis = 0
else:
gauge_vis = 1
doc.Document.append(
KML.NetworkLink(
KML.name("Gauges"),
KML.visibility(gauge_vis),
KML.Link(KML.href(os.path.join(kml_dir,
gauge_kml_file)))))
if os.path.isfile(gauge_kml_file):
shutil.move(gauge_kml_file,kml_dir)
# Add any gauge PNG files to images directory.
if plotdata.gauges_fignos is not None:
for k in gauge_pngfile.keys():
if os.path.isfile(gauge_pngfile[k]):
shutil.copy(gauge_pngfile[k],img_dir)
# ----------------- Add a region for the computational domain ----------
# Top level regions.kml file
doc_regions = KML.kml(KML.Document())
region_kml_file = "regions.kml"
# collect all the placemarks in a folder and append later
placemark_folder = []
# Read claw.data to get computational domain
print(" ")
print("KML ===> Creating file %s" % region_kml_file)
try:
f = open(os.path.join(plotdata.outdir,"claw.data"),'r')
except:
# We don't have the dimensions of the full domain
print(" Cannot find claw.data. Region for the computational domain will not be created.")
else:
# Read past comments; last 'l' is blank line
l = f.readline()
while (l.startswith('#')):
l = f.readline()
# read line containing number of gauges
l = f.readline()
# read lower
c = f.readline()
lower = np.fromstring(c.strip(),sep=' ')
c = f.readline()
upper = np.fromstring(c.strip(),sep=' ')
if plotdata.kml_map_topo_to_latlong is not None:
x1,y1 = plotdata.kml_map_topo_to_latlong(lower[0],lower[1])
x2,y2 = plotdata.kml_map_topo_to_latlong(upper[0],upper[1])
else:
x1 = lower[0]
x2 = upper[0]
y1 = lower[1]
y2 = upper[1]
bcomp_domain = \
"<style media=\"screen\" type=\"text/css\">" \
"pre {font-weight:bold;font-style:12pt}" + \
"span.title {font-weight:bold;font-size:12pt} " + \
"</style>" + \
"<center><span class=\"title\">Computational Domain</span></center>" + \
"<pre>" + \
"Location : x1 = $[x1], x2 = $[x2]\n" + \
" y1 = $[y1], y2 = $[y2]\n" + \
"</pre>"
domain_text = KML.text("<![CDATA[%s]]>" % bcomp_domain)
print("Computational domain : %10.6f %10.6f %10.6f %10.6f" \
% (x1,x2,y1,y2))
snippet_str = \
"x1 = %g, x2 = %g\n" % (x1,x2) + \
"y1 = %g, y2 = %g\n" % (y1,y2)
snippet = "<![CDATA[<b><pre>%s</pre></b>]]>" % snippet_str
# Style for this region
doc_regions.Document.append(
KML.Style(
KML.PolyStyle(
KML.color("FF98644E"), # light blue 4E6498
KML.fill(1),
KML.outline(0)),
KML.BalloonStyle(deepcopy(domain_text)),
id="comp_domain"))
lv = []
if x1 > 180 and x2 > 180:
for x in [x1,x2]:
lv.append(x - 360)
elif x1 < -180 and x2 < -180:
for x in [x1,x2]:
lv.append(x + 360)
else:
lv = [x1,x2] # Doesn't work for regions that straddle +/- 180.
longitude = lv
# rectangle with 2 corners specified
mapping = {}
mapping['x1'] = longitude[0]
mapping['x2'] = longitude[1]
mapping['y1'] = y1
mapping['y2'] = y2
mapping['elev'] = 0
# The polygons tend to disappear when zooming. One fix might be to
# add more points to the edges of the polygon
coords = """\
{x1:10.4f},{y1:10.4f},{elev:10.4f}
{x2:10.4f},{y1:10.4f},{elev:10.4f}
{x2:10.4f},{y2:10.4f},{elev:10.4f}
{x1:10.4f},{y2:10.4f},{elev:10.4f}
{x1:10.4f},{y1:10.4f},{elev:10.4f}
""".format(**mapping).replace(' ','')
# ExtendedData is used in BalloonStyle.text() fields.
placemark = KML.Placemark(
KML.name("Computational Domain"),
KML.visibility(0),
KML.Snippet(snippet,maxLines="2"),
KML.styleUrl(chr(35) + "comp_domain"),
KML.ExtendedData(
KML.Data(KML.value("%g"% x1),name="x1"),
KML.Data(KML.value("%g"% y1),name="y1"),
KML.Data(KML.value("%g"% x2),name="x2"),
KML.Data(KML.value("%g"% y2),name="y2")),
KML.Polygon(
KML.tessellate(1),
KML.altitudeMode("clampToGround"),
KML.outerBoundaryIs(
KML.LinearRing(
KML.coordinates(coords)))))
placemark_folder.append(placemark)
print(" ")
# Create regions for remaining regions specifed in regions.data
try:
f = open(os.path.join(plotdata.outdir,"regions.data"),'r')
except:
print(" No regions.data file found.")
else:
# Read past comments; last 'l' is blank line
l = f.readline()
while (l.startswith('#')):
l = f.readline()
# read line containing number of gauges
l = f.readline()
# Read the data lines containing gauge information
regions = []
for r in f.readlines():
regions.append(np.fromstring(r.strip(),sep=' '))
# Format the text in the Placemark balloon.
btext = \
"<style media=\"screen\" type=\"text/css\">" \
"pre {font-weight:bold;font-style:12pt}" + \
"span.title {font-weight:bold;font-size:12pt} " + \
"</style>" + \
"<center><span class=\"title\">$[name]</span></center>" + \
"<pre>" + \
"Levels : minlevel = $[minlevel], maxlevel = $[maxlevel]\n" + \
"Time : t1 = $[t1], t2 = $[t2]\n" + \
"Location : x1 = $[x1], x2 = $[x2]\n" + \
" y1 = $[y1], y2 = $[y2]\n" + \
"\n" + \
"From (UTC) : $[tsbegin]\n" + \
"To (UTC) : $[tsend]" + \
"</pre>"
# the 'text' tag will replace Placemark description
balloon_text = KML.text("<![CDATA[%s]]>" % btext)
width = 2
box_color = "FFFFFFFF"
# Now start creating real regions.
for rnum,region in enumerate(regions):
minlevel,maxlevel = region[0:2]
t1,t2 = region[2:4]
x1,x2,y1,y2 = region[4:]
print("Region %i: %10.6f %10.6f %10.6f %10.6f" \
% (rnum,x1,x2,y1,y2))
print(" minlevel = %i, maxlevel = %i" \
% (minlevel,maxlevel) \
+ " t1 = %10.1f, t2 = %10.1f" % (t1,t2))
# get TimeSpan for region
event_time = plotdata.kml_starttime
tz = plotdata.kml_tz_offset
frameno = framenos[-1]
t2_slider = min([t2,frametimes[frameno]]) # Don't show times like 1e+9
sbegin, send = kmltools.kml_timespan(t1,t2_slider,event_time,tz)
TS_region = KML.TimeSpan(
KML.begin(sbegin),
KML.end(send))
c = TS_region.getchildren()
tsbegin = c[0]
tsend = c[1]
# Style for this region
pathstr = "Path_region_%02d" % rnum
doc_regions.Document.append(
KML.Style(
KML.LineStyle(
KML.color(box_color),
KML.width(width)),
KML.PolyStyle(KML.color("000000")),
KML.BalloonStyle(deepcopy(balloon_text)),
id=pathstr))
# Description for Places panel
snippet_str = \
"<b><pre>" + \
"minlevel = %i, maxlevel = %i\n" % (minlevel,maxlevel) + \
"t1 = %g, t2 = %g\n" % (t1,t2) +\
"\n" + \
"From (UTC) : %s\n" % tsbegin + \
"To (UTC) : %s\n" % tsend + \
"</pre></b>"
snippet = "<![CDATA[%s]]>" % snippet_str
# Get x coordinates in longitude (-180 to 180). Otherwise, the
# polygons don't show up after zooming.
lv = []
if x1 > 180 and x2 > 180:
for x in [x1,x2]:
lv.append(x - 360)
elif x1 < -180 and x2 < -180:
for x in [x1,x2]:
lv.append(x + 360)
else:
lv = [x1,x2] # Also okay if [x1,x2] straddle 180 or -180
longitude = lv
# rectangle with 2 corners specified
mapping = {}
mapping['x1'] = longitude[0]
mapping['x2'] = longitude[1]
mapping['y1'] = y1
mapping['y2'] = y2
mapping['elev'] = 0
# The polygons tend to disappear when zooming. One fix might be to
# add more points to the edges of the polygon
coords = """\
{x1:10.4f},{y1:10.4f},{elev:10.4f}
{x2:10.4f},{y1:10.4f},{elev:10.4f}
{x2:10.4f},{y2:10.4f},{elev:10.4f}
{x1:10.4f},{y2:10.4f},{elev:10.4f}
{x1:10.4f},{y1:10.4f},{elev:10.4f}
""".format(**mapping).replace(' ','')
# ExtendedData is used in BalloonStyle.text() fields.
placemark = KML.Placemark(
KML.name("Region %d" % rnum),
KML.visibility(0),
KML.Snippet(snippet,maxLines="2"),
TS_region,
KML.styleUrl(chr(35) + pathstr),
KML.ExtendedData(
KML.Data(KML.value("%g"% minlevel),name="minlevel"),
KML.Data(KML.value("%g"% maxlevel),name="maxlevel"),
KML.Data(KML.value("%g"% t1),name="t1"),
KML.Data(KML.value("%g"% t2),name="t2"),
KML.Data(KML.value("%g"% x1),name="x1"),
KML.Data(KML.value("%g"% y1),name="y1"),
KML.Data(KML.value("%g"% x2),name="x2"),
KML.Data(KML.value("%g"% y2),name="y2"),
KML.Data(KML.value("%s"% tsbegin),name="tsbegin"),
KML.Data(KML.value("%s"% tsend),name="tsend")),
KML.Polygon(
KML.tessellate(1),
KML.altitudeMode("clampToGround"),
KML.outerBoundaryIs(
KML.LinearRing(
KML.coordinates(coords)))))
placemark_folder.append(placemark)
# Do we have any regions (either computational (from claw.data) or from regions.data?
for p in placemark_folder:
doc_regions.Document.append(p)
kml_file = open(region_kml_file,'wt')
kml_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc_regions),
pretty_print=True).decode()
kml_text = kml_text.replace('>','>') # needed for CDATA blocks
kml_text = kml_text.replace('<','<')
kml_file.write(kml_text)
kml_file.close()
# -------------------- Add link to regions.kml file to top level doc.kml
# Note that we do this, even if a regions.kml file wasn't created.
doc.Document.append(
KML.NetworkLink(
KML.name("Regions"),
KML.visibility(0),
KML.Link(KML.href(os.path.join(kml_dir,
region_kml_file)))))
if os.path.isfile(region_kml_file):
shutil.move(region_kml_file,kml_dir)
# --------------- Create polygons for AMR patch borders --------------
level_kml_file = "levels.kml"
print(" ")
print("KML ===> Creating file %s" % level_kml_file)
try:
f = open(os.path.join(plotdata.outdir,"amr.data"),'r')
except:
# Nothing terrible happens; we just set maxlevels to some large value
maxlevels = 20
for figname in plotdata._fignames:
plotfigure = plotdata.plotfigure_dict[figname]
if not plotfigure.use_for_kml:
continue
else:
maxlevels = plotfigure.kml_maxlevel
else:
# read past comments - last line is blank
a = f.readline()
while (a.startswith('#')):
a = f.readline()
# read line max1d
f.readline()
# read line containing max number of levels
a = f.readline()
ainfo = np.fromstring(a.strip(),sep=' ')
maxlevels = int(ainfo[0]) # This is assumed to be correct for either AMRClaw or ForestClaw
# set _outdirs attribute to be list of all outdirs for all items
plotdata.set_outdirs()
level_dir = "levels"
shutil.rmtree(level_dir,True)
os.mkdir(os.path.join(kml_dir,level_dir))
# Level colors, in (alpha, blue, green, red)
black = ["FF000000"]
white = ["FFFFFFFF"]
ge_theme = ["FFCEC0C4", "FF476653", "FF9C5E4D", "#FF536F92",
"#FF9CC2CC", "FF935B47","FF000000"]
colorcube = ["FF0000FF", "FF00FF00","FFFFFFFF","FF000000","FFFFFF00",
"FFFF00FF","FF00FFFF","FFFF0000"]
# Color scheme to use for level patch borders.
colors = black
width = 1
# Create high level 'levels.kml' file
level_files = []
doc_levels = []
styles = []
# Assume that if we are using ForestClaw, that we have set maxlevels correctly
for i in range(0,maxlevels+1-level_base):
level_file_name = "level_" + str(i+level_base).rjust(2,'0')
level_files.append(level_file_name)
# KML Document for each level
doc_levels.append(KML.kml(KML.Document()))
# Styles for levels
styles.append(KML.Style(
KML.LineStyle(
KML.color(colors[i % len(colors)]), # cycle through colors
KML.width(width)),
KML.PolyStyle(KML.color("00000000")),
id="patchborder"))
# Create individual level files in subdirectories
doc_frames = [[0 for j in range(numframes)] for i in range(0,maxlevels+1-level_base)]
for j in range(0,numframes):
frameno = framenos[j]
for i in range(0,maxlevels+1-level_base):
frame_file_name = level_files[i] + "_" + str(frameno).rjust(4,'0') + ".kml"
if i == 0:
vis = 0 # Don't show first level
else:
vis = 1
N = KML.NetworkLink(
KML.name("Frame %s" % str(frameno).rjust(4,'0')),
KML.visibility(vis),
deepcopy(TS[j]),
KML.Link(
KML.href(os.path.join(level_files[i],frame_file_name))))
doc_levels[i].Document.append(deepcopy(N))
# Create files in each subdirectory
doc_frames[i][j] = KML.kml(KML.Document())
doc_frames[i][j].Document.append(deepcopy(styles[i]))
print(" Re-reading output files to get patch information")
print(" ")
for j in range(0,numframes):
frameno = framenos[j]
framesolns = []
# loop over all outdirs:
if len(plotdata._outdirs) == 0:
plotdata._outdirs = [plotdata.outdir]
for outdir in plotdata._outdirs:
framesolns.append(plotdata.getframe(frameno, outdir))
if type(framesolns) is not list:
framesolns = [framesolns]
for k, framesoln in enumerate(framesolns): # patches?
for stateno,state in enumerate(framesoln.states):
patch = state.patch
xlower = patch.dimensions[0].lower
xupper = patch.dimensions[0].upper
ylower = patch.dimensions[1].lower
yupper = patch.dimensions[1].upper
level = patch.level
if plotdata.kml_map_topo_to_latlong is not None:
xlower,ylower = plotdata.kml_map_topo_to_latlong(xlower,ylower)
xupper,yupper = plotdata.kml_map_topo_to_latlong(xupper,yupper)
lv = []
if xlower > 180:
for x in [xlower,xupper]:
lv.append(x - 360)
elif xupper < -180:
for x in [xlower,xupper]:
lv.append(x + 360)
else:
# Not quite sure why this works in the case when x1,x2 cross 180 ...
lv = [xlower,xupper]
mapping = {}
mapping["x1"] = lv[0]
mapping["y1"] = ylower
mapping["x2"] = lv[1]
mapping["y2"] = yupper
mapping["elev"] = 0
border_text = """
{x1:10.4f},{y1:10.4f},{elev:10.4f}
{x2:10.4f},{y1:10.4f},{elev:10.4f}
{x2:10.4f},{y2:10.4f},{elev:10.4f}
{x1:10.4f},{y2:10.4f},{elev:10.4f}
{x1:10.4f},{y1:10.4f},{elev:10.4f}
""".format(**mapping).replace(' ','')
r = KML.Polygon(
KML.tessellate(1),
KML.altitudeMode("clampToGround"),
KML.outerBoundaryIs(
KML.LinearRing(
KML.coordinates(border_text))))
p = KML.Placemark(
KML.name("Grid %d" % stateno),
KML.visibility(1),
KML.styleUrl(chr(35) + "patchborder"))
p.append(deepcopy(r))
try:
doc_frames[level-level_base][j].Document.append(deepcopy(p))
except:
import pdb
pdb.set_trace()
# Create directories for each level.
for i in range(0,maxlevels+1-level_base):
# Directory for storing levels for each time step
shutil.rmtree(os.path.join(kml_dir,level_dir,level_files[i]),True)
os.mkdir(os.path.join(kml_dir,level_dir,level_files[i]))
# Print out individual frame files for each element
for j in range(0,numframes):
for i in range(0,maxlevels+1-level_base):
frameno = framenos[j]
level_file_name = level_files[i] + "_" + str(frameno).rjust(4,'0') + ".kml"
kml_frame_file = open(os.path.join(kml_dir,level_dir,
level_files[i],level_file_name),'wt')
kml_frame_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc_frames[i][j]),
pretty_print=True)
kml_frame_file.write(kml_text.decode())
kml_frame_file.close()
# Print out level files containing time stamps and references to frame files
for i in range(0,maxlevels+1-level_base):
kml_level_file = open(os.path.join(kml_dir,level_dir,level_files[i]+".kml"),'w')
kml_level_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc_levels[i]),
pretty_print=True)
kml_level_file.write(kml_text.decode())
kml_level_file.close()
# Folders in top level file 'levels.kml'
doc_levels_top = KML.kml(KML.Document())
for i in range(0,maxlevels+1-level_base):
level_file_name = "level_" + str(i+level_base).rjust(2,'0')
f = KML.Folder(KML.name("Level " + str(i+level_base)))
f.append(KML.NetworkLink(
KML.name("Frames"),
KML.Link(
KML.href(os.path.join(level_dir,level_file_name + ".kml")))))
doc_levels_top.Document.append(f)
kml_levels = open(os.path.join(kml_dir,level_kml_file),'wt')
kml_levels.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc_levels_top),
pretty_print=True).decode()
kml_levels.write(kml_text)
kml_levels.close()
# Add to top level KML file
doc.Document.append(
KML.NetworkLink(
KML.name("Levels"),
KML.visibility(1),
KML.Link(KML.href(os.path.join(kml_dir,"levels.kml")))))
# ----------- add user-supplied KML files ------------
user_dir = "user_files"
shutil.rmtree(user_dir,True)
os.mkdir(user_dir)
if len(plotdata.kml_user_files) > 0:
for f in plotdata.kml_user_files:
print(" ")
print("KML ===> Adding user KML file %s" % f[0])
fname = f[0].partition('.')[0]
if f[1]:
vis = 1
else:
vis = 0
shutil.copy(os.path.join("..",f[0]),user_dir)
doc.Document.append(
KML.NetworkLink(
KML.name(fname),
KML.visibility(vis),
KML.Link(KML.href(os.path.join(user_dir,f[0])))))
# ----------- zip additional directories and clean up ------------
dir_list = [kml_dir, img_dir, user_dir]
for d in dir_list:
for dirname, subdirs, files in os.walk(d):
zip.write(dirname)
for filename in files:
zip.write(os.path.join(dirname, filename))
shutil.rmtree(d)
# ----------- Write doc.kml file --------------------
# Top level KML file
docfile = open("doc.kml",'wt')
docfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
kml_text = etree.tostring(etree.ElementTree(doc),pretty_print=True).decode()
kml_text = kml_text.replace('>','>') # needed for CDATA blocks
kml_text = kml_text.replace('<','<')
docfile.write(kml_text)
#docfile.write(etree.tostring(etree.ElementTree(doc),pretty_print=True))
docfile.close()
# Store this in the zip file and remove it.
zip.write("doc.kml") # Root KML file
os.remove("doc.kml")
zip.close()
if plotdata.kml_publish is not None:
print(" ")
print("KML ===> Creating file %s.kml" % plotdata.kml_index_fname)
# Create a KML file that can be used to link to a remote server
update_time = 5 # minutes
doc = KML.kml(KML.Document(
KML.name("GeoClaw"),
KML.visibility(1),
KML.open(1),
deepcopy(initial_view),
KML.NetworkLink(
KML.name(plotdata.kml_name),
KML.visibility(1),
KML.open(1),
KML.Snippet("Updates every %d minutes" % update_time),
KML.Link(
KML.href(os.path.join(plotdata.kml_publish,
plotdata.kml_index_fname + ".kmz")),
KML.refreshMode("onInterval"),
KML.refreshInterval(update_time*60)))))
file = open(plotdata.kml_index_fname + ".kml",'wt')
file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
kml_text = etree.tostring(etree.ElementTree(doc),pretty_print=True)
file.write(kml_text.decode())
file.close()
print(" ")
print("KML ===> Done creating files for Google Earth. Open " \
"%s.kmz in the Google Earth browser" % plotdata.kml_index_fname)
print(" ")
os.chdir(startdir)
# end of plotclaw2kml
#======================================================================
def cd_with_mkdir(newdir, overwrite=False, verbose=True):
#======================================================================
    """
    Change the current working directory to *newdir*, creating it first
    if it does not exist.

    :param newdir: directory to create/enter (converted to an absolute path).
    :param overwrite: if True, an existing directory is reused and files in
        it may later be overwritten; if False, an existing directory is an
        error.
    :param verbose: print progress messages.
    :raises OSError: if *newdir* is an existing file, already exists with
        overwrite=False, or cannot be created.

    Note: a failure to chdir into an existing/created directory is only
    reported, not raised (preserves the original best-effort behavior).
    """
    newdir = os.path.abspath(newdir)
    if os.path.isfile(newdir):
        print("*** Error in cd_with_mkdir: directory specified is a file")
        # bug fix: a bare `raise` outside an except block raised an
        # unrelated "No active exception" RuntimeError; raise OSError
        raise OSError("directory specified is a file: %s" % newdir)
    elif os.path.isdir(newdir) and overwrite:
        # bug fix: was bitwise `&`; `and` is the correct boolean operator
        if verbose:
            print("Directory '%s' " % newdir)
            print(" already exists, files may be overwritten ")
    elif os.path.isdir(newdir) and (not overwrite):
        print("*** Error in cd_with_mkdir")
        print("Directory already exists:\n ",newdir)
        print("Remove directory with \n ' rm -r %s' " % newdir)
        print(" and try again, or set overwrite=True ")
        raise OSError("directory already exists: %s" % newdir)
    else:
        try:
            os.mkdir(newdir)
            if verbose:
                print("Created directory:\n ", newdir)
        except OSError:
            print("*** Error in cd_with_mkdir")
            print("Cannot make directory: \n ",newdir)
            raise
    try:
        os.chdir(newdir)
    except OSError:
        print("*** Error in cd_with_mkdir")
        print("Cannot change directory to \n ",newdir)
#======================================================================
def cd_plotdir(plotdir, overwrite):
#======================================================================
    """
    Change the current working directory to *plotdir*, creating it first
    if it does not exist.

    :param plotdir: directory to create/enter.
    :param overwrite: if True, an existing directory is reused (files may
        later be overwritten); if False, an existing directory is an error.
    :raises OSError: if *plotdir* is an existing file, already exists with
        overwrite=False, or cannot be created.

    Note: a failure to chdir into the directory is only reported, not
    raised (preserves the original best-effort behavior).
    """
    verbose = False
    if os.path.isfile(plotdir):
        print("*** Error in cd_plotdir: plotdir specified is a file")
        # bug fix: a bare `raise` outside an except block raised an
        # unrelated "No active exception" RuntimeError; raise OSError
        raise OSError("plotdir specified is a file: %s" % plotdir)
    elif os.path.isdir(plotdir) and overwrite:
        # bug fix: was bitwise `&`; `and` is the correct boolean operator
        if verbose:
            print("Directory '%s' " % plotdir)
            print(" already exists, files may be overwritten ")
    elif os.path.isdir(plotdir) and (not overwrite):
        print("Directory '%s'" % plotdir)
        print(" already exists")
        print("Remove directory with \n ' rm -r %s' " % plotdir)
        print(" and try again, or set overwrite=True ")
        print("*** Error in cd_plotdir")
        raise OSError("directory already exists: %s" % plotdir)
    else:
        try:
            os.mkdir(plotdir)
        except OSError:
            print("Cannot make directory ",plotdir)
            print("*** Error in cd_plotdir")
            raise
    try:
        os.chdir(plotdir)
    except OSError:
        print("*** Error trying to cd to ",plotdir)
#=====================================
def massage_frames_data(plot_pages_data):
#=====================================
    """
    Fill in and normalize the time-frame attributes of *plot_pages_data*.

    Expands ``framenos == 'all'`` / ``fignos == 'all'`` by globbing the
    current directory for png files named ``<prefix>NNNNfigJ.png``,
    supplies the default figure name 'Solution', and attaches dictionaries
    of derived file names to the object:
    ``_pngfile``, ``_htmlfile``, ``_frametimef``, ``_allfigsfile``,
    ``_allframesfile``, ``_moviefile``.

    :param plot_pages_data: object with ``timeframes_*`` attributes
        (e.g. a PlotPagesData / ClawPlotData instance).
    :returns: the updated object, or None if the timeframes attributes
        are not set properly.
    """
    ppd = plot_pages_data
    try:
        framenos = ppd.timeframes_framenos
        frametimes = ppd.timeframes_frametimes
        fignos = ppd.timeframes_fignos
        fignames = ppd.timeframes_fignames
        prefix = getattr(ppd, 'file_prefix', 'fort')
        # bug fix: was `prefix is 'fort'` -- identity comparison with a
        # string literal only worked via CPython interning; use `==`
        if prefix == 'fort':
            prefix = getattr(ppd, 'timeframes_prefix', 'frame')
        if prefix != 'frame':
            prefix = prefix + 'frame'
    except AttributeError:
        print('*** Error: timeframes not set properly')
        return
    if framenos == 'all' or fignos == 'all':
        # need to determine which figures exist
        files = glob.glob('%s*.png' % prefix)
        nchar = len(prefix)   # renamed from `np` to avoid the numpy alias
        if framenos == 'all':
            # frame number is the 4-digit field right after the prefix
            framenos = sorted({int(file[nchar:nchar + 4]) for file in files})
        if fignos == 'all':
            # figure number follows the 'fig' marker (prefix + 4 digits + 'fig')
            framestems = [os.path.splitext(file)[0] for file in files]
            fignos = sorted({int(stem[nchar + 7:]) for stem in framestems})
    allframesfile = {}
    moviefile = {}
    for figno in fignos:
        if figno not in fignames:
            fignames[figno] = 'Solution'
        allframesfile[figno] = 'allframes_fig%s.html' % figno
        moviefile[figno] = 'movie_fig%s.html' % figno
    if len(framenos) == 0:
        print('*** Warning: 0 frames to print')
    if len(fignos) == 0:
        print('*** Warning: 0 figures to print each frame')
    pngfile = {}
    htmlfile = {}
    frametimef = {}
    allfigsfile = {}
    for frameno in framenos:
        framef = str(frameno).zfill(4)
        try:
            ftime = frametimes[frameno]
        except (KeyError, TypeError):
            # no time recorded for this frame
            ftime = '?'
        if ftime == '?':
            ftimef = ftime
        elif (ftime == 0) or (0.001 < ftime < 1000):
            # fixed-point format for "moderate" times (was bitwise |/&)
            ftimef = '%9.5f' % ftime
        else:
            ftimef = '%12.5e' % ftime
        frametimef[frameno] = ftimef
        for figno in fignos:
            pngfile[frameno, figno] = '%s%sfig%s.png' % (prefix, framef, figno)
            htmlfile[frameno, figno] = '%s%sfig%s.html' % (prefix, framef, figno)
        allfigsfile[frameno] = '%s_allfigs%s.html' % (prefix, framef)
    ppd.timeframes_framenos = framenos
    ppd.timeframes_fignos = fignos
    ppd.timeframes_fignames = fignames
    ppd._pngfile = pngfile
    ppd._htmlfile = htmlfile
    ppd._frametimef = frametimef
    ppd._allfigsfile = allfigsfile
    ppd._allframesfile = allframesfile
    ppd._moviefile = moviefile
    return ppd
#======================================================================
def timeframes2latex(plot_pages_data):
#======================================================================
    """
    Take a sequence of figure files in format frame000NfigJ.png for
    N in framenos and J in fignos, and produce a latex file containing
    them all.

    plot_pages_data.timeframes_framenos is list of frames to use,
    plot_pages_data.timeframes_frametimes is dictionary of time for each frame
    plot_pages_data.timeframes_fignos is list of figs to use,
    plot_pages_data.timeframes_fignames is dictionary of fig names for index.
    plot_pages_data.timeframes_prefix is the string indicating how the
    files are named ('frame' by default).

    Side effects: chdirs into plot_pages_data.plotdir (creating it if
    needed), writes <latex_fname>.tex there, optionally runs pdflatex,
    and returns to the original working directory.
    """
    print('\n-----------------------------------\n')
    print('Creating latex file...')
    startdir = os.getcwd()
    ppd = plot_pages_data
    try:
        cd_with_mkdir(ppd.plotdir, ppd.overwrite, ppd.verbose)
    except Exception:
        print("*** Error, aborting timeframes2latex")
        raise
    creationtime = current_time()
    ppd = massage_frames_data(ppd)
    plotdir = ppd.plotdir
    framenos = ppd.timeframes_framenos
    fignos = ppd.timeframes_fignos
    pngfile = ppd._pngfile
    latexfile = open(ppd.latex_fname + '.tex', 'w')
    # latex header
    #-------------
    latexfile.write(r"""
\documentclass[11pt]{article}
\usepackage{graphicx}
\setlength{\textwidth}{7.5in}
\setlength{\oddsidemargin}{-0.5in}
\setlength{\evensidemargin}{-0.5in}
\setlength{\textheight}{9.2in}
\setlength{\voffset}{-1in}
\setlength{\headsep}{5pt}
\begin{document}
\begin{center}{\Large\bf %s}\vskip 5pt
""" % ppd.latex_title)
    latexfile.write(r"""
\bf Plots created {\tt %s} in directory: \vskip 5pt
\verb+%s+
\end{center}
\vskip 5pt
""" % (creationtime, startdir))
    # latex layout
    #-------------
    # determine how many plots should appear on each page and line:
    framesperpage = ppd.latex_framesperpage
    if framesperpage == 'all':
        framesperpage = len(framenos)
    framesperline = ppd.latex_framesperline
    if framesperline == 'all':
        framesperline = len(framenos)
    figsperline = ppd.latex_figsperline
    if figsperline == 'all':
        figsperline = len(fignos)
    # bug fix: was bitwise `&`; `and` is the boolean operator wanted here
    if (figsperline < len(fignos)) and (framesperline > 1):
        print('*** Incompatible layout: resetting framesperline to 1')
        framesperline = 1
    totalperline = framesperline * figsperline
    if totalperline < 1:
        print('*** Warning: 0 figures per line requested in latex file')
        print('No latex file generated due to format error')
        # bug fix: close the open file and restore the original cwd
        # before bailing out (previously both were leaked)
        latexfile.close()
        os.chdir(startdir)
        return
    # width each plot must be:
    fwidth = 0.95/totalperline
    framecnt = 0
    for frameno in framenos:
        if framecnt >= framesperpage:
            latexfile.write('\\newpage \n')
            framecnt = 0
        elif framecnt >= framesperline:
            latexfile.write('\\vskip 10pt \n')
            framecnt = 0
        framecnt += 1
        figcnt = 0
        for figno in fignos:
            if figcnt >= figsperline:
                latexfile.write('\\vskip 10pt \n')
                figcnt = 0
            figcnt += 1
            latexfile.write('\\includegraphics[width=%s\\textwidth]{%s}\n' \
                            % (fwidth, pngfile[frameno, figno]))
    latexfile.write('\\end{document}\n')
    latexfile.close()
    print("\nLatex file created: ")
    print(" %s/%s.tex" % (plotdir, ppd.latex_fname))
    print("\nUse pdflatex to create pdf file")
    # bug fix: was `ppd.latex & ppd.latex_makepdf` (bitwise); also,
    # os.system does not raise on failure, so the old try/except always
    # reported success -- check the exit status instead
    if ppd.latex and ppd.latex_makepdf:
        status = os.system('pdflatex %s' % ppd.latex_fname)
        if status == 0:
            print("\nSuccessfully created pdf file: %s/%s.pdf" \
                  % (plotdir, ppd.latex_fname))
        else:
            print('*** pdflatex command failed')
    os.chdir(startdir)
# end of timeframes2latex
#============================
def test(makeplots = True):
#============================
    """
    Self-test: create six plots of f(x) = x**n for n = 0..5 in the
    'plots' directory and build html/latex pages for them via a
    PlotPagesData object.

    :param makeplots: if False, skip regenerating the png files and only
        rebuild the pages (assumes the pngs already exist).
    """
    try:
        from pylab import linspace,clf,plot,title,savefig,mod
    except Exception:
        # narrowed from a bare `except:` -- still best-effort, but no
        # longer swallows KeyboardInterrupt/SystemExit
        print('*** Error: could not import pylab')
        return
    ppd = PlotPagesData()
    ppd.plotdir = 'plots'
    ppd.html = True
    ppd.latex = True
    ppd.latex_itemsperline = 2
    ppd.latex_itemsperpage = 4
    ppd.latex_makepdf = False
    # create test figures:
    x = linspace(0,1,201)
    for n in range(6):
        fname = 'plot%s.png' % n
        fname_savefig = os.path.join(ppd.plotdir, fname)
        if makeplots:
            clf()
            y = x**n
            plot(x,y)
            title('$f(x) = x^%s$' % n)
            savefig(fname_savefig)
        pid = ppd.new_pageitem()
        pid.fname = fname
        pid.html_index_entry = "Plot of x^%s" % n
        if mod(n,2) == 0:
            # add a latex heading before every other plot
            pid.latex_preitem = r"""
\vskip 5pt \noindent{\large\bf Plot of $x^%s$}\vskip 2pt""" % n
    ppd.make_pages()
#============================
def clawtest():
#============================
    """
    Demo driver: generate plot pages for two runs (mx = 50 and mx = 100)
    and collect links to both in a top-level html index file
    'vary_mx_index.html'.
    """
    html_index = HtmlIndex(fname='vary_mx_index.html', \
                           title='Results from running vary_mx.py')
    html_index.add(text = 'Experiments varying mx')
    for mx in (50, 100):
        outdir = 'output.mx%s' % mx
        # configure one PlotPagesData object per run:
        ppd = PlotPagesData()
        ppd.plotdir = outdir
        ppd.overwrite = True
        ppd.html = True
        ppd.html_index_title = 'Clawpack Plots with mx = %s' % mx
        ppd.latex = True
        ppd.latex_makepdf = False
        ppd.timeframes_framenos = 'all'
        ppd.timeframes_frametimes = {}
        ppd.timeframes_fignos = 'all'
        ppd.timeframes_fignames = {}
        ppd.make_timeframes_html()
        ppd.make_timeframes_latex()
        # update global index:
        html_index.add(text = 'mx = %s' % mx,
                       link = os.path.join(outdir, ppd.html_index_fname))
    html_index.close()
#-----------------------------
def current_time(addtz=False):
#-----------------------------
    """
    Return the current local time reformatted from time.asctime(),
    e.g. 'Mon Jan  1 2024 at 12:00:00'.  If *addtz* is True, append
    the local timezone name.
    """
    stamp = time.asctime()            # e.g. 'Mon Jan  1 12:00:00 2024'
    # rearrange to: '<weekday month day> <year> at <HH:MM:SS>'
    formatted = stamp[:-14] + stamp[-5:] + ' at ' + stamp[-13:-5]
    if addtz:
        formatted += ' ' + time.tzname[time.daylight]
    return formatted
#======================================================================
def plotclaw2html(plotdata):
#======================================================================
"""
Create an html index plus individual html pages for each figure created
from the specified plotdata.
Assumes the following types of figures may exist:
time frame figures of the form frame000NfigJ.png
gauge figures of the form gauge000NfigJ.png
other each_run type figures of the form figJ.png
other figures can be specified in a dictionary plotdata.otherfigs
Attributes read from plotdata (normalized below by massage_frames_data
and massage_gauges_data):
plotdata.timeframes_framenos is list of frames to use,
plotdata.timeframes_frametimes is dictionary of time for each frame
plotdata.timeframes_fignos is list of figs to use,
plotdata.timeframes_fignames is dictionary of fig names for index.
plotdata.gauges_gaugenos is list of gauges to use,
plotdata.gauges_fignos is list of figs to use,
plotdata.gauges_fignames is dictionary of fig names for index.
plotdata.eachrun_fignos is list of figs to use,
plotdata.eachrun_fignames is dictionary of fig names for index.
Side effects: chdirs into plotdata.plotdir (creating it if needed),
writes all html files there, then returns to the original directory.
"""
print('\n-----------------------------------\n')
print('\nCreating html pages for figures...\n')
startdir = os.getcwd()
# move into the plot directory; everything below writes files there
try:
cd_with_mkdir(plotdata.plotdir, plotdata.overwrite, plotdata.verbose)
except:
print("*** Error, aborting plotclaw2html")
raise
creationtime = current_time()
# normalize frame/gauge attributes and attach derived file-name dicts
plotdata = massage_frames_data(plotdata)
if plotdata.gauges_fignos is not None:
plotdata = massage_gauges_data(plotdata)
gauge_pngfile = plotdata._gauge_pngfile
gauge_htmlfile = plotdata._gauge_htmlfile
gauge_allfigsfile = plotdata._gauge_allfigsfile
framenos = plotdata.timeframes_framenos
frametimes = plotdata.timeframes_frametimes
fignos = plotdata.timeframes_fignos
fignames = plotdata.timeframes_fignames
pngfile = plotdata._pngfile
htmlfile = plotdata._htmlfile
frametimef = plotdata._frametimef
allfigsfile = plotdata._allfigsfile
allframesfile = plotdata._allframesfile
moviefile = plotdata._moviefile
numframes = len(framenos)
numfigs = len(fignos)
# 'eagle' switches on EagleClaw-specific page headers/styling
eagle = getattr(plotdata,'html_eagle',False)
# Create the index page:
#-----------------------
html = open(plotdata.html_index_fname,'w')
if eagle:
html.write("""
<html><meta http-equiv="expires" content="0">
<title>EagleClaw Plot Index</title>
<head>
<link type="text/css" rel="stylesheet"
href="http://localhost:50005/eagleclaw/eagleclaw.css">
</head>
<eagle1>EagleClaw -- Plot Index</eagle1>
<eagle2>Easy Access Graphical Laboratory for Exploring Conservation
Laws</eagle2>
<p>
<center><eagle3>
<a href="../eaglemenu.html">Main Menu for this run-directory
</a></eagle3> </center><p>
""")
else:
html.write('<html><meta http-equiv="expires" content="0">')
html.write('\n<title>%s</title>' % plotdata.html_index_title)
html.write('\n<body><center><h1>%s</h1></center>\n' \
% plotdata.html_index_title)
homelink = getattr(plotdata,'html_homelink',None)
if homelink:
html.write('<center><a href="%s">Back to %s</a></center>\n' \
% (homelink, homelink))
html.write('<p>\n')
html.write('<center>Plots created: %s ' % creationtime )
html.write('</center><p>\n')
# "Go to" shortcuts at the top of the index:
html.write('<p>\n<b>Go to:</b>\n')
gaugenos = plotdata.gauges_gaugenos
if gaugenos is not None:
numgauges = len(gaugenos)
if (len(plotdata.gauges_fignos)>0):
html.write(' <a href="#gauges">Gauges</a>\n')
html.write(' <a href="#eachrun">Other plots</a>\n')
html.write('<p>\n<a name="timeframes"><h3>Time frames:</h3></a>\n')
html.write('<p>\n<table border=0 cellpadding=5 cellspacing=5>\n')
if plotdata.latex_makepdf:
html.write('<p><tr><td><b>pdf file:</b></td>')
html.write('\n <td><a href="%s.pdf">%s.pdf</a></td>' \
% (plotdata.latex_fname,plotdata.latex_fname))
html.write('</tr>\n')
if plotdata.html_movie:
html.write('<p><tr><td><b>js Movies:</b></td>')
for figno in fignos:
html.write('\n <td><a href="%s">%s</a></td>' \
% (moviefile[figno],fignames[figno]))
html.write('</tr>\n')
if plotdata.gif_movie:
html.write('<p><tr><td><b>gif Movies:</b></td>')
for ifig in range(len(fignos)):
html.write('\n <td><a href="moviefig%s.gif">%s</a></td>' \
% (fignos[ifig],fignames[fignos[ifig]]))
html.write('</tr>\n')
html.write('<p>\n<tr><td><b>All Frames:</b></td> ')
for ifig in range(len(fignos)):
html.write('\n <td><a href="%s">%s</a></td>' \
% (allframesfile[fignos[ifig]],fignames[fignos[ifig]]))
html.write('</tr>\n')
# one table row per frame, one cell per figure:
html.write('<p>\n<tr><td><b>Individual Frames:</b></td> </tr>\n')
for frameno in framenos:
html.write('\n <tr><td>Frame %s, t = %s:</td>' \
% (frameno,frametimef[frameno]))
for figno in fignos:
figname = fignames[figno]
html.write('\n <td><a href="%s">%s</a></td>' \
% (htmlfile[frameno,figno],figname))
if numfigs > 1:
html.write('\n<td><a href="%s">All figures</a></td>' \
% allfigsfile[frameno])
html.write('</tr>\n')
html.write('</table>\n')
# Gauges:
#----------------
if gaugenos is not None:
# NOTE: fignos/fignames are rebound to the gauge figures here;
# they are restored to the timeframe values after this section
fignos = plotdata.gauges_fignos
fignames = plotdata.gauges_fignames
if (len(fignos)>0):
html.write('<p>\n<a name="gauges"><h3>Gauges:</h3></a>\n')
html.write('<p>\n<table border=0 cellpadding=5 cellspacing=5>\n')
html.write('<p>\n<tr><td><b>All Gauges:</b></td> ')
for ifig in range(len(fignos)):
html.write('\n <td><a href="allgaugesfig%s.html">%s</a></td>' \
% (fignos[ifig],fignames[fignos[ifig]]))
html.write('</tr>\n')
html.write('<p>\n<tr><td><b>Individual Gauges:</b></td> </tr>\n')
for gaugeno in gaugenos:
html.write('\n <tr><td>Gauge %s:</td>' % (gaugeno))
for figno in fignos:
figname = fignames[figno]
html.write('\n <td><a href="%s">%s</a></td>' \
% (gauge_htmlfile[gaugeno,figno],figname))
if numfigs > 1:
html.write('\n<td><a href="%s">All figures</a></td>' \
% gauge_allfigsfile[gaugeno])
html.write('</tr>\n')
html.write('</table>\n')
# Other plots:
#----------------
if len(plotdata.otherfigure_dict)>0:
html.write('<p>\n<a name="eachrun"><h3>Other plots:</h3></a>\n')
html.write('<p><ul>\n')
# six.iterkeys is a Python-2 compatibility relic
for name in six.iterkeys(plotdata.otherfigure_dict):
otherfigure = plotdata.otherfigure_dict[name]
fname = otherfigure.fname
makefig = otherfigure.makefig
if makefig:
# makefig may be a code string (exec'd) or a callable
if type(makefig)==str:
try:
exec((makefig), globals(), locals())
except:
print("*** Problem executing makefig ")
print(" for otherfigure ",name)
else:
try:
makefig(plotdata)
except:
print("*** Problem executing makefig function")
print(" for otherfigure ",name)
raise
html.write('<p><li><a href="%s">%s</a>\n' %(fname,name))
html.write('<p></ul>\n')
html.write('</body></html>')
# end of index
#----------------------------------------------------------------------
# restore timeframe figure lists (may have been rebound for gauges):
fignos = plotdata.timeframes_fignos
fignames = plotdata.timeframes_fignames
# allframesfigJ.html
#-------------------
# one page per figure showing every frame as a clickable thumbnail
for figno in fignos:
html = open(allframesfile[figno], 'w')
html.write('<html><meta http-equiv="expires" content="0">')
html.write('<title>Plots</title>')
html.write('<body>\n<center><h1>All Frames -- %s</h1>\n' \
% fignames[figno])
html.write('<p>\n')
html.write('\n<p><h3><a href=%s>Plot Index</a></h3>\n' \
% (plotdata.html_index_fname))
html.write('<p>\n')
html.write('<h3>Click on a figure to enlarge</h3>\n')
html.write('<p>\n')
for frameno in framenos:
html.write(' <a href="%s"><img src="%s" width=400></a>\n' \
% (htmlfile[frameno,figno], pngfile[frameno,figno]))
html.write('\n</center></body></html>\n')
html.close()
# allfigsframeN.html
#-------------------
# one page per frame showing all figures (only when there are several)
if numfigs > 1:
for iframe in range(numframes):
frameno = framenos[iframe]
html = open(allfigsfile[frameno], 'w')
html.write('<html><meta http-equiv="expires" content="0">')
html.write('<title>Plots</title>')
html.write('<body>\n<center><h3>All Figures -- Frame %s' \
% framenos[iframe])
html.write(' at time t = %s' % frametimef[frameno])
html.write('<p>\n')
# Write link commands to previous and next frame:
# (first/last frame get one-sided navigation)
html.write('<p> <a href="%s">' % allfigsfile[framenos[0]])
html.write('< <</a> \n')
if iframe==0:
html.write('< ')
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
if numframes > 1:
html.write(' <a href="%s"> > </a> ' \
% allfigsfile[framenos[1]])
elif iframe==numframes-1:
if numframes > 1:
html.write('\n<a href="%s"> < </a> ' \
% allfigsfile[framenos[iframe-1]])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write(' > ')
else:
html.write('\n<a href="%s"> < </a> ' \
% allfigsfile[framenos[iframe-1]])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write('\n <a href="%s"> > </a> ' \
% allfigsfile[framenos[iframe+1]])
html.write(' \n<a href="%s"> ' \
% allfigsfile[framenos[numframes-1]])
html.write('> ></a> \n')
html.write('</h3><p>\n')
html.write('<h3>Click on a figure to enlarge</h3>\n')
html.write('<p>\n')
for figno in fignos:
html.write(' <a href="%s"><img src="%s" width=400></a>\n' \
% (htmlfile[frameno,figno], pngfile[frameno,figno]))
# list of all frames at bottom:
html.write('\n<p><b>Other frames:</b></a> ')
for frameno2 in framenos:
if frameno2 == frameno:
html.write('\n<font color=red>%i</font> ' \
% frameno)
else:
html.write('\n<a href="%s">%i</a> ' \
% (allfigsfile[frameno2],frameno2))
html.write('\n</center></body></html>\n')
html.close()
# frameNfigJ.html -- individual files for each frame/fig combo
#----------------
for iframe in range(numframes):
frameno = framenos[iframe]
for figno in fignos:
html = open(htmlfile[frameno,figno],'w')
html.write('<html><meta http-equiv="expires" content="0">\n')
html.write('<title>Plots</title>')
html.write('<body><center>\n')
html.write('\n<h3>Frame %i ' % frameno)
if numfigs > 1:
html.write(' --- %s' % fignames[figno] )
html.write(' at time t = %s</h3>' % frametimef[frameno])
# Write link commands to previous and next frame:
html.write('<p> <a href="%s">' % htmlfile[framenos[0],figno])
html.write('< <</a> \n')
if iframe==0:
html.write('< ')
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
if numframes > 1:
html.write(' <a href="%s"> > </a> ' \
% htmlfile[framenos[1],figno])
elif iframe==numframes-1:
if numframes > 1:
html.write('\n<a href="%s"> < </a> ' \
% htmlfile[framenos[iframe-1],figno])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write(' > ')
else:
html.write('\n<a href="%s"> < </a> ' \
% htmlfile[framenos[iframe-1],figno])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write('\n <a href="%s"> > </a> ' \
% htmlfile[framenos[iframe+1],figno])
html.write(' \n<a href="%s"> ' \
% htmlfile[framenos[numframes-1],figno])
html.write('> ></a> \n')
# image:
html.write('\n\n <p><img src="%s"><p> \n ' \
% pngfile[frameno,figno])
html.write('\n\nImage source: %s' \
% os.path.join(os.getcwd(),pngfile[frameno,figno]))
# list of all figures at bottom of page:
if numfigs > 1:
html.write('\n<p><b>Other figures at this time:</b> ')
for figno2 in fignos:
if figno2 == figno:
html.write('\n<font color=red>%s</font> ' \
% fignames[figno])
else:
html.write('\n<a href="%s">%s</a> ' \
% (htmlfile[frameno,figno2],fignames[figno2]))
html.write('\n<a href="%s"> All Figures </a>' \
% allfigsfile[frameno])
# list of all frames at bottom of page:
html.write('\n<p><b>Other frames:</b></a> ')
for frameno2 in framenos:
if frameno2 == frameno:
html.write('\n<font color=red>%i</font> ' \
% frameno)
else:
html.write('\n<a href="%s">%i</a> ' \
% (htmlfile[frameno2,figno],frameno2))
html.write('\n<a href="%s"> All Frames </a>' \
% allframesfile[figno])
html.write('\n<p><h3><a href=%s>Plot Index</a></h3>' \
% (plotdata.html_index_fname))
if eagle:
html.write("""<p><h3><a href="../eaglemenu.html">Main Menu for
this run-directory</a></h3> """)
html.write('</center></body></html>')
html.close()
# moviefigJ.html
#-------------------
if (plotdata.html_movie in [True, "4.x"]) and (len(framenos) > 0):
# original style still used if plotdata.html_movie == "4.x":
for figno in fignos:
html = open('movie%s' % allframesfile[figno], 'w')
text = htmlmovie(plotdata.html_index_fname,pngfile,framenos,figno)
html.write(text)
html.close()
#----------------------------------------------------------------------
# switch to gauge figures for the remaining sections:
fignos = plotdata.gauges_fignos
fignames = plotdata.gauges_fignames
# allgaugesfigJ.html
#-------------------
if fignos is None:
fignos = []
for figno in fignos:
html = open('allgaugesfig%s.html' % figno, 'w')
html.write('<html><meta http-equiv="expires" content="0">')
html.write('<title>Plots</title>')
html.write('<body>\n<center><h1>All Gauges -- %s</h1>\n' \
% fignames[figno])
html.write('<p>\n')
html.write('\n<p><h3><a href=%s>Plot Index</a></h3>\n' \
% (plotdata.html_index_fname))
html.write('<p>\n')
html.write('<h3>Click on a figure to enlarge</h3>\n')
html.write('<p>\n')
for gaugeno in gaugenos:
html.write(' <a href="%s"><img src="%s" width=400></a>\n' \
% (gauge_htmlfile[gaugeno,figno], gauge_pngfile[gaugeno,figno]))
html.write('\n</center></body></html>\n')
html.close()
# allfigsgaugeN.html
#-------------------
if gaugenos is not None:
if numfigs > 1:
for igauge in range(numgauges):
gaugeno = gaugenos[igauge]
html = open(gauge_allfigsfile[gaugeno], 'w')
html.write('<html><meta http-equiv="expires" content="0">')
html.write('<title>Plots</title>')
html.write('<body>\n<center><h3>All Figures -- Gauge %s' \
% gaugenos[igauge])
html.write('<p>\n')
# Write link commands to previous and next gauge:
html.write('<p> <a href="%s">' % gauge_allfigsfile[gaugenos[0]])
html.write('< <</a> \n')
if igauge==0:
html.write('< ')
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
if numgauges > 1:
html.write(' <a href="%s"> > </a> ' \
% gauge_allfigsfile[gaugenos[1]])
elif igauge==numgauges-1:
if numgauges > 1:
html.write('\n<a href="%s"> < </a> ' \
% gauge_allfigsfile[gaugenos[igauge-1]])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write(' > ')
else:
html.write('\n<a href="%s"> < </a> ' \
% gauge_allfigsfile[gaugenos[igauge-1]])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write('\n <a href="%s"> > </a> ' \
% gauge_allfigsfile[gaugenos[igauge+1]])
html.write(' \n<a href="%s"> ' \
% gauge_allfigsfile[gaugenos[numgauges-1]])
html.write('> ></a> \n')
html.write('</h3><p>\n')
html.write('<h3>Click on a figure to enlarge</h3>\n')
html.write('<p>\n')
for figno in fignos:
html.write(' <a href="%s"><img src="%s" width=400></a>\n' \
% (gauge_htmlfile[gaugeno,figno], gauge_pngfile[gaugeno,figno]))
# list of all gauges at bottom:
html.write('\n<p><b>Other gauges:</b></a> ')
for gaugeno2 in gaugenos:
if gaugeno2 == gaugeno:
html.write('\n<font color=red>%i</font> ' \
% gaugeno)
else:
html.write('\n<a href="%s">%i</a> ' \
% (gauge_allfigsfile[gaugeno2],gaugeno2))
html.write('\n</center></body></html>\n')
html.close()
# gaugeNfigJ.html -- individual files for each gauge/fig combo
#----------------
for igauge in range(numgauges):
gaugeno = gaugenos[igauge]
for figno in fignos:
html = open(gauge_htmlfile[gaugeno,figno],'w')
html.write('<html><meta http-equiv="expires" content="0">\n')
html.write('<title>Plots</title>')
html.write('<body><center>\n')
html.write('\n<h3>Gauge %i ' % gaugeno)
if numfigs > 1:
html.write(' --- %s' % fignames[figno] )
# Write link commands to previous and next gauge:
html.write('<p> <a href="%s">' % gauge_htmlfile[gaugenos[0],figno])
html.write('< <</a> \n')
if igauge==0:
html.write('< ')
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
if numgauges > 1:
html.write(' <a href="%s"> > </a> ' \
% gauge_htmlfile[gaugenos[1],figno])
elif igauge==numgauges-1:
if numgauges > 1:
html.write('\n<a href="%s"> < </a> ' \
% gauge_htmlfile[gaugenos[igauge-1],figno])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write(' > ')
else:
html.write('\n<a href="%s"> < </a> ' \
% gauge_htmlfile[gaugenos[igauge-1],figno])
html.write('\n<a href="%s">Index</a> ' \
% plotdata.html_index_fname)
html.write('\n <a href="%s"> > </a> ' \
% gauge_htmlfile[gaugenos[igauge+1],figno])
html.write(' \n<a href="%s"> ' \
% gauge_htmlfile[gaugenos[numgauges-1],figno])
html.write('> ></a> \n')
# image:
html.write('\n\n <p><img src="%s"><p> \n ' \
% gauge_pngfile[gaugeno,figno])
html.write('\n\nImage source: %s' \
% os.path.join(os.getcwd(),gauge_pngfile[gaugeno,figno]))
# list of all figures at bottom of page:
if numfigs > 1:
html.write('\n<p><b>Other figures at this time:</b> ')
for figno2 in fignos:
if figno2 == figno:
html.write('\n<font color=red>%s</font> ' \
% fignames[figno])
else:
html.write('\n<a href="%s">%s</a> ' \
% (gauge_htmlfile[gaugeno,figno2],fignames[figno2]))
html.write('\n<a href="%s"> All Figures </a>' \
% gauge_allfigsfile[gaugeno])
# list of all gauges at bottom of page:
html.write('\n<p><b>Other gauges:</b></a> ')
for gaugeno2 in gaugenos:
if gaugeno2 == gaugeno:
html.write('\n<font color=red>%i</font> ' \
% gaugeno)
else:
html.write('\n<a href="%s">%i</a> ' \
% (gauge_htmlfile[gaugeno2,figno],gaugeno2))
html.write('\n<a href="allgaugesfig%s.html"> All Gauges </a>' \
% figno)
html.write('\n<p><h3><a href=%s>Plot Index</a></h3>' \
% (plotdata.html_index_fname))
if eagle:
html.write("""<p><h3><a href="../eaglemenu.html">Main Menu for
this run-directory</a></h3> """)
html.write('</center></body></html>')
html.close()
# restore the original working directory before returning:
os.chdir(startdir)
# end of plotclaw2html
#=====================================
def massage_gauges_data(plot_pages_data):
#=====================================
    """
    Normalize the gauge-plot attributes of *plot_pages_data* and attach
    dictionaries of derived file names:
    _gauge_pngfile, _gauge_htmlfile, _gauge_allfigsfile.

    Returns the updated object, or None if the gauge attributes are
    not set properly.
    """
    ppd = plot_pages_data
    try:
        gaugenos = ppd.gauges_gaugenos
        fignos = ppd.gauges_fignos
        fignames = ppd.gauges_fignames
        prefix = getattr(ppd, 'gauges_prefix', 'gauge')
    except:
        print('*** Error: gauges not set properly')
        return
    startdir = os.getcwd()
    # supply a default name for any figure without an explicit one:
    for fig in fignos:
        fignames.setdefault(fig, 'Solution')
    png_names = {}
    html_names = {}
    allfigs_names = {}
    for gauge in gaugenos:
        tag = str(gauge).zfill(4)
        for fig in fignos:
            png_names[gauge, fig] = '%s%sfig%s.png' % (prefix, tag, fig)
            html_names[gauge, fig] = '%s%sfig%s.html' % (prefix, tag, fig)
        allfigs_names[gauge] = 'allfigs%s%s.html' % (prefix, tag)
    ppd.gauges_gaugenos = gaugenos
    ppd.gauges_fignos = fignos
    ppd.gauges_fignames = fignames
    ppd._gauge_pngfile = png_names
    ppd._gauge_htmlfile = html_names
    ppd._gauge_allfigsfile = allfigs_names
    return ppd
def redirect_stdouts(f):
    """
    Decorator that snapshots sys.stdout / sys.stderr before calling *f*
    and restores them afterwards, even if *f* raises.  Used for drivers
    that temporarily point stdout/stderr at a message file.
    """
    @wraps(f)
    def wrapper(*args, **kwds):
        saved_out, saved_err = sys.stdout, sys.stderr
        try:
            return f(*args, **kwds)
        finally:
            # Reset the streams for future print statements.
            sys.stdout, sys.stderr = saved_out, saved_err
    return wrapper
#============================================
@redirect_stdouts
#============================================
def plotclaw_driver(plotdata, verbose=False, format='ascii'):
#============================================
    """
    The ClawPlotData object plotdata will be initialized by a call to
    function setplot unless plotdata.setplot=False.
    If plotdata.setplot=True then it is assumed that the current directory
    contains a module setplot.py that defines this function.
    If plotdata.setplot is a string then it is assumed this is the name of
    a module to import that contains the function setplot.
    If plotdata.setplot is a function then this function will be used.

    Drives the full plotting pipeline: makes png files for each frame and
    gauge, then html index pages, latex/kml output and movies as requested
    by the plotdata attributes.  Returns plotdata (possibly early, on
    error or when only a partial parallel task was requested).

    NOTE(review): the `format` argument is not referenced in this body —
    presumably consumed elsewhere; confirm before removing.
    """
    import glob, sys, os
    from clawpack.visclaw.data import ClawPlotData
    from clawpack.visclaw import frametools, gaugetools, plotpages
    # doing plots in parallel?
    _parallel = plotdata.parallel and (plotdata.num_procs > 1)
    if plotdata._parallel_todo == 'frames':
        # all we need to do is make png's for some frames in this case:
        for frameno in plotdata.print_framenos:
            frametools.plotframe(frameno, plotdata, verbose)
            print('Creating png for Frame %i' % frameno)
        return
    plotdata.save_frames = False
    if plotdata.file_prefix is None:
        plotdata.file_prefix = 'fort'
    datadir = os.getcwd()        # assume data files in this directory
    # NOTE(review): datadir is assigned but never used below.
    if 'matplotlib' not in sys.modules:
        print('*** Error: matplotlib not found, no plots will be done')
        return plotdata
    if not isinstance(plotdata, ClawPlotData):
        print('*** Error, plotdata must be an object of type ClawPlotData')
        return plotdata
    plotdata._mode = 'printframes'
    # plotdata = frametools.call_setplot(plotdata.setplot, plotdata)
    try:
        plotdata.rundir = os.path.abspath(plotdata.rundir)
        plotdata.outdir = os.path.abspath(plotdata.outdir)
        plotdata.plotdir = os.path.abspath(plotdata.plotdir)
        framenos = plotdata.print_framenos  # frames to plot
        gaugenos = plotdata.print_gaugenos  # gauges to plot
        fignos = plotdata.print_fignos      # figures to plot at each frame
        fignames = {}                       # names to use in html files
        rundir = plotdata.rundir            # directory containing *.data files
        outdir = plotdata.outdir            # directory containing fort.* files
        plotdir = plotdata.plotdir          # where to put png and html files
        overwrite = plotdata.overwrite      # ok to overwrite?
        msgfile = plotdata.msgfile          # where to write error messages
    except:
        print('*** Error in printframes: plotdata missing attribute')
        print('  *** plotdata = ', plotdata)
        return plotdata
    if fignos == 'all':
        fignos = plotdata._fignos
        #for (figname,plotfigure) in plotdata.plotfigure_dict.iteritems():
        #    fignos.append(plotfigure.figno)
    # filter out the fignos that will be empty, i.e. plotfigure._show=False.
    plotdata = frametools.set_show(plotdata)
    fignos_to_show = []
    for figname in plotdata._fignames:
        figno = plotdata.plotfigure_dict[figname].figno
        if (figno in fignos) and plotdata.plotfigure_dict[figname]._show:
            fignos_to_show.append(figno)
    fignos = fignos_to_show
    # figure out what type each figure is:
    fignos_each_frame = []
    fignos_each_gauge = []
    fignos_each_run = []
    for figno in fignos:
        figname = plotdata._figname_from_num[figno]
        if plotdata.plotfigure_dict[figname].type == 'each_frame':
            fignos_each_frame.append(figno)
        if plotdata.plotfigure_dict[figname].type == 'each_gauge':
            fignos_each_gauge.append(figno)
        if plotdata.plotfigure_dict[figname].type == 'each_run':
            fignos_each_run.append(figno)
    rootdir = os.getcwd()
    # annoying fix needed when EPD is used for plotting under cygwin:
    if rootdir[0:9] == 'C:\cygwin' and outdir[0:9] != 'C:\cygwin':
        outdir = 'C:\cygwin' + outdir
        plotdata.outdir = outdir
    if rootdir[0:9] == 'C:\cygwin' and rundir[0:9] != 'C:\cygwin':
        rundir = 'C:\cygwin' + rundir
        plotdata.rundir = rundir
    if rootdir[0:9] == 'C:\cygwin' and plotdir[0:9] != 'C:\cygwin':
        plotdir = 'C:\cygwin' + plotdir
        plotdata.plotdir = plotdir
    try:
        os.chdir(rundir)
    except:
        print('*** Error: cannot move to run directory ', rundir)
        print('rootdir = ', rootdir)
        return plotdata
    if msgfile != '':
        # redirect_stdouts decorator restores these streams on exit
        sys.stdout = open(msgfile, 'w')
        sys.stderr = sys.stdout
    try:
        plotpages.cd_plotdir(plotdata.plotdir, plotdata.overwrite)
    except:
        print("*** Error, aborting plotframes")
        return plotdata
    framefiles = glob.glob(os.path.join(plotdir, 'frame*.png')) + \
                 glob.glob(os.path.join(plotdir, 'frame*.html'))
    if (not _parallel) or (plotdata._parallel_todo == 'initialize'):
        if overwrite:
            # remove any old versions:
            for file in framefiles:
                os.remove(file)
        else:
            if len(framefiles) > 1:
                print("*** Remove frame*.png and frame*.html and try again,")
                print("  or use overwrite=True in call to printframes")
                return plotdata
    if plotdata._parallel_todo == 'initialize':
        # initialization pass only: leave png creation to parallel workers
        os.chdir(rundir)
        return plotdata
    try:
        os.chdir(outdir)
    except:
        print('*** Error plotclaw_driver: cannot move to outdir = ', outdir)
        return plotdata
    fortfile = {}     # frameno -> output file name
    pngfile = {}      # (frameno, figno) -> png file name
    frametimes = {}   # frameno -> simulation time
    #import pdb; pdb.set_trace()
    for file in glob.glob(plotdata.file_prefix + '.q*'):
        frameno = int(file[-4:])
        fortfile[frameno] = file
        for figno in fignos_each_frame:
            pngfile[frameno, figno] = 'frame' + file[-4:] + 'fig%s.png' % figno
    #DK: In PetClaw, we don't output fort.q* files.  Instead count the
    #claw.pkl* files.
    if len(fortfile) == 0:
        for file in glob.glob('claw.pkl*'):
            frameno = int(file[9:12])
            fortfile[frameno] = file
            for figno in fignos_each_frame:
                pngfile[frameno, figno] = 'frame' + file[-4:] + 'fig%s.png' % figno
    if len(fortfile) == 0:
        print('*** No fort.q or claw.pkl files found in directory ', os.getcwd())
        return plotdata
    # Discard frames that are not from latest run, based on
    # file modification time:
    framenos = frametools.only_most_recent(framenos, plotdata.outdir,
                                           plotdata.file_prefix)
    numframes = len(framenos)
    print("Will plot %i frames numbered:" % numframes, framenos)
    print('Will make %i figure(s) for each frame, numbered: ' \
          % len(fignos_each_frame), fignos_each_frame)
    #fignames = {}
    #for figname in plotdata._fignames:
        #figno = plotdata.plotfigure_dict[figname].figno
        #fignames[figno] = figname
    # use new attribute:
    fignames = plotdata._figname_from_num
    # Only grab times by loading in time
    for frameno in framenos:
        plotdata.output_controller.output_path = plotdata.outdir
        plotdata.output_controller.file_prefix = plotdata.file_prefix
        frametimes[frameno] = plotdata.output_controller.get_time(frameno)
    # for frameno in framenos:
    #     frametimes[frameno] = plotdata.getframe(frameno, plotdata.outdir).t
    plotdata.timeframes_framenos = framenos
    plotdata.timeframes_frametimes = frametimes
    plotdata.timeframes_fignos = fignos_each_frame
    plotdata.timeframes_fignames = fignames
    # Gauges:
    # -------
    if os.path.exists(os.path.join(plotdata.outdir, "gauges.data")):
        gaugenos = plotdata.print_gaugenos
        if gaugenos == 'all':
            # Read gauge numbers from setgauges.data if it exists:
            setgauges = gaugetools.read_setgauges(plotdata.outdir)
            gaugenos = setgauges.gauge_numbers
        plotdata.gauges_gaugenos = gaugenos
        plotdata.gauges_fignos = fignos_each_gauge
        plotdata.gauges_fignames = fignames
    else:
        gaugenos = []
    # Make html files for time frame figures:
    # ---------------------------------------
    if plotdata.html_movie == "JSAnimation":
        # Only import if we need it:
        try:
            from matplotlib import animation
        except:
            print("*** Warning: Your version of matplotlib may not support JSAnimation")
            print("    Switching to 4.x style animation")
            plotdata.html_movie = "4.x"
    os.chdir(plotdir)
    if plotdata.html:
        #plotpages.timeframes2html(plotdata)
        plotpages.plotclaw2html(plotdata)
        pass
    # Make png files for all frames and gauges:
    # -----------------------------------------
    if not plotdata.printfigs:
        print("Using previously printed figure files")
    else:
        print("Now making png files for all figures...")
        if not _parallel:
            # don't create the png for frames when run in parallel
            # (unless plotdata._parallell_todo=='frames', handled earlier)
            for frameno in framenos:
                frametools.plotframe(frameno, plotdata, verbose)
                print('Frame %i at time t = %s' % (frameno, frametimes[frameno]))
        # keep only gauges that could actually be plotted:
        gaugenos_input = tuple(gaugenos)
        gaugenos = []
        for gaugeno in gaugenos_input:
            try:
                gaugetools.plotgauge(gaugeno, plotdata, verbose)
                print('Found data for Gauge %i ' % gaugeno)
                gaugenos.append(gaugeno)
            except:
                print('*** Warning: Unable to plot Gauge %i' \
                      % gaugeno)
    if plotdata.latex:
        plotpages.timeframes2latex(plotdata)
    #
    if plotdata.kml:
        plotpages.plotclaw2kml(plotdata)
    if (plotdata.html_movie == "JSAnimation") and (len(framenos) > 0):
        # Create Animations
        for figno in fignos_each_frame:
            fname = '*fig' + str(figno) + '.png'
            filenames = sorted(glob.glob(fname))
            # NOTE(review): filenames is unused below; make_anim_outputs_from_plotdir
            # rescans the plot directory itself.
            # RJL: This way gives better resolution although it basically does
            # the same thing as the code I removed, so not sure why
            raw_html = '<html>\n<center><h3><a href=%s>Plot Index</a></h3>\n' \
                       % plotdata.html_index_fname
            if plotdata.file_prefix in ['fort', None]:
                png_prefix = ''
            else:
                png_prefix = plotdata.file_prefix
            # animation_tools is presumably imported at module level — confirm.
            animation_tools.make_anim_outputs_from_plotdir(plotdir=plotdir,
                    #file_name_prefix='movieframe_allframes',
                    file_name_prefix='movie_',
                    png_prefix=png_prefix,
                    figsize=None,
                    fignos=[figno], outputs=['html'], raw_html=raw_html)
            # Note: setting figsize=None above chooses figsize with aspect
            # ratio based on .png files read in, may fit better on page
    #-----------
    # gif movie:
    #-----------
    if plotdata.gif_movie and (len(framenos) > 0):
        print('Making gif movies.  This may take some time....')
        for figno in fignos_each_frame:
            try:
                # requires ImageMagick's `convert` on the PATH
                os.system('convert -delay 20 frame*fig%s.png moviefig%s.gif' \
                          % (figno, figno))
                print('    Created moviefig%s.gif' % figno)
            except:
                print('*** Error creating moviefig%s.gif' % figno)
    os.chdir(rootdir)
    # print out pointers to html index page:
    path_to_html_index = os.path.join(os.path.abspath(plotdata.plotdir), \
                                      plotdata.html_index_fname)
    if plotdata.html:
        print_html_pointers(path_to_html_index)
    return plotdata
    # end of printframes
| clawpack/visclaw | src/python/visclaw/plotpages.py | plotpages.py | py | 112,744 | python | en | code | 29 | github-code | 1 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
18687715314 | #-*- encoding:utf-8 -*-
import MySQLdb
import sqlite3
import sys
import codecs
reload(sys)
sys.setdefaultencoding('utf-8')
def transfer(sqliteConn, mysqlConn, srcTableName, dstTableName, baseNo, ids, cityNo = 0):
    """
    Copy every row of sqlite table *srcTableName* into MySQL table
    *dstTableName*.

    The destination column list is read from MySQL cursor metadata.
    Columns named in *ids* get *baseNo* added to their values (the offset
    is applied inside the sqlite SELECT expression).  A destination column
    named "city" — absent from the source — is filled with *cityNo*.
    Rows are inserted in batches of 1000 and the connection is committed
    at the end.
    """
    mysqlCursor = mysqlConn.cursor()
    # Probe the destination table so cursor.description yields its columns.
    # NOTE(review): table names are interpolated directly into SQL; safe
    # only because callers pass hard-coded, trusted names.
    sql = "select * from %s" % dstTableName
    mysqlCursor.execute(sql)
    columns = [i[0] for i in mysqlCursor.description]
    # Every destination column except "city" is read from the source table.
    columnsCopy = [i for i in columns if i != "city"]
    l = range(0, len(columnsCopy))
    for i in l:
        if columnsCopy[i] in ids:
            # Offset id-like columns by baseNo directly in the SELECT list.
            columnsCopy[i] = "%s + %d" % (columnsCopy[i], baseNo)
    fmtStr = ",".join(columnsCopy)
    # Read the source rows from sqlite into a list.
    sqliteCursor = sqliteConn.cursor()
    sqliteCursor.execute("select %s from %s" % (fmtStr, srcTableName))
    rows = []
    while True:
        row = sqliteCursor.fetchone()
        if row != None:
            rowList = [i for i in row]
            if "city" in columns:
                # Splice the constant city number into the position MySQL expects.
                rowList.insert(columns.index("city"), cityNo)
            rows.append(rowList)
        else:
            break
    # One "%s" placeholder per destination column.
    fmtStr = ",".join(["%s"] * len(columns))
    sql = "insert %s values(%s)" % (dstTableName, fmtStr)
    # Insert in chunks of 1000 rows to bound statement size.
    while True:
        if len(rows) > 1000:
            mysqlCursor.executemany(sql, rows[:1000])
            rows = rows[1000:]
        else:
            mysqlCursor.executemany(sql, rows)
            break
    mysqlConn.commit();
# Import one city's data from a sqlite file into MySQL.
def importCity(cityDb, mysqlConn, baseNo, cityNo):
    """
    Copy station/route data for one city from the sqlite database file
    *cityDb* into the MySQL connection *mysqlConn*, adding *baseNo* to the
    id-like columns and tagging rows with *cityNo* where applicable.
    """
    sqliteConn = sqlite3.connect(cityDb)
    try:
        ids = ["id"]  # column names whose values get baseNo added
        # Bug fix: these calls previously used the module-level `conn`
        # instead of the `mysqlConn` parameter, defeating the argument.
        transfer(sqliteConn, mysqlConn, "station", "station", baseNo, ids, cityNo)
        transfer(sqliteConn, mysqlConn, "lines", "route", baseNo, ids, cityNo)
        ids = ["id", "station", "lineid"]
        transfer(sqliteConn, mysqlConn, "stations", "stations", baseNo, ids)
    finally:
        # close the sqlite connection even if a transfer fails
        sqliteConn.close()
# Script entry: connect to the local MySQL server and import Beijing's data.
# NOTE(review): credentials are hard-coded; consider moving them to a config
# file or environment variables.
conn=MySQLdb.connect(host='127.0.0.1',user='root',passwd='811225',port=3306,charset="utf8")
#conn.select_db("symphony")
conn.select_db("new_db")
#importCity("./beijing", conn, 100000, 1)
importCity("./beijing", conn, 200000, 2)
conn.commit()
| waklin/wxSymphony | DbFiles/Sqlite3ToMySql.py | Sqlite3ToMySql.py | py | 2,102 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 61,
"usage_type": "call"
}
] |
30160807315 | from django.shortcuts import render
from datetime import datetime
import random
import requests
# Create your views here.
#1. 기본 로직
def index(request):
    """Render the static landing page."""
    template_name = 'pages/index.html'
    return render(request, template_name)
def introduce(request):
    """Render the static self-introduction page."""
    template_name = 'pages/introduce.html'
    return render(request, template_name)
def images(request):
    """Render the static image-gallery page."""
    template_name = 'pages/images.html'
    return render(request, template_name)
#2. 템플릿 변수(Template Variable)
def dinner(request):
    """Template-variable demo: show a randomly chosen dinner menu item."""
    menu = ['족발', '햄버거', '치킨', '피자', '엉터리 쌩고기', '떡볶이']
    chosen = random.choice(menu)
    return render(request, 'pages/dinner.html', {'pick': chosen})
#3. 동적 라우팅(Varialbe Routing)
def hello(request, name, age):
    """Variable-routing demo: greet *name*/*age* with a random menu pick."""
    menu = ['족발', '햄버거', '치킨', '피자', '엉터리 쌩고기', '떡볶이']
    context = {'name': name, 'age': age, 'pick': random.choice(menu)}
    return render(request, 'pages/hello.html', context)
#4. 실습
#4-1. 동적 라우팅을 활용해서(name과 age를 인자로 받아) 자기소개 페이지
def introduce2(request, name, age):
    """Self-introduction page driven by URL-captured *name* and *age*."""
    return render(request, 'pages/introduce2.html', {'name': name, 'age': age})
#4-2. 두개의 숫자를 인자로 받아(num1, num2) 곱셈 결과를 출력
def multi(request, num1, num2):
    """Show the product of the two URL-captured numbers."""
    product = num1 * num2
    context = {'result': product, 'num1': num1, 'num2': num2}
    return render(request, 'pages/multi.html', context)
#4-3. 반지름을 인자로 받아 원의 넓이를 구하시오.
def circle(request, r):
    """Show the area of a circle of radius *r* (pi approximated as 3.14)."""
    circle_area = 3.14 * r * r
    return render(request, 'pages/circle.html', {'area': circle_area, 'r': r})
#5. DTL(Django Temolate Language)
def template_language(request):
    """DTL demo: hand several value types (lists, str, datetime) to the template."""
    context = {
        'menus': ['짜장면', '탕수육', '짬뽕', '유린기'],
        'my_sentence': 'Life is short, you need python',
        'messages': ['apple', 'banana', 'watermelon', 'mango'],
        'empty_list': [],
        'datetimenow': datetime.now(),
    }
    return render(request, 'pages/template_language.html', context)
#6. 실습
#6-1. isbirth
def isbirth(request):
    """Report whether today is the birthday (September 18)."""
    today = datetime.now()
    is_birthday = (today.month, today.day) == (9, 18)
    return render(request, 'pages/isbirth.html', {'result': is_birthday})
#6-2. 회문판별(palindrome)
#회문이면 회문입니다. 회문이 아니면 회문이 아닙니다.
def ispal(request, words):
    """Palindrome check: result is True when *words* reads the same reversed."""
    is_palindrome = (words == words[::-1])
    return render(request, 'pages/ispal.html', {'result': is_palindrome})
'''
# 스앵님 풀이
def ispal(request, words):
result = False
if words == words[::-1]:
result = True
context = {
'words': words,
'result': result,
}
return render(request, 'ispal.html', context)
'''
#6-3. 로또 번호 추첨
# lottos -> 1~45까지의 번호 중 6개를 랜덤으로 pick한 리스트
# real_lottos -> [21, 24, 30, 32, 40, 42]
#1. lottos 번호를 DTL(for문) 문법을 활용해 하나씩 출력 해보기
#2. 컴퓨터가 뽑은 로또 번호와 실제 로또 당첨 번호를 비교해보기(DTL-if문)
def lotto(request):
    """
    Draw 6 distinct random lotto numbers (1-45) and render them next to a
    fixed "winning" set for comparison in the template.
    """
    real_lottos = [21, 24, 30, 32, 40, 42]
    # random.sample already returns a list; the old list(...) wrapper was
    # redundant.
    lottos = random.sample(range(1, 46), 6)
    context = {
        'real_lottos': real_lottos,
        'lottos': lottos,
    }
    return render(request, 'pages/lotto.html', context)
# if real_lottos == lottos:
# result = True
# else:
# result = False
#7. Form - GET(데이터를 조회할 때, html파일 하나 줘.) get방식은 입력값이 주소창에 노출된다.
def throw(request):
    """Render the GET form that submits to catch()."""
    template_name = 'throw.html'
    return render(request, template_name)
def catch(request):
    """Read the two GET parameters sent by throw() and display them."""
    context = {
        'message': request.GET.get('message'),
        'message2': request.GET.get('message2'),
    }
    return render(request, 'pages/catch.html', context)
# 연습하기
def ping(request):
    """Render the practice GET form that submits to pong()."""
    template_name = 'pages/ping.html'
    return render(request, template_name)
def pong(request):
    """Receive the GET parameter named 'ping' and expose it to the template as 'abc'."""
    submitted = request.GET.get('ping')
    return render(request, 'pages/pong.html', {'abc': submitted})
#8. Form - GET 실습(아스키 아티)
def art(request):
    """Render the ASCII-art input form (submits to result())."""
    template_name = 'pages/art.html'
    return render(request, template_name)
def result(request):
    """ASCII-art demo: render the submitted word with a random ARTII font."""
    # Word submitted by the art() form via GET.
    word = request.GET.get('word')
    # Fetch the available font names from the ARTII API (newline-separated)
    # and pick one at random.
    font_list = requests.get('http://artii.herokuapp.com/fonts_list').text.split('\n')
    font = random.choice(font_list)
    # Ask the API to render the word in the chosen font and show the text.
    art_text = requests.get(f'http://artii.herokuapp.com/make?text={word}&font={font}').text
    return render(request, 'pages/result.html', {'result': art_text})
#9. Form - POST
def user_new(request):
    """Render the POST form that submits to user_create()."""
    template_name = 'pages/user_new.html'
    return render(request, template_name)
def user_create(request):
    """Handle the POST from user_new() and echo the submitted credentials."""
    context = {
        'name': request.POST.get('name'),
        'password': request.POST.get('pwd'),
    }
    return render(request, 'pages/user_create.html', context)
# 190807
#10. 정적 파일
def static_ex(request):
    """Static-files demo page."""
    template_name = 'pages/static_ex.html'
    return render(request, template_name)
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 16,
"usage_type": "call"
},
{
"api_name"... |
6008443264 | import pandas as pd
from tsfresh import extract_features, select_features
from os import listdir
from os.path import isfile, join
from auto_feature_extraction.config import *
def select():
    """
    Run tsfresh feature selection on each signal component's feature file
    and write combined train/test CSVs.

    Selection is done only on the training split (rows before
    validation_split_i); the same selected columns are then taken from the
    test split.  Output goes to train_file / test_file from the config.
    """
    component_files = [f for f in listdir(features_dir)
                       if isfile(join(features_dir, f))]
    train = pd.DataFrame()
    test = pd.DataFrame()
    for f_file in component_files:   # one file per signal component
        print(f"loading {f_file}")
        features = pd.read_csv(features_dir + f_file)
        # Split rows into train/validation parts and separate the target.
        train_x = features.iloc[:validation_split_i].drop('y', axis=1)
        test_x = features.iloc[validation_split_i:].drop('y', axis=1)
        train_y = features.iloc[:validation_split_i].y
        test_y = features.iloc[validation_split_i:].y
        # Feature selection must always be done from the train set!
        print("selecting features...")
        train_features_selected = select_features(train_x, train_y,
                                                  fdr_level=fdr_level)
        print(f"selected {len( train_features_selected.columns )} features.")
        # Append this component's selected columns to the combined frames.
        train = pd.concat([train, train_features_selected.copy()], axis=1)
        test = pd.concat([test, test_x[train_features_selected.columns].copy()],
                         axis=1)
    # NOTE(review): y comes from the last component file processed —
    # presumably identical across files; confirm.
    train['y'] = train_y
    test['y'] = test_y
    print(f"saving {train_file}")
    train.to_csv(train_file, index=None)
    print(f"saving {test_file}")
    test.to_csv(test_file, index=None)
if __name__ == '__main__':
select()
| JoaquinRives/Deep-Learning-Project | auto_feature_extraction/select_features.py | select_features.py | py | 1,441 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_numb... |
16473614058 | import torch
import torch.nn as nn
import torch.nn.functional as F
NOISE_VECTOR_DIM = 100
class Generator(nn.Module):
    """
    Conditional DCGAN generator.

    Maps a noise vector concatenated with a one-hot class vector to a
    1 x 28 x 28 image.  A fully connected layer first lifts the
    (input_size + condition_size)-dimensional input to a 4x4x512 feature
    map; three ConvTranspose2d blocks then upsample it to 7x7, 14x14 and
    finally 28x28, with BatchNorm + ReLU between stages and tanh on the
    output so pixel values lie in [-1, 1].
    """

    def __init__(self, input_size=100, condition_size=10):
        super(Generator, self).__init__()
        # Project noise + condition to the 4x4x512 seed feature map.
        self.fully_connected = nn.Sequential(
            nn.Linear(input_size + condition_size, 4 * 4 * 512),
            nn.ReLU(),
        )
        # Upsampling stages: 4x4 -> 7x7 -> 14x14 -> 28x28.
        self.convolutional_network = nn.Sequential(
            nn.ConvTranspose2d(512, 256, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 1, 4, stride=2, padding=1, bias=False),
            nn.Tanh(),
        )

    def forward(self, x, c):
        """Generate images from noise *x* (N, 100) and class vectors *c* (N, 10)."""
        noise = x.view(x.size(0), -1)
        labels = c.float()
        joint = torch.cat((noise, labels), 1)       # (N, input+condition)
        seed = self.fully_connected(joint)
        seed = seed.view(seed.size(0), 512, 4, 4)   # reshape for the conv stack
        return self.convolutional_network(seed)     # (N, 1, 28, 28)
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import numpy as np
import os

# Demonstration script: scatter plots varying marker size, style and color,
# each saved as a png under images/.

# Ensure the output directory exists so savefig doesn't fail.
os.makedirs("images", exist_ok=True)

# Create the point data.
x = np.linspace(-1, 1)
y = x + np.random.normal(size=x.size)

# Vary the marker size.
fig = plt.figure()
ax = fig.gca()
ax.scatter(x, y, s=80)
fig.savefig("images/markers_large.png")
plt.close(fig)   # free the figure; matplotlib keeps open figures alive

# Make markers different sizes
fig = plt.figure()
ax = fig.gca()
sizes = (np.random.sample(size=x.size) * 10) ** 2
ax.scatter(x, y, s=sizes)
fig.savefig("images/markers_sized.png")
plt.close(fig)

# Vary the marker style.
fig = plt.figure()
ax = fig.gca()
ax.scatter(x, y, marker="v")
fig.savefig("images/markers_triangle.png")
plt.close(fig)

# Make markers of different styles.
fig = plt.figure()
ax = fig.gca()
N = x.size // 3
ax.scatter(x[:N], y[:N], marker="o")
ax.scatter(x[N: 2 * N], y[N: 2 * N], marker="x")
ax.scatter(x[2 * N:], y[2 * N:], marker="s")
fig.savefig("images/markers_styled.png")
plt.close(fig)

# Change the marker colors
fig = plt.figure()
ax = fig.gca()
ax.scatter(x, y, c="orange")
fig.savefig("images/markers_orange.png")
plt.close(fig)

# Vary the marker colors.
fig = plt.figure()
ax = fig.gca()
ax.scatter(x, y, c=x - y)
fig.savefig("images/markers_colors.png")
plt.close(fig)

# Create a lot of point data.
# Bug fix: np.linspace requires an integer `num`; passing 1e5 (a float)
# raises TypeError on modern NumPy.
x = np.linspace(-1, 1, num=100000)
y = x + np.random.normal(size=x.size)

# Make markers transparent.
fig = plt.figure()
ax = fig.gca()
ax.scatter(x, y, marker=".", alpha=.05, edgecolors="none")
fig.savefig("images/markers_transparent.png")
plt.close(fig)
| brohrer/taming_matplotlib | points_examples.py | points_examples.py | py | 1,404 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"l... |
def summary(outroot='summary-18.05.17'):
    """
    Build a sortable HTML summary table of grism pointings.

    Regenerates 'pointing_summary.fits' via hsaquery, adds per-pointing
    link columns (full extraction page, footprint image, redshift
    histogram hosted on S3) and writes ``<outroot>.html``, printing the
    aws s3 command needed to publish it.
    """
    from hsaquery import overlaps
    from grizli import utils

    overlaps.summary_table(output='pointing_summary')
    tab = utils.GTable.gread('pointing_summary.fits')
    roots = tab['NAME']

    # '+' in field names must be URL-escaped as %2B inside the S3 links.
    full_links, fp_links, zhist_links = [], [], []
    for root in roots:
        escaped = root.replace('+', '%2B')
        full_links.append('<a href=https://s3.amazonaws.com/aws-grivam/Pipeline/{0}/Extractions/{0}-full.html?chinu_max=2&bic_diff_min=30&zwidth1_max=0.01>Full</a>'.format(escaped))
        fp_links.append('<a href=https://s3.amazonaws.com/aws-grivam/Pipeline/{0}_footprint.png> <img height=200 src=https://s3.amazonaws.com/aws-grivam/Pipeline/{0}_footprint.png></a>'.format(escaped))
        zhist_links.append('<a href=https://s3.amazonaws.com/aws-grivam/Pipeline/{0}/Extractions/{0}_zhist.png> <img height=200 src=https://s3.amazonaws.com/aws-grivam/Pipeline/{0}/Extractions/{0}_zhist.png></a>'.format(escaped))
    tab['Full'] = full_links
    tab['fp'] = fp_links
    tab['zhist'] = zhist_links

    #cols = ['NAME', 'RA', 'DEC', 'E(B-V)', 'GalLat', 'GalLon', 'NFILT', 'filter', 'target', 'target_description', 'proposal_id', 'pi_name', 'TexpG102', 'PAG102', 'TexpG141', 'PAG141', 'MAST', 'Full', 'fp', 'zhist']
    cols = ['NAME', 'RA', 'DEC', 'E(B-V)', 'GalLat', 'GalLon', 'NFILT', 'filter', 'target', 'proposal_id', 'pi_name', 'TexpG102', 'PAG102', 'TexpG141', 'PAG141', 'MAST', 'Full', 'fp', 'zhist']
    tab[cols].write_sortable_html(outroot+'.html', replace_braces=True, localhost=False, max_lines=50000, table_id=None, table_class='display compact', css=None, filter_columns=['mag_auto', 'z_map', 'bic_diff', 'chinu', 'zwidth1', 'is_point', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII'], use_json=False)
    print('aws s3 cp {0}.html s3://aws-grivam/Pipeline/ --acl public-read'.format(outroot))
def regenerate_webpages(outroot='master'):
    """
    Rebuild the per-field sortable HTML catalogs and a combined master
    catalog from the *.info.fits / *_phot.fits files in the working
    directory, and write a sync_to_s3.sh script with the upload commands.

    Shell snippets for retrieving the input files:

    roots=`python -c "from grizli_aws import catalogs; roots, dates = catalogs.get_roots(verbose=False); print('\n'.join(roots))"`
    roots=`ls *phot.fits | sed "s/_phot/ /" | awk '{print $1}'`

    # ACS
    BUCKET=grizli-grism
    roots=`ls ../*footprint.fits | sed "s/\// /g" | sed "s/_foot/ /" | awk '{print $2}'`
    for root in $roots; do
        if [ ! -e ${root}.info.fits ]; then
            aws s3 cp s3://${BUCKET}/Pipeline/${root}/Extractions/${root}.info.fits ./
            #aws s3 cp s3://${BUCKET}/Pipeline/${root}/Extractions/${root}_phot.fits ./
        else
            echo $root
        fi
    done

    for root in $roots; do
        aws s3 ls s3://aws-grivam/Pipeline/${root}/Extractions/ > /tmp/x
        echo $root
        grep beams.fits /tmp/x | wc
        grep full.fits /tmp/x | wc
    done
    """
    import glob
    import numpy as np
    from grizli import utils
    from astropy import table
    ## retrieve
    roots = [file.split('.info')[0] for file in glob.glob('*info.fits')]
    # Start a fresh upload script; one "aws s3 cp" pair is appended per root.
    fpi = open('sync_to_s3.sh', 'w')
    fpi.write('# \n')
    fpi.close()
    tables = []
    for root in roots:
        fit = utils.GTable.gread(root+'.info.fits')
        phot = utils.GTable.gread(root+'_phot.fits')
        # Match photometry to fit catalog on sky position and merge columns.
        ix, dr = phot.match_to_catalog_sky(fit)
        fit['phot_dr'] = dr
        for c in phot.colnames:
            if c not in fit.colnames:
                fit[c] = phot[ix][c]
            else:
                pass
                #print(c)
        print(root, len(fit))
        # # DQ on mag differences
        # apcorr = fit['flux_auto']/fit['flux_aper_1']
        # m140 = 23.9-2.5*np.log10(fit['f140w_flux_aper_1']*apcorr)
        # dm140 = m140-fit['mag_wfc3,ir,f140w']
        # mask = fit['{0}_mask_aper_{1}'.format('f140w', 1)]
        # bad = mask > 0.2*np.percentile(mask[np.isfinite(mask)], 99.5)
        # dm140[bad] = np.nan
        #
        # m160 = 23.9-2.5*np.log10(fit['f160w_flux_aper_1']*apcorr)
        # dm160 = m160-fit['mag_wfc3,ir,f160w']
        # mask = fit['{0}_mask_aper_{1}'.format('f160w', 1)]
        # bad = mask > 0.2*np.percentile(mask[np.isfinite(mask)], 99.5)
        # dm160[bad] = np.nan
        #
        # dm = dm140
        # dm[(~np.isfinite(dm140)) & np.isfinite(dm160)] = dm160[(~np.isfinite(dm140)) & np.isfinite(dm160)]
        # dm[~np.isfinite(dm)] = -99
        #
        # fit['fit_dmag'] = dm
        # fit['fit_dmag'].format = '4.1f'
        # Point source: empirical size-magnitude curve; sources below it
        # are flagged (+1) vs extended (-1).
        pa = np.polyfit([16, 21, 24, 25.5], [4.5, 2.6, 2.3, 2.1], 2)
        py = np.polyval(pa, fit['mag_auto'])
        point_source = (fit['flux_radius'] < py)  #& (fit['mag_auto'] < 23)
        fit['is_point'] = point_source*2-1
        bad = (fit['mag_auto'] > 22) & (fit['flux_radius'] < 1.2)
        fit['too_small'] = bad*2-1
        N = len(fit)
        # Rewrite the relative png links as absolute S3 URLs
        # ('+' must be escaped as %2B in the URL).
        aws_col = {}
        for c in ['png_stack', 'png_full', 'png_line']:
            aws_col[c] = []
        for i in range(N):
            # NOTE(review): this rebinds the loop variable `root` to the
            # per-row root; the outer loop still iterates correctly since
            # the iterator was created from `roots`, but it is fragile.
            root = fit['root'][i]
            aws = 'https://s3.amazonaws.com/aws-grivam/Pipeline/{0}/Extractions/{0}'.format(root)
            for c in ['png_stack', 'png_full', 'png_line']:
                pre = fit[c][i]
                aws_col[c].append(pre.replace(root, aws).replace(root, root.replace('+','%2B')))
        for c in ['png_stack', 'png_full', 'png_line']:
            fit['aws_'+c] = aws_col[c]
        fit['log_mass'] = np.log10(fit['stellar_mass'])
        fit['log_mass'].format = '.2f'
        cols = ['root', 'idx','ra', 'dec', 't_g102', 't_g141', 'mag_auto', 'is_point', 'z_map', 'chinu', 'bic_diff', 'zwidth1', 'a_image', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass', 'png_stack', 'png_full', 'png_line']
        # Check grisms: only include exposure-time columns for grisms
        # actually present in this field.
        cols = ['root', 'idx','ra', 'dec', 'mag_auto', 'is_point', 'z_map', 'z02', 'z97', 'chinu', 'bic_diff', 'zwidth1', 'a_image', 'flux_radius', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass', 'aws_png_stack', 'aws_png_full', 'aws_png_line']
        for grism in ['g800l', 'g102', 'g141'][::-1]:
            if np.isfinite(fit['t_'+grism]).sum() > 0:
                cols.insert(4, 't_'+grism)
        # Display formats for the sortable-HTML output.
        fit['ra'].format = '.4f'
        fit['dec'].format = '.4f'
        fit['z02'].format = '.2f'
        fit['z97'].format = '.2f'
        fit['flux_radius'].format = '.1f'
        fit['mag_auto'].format = '.2f'
        fit['t_g800l'].format = '.0f'
        fit['t_g102'].format = '.0f'
        fit['t_g141'].format = '.0f'
        fit['zq'].format = '.1f'
        fit['zwidth1'].format = '.3f'
        fit['bic_diff'].format = '.0f'
        fit['a_image'].format = '.1f'
        for l in ['Ha','OIII','Hb','OII','SIII']:
            fit['sn_'+l].format = '.1f'
        fit[cols].write_sortable_html(root+'-full.html', replace_braces=True, localhost=False, max_lines=50000, table_id=None, table_class='display compact', css=None, filter_columns=['mag_auto', 'z_map', 'z02', 'z97', 'bic_diff', 'chinu', 'a_image', 'flux_radius', 'zwidth1', 'is_point', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII'], use_json=True)
        # Post-process the written html/json so '+' in the root is escaped
        # inside the generated links.
        if '+' in root:
            ext = 'html'
            lines = open(root+'-full.'+ext).readlines()
            for i, line in enumerate(lines):
                if root in line:
                    lines[i] = line.replace(root, root.replace('+', '%2B'))
            fp = open(root+'-full.'+ext,'w')
            fp.writelines(lines)
            fp.close()
            ext = 'json'
            lines = open(root+'-full.'+ext).readlines()
            for i, line in enumerate(lines):
                if (root in line) & ('href' in line):
                    lines[i] = line.replace(root, root.replace('+', '%2B'))
            fp = open(root+'-full.'+ext,'w')
            fp.writelines(lines)
            fp.close()
        # Append the upload commands for this field.
        fpi = open('sync_to_s3.sh', 'a')
        fpi.write('aws s3 cp {0}-full.html s3://aws-grivam/Pipeline/{0}/Extractions/ --acl public-read\n'.format(root))
        fpi.write('aws s3 cp {0}-full.json s3://aws-grivam/Pipeline/{0}/Extractions/ --acl public-read\n'.format(root))
        fpi.close()
        tables.append(fit)
    # Combined catalog over all fields.
    master = table.vstack(tables)
    cols = ['root', 'idx','ra', 'dec', 'mag_auto', 'is_point', 'z_map', 'z02', 'z97', 'chinu', 'bic_diff', 'zwidth1', 'a_image', 'flux_radius', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass', 'aws_png_stack', 'aws_png_full', 'aws_png_line']
    for grism in ['g800l', 'g102', 'g141'][::-1]:
        if np.isfinite(master['t_'+grism]).sum() > 0:
            cols.insert(4, 't_'+grism)
    master[cols].write_sortable_html(outroot+'.html', replace_braces=True, localhost=False, max_lines=500000, table_id=None, table_class='display compact', css=None, filter_columns=['mag_auto', 't_g102', 't_g141', 'z_map', 'z02', 'z97', 'bic_diff', 'chinu', 'a_image', 'flux_radius', 'zwidth1', 'is_point', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass'], use_json=True)
    #sel = master['bic_diff'] > 30
    #master[sel][cols].write_sortable_html(outroot+'.html', replace_braces=True, localhost=False, max_lines=500000, table_id=None, table_class='display compact', css=None, filter_columns=['mag_auto', 't_g102', 't_g141', 'z_map', 'z02', 'z97', 'bic_diff', 'chinu', 'a_image', 'flux_radius', 'zwidth1', 'is_point', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass'], use_json=True)
    # Copy into a plain GTable (column order from the last per-field table)
    # and save the FITS version.
    new = utils.GTable()
    for col in fit.colnames:
        new[col] = master[col]
    new.write(outroot+'.fits', overwrite=True)
    print('aws s3 cp {0}.html s3://aws-grivam/Pipeline/ --acl public-read\n'.format(outroot))
    print('aws s3 cp {0}.json s3://aws-grivam/Pipeline/ --acl public-read\n'.format(outroot))
    print('aws s3 cp {0}.fits s3://aws-grivam/Pipeline/ --acl public-read\n'.format(outroot))
    print ('bash sync_to_s3.sh')
def master_catalog(outroot='grizli-18.05.17-full', bucket='grizli-v1', files=None):
import glob
import numpy as np
from grizli import utils
from astropy import table
## retrieve
if files is None:
files = glob.glob('*info.fits')
roots = [file.split('.info')[0] for file in files]
tabs = [utils.GTable.gread(root+'.info.fits') for root in roots]
fit = utils.GTable(table.vstack(tabs))
fit['flux_radius'].format = '.1f'
N = len(fit)
aws_col = {}
for c in ['png_stack', 'png_full', 'png_line', 'png_rgb']:
aws_col[c] = []
#bucket='aws-grivam'
#bucket='grizli-grism'
#bucket='grizli'
for i in range(N):
root = fit['root'][i]
aws = 'https://s3.amazonaws.com/{0}/Pipeline/{1}/Extractions/{1}'.format(bucket, root)
for c in ['png_stack', 'png_full', 'png_line']:
pre = fit[c].filled()[i]
aws_col[c].append(pre.replace(root, aws).replace(root, root.replace('+','%2B')))
pre = fit['png_rgb'].filled()[i]
aws_col['png_rgb'].append(pre.replace('../Thumbnails/', '').replace(root, aws).replace('Extractions', 'Thumbnails').replace(root, root.replace('+','%2B')))
for c in ['png_stack', 'png_full', 'png_line', 'png_rgb']:
fit['aws_'+c] = aws_col[c]
fit['aws_png_sed'] = [l.replace('full.png','sed.png') for l in fit['aws_png_full']]
psx = [16, 18.5, 21, 24, 25.5]
psy = [8, 4., 2.6, 2.3, 2.1]
# New pixel scale
psy = np.array(psy)*0.06/0.1
# ACS
if False:
psx = [16, 21, 24, 25.5, 27]
psy = [4.5, 2.9, 2.9, 2.6, 2.6]
pa = np.polyfit(psx, psy, 2)
py = np.interp(fit['mag_auto'], psx, psy)
# Point source
point_source = (fit['flux_radius'] < py) & (fit['mag_auto'] < 25.5)
#point_source = (fit['flux_radius'] < py) & (fit['mag_auto'] < 24) # ACS
fit['is_point'] = point_source*1
fit['log_mass'] = np.log10(fit['stellar_mass'])
fit['log_mass'].format = '.2f'
############
# Warnings for ambiguous line IDs
dz_line = np.array([5007, 6563])*(1./5007.-1/6563.)
col = 'ambiguous_HaOIII'
dz_line = np.array([3727, 6563])*(1./3727.-1/6563.)
col = 'ambiguous_HaOII'
zw1 = fit['zwidth1']/(1+fit['z_map'])
zw2 = fit['zwidth2']/(1+fit['z_map'])
fit['zw1'] = zw1
fit['zw1'].format = '.3f'
fit['zw2'] = zw2
fit['zw2'].format = '.3f'
for dz_line, col in zip([np.array([5007, 6563])*(1./5007.-1/6563.), np.array([3727, 6563])*(1./3727.-1/6563.)], ['ambiguous_HaOIII', 'ambiguous_HaOII']):
if col in fit.colnames: fit.remove_column(col)
for dz_i in dz_line:
if col not in fit.colnames:
fit[col] = np.abs(zw1 - dz_i) < 0.008
else:
fit[col] |= np.abs(zw1 - dz_i) < 0.008
amb = fit['ambiguous_HaOIII']*1+fit['ambiguous_HaOII']*2
############
# Reliable redshifts, based on 3D-HST COSMOS
ambiguous = (fit['ambiguous_HaOIII'] | fit['ambiguous_HaOII']) & (zw2 / zw1 < 1.1)
fit['ambiguous'] = ambiguous
# Define ambigous as wide zwidth but <= N isolated peaks with width
# ambig_sigma
ambig_sigma = 0.005
ambig_npeak = 2
ambig_logprob = np.log10(1/np.sqrt(np.pi*ambig_sigma**2)/ambig_npeak)
fit.meta['pdf_max'] = ambig_logprob, 'log_pdf_max limit for ambigous lines'
fit['ambiguous'] = (zw1 > 0.2) & (fit['log_pdf_max'] > ambig_logprob)
min_risk_lim = 0.4
fit['ambiguous'] &= fit['min_risk'] < min_risk_lim
fit.meta['min_risk'] = min_risk_lim, 'min_risk limit for ambigous lines'
fit['ok_width'] = (fit['zw1'] < 0.02) | fit['ambiguous']
fit['ok_width'].format = 'd'
fit['bic_diff_spl'] = fit['bic_spl'] - fit['bic_temp']
fit['bic_diff_spl'].format = '.0f'
# Overall data quality
fit['use_spec'] = (fit['ok_width'])
fit['use_spec'] &= (fit['chinu'] < 10)
fit['use_spec'] &= (fit['bic_diff'] > 0) | ((fit['bic_diff'] > -30) & (fit['min_risk'] < 0.05))
fit['use_spec'] &= (fit['flux_radius'] > 1)
dz_risk = (fit['z_map'] - fit['z_risk'])/(1+fit['z_map'])
fit['use_spec'] &= np.abs(dz_risk) < 0.01
fit['dchi'] = (fit['chimax'] - fit['chimin'])/(fit['chimin']/fit['dof'])
fit['dchi'].format = '.1f'
# ACS GLASS
if False:
fit['use_spec'] &= (fit['flux_radius'] > 1.9)
fit['use_spec'].format = 'd'
fit['z_map'].format = '.4f'
fit['chinu'].format = '.2f'
fit['min_risk'].format = '.2f'
fit['ra'].format = '.4f'
fit['dec'].format = '.4f'
fit['mag_auto'].format = '.2f'
fit['t_g102'].format = '.0f'
fit['t_g141'].format = '.0f'
fit['zq'].format = '.1f'
fit['zwidth1'].format = '.3f'
fit['bic_diff'].format = '.0f'
fit['a_image'].format = '.1f'
fit['log_pdf_max'].format = '.2f'
for l in ['Ha','OIII','Hb','OII','SIII']:
fit['sn_'+l].format = '.1f'
cols = ['root', 'idx','ra', 'dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'is_point', 'flux_radius', 'z_map', 'use_spec', 'chinu', 'bic_diff', 'min_risk', 'log_pdf_max', 'zw1', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'd4000', 'd4000_e', 'aws_png_stack', 'aws_png_full', 'aws_png_rgb', 'aws_png_line']
try:
pred = run_classify(fit)
fit['pred'] = pred
cols.insert(10, 'pred')
print('Prediction!')
except:
pass
# for g in ['g102', 'g141', 'g800l']:
# if 't_'+g in fit.colnames:
# bad = ~np.isfinite(fit['t_'+g])
# fit['t_'+g][bad] = 0.
#
# if fit['t_'+g].max() == 0:
# pop = True
# else:
# pop = False
# else:
# pop = True
#
# if pop:
# cols.pop(cols.index('t_'+g))
filter_cols = cols[2:-4]
if 'sparcs' in outroot:
cols += ['aws_png_sed']
#filter_cols = ['mag_auto', 'z_map', 'z02', 'z97', 'bic_diff', 'chinu', 'flux_radius', 'zw1', 'use_spec', 'is_point', 'sn_SIII', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass', 't_g800l', 't_g102', 't_g141']
if False:
clip = (fit['bic_diff'] > 20) & (fit['chinu'] < 2) & (fit['zwidth1'] < 0.01)
else:
clip = fit['ra'] > 0
print('Clip: {0}'.format((~clip).sum()))
new = utils.GTable()
for col in fit.colnames:
new[col] = fit[col][clip]
new.write(outroot+'.fits', overwrite=True)
print('aws s3 cp {0}.fits s3://aws-grivam/Pipeline/ --acl public-read\n'.format(outroot))
#fit = fit[clip]
# Boolean columns
for c in cols:
if c not in new.colnames:
continue
if isinstance(new[c][0], np.bool_):
#print(c)
new[c] = new[c]*1
new[cols].filled(fill_value=-1).write_sortable_html(outroot+'.html', replace_braces=True, localhost=False, max_lines=50000, table_id=None, table_class='display compact', css=None, filter_columns=filter_cols, use_json=True)
print('aws s3 cp {0}.html s3://aws-grivam/Pipeline/ --acl public-read\n'.format(outroot))
print('aws s3 cp {0}.json s3://aws-grivam/Pipeline/ --acl public-read\n'.format(outroot))
if False:
leg = utils.read_catalog('../../Cutouts/legac_meeting.cat')
idx, dr = fit.match_to_catalog_sky(leg)
clip = (dr.value < 0.4)
f = fit[idx][clip]
f['z_spec'] = leg['z_spec'][clip]
f['legac_id'] = ['m{0}_{1}'.format(m, i) for m, i in zip(leg['mask'][clip], leg['id'][clip])]
f['legac_use'] = leg['use'][clip]
lc = cols.copy()
for c in ['legac_id', 'legac_use', 'z_spec'][::-1]:
lc.insert(3, c)
f['legac_use'].format = '0d'
f['z_spec'].format = '.4f'
f[lc].filled(fill_value=-1).write_sortable_html('may2019_legac.html', replace_braces=True, localhost=False, max_lines=50000, table_id=None, table_class='display compact', css=None, filter_columns=filter_cols, use_json=False)
# ######################
# # use_spec versions
# outroot += '.use_spec'
# clip &= (fit['use_spec'])
def summary_table(output_table='summary_glass-acs-2018.05.21'
):
from hsaquery import overlaps
from grizli import utils
overlaps.summary_table(output=output_table)
tab = utils.GTable.gread(output_table+'.fits')
tab['Browse'] = ['<a href=https://s3.amazonaws.com/aws-grivam/Pipeline/{0}/Extractions/{0}-full.html>Browse</a>'.format(root.replace('+', '%2B')) for root in tab['NAME']]
pixscale = np.array([0.06]*len(tab))
pixscale[9:] = 0.03
tab['driz_scale'] = pixscale
url = "http://archive.stsci.edu/hst/search.php?action=Search&RA={0}&DEC={1}&radius=5.&sci_aec=S"
root_url = [('< a href='+url+'>{2}</a>').format(line['RA'], line['DEC'], line['NAME']) for line in tab]
tab['root'] = root_url
cols = ['NAME', 'RA', 'DEC', 'MW_EBV', 'filter', 'proposal_id', 'proposal_pi', 'driz_scale', 'MAST', 'Browse']
tab[cols].write_sortable_html(output_table+'.html', use_json=False, localhost=False, max_lines=1000)
print('aws s3 cp {0}.html s3://aws-grivam/Pipeline/ --acl public-read'.format(output_table))
| grizli-project/grizli-aws | grizli_aws/master_catalog.py | master_catalog.py | py | 19,456 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "hsaquery.overlaps.summary_table",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "hsaquery.overlaps",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "grizli.utils.GTable.gread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name... |
41334766042 | import re
from pathlib import Path
import torch
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
is_deepspeed = False
if path.is_dir(): # DeepSpeed checkpoint
is_deepspeed = True
latest_path = path / 'latest'
if latest_path.is_file():
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
path /= f'{tag}/mp_rank_00_model_states.pt'
state_dict = torch.load(path, map_location=device)
if is_deepspeed:
state_dict = state_dict['module']
# Replace the names of some of the submodules
def key_mapping(key):
return re.sub(r'^module.model.', '', key)
state_dict = {key_mapping(k): v for k, v in state_dict.items()}
return state_dict | DiffEqML/kairos | src/utils/checkpoint.py | checkpoint.py | py | 881 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 25,
"usage_type": "call"
}
] |
22495146693 | from tkinter import *
from tkinter import ttk
from PIL import Image,ImageTk
import sqlite3
from tkinter import messagebox
from student import Student
import os
import subprocess
class Login:
def __init__(self, root):
self.root=root
self.root.geometry("1750x900+0+0")
self.root.title("Login System")
self.my_canvas=Canvas(self.root,width=1750,height=900,bd=1)
self.Lo=Image.open("images/n01.jpg")
self.Lo=self.Lo.resize((1850,1060),Image.ANTIALIAS)
self.Lo=ImageTk.PhotoImage(self.Lo)
self.my_canvas.create_image(0,0,image=self.Lo,anchor="nw")
self.my_canvas.pack(fill="both",expand=True)
self.my_canvas.create_text(850,80,text="PLACEMENT MANAGEMENT SYSTEM",font=("times new roman",40,"bold"),fill="brown")
self.L=Image.open("images/n01.jpg")
self.L=self.L.resize((625,400),Image.ANTIALIAS)
self.L=ImageTk.PhotoImage(self.L)
self.e11=Label(image=self.L,bd=0)
self.e11.pack(fill=X,expand=1)
wind=self.my_canvas.create_window(870,530,window=self.e11)
######## variables +++++++
self.stud_id=StringVar()
self.password=StringVar()
#####IMAGE##############
#self.Logo1=ImageTk.PhotoImage(file="images/n02.jpg")
self.Log1=Image.open("images/n02.jpg")
self.Log1=self.Log1.resize((625,400),Image.ANTIALIAS)
self.Log1=ImageTk.PhotoImage(self.Log1)
#self.Logo2=ImageTk.PhotoImage(file="images/n2.jpg")
self.Log2=Image.open("images/n01.jpg")
self.Log2=self.Log2.resize((625,400),Image.ANTIALIAS)
self.Log2=ImageTk.PhotoImage(self.Log2)
#self.Logo3=ImageTk.PhotoImage(file="images/n03.jpg")
self.Log3=Image.open("images/n03.jpg")
self.Log3=self.Log3.resize((625,400),Image.ANTIALIAS)
self.Log3=ImageTk.PhotoImage(self.Log3)
self.g3=Image.open("images/u1.png")
self.g3=self.g3.resize((35,40),Image.ANTIALIAS)
self.g3=ImageTk.PhotoImage(self.g3)
self.g13=Image.open("images/pa1.png")
self.g13=self.g13.resize((50,40),Image.ANTIALIAS)
self.g13=ImageTk.PhotoImage(self.g13)
self.g1=Image.open("images/pa2.png")
self.g1=self.g1.resize((50,40),Image.ANTIALIAS)
self.g1=ImageTk.PhotoImage(self.g1)
######## LOGIN ++++++++++++
self.use_l = Label(self.root,text="LOGIN",bd=6,font=("arial",20,"bold"), fg="BLACK")
self.use_l.place(x=830, y=280, height=31, width=154)
########### ++++++ LABEL ++++++++++++++++
user_stud=Label(self.root,text="User_id",image=self.g3,compound=RIGHT,font=("times new roman" ,15, "bold"),bg="white",bd=3)
user_stud.place(x=580,y=350,height=40,width=130)
doj_stud=Label(self.root,text="Password",font=("times new roman" ,15, "bold"),bg="white",bd=3)
doj_stud.place(x=580,y=440,height=40,width=110)
####### + ENTRY +++++++++
self.user_stud=Entry(self.root,textvariable=self.stud_id,font=("times new roman" ,15),bg="white",bd=2)
self.user_stud.place(x=750,y=350,height=40,width=210)
self.pwd=Entry(self.root,textvariable=self.password,show='*',font=("times new roman",15),bg="white",bd=2)
self.pwd.place(x=790,y=440,height=40,width=200)
self.b1=Button(self.root,cursor="hand2",image=self.g13,command=self.p,font=("arial 18 bold",19,"bold"),bd=3)
self.b1.place(x=700,y=440, height=40, width=70)
self.btn=True
self.BtnLogin = Button(self.root,text="Login",command=self.login1,font=("times new roman",20,"italic"),bg="white",bd=5,cursor="hand2",anchor="w")
self.BtnLogin.place(x=750,y=600, height=34, width=240)
self.ani()
def ani(self):
self.im=self.Log1
self.Log1=self.Log2
self.Log2=self.Log3
self.Log3=self.im
self.e11.config(image=self.im)
self.e11.after(1200,self.ani)
def p(self):
if self.btn:
self.b1.config(image=self.g1)
self.p1=self.pwd.get()
self.pwd.config(show=self.p1)
self.btn=False
else:
self.pwd.config(show='*')
self.b1.config(image=self.g13)
self.btn=True
def login1(self):
con=sqlite3.connect(database=r'student.db')
cur=con.cursor()
try:
if self.user_stud.get() == "" and self.pwd_stud.get()== "":
messagebox.showerror("Error", "All fields are required",parent=self.root)
else:
cur.execute("select name from admin where user_id=? and password=?",(self.user_stud.get(),self.pwd.get()))
user=cur.fetchone()
if user==('admin',):
try:
subprocess.run(['python3','student1.py'])
finally:
self.root.destroy()
else:
cur.execute("select stud_id from student where stud_id=? and password=?",(self.user_stud.get(),self.pwd.get()))
user=cur.fetchone()
ss=user[0]
if user==None:
messagebox.showerror('Invalid credentials', 'Try again with the correct credentials!!!!')
p1=int(self.user_stud.get())
if (ss==p1):
try:
self.root.destroy()
finally:
subprocess.run(['python3','studinfo.py'])
if user==None:
messagebox.showerror('Invalid credentials', 'Try again with the correct credentials!!!!')
except Exception as ex:
messagebox.showerror("Error",f"Error due to :{(str(ex))}",parent=self.root)
if __name__=="__main__":
root=Tk()
obj=Login(root)
root.mainloop()
| nagendra-h/Pyhton_Placement_Management | check.py | check.py | py | 6,215 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"li... |
14879425734 | from django.conf.urls import url
import views
# from django.views.generic import TemplateView #TemplateView.as_view(template_name="about.html")
app_name = 'park'
urlpatterns = [
# url(r'^$', views.Index.as_view(), name='index'), #index is button to check, list to charge, free space to public
url(r'^login/$', views.LoginView.as_view(), name='login'), #fake login page
# url(r'^check/', views.add, name='check'), #check picture to get status, paid or new parking ticket
# url(r'^results/', views.end, name='results'), #list of parking ticket, paid or not paid and end time, can be charge
# url(r'^user/', views.user, name='user'), #user index page, a list of numbers
# url(r'^ticket/', views.ticket, name='ticket'), #click car number to create a new ticket, and areas, can extend
url(r'^pic/$', views.PictureView.as_view(), name='pic'),
url(r'^$', views.IndexView.as_view(), name='index'),
] | Ganben/pygo | park/urls.py | urls.py | py | 941 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.LoginView.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.LoginView",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "dja... |
70318908513 | #!/bin/env python3
from utils.Verilog import Verilog
import utils.APIs.APIs as api
import argparse, os
import utils.APIs.from_module as mp
calculator = Verilog("calculator", "calculator.v")
adder = mp.asVerilog({"module" : "resources/rtl/test.v"})#Verilog("adder", "adder")
subtractor = Verilog("subtractor", "subtractor")
# adder.ports.add("a", direction="input", bit=7)
# adder.ports.add("b", direction="input", bit=7)
# adder.ports.add("sum", direction="output", bit=8)
# adder.assigns.add("sum", "a + b")
subtractor.ports.add("input1", direction="input", bit=7)
subtractor.ports.add("input2", direction="input", bit=7)
subtractor.ports.add("result", direction="output", bit=64)
subtractor.ports.add("test", direction="output", bit=128)
subtractor.assigns.add("result", "a - b")
calculator.add_instance(adder)
calculator.add_instance(subtractor)
api.toPort(calculator,adder, lambda x : (x.name, x.name != "sum"))
calculator.ports.add(name="control", bit=1, direction="input")
calculator.ports.add(name="output", bit=8, direction="output")
calculator.update_instance("adder", api.connect_instance_with_port, api.finalize)
calculator.update_instance("subtractor", api.connect_instance_with_port, api.finalize)
calculator.assigns.add("output", f"control ? Wire_adder_sum : Wire_subtractor_result")
line = calculator.toModule()
print(line) | jinrudals/generate-verilog-template | example.py | example.py | py | 1,345 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "utils.Verilog.Verilog",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "utils.APIs.from_module.asVerilog",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.APIs.from_module",
"line_number": 7,
"usage_type": "name"
},
{
"api_na... |
24928316156 | from django.shortcuts import render, redirect
from django.core.files.storage import FileSystemStorage
from django.utils.datastructures import MultiValueDictKeyError
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.decorators import login_required
from Admin.models import *
from User.models import*
def admindashboard(request):
customercount = Userregister.objects.all().count()
feddbackcount = Userfeedback.objects.all().count()
productscount = Productregister.objects.all().count()
orderconfirmedcount = Order.objects.filter(status = 1).count()
orderdeliveredcount = Order.objects.filter(status = 2).count()
context = {
'customercount':customercount, 'feddbackcount':feddbackcount, 'productscount':productscount,
'orderconfirmedcount':orderconfirmedcount, 'orderdeliveredcount':orderdeliveredcount
}
return render(request, 'admindashboard.html', context)
def adminlogin(request):
if request.method == 'POST':
aUsername = request.POST['aUsername']
aPassword = request.POST['aPassword']
user = authenticate(request, username = aUsername, password = aPassword)
if user is not None:
login(request, user)
return redirect('admindashboard')
else:
return redirect('adminlogin')
return render(request, 'admin_login.html')
def adminlogout(request):
logout(request)
return redirect('adminlogin')
@login_required(login_url='adminlogin')
def productregister(request):
category = Productcategory.objects.all()
if request.method == 'POST':
pname = request.POST['pname']
pimage = request.FILES['pimage']
pprice = request.POST['pprice']
pcategory = request.POST['pcategory']
pcategoryInstance = Productcategory.objects.get(id = pcategory)
pquantity = request.POST['pquantity']
Productregister.objects.create(pName = pname, pImage = pimage, pPrice = pprice, pCategory = pcategoryInstance, pQuantity = pquantity)
return redirect(admindashboard)
return render(request, 'productregister.html', {'category':category})
@login_required(login_url='adminlogin')
def productviews(request):
productall = Productregister.objects.all()
return render(request, 'productviewall.html', {'productall':productall})
@login_required(login_url='adminlogin')
def productviewindividual(request, pId):
individualproduct = Productregister.objects.filter(id = pId)
category = Productcategory.objects.all()
return render(request, 'productindividual.html', {'individualproduct':individualproduct, 'category':category} )
@login_required(login_url='adminlogin')
def productupdate(request, pId):
if request.method == 'POST':
pname = request.POST['pname']
try:
pimage = request.FILES['pimage']
fs = FileSystemStorage()
files = fs.save(pimage.name, pimage)
except MultiValueDictKeyError:
files = Productregister.objects.get(id = pId).pImage
pprice = request.POST['pprice']
pcategory = request.POST['pcategory']
pquantity = request.POST['pquantity']
Productregister.objects.filter(id = pId).update(pName = pname, pImage = files, pPrice = pprice, pCategory = pcategory, pQuantity = pquantity)
return redirect('admindashboard')
@login_required(login_url='adminlogin')
def productdelete(request, pId):
Productregister.objects.filter(id = pId).delete()
return redirect('productviews')
@login_required(login_url='adminlogin')
def productcategory(request):
if request.method == 'POST':
cname = request.POST['cname']
cimage = request.FILES['cimage']
Productcategory.objects.create(cName = cname, cImage = cimage)
return redirect('admindashboard')
return render(request, 'productcategory.html')
@login_required(login_url='adminlogin')
def categoryviewall(request):
categorydata = Productcategory.objects.all()
return render(request, 'viewcategory.html', {'categorydata':categorydata})
@login_required(login_url='adminlogin')
def categoryindividual(request, cId):
categoryindividualview = Productcategory.objects.filter(id = cId)
return render(request, 'categoryindividualview.html', {'categoryindividualview':categoryindividualview})
@login_required(login_url='adminlogin')
def categoryupdate(request, cId):
if request.method == 'POST':
cname = request.POST['cname']
try:
cimage = request.FILES['cimage']
fs = FileSystemStorage()
files = fs.save(cimage.name, cimage)
except:
files = Productcategory.objects.get(id = cId).cImage
Productcategory.objects.filter(id = cId).update(cName = cname, cImage = files)
return redirect('categoryviewall')
@login_required(login_url='adminlogin')
def categorydelete(request, cId):
Productcategory.objects.filter(id = cId).delete()
return redirect('categoryviewall')
@login_required(login_url='adminlogin')
def orderrequests(request):
orderdata = Order.objects.filter( status__in = [0,1]).order_by('orderdate')
return render(request, 'orderrequests.html', {'orderdata':orderdata})
@login_required(login_url='adminlogin')
def orderrequestsapprove(request, oId):
Order.objects.filter(id = oId).update(status = 1)
user_id = Order.objects.get(id = oId).userid.id
user_name = Userregister.objects.get(id = user_id).uname
user_email = Userregister.objects.get(id = user_id).uemail
order_date = Order.objects.get(id = oId).orderdate
product_name = Order.objects.get(id = oId).productid.pName
context = {
'user_name':user_name, 'order_date':order_date, 'product_name':product_name
}
template = render_to_string('orderrequestapproveemail.html', context)
email = EmailMessage(
'Greetings',
template,
settings.EMAIL_HOST_USER,
[user_email],
)
email.fail_silently = False
email.send()
return redirect('orderrequests')
@login_required(login_url='adminlogin')
def orderrequestsdelivered(request, oId):
Order.objects.filter(id = oId).update(status = 2)
user_id = Order.objects.get(id = oId).userid.id
user_name = Userregister.objects.get(id = user_id).uname
user_email = Userregister.objects.get(id = user_id).uemail
order_date = Order.objects.get(id = oId).orderdate
product_name = Order.objects.get(id = oId).productid.pName
context = {
'user_name':user_name, 'order_date':order_date, 'product_name':product_name
}
template = render_to_string('orderdeliveryconfirmemail.html', context)
email = EmailMessage(
'Greetings',
template,
settings.EMAIL_HOST_USER,
[user_email],
)
email.fail_silently = False
email.send()
return redirect('orderrequests')
@login_required(login_url='adminlogin')
def orderhistoryadmin(request):
orderhistory = Order.objects.filter(status = 2).order_by('orderdate')
return render(request, 'orderhistoryadmin.html', {'orderhistory':orderhistory})
@login_required(login_url='adminlogin')
def registeredusers(request):
userlist = Userregister.objects.all()
return render(request, 'registeredusers.html',{'userlist':userlist})
@login_required(login_url='adminlogin')
def userfeedbacklist(request):
feedbacklist = Userfeedback.objects.all()
return render(request, 'userfeedbackadmin.html', {'feedbacklist':feedbacklist})
| Anandu-SK/K10 | Admin/views.py | views.py | py | 7,614 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 33,
"usage_type": "call"
},
{
... |
27345275869 | import matplotlib.pyplot as plt
import numpy as np
import itertools
# from numba import njit
#import pandas as pd
#@njit
def transfer_matrix(N1, N2, polar='TE', n1=1, n2=1):
tm = np.empty((2,2), dtype = np.complex)
if polar == 'TE':
tm[0, 0] = (N2 + N1) / (2. * N2)
tm[0, 1] = (N2 - N1) / (2. * N2)
tm[1, 0] = (N2 - N1) / (2. * N2)
tm[1, 1] = (N2 + N1) / (2. * N2)
else:
tm[0, 0] = (n2 ** 2 * N1 + n1 ** 2 * N2) / (2. * n1 * n2 * N2)
tm[0, 1] = (n2 ** 2 * N1 - n1 ** 2 * N2) / (2. * n1 * n2 * N2)
tm[1, 0] = (n2 ** 2 * N1 - n1 ** 2 * N2) / (2. * n1 * n2 * N2)
tm[1, 1] = (n2 ** 2 * N1 + n1 ** 2 * N2) / (2. * n1 * n2 * N2)
return tm
#@njit
def N_calculation(n_first_media, incident_angle, n_current_media):
return np.sqrt(n_current_media ** 2 - n_first_media ** 2 * (np.sin(incident_angle)) ** 2, dtype=np.complex)
#@njit
def phase_shift(d, N, k0):
p_shift = np.empty((2, 2), dtype = np.complex)
p_shift[0, 0] = np.exp(1.j * d * N * k0)
p_shift[0, 1] = 0.+0.j
p_shift[1, 0] = 0.+0.j
p_shift[1, 1] = np.exp(-1.j * d * N * k0)
return p_shift
#@njit
def R_func(dictionary_structure={}, wl = 780, teta = 45, polar='TE'):
list_N = []
list_n = []
T = np.eye(2, dtype=np.complex)
tet = teta * np.pi / 180
k0 = 2 * np.pi / wl
n0 = complex(0., 0.) # current n
for i in range(0, len(dictionary_structure)):
if i == 0:
n0 = dictionary_structure[i]['n']
list_N.append(N_calculation(n0, tet, n0))
list_n.append(n0) # добавил n для TM
continue
if dictionary_structure[i]['name'] == 'ФК':
n1 = dictionary_structure[i]['n1']
n2 = dictionary_structure[i]['n2']
d1 = dictionary_structure[i]['d1']
d2 = dictionary_structure[i]['d2']
num_layers = int(dictionary_structure[i]['N'])
N1 = N_calculation(n0, tet, n1)
N2 = N_calculation(n0, tet, n2)
from_upper_to_1 = transfer_matrix(list_N[-1], N1, polar, list_n[-1], n1)
from_1_to_2 = transfer_matrix(N1, N2, polar, n1, n2)
from_2_to_1 = transfer_matrix(N2, N1, polar, n2, n1)
F_n1 = phase_shift(d1, N1, k0) # x1 - d, x2 - N, x3 - k0
F_n2 = phase_shift(d2, N2, k0)
T = F_n2 @ from_1_to_2 @ F_n1 @ from_upper_to_1 @ T
T_bilayer = F_n2 @from_1_to_2 @ F_n1 @from_2_to_1
T = np.linalg.matrix_power(T_bilayer, num_layers - 1) @ T
list_N.append(N1)
list_N.append(N2)
list_n.append(n1)
list_n.append(n2)
elif dictionary_structure[i]['name'] == 'Слой':
n = dictionary_structure[i]['n']
d = dictionary_structure[i]['d']
N = N_calculation(n0, tet, n)
from_this_to_next = transfer_matrix(list_N[-1], N, polar, list_n[-1], n)
F = phase_shift(d, N, k0)
T = np.dot(from_this_to_next, T)
T = np.dot(F, T)
list_N.append(N)
list_n.append(n) # добавил n для TM
elif dictionary_structure[i]['name'] == 'Среда':
n = dictionary_structure[i]['n']
N = N_calculation(n0, tet, n)
from_last_to_sub = transfer_matrix(list_N[-1], N, polar, list_n[-1], n)
T = np.dot(from_last_to_sub, T)
list_N.append(N)
list_n.append(n)
return -(T[1, 0] / T[1, 1])
'''
get_rotation function take angle
in grad and return rotation matrix
'''
#@njit
def get_rotation(angle):
rot_matrix = np.empty((2, 2), dtype = np.complex)
rot_matrix[0, 0] = np.cos(np.pi * angle / 180)
rot_matrix[0, 1] = np.sin(np.pi * angle / 180)
rot_matrix[1, 0] = np.sin(np.pi * angle / 180)
rot_matrix[1, 1] = -np.cos(np.pi * angle / 180)
return rot_matrix
#@njit
def decor1(angles, wl, struct, polar):
return R_func(struct, wl, angles, polar)
vectorize_R = np.vectorize(decor1)
vectorize_N = np.vectorize(N_calculation)
#@njit
def transfer_matrix_vec(N1, N2, polar='TE', n1=1+0.j, n2=1+0.j):
if polar == 'TE':
t00 = (N2 + N1) / (2. * N2)
t01 = (N2 - N1) / (2. * N2)
t10 = (N2 - N1) / (2. * N2)
t11 = (N2 + N1) / (2. * N2)
else:
t00 = (n2 ** 2 * N1 + n1 ** 2 * N2) / (2. * n1 * n2 * N2)
t01 = (n2 ** 2 * N1 - n1 ** 2 * N2) / (2. * n1 * n2 * N2)
t10 = (n2 ** 2 * N1 - n1 ** 2 * N2) / (2. * n1 * n2 * N2)
t11 = (n2 ** 2 * N1 + n1 ** 2 * N2) / (2. * n1 * n2 * N2)
return np.vstack((t00, t01, t10, t11)).reshape((2, 2, -1)).T
#@njit
def phase_shift_vec(d, N, k0):
p00 = np.exp(1.j * d * N * k0)
p01 = np.zeros(np.shape(p00), dtype=np.complex)
p10 = np.zeros(np.shape(p00), dtype=np.complex)
p11 = np.exp(-1.j * d * N * k0)
return np.vstack((p00, p01, p10, p11)).reshape((2, 2, -1)).T
#@njit
def generate_TMM(n0, n1, n2, angles, polar):
N1 = N_calculation(n0, np.pi / 180 * angles, n1)
N2 = N_calculation(n0, np.pi / 180 * angles, n2)
TMMs = transfer_matrix_vec(N1, N2, polar, n1, n2)
return TMMs
#@njit
def generate_TMM_to_metal(n0, n2, n3, angles, polar, zk, TMM1):
N2 = N_calculation(n0, np.pi / 180 * angles, n2)
N3 = N_calculation(n0, np.pi / 180 * angles, n3)
TMM2 = transfer_matrix_vec(N2, N3, polar, n2, n3)
Phases = phase_shift_vec(zk, N2, 1.)
TMMs = TMM2 @ Phases @ TMM1
return TMMs
#@njit
def get_K_xz_air(k_mkm, k_mkm_gap, angles):
k_x_b = k_mkm * np.sin(np.pi / 180 * angles, dtype=np.complex)
k_z_b = np.sqrt(k_mkm_gap ** 2 - k_x_b ** 2, dtype=np.complex)
k_matrix = np.vstack((k_x_b, k_z_b))
return k_matrix
#@njit
def E_air_spectrum(s, R, TMMs_):
E_air = np.empty((2, np.size(s)), dtype=np.complex)
E_inc = np.array(np.hstack((s, s * R)).T, dtype=np.complex)
# tmp = E_inc.T.reshape((np.size(s), 2, 1))
# res = TMMs_ @ tmp
# E_plus = res[:, 0, 0].reshape((-1, 1))
# E_minus = res[:, 1, 0].reshape((-1, 1))
for i in range(np.size(s)):
E_air[:, i] = TMMs_[i] @ E_inc[:, i]
E_plus = E_air[0].reshape((-1, 1))
E_minus = E_air[1].reshape((-1, 1))
return E_plus, E_minus
#@njit
def get_defocus_phase(k_mkm, angles, L):
if L == 0:
return 1.
median_angle = np.pi / 180 * angles[np.size(angles) // 2]
h_shift = L * np.cos(median_angle)
x_shift = L * np.sin(median_angle)
k_x = k_mkm * np.sin(np.pi / 180 * angles)
k_z = k_mkm * np.cos(np.pi / 180 * angles)
phase_shift = k_x * x_shift + k_z * h_shift
return np.exp(1.0j * phase_shift).reshape((-1, 1))
#@njit
def spectral(a, k):
if a == np.inf:
return np.array([1.], dtype=np.float64)
return np.sqrt(a)/(2 * np.sqrt(np.pi)) * np.exp(-a**2 * k**2 / 4)
#@njit
def get_k_x(a=5, N_x=401, decr_times=20):
if a == np.inf:
return np.array([0.], dtype=np.float64)
k_min = 2 * np.sqrt(np.log(decr_times)) / a
dk_x = 2 * k_min / N_x
return np.arange(-N_x // 2, N_x // 2) * dk_x
def cut_BSW_intesity(angles_range, X, struct, a=5, Z=0.3, L=0, polar='TE', wl=780., plasmon=False):
'''
set default parameters range
'''
# _, ax = plt.subplots(1, 4, figsize=(20, 5))
k = 2 * np.pi * 1.0e9 / wl * struct[0]['n']
k_mkm = k / 1.0e6
k_mkm_gap = k_mkm / struct[0]['n'] * struct[1]['n']
k_x = get_k_x(a=a, N_x=401, decr_times=5)
dk = k_x[1] - k_x[0]
s = spectral(a, k_x).reshape((-1, 1))
r_pos = np.empty((1, 2))
r_pos[0,1] = Z
E_destr = np.empty((np.size(angles_range), np.size(X)), dtype=np.complex)
for j, alpha in enumerate(angles_range):
angles = 180 / np.pi * np.arcsin(k_x / k_mkm) + alpha
R = vectorize_R(angles, wl, struct, polar).reshape((-1, 1))
TMM1 = generate_TMM(struct[0]['n'], struct[0]['n'], struct[1]['n'], angles, polar)
if plasmon == True:
TMM2 = generate_TMM_to_metal(struct[0]['n'], struct[1]['n'], struct[2]['n'],
angles, polar, Z * k_mkm / struct[0]['n'], TMM1)
TMMs = generate_TMM_to_metal(struct[0]['n'], struct[2]['n'], struct[3]['n1'],
angles, polar, struct[2]['d'] / 1000. * k_mkm / struct[0]['n'], TMM2)
r_pos[0,1] = 0
else:
TMMs = TMM1
k_xz = get_K_xz_air(k_mkm, k_mkm_gap, angles)
defocus_phase = get_defocus_phase(k_mkm, angles, L) # эта фаза считается в призме, поэтому вычтсленные выше k_xz для воздуха не используются
E_plus, E_minus = E_air_spectrum(s, R, TMMs)
E_forward = np.zeros(np.size(X), dtype=np.complex)
E_backward = np.zeros(np.size(X), dtype=np.complex)
# if j % 8 == 0:
# ax[0].plot(angles.real, abs(E_plus))
# ax[1].plot(angles.real, abs(E_minus))
# ax[2].plot(angles.real, abs(R) + 0.05 * (j // 8))
# ax[3].plot(angles.real, np.angle(R) + 0.05 * (j // 8))
# if angles[0].real < 42.2:
# E_destr[j] = np.zeros(np.size(X), dtype=np.complex)
# continue
# coord_arr = np.vstack((X, Z * np.ones(np.size(X)))).T
# E_forward = np.exp(1.0j * (coord_arr @ k_xz), dtype=np.complex) @ (E_plus * defocus_phase) * dk
# coord_arr = np.vstack((X, -Z * np.ones(np.size(X)))).T
# E_backward = np.exp(1.0j * (coord_arr @ k_xz), dtype=np.complex) @ (E_minus * defocus_phase) * dk
# E_destr[j] = (E_forward + E_backward).T
for i, x in enumerate(X):
r_pos[0, 0] = x
E_forward[i] = np.exp(1.0j * r_pos @ k_xz, dtype=np.complex) @ (E_plus * defocus_phase) * dk
r_pos[0, 1] *= -1
E_backward[i] = np.exp(1.0j * r_pos @ k_xz, dtype=np.complex) @(E_minus * defocus_phase) * dk
r_pos[0, 1] *= -1
E_destr[j] = E_forward + E_backward
return E_destr
#@njit
def plot_2Dmap(data, extent=None, isE=False):
    """Render a 2D heat map of ``data`` with a colorbar on the right edge.

    Parameters:
        data - 2D array; shown as-is when ``isE`` is truthy, otherwise the
               intensity ``abs(data) ** 2`` is plotted
        extent - optional axes extent forwarded to ``plt.imshow``
        isE - treat ``data`` as an already-computed (real) map

    NOTE(review): relies on a module-level matplotlib import ``plt`` --
    confirm it is in scope where this module is loaded.
    """
    plt.figure(figsize=(9, 5))
    # `isE == True` replaced with plain truthiness -- identical for the
    # boolean flags this module passes, and more idiomatic.
    proc_data = data if isE else abs(data) ** 2
    plt.imshow(proc_data, cmap=plt.cm.hot, origin='lower', aspect='auto', extent=extent)
    # Dedicated axes strip on the right side of the figure for the colorbar.
    cax = plt.axes([0.95, 0.13, 0.05, 0.75])
    plt.colorbar(cax=cax)
    plt.show()
#@njit
def E_beam_calc(X, Z, s, k_matrix, mode='i', alpha=0, l_0=0, plot_I=True):
    '''
    Compute the complex field distribution on the (X, Z) grid by matrix
    multiplication of the plane-wave spectrum.

    Parameters:
        X, Z - coordinate ranges
        s - spectral amplitudes (column vector)
        k_matrix - stacked (k_x, k_z) wave-vector components
        mode - 'i' for the incident beam, anything else for reflected
        alpha - beam rotation angle (degrees)
        l_0 - defocusing length
        plot_I - plot the intensity map when truthy

    grid - array of coordinates in form:
        [(x_0, z_0), (x_0, z_1), ..., (x_0, z_n),
         ...,
         (x_m, z_0), (x_m, z_1), ..., (x_m, z_n)]
    '''
    rot_matrix = get_rotation(alpha)
    dk = k_matrix[0][1] - k_matrix[0][0]
    h_shift = 0. if l_0 == 0 else l_0 * np.cos(np.pi / 180 * alpha)
    # The z axis is mirrored for the reflected beam.
    sign = 1. if mode == 'i' else -1
    # `np.complex` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `complex` is the documented drop-in replacement.
    grid = np.array(list(itertools.product(X, sign * Z - h_shift)), dtype=complex)
    E = np.exp(1.0j * (grid @ rot_matrix.T) @ k_matrix, dtype=complex) @ s * dk
    E_res = E.reshape((np.size(X), -1)).T
    if plot_I:
        plot_2Dmap(E_res)
    return E_res
#@njit
def vizualize_beam(X, Z, struct, alpha, l_0=0, a=5, mode='r', wl=780., polar='TE', plot=False):
    '''
    Return the complex field distribution(s) E in the modulation region and
    optionally plot the intensity map.

    Parameters:
        X - x-range coordinates
        Z - z-range coordinates
        struct - structure (Otto configuration for example) description
            in dictionary form
        alpha - incidence angle (degree)
        l_0 - defocusing length (mkm)
        a - beam parameter forwarded to the spectrum helpers
        mode - simulation regime (r - reflected, i - incidence, f - full)
        wl - wavelength (nm)
        polar - polarization ('TE' or 'TM')
        plot - plot the intensity of each computed field when True

    Returns:
        [E_inc], [E_refl] or [E_inc, E_refl, E_inc + E_refl] depending on
        ``mode`` (implicitly None for any other mode value).
    '''
    # Wavenumber inside the prism material, converted to 1/mkm.
    k = 2 * np.pi * 1.0e9 / wl * struct[0]['n']
    k_mkm = k / 1.0e6
    k_x = get_k_x(a=a, N_x=401)
    # `np.complex` was removed in NumPy 1.24; the builtin `complex` is the
    # documented replacement (it is exactly what the alias pointed to).
    k_z = np.sqrt(k_mkm ** 2 - k_x ** 2, dtype=complex)
    s = spectral(a=a, k=k_x).reshape((-1, 1))
    k_matrix = np.vstack((k_x, k_z))
    s_angle_range = 180 / np.pi * np.arcsin(k_x / k_mkm)
    angles = s_angle_range + alpha
    R = vectorize_R(angles, wl, struct, polar).reshape((-1, 1))
    if mode == 'r' or mode == 'f':
        E_refl = E_beam_calc(X, Z, s * R, k_matrix, 'r', alpha, l_0, plot)
    if mode == 'i' or mode == 'f':
        E_inc = E_beam_calc(X, Z, s, k_matrix, 'i', alpha, l_0, plot)
    if mode == 'f':
        return [E_inc, E_refl, E_inc + E_refl]
    elif mode == 'r':
        return [E_refl]
    elif mode == 'i':
        return [E_inc]
def get_max_angle(E_bsw, return_shift=None, plot_I_a=None, plot_I_x=None):
    """Locate the incidence angle that gives the highest field intensity.

    Parameters:
        E_bsw - 2D complex field array, rows indexed by incidence angle
        return_shift - if not None, also return the x-shift of the peak
        plot_I_a - if not None, plot the max intensity versus angle
        plot_I_x - if not None, plot the intensity profile of the best row

    Returns:
        [angle_max, shift, I_peak] when ``return_shift`` is set, otherwise
        [angle_max].

    NOTE(review): depends on module-level globals ``angles_range``,
    ``X_range`` and ``plt`` -- confirm they are defined before calling,
    and that ``angles_range`` is aligned with the rows of ``E_bsw``.
    """
    I = np.abs(E_bsw) ** 2
    # Peak intensity of every angle row, vectorized instead of a Python loop.
    I_max = np.max(I, axis=1)
    # `!= None` replaced with the idiomatic identity test.
    if plot_I_a is not None:
        plt.plot(angles_range, I_max)
        plt.grid()
        plt.xlabel("Inc angle")
        plt.ylabel("Intensity")
    # Row with the overall peak; computed once instead of repeated argmax calls.
    best_row = np.argmax(I_max)
    angle_max = angles_range[best_row]
    if return_shift is not None:
        if plot_I_x is not None:
            plt.plot(X_range, I[best_row])
        shift = X_range[np.argmax(I[best_row])]
        return [angle_max, shift, np.max(I)]
    print("I_max: ", np.max(I_max), "angle_max: ", angles_range[best_row])
return [angle_max] | Enedys/TMM_Otto | tmm_utils.py | tmm_utils.py | py | 11,969 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.empty",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.complex",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number"... |
40646941205 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.types import AddonPreferences, PropertyGroup, UIList
from bpy.props import ( StringProperty,
BoolProperty,
FloatProperty,
IntProperty,
EnumProperty,
)
class SHELVES_Preferences(AddonPreferences):
    """Add-on preferences panel for the Shelves add-on.

    Shows the preset selector, the default-shelf name field and the
    editable list of shelf buttons.
    """
    bl_idname = __package__
    # Name of the shelf preset used by default.
    default_shelf: StringProperty(
        name="default_shelf",
        description="default_shelf",
        default="Default")
    def draw(self, context):
        """Draw the preferences UI: preset row, default shelf, button list."""
        scene = context.scene
        layout = self.layout
        # Preset selector plus add/remove preset operators.
        row = layout.row(align=True)
        preset_label = bpy.types.SHELVES_MT_Presets.bl_label
        row.menu('SHELVES_MT_Presets', text=self.default_shelf)
        row.operator('shelves.save_preset', text='', icon='ADD')
        row.operator('shelves.save_preset', text='', icon='REMOVE').remove_active = True
        row = layout.row(align=True)
        #row.operator('self.default_shelf', text="set as default", icon='ADD')
        row.prop(self, 'default_shelf')
        # NOTE(review): this overwrites default_shelf with the active preset
        # label on every redraw, clobbering what the user typed in the field
        # above -- confirm this sync-on-draw behaviour is intended.
        self.default_shelf = preset_label
        #row.label(text=self.default_shelf)
        # Editable list of shelf buttons with add/remove/move controls.
        row = layout.row(align=True)
        col = row.column(align=True)
        col.template_list("SHELVES_UL_ButtonsList",
                          "Custom Shelf List ",
                          scene,
                          "shelf_list",
                          scene,
                          "shelf_list_index",
                          type='DEFAULT',
                          columns=1,
                          )
        col = row.column(align=True)
        col.operator('shelf_list.new_button', text='', icon='ADD')
        col.operator('shelf_list.delete_button', text='', icon='REMOVE')
        col.operator('shelf_list.move_button', text='', icon='TRIA_UP').direction = 'UP'
col.operator('shelf_list.move_button', text='', icon='TRIA_DOWN').direction = 'DOWN' | kromar/blender_Shelves | preferences.py | preferences.py | py | 2,844 | python | en | code | 42 | github-code | 1 | [
{
"api_name": "bpy.types.AddonPreferences",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "bpy.props.StringProperty",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 44,
"usage_type": "attribute"
}
] |
7786560335 | import pandas as pd
import nltk
import numpy as np
def init_df(buffer, events):
    """Build a DataFrame pairing each text-buffer snapshot with its event.

    Columns: ``text_buffer`` (str per row) and ``events`` (event dict per row).
    """
    frame = pd.DataFrame({"text_buffer": buffer, "events": events})
    return frame
def extract_sent(df):
    """Tokenize every text buffer into sentences and record the counts.

    Adds two columns: ``sentences`` (list of sentences per buffer) and
    ``num_sentences`` (the length of that list).
    """
    tokenized = [nltk.tokenize.sent_tokenize(text) for text in df["text_buffer"]]
    df["sentences"] = tokenized
    df["num_sentences"] = [len(sent_list) for sent_list in tokenized]
    return df
def extract_event_name(df):
    """Add an ``event_name`` column holding each event dict's ``eventName``."""
    df["event_name"] = [event["eventName"] for event in df["events"]]
    return df
def correct_sent_num(df):
    """Adjust per-row sentence counts around GPT-3 suggestion insertions.

    When a suggestion is selected and the resulting text-insert does not
    (yet) increase the sentence count, the rows between the triggering
    ``suggestion-get`` and the insert are bumped by one so the sequence
    grouping attributes them to the new sentence.

    NOTE(review): ``groupby(...).apply(lambda x: x)`` + ``sort_index`` is a
    pandas-version-sensitive way of stabilising the row order -- confirm it
    behaves the same on the pandas version in use.
    """
    df = df.groupby("num_sentences", group_keys=True).apply(lambda x: x)
    df = df.sort_index()
    num_sentences = np.array(df["num_sentences"])
    event_names = np.array(df["event_name"])
    start_idx = 0
    select_flag = False
    for idx, event in enumerate(event_names):
        # Remember where the last GPT-3 call started.
        if event == "suggestion-get":
            start_idx = idx
        if event == "suggestion-select":
            select_flag = True
        # The text-insert materialising a selected suggestion.
        if select_flag and event == "text-insert":
            # NOTE(review): if num_sentences[start_idx] > num_sentences[idx],
            # end_idx is left over from a previous iteration (or unbound on
            # the first hit) -- confirm that case cannot occur in the data.
            if num_sentences[start_idx] == num_sentences[idx]:
                end_idx = idx + 1
            elif num_sentences[start_idx] < num_sentences[idx]:
                end_idx = idx
            for i in range(start_idx, end_idx):
                num_sentences[i] += 1
            select_flag = False
    df["num_sentences"] = num_sentences
    return df
def compute_seq(events):
    """Condense a raw event-name array into a readable action sequence.

    Pipeline: drop UI noise events, collapse suggestion-select/insert pairs,
    relabel post-selection edits as ``gpt3-modify``, drop cursor/delete
    events, collapse each suggestion-get...select span into a single call,
    merge consecutive text-inserts, then map to the final labels
    (prompt / user / empty-call / gpt3-call / modify-gpt3).
    """
    # Stage 1: drop pure UI noise (open/hover/down/up and, for now, reopen).
    for noise in ("suggestion-open", "suggestion-hover", "suggestion-down",
                  "suggestion-up", "suggestion-reopen"):
        events = np.delete(events, np.where(events == noise))
    # Stage 2: drop the text-insert that materialises a selected suggestion.
    filtered = []
    pending_select = False
    for label in events:
        if label == "suggestion-select":
            pending_select = True
        if label == "text-insert" and pending_select:
            pending_select = False
            continue
        filtered.append(label)
    events = np.array(filtered)
    # Stage 3: relabel the first edit after a selection as a GPT-3 modification.
    relabeled = []
    pending_select = False
    for label in events:
        if label == "suggestion-select":
            pending_select = True
        if label == "text-insert":
            pending_select = False
        if pending_select and label in ("cursor-backward", "cursor-select", "text-delete"):
            pending_select = False
            label = "gpt3-modify"
        relabeled.append(label)
    events = np.array(relabeled)
    # Stage 4: drop remaining cursor movement, deletions and close events.
    for noise in ("cursor-forward", "cursor-backward", "cursor-select",
                  "text-delete", "suggestion-close"):
        events = np.delete(events, np.where(events == noise))
    # Stage 5: collapse each suggestion-get ... suggestion-select span into
    # the select alone (one GPT-3 call).
    events = events.tolist()
    drop_idx = []
    call_start = 0
    awaiting_select = False
    for idx, label in enumerate(events):
        if label == "suggestion-get":
            call_start = idx
            awaiting_select = True
        if label == "suggestion-select" and awaiting_select:
            awaiting_select = False
            drop_idx.extend(range(call_start, idx))
    events = np.delete(np.array(events), drop_idx)
    # Stage 6: merge runs of consecutive text-inserts into one.
    grouped = []
    insert_run = False
    for label in events:
        if label == "text-insert":
            insert_run = True
        else:
            if insert_run:
                grouped.append("text-insert")
                insert_run = False
            grouped.append(label)
    if insert_run:
        grouped.append("text-insert")
    events = np.array(grouped)
    # Stage 7: map the surviving raw names to their readable labels.
    rename = {
        "system-initialize": "prompt",
        "text-insert": "user",
        "suggestion-get": "empty-call",
        "suggestion-select": "gpt3-call",
        "gpt3-modify": "modify-gpt3",
    }
    return np.array([rename[label] for label in events])
def get_sent_num_and_event_seq(df):
    """Group events by sentence number and condense each group's sequence.

    Returns a dict with parallel lists: ``num_sent`` (sorted unique sentence
    counts) and ``sequence`` (the compute_seq output for each group).
    """
    temp_dict = {
        "num_sent": [],
        "sequence": [],
    }
    for num in np.unique(df["num_sentences"]):
        # sent = np.array(df[df["num_sentences"] == num]["text_buffer"])[-1]
        event_seq = np.array(df[df["num_sentences"] == num]["event_name"])
        temp_dict["num_sent"].append(num)
        temp_dict["sequence"].append(compute_seq(event_seq))
    # Bug fix for prompt deletion: if the session starts with 0 sentences the
    # prompt was deleted, so drop any stray "prompt" labels from every group.
    if temp_dict["num_sent"][0] == 0:
        for idx in range(len(temp_dict["sequence"])):
            if "prompt" in temp_dict["sequence"][idx]:
                temp_arr = temp_dict["sequence"][idx]
                temp_dict["sequence"][idx] = np.delete(temp_arr, np.where(temp_arr == "prompt"))
    return temp_dict
def generate_event_seq(buffer, events):
    """Run the full pipeline from raw buffers/events to a readable summary.

    Builds the frame, splits sentences, labels events, corrects the
    sentence counts, then condenses per-sentence event sequences.
    """
    frame = correct_sent_num(extract_event_name(extract_sent(init_df(buffer, events))))
    return get_sent_num_and_event_seq(frame)
| vishalraj247/CoAuthorViz_Dashboard | events.py | events.py | py | 5,643 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.sent_tokenize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.... |
24752379576 | """ JWTValidate Logic """
import argparse
import jwt
class JwtTokenValidator:
    """ Validate a JWT token against a public key and print the decoded claims. """
    def __init__(self):
        # Build the argument parser once so execute() can be called repeatedly.
        self.arg_parser = self.init_arg_parser()
    def execute(self, args):
        """ Parse CLI args, decode/validate the token and print its payload.

        Raises a jwt.InvalidTokenError subclass when signature or claim
        validation fails.
        """
        parsed_args = self.arg_parser.parse_args(args)
        with open(parsed_args.publicKey, 'r') as file:
            secret = file.read()
        alg = 'RS256'
        if parsed_args.algorithm is not None:
            alg = parsed_args.algorithm.upper()
        # PyJWT documents `algorithms` as a *list* of allowed algorithm names.
        # A bare string only works through substring matching, which can
        # accept unintended algorithm names -- pass an explicit list.
        decoded = jwt.decode(parsed_args.token, secret, algorithms=[alg])
        print(decoded)
    @staticmethod
    def init_arg_parser():
        """ Build the argument parser for the `forge jwtvalidate` command. """
        parser = argparse.ArgumentParser(prog='forge jwtvalidate')
        parser.add_argument('-t', '--token', action='store', dest='token', required=True,
                            help='jwt token to validate')
        parser.add_argument('-p', '--publickey', action='store', dest='publicKey', required=True,
                            help='the public key to decode token')
        parser.add_argument('-alg', '--algorithm', action='store', dest='algorithm',
                            help='the algorithm used to sign the token, the default is RS256')
        return parser
| TeleTrackingTechnologies/forge-jwtvalidate | jwtvalidate_logic/jwtvalidate_logic.py | jwtvalidate_logic.py | py | 1,293 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "jwt.decode",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
}
] |
42384509045 | import django
from os import path
# Test-only secret; never reuse this value in a real deployment.
SECRET_KEY = 'not secret'
INSTALLED_APPS = ('dumper', 'test', 'django.contrib.contenttypes')
# Enable debug for both the framework and the (legacy) template setting.
TEMPLATE_DEBUG = DEBUG = True
# SQLite backend with no NAME set.
# NOTE(review): presumably only used under the test runner -- confirm Django
# substitutes a database name in that context.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    },
}
ROOT_URLCONF = 'test.urls'
TEMPLATES = []
# Testing: Django < 1.6 lacks the discover runner, so use the backport.
if django.VERSION[:2] < (1, 6):
    INSTALLED_APPS += ('discover_runner',)
    TEST_RUNNER = 'discover_runner.DiscoverRunner'
    TEST_DISCOVER_TOP_LEVEL = path.dirname(path.dirname(__file__))
# Cache: two separate local-memory backends so invalidation can be exercised
# against more than one cache.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'dumper-default'
    },
    'other': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'dumper-other'
    },
}
# Cache middleware ordering per Django's docs: Update first, Fetch last,
# with the regular middleware in between.
MIDDLEWARE_CLASSES = (
    'dumper.middleware.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'dumper.middleware.FetchFromCacheMiddleware',
)
# Disable Django's default logging configuration entirely.
LOGGING_CONFIG = None
| canada-nyc/django-dumper | test/settings.py | settings.py | py | 959 | python | en | code | 33 | github-code | 1 | [
{
"api_name": "django.VERSION",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "name"
}
] |
71830595234 | #%%
import pandas as pd
import numpy as np
import json
from datetime import datetime
from pathlib import Path
import os
from django.core.management.base import BaseCommand, CommandError
from trade_perf.models import AcctStatement
from sqlalchemy import create_engine
#%%
class Command(BaseCommand):
    """Import an eToro account statement (xlsx) into the AcctStatement table."""
    def handle(self, *args, **options):
        """Read the spreadsheet, normalize columns and dates, write to SQLite."""
        ios = "etoro-account-statement.xlsx"
        df = pd.read_excel(ios, sheet_name="Account Activity", engine="openpyxl")
        # Columns that have no counterpart in the model are dropped.
        df = df.drop(columns=["Position ID", "Asset type", "NWA"])
        # Rename the remaining columns to match the AcctStatement fields.
        df.columns = [
            "date",
            "ordertype",
            "details",
            "amount",
            "units",
            "realized_equity_change",
            "realized_equity",
            "balance",
        ]
        # Statement timestamps are day-first; convert to Asia/Manila and keep
        # only the local calendar date.
        df["date"] = pd.to_datetime(
            df["date"], format="%d/%m/%Y %H:%M:%S", utc=True
        ).dt.tz_convert("Asia/Manila")
        df["date"] = df["date"].dt.date
        print(df)
        engine = create_engine("sqlite:///db.sqlite3", echo=True)
        # Replace the whole table; the DataFrame index becomes the `id` column.
        df.to_sql(
            AcctStatement._meta.db_table,
            con=engine,
            if_exists="replace",
            index=True,
            index_label="id",
        )
| loerllemit/portfolio | trade_perf/management/commands/getdata.py | getdata.py | py | 1,215 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pandas.read_excel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 30,
"usage_type": "call"
},
{
"api_... |
17860688347 | # From http://zetcode.com/gui/pyqt4/firstprograms/
import sys
from PyQt5 import QtWidgets
def main():
    """Create and show a minimal empty window, then run the Qt event loop."""
    # Every Qt program needs exactly one QApplication instance.
    app = QtWidgets.QApplication(sys.argv)
    w = QtWidgets.QWidget()
    w.resize(250, 150)  # initial window size in pixels
    w.move(300, 300)  # initial position on screen
    w.setWindowTitle('Simple Test')
    w.show()
    # exec_() blocks until the window closes; forward its exit code.
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
| meonBot/conda-recipes | pyqt5/run_test.py | run_test.py | py | 346 | python | en | code | null | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidge... |
13649087156 | import json
from rest_framework import status
from api.constans import AutoNotificationConstants, TaskStageConstants
from api.models import *
from api.tests import GigaTurnipTestHelper
class CaseTest(GigaTurnipTestHelper):
    """Tests for the per-case info endpoint used by the case map view."""
    def test_case_info_for_map(self):
        """case-info-by-case returns one entry per stage with the task ids
        and completion flags for the case."""
        # Simple two-field schema shared by both stages.
        json_schema = {
            "type": "object",
            "properties": {
                "weekday": {
                    "type": "string",
                    "title": "Select Weekday",
                    "enum": ["mon", "tue", "wed", "thu", "fri"]
                },
                "time": {
                    "type": "string",
                    "title": "What time",
                    "enum": ["10:00", "11:00", "12:00", "13:00", "14:00"]
                }
            }
        }
        self.initial_stage.json_schema = json.dumps(json_schema)
        # Second stage is assigned to the same user who did the first one.
        second_stage = self.initial_stage.add_stage(
            TaskStage(
                name='Second Task Stage',
                json_schema=self.initial_stage.json_schema,
                assign_user_by='ST',
                assign_user_from_stage=self.initial_stage,
            )
        )
        responses = {"weekday": "mon", "time": "10:00"}
        task = self.create_initial_task()
        # Completing the first task should auto-create the second-stage task.
        self.complete_task(task, responses)
        response = self.get_objects("case-info-by-case", pk=task.case.id)
        # Expected map entries: completed first-stage task, open second-stage task.
        maps_info = [
            {'stage': self.initial_stage.id, 'stage__name': self.initial_stage.name, 'complete': [True],
             'force_complete': [False], 'id': [task.id]},
            {'stage': second_stage.id, 'stage__name': second_stage.name, 'complete': [False], 'force_complete': [False],
             'id': [task.out_tasks.get().id]}
        ]
        self.assertEqual(status.HTTP_200_OK, response.data['status'])
        for i in maps_info:
            self.assertIn(i, response.data['info'])
| KloopMedia/GigaTurnip | api/tests/test_case.py | test_case.py | py | 1,860 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "api.tests.GigaTurnipTestHelper",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 49,
"usage_type": "attribute"
},
{
"a... |
20436885034 | import os
import pygame
####################################################################
# Basic initialization (must always be done)
pygame.init()  # initialize all pygame modules
screen_width = 640
screen_height = 480
screen = pygame.display.set_mode((screen_width, screen_height))
# Set the window title
pygame.display.set_caption("Hana Love")
# FPS
clock = pygame.time.Clock()
####################################################################
# User game initialization (background, images, coordinates, fonts, ...)
current_path = os.path.dirname(__file__)  # directory containing this file
image_path = os.path.join(current_path, "images")
# Create the background
background = pygame.image.load(os.path.join(image_path, "background.jpg"))
# Create the stage (currently unused)
# stage = pygame.image.load(os.path.join(image_path, "stage.png"))
# stage_size = stage.get_rect().size
# stage_height = stage_size[1]
# Create the character, centered horizontally at the bottom of the screen
character = pygame.image.load(os.path.join(image_path, "character-re.png"))
character_size = character.get_rect().size
character_width = character_size[0]
character_height = character_size[1]
character_x_pos = (screen_width / 2) - (character_width - 2)
character_y_pos = screen_height - character_height
# Character movement direction (horizontal delta per frame)
character_to_x = 0
# Character movement speed
character_speed = 5
# Create the weapon
weapon = pygame.image.load(os.path.join(image_path, "weapon.png"))
weapon_size = weapon.get_rect().size
weapon_width = weapon_size[0]
# Several weapons can be in flight at once
weapons = []
# Weapon movement speed
weapon_speed = 10
running = True
while running:
    dt = clock.tick(30)
    # 2. Handle events (keyboard, mouse, ...)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                character_to_x -= character_speed
            elif event.key == pygame.K_RIGHT:
                character_to_x += character_speed
            elif event.key == pygame.K_SPACE:
                # Spawn a new weapon at the character's center (firing position)
                weapon_x_pos = character_x_pos + \
                    (character_width / 2) - (weapon_width / 2)
                weapon_y_pos = character_y_pos
                weapons.append([weapon_x_pos, weapon_y_pos])
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                character_to_x = 0
    # 3. Update the character position, clamped to the screen bounds
    character_x_pos += character_to_x
    if character_x_pos < 0:
        character_x_pos = 0
    elif character_x_pos > screen_width - character_width:
        character_x_pos = screen_width - character_width
    # Update weapon positions:
    # x stays the same, y moves forward (upward) each frame,
    # e.g. 200 -> 180, 160, 140 ... (decremented by weapon_speed)
    weapons = [[w[0], w[1] - weapon_speed] for w in weapons]  # move weapons up
    # Remove weapons that reached the top of the screen
    weapons = [[w[0], w[1]] for w in weapons if w[1] > 0]
    # 4. Collision handling (not implemented yet)
    # 5. Draw everything
    screen.blit(background, (0, 0))
    # screen.blit(stage, (0, screen_height - stage_height))
    for weapon_x_pos, weapon_y_pos in weapons:
        screen.blit(weapon, (weapon_x_pos, weapon_y_pos))
    screen.blit(character, (character_x_pos, character_y_pos))
    pygame.display.update()
# Shut down pygame
pygame.quit()
| SeungWookHan/Python-pygame | pygame_project/2_weapon_keyevent.py | 2_weapon_keyevent.py | py | 3,507 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.s... |
27476889408 | """ Enhancement of pathlib, representation with a tree data-structure.
This module enhances the functionalities of pathlib, interpreting the Path
objects as nodes of a tree and automatically creating their subtrees. The tree
can be explored for analysis purposes.
"""
from pathlib import Path
from typing import Union, Callable, Iterator, Any
from enum import Enum
import copy
import xlsxwriter
__all__ = ["PathNode", "PathTree", "PathTreeProperty", "TreeException", "Size"]
class PathTreeProperty(Enum):
    """ Enumerator of the standard tree/node properties.
    Members are used as keys in the nodes' property dictionaries.
    """
    HEIGHT = "<height>"
    """ Height (of a node): the minimal distance from the note to a leaf. """
    DEPTH = "<depth>"
    """ Depth (of a node): the distance from the note to the root. """
    NUM_OF_DIRECTORIES = "<num_dir>"
    """ No. of Directory (of a subtree): the number of directory in the subtree. """
    NUM_OF_FILES = "<num_file>"
    """ No. of Files (of a subtree): the number of files in the subtree. """
    NUM_OF_INODES = "<num_inode>"
    """ No. of inner nodes (of a subtree): the number of inner nodes in the subtree. """
    NUM_OF_LEAVES = "<num_leaves>"
    """ No. of leaves (of a subtree): the number of leaves in the subtree. """
    NUM_OF_NODES = "<num_nodes>"
    """ No. of nodes (of a subtree): the number of nodes in a subtree. """
    SIZE = "<size>"
    """ Size (of a subtree): the sum of the byte occupied by all the files in the subtree. """
    SIMPLE_SIZE = "<simple_size>"
    """ Size (of a subtree) in simplified format: the sum of the byte occupied by all the files in
    the subtree, expressed in KB, ..., TB accordingly. """
    PRUNED = "<pruned>"
    """ Pruned status (of a node): true if the node has been virtually removed from the tree, false
    otherwise. """
otherwise. """
class Size(Enum):
    """ Common byte multiples plus a helper to humanize byte counts. """
    KB = 1024 ** 1  # bytes in a kilobyte
    MB = 1024 ** 2  # bytes in a megabyte
    GB = 1024 ** 3  # bytes in a gigabyte
    TB = 1024 ** 4  # bytes in a terabyte
    @staticmethod
    def simplified_size(size:int):
        """ Convert a byte count to a short human-readable string.
        The value is floor-divided by the largest unit that fits, producing
        1-3 significant digits, e.g. ``'23 MB'``.
        """
        # Try the biggest unit first; fall through to plain bytes.
        for unit_name, unit_bytes in (("TB", Size.TB.value),
                                      ("GB", Size.GB.value),
                                      ("MB", Size.MB.value),
                                      ("KB", Size.KB.value)):
            if size >= unit_bytes:
                return f"{size // unit_bytes} {unit_name}"
        return f"{size} B"
class TreeException(Exception):
    """ Raised when a tree-structure invariant is violated
    (e.g. a non-root node without a parent).
    """
class PathNode():
    """ The PathNode class describes a tree node containing information
    associated to a path.
    The children of a node are sorted by: type (directories first, then files),
    path (alphabetically on the file/dir name).
    Attributes:
        path: the path associated to the node
        parent: the parent directory of the node, as a :class:`PathNode` object
        children: the children files/directories of the node, as list of\
            :class:`PathNode` objects
        property: a dictionary that can contain custom properties of the node\
            (e.g. height, subtree size, ...)
    """
    def __init__(self, path:Path, parent:Union["PathNode",None]=None) -> None:
        """ Create a new path node and all its subtree.
        Parameters:
            path: the path of the node
            parent: the path of the parent node (if None, it is a root node)
        """
        self.path = path
        self.parent = parent
        self.children = self.__compute_children()
        self.property = {}
    def __compute_children(self):
        """ Recursively scan the filesystem and generate the subtree of the
        node (each child constructs its own children in turn).
        """
        children = []
        try:
            if self.path.is_dir():
                for child in self.path.iterdir():
                    children.append(PathNode(child, self))
                # Sort key prefixes a 0/1 tag so directories come before
                # files, then orders alphabetically by entry name.
                children.sort(key=lambda node:f"{0 if node.path.is_dir() else 1}|{node.path.name}")
        # Best-effort scan: unreadable or vanished entries are skipped.
        except PermissionError:
            pass
        except FileNotFoundError:
            pass
        return children
    def is_root(self) -> bool:
        """ Return true if and only if the node is root.
        Check if the node has a parent node.
        Returns:
            True if and only if the node is root.
        """
        return self.parent is None
    def is_leaf(self) -> bool:
        """ Return true if and only if the node is a leaf.
        Check if the node has children nodes.
        Returns:
            True if and only if the node is a leaf.
        """
        return len(self.children) == 0
    def is_inode(self) -> bool:
        """ Return true if and only if the node is an inner node.
        Check if the node has children nodes.
        Returns:
            True if and only if the node is an inner node.
        """
        return not self.is_leaf()
    def is_file(self) -> bool:
        """ Return true if and only if the node is a file.
        Defined as the negation of :func:`is_dir`.
        Returns:
            True if and only if the node is a file.
        """
        return not self.is_dir()
    def is_dir(self) -> bool:
        """ Return true if and only if the node is a directory.
        Check if the node is a directory.
        Returns:
            True if and only if the node is a directory.
        """
        return self.path.is_dir()
    def compute_bottom_up_property(
        self,
        property_name:Union[str,PathTreeProperty],
        base_func:Callable[["PathNode"], Any],
        recursive_func:Callable[["PathNode", list["PathNode"]], Any]
    ) -> None:
        """ Compute a property of bottom-up type in the subtree of the node.
        The bottom-up properties are recursively computed from the leaves
        (files) to the root: when the node is a leaf the property can be
        computed directly (without involving other nodes), when the node is an
        inner node the property is computed based also on the children nodes
        properties. Hence there are 2 functions that must be specified: the
        base case function (for leaves), the recursive function (for inner
        nodes).
        Parameters:
            property_name: the name of the property (used as key in property\
                dict)
            base_func: the function to compute the property on the leaves
            recursive_func: the function to compute the property on the inner\
                nodes (assuming it is already computed on the leaves)
        Parameters of base_func:
            * leaf (PathNode): the current node
        Parameters of recursive_func:
            * inode (PathNode): the current node
            * children (list(PathNode)): the list of children of the current node
        Examples::
            Compute the height.
            >>> base_func = lambda node: 0
            >>> recursive_func = lambda node, children: 1 + min([
            >>>     int(child.property["height"])
            >>>     for child in children
            >>> ])
            >>> root.compute_bottom_up_property("height", base_func, recursive_func)
        """
        if self.is_leaf():
            self.property[property_name] = base_func(self)
        else:
            # Post-order: children first, then this node from their results.
            for child in self.children:
                child.compute_bottom_up_property(property_name, base_func, recursive_func)
            self.property[property_name] = recursive_func(self, self.children)
    def compute_top_down_property(
        self,
        property_name:Union[str,PathTreeProperty],
        root_func:Callable[["PathNode"], Any],
        parent_func:Callable[["PathNode", "PathNode"], Any]
    ) -> None:
        """ Compute a property of top-down type in the subtree of the node.
        The top-down properties are recursively computed from the root to the
        leaves: when the node is root the property can be computed directly
        (without involving other nodes), when the node is a not-root node the
        property is computed based also on the parent node properties. Hence
        there are 2 functions that must be specified: the root function (for
        the root), the parent function (for not-root nodes).
        Parameters:
            property_name: the name of the property (used as key in property\
                dict)
            root_func: the function to compute the property on the root
            parent_func: the function to compute the property on the not-root\
                nodes (assuming it is already computed on the parent)
        Parameters of root_func:
            * root (PathNode): the current node
        Parameters of parent_func:
            * node (PathNode): the current node
            * parent (PathNode): the parent of the current node
        Raises:
            TreeException: if a non-root node has no parent.
        Examples::
            Compute the depth.
            >>> root_func = lambda root: 0
            >>> parent_func = lambda node, parent: 1 + parent.property["depth"]
            >>> root.compute_top_down_property("depth", root_func, parent_func)
        """
        if self.is_root():
            self.property[property_name] = root_func(self)
        else:
            if self.parent is not None:
                self.property[property_name] = parent_func(self, self.parent)
            else:
                raise TreeException(f"Not-root node has not a parent ({self})")
        # Pre-order: this node first, then recurse into the children.
        for child in self.children:
            child.compute_top_down_property(property_name, root_func, parent_func)
    def compute_individual_property(
        self,
        property_name:Union[str,PathTreeProperty],
        property_func:Callable[["PathNode"], Any]
    ) -> None:
        """ Compute an individual property in the subtree of the node.
        The property does not exploit any knowledge on other nodes. However it
        can exploit information stored in the node property dict (e.g. a
        previously computed height).
        Parameters:
            property_name: the name of the property (used as key in property \
                dict)
            property_func: the function to compute the property on the node
        Parameters of property_func:
            * node (PathNode): the current node
        Examples::
            Compute a flag to identify directories.
            >>> property_func = lambda node: node.path.is_dir()
            >>> root.compute_individual_property("is_dir", property_func)
        """
        self.property[property_name] = property_func(self)
        for child in self.children:
            child.compute_individual_property(property_name, property_func)
    def remove_property(self, property_name:Union[str,PathTreeProperty]) -> bool:
        """ Remove a property of the node.
        Parameters:
            property_name: the name of the property to remove.
        Returns:
            True if the property is successfully removed, false if the property\
            is missing.
        """
        if property_name in self.property:
            self.property.pop(property_name)
            return True
        return False
    def copy(self) -> "PathNode":
        """ Return a deep copy of the node.
        ``copy.deepcopy`` follows the ``parent`` and ``children`` references,
        so the whole linked tree is duplicated together with the node.
        Returns:
            A copy of the node.
        """
        return copy.deepcopy(self)
    def __str__(self) -> str:
        """ Return a string describing the node properties.
        Print the node name then the properties (enum-keyed properties are
        shown by their enum member name).
        Returns:
            A string describing the node.
        """
        properties = []
        for prop, value in self.property.items():
            if isinstance(prop, PathTreeProperty):
                properties.append(f"{prop.name}={value}")
            else:
                properties.append(f"{prop}={value}")
        return f"[Pathnode] > Path: '{self.path}' | Properties: {', '.join(properties)}"
class PathTree():
    """ The PathTree class describes a tree made up of PathNode nodes.
    The structure mimics the directory tree but adds analytic functionalities.
    Attributes:
        root: the root PathNode of the tree
        property: the properties of the tree (equivalent to the root properties)
    """

    def __init__(self, root:Union[str, Path, PathNode], skip_properties:bool=False) -> None:
        """ Create a new PathTree, based on the root node.
        Parameters:
            root: the root path (str and Path are normalised to a PathNode)
            skip_properties: ignore computation of tree's main properties.
        """
        if isinstance(root, str):
            root = Path(root)
        if isinstance(root, Path):
            root = PathNode(root)
        self.root = root
        # Shared reference: the tree's properties ARE the root's properties.
        self.property = root.property
        if not skip_properties:
            self.__compute_basic_properties()

    def __iter__(self) -> Iterator:
        """ Return a default iterator on the nodes of the tree.
        The order is breadth-first and uses the iterator returned by the
        `breadth_first_iter()` function. To use the depth-first order, use the
        iterator returned by the `depth_first_iter()` function.
        Returns:
            An iterator for the nodes of the tree.
        """
        return self.breadth_first_iter()

    def breadth_first_iter(self) -> Iterator:
        """ Return an iterator on the nodes of the tree using breadth-first
        order.
        Returns:
            An iterator for the nodes of the tree, in breadth-first order.
        """
        nodes = [self.root]
        while len(nodes) > 0:
            node = nodes.pop(0)
            # Children are appended after the pending nodes: FIFO order.
            nodes = nodes + node.children
            yield node

    def depth_first_iter(self) -> Iterator:
        """ Return an iterator on the nodes of the tree using depth-first
        order.
        Returns:
            An iterator for the nodes of the tree, in depth-first order.
        """
        nodes = [self.root]
        while len(nodes) > 0:
            node = nodes.pop(0)
            # Children are prepended to the pending nodes: LIFO order.
            nodes = node.children + nodes
            yield node

    def validated_iter(self, valid_func:Callable[[PathNode], bool]) -> Iterator:
        """ Return an iterator on filtered nodes of the tree.
        The nodes that do not satisfy the condition are excluded and so are
        their subtrees.
        Parameters:
            valid_func: the criteria to keep nodes.
        Returns:
            An iterator that excludes not valid nodes and subtrees.
        Parameters of valid_func:
            * node (PathNode): the node to test
        Return of valid_func:
            * True if the node is acceptable, false otherwise.
        """
        nodes = [self.root] if valid_func(self.root) else []
        while len(nodes) > 0:
            node = nodes.pop(0)
            # Invalid children are dropped here, pruning their whole subtree.
            nodes = [child for child in node.children if valid_func(child)] + nodes
            yield node

    def compute_bottom_up_property(
            self,
            property_name:Union[str,PathTreeProperty],
            leaf_func:Callable[["PathNode"], Any],
            inode_func:Callable[["PathNode", list["PathNode"]], Any]
        ) -> None:
        """ Compute a property of bottom-up type in the tree.
        This function calls the namesake function of the `PathNode` on the root.
        Check `PathNode.compute_bottom_up_property()` for more details.
        Parameters:
            property_name: the name of the property (used as key in property\
                dict)
            leaf_func: the function to compute the property on the leaves
            inode_func: the function to compute the property on the inner\
                nodes (assuming it is already computed on the leaves)
        Parameters of leaf_func:
            * leaf (PathNode): the current node
        Parameters of inode_func:
            * inode (PathNode): the current node
            * children (list(PathNode)): the list of children of the current node
        """
        self.root.compute_bottom_up_property(property_name, leaf_func, inode_func)

    def compute_top_down_property(
            self,
            property_name:Union[str,PathTreeProperty],
            root_func:Callable[["PathNode"], Any],
            notroot_func:Callable[["PathNode", "PathNode"], Any]
        ) -> None:
        """ Compute a property of top-down type in the subtree of the node.
        This function calls the namesake function of the `PathNode` on the root.
        Check `PathNode.compute_top_down_property()` for more details.
        Parameters:
            property_name: the name of the property (used as key in property\
                dict)
            root_func: the function to compute the property on the root
            notroot_func: the function to compute the property on the not-root\
                nodes (assuming it is already computed on the parent)
        Parameters of root_func:
            * root (PathNode): the current node
        Parameters of notroot_func:
            * node (PathNode): the current node
            * parent (PathNode): the parent of the current node
        """
        self.root.compute_top_down_property(property_name, root_func, notroot_func)

    def compute_individual_property(
            self,
            property_name:Union[str,PathTreeProperty],
            property_func:Callable[["PathNode"], Any]
        ) -> None:
        """ Compute an individual property in the tree.
        This function calls the namesake function of the `PathNode` on the root.
        Check `PathNode.compute_individual_property()` for more details.
        Parameters:
            property_name: the name of the property (used as key in property\
                dict)
            property_func: the function to compute the property on the node
        Parameters of property_func:
            * node: the current node (as PathNode)
        """
        self.root.compute_individual_property(property_name, property_func)

    def compute_basic_property(self, property_name:PathTreeProperty) -> None:
        """ Compute a basic property, contained in `PathTreeProperty` enum.
        If the property specified is PathTreeProperty.PRUNED, this function is
        a nop.
        Parameters:
            property_name: the name of the basic property to compute
        """
        if property_name == PathTreeProperty.HEIGHT:
            self.__compute_height()
        elif property_name == PathTreeProperty.DEPTH:
            self.__compute_depth()
        elif property_name == PathTreeProperty.NUM_OF_DIRECTORIES:
            self.__compute_num_of_directories()
        elif property_name == PathTreeProperty.NUM_OF_FILES:
            self.__compute_num_of_files()
        elif property_name == PathTreeProperty.NUM_OF_INODES:
            self.__compute_num_of_inode()
        elif property_name == PathTreeProperty.NUM_OF_LEAVES:
            self.__compute_num_of_leaves()
        elif property_name == PathTreeProperty.NUM_OF_NODES:
            self.__compute_num_of_nodes()
        elif property_name == PathTreeProperty.SIZE:
            self.__compute_size()
        elif property_name == PathTreeProperty.SIMPLE_SIZE:
            self.__compute_simple_size()
        elif property_name == PathTreeProperty.PRUNED:
            pass

    def __compute_height(self):
        """ Compute the height of the nodes in the tree.
        The height of a node is the length of the longest downward path to a
        leaf, so an inner node's height is 1 + the MAX of its children's
        heights (the original used min, which computes the shortest distance
        to a leaf instead).
        """
        self.compute_bottom_up_property(
            PathTreeProperty.HEIGHT,
            lambda leaf: 0,
            lambda inode, children:
                1 + max(int(child.property[PathTreeProperty.HEIGHT])
                        for child in children)
        )

    def __compute_depth(self):
        """ Compute the depth of the nodes in the tree.
        """
        self.compute_top_down_property(
            PathTreeProperty.DEPTH,
            lambda root: 0,
            lambda node, parent: 1 + parent.property[PathTreeProperty.DEPTH]
        )

    def __compute_num_of_directories(self):
        """ Compute the num. of directories in all the subtrees.
        """
        self.compute_bottom_up_property(
            PathTreeProperty.NUM_OF_DIRECTORIES,
            lambda leaf: 1 if leaf.path.is_dir() else 0,
            lambda inode, children:
                (1 if inode.path.is_dir() else 0) +
                sum(child.property[PathTreeProperty.NUM_OF_DIRECTORIES]
                    for child in children)
        )

    def __compute_num_of_files(self):
        """ Compute the num. of files in all the subtrees.
        """
        self.compute_bottom_up_property(
            PathTreeProperty.NUM_OF_FILES,
            lambda leaf: 1 if leaf.path.is_file() else 0,
            lambda inode, children:
                sum(child.property[PathTreeProperty.NUM_OF_FILES]
                    for child in children)
        )

    def __compute_num_of_inode(self):
        """ Compute the num. of internal nodes in all the subtrees.
        """
        self.compute_bottom_up_property(
            PathTreeProperty.NUM_OF_INODES,
            lambda leaf: 0,
            lambda inode, children:
                1 + sum(child.property[PathTreeProperty.NUM_OF_INODES]
                        for child in children)
        )

    def __compute_num_of_leaves(self):
        """ Compute the num. of leaves in all the subtrees.
        """
        self.compute_bottom_up_property(
            PathTreeProperty.NUM_OF_LEAVES,
            lambda leaf: 1,
            lambda inode, children:
                sum(child.property[PathTreeProperty.NUM_OF_LEAVES]
                    for child in children)
        )

    def __compute_num_of_nodes(self):
        """ Compute the num. of nodes in all the subtrees.
        Relies on NUM_OF_DIRECTORIES and NUM_OF_FILES being computed first.
        """
        self.compute_individual_property(
            PathTreeProperty.NUM_OF_NODES,
            lambda node:
                node.property[PathTreeProperty.NUM_OF_DIRECTORIES] +
                node.property[PathTreeProperty.NUM_OF_FILES]
        )

    def __compute_size(self):
        """ Compute the size (in bytes) of all the subtrees.
        """
        self.compute_bottom_up_property(
            PathTreeProperty.SIZE,
            lambda leaf: leaf.path.stat().st_size if leaf.path.is_file() else 0,
            lambda inode, children:
                sum(child.property[PathTreeProperty.SIZE]
                    for child in children)
        )

    def __compute_simple_size(self):
        """ Compute the (simplified) size of all the subtrees.
        The size expressed with a multiple of the byte and with 1-3 significant
        digits. E.g. 23 MB
        Relies on SIZE being computed first.
        """
        def simplified_size(node:PathNode):
            size = node.property[PathTreeProperty.SIZE]
            return \
                f"{size} B" if size < Size.KB.value else (
                f"{size // Size.KB.value} KB" if size < Size.MB.value else (
                f"{size // Size.MB.value} MB" if size < Size.GB.value else (
                f"{size // Size.GB.value} GB" if size < Size.TB.value else
                f"{size // Size.TB.value} TB"
                )))
        self.compute_individual_property(
            PathTreeProperty.SIMPLE_SIZE,
            simplified_size
        )

    def __compute_basic_properties(self) -> None:
        """ Compute the basic properties for the nodes of the tree.
        The basic properties are:
        * height of the nodes
        * depth of the nodes
        * number of directories in the nodes' subtrees
        * number of files in the nodes' subtrees
        * number of nodes in the nodes' subtrees
        * size (in bytes) of the nodes' subtrees
        * simple size (in KB...TB) of the nodes' subtrees
        Note: NUM_OF_NODES and SIMPLE_SIZE are derived properties and must be
        computed after their dependencies (kept in this order on purpose).
        """
        self.compute_basic_property(PathTreeProperty.HEIGHT)
        self.compute_basic_property(PathTreeProperty.DEPTH)
        self.compute_basic_property(PathTreeProperty.NUM_OF_DIRECTORIES)
        self.compute_basic_property(PathTreeProperty.NUM_OF_FILES)
        self.compute_basic_property(PathTreeProperty.NUM_OF_INODES)
        self.compute_basic_property(PathTreeProperty.NUM_OF_LEAVES)
        self.compute_basic_property(PathTreeProperty.NUM_OF_NODES)
        self.compute_basic_property(PathTreeProperty.SIZE)
        self.compute_basic_property(PathTreeProperty.SIMPLE_SIZE)

    def remove_property(self, property_name:Union[str,PathTreeProperty]) -> tuple[bool, bool]:
        """ Remove a property from all the nodes in the tree.
        If the property is missing from one node, no exception is raised.
        Parameters:
            property_name: the name of the property to remove.
        Returns:
            A tuple containing two booleans:
            * True if the property appeared in all nodes, false otherwise
            * True if the property appeared in at least one node, false\
                otherwise
        """
        in_all_nodes = True
        in_one_node = False
        for node in self:
            status = node.remove_property(property_name)
            in_all_nodes = in_all_nodes and status
            in_one_node = in_one_node or status
        return in_all_nodes, in_one_node

    def physical_pruning(self, keep_condition:Callable[[PathNode],bool]) -> None:
        """ Remove (physically) all the subtrees whose root does not
        satisfy the keep condition.
        The tree is scanned in breadth-first order. For each node, the keep
        condition is checked and if it is not satisfied all the corresponding
        subtree is physically removed from the tree.
        Note that the properties of the nodes are not recomputed.
        Parameters:
            keep_condition: the boolean function that assesses if a node, and\
                its subtree, should be kept or pruned.
        Parameters of keep_condition:
            * node: the node to check.
        Return of keep_condition:
            * True if the node (and subtree) must be kept, false if it must be\
                pruned.
        """
        nodes = [self.root]
        while len(nodes) > 0:
            node = nodes.pop(0)
            idx = 0
            # In-place filtering of the children list; the index only advances
            # when the current child is kept.
            while idx < len(node.children):
                if not keep_condition(node.children[idx]):
                    node.children.pop(idx)
                else:
                    idx += 1
            nodes = nodes + node.children

    def logical_pruning(self, keep_condition:Callable[[PathNode],bool]) -> None:
        """ Remove (logically) all the subtrees whose root does not
        satisfy the keep condition.
        The logical removal is applied using the property
        `PathTreeProperty.PRUNED`: if true, the node is removed.
        The tree is scanned in breadth-first order. For each node, the keep
        condition is checked and if it is not satisfied all the corresponding
        subtree is logically removed from the tree.
        Parameters:
            keep_condition: the boolean function that assesses if a node, and\
                its subtree, should be kept or pruned.
        Parameters of keep_condition:
            * node: the node to check.
        Return of keep_condition:
            * True if the node (and subtree) must be kept, false if it must be\
                pruned.
        """
        self.root.compute_top_down_property(
            PathTreeProperty.PRUNED,
            lambda root: False,
            # A node is pruned if its parent is pruned or it fails the check.
            lambda node, parent:
                parent.property[PathTreeProperty.PRUNED] or
                not keep_condition(node)
        )

    def get_node(self, path:Union[str,Path]) -> Union[PathNode, None]:
        """ Return the PathNode corresponding to the passed Path.
        Descends the tree matching one path component per level.
        Parameters:
            path: the path to search in the tree
        Returns:
            The PathNode corresponding to the passed Path if exists, None\
                otherwise.
        """
        if isinstance(path, str):
            path = Path(path)
        parts = path.parts
        node = self.root
        # idx is the index of the path component to compare at the next level;
        # a "." root has no component of its own, so start from 0.
        idx = 1
        if node.path == Path("."):
            idx = 0
        found = True
        while found:
            if path == node.path:
                return node
            found = False
            for child in node.children:
                # (debug print removed: it flooded stdout on every lookup)
                if parts[idx] == child.path.parts[idx]:
                    node = child
                    found = True
                    break
            idx += 1
        return None

    def copy(self) -> "PathTree":
        """ Return a deepcopy of the tree and all its nodes.
        Returns:
            A deepcopy of the tree.
        """
        new_root = self.root.copy()
        new_nodes = [new_root]
        while len(new_nodes) > 0:
            new_node = new_nodes.pop(0)
            # Re-link the copied children to their copied parent.
            new_children = [child.copy() for child in new_node.children]
            new_node.children = new_children
            for new_child in new_children:
                new_child.parent = new_node
            new_nodes = new_nodes + new_children
        # Properties were copied along with the nodes, no need to recompute.
        return PathTree(new_root, skip_properties=True)

    def __str__(self) -> str:
        """ Return a string describing the tree properties.
        Print the property of the root.
        Returns:
            A string describing the tree.
        """
        return f"[PathTree] > Root: ({str(self.root)})"

    def to_csv(
            self,
            csvfile:Union[Path, str],
            properties:Union[list[Union[str, PathTreeProperty]], None]=None,
            node_condition:Callable[[PathNode], bool]=lambda node: True,
            node_limit:int=1000000
        ) -> None:
        """ Export all nodes of the tree to a csv.
        The export includes the name of the path and a list of properties. Due
        to the high number of nodes a directory tree can have, by default, the
        export is limited to the first 1 million nodes.
        Parameters:
            csvfile: the name of the csv for the export.
            properties: the list of properties to include in the export. If\
                None all parameters are included.
            node_condition: the condition a node must meet to be exported (by\
                default all nodes are exported).
            node_limit: the max number of nodes that can be exported. If <= 0,\
                no limitation is applied.
        """
        if isinstance(csvfile, str):
            csvfile = Path(csvfile)
        if properties is None:
            properties = list(self.root.property.keys())
        # Header. Plain-string property names are included too (the original
        # silently dropped them from the header while still writing their
        # values in the data rows, misaligning the columns).
        str_properties = []
        for prop in properties:
            if isinstance(prop, PathTreeProperty):
                str_properties.append(prop.value)
            else:
                str_properties.append(str(prop))
        header = ";".join(["path"] + str_properties)
        # Data
        lines = []
        for node in self.breadth_first_iter():
            if 0 < node_limit <= len(lines):
                break
            if not node_condition(node):
                continue
            line = [node.path.as_posix()]
            for curr_property in properties:
                # Missing properties export as an empty cell, consistently
                # with to_excel, instead of raising KeyError.
                line.append(str(node.property.get(curr_property, "")))
            lines.append(";".join(line))
        with open(csvfile, "w", encoding="utf8") as f:
            f.write(header + "\n")
            for line in lines:
                f.write(line + "\n")

    def to_excel(
            self,
            excelfile:Union[Path, str],
            properties:Union[list[Union[str, PathTreeProperty]], None]=None,
            node_condition:Callable[[PathNode], bool]=lambda node: True,
            node_limit:int=1000000
        ) -> None:
        """ Export all nodes of the tree to Excel.
        The export includes the name of the path and a list of properties. Due
        to the high number of nodes a directory tree can have, by default, the
        export is limited to the first 1 million nodes.
        Parameters:
            excelfile: the name of the Excel for the export.
            properties: the list of properties to include in the export. If\
                None all parameters are included.
            node_condition: the condition a node must meet to be exported (by\
                default all nodes are exported).
            node_limit: the max number of nodes that can be exported. If <= 0,\
                no limitation is applied.
        """
        if isinstance(excelfile, str):
            excelfile = Path(excelfile)
        if properties is None:
            properties = list(self.root.property.keys())
        workbook = xlsxwriter.Workbook(excelfile)
        sheet = workbook.add_worksheet(f"Export - {self.root.path}")
        # Header
        header_format = workbook.add_format({
            'bold': True,
            'font_color': 'white',
            'bg_color': "#002060"
        })
        sheet.write(0, 0, "Path", header_format)
        for col, prop in enumerate(properties, 1):
            if isinstance(prop, PathTreeProperty):
                sheet.write(0, col, prop.value, header_format)
            else:
                sheet.write(0, col, prop, header_format)
        # Data
        row = 1
        for node in self.breadth_first_iter():
            if 0 < node_limit < row:
                break
            if not node_condition(node):
                continue
            sheet.write(row, 0, str(node.path))
            for col, prop in enumerate(properties, 1):
                if prop in node.property:
                    sheet.write(row, col, node.property[prop])
            row += 1
        sheet.autofit()
        workbook.close()
| MCallagher/pathtreelib | pathtreelib/__init__.py | __init__.py | py | 33,778 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 98... |
26578612425 | import numpy as np
import matplotlib.pyplot as plt
import itertools as it
from numpy import random
from scipy.stats import multivariate_normal
from math import exp, sqrt, pi
from scipy.spatial import distance
## this function draws an o when the value is -1 and x otherwise
## print a 3x3 grid: 'o' for -1 entries, 'x' for everything else
def draw_x_o(data, m, text):
    print(text)
    for row in range(3):
        symbols = ["o" if data[row][col] == -1 else "x" for col in range(3)]
        # trailing space after the last symbol mirrors the per-cell end=" "
        print(" ".join(symbols) + " ")
    print()
## we reorder the indexes, which we then use to plot the evidence
def create_index_set(evidence):
E = evidence.sum(axis=1)
dist = distance.squareform(distance.pdist(evidence, 'euclidean'))
np.fill_diagonal(dist, np.inf)
L = []
D = list(range(E.shape[0]))
L.append(E.argmin())
D.remove(L[-1])
while len(D) > 0:
# add d if dist from d to all other points in D
# is larger than dist from d to L[-1]
N = [d for d in D if dist[d, D].min() > dist[d, L[-1]]]
if len(N) == 0:
L.append(D[dist[L[-1],D].argmin()])
else:
L.append(N[dist[L[-1],N].argmax()])
D.remove(L[-1])
# reverse the resulting index array
return np.array(L)[::-1]
## we create 512 3x3 matrices with values -1 or 1
def getData():
cartesian_product = list(it.product([-1, 1], repeat=9))
data = []
for p in cartesian_product:
data.append(np.reshape(np.asarray(p), (3, 3)))
return np.array(data)
## the following are the models given M0 to M3
def M0():
    """Uniform model: every one of the 512 possible datasets is equally likely."""
    return 1 / 512
def M1(data, theta):
    """Likelihood of a 3x3 grid: logistic model with a horizontal trend theta[0]."""
    prob = 1.0
    for row in range(3):
        for col in range(3):
            logit = data[row, col] * theta[0] * (col - 1)
            prob *= 1 / (1 + exp(-logit))
    return prob
def M2(data, theta):
    """Likelihood of a 3x3 grid: logistic model with horizontal and vertical trends."""
    prob = 1.0
    for row in range(3):
        for col in range(3):
            logit = data[row, col] * ((theta[0] * (col - 1)) + (theta[1] * (1 - row)))
            prob *= 1 / (1 + exp(-logit))
    return prob
def M3(data, theta):
    """Likelihood of a 3x3 grid: logistic model with trends plus a bias theta[2]."""
    prob = 1.0
    for row in range(3):
        for col in range(3):
            logit = data[row, col] * ((theta[0] * (col - 1)) + (theta[1] * (1 - row)) + theta[2])
            prob *= 1 / (1 + exp(-logit))
    return prob
## create prior given sigma and how many samples
def prior(sigma,model,sampleSize):
if model == 0:
return np.zeros((4,512))
mean = np.zeros(model)
covar = sigma*np.eye(model)
samples = random.multivariate_normal(mean,covar,sampleSize)
return samples
## q29 we give a mean of 5 for each model
def prior2(sigma,model,sampleSize):
if model == 0:
return np.zeros((4,512))
mean = np.full((model), 5)
covar = sigma*np.eye(model)
samples = random.multivariate_normal(mean,covar,sampleSize)
return samples
## q29 we change the covariance to no longer be diagonal
def prior3(sigma,model,sampleSize):
if model == 0:
return np.zeros((4,512))
mean = np.full((model), 5)
covar = sigma*np.full((model,model), 1)
samples = random.multivariate_normal(mean,covar,sampleSize)
return samples
## followed from given formula in coursework
def monte_carlo(data, sigma, model, sampleSize):
samples = prior(sigma, model, sampleSize)
evidence = 0
for i in range(sampleSize):
if model == 1:
evidence += M1(data, samples[i,:])
if model == 2:
evidence+= M2(data, samples[i,:])
if model == 3:
evidence+= M3(data, samples[i,:])
return evidence/sampleSize
def compute_evidence(data, sigma, sampleSize):
    """Compute the evidence of every dataset under models M0-M3.

    Returns a (512, 4) array: one row per dataset, one column per model.
    """
    evidence = np.zeros((4, 512))
    # M0 is uniform, so its row is constant
    evidence[0, :] = np.ones((512)) * M0()
    for idx in range(512):
        # models are evaluated in the same 1, 2, 3 order as the original,
        # which keeps the stream of random prior draws identical
        for model in (1, 2, 3):
            evidence[model, idx] = monte_carlo(data[idx, :], sigma, model, sampleSize)
    return np.transpose(evidence)
# --- Driver: compute the evidence for every dataset, then plot and inspect ---
dataSet = getData()
print('dataset', dataSet.shape)
evidence = compute_evidence(dataSet, 1000, 1000)
print('evidence', evidence)
# sanity check: evidence of each model should sum to ~1 over all datasets
evsum = np.sum(evidence,axis=0)
print(evsum)
## re order indices for plotting a pretty looking graph
index = create_index_set(evidence)
print('index shape', index)
## we display the x and o representing the values that correspond to the maximal and minimal dataset for each model
for m, dat in enumerate(np.transpose(evidence)):
    draw_x_o(dataSet[dat.tolist().index(max(dat))], m, "Maximal dataset for M{}".format(m))
print('---------------------------')
for m, dat in enumerate(np.transpose(evidence)):
    draw_x_o(dataSet[dat.tolist().index(min(dat))], m, "Minimal dataset for M{}".format(m))
# Plot 1: evidence curves in the raw dataset order
plt.figure()
plt.plot(evidence[:,3],'g', label= "P(D|M3)")
plt.plot(evidence[:,2],'r', label= "P(D|M2)")
plt.plot(evidence[:,1],'b', label= "P(D|M1)")
plt.plot(evidence[:,0],'m--', label = "P(D|M0)")
plt.xlim(0,520)
plt.ylim(0,0.12)
plt.xlabel('All dataSet without ordered indices')
plt.ylabel('evidence')
plt.title('evidence of all data sets')
plt.legend()
plt.show()
# Plot 2: same curves, reordered by create_index_set for smoothness
plt.figure()
plt.plot(evidence[index,3],'g', label= "P(D|M3)")
plt.plot(evidence[index,2],'r', label= "P(D|M2)")
plt.plot(evidence[index,1],'b', label= "P(D|M1)")
plt.plot(evidence[index,0],'m--', label = "P(D|M0)")
plt.xlim(0,520)
plt.ylim(0,0.12)
plt.xlabel('All dataSet')
plt.ylabel('evidence')
plt.title('evidence of all data sets with ordered indices')
plt.legend()
plt.show()
# Plot 3: zoom on the first 80 (ordered) datasets
plt.figure()
plt.plot(evidence[index,3],'g', label= "P(D|M3)")
plt.plot(evidence[index,2],'r', label= "P(D|M2)")
plt.plot(evidence[index,1],'b', label= "P(D|M1)")
plt.plot(evidence[index,0],'m--', label = "P(D|M0)")
plt.xlim(0,80)
# fix: the x-axis label was set with plt.ylabel in the original,
# so it overwrote nothing and the x axis stayed unlabeled
plt.xlabel('subset of possible dataSet')
plt.ylabel('evidence')
plt.ylim(0,0.12)
plt.title('evidence of subset of possible data sets')
plt.legend()
plt.show()
| jaskhalsa/machine-learning-models | model-selection-q26-q29.py | model-selection-q26-q29.py | py | 5,814 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.spatial.distance.squareform",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "scipy.spatial.distance.pdist",
"line_number": 24,
"usage_type": "call"
},
{
... |
70172202595 | from __future__ import division
from functools import partial
import pandas as pd
import os
from geography import fmesh_distance, quarter_meshcode, prefecture, load_prefs
from attenuation import amp_factors
nb_sites_max = 50000
exposure_path = '../04-Exposure/'
gem_path = 'GEM/'
site_effects_path = '../02-Site Effects/'
def collect_exposure(shape, radius):
    """Collect exposure data and nearby sites around a seismic source.

    Scans the GEM exposure csv files, keeps files whose mesh code is within
    `radius` of `shape` (via fmesh_distance), filters positions first with a
    cheap lat/lon bounding box and then with the exact distance, and caps the
    number of returned sites at nb_sites_max (closest first).

    Returns:
        (buildings, sites): two DataFrames; both empty if nothing matched.
    """
    file_names = os.listdir(exposure_path + gem_path)
    building_frames = []
    site_frames = []
    for name in file_names:
        fcode = name.split('.')[0]
        if fmesh_distance(fcode, shape, radius):
            exposure = pd.read_csv(exposure_path + gem_path + name)
            positions = exposure[['grid_id', 'lat', 'lon', 'AmpPGA', 'AmpPGV']].drop_duplicates()
            # Coarse bounding-box filter. The bounds are plain scalars: the
            # original wrapped them in one-element lists, which modern pandas
            # rejects when comparing against a Series of a different length.
            positions = positions.loc[(shape.lat - radius / shape.lat_km < positions['lat']) &
                                      (positions['lat'] < shape.lat + radius / shape.lat_km) &
                                      (shape.lon - radius / shape.lon_km < positions['lon']) &
                                      (positions['lon'] < shape.lon + radius / shape.lon_km)]
            if not positions.empty:
                # Exact distance filter on the bounding-box survivors.
                dists = positions.apply(shape.distance2, axis=1)
                positions = positions.merge(dists, left_index=True, right_index=True)
                positions = positions.loc[positions['distance'] <= radius]
                if not positions.empty:
                    print('Nb Sites : ', positions.shape[0])
                    site_frames.append(positions)
                    building_frames.append(exposure)
            # break
            # if sites.shape[0] > nb_sites_max:
            #     break
    # DataFrame.append was removed in pandas 2.0; accumulate and concat once.
    sites = pd.concat(site_frames) if site_frames else pd.DataFrame()
    buildings = pd.concat(building_frames) if building_frames else pd.DataFrame()
    if not sites.empty:
        to_del_cols = ['bldg_area', 'AmpPGV', 'AmpPGA']
        for col in to_del_cols:
            del buildings[col]
        if sites.shape[0] > nb_sites_max:
            sites.sort_values(by='distance', inplace=True)
            sites = sites.iloc[0:nb_sites_max]
    return buildings, sites
# Preparation of exposure
# Preparation of exposure
def collect_site_effects(name, positions):
    """Join soil amplification factors onto each position row.

    Loads the J-SHIS site-effects csv matching the mesh code in `name`,
    derives amplification factors (amp_factors) per quarter-mesh cell, and
    inner-joins them onto `positions` via each position's quarter meshcode.
    Positions with no matching quarter mesh are dropped by the inner join.
    """
    fcode = name.split('.')[0]
    name = 'Z-V3-JAPAN-AMP-VS400_M250-' + fcode
    # skiprows=6: the site-effects csv has a 6-line preamble before the header
    site_effects = pd.read_csv(site_effects_path + name + '/' + name + '.csv', skiprows=6, skipinitialspace=True)
    site_effects.rename(columns={'# CODE': 'qcode'}, inplace=True)
    amp = site_effects.apply(amp_factors, axis=1)
    site_effects = site_effects.merge(amp, left_index=True, right_index=True)
    meshcode = positions.apply(quarter_meshcode, axis=1)
    positions = positions.merge(meshcode, left_index=True, right_index=True)
    positions = positions.merge(site_effects, on='qcode', how='inner')
    print('Site Effects : OK')
    return positions
def assign_prefectures(positions):
    """Attach the prefecture of each position row, looked up from its lat/lon."""
    # bind the prefecture table once so it is not reloaded per row
    lookup = partial(prefecture, pref_df=load_prefs())
    pref_column = positions.apply(lookup, axis=1)
    positions = positions.merge(pref_column, left_index=True, right_index=True)
    print('Prefectures : OK')
    return positions
def prepare_exposure():
    """Enrich every GEM exposure csv in place with site effects and prefectures.

    For each csv file in the GEM exposure directory: drop unused columns,
    compute per-position site effects and prefectures, and write the merged
    result back to the same file.
    """
    # Filter with a comprehension instead of removing while iterating:
    # list.remove() inside a `for` over the same list shifts the remaining
    # elements, so the original could skip a non-csv name that immediately
    # followed another removed one.
    file_names = [name for name in os.listdir(exposure_path + gem_path)
                  if name.endswith('.csv')]
    for name in file_names:
        exposure = pd.read_csv(exposure_path + gem_path + name)
        if not exposure.empty:
            del exposure['ARV'], exposure['pref_id']
            positions = exposure[['grid_id', 'lon', 'lat']].drop_duplicates()
            positions = collect_site_effects(name, positions)
            positions = assign_prefectures(positions)
            # lat/lon live in `exposure` already; keep only the new columns
            del positions['lon'], positions['lat']
            exposure = exposure.merge(positions, on='grid_id')
            exposure.to_csv(exposure_path + gem_path + name, index=False)
# prepare_exposure()
| charlesco/EQCAT | EQCAT/sites.py | sites.py | py | 3,792 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "geography.fmesh_distance... |
32440994065 | #-----------------------------------------------------------------------------------------------#
# #
# I M P O R T L I B R A R I E S #
# #
#-----------------------------------------------------------------------------------------------#
from modules.utilities.utils import write_to_file_txt
from nltk.corpus import wordnet as wn
import pandas as pd
import re
#-----------------------------------------------------------------------------------------------#
# #
# Define global path variables for dataset files #
# #
#-----------------------------------------------------------------------------------------------#
train_entity = "data/chemport/chemprot_training/chemprot_training_entities.tsv"
devel_entity = "data/chemport/chemprot_development/chemprot_development_entities.tsv"
tests_entity = "data/chemport/chemprot_test_gs/chemprot_test_entities_gs.tsv"
train_feat = "data/sentenses/train_ftr.txt"
devel_feat = "data/sentenses/test_ftr.txt"
tests_feat = "data/sentenses/development_ftr.txt"
#-----------------------------------------------------------------------------------------------#
# #
# Define global path variables for newly created data files. #
# #
#-----------------------------------------------------------------------------------------------#
inter_words = "data/others/extracted_interactions.txt"
#-----------------------------------------------------------------------------------------------#
# #
# Column headers for different types of data files #
# #
#-----------------------------------------------------------------------------------------------#
column_names_entities = ['pmid', 'entity-number', 'type', 'start', 'end', 'name']
column_name = ['label', 'chemical', 'gene', 'loc_1', 'loc_2', 'sentence', 'pmid']
#***********************************************************************************************#
# #
# description: #
# main function to interaction words from the corpus of document provided. #
# #
#***********************************************************************************************#
def get_interaction_words():
# read data file to extract relations
train_data = pd.read_csv(train_feat, sep=',', lineterminator='\n', header=None, names=column_name, keep_default_na=False)
devel_data = pd.read_csv(devel_feat, sep=',', lineterminator='\n', header=None, names=column_name, keep_default_na=False)
tests_data = pd.read_csv(tests_feat, sep=',', lineterminator='\n', header=None, names=column_name, keep_default_na=False)
# read the entities data file for further refinement of dictionary
train_entities = pd.read_csv(train_entity, sep='\t', lineterminator='\n', header=None, names=column_names_entities, keep_default_na=False)
devel_entities = pd.read_csv(devel_entity, sep='\t', lineterminator='\n', header=None, names=column_names_entities, keep_default_na=False)
tests_entities = pd.read_csv(tests_entity, sep='\t', lineterminator='\n', header=None, names=column_names_entities, keep_default_na=False)
# read wordnet corpus to make certain adjustments
adverbs = {x.name().split('.', 1)[0] for x in wn.all_synsets('r')}
nouns = {x.name().split('.', 1)[0] for x in wn.all_synsets('n')}
adjec = {x.name().split('.', 1)[0] for x in wn.all_synsets('a')}
adjec_sat = {x.name().split('.', 1)[0] for x in wn.all_synsets('s')}
# call the extraction function for each dataset
set_1 = _interaction_words(train_data, train_entities, nouns, adjec, adjec_sat, adverbs)
set_2 = _interaction_words(devel_data, devel_entities, nouns, adjec, adjec_sat, adverbs)
set_3 = _interaction_words(tests_data, tests_entities, nouns, adjec, adjec_sat, adverbs)
# get a union of all three sets
union_set = set_1.union(set_2, set_3)
# finally write the final set to the text file
write_to_file_txt(union_set, inter_words)
#***********************************************************************************************#
# #
# description: #
# function to extract interaction words based on entities data provided. #
# #
#***********************************************************************************************#
def _interaction_words(sentence_data, entities_data, wordnet_nouns, wordnet_adjec, wordnet_adjec_sat, wordnet_advrb):
entity_dict = {entity for entity in entities_data['name'].values.tolist()}
rel_set = {""}
# a regex to match the new interaction word with
regex = re.compile("[a-zA-Z]*$")
# iterate over the entire sentence data and extract important words
for entry in sentence_data[['loc_1', 'loc_2', 'sentence']].values.tolist():
tokens = entry[2].split(" ")
start = entry[0] if entry[0]<entry[1] else entry[1]
end = entry[0] if entry[0]>entry[1] else entry[1]
for index in range(start+1, end):
if not bool(re.match(regex, tokens[index])):
continue
if tokens[index] in wordnet_nouns:
continue
if tokens[index] in wordnet_adjec:
continue
if tokens[index] in wordnet_adjec_sat:
continue
if tokens[index] in wordnet_advrb:
continue
if tokens[index] in entity_dict:
continue
if len(tokens[index])<4:
continue
rel_set.add(tokens[index])
# return the newly created interaction word set
return rel_set
| usamazf/cpi-extraction | modules/preprocessing/extract_interact_words.py | extract_interact_words.py | py | 6,926 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
30698941822 | '''
4C specific. Extracts the 5'->3' DNA sequence between a primer and its
restriction enzyme cut site.
'''
# TODO add check that tags starts with primers.
# output renamed primer.fa (fix _fwd _rev postfix)
import Bio.SeqIO
from pybedtools import BedTool
import collections
import bisect
from bx.intervals import intersection
import argparse
import logbook
from logbook import info, notice, warn
import os
import gzip
def load_genome_dir(genome_dir):
    '''
    Load a genome from a directory of per-chromosome fasta files.
    Arguments:
        `genome_dir`: directory containing gzipped per chrom fasta files
    Returns: A dict mapping chromosome name (file name without extension)
        to its upper-cased SeqRecord. Only the first record of each file
        is read.
    '''
    notice('called')
    names = os.listdir(genome_dir)
    dna = {}
    for x in names:
        assert x.endswith('.gz')
        name = x.split('.')[0]
        info('loading %s' % name)
        with gzip.open(os.path.join(genome_dir, x)) as f:
            stream = Bio.SeqIO.parse(f, 'fasta')
            # stream.next() is Python-2-only; the builtin next() works on
            # both Python 2 and 3.
            dna[name] = next(stream).upper()
    return dna
def load_genome(path):
    '''
    Load a genome from a single (optionally gzipped) multi-fasta file.
    Arguments:
        `path`: fasta file with one record per chromosome; '.gz' suffix
            selects gzip decompression
    Returns: A dict mapping record id to its upper-cased SeqRecord.
    '''
    notice('called')
    if path.endswith('.gz'):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'r')
    dna = {}
    # `with` guarantees the handle is closed even if parsing raises;
    # the original only closed the file on the success path.
    with f:
        for x in Bio.SeqIO.parse(f, 'fasta'):
            dna[x.id] = x.upper()
    return dna
def all_matches_in_genome(needle, genome):
    '''
    Yield every (possibly overlapping) occurrence of a sequence in a genome.
    Arguments:
        `needle`: sequence we are looking for
        `genome`: dict of sequence per chrom (haystack)
    Returns: An iterable of (chrom, start, end) ntuples for each match.
    '''
    BedEntry = collections.namedtuple('BedEntry', 'chrom start end')
    notice('called with needle "%s"' % needle)
    width = len(needle)
    for chrom, record in genome.items():
        # advance by one after each hit so overlapping matches are reported
        pos = record.seq.find(needle, 0)
        while pos != -1:
            yield BedEntry(chrom, pos, pos + width)
            pos = record.seq.find(needle, pos + 1)
def save_restriction_sites(outpath, genome, cut_site):
    """Write every restriction cut-site match in the genome to a 3-column bed file."""
    notice('called')
    with open(outpath, 'w') as f:
        for site in all_matches_in_genome(cut_site, genome):
            f.write('%s\t%s\t%s\n' % (site.chrom, site.start, site.end))
def save_primer_sites(outpath, primers_fasta_path, genome):
    '''
    Match each primer (and its reverse complement) against the genome.

    Writes all hits to `outpath` as 6-column BED: chrom, start, end,
    primer name, '.', strand ('+' for the primer itself, '-' for its
    reverse complement).
    '''
    notice('called')
    primer_hits = []
    for record in Bio.SeqIO.parse(primers_fasta_path, 'fasta'):
        primer_hits.append(
            (record.id, '+', all_matches_in_genome(record.seq, genome)))
        primer_hits.append(
            (record.id, '-',
             all_matches_in_genome(record.seq.reverse_complement(), genome)))
    with open(outpath, 'w') as out:
        for name, strand, hits in primer_hits:
            for hit in hits:
                out.write('%s\t%s\t%s\t%s\t.\t%s\n' % (
                    hit.chrom, hit.start, hit.end, name, strand))
class Lookup(object):
    '''
    Thin wrapper around bx-python's IntervalTree that additionally
    filters query results on chromosome.
    '''

    def __init__(self, items=None):
        self.tree = intersection.IntervalTree()
        if items is not None:
            for item in items:
                self.tree.add(item.start, item.end, item)

    def get_in_interval(self, chrom, start, end):
        # bx's find() ignores chromosome, so filter the hits ourselves.
        hits = self.tree.find(start, end)
        return [hit for hit in hits if hit.chrom == chrom]
def find_primer_pairs_bordering_fragment(rsites_path, primer_path,
max_dist):
'''
Finds pairs of primers flanking the same fragment.
A fragment is the interval between to adjacent restriction enzyme
cut sites.
The leftmost primer site must be on the reverse strand
and the rightmost primer site must be on the forward strand.
Arguments:
`rsites_path`: bed file path to restriction cut sites
`primer_path`: bed file path to primer matches in the genome
`max_dist`: the maximum distance between a primer and a cut site.
Returns:
An iterable of named tuples containing the flanking rsites and
the two primers.
(left_rsite left_primer right_primer right_rsite)
'''
notice('called')
struct = collections.namedtuple(
'PrimerFragment',
'left_rsite left_primer right_primer right_rsite')
mspi_sites = Lookup(BedTool(rsites_path))
# Group primer hits by their base name ("name_fwd"/"name_rev" -> "name").
primers = collections.defaultdict(list)
for p in BedTool(primer_path):
basename = p.name.split('_')[0]
primers[basename].append(p)
primers = {k:sorted(v, key=lambda x: (x.chrom, x.start))
for k, v in primers.items()}
tot_frag = 0
for basename, zs in primers.items():
# NOTE: "compting" is a typo in the log message (runtime string, left as-is).
info('compting for %s' % basename)
nfrag = 0
# Slide over adjacent primer hits (sorted by chrom, start).
for l, r in zip(zs[:-1], zs[1:]):
# RNA POL II moves 3' -> 5' along the template strand
# Reject pairs on different chromosomes, same hit name, wrong strand
# orientation, or too far apart.
# NOTE(review): the distance cutoff is a hard-coded 10**4 and uses
# l.start - r.end (non-positive for sorted l<=r) rather than
# r.start - l.end or `max_dist` -- confirm this is intended.
if (not l.chrom == r.chrom or
l.name == r.name or
l.strand == '+' or
r.strand =='-' or
l.start - r.end > 10**4):
continue
sites = mspi_sites.get_in_interval(l.chrom,
l.start - max_dist,
r.end + max_dist)
if len(sites) < 2: continue
starts = [x.start for x in sites]
# Both primers must fall between the same pair of adjacent cut sites,
# i.e. bisect to the same insertion index.
lidx = bisect.bisect(starts, l.start)
ridx = bisect.bisect(starts, r.start)
if not lidx == ridx: continue
lsite = sites[lidx-1]
rsite = sites[ridx]
# The flanking cut sites must fully enclose both primers.
if (lsite.start <= l.start and
r.end <= rsite.end):
nfrag += 1
yield struct(lsite, l, r, rsite)
notice('Stored %d fragments for %s' % (nfrag, basename))
tot_frag += nfrag
notice('Stored %d fragments in total' % tot_frag)
# BED-like interval produced for a primer tag: coordinates plus the tag
# name and the strand it is read from.
tagint = collections.namedtuple('taginterval',
'chrom start end name strand')
def bedentry_as_string(x, name=None, extra=None):
    '''
    Render a BED-entry-like object as one tab-separated line.

    When `name` is truthy it replaces the usual "name<TAB>.<TAB>strand"
    trailer; `extra`, when given, is appended as one more column.
    '''
    prefix = '%s\t%s\t%s\t' % (x.chrom, x.start, x.end)
    suffix = name if name else '%s\t.\t%s' % (x.name, x.strand)
    if extra is not None:
        suffix = '%s\t%s' % (suffix, extra)
    return prefix + suffix + '\n'
def get_tag_interval(primer, re_cut, re_offset, name=None):
    '''
    Build the tag interval spanning from a primer to its restriction cut
    site, trimmed by `re_offset` bases on the cut-site side.
    '''
    if primer.start < re_cut.start:
        # Primer lies left of the cut site: read forward into the site.
        assert primer.strand == '+'
        return tagint(primer.chrom, primer.start,
                      re_cut.end - re_offset, name, '+')
    # Primer lies right of the cut site: read backwards into the site.
    assert primer.strand == '-'
    return tagint(primer.chrom, re_cut.start + re_offset,
                  primer.end, name, '-')
# Write tag intervals for every fragment's two primers.
# With genome=None a BED file is written; otherwise the tag DNA sequences
# are extracted from `genome` and written as FASTA (deduplicated per tag
# name, warning on duplicates).
def save_tags(filepath, fragments, genome=None,
re_offset=0):
# Generator yielding ('left_primer'|'right_primer', taginterval) pairs,
# normalising primer names to a bare "<base>_rev" / "<base>_fwd" form.
def get_tag_intervals():
for frag in fragments:
# rev primer
rname = frag.left_primer.name
if rname.endswith('_fwd') or rname.endswith('_rev'):
rname = rname[:-4]
rname += '_rev'
rev_tag = get_tag_interval(frag.left_primer,
frag.left_rsite, name=rname,
re_offset=re_offset)
yield ('left_primer', rev_tag)
# fwd primer
fname = frag.right_primer.name
if fname.endswith('_fwd') or fname.endswith('_rev'):
fname = fname[:-4]
fname += '_fwd'
fwd_tag = get_tag_interval(frag.right_primer,
frag.right_rsite, name=fname,
re_offset=re_offset)
yield ('right_primer', fwd_tag)
notice('called')
if genome is None:
# BED mode: one line per tag, with the primer side as an extra column.
with open(filepath, 'w') as f:
for prim_loc, x in get_tag_intervals():
f.write(bedentry_as_string(x, extra=prim_loc))
return
# FASTA mode: collect unique sequences per tag name.
z = collections.defaultdict(set)
for prim_loc, x in get_tag_intervals():
seq = genome[x.chrom][x.start:x.end]
if prim_loc == 'left_primer':
# Left-of-fragment tags are read on the reverse strand.
assert x.strand == '-'
seq = seq.reverse_complement()
else:
assert x.strand == '+'
# NOTE(review): .tostring() is the legacy Biopython API -- newer
# versions use str(seq); confirm the pinned Biopython version.
seq = seq.seq.tostring()
if seq in z[x.name]:
warn('%s has multiple identical tag sequences.' % x.name)
else:
z[x.name].add(seq)
with open(filepath, 'w') as f:
for name in sorted(z):
v = z[name]
# Repeated names are written once per distinct sequence.
while len(v):
f.write('>%s\n' % name)
f.write('%s\n' % v.pop())
def save_fragments(filepath, fragments):
    '''Dump each fragment's two cut sites and two primers as BED lines.'''
    notice('called')
    with open(filepath, 'w') as out:
        for frag in fragments:
            out.write(bedentry_as_string(frag.left_rsite, name='rsite'))
            out.write(bedentry_as_string(frag.left_primer))
            out.write(bedentry_as_string(frag.right_rsite, name='rsite'))
            out.write(bedentry_as_string(frag.right_primer))
def main(out_dir, genome, primers_filepath, re_site,
         max_dist_rsite_primer,
         re_offset=0):
    '''
    End-to-end pipeline: locate primers and restriction sites (cached as
    BED files under `out_dir`), pair primers around fragments, then write
    the raw fragment BED, the tag BED and the tag FASTA.
    '''
    primer_sites_path = os.path.join(out_dir, 'primers.bed')
    re_cut_sites_path = os.path.join(out_dir, 're_cut_sites.bed')
    tags_bed_path = os.path.join(out_dir, 'tags.bed')
    tags_raw_bed_path = os.path.join(out_dir, 'tags_raw.bed')
    tags_fa_path = os.path.join(out_dir, 'tags.fa')
    # Both site scans are expensive, so reuse existing BED files.
    if os.path.isfile(primer_sites_path):
        notice('%s exists. using cached version' % primer_sites_path)
    else:
        save_primer_sites(primer_sites_path, primers_filepath, genome)
    if os.path.isfile(re_cut_sites_path):
        notice('%s exists. using cached version' % re_cut_sites_path)
    else:
        save_restriction_sites(re_cut_sites_path, genome, re_site)
    fragments = list(find_primer_pairs_bordering_fragment(
        re_cut_sites_path, primer_sites_path, max_dist_rsite_primer))
    save_fragments(tags_raw_bed_path, fragments)
    save_tags(tags_bed_path, fragments,
              re_offset=re_offset)
    save_tags(tags_fa_path, fragments, genome=genome,
              re_offset=re_offset)
| eivindgl/GenerateTags | generatetags/find_possible.py | find_possible.py | py | 9,987 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logbook.notice",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logbook.info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number":... |
21906658165 | import datetime
from elasticsearch import Elasticsearch
from mitmproxy import http
# Module-level Elasticsearch client; assumes a local node on port 9200.
es = Elasticsearch(['localhost'], port=9200)
def request(flow: http.HTTPFlow) -> None:
    """mitmproxy hook: index every intercepted request."""
    sendDataToEs("msteams_request", flow)
def response(flow: http.HTTPFlow) -> None:
    """mitmproxy hook: index every intercepted response."""
    sendDataToEs("msteams_response", flow)
def sendDataToEs(index: str, flow: http.HTTPFlow) -> None:
    """Index one flow's URL, request body and timestamp into Elasticsearch.

    NOTE(review): the *request* content is stored even when called from
    response() -- confirm that logging the request body for responses is
    intended.
    """
    document = {
        "url": flow.request.pretty_url,
        "content": flow.request.content.decode(),
        "timestamp": datetime.datetime.utcnow(),
    }
    try:
        es.create(index=index, id=flow.__hash__(), body=document)
        print("send data to es")
    except Exception as e:
        # Best-effort logging: indexing failures must not break the proxy.
        print(e)
        # exit()
| CaledoniaProject/public-src | mitmproxy/to-elasticsearch.py | to-elasticsearch.py | py | 701 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mitmproxy.http.HTTPFlow",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "mitmproxy.http",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "... |
977322900 | from xmlrpc.client import NOT_WELLFORMED_ERROR
import torch
import torch.utils.data as data
import torch.optim as optim
import torch.nn as nn
import numpy as np
from PIL import Image
from torchvision import transforms
import cv2
import dlib
import time
import imutils
from imutils.face_utils import rect_to_bb
from imutils.video import WebcamVideoStream
from datetime import datetime
# Webcam capture loop: detect faces with dlib's CNN detector, crop them,
# and save one 48x48 grayscale face image per second into the dataset dir.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
now = 0
model_name = "mmod_human_face_detector.dat"
detector = dlib.cnn_face_detection_model_v1(model_name)
# detector2 = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('dlib/shape_predictor_68_face_landmarks.dat')
# pred = np.argmax(pred.data.cpu().numpy(), axis = 1)
font = cv2.FONT_HERSHEY_SIMPLEX
vs = WebcamVideoStream().start()
# `i` numbers the saved images, continuing an existing dataset.
i = 6067
flag = 0
while True:
# Get the current frame and convert it for detection.
# NOTE(review): despite the variable name, the conversion below produces
# a grayscale image (COLOR_BGR2GRAY), not RGB -- confirm intent.
frame = vs.read()
img = frame.copy()
img = imutils.resize(img)
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(rgb, 0)
ratio = frame.shape[1] / img.shape[1]
# faces2 = detector2(rgb, 0)
# if len(faces2) != 0:
# # a face was detected
# for i in range(len(faces2)):
# # take the landmark coordinates
# landmarks = np.matrix([[p.x, p.y] for p in predictor(rgb, faces2[i]).parts()])
# for idx, point in enumerate(landmarks):
# # coordinates of the 68 points
# pos = (point[0, 0], point[0, 1])
# # use cv2.circle to draw a circle at each landmark (68 total)
# cv2.circle(frame, pos, 2, color=(139, 128 , 120))
# # use cv2.putText to write the numbers 1-68
# NOTE(review): the detector already ran above (`rects`, unused); this
# second call doubles the per-frame detection cost.
results = detector(rgb, 0)
boxes = [rect_to_bb(r.rect) for r in results]
for box in boxes:
# Compute the bounding box: (top-left X, top-left Y, width, height),
# scaled back to the original frame size.
box = np.array(box) * ratio
(x, y, w, h) = box.astype("int")
crop = img[y:y+h, x:x+w]
# Draw the bounding box.
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
time.sleep(0.07)
get_time = datetime.now()
second = get_time.second
# Save at most one crop per wall-clock second.
if second - now == 1:
img = cv2.resize(crop, (48, 48))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
path = 'C:\\face_detection\data\\face\\total\\' + str(i) + '.jpg'
cv2.imwrite(path, img)
i += 1
now = second
print(i)
elif flag == 0:
now = second
flag = 1
elif second == 0:
# Minute wrapped around: resynchronise the reference second.
now = second
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xff
if key == ord('q'):
cv2.destroyAllWindows()
break
| XiaYu-max/face.Concentration.train | capture.py | capture.py | py | 2,758 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "dlib.cnn_face_de... |
27069900658 | import json
import requests
from flask import current_app
from graphqlclient import GraphQLClient
class GraphQLClientRequests(GraphQLClient):
    """GraphQLClient variant whose transport uses `requests` instead of
    the base class's urllib, so request-level caching and Flask logging
    can be applied."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _send(self, query, variables):
        payload = {"query": query, "variables": variables}
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        if self.token is not None:
            headers[self.headername] = "{}".format(self.token)
        resp = requests.post(self.endpoint, json=payload, headers=headers)
        cache_tag = "[from cache] " if getattr(resp, "from_cache", False) else ""
        current_app.logger.info(
            "CHARITYBASE {}{}: {}".format(
                cache_tag,
                self.endpoint,
                json.dumps(variables),
            )
        )
        resp.raise_for_status()
        return resp.json()
| drkane/ngo-explorer | ngo_explorer/classes/graphqlclientrequests.py | graphqlclientrequests.py | py | 877 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "graphqlclient.GraphQLClient",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.current_app.logger.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name":... |
15896783909 | import gitlab
import argparse
import subprocess
import os
import sys
import re
from datetime import datetime
from urllib import quote
# GitLab API configuration
GITLAB_URL = 'https://gitlabe1.ext.net.nokia.com' # Update with your GitLab URL
# SECURITY(review): hard-coded access token committed to source control --
# move to an environment variable or a secrets store and rotate it.
GITLAB_TOKEN = '2wtY6dsFnu5Xas7BUDw2' # Update with your GitLab access token
PROJECT_ID = '63330' # Update with your project ID
LAST_COMMIT_FILE = 'last_commit.txt' # File to store the last checked commit ID
RESULT_FILE = 'result.txt' # File to store the result
SPECIFIC_FILENAME = 'common/' # Update with the specific filename to filter
MAKEFILE_FILENAME = '/weixw_repo/nbn_target/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/Makefile'
OUTPUT_FILE = 'build_output.txt' # File to save the command output
LOG_DIR = '/weixw_repo/logs/trCommon/'
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Check for new commit IDs with the specific filename on a branch.')
parser.add_argument('branch', help='Name of the branch to check')
parser.add_argument('product', help='Name of the product to check')
args = parser.parse_args()
# Branch/product dispatch: selects the Makefile, build directory and
# coverity directory for the chosen combination; exits on unknown input.
if args.branch == "master":
if args.product == "WNTD4":
MAKEFILE_FILENAME = '/weixw_repo/nbn_target/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/Makefile'
CURRENT_DIR = '/weixw_repo/nbn_target/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/'
COVERITY_DIR = '/weixw_repo/nbn_target/tr069Common/tr069_FWA/'
elif args.product == "5GGW3-OMNI-1":
MAKEFILE_FILENAME = '/weixw_repo/target_repo/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/Makefile'
CURRENT_DIR = '/weixw_repo/target_repo/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/'
COVERITY_DIR = '/weixw_repo/target_repo/tr069Common/tr069_FWA/'
else:
print("input invalid product name!(WNTD4/5GGW3-OMNI-1/...)")
sys.exit(0)
elif args.branch == "BBD_R2302":
if args.product == "WNTD4":
MAKEFILE_FILENAME = '/weixw_repo/2302_repo/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/Makefile'
CURRENT_DIR = '/weixw_repo/2302_repo/BBDFWA_Appsrc/Mgnt/OAMCore/tr069_ms/'
COVERITY_DIR = '/weixw_repo/2302_repo/tr069Common/tr069_FWA/'
else:
print("input invalid product name!(WNTD4/...)")
sys.exit(0)
else:
print("input invalid branch name!(master/BBD_R2302/...)")
sys.exit(0)
# NOTE: "diretory" is a typo in the runtime message (left as-is).
print('current project diretory: ' + CURRENT_DIR)
# Create GitLab client
gl = gitlab.Gitlab(GITLAB_URL, private_token=GITLAB_TOKEN)
# Get the project
project = gl.projects.get(PROJECT_ID)
# Per-branch/product state files live under LOG_DIR.
LAST_COMMIT_FILE = LOG_DIR + args.branch + '_' + args.product + '_' + LAST_COMMIT_FILE
# Read the last checked commit ID
# NOTE(review): mode 'a+' creates the file if missing but positions the
# read pointer at EOF, so read() likely always returns '' here (and the
# FileNotFoundError branch is unreachable) -- confirm; 'r' plus the
# except branch, or an explicit seek(0), may be what was intended.
try:
with open(LAST_COMMIT_FILE, 'a+') as file:
last_commit = file.read().strip()
except FileNotFoundError:
last_commit = None
# Get the branch
#branch = project.branches.get(BRANCH_NAME)
branch = project.branches.get(args.branch)
# Get the commits
#commits = project.commits.list(all=True)
# Get the commits on the branch
commits = project.commits.list(ref_name=branch.name, all=True)
# Check for new commit IDs and filter for the specific filename
# Walk newest-first until the previously seen commit, keeping commits
# whose diff touches SPECIFIC_FILENAME.
new_commits = []
for commit in commits:
if commit.id == last_commit:
break
commit_diff = project.commits.get(commit.id).diff()
for diff in commit_diff:
if SPECIFIC_FILENAME in diff.get('new_path'):
new_commits.append(commit.id)
break
# Update the last checked commit ID
#if commits and last_commit != commits[0].id:
# with open(LAST_COMMIT_FILE, 'w') as file:
# file.write(commits[0].id)
RESULT_FILE = LOG_DIR + args.branch + '_' + args.product + '_' + RESULT_FILE
currentDateAndTime = datetime.now()
currentTime = currentDateAndTime.strftime("%Y-%m-%d %H:%M:%S")
currentDate = currentDateAndTime.strftime("%Y-%m-%d")
# Truncate and re-write the result file for this run, starting with the
# timestamp. NOTE: `file` shadows the Python 2 builtin of the same name.
file = open(RESULT_FILE, 'a+')
file.seek(0)
file.truncate()
file.write(currentTime)
file.write('\n')
# Print the result
if new_commits:
result_output = "New commit IDs with the specific filename found:"
print(result_output)
file.write(result_output)
file.write('\n')
#file = open(RESULT_FILE, 'a')
#file.seek(0)
#file.truncate()
for commit_id in new_commits:
print(commit_id)
file.write(commit_id)
file.write('\n')
#with open(SPECIFIC_FILENAME, 'r+') as file:
# content = file.read()
# for commit_id in new_commits:
# content = content.replace(last_commit, commit_id)
# file.seek(0)
# file.write(content)
# file.truncate()
#file.close()
else:
result_output = "No new commit IDs with the specific filename."
print(result_output)
file.write(result_output)
#file.close()
OUTPUT_FILE = LOG_DIR + args.branch + '_' + args.product + '_' + OUTPUT_FILE
# Function to run a Linux command and capture the execution status
def run_command(command, directory):
    """Run a shell command, capture its output, and report success.

    Arguments:
        command: shell command line to execute.
        directory: working directory for the command.
    Returns:
        True when the command ran without a known failure marker in its
        stderr; False on a recognised failure or a CalledProcessError.
    Side effects:
        Appends the command's stdout and stderr to OUTPUT_FILE.
    """
    try:
        # Bug fix: the original hard-coded cwd=CURRENT_DIR, silently
        # ignoring `directory` -- so the coverity step (which passes
        # COVERITY_DIR) ran in the wrong directory.
        process = subprocess.Popen(command, shell=True, cwd=directory,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        with open(OUTPUT_FILE, 'a+') as file:
            file.write(stdout.decode('utf-8'))
            file.write(stderr.decode('utf-8'))
        failure_messages = ["Error 2", "Build failed", "[ERROR]"]
        # Check for specific failures or errors in stderr.
        stderr_text = stderr.decode("utf-8")
        for failure_message in failure_messages:
            if failure_message in stderr_text:
                print("Failure occurred:", failure_message)
                return False
        return True
    except subprocess.CalledProcessError:
        return False  # Command execution failed
# Start each run with a fresh build-output log; ignore a missing file.
try:
os.remove(OUTPUT_FILE)
except OSError:
pass
def sendemail(content):
    """Trigger the Jenkins SendMail job with this run's report.

    The recipients, subject (branch/product/date) and `content` are URL
    encoded into the job's buildWithParameters endpoint and fired via
    curl without waiting for completion.
    """
    myemail = "wei.xb.wang@nokia-sbell.com,pu.a.zhou@nokia-sbell.com,yaxiang.chen@nokia-sbell.com,yueping.zhou@nokia-sbell.com,heming.a.tang@nokia-sbell.com"
    #myemail = "wei.xb.wang@nokia-sbell.com"
    subject = "tr069Common Compile Report (" + args.branch + "/" + args.product + ") -- " + currentDate
    # Escape special characters in the subject and content
    escaped_subject = quote(subject)
    escaped_content = quote(content)
    # Substitute variable values in the URL (attachments not supported).
    url = (
        "http://135.251.205.171:8090/job/SendMail/buildWithParameters"
        "?Recipient_List={0}&Subject={1}&Content={2}"
    ).format(myemail, escaped_subject, escaped_content)
    # Construct the curl command
    command = "curl -X POST '{0}' --form-string token=GoodboywillnotsendmaiL".format(url)
    # Execute the command without waiting for its completion
    subprocess.Popen(command, shell=True)
def replaceCommitid(commitid):
    """Point the Makefile's PKG_SOURCE_COMMITID line at `commitid`.

    Reads MAKEFILE_FILENAME, regex-replaces the commit-id line, and
    writes the file back in place.
    """
    print("Replaced the previous commit ID with the new commit ID in the specific file -- " + commitid)
    # Read the current Makefile content.
    with open(MAKEFILE_FILENAME, 'r') as make_file:
        content = make_file.read()
    # Replace whatever follows "PKG_SOURCE_COMMITID = " with the new id.
    new_line = 'PKG_SOURCE_COMMITID = ' + commitid
    print(new_line)
    modified_content = re.sub(r'PKG_SOURCE_COMMITID = .+', new_line, content)
    # Write the modified content back to the file.
    with open(MAKEFILE_FILENAME, 'w') as make_file:
        make_file.write(modified_content)
# Main build flow: only runs when the branch head moved since last check.
# Sequence: update Makefile commit id -> make clean -> make -> (on success)
# record head commit -> run coverity -> email the report.
if commits and last_commit != commits[0].id:
# Update the last checked commit ID
#with open(LAST_COMMIT_FILE, 'w') as filecommit:
# filecommit.write(commits[0].id)
# Replace the previous commit ID with the new commit ID in the specific file using sed
replaceCommitid(commits[0].id)
isTerminal = False
# Example usage: Run a Linux command in a specific directory and check if it succeeded or failed
command = "make clean"
print(command)
file.write(command)
file.write('\n')
success = run_command(command,CURRENT_DIR)
if success:
result_output = 'make clean successful. pls see ' + OUTPUT_FILE
else:
result_output = 'make clean failed. pls see ' + OUTPUT_FILE
isTerminal = True
result_output += '\n'
print(result_output)
file.write(result_output)
email_content = result_output
# On failure: email what we have and stop before building.
if isTerminal:
sendemail(email_content)
file.close()
sys.exit(0)
command = "make product=" + args.product
print(command)
file.write(command)
file.write('\n')
success = run_command(command,CURRENT_DIR)
if success:
result_output = 'build tr069common successful. pls see ' + OUTPUT_FILE
else:
result_output = 'build tr069common failed. pls see ' + OUTPUT_FILE
isTerminal = True
result_output += '\n'
print(result_output)
file.write(result_output)
email_content += result_output
if isTerminal:
sendemail(email_content)
file.close()
sys.exit(0)
# Update the last checked commit ID (only after a successful build).
with open(LAST_COMMIT_FILE, 'w') as filecommit:
filecommit.write(commits[0].id)
#run coverity
command = "DOMAIN=OAM run_coverity.sh " + args.product
print(command)
file.write(command)
file.write('\n')
success = run_command(command,COVERITY_DIR)
if success:
result_output = 'coverity running successful. pls see ' + OUTPUT_FILE
else:
result_output = 'coverity running failed. pls see ' + OUTPUT_FILE
print(result_output)
file.write(result_output)
email_content += result_output
#send email
sendemail(email_content)
file.close()
| lucewang/NewMakefile | tr_checkcommit.py | tr_checkcommit.py | py | 10,662 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number... |
7757734215 | import dog
from cat import cat
# Access attributes
mydog=dog.dog('willie',6)
print("My dog's name is "+mydog.name.title()+".")
print("My dog is "+str(mydog.age)+" years old")
# Call a method
mydog.sit()
# Change an attribute's value
mydog.age=20
print("My dog is "+str(mydog.age)+" years old")
# Inheritance: cat inherits from dog. A subclass inherits all of the
# parent's attributes and methods, and may also add new ones.
mycat=cat('paris',2)
mycat.rool_over()
mycat.printsex()# call a method added by the subclass
mycat.sit()# call a method that overrides the parent's version
print(str(mycat.t.t))# an instance used as an attribute
# Python standard library: the OrderedDict class from the collections
# module, as an example (preserves insertion order of keys).
from collections import OrderedDict
favorite_languages=OrderedDict()
favorite_languages['jen']='python'
favorite_languages['sarsh']='c'
favorite_languages['puil']='python'
for name,language in favorite_languages.items():
print(name.title()+"'s favorite language is "+language.title())
| zXin1112/Python-Practice | cless/cless/cless.py | cless.py | py | 892 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dog.dog",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cat.cat",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 24,
"usage_type": "call"
}
] |
18209134724 | # This program include
# 1. receiving data from USB by pyserial
# 2. Normalize the data between 0 and 1
# 3. Store the data in image/ directory in .jpg format
# When True, echo every raw serial line to stdout (debugging aid).
PRINT_DATA=True
from models.model_classes.convNet import *
from txtToJpg import *
from test import *
from const import *
from utils import removeTxt
from time import sleep
import serial.tools.list_ports
import time
import os
def portSetup():
    """List the available serial ports, then open and return the
    configured one (SERIAL_PORT / BAUD_RATE come from const)."""
    available = serial.tools.list_ports.comports(0)
    inst = serial.Serial()
    names = []
    #serialPort="/dev/cu.usbserial-0001 - CP2102 USB to UART Bridge Controller - CP2102 USB to UART Bridge Controller"
    for one_port in available:
        # The listing is printed purely for the user's information.
        names.append(str(one_port))
        print(str(one_port))
    inst.baudrate = BAUD_RATE
    inst.port = SERIAL_PORT
    inst.open()
    return inst
def isStart(txt):
    """Return 1 when `txt` has exactly 7 characters (the length of the
    start marker line), else 0."""
    return 1 if len(txt) == 7 else 0
def isSave(txt):
    """Return 1 when `txt` has exactly 6 characters (the length of the
    save marker line), else 0."""
    return 0 if len(txt) != 6 else 1
#main receive loop
# Main receive loop: reads IMU lines from the serial port, buffers one
# fixed-length window (SAMPLE_SIZE lines) into 0.txt, converts it to a
# jpg, and runs the CNN classifier on the image.
def FixedLengthData_RealTimePredict(serialInst):
#local const
realTimePredictionDir=realTimePrediction.ROOT_PATH
txt_file_name=os.path.join(realTimePredictionDir,"0.txt")
image_file_name=os.path.join(realTimePredictionDir,"0.jpg")
#local variable
count=1
txtIndx=0
writing=False
#os.remove(txt_file_name)
#os.remove(image_file_name)
while True:
#print("running")
if serialInst.in_waiting:
packet=serialInst.readline()
txt=(packet.decode('ISO-8859-1'))
if PRINT_DATA:
print(txt)
with open(txt_file_name, 'a') as f:
# A 7-char line marks the start of a capture window.
if isStart(txt) and not (writing):
writing=True
startTime=time.time()
print("Start detected")
if writing==True:
#print("writing True")
if isStart(txt)==0 and isSave(txt)==0:#IMU data
if count<=SAMPLE_SIZE+1:
f.write(txt)
# Window full: convert to image, predict, reset state.
if count==SAMPLE_SIZE+1:
txtIndx+=1
count=0
writing=False
print(time.time()-startTime)
print(TRASH_STRING)
#convert txt to image
#Problem with storeTxtToJpg
storeTxtToJpg(realTimePredictionDir,realTimePredictionDir,label="",mode="prediction")
#model_path=os.path.join(Models.TRAINED_MODELS_PATH,"OptimConvNet2_20230113_151355")
# NOTE(review): the model is rebuilt and reloaded from disk for
# every captured window -- hoisting this out of the loop would
# avoid repeated I/O.
model_path="./cnn"
model=OptimConvNet2(output_size=3)
model.load_state_dict(torch.load(model_path,map_location=torch.device('cpu')))
img_path=os.path.join(realTimePrediction.ROOT_PATH,"0.jpg")
print(predictSingleImage(img_path,model=model))
#prediction on the image
#remove 0.txt
os.remove(txt_file_name)
sleep(2)
count+=1
# Script entry point: open the configured serial port, then loop forever.
if __name__=="__main__":
serialInst=portSetup()
FixedLengthData_RealTimePredict(serialInst) | navilo314hku/FYP | realTimePrediction.py | realTimePrediction.py | py | 3,387 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "serial.tools.list_ports.tools.list_ports.comports",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "serial.tools.list_ports.tools",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "serial.tools.list_ports",
"line_number": 16,
"usage_... |
20079881856 | # -*- coding: utf-8 -*-
import feedparser
import json
import os
import time # besoin pour time_obj->json
# see https://docs.python.org/2/library/time.html#time.struct_time
# def json_serial(obj):
# """JSON serializer for objects not serializable by default json code"""
# print('hello')
# if isinstance(obj, time.struct_time):
# serial = tuple( obj )
# print(serial)
# return serial
# raise TypeError ("Type not serializable")
import sys

# Guard: this script relies on Python-3 behaviour.
if sys.version_info[0] < 3:
    # Bug fix: `raise "..."` is itself a TypeError on Python 3 (strings
    # are not exceptions), so the intended message never reached the
    # user; raise a real exception instead.
    raise RuntimeError("Must be using Python 3")

print( '... update rss feeds ...' )
print(" " + time.strftime("%x %X") )
# Feed name -> URL for every source to poll.
feedsToFetch = { \
'Liberation_laUne': 'http://rss.liberation.fr/rss/latest/',\
'Liberation_chroniques': 'http://rss.liberation.fr/rss/44/',\
'leMonde_laUne': 'http://www.lemonde.fr/rss/une.xml',\
'leMonde_Actu': 'http://www.lemonde.fr/m-actu/rss_full.xml',\
'leMonde_Idees': 'http://rss.lemonde.fr/c/205/f/3051/index.rss',\
'lesEchos_journaldujour': 'http://www.lesechos.fr/rss/rss_articles_journal.xml' ,\
'humanite':'http://www.humanite.fr/rss/actu.rss',\
'mediapart':'https://www.mediapart.fr/articles/feed',\
'rue89':'http://api.rue89.nouvelobs.com/feed',\
'leFigaro_laUne':'http://rss.lefigaro.fr/lefigaro/laune?format=xml',\
'lePoint_24hinfo':'http://www.lepoint.fr/24h-infos/rss.xml',\
'lePoint_chroniques':'http://www.lepoint.fr/chroniques/rss.xml',\
'FranceSoir': 'http://www.francesoir.fr/rss.xml' ,\
'leParisien': 'http://www.leparisien.fr/actualites-a-la-une.rss.xml#xtor=RSS-1481423633',\
'courrierinternational':'http://www.courrierinternational.com/feed/all/rss.xml' }
## load allfeeds_info
dir_rssData = './data/'
feedinfo_filename = dir_rssData + 'allfeeds_info.json'
if not os.path.exists(dir_rssData):
os.makedirs(dir_rssData)
allfeeds_info = {}
if os.path.isfile(feedinfo_filename):
with open(feedinfo_filename, 'r') as file:
allfeeds_info = json.load(file)
# Warn about feeds that exist in the saved state but were removed from
# the configuration above.
fluxoubliees = [ key for key in allfeeds_info.keys() if key not in feedsToFetch]
if fluxoubliees:
print( u'flux oublié(s): '+' '.join(fluxoubliees) )
# Add any newly configured feeds to the saved state:
for name, url in feedsToFetch.items():
if name not in allfeeds_info:
allfeeds_info[name] = {'name':name, 'url':url}
## ---
# Poll each feed, using etag/modified headers for conditional requests.
for name, feed_info in allfeeds_info.items():
#print('// Request for %s ...' % feed_info['name'])
## --- Query ---
if 'etag' in feed_info:
data = feedparser.parse( feed_info['url'], etag=feed_info['etag'] )
elif 'modified' in feed_info:
data = feedparser.parse( feed_info['url'], modified=feed_info['modified'] )
else:
data = feedparser.parse( feed_info['url'] )
## --- Tell Status ---
if 'status' not in data:
print( name+'_Warning_ : no status, %s' % data['bozo_exception'] )
elif data['status'] == 304:
# 304: unchanged since the last conditional request.
print( '{:>24s}: pas de nouveaux post (etag or modified)'.format(name) )
elif data['status'] == 301:
print( name+'_Warning!_ The feed has permanently moved to a new location. URL updated.' )
feed_info['url'] = data.href
elif data['status'] == 410:
print( name+'_Warning!_ the feed is gone. URL removed. ' )
feed_info['url'] = None
elif data['status'] == 200:
#print( '_no problem! :) _ ' )
pass
else:
print( name+'_nop_ status: %i '% data['status'])
## --- Tell Bozo ---
if data['bozo']:
print( name+'_Warning_ : erreur bozo, %s' % data['bozo_exception'] )
## --- Go for Entries ---
if 'entries' in data and len(data['entries'])>0 :
# print( '_yep_ get %i posts' % len(data['entries']) )
# load rss_data file
filename = dir_rssData + 'rss_data_%s.json' % feed_info['name']
feed_info['filename'] = filename
if os.path.isfile(filename):
with open(filename, 'r') as file:
rss_data = json.load(file)
else: rss_data = {}
nEntriesBefore = len( rss_data )
# Merge new posts into the per-feed dict, keyed by post id (or link).
# NOTE(review): if a post has neither 'id' nor 'link', `key` keeps its
# previous value (or is unbound on the first post) -- confirm feeds
# always provide one of the two.
nRejected = 0
nAdded = 0
for post in data['entries']:
if 'id' in post:
key = post['id']
elif 'link' in post:
key = post['link']
if key in rss_data:
nRejected += 1
else:
rss_data[ key ] = post
nAdded += 1
print( '{:>24s}: {:>3d} added, {:>3d} rejected, {:>5d} total'.format( \
name, nAdded, nRejected, len(rss_data) ) )
# save rss_data file
with open(filename, 'w') as outfile:
json.dump(rss_data, outfile )# , default=json_serial)
## --- update FeedInfo ---
# Remember the caching tokens for the next conditional request.
if 'etag' in data:
feed_info['etag'] = data['etag']
elif 'modified' in data:
feed_info['modified'] = data['modified']
if 'feed' in data and data['feed']:
feed_info['feed'] = data['feed']
if 'updated' in data:
feed_info['updated'] = data['updated']
if 'updated_parsed' in data:
feed_info['updated_parsed'] = data['updated_parsed']
# save allfeeds_info
allfeeds_info[ feed_info['name'] ] = feed_info
with open(feedinfo_filename, 'w') as outfile:
json.dump(allfeeds_info, outfile )#, default=json_serial)
| xdze2/lesMotsDesJournaux | getData_feedparser.py | getData_feedparser.py | py | 5,475 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
72068568354 | import sys
import copy
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
sys.path.append('../..')
import modules.commons as commons
import modules.modules as modules
import modules.attentions as attentions
from modules.commons import init_weights, get_padding
from modules.ddsp import mlp, gru, scale_function, remove_above_nyquist, upsample
from modules.ddsp import harmonic_synth, amp_to_impulse_response, fft_convolve
from modules.ddsp import resample
import utils
from modules.stft import TorchSTFT
import torch.distributions as D
from modules.losses import (
generator_loss,
discriminator_loss,
feature_loss,
kl_loss
)
# Negative slope shared by LeakyReLU activations in this module.
LRELU_SLOPE = 0.1
class PostF0Decoder(nn.Module):
    """Convolutional head predicting a single-channel track from hidden
    features.

    Two conv -> relu -> layernorm -> dropout stages followed by a 1x1
    projection; an optional speaker embedding (``g``) is projected and
    added to the input. Inputs are detached, so no gradient flows back
    into the upstream network.
    """

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, spk_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = spk_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)
        if spk_channels != 0:
            # Projects the speaker embedding into the input feature space.
            self.cond = nn.Conv1d(spk_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        x = torch.detach(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        # Two identical conv stages, masked at every step.
        for conv, norm in ((self.conv_1, self.norm_1),
                           (self.conv_2, self.norm_2)):
            x = conv(x * x_mask)
            x = torch.relu(x)
            x = norm(x)
            x = self.drop(x)
        return self.proj(x * x_mask) * x_mask
class TextEncoder(nn.Module):
    """Self-attention encoder over content features.

    A linear pre-net lifts ``c_dim`` features to ``hidden_channels``, a
    stack of attention blocks runs under a sequence-length mask, and a
    1x1 conv projects to ``out_channels``.
    """

    def __init__(self,
                 c_dim,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.pre_net = torch.nn.Linear(c_dim, hidden_channels)
        self.encoder = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)

    def forward(self, x, x_lengths):
        # Move the channel axis last for the Linear pre-net, then back
        # (presumably x is (batch, channels, time) -- the surrounding
        # conv layers use that layout).
        h = self.pre_net(x.transpose(1, -1))
        h = torch.transpose(h, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(
            commons.sequence_mask(x_lengths, h.size(2)), 1).to(h.dtype)
        h = self.encoder(h * x_mask, x_mask)
        h = self.proj(h) * x_mask
        return h, x_mask
def pad_v2(input_ele, mel_max_length=None):
    """Right-pad a list of 1-D or 2-D tensors to a common length and stack them.

    Args:
        input_ele: list of tensors shaped [t] or [t, d] (t may differ per item).
        mel_max_length: target length; defaults to the longest item's length.

    Returns:
        Tensor of shape [batch, max_len] or [batch, max_len, d].

    Raises:
        ValueError: if an element is neither 1-D nor 2-D. (The original fell
        through both branches and crashed later with an UnboundLocalError.)
    """
    if mel_max_length:
        max_len = mel_max_length
    else:
        max_len = max(ele.size(0) for ele in input_ele)
    out_list = []
    for batch in input_ele:
        if batch.dim() == 1:
            # Pad the single (time) axis on the right.
            one_batch_padded = F.pad(
                batch, (0, max_len - batch.size(0)), "constant", 0.0
            )
        elif batch.dim() == 2:
            # Pad the first (time) axis only; feature axis untouched.
            one_batch_padded = F.pad(
                batch, (0, 0, 0, max_len - batch.size(0)), "constant", 0.0
            )
        else:
            raise ValueError(
                f"pad_v2 expects 1-D or 2-D tensors, got {batch.dim()}-D"
            )
        out_list.append(one_batch_padded)
    return torch.stack(out_list)
class LengthRegulator(nn.Module):
    """ Length Regulator

    FastSpeech-style expansion: repeats each input frame `duration[i]` times
    and appends a 2-channel "state info" feature (index within the expansion,
    total expansion length) to every expanded frame.
    """

    def __init__(self):
        super(LengthRegulator, self).__init__()

    def LR(self, x, duration, max_len):
        # x: [b, c, t] -> iterate per batch item as [t, c].
        x = torch.transpose(x, 1, 2)
        output = list()
        mel_len = list()
        for batch, expand_target in zip(x, duration):
            expanded = self.expand(batch, expand_target)
            output.append(expanded)
            mel_len.append(expanded.shape[0])
        # Pad all expanded items to a common length and restack as [b, c, t'].
        if max_len is not None:
            output = pad_v2(output, max_len)
        else:
            output = pad_v2(output)
        output = torch.transpose(output, 1, 2)
        return output, torch.LongTensor(mel_len)

    def expand(self, batch, predicted):
        # batch: [t, c]; predicted: per-frame integer repeat counts.
        predicted = torch.squeeze(predicted)
        out = list()
        for i, vec in enumerate(batch):
            expand_size = predicted[i].item()
            # Positional side-channel: frame index within the expansion, and
            # the expansion length, both as float columns.
            state_info_index = torch.unsqueeze(torch.arange(0, expand_size), 1).float()
            state_info_length = torch.unsqueeze(torch.Tensor([expand_size] * expand_size), 1).float()
            state_info = torch.cat([state_info_index, state_info_length], 1).to(vec.device)
            new_vec = vec.expand(max(int(expand_size), 0), -1)
            new_vec = torch.cat([new_vec, state_info], 1)
            out.append(new_vec)
        # NOTE(review): if every expand_size is 0 this cat receives only empty
        # tensors — confirm callers guarantee at least one nonzero duration.
        out = torch.cat(out, 0)
        return out

    def forward(self, x, duration, max_len):
        output, mel_len = self.LR(x, duration, max_len)
        return output, mel_len
class PriorDecoder(nn.Module):
    """FFT-block decoder that emits stacked prior statistics (out_bn_channels)."""

    def __init__(self,
                 out_bn_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 n_speakers=0,
                 spk_channels=0):
        super().__init__()
        self.out_bn_channels = out_bn_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.spk_channels = spk_channels
        self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
        self.decoder = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_bn_channels, 1)
        if n_speakers != 0:
            self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)

    def forward(self, x, x_lengths, spk_emb=None):
        mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        h = self.prenet(x) * mask
        if spk_emb is not None:
            # Speaker embedding enters as an additive per-frame bias.
            h = h + self.cond(spk_emb)
        h = self.decoder(h * mask, mask)
        bn = self.proj(h) * mask
        return bn, mask
class Decoder(nn.Module):
    """FFT-block decoder head operating on detached inputs.

    Same shape as PriorDecoder but with a configurable input width and
    `out_channels` outputs; gradients do not flow back into the input.
    """

    def __init__(self,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 n_speakers=0,
                 spk_channels=0,
                 in_channels=None):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.spk_channels = spk_channels
        prenet_in = in_channels if in_channels is not None else hidden_channels
        self.prenet = nn.Conv1d(prenet_in, hidden_channels, 3, padding=1)
        self.decoder = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        if n_speakers != 0:
            self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)

    def forward(self, x, x_lengths, spk_emb=None):
        # Stop gradients: this head trains without updating its input's producer.
        h = torch.detach(x)
        mask = torch.unsqueeze(commons.sequence_mask(x_lengths, h.size(2)), 1).to(h.dtype)
        h = self.prenet(h) * mask
        if spk_emb is not None:
            h = h + self.cond(spk_emb)
        h = self.decoder(h * mask, mask)
        return self.proj(h) * mask, mask
class F0Decoder(nn.Module):
    """Predicts (log-)F0 from text features plus a normalised-F0 prompt.

    Same FFT decoder shape as `Decoder`, with an extra prenet that injects
    the normalised F0 conditioning signal before the main prenet.
    """

    def __init__(self,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 n_speakers=0,
                 spk_channels=0,
                 in_channels=None):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.spk_channels = spk_channels
        self.prenet = nn.Conv1d(in_channels if in_channels is not None else hidden_channels, hidden_channels, 3, padding=1)
        self.decoder = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1)
        if n_speakers != 0:
            self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)

    def forward(self, x, norm_f0, x_lengths, spk_emb=None):
        x = torch.detach(x)
        # BUG FIX: the original used `x += self.f0_prenet(norm_f0)` here.
        # `torch.detach` returns a view that SHARES storage with the caller's
        # tensor, so the in-place add silently mutated the caller's
        # `decoder_input` (which is reused afterwards in SynthesizerTrn).
        # An out-of-place add keeps the detach purely local.
        x = x + self.f0_prenet(norm_f0)
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.prenet(x) * x_mask
        if spk_emb is not None:
            x = x + self.cond(spk_emb)
        x = self.decoder(x * x_mask, x_mask)
        x = self.proj(x) * x_mask
        return x, x_mask
class ConvReluNorm(nn.Module):
    """Stack of Conv1d -> LayerNorm -> ReLU -> Dropout blocks.

    Later blocks are averaged with their input ((x + x_) / 2) rather than
    plainly added. The final 1x1 projection is zero-initialised, so the
    module initially outputs zeros.
    """

    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        # BUG FIX: the check is `n_layers > 1` but the message claimed
        # "larger than 0"; the message now matches the actual condition.
        assert n_layers > 1, "Number of layers should be larger than 1."
        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        # Zero-initialised projection: the block contributes nothing at start.
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x):
        x = self.conv_layers[0](x)
        x = self.norm_layers[0](x)
        x = self.relu_drop(x)
        for i in range(1, self.n_layers):
            x_ = self.conv_layers[i](x)
            x_ = self.norm_layers[i](x_)
            x_ = self.relu_drop(x_)
            # Residual averaging instead of a plain residual sum.
            x = (x + x_) / 2
        x = self.proj(x)
        return x
class PosteriorEncoder(nn.Module):
    """Encodes acoustic features into posterior Gaussian statistics.

    `stats` stacks mean and log-scale along channels (2 * out_channels);
    the caller splits them.
    """

    def __init__(self,
                 hps,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        # 1x1 conv lifts input features to the hidden width.
        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        # WaveNet-style encoder; speaker conditioning config taken from hps.
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, n_speakers=hps.data.n_speakers, spk_channels=hps.model.spk_channels)
        # self.enc = ConvReluNorm(hidden_channels,
        #                         hidden_channels,
        #                         hidden_channels,
        #                         kernel_size,
        #                         n_layers,
        #                         0.1)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        # x: [b, in_channels, t]; g: optional speaker embedding — presumably
        # [b, spk_channels, 1]; verify against callers.
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        return stats, x_mask
class ResBlock3(torch.nn.Module):
    """Single weight-normalised residual conv block.

    Only dilation[0] is used for the one convolution in the list; the tuple
    signature mirrors the other ResBlock variants.
    """

    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock3, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for conv in self.convs:
            residual = x
            h = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            x = residual + conv(h)
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        # Calls the module-level helper from torch.nn.utils, not this method.
        for conv in self.convs:
            remove_weight_norm(conv)
class Generator_Harm(torch.nn.Module):
    """DDSP harmonic synthesizer.

    Predicts per-frame amplitudes for n_harmonic partials plus one global
    amplitude, upsamples to audio rate, and sums sinusoids at integer
    multiples of F0.
    """

    def __init__(self, hps):
        super(Generator_Harm, self).__init__()
        self.hps = hps
        self.prenet = Conv1d(hps.model.hidden_channels, hps.model.hidden_channels, 3, padding=1)
        self.net = ConvReluNorm(hps.model.hidden_channels,
                                hps.model.hidden_channels,
                                hps.model.hidden_channels,
                                hps.model.kernel_size,
                                8,
                                hps.model.p_dropout)
        # self.rnn = nn.LSTM(input_size=hps.model.hidden_channels,
        #                    hidden_size=hps.model.hidden_channels,
        #                    num_layers=1,
        #                    bias=True,
        #                    batch_first=True,
        #                    dropout=0.5,
        #                    bidirectional=True)
        # Outputs n_harmonic per-partial amplitudes + 1 global amplitude.
        self.postnet = Conv1d(hps.model.hidden_channels, hps.model.n_harmonic + 1, 3, padding=1)

    def forward(self, f0, harm, mask):
        # f0 is transposed (1, 2) below — presumably [b, t, 1]; verify caller.
        pitch = f0.transpose(1, 2)
        harm = self.prenet(harm)
        harm = self.net(harm) * mask
        # harm = harm.transpose(1, 2)
        # harm, (hs, hc) = self.rnn(harm)
        # harm = harm.transpose(1, 2)
        harm = self.postnet(harm)
        harm = harm.transpose(1, 2)
        param = harm
        # Map raw outputs to positive amplitudes: channel 0 is the global
        # amplitude, the remaining channels are per-harmonic weights.
        param = scale_function(param)
        total_amp = param[..., :1]
        amplitudes = param[..., 1:]
        # Zero out harmonics that would alias above Nyquist.
        amplitudes = remove_above_nyquist(
            amplitudes,
            pitch,
            self.hps.data.sampling_rate,
        )
        # Normalise the harmonic distribution, then scale by the global amp.
        amplitudes /= amplitudes.sum(-1, keepdim=True)
        amplitudes *= total_amp
        # Frame rate -> audio rate.
        amplitudes = upsample(amplitudes, self.hps.data.hop_length)
        pitch = upsample(pitch, self.hps.data.hop_length)
        n_harmonic = amplitudes.shape[-1]
        # Integrated phase; harmonic k oscillates at k * f0.
        omega = torch.cumsum(2 * math.pi * pitch / self.hps.data.sampling_rate, 1)
        omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
        signal_harmonics = (torch.sin(omegas) * amplitudes)
        signal_harmonics = signal_harmonics.transpose(1, 2)
        return signal_harmonics
class Generator(torch.nn.Module):
    """HiFi-GAN style decoder conditioned on a DDSP excitation signal.

    Alongside the usual transposed-conv upsampling stack, the audio-rate DDSP
    waveform (`ddsp`) is progressively downsampled; its intermediate features
    are concatenated into the matching upsampling stage (U-Net-like skips
    between the two time resolutions).
    """

    def __init__(self, hps, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
                 upsample_initial_channel, upsample_kernel_sizes, n_speakers=0, spk_channels=0):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        self.upsample_rates = upsample_rates
        self.n_speakers = n_speakers
        # BUG FIX: the original read `modules.R` in the non-'1' branch, which
        # is a truncated name and raises AttributeError whenever
        # resblock != '1'. The standard HiFi-GAN pairing is
        # ResBlock1 / ResBlock2.
        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
        # Downsampling branch for the DDSP signal, mirroring upsample_rates
        # in reverse order.
        self.downs = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            i = len(upsample_rates) - 1 - i
            u = upsample_rates[i]
            k = upsample_kernel_sizes[i]
            self.downs.append(weight_norm(
                Conv1d(hps.model.n_harmonic + 2, hps.model.n_harmonic + 2,
                       k, u, padding=k // 2)))
        self.resblocks_downs = nn.ModuleList()
        for i in range(len(self.downs)):
            j = len(upsample_rates) - 1 - i
            self.resblocks_downs.append(ResBlock3(hps.model.n_harmonic + 2, 3, (1, 3)))
        self.concat_pre = Conv1d(upsample_initial_channel + hps.model.n_harmonic + 2, upsample_initial_channel, 3, 1,
                                 padding=1)
        self.concat_conv = nn.ModuleList()
        for i in range(len(upsample_rates)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            self.concat_conv.append(Conv1d(ch + hps.model.n_harmonic + 2, ch, 3, 1, padding=1, bias=False))
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(weight_norm(
                ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
                                k, u, padding=(k - u) // 2)))
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d))
        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)
        if self.n_speakers != 0:
            self.cond = nn.Conv1d(spk_channels, upsample_initial_channel, 1)

    def forward(self, x, ddsp, g=None):
        """x: frame-rate latent; ddsp: audio-rate excitation; g: speaker bias."""
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)
        # Downsample the DDSP excitation, stashing each scale for the skips.
        se = ddsp
        res_features = [se]
        for i in range(self.num_upsamples):
            in_size = se.size(2)
            se = self.downs[i](se)
            se = self.resblocks_downs[i](se)
            up_rate = self.upsample_rates[self.num_upsamples - 1 - i]
            # Trim so each scale lines up exactly with the frame grid.
            se = se[:, :, : in_size // up_rate]
            res_features.append(se)
        x = torch.cat([x, se], 1)
        x = self.concat_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            in_size = x.size(2)
            x = self.ups[i](x)
            # Keep the length consistent: drop the surplus samples produced
            # by the transposed convolution.
            x = x[:, :, : in_size * self.upsample_rates[i]]
            x = torch.cat([x, res_features[self.num_upsamples - 1 - i]], 1)
            x = self.concat_conv[i](x)
            # Multi-receptive-field fusion: average the parallel resblocks.
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
class Generator_Noise(torch.nn.Module):
    """Stochastic (aperiodic) component generator.

    Predicts an STFT magnitude from the latent, pairs it with uniformly
    random phase, and inverts with `torch.istft` to a noise waveform.
    """

    def __init__(self, hps):
        super(Generator_Noise, self).__init__()
        self.hps = hps
        self.win_size = hps.data.win_size
        self.hop_size = hps.data.hop_length
        self.fft_size = hps.data.n_fft
        self.istft_pre = Conv1d(hps.model.hidden_channels, hps.model.hidden_channels, 3, padding=1)
        self.net = ConvReluNorm(hps.model.hidden_channels,
                                hps.model.hidden_channels,
                                hps.model.hidden_channels,
                                hps.model.kernel_size,
                                8,
                                hps.model.p_dropout)
        self.istft_amplitude = torch.nn.Conv1d(hps.model.hidden_channels, self.fft_size // 2 + 1, 1, 1)
        # NOTE(review): plain attribute, not a registered buffer, so
        # `module.to(device)` does not move it; forward copies it onto the
        # activation's device instead.
        self.window = torch.hann_window(self.win_size)

    def forward(self, x, mask):
        istft_x = x
        istft_x = self.istft_pre(istft_x)
        istft_x = self.net(istft_x) * mask
        amp = self.istft_amplitude(istft_x).unsqueeze(-1)
        # BUG FIX: random phase used a hard-coded 3.14 approximation of pi;
        # use math.pi for the exact uniform [-pi, pi) range.
        phase = (torch.rand(amp.shape) * 2 * math.pi - math.pi).to(amp)
        real = amp * torch.cos(phase)
        imag = amp * torch.sin(phase)
        spec = torch.cat([real, imag], 3)
        istft_x = torch.istft(spec, self.fft_size, self.hop_size, self.win_size, self.window.to(amp), True,
                              length=x.shape[2] * self.hop_size, return_complex=False)
        return istft_x.unsqueeze(1)
class LayerNorm(nn.Module):
    """LayerNorm over the channel axis of [b, c, t] tensors (normalises dim 1)."""

    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        # Move channels last, normalise, move back.
        y = F.layer_norm(x.transpose(1, -1), (self.channels,), self.gamma, self.beta, self.eps)
        return y.transpose(1, -1)
class DiscriminatorP(torch.nn.Module):
    """HiFi-GAN period discriminator.

    Folds the 1-D waveform into a 2-D [frames, period] grid so strided 2-D
    convolutions can inspect structure repeating at the given period.
    """

    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        """Return (flattened logits, list of post-activation feature maps)."""
        fmap = []
        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first so t becomes a multiple of period
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)
        return x, fmap
class DiscriminatorS(torch.nn.Module):
    """HiFi-GAN scale discriminator: grouped strided 1-D convs on raw audio."""

    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        """Return (flattened logits, list of post-activation feature maps)."""
        fmap = []
        h = x
        for conv in self.convs:
            h = F.leaky_relu(conv(h), modules.LRELU_SLOPE)
            fmap.append(h)
        h = self.conv_post(h)
        fmap.append(h)
        return torch.flatten(h, 1, -1), fmap
class MultiFrequencyDiscriminator(nn.Module):
    """Runs a frequency-domain discriminator at several STFT resolutions."""

    def __init__(self,
                 hop_lengths=[128, 256, 512],
                 hidden_channels=[256, 512, 512],
                 domain='double', mel_scale=True):
        super(MultiFrequencyDiscriminator, self).__init__()
        self.stfts = nn.ModuleList([
            TorchSTFT(fft_size=hop * 4, hop_size=hop, win_size=hop * 4,
                      normalized=True, domain=domain, mel_scale=mel_scale)
            for hop in hop_lengths])
        self.domain = domain
        # 'double' domain stacks two magnitude halves into a 2-channel image;
        # otherwise a single magnitude channel is fed in.
        in_ch = 2 if domain == 'double' else 1
        self.discriminators = nn.ModuleList([
            BaseFrequenceDiscriminator(in_ch, c)
            for _, c in zip(hop_lengths, hidden_channels)])

    def forward(self, x):
        scores, feats = list(), list()
        for stft, disc in zip(self.stfts, self.discriminators):
            mag, phase = stft.transform(x.squeeze())
            if self.domain == 'double':
                mag = torch.stack(torch.chunk(mag, 2, dim=1), dim=1)
            else:
                mag = mag.unsqueeze(1)
            score, feat = disc(mag)
            scores.append(score)
            feats.append(feat)
        return scores, feats
class BaseFrequenceDiscriminator(nn.Module):
    """Stack of reflection-padded, weight-normalised 3x3 convs on a spectrogram.

    Channel width doubles each block from hidden_channels // 32 up to
    hidden_channels, alternating stride 1 / stride 2, ending in a 1-channel
    score map. `forward` returns (score_map, last_hidden) — the same tensor.
    """

    def __init__(self, in_channels, hidden_channels=512):
        super(BaseFrequenceDiscriminator, self).__init__()
        widths = [
            in_channels,
            hidden_channels // 32,
            hidden_channels // 16,
            hidden_channels // 8,
            hidden_channels // 4,
            hidden_channels // 2,
            hidden_channels,
            1,
        ]
        strides = [(1, 1), (2, 2), (1, 1), (2, 2), (1, 1), (2, 2), (1, 1)]
        self.discriminator = nn.ModuleList()
        for idx, (c_in, c_out, stride) in enumerate(zip(widths[:-1], widths[1:], strides)):
            layers = []
            if idx > 0:
                # Every block except the first is preceded by an activation.
                layers.append(nn.LeakyReLU(0.2, True))
            layers.append(nn.ReflectionPad2d((1, 1, 1, 1)))
            layers.append(nn.utils.weight_norm(nn.Conv2d(
                c_in, c_out, kernel_size=(3, 3), stride=stride)))
            self.discriminator.append(nn.Sequential(*layers))

    def forward(self, x):
        hiddens = []
        for block in self.discriminator:
            x = block(x)
            hiddens.append(x)
        # hiddens[-1] is the same tensor as x; kept to match callers.
        return x, hiddens[-1]
class Discriminator(torch.nn.Module):
    """Ensemble of one scale discriminator and five period discriminators.

    `hps` is accepted for interface compatibility; it is not used here.
    """

    def __init__(self, hps, use_spectral_norm=False):
        super(Discriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]
        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs.extend(DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in periods)
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        """Run every sub-discriminator on real (y) and generated (y_hat) audio."""
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
        for disc in self.discriminators:
            score_r, feat_r = disc(y)
            score_g, feat_g = disc(y_hat)
            y_d_rs.append(score_r)
            y_d_gs.append(score_g)
            fmap_rs.append(feat_r)
            fmap_gs.append(feat_g)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class SynthesizerTrn(nn.Module):
    """
    Model

    VITS-style end-to-end synthesizer with DDSP excitation: content encoder
    -> auxiliary F0/mel decoders -> prior decoder; posterior encoder + flow
    on the acoustic side; DDSP harmonic + noise generators provide the
    excitation consumed by the HiFi-GAN decoder.
    """

    def __init__(self, hps):
        super().__init__()
        self.hps = hps
        # Content-feature encoder.
        self.text_encoder = TextEncoder(
            hps.data.c_dim,
            hps.model.prior_hidden_channels,
            hps.model.prior_hidden_channels,
            hps.model.prior_filter_channels,
            hps.model.prior_n_heads,
            hps.model.prior_n_layers,
            hps.model.prior_kernel_size,
            hps.model.prior_p_dropout)
        # Prior decoder emits stacked mean/log-scale (2 * hidden_channels).
        self.decoder = PriorDecoder(
            hps.model.hidden_channels * 2,
            hps.model.prior_hidden_channels,
            hps.model.prior_filter_channels,
            hps.model.prior_n_heads,
            hps.model.prior_n_layers,
            hps.model.prior_kernel_size,
            hps.model.prior_p_dropout,
            n_speakers=hps.data.n_speakers,
            spk_channels=hps.model.spk_channels
        )
        # Auxiliary log-F0 predictor (single output channel).
        self.f0_decoder = F0Decoder(
            1,
            hps.model.prior_hidden_channels,
            hps.model.prior_filter_channels,
            hps.model.prior_n_heads,
            hps.model.prior_n_layers,
            hps.model.prior_kernel_size,
            hps.model.prior_p_dropout,
            n_speakers=hps.data.n_speakers,
            spk_channels=hps.model.spk_channels
        )
        # Auxiliary mel predictor; its output also feeds the energy proxy.
        self.mel_decoder = Decoder(
            hps.data.acoustic_dim,
            hps.model.prior_hidden_channels,
            hps.model.prior_filter_channels,
            hps.model.prior_n_heads,
            hps.model.prior_n_layers,
            hps.model.prior_kernel_size,
            hps.model.prior_p_dropout,
            n_speakers=hps.data.n_speakers,
            spk_channels=hps.model.spk_channels
        )
        self.posterior_encoder = PosteriorEncoder(
            hps,
            hps.data.acoustic_dim,
            hps.model.hidden_channels,
            hps.model.hidden_channels, 3, 1, 8)
        self.dropout = nn.Dropout(0.2)
        self.LR = LengthRegulator()
        self.dec = Generator(hps,
                             hps.model.hidden_channels,
                             hps.model.resblock,
                             hps.model.resblock_kernel_sizes,
                             hps.model.resblock_dilation_sizes,
                             hps.model.upsample_rates,
                             hps.model.upsample_initial_channel,
                             hps.model.upsample_kernel_sizes,
                             n_speakers=hps.data.n_speakers,
                             spk_channels=hps.model.spk_channels)
        self.dec_harm = Generator_Harm(hps)
        self.dec_noise = Generator_Noise(hps)
        # Prenets that inject F0 / energy / predicted mel into the prior input.
        self.f0_prenet = nn.Conv1d(1, hps.model.prior_hidden_channels, 3, padding=1)
        self.energy_prenet = nn.Conv1d(1, hps.model.prior_hidden_channels, 3, padding=1)
        self.mel_prenet = nn.Conv1d(hps.data.acoustic_dim, hps.model.prior_hidden_channels, 3, padding=1)
        if hps.data.n_speakers > 0:
            self.emb_spk = nn.Embedding(hps.data.n_speakers, hps.model.spk_channels)
        self.flow = modules.ResidualCouplingBlock(hps.model.prior_hidden_channels, hps.model.hidden_channels, 5, 1, 4, n_speakers=hps.data.n_speakers, gin_channels=hps.model.spk_channels)

    def forward(self, c, c_lengths, F0, uv, mel, bn_lengths, spk_id=None):
        """Training forward pass; returns the sliced waveform plus auxiliary
        predictions and losses (KL, F0) used by the training loop."""
        if self.hps.data.n_speakers > 0:
            g = self.emb_spk(spk_id).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None
        # Encoder
        decoder_input, x_mask = self.text_encoder(c, c_lengths)
        # Mel-scale mapping of F0, then a fixed normalisation constant.
        LF0 = 2595. * torch.log10(1. + F0 / 700.)
        LF0 = LF0 / 500
        norm_f0 = utils.normalize_f0(LF0, x_mask, uv.squeeze(1), random_scale=True)
        pred_lf0, predict_bn_mask = self.f0_decoder(decoder_input, norm_f0, bn_lengths, spk_emb=g)
        loss_f0 = F.mse_loss(pred_lf0, LF0)
        # aam: auxiliary acoustic model predicts mel; energy is its mean band.
        predict_mel, predict_bn_mask = self.mel_decoder(decoder_input + self.f0_prenet(LF0), bn_lengths, spk_emb=g)
        predict_energy = predict_mel.detach().sum(1).unsqueeze(1) / self.hps.data.acoustic_dim
        decoder_input = decoder_input + \
                        self.f0_prenet(LF0) + \
                        self.energy_prenet(predict_energy) + \
                        self.mel_prenet(predict_mel.detach())
        decoder_output, predict_bn_mask = self.decoder(decoder_input, bn_lengths, spk_emb=g)
        prior_info = decoder_output
        # Split stacked prior statistics into mean / log-scale.
        m_p = prior_info[:, :self.hps.model.hidden_channels, :]
        logs_p = prior_info[:, self.hps.model.hidden_channels:, :]
        # posterior
        posterior, y_mask = self.posterior_encoder(mel, bn_lengths, g=g)
        m_q = posterior[:, :self.hps.model.hidden_channels, :]
        logs_q = posterior[:, self.hps.model.hidden_channels:, :]
        # Reparameterised sample from the posterior, mapped through the flow.
        z = (m_q + torch.randn_like(m_q) * torch.exp(logs_q)) * y_mask
        z_p = self.flow(z, y_mask, g=g)
        # kl loss
        loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, y_mask)
        p_z = z
        p_z = self.dropout(p_z)
        # Sine excitation at audio rate from the upsampled F0 contour.
        pitch = upsample(F0.transpose(1, 2), self.hps.data.hop_length)
        omega = torch.cumsum(2 * math.pi * pitch / self.hps.data.sampling_rate, 1)
        sin = torch.sin(omega).transpose(1, 2)
        # dsp synthesize
        noise_x = self.dec_noise(p_z, y_mask)
        harm_x = self.dec_harm(F0, p_z, y_mask)
        # dsp waveform
        dsp_o = torch.cat([harm_x, noise_x], axis=1)
        decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
        # dsp based HiFiGAN vocoder — trained on random slices to bound memory.
        x_slice, ids_slice = commons.rand_slice_segments(p_z, bn_lengths,
                                                         self.hps.train.segment_size // self.hps.data.hop_length)
        F0_slice = commons.slice_segments(F0, ids_slice, self.hps.train.segment_size // self.hps.data.hop_length)
        dsp_slice = commons.slice_segments(dsp_o, ids_slice * self.hps.data.hop_length, self.hps.train.segment_size)
        condition_slice = commons.slice_segments(decoder_condition, ids_slice * self.hps.data.hop_length,
                                                 self.hps.train.segment_size)
        o = self.dec(x_slice, condition_slice.detach(), g=g)
        return o, ids_slice, LF0 * predict_bn_mask, dsp_slice.sum(1), loss_kl, \
               predict_mel, predict_bn_mask, pred_lf0, loss_f0, norm_f0

    def infer(self, c, g=None, f0=None, uv=None, predict_f0=False, noice_scale=0.3):
        """Inference path: synthesize audio from content features `c`.

        `g` is a speaker-id tensor (embedded internally); `noice_scale` (sic,
        kept for API compatibility) scales the prior sampling noise.
        """
        if len(g.shape) == 2:
            g = g.squeeze(0)
        if len(f0.shape) == 2:
            f0 = f0.unsqueeze(0)
        g = self.emb_spk(g).unsqueeze(-1)  # [b, h, 1]
        c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
        # Encoder
        decoder_input, x_mask = self.text_encoder(c, c_lengths)
        y_lengths = c_lengths
        LF0 = 2595. * torch.log10(1. + f0 / 700.)
        LF0 = LF0 / 500
        if predict_f0:
            norm_f0 = utils.normalize_f0(LF0, x_mask, uv.squeeze(1))
            pred_lf0, predict_bn_mask = self.f0_decoder(decoder_input, norm_f0, y_lengths, spk_emb=g)
            # Invert the mel-scale mapping applied above.
            pred_f0 = 700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)
            f0 = pred_f0
            LF0 = pred_lf0
        # aam
        predict_mel, predict_bn_mask = self.mel_decoder(decoder_input + self.f0_prenet(LF0), y_lengths, spk_emb=g)
        predict_energy = predict_mel.sum(1).unsqueeze(1) / self.hps.data.acoustic_dim
        decoder_input = decoder_input + \
                        self.f0_prenet(LF0) + \
                        self.energy_prenet(predict_energy) + \
                        self.mel_prenet(predict_mel)
        decoder_output, y_mask = self.decoder(decoder_input, y_lengths, spk_emb=g)
        prior_info = decoder_output
        m_p = prior_info[:, :self.hps.model.hidden_channels, :]
        logs_p = prior_info[:, self.hps.model.hidden_channels:, :]
        # Sample from the prior, then map through the inverse flow.
        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noice_scale
        z = self.flow(z_p, y_mask, g=g, reverse=True)
        prior_z = z
        noise_x = self.dec_noise(prior_z, y_mask)
        harm_x = self.dec_harm(f0, prior_z, y_mask)
        # Sine excitation at audio rate.
        pitch = upsample(f0.transpose(1, 2), self.hps.data.hop_length)
        omega = torch.cumsum(2 * math.pi * pitch / self.hps.data.sampling_rate, 1)
        sin = torch.sin(omega).transpose(1, 2)
        decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
        # dsp based HiFiGAN vocoder
        o = self.dec(prior_z, decoder_condition, g=g)
        return o, harm_x.sum(1).unsqueeze(1), noise_x, f0
| justinjohn0306/so-vits-svc-4.0-v2 | models.py | models.py | py | 39,167 | python | en | code | 497 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
72359488674 | import asyncio
from connectionBDD import *
from connectionZabbix import *
from Zapi import *
from datetime import datetime
import time
import sys
import pandas as pd
import os
from pandasql import sqldf
from pyzabbix import ZabbixAPI
#On récupère le path pour que le fichier exécutable soit créé dans le même répertoire
path = os.path.dirname(sys.executable)
#Pour mettre la date courante dans le nom du fichier csv crée avec un fstring
now = datetime.now()
nowstr = now.strftime("%d%m%Y")
"""
#Enabling debug logging for the pyzabbix API
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.DEBUG)
log = logging.getLogger('pyzabbix')
log.addHandler(stream)
log.setLevel(logging.DEBUG)
"""
async def main():
sortie = 'preprod_sortie_ems'
coordo = 'preprod_bdd_coordination'
histo = 'preprod_historisation'
#Connection aux 2 BDDs
try:
conn_sortie = ConnectionBDD(sortie)
print(f"Connection to " +sortie+ " created successfully.")
except Exception as ex:
print("Connection to " +sortie+ " could not be made due to the following error: \n", ex)
try:
conn_coordo = ConnectionBDD(coordo, "bdd_coordination_schema")
print(f"Connection to " +coordo+ " created successfully.")
except Exception as ex:
print("Connection to " +coordo+ " could not be made due to the following error: \n", ex)
#Connection aux tables dont on a besoin
try:
conn_sortie.get_table('result')
print(f"Table for result created successfully.")
except Exception as ex:
print("Table for result could not be made due to the following error: \n", ex)
try:
conn_sortie.get_table('equipement_pilote_consommation_moyenne')
print(f"Table for conso created successfully.")
except Exception as ex:
print("Table for conso could not be made due to the following error: \n", ex)
try:
conn_sortie.get_table('p_c_with_flexible_consumption')
print(f"Table for p_c_with_flexible_consumption created successfully.")
except Exception as ex:
print("Table for p_c_with_flexible_consumption could not be made due to the following error: \n", ex)
try:
conn_sortie.get_table('p_c_without_flexible_consumption')
print(f"Table for p_c_without_flexible_consumption created successfully.")
except Exception as ex:
print("Table for p_c_without_flexible_consumption could not be made due to the following error: \n", ex)
try:
conn_coordo.get_table_with_schema('equipement_pilote_ou_mesure', 'bdd_coordination_schema')
print(f"Table for equipement_pilote_ou_mesure created successfully.")
except Exception as ex:
print("Table for equipement_pilote_ou_mesure could not be made due to the following error: \n", ex)
try:
conn_coordo.get_table_with_schema('equipement_mesure_compteur_electrique', 'bdd_coordination_schema')
print(f"Table for equipement_mesure_compteur_electrique created successfully.")
except Exception as ex:
print("Table for equipement_mesure_compteur_electrique could not be made due to the following error: \n", ex)
#Calcul du nombre d'appareils connectés par 1/4h
def indic_appareils_connectes() -> int :
with conn_sortie.engine.connect() as conn:
appconn = pd.read_sql("SELECT COUNT(*) FROM result\
GROUP BY first_valid_timestamp\
ORDER BY first_valid_timestamp DESC\
LIMIT 1"\
, con = conn)
return int(appconn.loc[0]['count'])
#Calcul du nombre d'appareils pilotés la semaine dernière
def indic_appareils_pilotes_semaine() -> int :
with conn_sortie.engine.connect() as conn:
appsem = pd.read_sql("SELECT COUNT(DISTINCT machine_id) AS Nombre_appareils_pilotés_la_semaine_dernière FROM result\
WHERE first_valid_timestamp > (CAST(EXTRACT (epoch FROM NOW()) AS INT) - 604800)", con = conn)
return appsem
# Percentage of ELFE devices launched in the last 24h.
# Currently aligned with the percentage of machines activated in Viriya.
# Add types to the exclusion list if new non-pilotable types appear.
def pourcentage_app_lancés_24h() -> float :
    """Percentage of EMS devices launched during the last 24 hours.

    Numerator: distinct continuous machines (types 131, -1) with a positive
    decision plus the raw count of discontinuous launches (type 221) over the
    last 86400 s.  Denominator: all pilotable equipment (types
    410/901/910/920 excluded).

    NOTE(review): raises ZeroDivisionError when no pilotable equipment is
    registered -- confirm whether 0 should be returned instead.
    """
    with conn_coordo.engine.connect() as conn:
        nb_app_EMS = pd.read_sql("SELECT COUNT(*) FROM equipement_pilote_ou_mesure\
                                  WHERE equipement_pilote_ou_mesure_type_id NOT IN (410, 901, 910, 920)"
                                 , con = conn)
        nb_app_EMS = int(nb_app_EMS.loc[0]['count'])
    with conn_sortie.engine.connect() as conn:
        nb_app_lancés_24h_continu = pd.read_sql("SELECT COUNT(DISTINCT machine_id) FROM result \
                                                 WHERE machine_type IN (131, -1)\
                                                 AND decisions_0 = 1\
                                                 AND first_valid_timestamp >= (CAST(EXTRACT (epoch FROM NOW()) AS INT) - 86400)"
                                                , con = conn) # Complete the machine_type list
        nb_app_lancés_24h_discontinu = pd.read_sql(""" SELECT COUNT(*) FROM result
                                                   WHERE decisions_0 = 1
                                                   AND machine_type IN (221)
                                                   AND first_valid_timestamp > (CAST(EXTRACT (epoch FROM NOW()) AS INT) - 86400)"""
                                                   , con = conn)
        nb_app_lancés_24h = int(nb_app_lancés_24h_continu.loc[0]['count']) + int(nb_app_lancés_24h_discontinu.loc[0]['count'])
    pourcentage_app_lancés_24h = (100*nb_app_lancés_24h)/nb_app_EMS
    return round(pourcentage_app_lancés_24h, 1)
# Continuous list eventually (in prod): 131, 151, 155
# Discontinuous list eventually (in prod): 515, 112, 111, 113, 221, 225
# Cumulative kW of energy placed by the EMS since the start of the project.
def cumul_enr() -> int:
    """Cumulative energy placed by the EMS since the project started.

    Two launch-count tables are built from ``result``:
      * discontinuous machines (type 111): every positive decision counts;
      * continuous machines (type 131): at most one launch per machine per day
        (timestamps bucketed by ``first_valid_timestamp/86400``).
    Each count is multiplied by the average consumption of its machine type
    (``equipement_pilote_consommation_moyenne``) and summed.

    NOTE(review): the final ``/1000`` assumes ``consommation`` is stored in
    Wh -- confirm the unit (``cumul_enr_opti`` multiplies by 1000 instead).
    """
    with conn_sortie.engine.connect() as conn:
        # Build two tables of the form machine_type | number of launches.
        # 1st table, discontinuous machines: every launch is counted.
        coeffs_discontinu:pd.DataFrame = pd.read_sql("SELECT machine_type, COUNT(*) FROM result\
                                                      WHERE decisions_0 = 1\
                                                      AND machine_type IN (111)\
                                                      GROUP BY machine_type"
                                                     , con = conn) # Complete the machine_type list
        # 2nd table, continuous machines: without stop/restart, one launch per day.
        coeffs_continu:pd.DataFrame = pd.read_sql("""SELECT * FROM
                        (SELECT machine_type, COUNT(*) FROM
                        (SELECT DISTINCT * FROM
                        (SELECT first_valid_timestamp/86400 AS day, machine_id, machine_type FROM
                        result WHERE decisions_0 = 1) AS T1) AS T2
                        GROUP BY machine_type) AS T3 WHERE machine_type IN (131)"""
                        , con = conn) # Complete the machine_type list
    # Fetch the machine_type | average consumption table.
    with conn_coordo.engine.connect() as conn:
        conso_energie:pd.DataFrame = pd.read_sql("SELECT equipement_pilote_type_id, consommation FROM equipement_pilote_consommation_moyenne"
                                                 , con = conn)
    # cumul_enr is the final indicator, initialised to 0.
    cumul_enr = 0
    # For each row of both tables, add
    # (number of launches of a type) * (average energy consumed by that type).
    for i in coeffs_discontinu.index:
        for j in conso_energie.index:
            if coeffs_discontinu.loc[i]['machine_type'] == conso_energie.loc[j]['equipement_pilote_type_id']:
                cumul_enr += coeffs_discontinu.loc[i]['count'] * conso_energie.loc[j]['consommation']
    for i in coeffs_continu.index:
        for j in conso_energie.index:
            if coeffs_continu.loc[i]['machine_type'] == conso_energie.loc[j]['equipement_pilote_type_id']:
                cumul_enr += coeffs_continu.loc[i]['count'] * conso_energie.loc[j]['consommation']
    print("Ancien calcul : ", int(cumul_enr/1000))
    return int(cumul_enr/1000)
# Cumulative kW of energy placed by the EMS since the start of the project.
def cumul_enr_opti() -> int:
    """Variant of ``cumul_enr`` with debug prints of the coefficient tables.

    NOTE(review): this version returns ``int(cumul_enr*1000)`` whereas
    ``cumul_enr`` returns ``int(cumul_enr/1000)``; one of the two scale
    factors is almost certainly wrong -- confirm the unit of
    ``consommation`` before trusting either result.
    """
    with conn_sortie.engine.connect() as conn:
        # Build two tables of the form machine_type | number of launches.
        # 1st table, discontinuous machines: every launch is counted.
        coeffs_discontinu:pd.DataFrame = pd.read_sql("SELECT machine_type, COUNT(*) FROM result\
                                                      WHERE decisions_0 = 1\
                                                      AND machine_type IN (111)\
                                                      GROUP BY machine_type"
                                                     , con = conn) # Complete the machine_type list
        print("Coeffs discontinus : " , coeffs_discontinu)
        # 2nd table, continuous machines: without stop/restart, one launch per day.
        coeffs_continu:pd.DataFrame = pd.read_sql("""SELECT * FROM
                        (SELECT machine_type, COUNT(*) FROM
                        (SELECT DISTINCT * FROM
                        (SELECT first_valid_timestamp/86400 AS day, machine_id, machine_type FROM result
                        WHERE decisions_0 = 1) AS T1) AS T2
                        GROUP BY machine_type) AS T3 WHERE machine_type IN (131)"""
                        , con = conn) # Complete the machine_type list
        print("Coeffs continus : ", coeffs_continu)
    # Fetch the machine_type | average consumption table.
    with conn_coordo.engine.connect() as conn:
        conso_energie:pd.DataFrame = pd.read_sql("SELECT equipement_pilote_type_id, consommation FROM equipement_pilote_consommation_moyenne"
                                                 , con = conn)
    # cumul_enr is the final indicator, initialised to 0.
    cumul_enr = 0
    # For each row of both tables, add launches * average consumption of the type.
    for i in coeffs_discontinu.index:
        for j in conso_energie.index:
            if coeffs_discontinu.loc[i]['machine_type'] == conso_energie.loc[j]['equipement_pilote_type_id']:
                cumul_enr += coeffs_discontinu.loc[i]['count'] * conso_energie.loc[j]['consommation']
    for i in coeffs_continu.index:
        for j in conso_energie.index:
            if coeffs_continu.loc[i]['machine_type'] == conso_energie.loc[j]['equipement_pilote_type_id']:
                cumul_enr += coeffs_continu.loc[i]['count'] * conso_energie.loc[j]['consommation']
    return int(cumul_enr*1000)
# Percentage of the households' energy consumption that was placed.
def conso_enr_placee() -> int:
    """Percentage of the panel's scaled consumption covered by placement.

    Numerator: energy difference between the consumption curves with and
    without flexibility (0.25 h per sample).  Denominator: scaled panel
    consumption over the last 24 h (Zabbix item Panel_R_puissance_mae,
    one value every 3 minutes, hence the 1/20 factor).

    NOTE(review): the SQL filter ``data_timestamp > NOW()`` selects *future*
    rows (a forecast horizon?) although the variable name and comments talk
    about the last 24 h -- confirm the intended time window.
    """
    # Start by computing the quantity of energy placed in the last 24h.
    with conn_sortie.engine.connect() as conn:
        cumul_enr_placee_24h:pd.DataFrame = pd.read_sql(""" SELECT SUM((0.25)*(p_c_with_flexible_consumption.power - p_c_without_flexible_consumption.power)) FROM p_c_with_flexible_consumption
                        INNER JOIN p_c_without_flexible_consumption
                        USING(data_timestamp)
                        WHERE p_c_with_flexible_consumption.data_timestamp > (CAST(EXTRACT (epoch FROM NOW()) AS INT)) """
                        , con = conn)
    # Negate the whole DataFrame -- presumably placed energy shows up as a
    # negative difference; confirm the sign convention of the power columns.
    cumul_enr_placee_24h = -cumul_enr_placee_24h
    cumul_enr_placee_24h = int(cumul_enr_placee_24h.loc[0]['sum'])
    print("cumul energie flexible",cumul_enr_placee_24h)
    # Scaled panel energy consumption over the last 24h
    # (item Panel_R_puissance_mae in Zabbix).
    zapi = createZapi()
    tt = int(time.mktime(datetime.now().timetuple()))
    tf = int(tt - 60 * 60 * 24)
    puissance_panel_mae = 0
    for i in zapi.history.get(hostids = [10084], itemids = [44968], time_from = tf, time_till = tt, output = "extend", limit = 1440, history = 0):
        puissance_panel_mae += int(float(i['value'])) * (1/20) #Panel_R_puissance_mae
    pourcentage_enr_conso_placee = int(100*(cumul_enr_placee_24h/puissance_panel_mae))
    return pourcentage_enr_conso_placee
def cumul_enr_autoconso() -> int:
    """Energy produced *and* consumed locally since the project start.

    Scaled production (Panel_Prod_puissance_mae, one sample every 3 min,
    hence 1/20) minus the exported surplus derived from the grid balance
    (equilibre_general_p_c, one sample per minute, hence 1/60).
    """
    zapi = createZapi()
    now_ts = int(time.mktime(datetime.now().timetuple()))
    start_ts = 1667287596  # fixed epoch: start of the measurement period
    # Scaled production over the whole period (in Wh).
    produced = sum(
        int(float(sample['value'])) * (1/20)
        for sample in zapi.history.get(hostids = [10084], itemids = [44969], time_from = start_ts, time_till = now_ts, output = "extend", limit = 1440, history=0)
    )
    # Surplus production inferred from the positive side of the balance (in Wh).
    surplus = sum(
        int(float(sample['value'])) * (1/60)
        for sample in zapi.history.get(hostids = [10084], itemids = [42883], time_from = start_ts, time_till = now_ts, output = "extend", limit = 1440, history=0)
        if float(sample['value']) > 0
    )
    return int(produced - surplus)
def cumul_enr_autoconso_opti() -> int:
    """Incremental version of the self-consumed-energy counter.

    Reads the previously published total (Zabbix item 45255), then adds the
    production/surplus delta of the last 15 minutes.
    """
    zapi = createZapi()
    now_ts = int(time.mktime(datetime.now().timetuple()))
    window_start = int(now_ts - 60 * 15)
    # Previously stored cumulative value (0 if none is found).
    previous_total = 0
    for sample in zapi.history.get(hostids = [10084], itemids = [45255], time_from = window_start, time_till = now_ts-5, output = "extend", limit = 1, history=0):
        previous_total = int(float(sample['value']))
    # Scaled production over the window (Panel_Prod_puissance_mae, 1/20 factor).
    produced = sum(
        int(float(sample['value'])) * (1/20)
        for sample in zapi.history.get(hostids = [10084], itemids = [44969], time_from = window_start, time_till = now_ts, output = "extend", limit = 5, history=0)
    )
    # Surplus from the positive side of the balance (equilibre_general_p_c, 1/60).
    surplus = sum(
        int(float(sample['value'])) * (1/60)
        for sample in zapi.history.get(hostids = [10084], itemids = [42883], time_from = window_start, time_till = now_ts, output = "extend", limit = 15, history=0)
        if float(sample['value']) > 0
    )
    return previous_total + int(produced - surplus)
def pourcentage_autoconso_mois() -> int:
    """Percentage of locally produced energy self-consumed this month.

    Window: from the first day of the current month up to now.  The ratio is
    (scaled production minus exported surplus) over the raw scaled production
    of the panel (Zabbix item 44969).

    Fixes: the original issued the exact same history query for item 44969
    twice; the result is now fetched once and reused.  A guard avoids
    ZeroDivisionError when no production was recorded (returns 0).
    """
    zapi = createZapi()
    tt = int(time.mktime(datetime.now().timetuple()))
    tf = int(tt - 60 * 60 * 24 * datetime.now().day)
    # Fetch the scaled production history once for both accumulations.
    prod_history = zapi.history.get(hostids = [10084], itemids = [44969], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0)
    enr_prod_mae = 0  # production scaled to hourly energy (1/20: one sample / 3 min)
    enr_prod = 0      # raw scaled panel production (Panel_Prod_puissance_mae)
    for i in prod_history:
        v = int(float(i['value']))
        enr_prod_mae += v * (1/20)
        enr_prod += v
    surplus_prod = 0  # surplus derived from the balance (1/60: one sample / min)
    for i in zapi.history.get(hostids = [10084], itemids = [42883], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        if (float(i['value'])>0):
            surplus_prod += int(float(i['value']))*(1/60)
    enr_prod_et_conso = int(enr_prod_mae - surplus_prod)
    # Without any production the percentage is undefined; report 0 instead of
    # raising ZeroDivisionError.
    if enr_prod == 0:
        return 0
    return int(100 * (enr_prod_et_conso/enr_prod))
def enr_eolien() -> int:
    """Wind energy (Wh) over the last 15 minutes, from item Prod_eolienne."""
    zapi = createZapi()
    now_ts = int(time.mktime(datetime.now().timetuple()))
    window_start = int(now_ts - 60 * 15)
    # One power sample per minute -> each sample contributes 1/60 h of energy.
    total = sum(
        int(float(sample['value'])) * (1/60)
        for sample in zapi.history.get(hostids = [10084], itemids = [45197], time_from = window_start, time_till = now_ts, output = "extend", limit = 15, history=0)
    )
    return int(total)
def enr_solaire() -> int:
    """Solar energy (Wh) over the last 15 minutes, from item Prod_solaire."""
    zapi = createZapi()
    now_ts = int(time.mktime(datetime.now().timetuple()))
    window_start = int(now_ts - 60 * 15)
    # One power sample per minute -> each sample contributes 1/60 h of energy.
    total = sum(
        int(float(sample['value'])) * (1/60)
        for sample in zapi.history.get(hostids = [10084], itemids = [45198], time_from = window_start, time_till = now_ts, output = "extend", limit = 15, history=0)
    )
    return int(total)
def enr_metha() -> int:
    """Methanisation energy (Wh) over the last 15 minutes (Prod_methanisation)."""
    zapi = createZapi()
    now_ts = int(time.mktime(datetime.now().timetuple()))
    window_start = int(now_ts - 60 * 15)
    # One power sample per minute -> each sample contributes 1/60 h of energy.
    total = sum(
        int(float(sample['value'])) * (1/60)
        for sample in zapi.history.get(hostids = [10084], itemids = [45248], time_from = window_start, time_till = now_ts, output = "extend", limit = 15, history=0)
    )
    return int(total)
def part_eolien_prod_15min() -> int:
    """Share (%) of wind in total renewable production over the last 15 min.

    Sums the solar (45198), methanisation (45248) and wind (45197) samples
    into a total, tracking the wind contribution separately.

    Fix: returns 0 instead of raising ZeroDivisionError when no production
    sample was recorded in the window.
    """
    zapi = createZapi()
    tt = int(time.mktime(datetime.now().timetuple()))
    tf = int(tt - 60 * 15)
    prod_enr = 0          # total renewable production over the last 1/4h
    puissance_eolien = 0  # wind production over the last 1/4h
    for i in zapi.history.get(hostids = [10084], itemids = [45198], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_solaire
    for i in zapi.history.get(hostids = [10084], itemids = [45248], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_methanisation
    for i in zapi.history.get(hostids = [10084], itemids = [45197], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_eolienne
        puissance_eolien += int(i['value'])
    if prod_enr == 0:
        return 0
    return int(100*puissance_eolien/prod_enr)
def part_solaire_prod_15min() -> int:
    """Share (%) of solar in total renewable production over the last 15 min.

    Sums the solar (45198), methanisation (45248) and wind (45197) samples
    into a total, tracking the solar contribution separately.

    Fix: returns 0 instead of raising ZeroDivisionError when no production
    sample was recorded in the window.
    """
    zapi = createZapi()
    tt = int(time.mktime(datetime.now().timetuple()))
    tf = int(tt - 60 * 15)
    prod_enr = 0           # total renewable production over the last 1/4h
    puissance_solaire = 0  # solar production over the last 1/4h
    for i in zapi.history.get(hostids = [10084], itemids = [45198], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_solaire
        puissance_solaire += int(i['value'])
    for i in zapi.history.get(hostids = [10084], itemids = [45248], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_methanisation
    for i in zapi.history.get(hostids = [10084], itemids = [45197], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_eolienne
    if prod_enr == 0:
        return 0
    return int(100*puissance_solaire/prod_enr)
def part_metha_prod_15min() -> int:
    """Share (%) of methanisation in renewable production over the last 15 min.

    Sums the solar (45198), methanisation (45248) and wind (45197) samples
    into a total, tracking the methanisation contribution separately.

    Fix: returns 0 instead of raising ZeroDivisionError when no production
    sample was recorded in the window.
    """
    zapi = createZapi()
    tt = int(time.mktime(datetime.now().timetuple()))
    tf = int(tt - 60 * 15)
    prod_enr = 0         # total renewable production over the last 1/4h
    puissance_metha = 0  # methanisation production over the last 1/4h
    for i in zapi.history.get(hostids = [10084], itemids = [45198], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_solaire
    for i in zapi.history.get(hostids = [10084], itemids = [45248], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_methanisation
        puissance_metha += int(i['value'])
    for i in zapi.history.get(hostids = [10084], itemids = [45197], time_from = tf, time_till = tt, output = "extend", limit = 1440, history=0):
        prod_enr += int(i['value']) #Prod_eolienne
    if prod_enr == 0:
        return 0
    return int(100*puissance_metha/prod_enr)
# Debug run of the monthly self-consumption indicator.
print("\n DEBUT")
print(pourcentage_autoconso_mois())
print("FIN \n")
# Wrap all indicator values into a flat file for Zabbix-style consumption.
filename = "indics.csv"
finalpath = os.path.join(path, filename)
print("Path : ", finalpath)
# NOTE(review): ``finalpath`` is computed above but a hard-coded absolute
# path is opened instead -- confirm which destination is intended.
# Fix: use a context manager so the file is flushed and closed even if one
# of the indicator computations raises (the original handle was never closed).
with open("/home/indicateurs/Indicateurs-ELFE/indicateurs/indics.csv","w") as fichier:
    res1 = str(indic_appareils_connectes())
    fichier.write("\"Zabbix server\" Nombre_appareils_connectes_test " + res1 + "\n")
    res2 = str(pourcentage_app_lancés_24h())
    fichier.write("\"Zabbix server\" Pourcentage_app_lances_24h_test " + res2 + "\n")
    res_cumul = str(cumul_enr())
    fichier.write("\"Zabbix server\" Cumul_energie_placee_test " + res_cumul + "\n")
    res_conso = str(conso_enr_placee())
    fichier.write("\"Zabbix server\" Pourcentage_energie_consommee_placee_test " + res_conso + "\n")
    res_cautoconso = str(cumul_enr_autoconso_opti())
    fichier.write("\"Zabbix server\" Energie_autoconsommee_test " + res_cautoconso + "\n")
    res_pautoconso = str(pourcentage_autoconso_mois())
    fichier.write("\"Zabbix server\" Pourcentage_autoconsommation_test " + res_pautoconso + "\n")
    res_enreol = str(enr_eolien())
    fichier.write("\"Zabbix server\" Enr_eolienne_test " + res_enreol + "\n")
    res_enrsol = str(enr_solaire())
    fichier.write("\"Zabbix server\" Enr_solaire_test " + res_enrsol + "\n")
    res_enrmeth = str(enr_metha())
    fichier.write("\"Zabbix server\" Enr_methanisation_test " + res_enrmeth + "\n")
    res_eol = str(part_eolien_prod_15min())
    fichier.write("\"Zabbix server\" Part_eolien_prod " + res_eol + "\n")
    res_sol = str(part_solaire_prod_15min())
    fichier.write("\"Zabbix server\" Part_solaire_prod " + res_sol + "\n")
    res_meth = str(part_metha_prod_15min())
    fichier.write("\"Zabbix server\" Part_metha_prod " + res_meth + "\n")
# Connection to Zabbix.
try:
    zab = ConnectionZabbix('192.168.30.111', 'Zabbix server')
    print(f"Connection to Zabbix made successfully.")
except Exception as ex:
    print("Connection to Zabbix could not be made due to the following error: \n", ex)
    # NOTE(review): if the connection fails, ``zab`` stays undefined and every
    # addMeasurement() call below raises NameError -- confirm this is intended.
def addMeasurement(cle:str, res:str):
    """Queue one (key, value) measurement on the Zabbix connection.

    ``zb`` is assumed to be the Zabbix client module imported at file level
    (not visible in this chunk) -- confirm against the file's imports.
    """
    m = zb.Measurement(zab.host, cle, res)
    zab.measurements.add_measurement(m)
# Queue every indicator value (computed above) for the Zabbix server as well.
addMeasurement("Pourcentage_app_lances_24h_test", res2)
addMeasurement("Nombre_appareils_connectes_test", res1)
addMeasurement("Cumul_energie_placee_test", res_cumul)
addMeasurement("Pourcentage_energie_consommee_placee_test", res_conso)
addMeasurement("Energie_autoconsommee_test", res_cautoconso)
addMeasurement("Pourcentage_autoconsommation_test", res_pautoconso)
addMeasurement("Enr_eolienne_test", res_enreol)
addMeasurement("Enr_solaire_test", res_enrsol)
addMeasurement("Enr_methanisation_test", res_enrmeth)
# NOTE(review): the Part_*_prod values written to the file are not queued
# here -- confirm whether that omission is intentional.
# Flush the queued measurements to the Zabbix server.
# NOTE(review): the ``await`` implies this section runs inside an async
# function (presumably the ``main()`` referenced below, defined earlier in
# the file) -- the flattened source does not show the enclosing scope.
try:
    await zab.response()
    print(f"Measurements well send to the Zabbix server")
except Exception as ex:
    print("Measurements could not be send to the Zabbix server due to the following error: \n", ex)
if __name__ == "__main__":
    asyncio.run(main())
| Energies-citoyennes-en-Pays-de-Vilaine/Indicateurs-ELFE | indicateurs/main.py | main.py | py | 26,063 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.executable",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.no... |
1412018915 | import pandas as pd
import requests
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import transforms
import numpy as np
# Fetch day-ahead price data from the Elering API.
# The date bounds are already URL-encoded ('%3A' is ':').
start = "2022-04-01T00%3A00%3A00.000Z"
end = "2023-03-31T23%3A59%3A59.999Z"
url = f"https://dashboard.elering.ee/api/nps/price?start={start}&end={end}"
# Fix: give the request a timeout so the script cannot hang forever on a
# stalled connection.
response = requests.get(url, timeout=30)
if response.status_code == 200:
    # Estonian ("ee") hourly prices: list of {timestamp, price} records.
    data = response.json()["data"]["ee"]
else:
    raise Exception(f"Failed to fetch data from Elering API. Status code: {response.status_code}")
# Convert data to a DataFrame; timestamps are Unix epoch seconds.
df = pd.DataFrame(data)
df["timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
df["date"] = df["timestamp"].dt.date
# Convert fixed price from 15 eurocents/kWh to EUR/MWh (METHOD 1).
# 15 c/kWh = 0.15 EUR/kWh = 150 EUR/MWh.
fixed_price = 15 * 10
# Calculate the average stock market electricity price for each separate day (METHOD 2)
daily_average_market_prices = df.groupby("date")["price"].mean()
# Calculate the cost using the 4 cheapest hours of each day (METHOD 3)
cheapest_hours_df = df.groupby("date", as_index=False).apply(lambda x: x.nsmallest(4, "price"))
cheapest_hours_daily_avg = cheapest_hours_df.groupby("date")["price"].mean()
# Scenario: consume electricity 4 hours every day at 2 kW
# (the original comment said 10 kW, which did not match the code below).
daily_consumption = 4 # in hours
daily_power_consumption_kw = 2 # in kilowatts (kW)
daily_energy_consumption_kwh = daily_power_consumption_kw * daily_consumption # in kilowatt-hours (kWh)
# Calculate daily costs: price is EUR/MWh, hence the /1000 for kWh.
fixed_daily_cost = fixed_price * daily_energy_consumption_kwh / 1000
average_market_daily_cost = daily_average_market_prices * daily_energy_consumption_kwh / 1000
cheapest_hours_daily_cost = cheapest_hours_daily_avg * daily_energy_consumption_kwh / 1000
# Calculate cumulative costs (the fixed tariff is constant, so it is repeated
# once per day before the cumulative sum).
fixed_cumulative_cost = np.cumsum(np.repeat(fixed_daily_cost, len(average_market_daily_cost)))
average_market_cumulative_cost = np.cumsum(average_market_daily_cost)
cheapest_hours_cumulative_cost = np.cumsum(cheapest_hours_daily_cost)
# CREATE A LINE GRAPH comparing the three pricing methods.
dates = daily_average_market_prices.index
plt.plot(dates, fixed_cumulative_cost, label="Fikseeritud hind (15 eurosenti/kWh)", linewidth=2)
plt.plot(dates, average_market_cumulative_cost, label="Päeva keskmine turuhind", linewidth=2)
plt.plot(dates, cheapest_hours_cumulative_cost, label="Päeva 4 odavamat tundi", linewidth=2)
# Add labels, title and legend (labels are in Estonian).
plt.ylabel("Summaarne kulu (EUR)", fontsize=14)
plt.xlabel("Kuupäev", fontsize=14)
plt.title("Eesti elektrikulude võrdlus (2022-04-01 kuni 2023-03-31)", fontsize=16)
plt.legend(fontsize=12)
# Configure x-axis ticks: one tick per month, rotated for readability.
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
plt.xticks(rotation=45, fontsize=12)
plt.yticks(fontsize=12)
# Add a textbox stating the consumption assumption.
# Fix: reuse the ``ax`` fetched above instead of calling plt.gca() twice.
textbox_props = dict(boxstyle='round, pad=0.3', edgecolor='black', facecolor='lightgrey', linewidth=1, alpha=0.8)
ax.text(0.01, 0.84, f"Eeldus: 4 tundi päevas, 2 kWh juures (kokku 8 kWh).", fontsize=12, bbox=textbox_props, transform=ax.transAxes)
# Add gridlines
plt.grid()
# Label the final value of each curve next to its last data point.
plt.annotate("{:.2f} EUR".format(fixed_cumulative_cost[-1]), xy=(dates[-1], fixed_cumulative_cost[-1]), xytext=(-60, 8), textcoords='offset points', fontsize=14)
plt.annotate("{:.2f} EUR".format(average_market_cumulative_cost[-1]), xy=(dates[-1], average_market_cumulative_cost[-1]), xytext=(-60, 8), textcoords='offset points', fontsize=14)
plt.annotate("{:.2f} EUR".format(cheapest_hours_cumulative_cost[-1]), xy=(dates[-1], cheapest_hours_cumulative_cost[-1]), xytext=(-60, 8), textcoords='offset points', fontsize=14)
# Save it as "electricity_costs.png"
plt.savefig("electricity_costs.png", dpi=300)
# Display the graph
plt.show()
| adaresa/nutipistik | analysis/electricity_costs.py | electricity_costs.py | py | 3,880 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"... |
12733542788 | import base64
import hashlib
import json
from urllib import quote_plus
from internetofmoney.managers.BaseManager import BaseManager
class PayPalBaseManager(BaseManager):
    """Shared behaviour of the PayPal money managers.

    PayPal needs no separate registration step, so a freshly constructed
    manager is immediately marked as registered; only login remains.
    """

    def __init__(self, database, cache_dir='cache'):
        super(PayPalBaseManager, self).__init__(database, cache_dir=cache_dir)
        # No registration required, one can immediately login.
        self.persistent_storage['registered'] = True
        self.logged_in = False
        self.access_token = None
        self.guid = "FB04564F-6675-454A-972E-9A50A5D75A8B"
        # Cache of transaction descriptions already seen, so repeated
        # lookups for the same transaction do not trigger new requests.
        self.known_descriptions = {}

    def is_logged_in(self):
        """Whether a PayPal session is currently active."""
        return self.logged_in

    def persistent_storage_filename(self):
        """Name of the JSON file backing this manager's persistent storage."""
        return 'paypal_account.json'

    def get_bank_name(self):
        """Human-readable name of the bank."""
        return 'PayPal'

    def get_bank_id(self):
        """Short identifier of the bank."""
        return 'PP'

    def is_switch_capable(self):
        """Currently, PayPal is not capable of being a money switch."""
        return False

    def generate_auth_nonce(self, timestamp, email, password):
        """Compute the base64-encoded SHA-256 nonce used for authentication.

        Only the first three password characters enter the digest, together
        with the app id, the device guid (twice), the e-mail address and the
        timestamp.
        """
        material = "com.yourcompany.PPClient" + self.guid + self.guid + email + password[:3] + str(timestamp)
        hasher = hashlib.sha256()
        hasher.update(material)
        return base64.encodestring(hasher.digest()).rstrip('\n')

    def get_paypal_headers(self, is_json=False):
        """Build the HTTP headers for a request to the PayPal mobile API.

        Uses the OAuth bearer token once logged in and a fixed Basic
        credential otherwise; ``is_json`` selects the Content-Type.
        """
        if self.logged_in:
            auth_header = 'Bearer %s' % self.access_token
        else:
            auth_header = 'Basic QVY4aGRCQk04MHhsZ0tzRC1PYU9ReGVlSFhKbFpsYUN2WFdnVnB2VXFaTVRkVFh5OXBtZkVYdEUxbENxOg=='
        content_type = 'application/json' if is_json else 'application/x-www-form-urlencoded'
        # Static context describing the (emulated) iPhone client.
        app_context = {
            "deviceLocationCountry": "Unknown",
            "deviceOS": "iPhone OS",
            "appName": "com.yourcompany.PPClient",
            "sdkVersion": "1.0.0",
            "appGuid": self.guid,
            "appVersion": "6.11.0",
            "deviceLocale": "nl_NL",
            "deviceNetworkType": "Unknown",
            "deviceOSVersion": "8.1.2",
            "deviceLanguage": "nl",
            "deviceType": "iPhone",
            "deviceMake": "Apple",
            "deviceModel": "iPhone",
            "deviceNetworkCarrier": "Unknown"
        }
        return {
            'Accept-Language': 'nl-nl',
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Content-Type': content_type,
            'User-Agent': 'PayPal/116 (iPhone; iOS 8.1.2; Scale/2.00)',
            'X-PayPal-ConsumerApp-Context': quote_plus(json.dumps(app_context)),
            'Authorization': auth_header,
        }
| devos50/ipv8-android-app | app/src/main/jni/lib/python2.7/site-packages/internetofmoney/managers/paypal/PayPalBaseManager.py | PayPalBaseManager.py | py | 2,747 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "internetofmoney.managers.BaseManager.BaseManager",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "hashlib.sha256",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "base64.encodestring",
"line_number": 43,
"usage_type": "call"
},
{
... |
73737537315 | # -*- coding: utf-8 -*-
"""Realise an optimisation of the hyper-parameters.
"""
__authors__ = "emenager, tnavez"
__contact__ = "etienne.menager@inria.fr, tanguy.navez@inria.fr"
__version__ = "1.0.0"
__copyright__ = "(c) 2022, Inria"
__date__ = "Nov 7 2022"
import sys
import os
import optuna
import pathlib
import torch
import torch.optim as optim
import numpy as np
import joblib
sys.path.insert(0, str(pathlib.Path(__file__).parent.absolute())+"/../")
sys.path.insert(0, str(pathlib.Path(__file__).parent.absolute()))
from Libraries.utils import ask_user_sampling_stat_input, compute_std_normalization, compute_minmax_normalization
from Libraries.Learning.DataSetManager import DataSetManager, get_data_loader
from Libraries.database import *
import Libraries.Learning.MLP.learning_tools as MLPlearning_tools
PATH = str(pathlib.Path(str(pathlib.Path(__file__).parent.absolute())))
os.makedirs(PATH + "/optimisation", exist_ok=True)
def init_optimisation(config, ratio_test_train = 0.25):
    """Build the train/test datasets used by the hyper-parameter search.

    Parameters
    ----------
    config: Config
        Config instance for the simulated scene.
    ratio_test_train: float
        Size of the test set relative to the train set.

    Outputs
    -------
    train_dataset: DataSetManager
        Dataset containing data for the learning step.
    test_dataset: DataSetManager
        Randomly sampled dataset for the validation step.
    """
    n_samples, strategy, _id_sampling = ask_user_sampling_stat_input(config)
    train_stats = list(query_sampling_stats(model_name=config.model_name, sampling_strategy=strategy, n_samples=n_samples))
    test_size = int(ratio_test_train * train_stats[0]['n_samples'])
    test_stats = list(query_sampling_stats(model_name=config.model_name, sampling_strategy="Random", n_samples=test_size))
    train_dataset = DataSetManager(config.model_name, id_sampling_stats=train_stats[0]['id'], train=True)
    test_dataset = DataSetManager(config.model_name, id_sampling_stats=test_stats[0]['id'], train=False)
    return train_dataset, test_dataset
def init_network(parameters, train_dataset, test_dataset, config):
    """Build dataloaders, MLP, optimizer and LR scheduler for one trial.

    Parameters
    ----------
    parameters: dictionary
        Trial hyper-parameters ("hiddensize", "hiddenlayer").
    train_dataset, test_dataset: DataSetManager
        Datasets for the learning and validation steps.
    config: Config
        Config instance for the simulated scene.

    Outputs
    -------
    dataloader, dataloader_test: torch dataloaders
        Dataloaders for the learning and validation steps.
    model: torch model
        The neural network to optimise.
    optimizer: torch.optim optimizer
        Adam optimizer for the network.
    scheduler: torch.optim.lr_scheduler.ReduceLROnPlateau
        Scheduler that lowers the learning rate on loss plateaus.
    """
    latent_size = int(parameters["hiddensize"])
    n_hidden = int(parameters["hiddenlayer"])
    # Constraint count read off one training sample; this may change once
    # collisions are included.
    n_constraint = len(train_dataset.X[0][2])
    batch_size = config.config_network["batch_size"]
    loader_train = get_data_loader(train_dataset, batch_size, True)
    loader_test = get_data_loader(test_dataset, batch_size, True)
    # Output: upper-triangular entries of an n_constraint matrix plus one
    # value per constraint; the input additionally carries n_constraint values.
    output_size = len(np.triu_indices(n=n_constraint)[0]) + n_constraint
    input_size = output_size + n_constraint
    model = MLPlearning_tools.MLP(input_size, output_size, latent_size, n_hidden_layers=n_hidden, dropout_probability=config.config_network["dropout_probability"])
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-05)
    return loader_train, loader_test, model, optimizer, scheduler
def eval_loop(model, optimizer, dataloader, scaling, config, is_train, previous_nb_update):
    """
    Run the network over one full dataloader pass and compute the mean loss.

    When ``is_train`` is True, a gradient step is taken per batch and the
    update counter is incremented; otherwise the pass is evaluation-only.

    Parameters
    ----------
    model: torch model
        The neural network we want to optimize.
    optimizer: torch.optim optimizer
        The optimizer for the neural network.
    dataloader: torch dataloader
        Dataloader for the learning step/the validation step.
    scaling: list of tensor
        Scaling value for the data normalisation/standardisation.
    config: Config
        Config instance for the simulated scene.
    is_train: bool
        Train or test set.
    previous_nb_update: int
        Number of gradient descents already done (= number of updates).

    Outputs
    ----------
    e_loss: float
        Mean loss over all batches (a torch tensor, averaged per batch).
    nb_update: int
        Number of updates after the eval loop.
    """
    e_loss = 0
    n_batches = 0
    nb_update = previous_nb_update
    for i, data in enumerate(dataloader):
        X_batch, Y_batch = data
        # Normalise the batch according to the configured scheme.
        if config.config_network["data_normalization"] == "Std":
            X, Y = MLPlearning_tools.create_data_std(X_batch, Y_batch, scaling[0], scaling[1], scaling[2], scaling[3], design_to_MM=False)
        elif config.config_network["data_normalization"] == "MinMax":
            X, Y = MLPlearning_tools.create_data_minmax(X_batch, Y_batch, scaling[0], scaling[1], scaling[2], scaling[3], design_to_MM=False)
        else:
            X, Y = MLPlearning_tools.create_data(X_batch, Y_batch, design_to_MM=False)
        Y_pred = model(X)
        if config.config_network["mode_loss"] != "physics_informed":
            loss = MLPlearning_tools.create_loss(Y_pred, Y, config.config_network["mode_loss"])
        else:
            # NOTE(review): ``n_constraint`` is not defined in this function
            # (it is a local of init_network), so this branch raises NameError
            # as written -- confirm where the constraint count should come from.
            loss = MLPlearning_tools.create_physics_informed_loss(Y_pred, Y, X, n_constraint, len(config.get_actuators_variables()),
                config.config_network["data_normalization"], scaling, is_contact=(len(config.get_contacts_variables()) != 0))
        if is_train:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            nb_update+= 1
        e_loss += loss
        n_batches += 1
    e_loss /= n_batches
    return e_loss, nb_update
def main_loop(parameters, train_dataset, test_dataset, scaling, config):
    """Train a candidate network and return its final validation loss.

    Parameters
    ----------
    parameters: dictionary
        The hyper-parameters under evaluation.
    train_dataset, test_dataset: DataSetManager
        Datasets for the learning and validation steps.
    scaling: list of tensor
        Scaling value for the data normalisation/standardisation.
    config: Config
        Config instance for the simulated scene.

    Outputs
    -------
    test_loss: float
        Validation-set loss after the training budget is exhausted.
    """
    loader_train, loader_test, model, optimizer, scheduler = init_network(parameters, train_dataset, test_dataset, config)
    n_updates = 0
    max_updates = 1000
    while n_updates < max_updates:
        print(">> Number of update: {}/{}".format(n_updates, max_updates))
        # One training pass, then step the scheduler on the training loss.
        model.train()
        train_loss, n_updates = eval_loop(model, optimizer, loader_train, scaling, config, True, n_updates)
        scheduler.step(train_loss)
        # One gradient-free validation pass.
        model.eval()
        with torch.no_grad():
            val_loss, n_updates = eval_loop(model, optimizer, loader_test, scaling, config, False, n_updates)
    return val_loss.item()
def objective(trial, train_dataset, test_dataset, scaling, config):
    """Optuna objective: sample hyper-parameters and train one candidate.

    Parameters
    ----------
    trial: optuna trial
        The trial for the study.
    train_dataset, test_dataset: DataSetManager
        Datasets for the learning and validation steps.
    scaling: list of tensor
        Scaling value for the data normalisation/standardisation.
    config: Config
        Config instance for the simulated scene.

    Outputs
    -------
    The validation loss of the network trained with the sampled parameters.
    """
    sampled = {
        "hiddensize": trial.suggest_int("hiddensize", 32, 512, step=1),
        "hiddenlayer": trial.suggest_int("hiddenlayer", 2, 4, step=1),
    }
    return main_loop(sampled, train_dataset, test_dataset, scaling, config)
def hyperparameters_optimisation(config, n_cores = 1, load = False, n_optimisation = 1000):
    """Optimise the network hyper-parameters with Optuna.

    Parameters:
    ----------
    config: Config
        Config instance for the simulated scene.
    n_cores: int
        Number of cores used for parallel optimisation.
    load: bool
        Load a saved study to inspect its results instead of running a new one.
    n_optimisation: int
        Number of optimisation rounds (n_optimisation * 20 * n_cores trials).
    """
    normalization_method = config.config_network["data_normalization"]
    if not load:
        print(">> Init dataset ...")
        train_dataset, test_dataset = init_optimisation(config)
        # Pre-compute the normalisation statistics once; reused by every trial.
        scaling = []
        if normalization_method == "Std":
            stats = compute_std_normalization(train_dataset, design_to_MM=False)
            scaling = list(stats)
        elif normalization_method == "MinMax":
            stats = compute_minmax_normalization(train_dataset, design_to_MM=False)
            scaling = list(stats)
        print(">> Start optimisation ...")
        study = optuna.create_study(direction='minimize')
        for _ in range(n_optimisation):
            study.optimize(lambda trial: objective(trial, train_dataset, test_dataset, scaling, config),
                           n_jobs=n_cores, n_trials=20 * n_cores, timeout=None)
            # Checkpoint the study after every optimisation round.
            joblib.dump(study, PATH + "/optimisation/study.pkl")
            print(">> Best Params ", study.best_params)
    else:
        study = joblib.load(PATH + "/optimisation/study.pkl")
        print(">> Best trial until now:")
        print(">> Value: ", study.best_trial.value)
        print(">> Params: ")
        params = dict(study.best_trial.params.items())
        print("hiddensize: {},".format(params["hiddensize"]))
print("hiddenlayer: {},".format(params["hiddenlayer"])) | SofaDefrost/CondensedFEMModel | Applications/NetworkHyperparametersOptimisation.py | NetworkHyperparametersOptimisation.py | py | 11,073 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"lin... |
39603560145 | import math
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from transformers import BertModel
d_model = 768
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # x * Phi(x), where Phi is the standard normal CDF expressed via erf.
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
class BertLayerNorm(nn.Module):
    """Layer normalisation in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super(BertLayerNorm, self).__init__()
        # Learnable affine parameters, initialised to the identity transform.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        var = (x - mean).pow(2).mean(-1, keepdim=True)
        normalised = (x - mean) / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normalised + self.bias
class BertOutput(nn.Module):
    """Projects 3072-d intermediate activations back to 768-d, then applies
    dropout and a residual layer-norm."""

    def __init__(self):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(3072, 768)
        self.LayerNorm = BertLayerNorm(768, eps=1e-12)
        self.dropout = nn.Dropout(0.1)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(projected + input_tensor)
class BertIntermediate(nn.Module):
    """Expands 768-d hidden states into the 3072-d feed-forward space with GELU."""

    def __init__(self):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(768, 3072)

    def forward(self, hidden_states):
        return gelu(self.dense(hidden_states))
class BertSelfOutput(nn.Module):
    """768 -> 768 projection with dropout and a residual layer-norm."""

    def __init__(self):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(768, 768)
        self.LayerNorm = BertLayerNorm(768, eps=1e-12)
        self.dropout = nn.Dropout(0.1)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class KLBert(nn.Module):
    """Two-stream (text + knowledge) BERT classifier with incongruence attention.

    Each stream is encoded by its own BERT; intra- and cross-stream
    incongruence attention is applied, and the two streams are fused through
    a learned gate before a binary classification head.
    """

    def __init__(self):
        super(KLBert, self).__init__()
        self.text_bert = BertModel.from_pretrained('bert-base-uncased', return_dict=False)
        self.know_bert = BertModel.from_pretrained('bert-base-uncased', return_dict=False)
        self.W_gate = nn.Linear(768 * 2, 1)
        self.intermediate = BertIntermediate()
        self.output = BertSelfOutput()
        self.classifier = nn.Linear(768, 2)
        self.secode_output = BertOutput()
        self.incongruence1 = Incongruence()
        self.incongruence2 = Incongruence()
        self.incongruence_cross = IncongruenceCross()

    def forward(self, text_ids, text_mask, know_ids, know_mask, labels=None):
        """Return cross-entropy loss when `labels` is given, else the raw logits."""
        text_info, pooled_text_info = self.text_bert(input_ids=text_ids, attention_mask=text_mask)
        know_info, pooled_know_info = self.know_bert(input_ids=know_ids, attention_mask=know_mask)
        text_info_mask_mtr = get_attn_pad_mask(text_mask, text_mask)
        know_info_mask_mtr = get_attn_pad_mask(know_mask, know_mask)
        text_know_mask_mtr = get_attn_pad_mask(text_mask, know_mask)
        # BUG FIX: the original calls also passed the raw token-id tensors
        # (know_ids / text_ids) as extra positional arguments, but
        # Incongruence.forward takes (hidden_states, mask) and
        # IncongruenceCross.forward takes (text, mask, knowledge) only, so
        # every forward pass raised TypeError.  The unused id arguments are
        # dropped to match the defined signatures.
        know_info_incong = self.incongruence2(know_info, know_info_mask_mtr)
        text_info_incong = self.incongruence1(text_info, text_info_mask_mtr)
        know_text_incong = self.incongruence_cross(text_info_incong, text_know_mask_mtr, know_info_incong)
        # Sequence-level mean vectors broadcast back over the text positions.
        know_info_mean1 = torch.mean(know_info, dim=1).unsqueeze(1).expand(text_info.size(0), text_info.size(1), text_info.size(-1))
        text_info_mean1 = torch.mean(text_info, dim=1).unsqueeze(1).expand(text_info.size(0), text_info.size(1), text_info.size(-1))
        combine_info = torch.cat([text_info, know_info_mean1 + text_info_mean1], dim=-1)
        # (Commented-out ablation variants -- "add" fusion and no-gate fusion --
        # removed for clarity; see repository history if needed.)
        # Learned gate balancing the text stream against the cross stream.
        alpha = self.W_gate(combine_info)
        alpha = torch.sigmoid(alpha)
        text_info_end = torch.matmul(alpha.transpose(1, 2), text_info_incong)
        know_text = torch.matmul((1 - alpha).transpose(1, 2), know_text_incong)
        res = self.output(know_text, text_info_end)
        intermediate_res = self.intermediate(res)
        res = self.secode_output(intermediate_res, res)
        logits = self.classifier(res)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, 2), labels.view(-1))
            return loss
        else:
            return logits
class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward block (d_model -> 3072 -> d_model) with GELU,
    dropout and a residual layer-norm."""

    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.lin1 = nn.Linear(d_model, 3072)
        self.lin2 = nn.Linear(3072, d_model)
        self.layer_norm = BertLayerNorm(768, eps=1e-12)
        self.dropout = nn.Dropout(0.1)

    def forward(self, inputs):
        expanded = gelu(self.lin1(inputs))
        contracted = self.dropout(self.lin2(expanded))
        # Residual connection around the whole feed-forward transform.
        return self.layer_norm(contracted + inputs)
def get_attn_pad_mask(seq_q, seq_k):
    """Build a (batch, len_q, len_k) boolean mask that is True wherever the
    key sequence holds a padding token (id 0)."""
    bsz, len_q = seq_q.size()
    _, len_k = seq_k.size()
    # True at padding positions of the keys, broadcast over every query row.
    key_is_pad = seq_k.data.eq(0).unsqueeze(1)
    return key_is_pad.expand(bsz, len_q, len_k)
class Incongruence(nn.Module):
    """Self-attention-like block whose attention weights come from pairwise
    euclidean distances instead of dot products, so tokens attend most to the
    tokens they are *dissimilar* (incongruent) to."""

    def __init__(self):
        super(Incongruence, self).__init__()
        self.incong_lin1 = nn.Linear(d_model, d_model)
        self.incong_lin2 = nn.Linear(d_model, d_model)
        self.incong_lin3 = nn.Linear(d_model, d_model)
        self.layer_norm = BertLayerNorm(768, eps=1e-12)
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, text_info, text_info_mask_mtr):
        queries = self.incong_lin1(text_info)
        keys = self.incong_lin2(text_info)
        values = self.incong_lin3(text_info)
        # Euclidean distances as attention logits: larger distance, more weight.
        logits = torch.cdist(queries, keys).masked_fill(text_info_mask_mtr, -1e9)
        weights = torch.softmax(logits, dim=-1)
        attended = torch.matmul(weights, values)
        fused = self.layer_norm(text_info + attended)
        return self.pos_ffn(fused)
class IncongruenceCross(nn.Module):
    """Cross-stream incongruence attention: text positions attend to the
    knowledge positions that are most *distant* from them."""

    def __init__(self):
        super(IncongruenceCross, self).__init__()
        self.incong_lin1 = nn.Linear(d_model, d_model)
        self.incong_lin2 = nn.Linear(d_model, d_model)
        self.incong_lin3 = nn.Linear(d_model, d_model)
        self.layer_norm = BertLayerNorm(768, eps=1e-12)
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, text_info, text_know_mask_mtr, know_info):
        queries = self.incong_lin1(text_info)
        keys = self.incong_lin2(know_info)
        values = self.incong_lin3(know_info)
        # Euclidean distances as attention logits: larger distance, more weight.
        logits = torch.cdist(queries, keys).masked_fill(text_know_mask_mtr, -1e9)
        weights = torch.softmax(logits, dim=-1)
        gathered = torch.matmul(weights, values)
        fused = self.layer_norm(text_info + gathered)
        return self.pos_ffn(fused)
| cathy345345/SD-based-on-AIEN-and-ICA | models.py | models.py | py | 8,329 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.erf",
"line_number": 17,
"usage_type": "call"
},
{
"api... |
12300684128 | '''
Parses Yahoo Groups messages originally in JSON into e-mail readable format.
Must be placed in folder with JSON files to parse - however, could easily be
modified to work from single location.
'''
import json
import glob, os
import email
import html
import re
redacted = input("Should the archive be redacted [enter Y / N]? ")
seen = set()
threads = dict()
subjects = dict()
def redact(jsonMsg, jsonStr, mid):
    """Convert one raw e-mail string into display text, optionally stripping
    identifying headers.

    Parameters: the parsed JSON message (unused here, kept for existing call
    sites), the raw RFC-2822 message text, and the message id.  Sets and
    returns the module-level ``msg`` string.
    """
    global msg
    msgcontent = email.message_from_string(jsonStr)
    # BUG FIX: the original test `redacted.lower() == "y" or "yes"` was always
    # true because the non-empty string "yes" is truthy, so redaction ran
    # unconditionally.  Compare against both accepted answers instead.
    if redacted.lower() in ("y", "yes"):
        # Header contents are inconsistent from message to message, so delete
        # every header that could carry identifying information.  Even without
        # redaction, many headers only contain the handle before "@" anyway.
        del msgcontent['from'], msgcontent['to'], msgcontent['cc'], msgcontent['return-path'], msgcontent['x-sender'], msgcontent['X-Yahoo-Profile']
    msgstr = "Post #%s:\n\n%s" % (mid, msgcontent)
    # Unescape HTML character codes.
    msg = html.unescape(msgstr)
    return msg
def threadparse(tid, mid, msg, threadSub):
    """File *msg* under its thread.

    A thread id equals the message id of the thread's first post, so a tid
    already present in ``seen`` means the opening post was processed earlier.
    First posts create the thread entry and record the subject used later for
    the output filename.
    """
    if tid not in seen:
        threads[tid] = [msg]
        subjects[tid] = threadSub
    else:
        threads[tid].append(msg)
    # Remember this message so downthread replies can find their thread.
    seen.add(mid)
def parser():
    """Read every ``*.json`` Yahoo-Groups export in the working directory and
    feed each message through redact() and threadparse().

    Returns the last parsed JSON message.  (The original ended with four
    additional unreachable ``return`` statements and a dead local
    ``threads = dict()`` that shadowed the module-level dict; both removed.)
    """
    global jsonMsg
    global jsonStr
    global tid
    global mid
    global subject  # NOTE(review): declared but never assigned; kept for compatibility.
    read_files = glob.glob("*.json")
    for f in read_files:
        with open(f, 'r', encoding="utf8") as current_file:
            # Collect data for the processing functions.
            raw = current_file.read()
            jsonMsg = json.loads(raw)
            # Topic ID: identical to the message id of the thread's first post.
            tid = str(jsonMsg['ygData']['topicId'])
            # Message ID number.
            mid = str(jsonMsg['ygData']['msgId'])
            # Raw e-mail text from the JSON payload.
            jsonStr = str(jsonMsg['ygData']['rawEmail'])
            # Message subject, if present.
            if 'subject' in jsonMsg['ygData']:
                threadSub = str(jsonMsg['ygData']['subject'])
            else:
                threadSub = "No Subject"
            # Process text.
            redact(jsonMsg, jsonStr, mid)
            threadparse(tid, mid, msg, threadSub)
    return jsonMsg
def main():
    """Parse all JSON message files, then write one labelled text file per thread."""
    parser()
    # Write each thread to an individual, labeled textfile.
    for key, value in threads.items():
        # Get and clean up subjects for the file title.
        if key in subjects:
            titleSub = str(subjects[key])
            # Shorten title to fit filename length limits.
            titleShort = titleSub[:100] + (titleSub[100:] and '...')
            # Escape unusable characters from the filename, like "?".
            # BUG FIX: raw string avoids the deprecated bare '\w'/'\-' escapes.
            title = re.sub(r'[^\w\-_\. ]', '_', titleShort)
        # NOTE(review): if a key were ever missing from `subjects`, `title`
        # would carry over from the previous iteration; threadparse() always
        # records a subject, so this should not occur in practice.
        with open('Thread #%s - %s.txt' % (key, title), 'w', encoding='utf-8') as msgfile:
            # Write thread to textfile.
            threadcount = len(value)
            msgfile.write("Number of Posts in Thread: %s\n\n" % threadcount)
            for idx, item in enumerate(value):
                # BUG FIX: separators are now decided by position; the original
                # membership test `item in value[:-1]` misfired when two posts
                # had identical text.
                if idx < len(value) - 1:
                    msgfile.write("%s\n\n-------Next Post in Thread-------\n\n" % item)
                else:
                    msgfile.write("%s" % item)
if __name__=="__main__":
main()
| apdame/YG-tools | threadparser.py | threadparser.py | py | 4,201 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "email.message_from_string",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "html.unescape",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "email.message_from_string",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "htm... |
43963665477 |
from inspect import getsourcefile
import os.path as path, sys
current_dir = path.dirname(path.abspath(getsourcefile(lambda:0)))
sys.path.insert(0, current_dir[:current_dir.rfind(path.sep)])
from AP import *
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from scipy import optimize
import matplotlib.ticker as ticker
#COLOR_STYLE = ["red","green","blue"]
Y_LABEL = r"Zählrate"
X_LABEL = r"Stromstärke in $mA$"
X_START =0 * 0.001
Y_START =0
X_END = 1.1 * 0.001
Y_END = 2200
X_MAJOR_TICK = 0.2 *0.001
Y_MAJOR_TICK =500
X_MINOR_TICK = 0.05 *0.001
Y_MINOR_TICK = 100
SAVE_AS = "./XST/Detektortotzeit.pdf"
path_ = "./XST/data.xls"
data = getTableFromCells("E5","O35",path_,"V5")
Totdat = [[i* 0.0001 for i in range(1,11)],[]]
Totdat[1] = [data[i][19] for i in range(1,11)]
print(Totdat)
#-------- curve fit
def func(x, r, t):
    """Detector dead-time model: observed rate for true rate r*x and dead time t."""
    true_rate = r * x
    return true_rate * np.exp(-true_rate * t)
def funcArr(x, arr):
    """Wrapper unpacking the fit-parameter vector [r, t] for func()."""
    r, t = arr[0], arr[1]
    return func(x, r, t)
popt,perr = optimize.curve_fit(func,Totdat[0],Totdat[1])
print(popt,np.sqrt(np.diag(perr)))
fitDat =genDataFromFunktion(1000,X_START,X_END,popt,funcArr)
#----------- plot
fig, ax = plt.subplots()
ax.grid()
ax.scatter(Totdat[0],Totdat[1],s=15)
ax.errorbar(Totdat[0],Totdat[1],xerr=20e-6,fmt="x", ecolor = 'black',elinewidth=0.9,capsize=4,capthick=0.9,label="Messdaten")
ax.plot(fitDat[0],fitDat[1],color = "red")
ax.set_xlabel(X_LABEL)
ax.set_ylabel(Y_LABEL)
ax.set_xlim(X_START,X_END)
ax.set_ylim(Y_START,Y_END)
# Define the formatter function
def format_func(value, tick_number):
    """Matplotlib tick formatter: display amperes as milliamperes, 2 decimals."""
    return round(value * 1000, 2)
# Set the x-axis formatter
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_func))
ax.xaxis.set_major_locator(MultipleLocator(X_MAJOR_TICK))
ax.xaxis.set_minor_locator(MultipleLocator(X_MINOR_TICK))
ax.yaxis.set_major_locator(MultipleLocator(Y_MAJOR_TICK))
ax.yaxis.set_minor_locator(MultipleLocator(Y_MINOR_TICK))
plt.legend(("Messwerte", fr"fit mit r={round_errtex(popt[0],np.sqrt(np.diag(perr))[0])}; $\tau$ = {round_errtex(popt[1],np.sqrt(np.diag(perr))[1])}"))
plt.savefig(SAVE_AS)
plt.show()
| brouwerb/AP3 | XST/Detektortotzeit.py | Detektortotzeit.py | py | 2,086 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "inspect.getsourcefile",
"lin... |
28444767550 | import base64
import json
import os
import time
import yaml
from io import BytesIO
import logging
import logging.config
import numpy as np
import tensorflow as tf
from azureml.core.model import Model
from PIL import Image
from utils import label_map_util
from azureml.monitoring import ModelDataCollector
from azure.storage.blob import BlockBlobService
from azure.storage.blob.models import ContentSettings
MODEL_NAME = '__REPLACE_MODEL_NAME__'
PASCAL_LABEL_MAP_FILE = 'pascal_label_map.pbtxt'
THRESHOLD = float(os.getenv('MIN_CONFIDENCE', '0.8'))
IMAGE_STORAGE_ACCOUNT_NAME = '__REPLACE_IMAGE_STORAGE_ACCOUNT_NAME__'
IMAGE_STORAGE_ACCOUNT_KEY = '__REPLACE_IMAGE_STORAGE_ACCOUNT_KEY__'
IMAGE_STORAGE_CONTAINER_NAME = '__REPLACE_IMAGE_STORAGE_CONTAINER_NAME__'
def init():
    # AzureML scoring entry point: called once when the container starts.
    global logger
    global model
    global pred_collector
    global blob_service
    init_logger()
    # Data collector feeds Azure model-data collection; the "detection"
    # feature carries the prediction list produced by inference().
    pred_collector = ModelDataCollector(MODEL_NAME, identifier="imgpred", feature_names=["detection"])
    model = load_model()
    # Blob container archives a copy of every scored image (see StoreImage).
    blob_service = BlockBlobService(IMAGE_STORAGE_ACCOUNT_NAME, IMAGE_STORAGE_ACCOUNT_KEY)
    blob_service.create_container(IMAGE_STORAGE_CONTAINER_NAME) #fail_on_exist=False by default
def init_logger():
    """Configure the module-level `logger`.

    Loads a dictConfig from logging.yml next to this file when present;
    otherwise falls back to basicConfig at INFO level.
    """
    global logger
    logconf = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logging.yml')
    print('logconf {}'.format(logconf))
    if os.path.exists(logconf):
        with open(logconf, 'rt') as f:
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
        print('logconf loaded')
    else:
        # BUG FIX: the fallback referenced an undefined name `default_level`,
        # raising NameError whenever logging.yml was missing.
        logging.basicConfig(level=logging.INFO)
        print('logconf fall back to default')
    logger = logging.getLogger('score')
def load_model():
    """Load the label map and the frozen TF object-detection graph.

    Returns a dict holding the live tf.Session, the input image tensor, the
    output tensor dict and the category index mapping class id -> label.
    """
    # figure out how many classes
    label_map_dict = label_map_util.get_label_map_dict(PASCAL_LABEL_MAP_FILE)
    num_classes = len(label_map_dict)
    logger.info('num_of_classes in pascal_label_map: {}'.format(num_classes))
    # Load label map
    label_map = label_map_util.load_labelmap(PASCAL_LABEL_MAP_FILE)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    # Load a frozen Tensorflow model into memory
    model_path = Model.get_model_path(MODEL_NAME)
    logger.info('getting model {} from {}'.format(MODEL_NAME, model_path))
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        # Deserialize the frozen GraphDef and import it into this graph.
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        # Get handles to input and output tensors.
        # NOTE(review): tensor lookups must run while detection_graph is the
        # default graph; source indentation was lost, scope reconstructed to
        # match the canonical TF object-detection pattern -- confirm.
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs }
        tensor_dict = {}
        for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
    sess = tf.Session(graph=detection_graph)
    return {
        'session': sess,
        'image_tensor': image_tensor,
        'tensor_dict': tensor_dict,
        'category_index': category_index
    }
def run(raw_data):
    """AzureML scoring entry point: score one request, returning either the
    detection list or the exception text on failure."""
    logger.debug('raw_data: {}'.format(raw_data))
    try:
        return inference(raw_data)
    except Exception as e:
        # Surface the error text to the caller instead of failing the request.
        return str(e)
def inference(raw_data):
    """Run object detection on the base64-encoded image in `raw_data`.

    Returns a list of detections above the confidence THRESHOLD, each with
    class id, label, confidence and normalised bounding box.  Also archives
    the input image to blob storage and logs the prediction for collection.
    """
    logger.info('parse input json raw_data to get image')
    payload = json.loads(raw_data)
    image_raw = payload['file']
    logger.info('base64 decode input image')
    image_bytes = base64.b64decode(image_raw)
    image = Image.open(BytesIO(image_bytes))
    logger.info('turn decoded image to np_array')
    (im_width, im_height) = image.size
    image_np = np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
    batch = np.expand_dims(image_np, axis=0)
    # Run inference
    start_time = time.time()
    output_dict = model['session'].run(
        model['tensor_dict'], feed_dict={model['image_tensor']: batch})
    latency = time.time() - start_time
    logger.info('scoring took {} seconds'.format(latency))
    # all outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8).tolist()
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0].tolist()
    output_dict['detection_scores'] = output_dict['detection_scores'][0].tolist()
    logger.info('num of detections: {}'.format(output_dict['num_detections']))
    result = []
    for idx, score in enumerate(output_dict['detection_scores']):
        if score <= THRESHOLD:
            logger.debug('idx {} detection score too low {}'.format(idx, score))
            continue
        result.append({
            'class': output_dict['detection_classes'][idx],
            'label': model['category_index'][output_dict['detection_classes'][idx]]['name'],
            'confidence': output_dict['detection_scores'][idx],
            'bounding_box': output_dict['detection_boxes'][idx]
        })
    # Store the input image in blob storage and use its path as the
    # correlation id for the collected prediction.
    image_id = '{}/{}.jpg'.format(MODEL_NAME, int(start_time))
    StoreImage(image_id, image_bytes)
    pred_collector.collect(result, user_correlation_id=image_id)
    return result
def StoreImage(image_id, image_bytes):
    """Archive the scored input image under `image_id` in the blob container."""
    settings = ContentSettings('image/jpeg')
    blob_service.create_blob_from_bytes(
        IMAGE_STORAGE_CONTAINER_NAME, image_id, image_bytes, content_settings=settings)
| liupeirong/tensorflow_objectdetection_azureml | aml_deploy/score.py | score.py | py | 5,888 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "os.getenv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "azureml.monitoring.ModelDataCollector",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "azure.storage.blob.BlockBlobService",
"line_number": 35,
"usage_type": "call"
},
{
... |
35471287132 | # This test must be executed after test_account_transfer_funds tests in order to work properly
from ...parabank.src.base_element import BaseElement
from ...parabank.src.pages.account_services_pages.find_transactions_page import FindTransactionsPage
# from web.parabank.tests.test_account_transfer_funds import transfer_data
from datetime import date
from typing import List, Union
import pytest
"""
!!! Please run 'test_account_transfer_funds.py' tests before executing this module !!!
"""
account_link = "find_transactions"
id_incorrect = ["00000", "aaa"]
date_incorrect = ["01-01-2020", "11-11-1111", str(date.today())]
date_incorrect_format = ["2020_01_01", "01.01.2020", "aaa"]
amount_incorrect = ["1.1", "-11", "0011"]
amount_incorrect_format = ["1_1", "aaa"]
transfer_data = ["10", "0", "-10"]
@pytest.fixture(scope="function")
def find_page(driver, user_login) -> FindTransactionsPage:
    """Open the Find Transactions page for a logged-in user."""
    transactions_page = user_login.logged_in_panel.click_link(account_link)
    # The account dropdown is filled asynchronously; wait before using the page.
    transactions_page.dropdown_account.wait_until_not_empty()
    return transactions_page
@pytest.fixture(scope="function")
def transaction_id(user_login) -> str:
    """Grab an existing transaction id from the accounts overview, then return
    to the Find Transactions page."""
    overview = user_login.logged_in_panel.click_accounts_overview()
    account_page = overview.click_account_link()
    existing_id = account_page.get_transaction_id()
    account_page.logged_in_panel.click_find_transactions()
    return existing_id
@pytest.fixture(scope="module")
def current_date() -> str:
    """Today's date reformatted from ISO YYYY-MM-DD to MM-DD-YYYY."""
    iso_today = str(date.today())
    return iso_today[5:10] + '-' + iso_today[0:4]
def submit_form_with_params(fields: Union[BaseElement, List[BaseElement]], button: BaseElement, form: BaseElement,
                            values: Union[str, List[str]]):
    """Fill one or more form fields, submit, and wait for `form` to appear.

    Supported shapes: a single field with a single value; several fields all
    receiving the same value; or several fields paired element-wise with
    several values.  A single field with a list of values is rejected.
    """
    if isinstance(fields, BaseElement) and isinstance(values, str):
        fields.set_text(values)
    elif isinstance(fields, List) and isinstance(values, str):
        # One value replicated across every field.
        for field in fields:
            field.set_text(values)
    elif isinstance(fields, List) and isinstance(values, List):
        # Values paired element-wise with fields.
        for field, value in zip(fields, values):
            field.set_text(value)
    else:
        raise Exception("You can't assign the list of values to the single input field")
    button.click()
    # Wait for either the success or the error form, depending on the caller.
    form.wait_to_appear()
def test_find_transaction_by_correct_transaction_id(find_page, transaction_id):
    """Searching by an existing transaction id finds exactly that transaction."""
    submit_form_with_params(find_page.input_by_id, find_page.button_by_id, find_page.success_form, transaction_id)
    assert find_page.is_success()
    assert find_page.get_transaction_id() == transaction_id
    find_page.print_transaction_results()
    details_page = find_page.click_transaction_link()
    details_page.print_transaction_details()
    assert details_page.is_on_correct_page()
@pytest.mark.parametrize("data", id_incorrect,
                         ids=[f"Negative test {i+1} with id '{value}'"
                              for i, value in enumerate(id_incorrect)])
def test_find_transaction_by_incorrect_transaction_id(find_page, data):
    """A non-existent or malformed id shows the error form."""
    submit_form_with_params(find_page.input_by_id, find_page.button_by_id, find_page.error_form, data)
    assert not find_page.is_success()
def test_find_transaction_by_correct_date(find_page, current_date):
    """Today's date matches the transactions created by earlier tests."""
    submit_form_with_params(find_page.input_by_date, find_page.button_by_date, find_page.success_form, current_date)
    assert find_page.is_success()
    find_page.print_transaction_results()
    details_page = find_page.click_transaction_link()
    assert details_page.is_on_correct_page()
@pytest.mark.parametrize("data", date_incorrect,
                         ids=[f"Negative test {i+1} with date '{value}'"
                              for i, value in enumerate(date_incorrect)])
def test_find_transaction_by_incorrect_date(find_page, data):
    """A well-formed but unmatched date yields success with an empty table."""
    submit_form_with_params(find_page.input_by_date, find_page.button_by_date, find_page.success_form, data)
    assert find_page.is_success() and not find_page.transactions_table_line.exists()
@pytest.mark.parametrize("data", date_incorrect_format,
                         ids=[f"Negative test {i+1} with date '{value}'"
                              for i, value in enumerate(date_incorrect_format)])
def test_find_transaction_by_incorrect_date_format(find_page, data):
    """An unparsable date string shows the error form."""
    submit_form_with_params(find_page.input_by_date, find_page.button_by_date, find_page.error_form, data)
    assert not find_page.is_success()
def test_find_transaction_by_correct_dates(find_page, current_date):
    """A from/to range covering today finds the earlier transactions."""
    submit_form_with_params([find_page.input_from_date, find_page.input_to_date], find_page.button_by_date_range,
                            find_page.success_form, current_date)
    assert find_page.is_success()
    find_page.print_transaction_results()
    details_page = find_page.click_transaction_link()
    assert details_page.is_on_correct_page()
@pytest.mark.parametrize("data", date_incorrect,
                         ids=[f"Negative test {i+1} with date '{value}'"
                              for i, value in enumerate(date_incorrect)])
def test_find_transaction_by_incorrect_dates(find_page, data):
    """A well-formed but unmatched date range yields an empty result table."""
    submit_form_with_params([find_page.input_from_date, find_page.input_to_date], find_page.button_by_date_range,
                            find_page.success_form, data)
    assert find_page.is_success() and not find_page.transactions_table_line.exists()
@pytest.mark.parametrize("data", date_incorrect_format,
                         ids=[f"Negative test {i+1} with date '{value}'"
                              for i, value in enumerate(date_incorrect_format)])
def test_find_transaction_by_incorrect_dates_format(find_page, data):
    """An unparsable date range shows the error form."""
    submit_form_with_params([find_page.input_from_date, find_page.input_to_date], find_page.button_by_date_range,
                            find_page.error_form, data)
    assert not find_page.is_success()
@pytest.mark.parametrize("data", transfer_data,
                         ids=[f"Positive test {i+1} with amount '{value}'"
                              for i, value in enumerate(transfer_data)])
def test_find_transaction_by_correct_amount(find_page, data):
    """Amounts transferred by the earlier transfer tests are found by amount."""
    submit_form_with_params(find_page.input_by_amount, find_page.button_by_amount, find_page.success_form, data)
    assert find_page.is_success()
    find_page.print_transaction_results()
    details_page = find_page.click_transaction_link()
    assert details_page.is_on_correct_page()
@pytest.mark.parametrize("data", amount_incorrect,
                         ids=[f"Negative test {i+1} with amount '{value}'"
                              for i, value in enumerate(amount_incorrect)])
def test_find_transaction_by_incorrect_amount(find_page, data):
    """A valid-format but unmatched amount yields an empty result table."""
    submit_form_with_params(find_page.input_by_amount, find_page.button_by_amount, find_page.success_form, data)
    assert find_page.is_success() and not find_page.transactions_table_line.exists()
@pytest.mark.parametrize("data", amount_incorrect_format,
                         ids=[f"Negative test {i+1} with amount '{value}'"
                              for i, value in enumerate(amount_incorrect_format)])
def test_find_transaction_by_incorrect_amount_format(find_page, data):
    """An unparsable amount shows the error form."""
    submit_form_with_params(find_page.input_by_amount, find_page.button_by_amount, find_page.error_form, data)
    assert not find_page.is_success()
def test_find_transaction_with_blank_inputs(find_page):
    """Submitting any of the four search forms empty keeps the page open
    without reporting success."""
    for search_button in (find_page.button_by_id, find_page.button_by_date,
                          find_page.button_by_date_range, find_page.button_by_amount):
        search_button.click()
        assert find_page.is_on_correct_page() and not find_page.is_success()
| vshkugal/pythonProject | parabank/tests/test_account_find_transactions.py | test_account_find_transactions.py | py | 8,080 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "parabank.src.pages.a... |
34007480515 | from itertools import permutations as permu
INF = 10**18
N = int(input())
A = [list(map(int, input().split())) for _ in range(N)]
M = int(input())
XY = [list(map(int, input().split())) for _ in range(M)]
invalids = set()
for x, y in XY:
x -= 1
y -= 1
invalids.add((x, y))
invalids.add((y, x))
def solve_cost(l):
    """Total cost of the running order `l`; INF if any adjacent pair is forbidden.

    l[i] is the runner assigned to leg i, A[r][i] is runner r's cost on leg i,
    and `invalids` holds (symmetric) runner pairs that must not run consecutive
    legs.
    """
    total = 0
    for leg in range(N):
        if leg + 1 < N and (l[leg], l[leg + 1]) in invalids:
            return INF
        total += A[l[leg]][leg]
    return total
ans = INF
for l in permu(range(N)):
score = solve_cost(l)
# print(l, score)
ans = min(ans, score)
if ans == INF:
print(-1)
else:
print(ans)
| yojiyama7/python_competitive_programming | atcoder/else/typical90/032_atcoder_ekiden.py | 032_atcoder_ekiden.py | py | 679 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.permutations",
"line_number": 28,
"usage_type": "call"
}
] |
69943940193 | """
Handles /templates endpoint
Doc: https://developers.mailersend.com/api/v1/templates.html
"""
import requests
from mailersend.base import base
class NewTemplate(base.NewAPIClient):
    """
    Instantiates the /templates endpoint object
    """

    def __init__(self):
        """
        NewTemplate constructor
        """
        # Copy connection settings from a freshly configured base client.
        parent = base.NewAPIClient()
        super().__init__(
            parent.api_base,
            parent.headers_default,
            parent.headers_auth,
            parent.mailersend_api_key,
        )

    def get_templates(self):
        """
        Returns a JSON response from the MailerSend API
        """
        response = requests.get(f"{self.api_base}/templates", headers=self.headers_default)
        return response.text

    def get_template_by_id(self, template_id):
        """
        Returns a JSON response from the MailerSend API
        """
        response = requests.get(f"{self.api_base}/templates/{template_id}", headers=self.headers_default)
        return response.text

    def delete_template(self, template_id):
        """
        Returns a JSON response from the MailerSend API
        """
        response = requests.delete(f"{self.api_base}/templates/{template_id}", headers=self.headers_default)
        return response.text
| digiajay/LabVIEW-to-SaaS-World | venv/Lib/site-packages/mailersend/templates/__init__.py | __init__.py | py | 1,364 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mailersend.base.base.NewAPIClient",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mailersend.base.base",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "mailersend.base.base.NewAPIClient",
"line_number": 19,
"usage_type": "call"
... |
32811509112 | from .metric import *
from scipy.spatial.distance import cosine
import torch
from feerci import feerci
def asnorm(enroll_test_scores, enroll_xv, test_xv, cohort_xv):
"""
Calculate adaptive s-norm
A direct and continuous measurement of speaker confusion between all
training samples is computationally expensive. Hence, an option is to
compute speaker similarities from the weights of the AAM-softmax layer as
approximations of the class-centers.
:param enroll_test_scores: Score between enrollment and test utterances. Must be in torch.tensor format
:param enroll_xv: Extracted xvectors from enrollment utterances. Must be in torch.tensor format
:param test_xv: Extracted xvectors from test utterances. Must be in torch.tensor format
:param cohort_xv: xvectors from cohort speakers. Must be in torch.tensor format
:return: The adaptive S-Norm (ref https://www.isca-speech.org/archive/pdfs/interspeech_2017/matejka17_interspeech.pdf)
"""
# Calculate scores between enroll and cohort
# print(enroll_xv.shape, cohort_xv.shape, flush=True)
enroll_cohort_scores = torch.einsum('ij,kj', enroll_xv, cohort_xv)
k = 200
if cohort_xv.shape[0] < k:
k = cohort_xv.shape[0]
# Calculate mean and std from top 200 scores
topk_cohort_enroll = enroll_cohort_scores.topk(k, dim=1).values
calib_mean_enroll = topk_cohort_enroll.mean(dim=1)
calib_std_enroll = topk_cohort_enroll.std(dim=1)
# Calculate scores between test and cohort
cohort_test_scores = torch.einsum('ij,kj', test_xv, cohort_xv)
# Calculate mean and std from top 200 scores
topk_cohort_test = cohort_test_scores.topk(k, dim=1).values
calib_mean_test = topk_cohort_test.mean(dim=1)
calib_std_test = topk_cohort_test.std(dim=1)
# Calculate S-norm from z_Norm and t_norm
z_Norm = ((enroll_test_scores - calib_mean_enroll) / calib_std_enroll)
t_Norm = (enroll_test_scores - calib_mean_test) / calib_std_test
return 0.5 * (z_Norm + t_Norm)
def cosine_scoring(embd1s, embd2s):
    """Pairwise cosine-similarity scores for two embedding lists.

    Each score is ``1 - cosine_distance`` (i.e. the cosine similarity),
    so a larger value means the pair of embeddings is more alike.
    """
    return [1 - cosine(a, b) for a, b in zip(embd1s, embd2s)]
| deep-privacy/SA-toolkit | satools/satools/sidekit/scoring/__init__.py | __init__.py | py | 2,379 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "torch.einsum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.einsum",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.cosine",
"line_number": 53,
"usage_type": "call"
}
] |
10052981995 | import argparse
import os
import chc.util.fileutil as UF
import chc.reporting.ProofObligations as RP
from chc.app.CApplication import CApplication
def parse():
    """Parse the command line: a CWE name and a Juliet test-case name."""
    argparser = argparse.ArgumentParser()
    argparser.add_argument("cwe", help="name of cwe, e.g., CWE121")
    argparser.add_argument("test", help="name of test case, e.g., CWE129_large")
    return argparser.parse_args()
# Command-line driver: report proof-obligation statistics for one Juliet
# test case that has already been analyzed by CodeHawk-C.
if __name__ == "__main__":
    args = parse()
    try:
        # Resolve the test directory and require existing analysis results.
        cpath = UF.get_juliet_testpath(args.cwe, args.test)
        UF.check_analysis_results(cpath)
    except UF.CHError as e:
        print(str(e.wrap()))
        exit(1)
    sempath = os.path.join(cpath, "semantics")
    # Support files that are not part of the test case proper.
    excludefiles = ["io.c", "main_linux.c", "std_thread.c"]
    summary = UF.read_project_summary_results(cpath)
    if summary is not None:
        # A precomputed summary exists: print it and stop early.
        print(RP.project_proofobligation_stats_dict_to_string(summary))
        exit(0)
    capp = CApplication(sempath, excludefiles=excludefiles)
    filterout = ["io", "main_linux", "std_thread"]
    dc = ["deadcode"]
    def filefilter(f):
        # Keep only files that are not in the exclusion list.
        return f not in filterout
    print(
        RP.project_proofobligation_stats_tostring(
            capp, extradsmethods=dc, filefilter=filefilter
        )
    )
    # List any contract-condition failures discovered by the analysis.
    contract_condition_violations = capp.get_contract_condition_violations()
    if len(contract_condition_violations) > 0:
        print("=" * 80)
        print(str(len(contract_condition_violations)) + " CONTRACT CONDITION FAILURES")
        print("=" * 80)
        for (fn, cc) in contract_condition_violations:
            print(fn + ":")
            for (name, desc) in cc:
                print(" " + name + ":" + desc)
        print("=" * 80)
| static-analysis-engineering/CodeHawk-C | chc/cmdline/juliet/chc_report_juliettest.py | chc_report_juliettest.py | py | 1,690 | python | en | code | 20 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "chc.util.fileutil.get_juliet_testpath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "chc.util.fileutil",
"line_number": 23,
"usage_type": "name"
},
{
"a... |
35712666035 | from django.urls import path
from . import views
# URL namespace used for reversing, e.g. {% url 'todo:index' %}.
app_name = 'todo'
# Route table for the todo app; <int:identifier> captures the task's pk.
urlpatterns = [
    path('', views.index, name='index'),
    path('completed', views.get_completed, name='get_completed'),
    path('unfinished', views.get_unfinished, name='get_unfinished'),
    path('add', views.add, name='add'),
    path('edit/<int:identifier>', views.edit, name='edit'),
    path('delete/<int:identifier>', views.delete, name='delete'),
    path('complete/<int:identifier>', views.complete, name='complete'),
    path('remove_complete/<int:identifier>', views.remove_complete, name='remove_complete')
]
| jkirira/todo-project | todo/urls.py | urls.py | py | 592 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
73435931555 | import cv2
import numpy as np
# Track blue-shirted players in a video: threshold blue in HSV, draw a
# bounding box and foot marker per detected contour, and write the
# annotated frames to output.avi. Press 'q' to stop early.
cap = cv2.VideoCapture(r'Copy_of_offsside7.mp4')
# NOTE(review): this first fourcc value is immediately overwritten by the
# XVID code on the next line — the read from the capture is dead code.
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fps = cap.get(cv2.CAP_PROP_FPS)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter('output.avi', fourcc, fps, size)
while(cap.isOpened()):
    # Take each frame
    ret, frame = cap.read()
    if ret:
        # Convert BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # define range of blue color in HSV
        lower_blue = np.array([110, 50, 50])
        upper_blue = np.array([130, 255, 255])
        # Threshold the HSV image to get only blue colors
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask= mask)
        cv2.imshow('frame', frame)
        cv2.imshow('mask', mask)
        cv2.imshow('res', res)
        # draw bounding box for the players
        # [-2] keeps the contour list across OpenCV 3/4 return-shape changes.
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        # Minimum contour area (in pixels) for a blob to count as a player.
        cnt_thresh = 180
        if len(cnts) > 0:
            # Largest contours first; stop at the first one below threshold.
            c = sorted(cnts, key=cv2.contourArea, reverse=True)
            for i in range(len(c)):
                if cv2.contourArea(c[i]) < cnt_thresh:
                    break
                x, y, w, h = cv2.boundingRect(c[i])
                # Pad the box slightly above and below the detected blob.
                h += 10
                y -= 5
                # Reject boxes with implausible aspect ratios for a person.
                if h < 0.8 * w:
                    continue
                elif h / float(w) > 3:
                    continue
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                M = cv2.moments(c[i])
                # find the center of gravity of the players
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                # find the foot of the players
                foot = (center[0], int(center[1] + h*1.1))
                cv2.circle(frame, foot, 5, (0, 0, 255), -1)
        out.write(frame)
    else:
        break
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cap.release()
out.release()
cv2.destroyAllWindows()
| cjustacoder/Soccer-Offside | src/player_tracking/track_players_test.py | track_players_test.py | py | 2,140 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FOURCC",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.CAP... |
20669709910 | from typing import (
Any,
Dict,
List,
Tuple,
Union,
)
from eth_utils import (
to_canonical_address,
decode_hex,
big_endian_to_int,
)
from eth_typing import (
Address,
)
from sharding.contracts.utils.smc_utils import (
get_smc_json,
)
from sharding.handler.exceptions import (
LogParsingError,
)
class LogParser(object):
    """Decode one contract event log into attributes on this object.

    The event ABI is looked up by name in the SMC contract JSON; each
    indexed input is read from ``log['topics']`` and each non-indexed
    input from ``log['data']``, and the decoded value is stored under
    the input's name via ``setattr``.
    """

    def __init__(self, *, event_name: str, log: Dict[str, Any]) -> None:
        abi = self._extract_event_abi(event_name=event_name)
        # Split the event inputs: indexed ones travel as topics, the
        # rest are packed into the 32-byte-aligned data blob.
        indexed = [(arg['name'], arg['type'])
                   for arg in abi["inputs"] if arg['indexed'] is True]
        unindexed = [(arg['name'], arg['type'])
                     for arg in abi["inputs"] if arg['indexed'] is not True]
        self._set_topic_value(topics=indexed, log=log)
        self._set_data_value(data=unindexed, log=log)

    def _extract_event_abi(self, *, event_name: str) -> Dict[str, Any]:
        """Return the ABI entry for the named event, or raise."""
        for entry in get_smc_json()['abi']:
            if entry['name'] == event_name and entry['type'] == 'event':
                return entry
        raise LogParsingError("Can not find event {}".format(event_name))

    def _set_topic_value(self, *, topics: List[Tuple[str, Any]], log: Dict[str, Any]) -> None:
        """Decode indexed inputs from log['topics'][1:] (topic 0 is the event id)."""
        raw_topics = log['topics'][1:]
        if len(raw_topics) != len(topics):
            raise LogParsingError(
                "Error parsing log topics, expect"
                "{} topics but get {}.".format(len(topics), len(raw_topics))
            )
        for (name, type_), raw in zip(topics, raw_topics):
            setattr(self, name, self._parse_value(val_type=type_, val=raw))

    def _set_data_value(self, *, data: List[Tuple[str, Any]], log: Dict[str, Any]) -> None:
        """Decode non-indexed inputs from the hex data blob, 32 bytes each."""
        blob = decode_hex(log['data'])
        if len(blob) != len(data) * 32:
            raise LogParsingError(
                "Error parsing log data, expect"
                "{} data but get {}.".format(len(data), len(blob))
            )
        for idx, (name, type_) in enumerate(data):
            word = blob[idx * 32: (idx + 1) * 32]
            setattr(self, name, self._parse_value(val_type=type_, val=word))

    def _parse_value(self, *, val_type: str, val: bytes) -> Union[bool, Address, bytes, int]:
        """Convert one 32-byte word (or topic) into a Python value."""
        if val_type == 'bool':
            return bool(big_endian_to_int(val))
        if val_type == 'address':
            # Addresses are right-aligned: only the last 20 bytes matter.
            return to_canonical_address(val[-20:])
        if val_type == 'bytes32':
            return val
        if 'int' in val_type:
            return big_endian_to_int(val)
        raise LogParsingError(
            "Error parsing the type of given value. Expect bool/address/bytes32/int*"
            "but get {}.".format(val_type)
        )
| ethereum/sharding | sharding/handler/utils/log_parser.py | log_parser.py | py | 2,781 | python | en | code | 477 | github-code | 1 | [
{
"api_name": "typing.Dict",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "sharding.contracts.utils.smc_utils.get_smc_json",
"line_number": 42,
"usage_type": "call"
},
{
"api_name":... |
11637373168 | from flask import Blueprint, render_template
from flask import current_app as app
# Blueprint for the static "About" page; templates and assets are
# resolved relative to this package.
about_bp = Blueprint(
    name = "about_bp",
    import_name = __name__,
    template_folder = "templates",
    static_folder = 'assets'
)
@about_bp.route("/about")
def about_page():
    """Render the About page template with the site title."""
    return render_template(
        "about.html", title = 'Brasil em Números'
    )
| brasil-em-numeros/brasil-em-numeros | dashboard/about/about.py | about.py | py | 354 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
}
] |
32247268331 | import os
import subprocess
import unittest
from click.testing import CliRunner
from impulsare_config import Reader as ConfigReader
from impulsare_distributer.queue_listener import cli
base_path = os.path.abspath(os.path.dirname(__file__))
# https://docs.python.org/3/library/unittest.html#assert-methods
class TestQueueListener(unittest.TestCase):
    """Integration tests for the queue-listener CLI. The happy path
    needs a reachable redis; see _get_config for the REDIS env override."""
    # Invocation of the listener script via the current interpreter.
    base_cmd = ['python', base_path + '/../impulsare_distributer/queue_listener.py']
    def test_requires_config(self):
        # Invoking with no options must fail with click's usage error (2).
        runner = CliRunner()
        result = runner.invoke(cli)
        self.assertIs(result.exit_code, 2)
        self.assertRegex(result.output, '.*Missing option "--host"')
    def test_bad_host(self):
        # NOTE(review): unlike the other tests this runs the installed
        # `queue-listener` console script instead of base_cmd — confirm
        # that is intentional and not a missing `self.base_cmd +`.
        cmd = ['queue-listener', '-h', '127.0.0.1', '-p', '80', '-q', 'wrong']
        res = self._exec_cmd(cmd)
        self.assertIs(res['status'], 1, "Can't get status 1, message: {} ('{}')".format(res['stderr'], cmd))
        self.assertEqual(res['stdout'], '')
        self.assertRegex(res['stderr'], '.*Error 111 connecting to 127.0.0.1:80. Connection refused.*')
        # Without --debug the raw redis traceback must not leak through.
        self.assertNotRegex(res['stderr'], '.*redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:80. Connection refused.*')
    def test_bad_host_debug(self):
        # With --debug the full redis exception is expected in stderr.
        cmd = self.base_cmd + ['--debug', '-h', '127.0.0.1', '-p', '80', '-q', 'wrong']
        res = self._exec_cmd(cmd)
        self.assertIs(res['status'], 1, "Can't get status 1, message: {} ('{}')".format(res['stderr'], cmd))
        self.assertEqual(res['stdout'], '')
        self.assertRegex(res['stderr'], '.*redis.exceptions.ConnectionError: Error 111 connecting to 127.0.0.1:80. Connection refused.*')
    def test_right_config(self):
        # Happy path: a reachable redis host makes the RQ worker start.
        config = self._get_config()
        cmd = self.base_cmd + ['-h', config['distributer']['host'], '-q', config['testqueue']['queue']]
        res = self._exec_cmd(cmd)
        self.assertIs(res['status'], 0, "Can't get status 0, message: {} ('{}')".format(res['stderr'], cmd))
        self.assertEqual(res['stdout'], '')
        self.assertRegex(res['stderr'], '.*RQ worker.*started')
    def _get_config(self):
        """Load the validated test config, honoring the REDIS env var."""
        config_specs = base_path + '/../impulsare_distributer/static/specs.yml'
        config_default = base_path + '/../impulsare_distributer/static/default.yml'
        config_file = base_path + '/static/config_valid.yml'
        # Use another server, make sure to have the right configuration file
        if 'REDIS' in os.environ and os.environ['REDIS'] != '127.0.0.1':
            config_file = base_path + '/static/config_valid_{}.yml'.format(os.environ['REDIS'])
        config = ConfigReader().parse(config_file, config_specs, config_default)
        return config
    def _exec_cmd(self, cmd: list):
        """Run *cmd*, kill it after 3s if still alive, and return a dict
        with flattened stdout/stderr and the exit status."""
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(timeout=3)
        except subprocess.TimeoutExpired:
            # A successfully started listener blocks forever; terminating
            # after the timeout lets us harvest its output.
            p.terminate()
            stdout, stderr = p.communicate()
        stdout = stdout.decode().strip().replace('\n', '')
        stderr = stderr.decode().strip().replace('\n', '')
        return {'stdout': stdout, 'stderr': stderr, 'status': p.returncode}
| impulsare/distributer | tests/test_queue_listener.py | test_queue_listener.py | py | 3,170 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"li... |
74326021153 | import argparse
from cmath import e
import enum
import logging
from collections import OrderedDict
import os
import random
from shutil import copyfile
import sys
import time
import numpy as np
from sklearn import metrics
from tensorboardX import SummaryWriter
import torch
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from bin import dataset_LGAImers
from model import models
# from model.model_factory import create_model
# from loss.loss_factory import create_loss
from optimization.optimizer_factory import create_optimizer
from scheduler.scheduler_factory import create_scheduler
from util import config
from util.logger import print_train_log, print_val_log
from util.metrics import AverageMeter
# from util.utils import make_bbox
def get_parser():
    """Parse CLI flags, load the YAML config, and archive a copy of the
    config file into the run's save directory.

    Returns the merged config object; ``--weight`` overrides the file.
    """
    parser = argparse.ArgumentParser(description="DACON LG")
    parser.add_argument("--config", type=str, default="config/LGAImers_test.yaml", help="config file")
    parser.add_argument("--weight", type=str, default=None, help="weight file")
    args = parser.parse_args()
    assert args.config is not None
    cfg = config.load_cfg_from_cfg_file(args.config)
    if args.weight is not None:
        cfg.weight = args.weight
    # Keep a copy of the exact config used for this run for reproducibility.
    os.makedirs(cfg.save_path, exist_ok=True)
    copyfile(args.config, os.path.join(cfg.save_path, os.path.split(args.config)[-1]))
    return cfg
def get_logger():
    """Build the shared "main-logger": a console handler plus a root
    file handler writing to ``<save_path>/log/log.txt``.

    Relies on the module-global ``args`` populated by ``get_parser``.
    """
    log = logging.getLogger("main-logger")
    log.setLevel(logging.INFO)

    console = logging.StreamHandler()
    console.setFormatter(
        logging.Formatter("[%(asctime)s] %(message)s", "%Y-%m-%d %H:%M:%S")
    )
    log.addHandler(console)

    log_dir = os.path.join(args.save_path, "log")
    os.makedirs(log_dir, exist_ok=True)
    # basicConfig attaches a file handler to the root logger.
    logging.basicConfig(filename=os.path.join(log_dir, "log.txt"), filemode="w")
    return log
def set_seed(manual_seed):
    """Seed every RNG the trainer uses (python, numpy, torch CPU/GPU)
    and force deterministic cuDNN kernels. A ``None`` seed is a no-op.
    """
    if manual_seed is None:
        return
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    cudnn.benchmark = False
    cudnn.deterministic = True
def main():
    """Entry point: load the config, pin GPUs, seed RNGs, run training."""
    global args
    args = get_parser()
    # Restrict CUDA to the GPUs listed in the config.
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(x) for x in args.train_gpu)
    set_seed(args.manual_seed)
    main_worker()
def main_worker():
    """Full training driver: build model/optimizer/scheduler, optionally
    resume from a checkpoint, then loop over epochs saving periodic and
    best checkpoints. Uses the module-global ``args`` from get_parser().
    """
    global logger, writer
    logger = get_logger()
    logger.info(args)
    writer = SummaryWriter(args.save_path)
    # 56 input features -> 14 regression targets (fixed dataset schema).
    model = models.CustomModel(input_dim = 56, output_dim = 14)
    optimizer = create_optimizer(model.parameters(), args)
    # optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
    lr_scheduler, args.epochs = create_scheduler(args, optimizer)
    # CUDA
    model = torch.nn.DataParallel(model.cuda())
    # if args.weight:
    #     if os.path.isfile(args.weight):
    #         logger.info("=> loading weight '{}'".format(args.weight))
    #         checkpoint = torch.load(args.weight)
    #         model.load_state_dict(checkpoint["state_dict"], strict=False)
    #         # For loading a old version model
    #         if args.arch == "chestCAD_seg_cls" and len(
    #             checkpoint["state_dict"]["module.backbone._conv_stem_no_stride.weight"]
    #         ):
    #             model.module.backbone._conv_stem.weight = torch.nn.Parameter(
    #                 checkpoint["state_dict"]["module.backbone._conv_stem_no_stride.weight"]
    #             )
    #             model.module.backbone._conv_stem.bias = torch.nn.Parameter(
    #                 checkpoint["state_dict"]["module.backbone._conv_stem_no_stride.bias"]
    #             )
    #         logger.info("=> loaded weight '{}'".format(args.weight))
    #     else:
    #         logger.info("=> no weight found at '{}'".format(args.weight))
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            # checkpoint = torch.load(args.resume)
            # Map storages straight onto the GPU while loading.
            checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda())
            args.start_epoch = checkpoint["epoch"]
            model.load_state_dict(checkpoint["state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint["epoch"]))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))
    train_data = dataset_LGAImers.Dataset_LGAImers(
        split="train",
        data_path=args.data_path_train,
        transform=None,
        features_X=56,
        features_Y=14,
    )
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True,
    )
    if args.evaluate:
        val_data = dataset_LGAImers.Dataset_LGAImers(
            split="val",
            data_path=args.data_path_val,
            transform=None,
            features_X=56,
            features_Y=14,
        )
        val_loader = torch.utils.data.DataLoader(
            val_data,
            batch_size=args.batch_size_val,
            shuffle=False,
            num_workers=args.workers,
            pin_memory=True,
        )
    # Tracks the best validation metric and which best-slot to write next.
    best_dict = {
        "loss": float("inf"),
        "best_idx": 1,
    }
    # loss_func = torch.nn.MSELoss()
    from loss.loss_lg_nrmse import lg_nrmse_loss
    loss_func = lg_nrmse_loss()
    loss_func.cuda()
    for epoch in range(args.start_epoch, args.epochs):
        epoch_log = epoch + 1
        train_metrics = train(train_loader, model, optimizer, epoch, loss_func, lr_scheduler)
        for k, v in train_metrics.items():
            writer.add_scalar("train_" + k,
                              v, epoch_log)
        if epoch_log % args.save_freq == 0:
            filename = args.save_path + "/train_epoch_" + str(epoch_log) + ".pth"
            logger.info("Saving checkpoint to: " + filename)
            torch.save(
                {
                    "epoch": epoch_log,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                },
                filename,
                _use_new_zipfile_serialization=False,
            )
            # Keep only the two most recent periodic checkpoints.
            if epoch_log / args.save_freq > 2:
                deletename = args.save_path + "/train_epoch_" + str(epoch_log - args.save_freq * 2) + ".pth"
                try:
                    os.remove(deletename)
                except:
                    print("no file at : " + deletename)
        if lr_scheduler is not None:
            lr_scheduler.step(epoch + 1)
        # writer.add_scalar("lr", optimizer.param_groups[0]["lr"], epoch)
        # writer.add_scalar("lrx10", optimizer.param_groups[1]["lr"], epoch)
        if args.evaluate:
            val_metrics = validate(val_loader, model, loss_func)
            for k, v in val_metrics.items():
                writer.add_scalar("val_" + k, v, epoch_log)
            # Lower loss is better, hence >= (inequality direction reversed).
            if best_dict[args.best_target] >= val_metrics[args.best_target]:
                best_dict[args.best_target] = val_metrics[args.best_target]
                filename = os.path.join(args.save_path, "best{}.pth".format(best_dict["best_idx"]))
                torch.save(
                    {
                        "epoch": epoch_log,
                        "state_dict": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                    },
                    filename,
                    _use_new_zipfile_serialization=False,
                )
                # Rotate the best-model slot within the top-k budget.
                best_dict["best_idx"] += 1
                best_dict["best_idx"] = (best_dict["best_idx"] % args.save_top_k) + 1
                logger.info("Saving best model: " + filename)
def train(train_loader, model, optimizer, epoch, loss_func, lr_scheduler=None):
    """Run one training epoch and return OrderedDict({'loss': mean loss}).

    Also logs progress (with an ETA estimate) every ``args.print_freq``
    batches and, when given, advances a per-iteration lr scheduler.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    model.train()
    end_time = time.time()
    max_iter = args.epochs * len(train_loader)
    for batch_idx, (X_tensor, Y_tensor, df_ID) in enumerate(train_loader):
        data_time.update(time.time() - end_time)
        # X_tensor = torch.FloatTensor(X_list)
        # Y_tensor = torch.FloatTensor(Y_list)
        X_tensor = X_tensor.cuda(non_blocking=True)
        Y_tensor = Y_tensor.cuda(non_blocking=True)
        Y_pred = model(X_tensor)
        loss = loss_func(Y_pred, Y_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_meter.update(loss.item(), X_tensor.size(0))
        batch_time.update(time.time() - end_time)
        end_time = time.time()
        current_iter = epoch * len(train_loader) + batch_idx + 1
        if lr_scheduler is not None:
            lr_scheduler.step_update(num_updates=current_iter)
        # ETA from the running average batch time.
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = "{:02d}:{:02d}:{:02d}".format(int(t_h), int(t_m), int(t_s))
        if (batch_idx + 1) % args.print_freq == 0:
            print_train_log(
                logger,
                epoch,
                args.epochs,
                batch_idx,
                len(train_loader),
                batch_time,
                data_time,
                remain_time,
                loss_meter,
            )
    # NOTE: logs the last batch's loss, while the returned metric is the
    # epoch average (loss_meter.avg).
    logger.info("Train result at epoch [{}/{}]: loss {:.4f}.".format(epoch + 1, args.epochs, loss))
    train_metrics_dict = OrderedDict([("loss", loss_meter.avg)])
    return train_metrics_dict
def validate(val_loader, model, loss_func):
    """Evaluate the model on the validation loader (no gradients) and
    return OrderedDict({'loss': last-batch loss}).

    NOTE(review): unlike train(), this returns loss_meter.val (the last
    batch) rather than loss_meter.avg — confirm that is intentional.
    """
    logger.info(">>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>")
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    model.eval()
    end_time = time.time()
    with torch.no_grad():
        for batch_idx, (X_tensor, Y_tensor, df_ID) in enumerate(val_loader):
            data_time.update(time.time() - end_time)
            # X_tensor = torch.FloatTensor(X_list)
            # Y_tensor = torch.FloatTensor(Y_list)
            X_tensor = X_tensor.cuda(non_blocking=True)
            Y_tensor = Y_tensor.cuda(non_blocking=True)
            Y_pred = model(X_tensor)
            loss = loss_func(Y_pred, Y_tensor)
            loss_meter.update(loss.item(), X_tensor.size(0))
            batch_time.update(time.time() - end_time)
            end_time = time.time()
            if (batch_idx + 1) % args.print_freq == 0:
                print_val_log(
                    logger,
                    batch_idx,
                    len(val_loader),
                    data_time,
                    batch_time,
                    loss_meter,
                )
    logger.info("Val result: loss {:.4f}.".format(loss))
    logger.info("<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<")
    val_metrics_dict = OrderedDict([("loss", loss_meter.val)])
    return val_metrics_dict
# Script entry point.
if __name__ == "__main__":
    main()
| Jeonsec/LG_Innotek_Hackathon | bin/train_TabNet.py | train_TabNet.py | py | 11,389 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
2807200688 | # -*- coding: utf-8 -*-
import sys
sys.path.append('.')
from src import *
from joblib import Parallel, delayed
from multiprocessing import cpu_count
from multiprocessing import Pool
from src.data import *
from src.dataset.base import *
from src.utils.util import *
from src.utils.util_data import *
from src.utils.util_file import *
class UnilangDataloader(object):
    """Bundle of per-mode (e.g. train/valid/test) DataLoaders for one
    programming language, built in parallel from a base dataset class.
    """
    __slots__ = ('batch_size', 'modes', 'lng', 'token_dicts', 'data_loaders', 'LENGTH',)
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # All real work happens in _construct so subclasses can reuse it.
        self._construct(*args, **kwargs)
    def _construct(self,
                   # base parameters
                   base_dataset: Union[sBaseDataset],
                   collate_fn: Union[sbase_collate_fn],
                   params: Dict,
                   # loader parameters
                   batch_size: int,
                   modes: List[str],
                   thread_num: int,
                   ) -> None:
        self.batch_size = batch_size
        self.modes = modes
        self.lng = params.get('data_lng')
        self.LENGTH = {}        # mode -> dataset size
        self.data_loaders = {}  # mode -> torch DataLoader
        self._load_dict(params.get('token_dicts'))
        params['token_dicts'] = self.token_dicts
        LOGGER.info("before _load_data in UnilangDataloader")
        self._load_data(thread_num, base_dataset, collate_fn, params)
    def _load_data(self,
                   thread_num: int,
                   base_dataset: Union[sBaseDataset],
                   collate_fn: Union[sbase_collate_fn],
                   params: Dict,
                   ) -> None:
        """Instantiate one dataset per mode (in parallel via joblib) and
        wrap each in a DataLoader."""
        # 18.2358
        LOGGER.debug('read unilang dataset loader')
        LOGGER.info('read unilang dataset loader')
        tmp_param = deepcopy(params)
        # DTRL mode: ruby and the other language use different data portions.
        is_dtrl = params['src_ruby'] != params['src_other']
        if is_dtrl:  # dtrl
            if tmp_param['data_lng'] == 'ruby':
                tmp_param['portion'] = tmp_param['src_ruby']
            else:
                tmp_param['portion'] = tmp_param['src_other']
        paralleler = Parallel(len(self.modes))
        datasets = paralleler(delayed(base_dataset)(**dict(tmp_param, **{'mode': mode})) for mode in self.modes)
        LOGGER.debug("after paralleler in UnilangDataloader's _load_data")
        for mode, ds in zip(self.modes, datasets):
            assert mode == ds.mode
            self.LENGTH[mode] = ds.size
            data_loader = DataLoader(
                ds, batch_size=256 if mode == 'valid' else self.batch_size,
                # shuffle=True if mode == 'train' else False,
                shuffle=False,
                collate_fn=collate_fn,
                num_workers=thread_num,
                # drop_last=True if is_dtrl else False
            )
            self.data_loaders[mode] = data_loader
        LOGGER.debug("UnilangDataloader load_data finished")
        # # slow, but for debug
        # for mode in self.modes:
        #     dataset = base_dataset(**dict(params, **{'mode': mode}))
        #     self.LENGTH[mode] = dataset.size
        #     data_loader = DataLoader(
        #         dataset, batch_size=self.batch_size,
        #         shuffle=False if mode == 'test' else True,
        #         collate_fn=collate_fn,
        #         num_workers=thread_num,
        #     )
        #     self.data_loaders[mode] = data_loader
    def _load_dict(self, token_dicts: Union[TokenDicts, Dict], ) -> None:
        """Accept either a ready TokenDicts or a plain dict spec for one."""
        if isinstance(token_dicts, dict):
            self.token_dicts = TokenDicts(token_dicts)
        elif isinstance(token_dicts, TokenDicts):
            self.token_dicts = token_dicts
        else:
            # NOTE(review): '{}}' renders a literal '}' after the class
            # name in the message — probably a typo for '{}'.
            raise NotImplementedError('{}} token_dicts is wrong'.format(self.__class__.__name__))
    def __getitem__(self, key: str) -> Any:
        # loader[mode] -> the DataLoader for that mode.
        return self.data_loaders[key]
    @property
    def size(self, ) -> Dict:
        # Mapping of mode -> number of examples.
        return self.LENGTH
    def __repr__(self):
        return str(self)
    def __str__(self):
        return '{}: {} - {}, batch_size({})'.format(
            self.__class__.__name__, self.lng, self.LENGTH, self.batch_size
        )
| CGCL-codes/code_summarization_meta | src/dataset/unilang_dataloader.py | unilang_dataloader.py | py | 4,080 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "joblib.Parallel",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"lin... |
28207011902 | from tkinter import *
from tkinter.ttk import *
import ttkbootstrap as ttk
from ttkbootstrap.tooltip import ToolTip
from tkinter import filedialog
import sys, os, re
import tkinter
from tkinter import messagebox
from Excel_optimize import settings
from cle.data_clean import CleanWOExcel, CleanBugExcel
from sum.data_transfer import Transfer
from mod.modify_sheet import Modification, TBTJ, SJTJ_old
"""
全局通用函数
"""
def find_books(path):
    """Classify the workbooks found directly under *path* by filename.

    Returns ``(together, unclosed_bug, all_bug, wo_list)``:
    ``wo_list`` collects every file whose name contains "工单" (work
    order); the other three are the last-seen files containing "汇总"
    (summary), "未关闭" (unclosed) and "所有" (all), falling back to the
    default workbook names "汇总表", "未关闭Bug" and "所有Bug" when no
    file matches.
    """
    # Only the first level of the tree: files directly under `path`.
    _, _, names = next(os.walk(path))
    wo_pat = re.compile(r'.*工单.*', flags=re.I)
    all_pat = re.compile(r'.*所有.*', flags=re.I)
    unclosed_pat = re.compile(r'.*未关闭.*', flags=re.I)
    together_pat = re.compile(r'.*汇总.*', flags=re.I)

    wo_list = []
    together, unclosed_bug, all_bug = '汇总表', '未关闭Bug', '所有Bug'
    for name in names:
        # Work-order names win over the other categories.
        if wo_pat.search(name):
            wo_list.append(name)
        elif all_pat.search(name):
            all_bug = name
        elif unclosed_pat.search(name):
            unclosed_bug = name
        elif together_pat.search(name):
            together = name
    return together, unclosed_bug, all_bug, wo_list
# Planned initializer: process every target workbook and return a dict
# {book_name: workbook name, sheet_name_list: [sheet names],
#  X_sheet_max_row: last effective row of sheet X}, capturing the max
# effective row count, the last cell, and the header row.
def init():
    """Placeholder — not implemented yet; see the comment above."""
    pass
def start(together, unclosed_bug, all_bug, wo_list):
    """Run the full pipeline: clean the bug/work-order workbooks, merge
    them into the summary workbook, then write the derived sheets.
    """
    CleanBugExcel(all_bug, 'Bug')
    CleanBugExcel(unclosed_bug, 'Bug')
    CleanWOExcel(*wo_list)  # star-unpack: pass every list element as a positional arg
    Transfer(together, unclosed_bug, all_bug, wo_list[0])  # argument order matters here
    mod = Modification(together, unclosed_bug, all_bug, wo_list[0])
    mod.save()
    tbtj = TBTJ(together, unclosed_bug, all_bug, wo_list[0])
    tbtj.save()
    sjtj = SJTJ_old(together, unclosed_bug, all_bug, wo_list[0])
    sjtj.save()
class mystdout(object):
    """Redirect sys.stdout/sys.stderr into a Tkinter Text widget.

    Instantiating the class installs the redirection immediately; call
    :meth:`restore_std` to put the original streams back.
    """

    def __init__(self, text):
        """Hook stdout/stderr and remember the target Text widget."""
        self.stdoutbak = sys.stdout
        self.stderrbak = sys.stderr
        sys.stdout = self
        sys.stderr = self
        self.text = text

    def write(self, info):
        # External print() statements end up here via the redirected
        # sys.stdout; append at the end and keep the latest output visible.
        self.text.insert('end', info)
        self.text.update()
        self.text.see(tkinter.END)

    def flush(self):
        """No-op flush. Required for the file-object protocol: callers
        such as print(..., flush=True) and the logging module invoke
        sys.stdout.flush(), which would otherwise raise AttributeError.
        """
        pass

    def restore_std(self):
        """Re-install the original stdout/stderr streams."""
        sys.stdout = self.stdoutbak
        sys.stderr = self.stderrbak
# Auto-hiding scrollbar behaviour.
def scrollbar_auto_hide(bar, widget):
    """Keep *bar* hidden behind *widget* except while the pointer is
    over either of them (hovering shows the scrollbar, leaving hides it).
    """
    def _show(_event=None):
        bar.lift(widget)

    def _hide(_event=None):
        bar.lower(widget)

    _hide()  # start hidden
    for target in (widget, bar):
        target.bind("<Enter>", _show)
        target.bind("<Leave>", _hide)
# 容器类,继承于Frame,本类实现了许多组件的自动挂载
class Frame(ttk.Frame):
    """Main container of the report-merging tool: file/folder pickers,
    a theme selector, the merge button and a scrolling output box."""
    def __init__(self):
        super().__init__()  # Initialize the inherited attributes via the parent initializer.
        """
        super()的使用
        python2必须写成 super(子类,self).方法名(参数)
        python3也可直接写成 super().方法名(参数)
        """
        self.locate() # Place this frame at its grid position.
        label = Label(self, text="报表整合工具", anchor="center")
        label.grid(row=1, column=1, padx=100, pady=20)
        label = Label(self, text="更换皮肤", anchor="center")
        label.grid(row=1, column=3)
        select_skin = self.tk_select_box_sele_skin()
        select_skin.grid(row=1, column=4)
        # Work-order reports (multi-select)
        label = Label(self, text="选择工单报表(多选)", anchor="center")
        label.grid(row=2, column=1, pady=0)
        self.select_path_WO = ttk.StringVar()
        ipt = Entry(self, textvariable=self.select_path_WO, width=30)
        ipt.grid(row=3, column=1, padx=30, pady=0)
        btn = Button(self, text="选择文件", command=self.select_file_wo)
        btn.grid(row=3, column=2)
        # Unclosed-bug report
        label = Label(self, text="未关闭Bug报表", anchor="center")
        label.grid(row=4, column=1)
        self.select_path_Un = ttk.StringVar()
        ipt = Entry(self, textvariable=self.select_path_Un, width=30)
        ipt.grid(row=5, column=1)
        btn = Button(self, text="选择文件", command=self.select_file_un)
        btn.grid(row=5, column=2)
        # All-bugs report
        label = Label(self, text="所有Bug报表", anchor="center")
        label.grid(row=6, column=1)
        self.select_path_All = ttk.StringVar()
        ipt = Entry(self, textvariable=self.select_path_All, width=30)
        ipt.grid(row=7, column=1)
        btn = Button(self, text="选择文件", command=self.select_file_all)
        btn.grid(row=7, column=2)
        # Target file
        label = Label(self, text="目标文件", anchor="center")
        label.grid(row=8, column=1)
        self.select_path_Tag = ttk.StringVar()
        ipt = Entry(self, textvariable=self.select_path_Tag, width=30)
        ipt.grid(row=9, column=1)
        btn = Button(self, text="选择文件", command=self.select_file_tag)
        btn.grid(row=9, column=2)
        # Folder picker.
        # NOTE(review): these widgets grid into row 9, the same cells as
        # the target-file entry/button above — they stack on top of them.
        label = Label(self, text="选择文件夹", anchor="center")
        label.grid(row=9, column=1)
        self.select_path_folder = ttk.StringVar()
        ipt = Entry(self, textvariable=self.select_path_folder, width=30)
        ipt.grid(row=9, column=1)
        btn = Button(self, text="选择文件夹", command=self.select_folder)
        btn.grid(row=9, column=2)
        # Start button
        btn = Button(self, text="合并", command=self.read_log) # command takes the callable itself — no parentheses.
        # default tooltip
        ToolTip(btn, text="请不要重复点击!")
        btn.grid(row=10, column=1)
        # Output text box
        self.text = Text(self, borderwidth=5, bg='#B0E0E6', width=80, height=38)
        # self.text.tag_add('tag',)
        # self.text.tag_config(foreground='red')
        self.text.grid(row=2, column=3, rowspan=20, columnspan=10, padx=10, pady=30, sticky=ttk.SE)
    def locate(self): # Place this frame at a fixed position in its parent grid.
        self.grid(row=0, column=0, rowspan=20, columnspan=20)
    def tk_select_box_sele_skin(self):
        """Build the theme-selection combobox; picking an entry swaps the
        ttkbootstrap theme immediately."""
        style = ttk.Style()
        theme_names = style.theme_names() # Returns the available theme names as a list.
        cb = ttk.Combobox(self, values=theme_names)
        cb.current(theme_names.index(theme_names[8]))
        def change_theme(event):
            theme_cbo_value = cb.get()
            style.theme_use(theme_cbo_value)
            cb.selection_clear()
        cb.bind('<<ComboboxSelected>>', change_theme)
        return cb
    # Callback handlers
    def select_file_wo(self):
        # Multiple-file selection
        selected_files_path = filedialog.askopenfilenames()  # askopenfilenames picks several files
        self.select_path_WO.set('\n'.join(selected_files_path))  # join the paths with newlines
    def select_file_un(self):
        # Single-file selection
        selected_file_path = filedialog.askopenfilename()  # askopenfilename picks one file
        self.select_path_Un.set(selected_file_path)
    def select_file_all(self):
        # Single-file selection
        selected_file_path = filedialog.askopenfilename()  # askopenfilename picks one file
        self.select_path_All.set(selected_file_path)
    def select_file_tag(self):
        # Single-file selection
        selected_file_path = filedialog.askopenfilename()  # askopenfilename picks one file
        self.select_path_Tag.set(selected_file_path)
    def select_folder(self):
        # Directory selection
        selected_folder = filedialog.askdirectory()  # askdirectory picks a folder
        self.select_path_folder.set(selected_folder)
    def read_log(self):
        """Validate the folder choice, then run the merge pipeline while
        mirroring all print output into the Text box."""
        # Clear the output box first
        if not self.text.compare("end-1c", "==", "1.0"):
            self.text.delete("1.0", END)
        out = mystdout(self.text)  # Redirect print output into the Text widget.
        if not self.select_path_folder.get():
            messagebox.showerror(title='出错', message='请先选择操作路径!')
        else:
            try:
                os.chdir(self.select_path_folder.get())
                self.send_folder_path()
                to, un, al, wo = find_books(self.select_path_folder.get())
                start(to, un, al, wo)
            except Exception as e:
                messagebox.showerror(title='出错', message=str(e))
    def send_folder_path(self):
        # Publish the chosen folder to the shared settings module.
        folder_path = self.select_path_folder.get()
        settings.basic_path = folder_path
# 窗口类,继承于Tk
class WinGUI(ttk.Window): # 让这个窗口继承于ttk的Window
def __init__(self):
super().__init__()
self.set_attr()
self.frame = Frame() # 实例化本窗口类或者他的子类时,就会自动实例化一个容器类Frame,同时这个容器马上就有了各种组件
self.themename = 'solar'
def set_attr(self):
self.title("报表整理工具")
# 设置窗口大小、居中
width = 1200
height = 900
screenwidth = self.winfo_screenwidth()
screenheight = self.winfo_screenheight()
geometry = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 3)
self.geometry(geometry)
self.resizable(width=False, height=False)
class Win(WinGUI):
def __init__(self):
super().__init__()
self.event_bind()
def event_bind(self):
pass
if __name__ == "__main__":
win = Win()
win.mainloop()
| Layneliang24/Excel_optimize | UI.py | UI.py | py | 10,029 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 23,
... |
38925814728 | import torch
"""
"""
import time
class data_prefetcher():
def __init__(self, loader):
st = time.time()
self.loader = iter(loader)
self.origin_loader = iter(loader)
# print('Generate loader took', time.time() - st)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_batch = next(self.loader)
except StopIteration:
self.next_batch = None
return
with torch.cuda.stream(self.stream):
self.next_batch = self.next_batch.cuda(non_blocking=True)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.next_batch
self.preload()
return batch | TsinghuaDatabaseGroup/AI4DBCode | FACE/train/prefetcher.py | prefetcher.py | py | 758 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda.Stream",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.stream",
"l... |
20105856322 | from django.utils.translation import gettext_lazy as _
from .base import ModelBase
from django.db import models
class Entity(ModelBase):
class Meta:
db_table = "nlp_entity"
ordering = ["pk"]
verbose_name = _("entity")
verbose_name_plural = _("entities")
bot = models.ForeignKey(
"nlp.NLPBot", on_delete=models.CASCADE, verbose_name=_("nlp bot")
)
entity_name = models.CharField(
max_length=100, null=False, verbose_name=_("entity name")
)
entity_category = models.ForeignKey(
"nlp.EntityCategory",
null=True,
blank=True,
on_delete=models.SET_NULL,
verbose_name=_("entity category"),
)
synonym = models.CharField(null=True, blank=True, verbose_name=_("synonym"))
def __str__(self):
return self.entity_name
| tomodachii/mytempura | nlp/models/entity.py | entity.py | py | 843 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "base.ModelBase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 11,
"usage_type": "call"
},
... |
24621919782 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/5/2 9:48
# @Author : yaomy
# real+render+fuse 所有物体
import json
import torch
import random
import torch.utils.data
import torch.nn.functional as F
import torchvision.transforms as transforms
import pickle as pkl
import numpy as np
import yaml
import os
import pickle
from pathlib import Path
from PIL import Image
import cv2
import numpy.ma as ma
from mmcv import Config
from lib.transform.coordinate import crop_resize_by_warp_affine
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJETCT_PATH = Path(os.path.realpath(__file__)).parent.parent.parent
CONFIG = Config.fromfile(f'{PROJETCT_PATH}/config/linemod/lm_v3.py')
class PoseDataset(torch.utils.data.Dataset):
def __init__(self, mode, num_point, add_noise, root, noise_trans, num_kps, cls_type=None, cfg=CONFIG):
self.obj_dict = {
'ape': 1, 'benchvise': 2, 'bowl': 3, 'cam': 4, 'can': 5, 'cat': 6, 'cup': 7, 'driller': 8,
'duck': 9, 'eggbox': 10, 'glue': 11, 'holepuncher': 12, 'iron': 13, 'lamp': 14, 'phone': 15,
}
self.obj_name = {v: k for k, v in self.obj_dict.items()}
if cls_type is None or cls_type == 'all':
self.objlist = [1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]
else:
self.objlist = [self.obj_dict[cls_type]]
print(f'{mode} Use Object: ', self.objlist)
self.mode = mode
self.num_point = num_point
self.num_kps = num_kps
self.cfg = cfg
self.back = cfg.Data.BACK
self.num_pt_mesh = num_point
self.num_pt_mesh_large = 2600
self.num_pt_mesh_small = num_point
self.noise_trans = noise_trans
self.add_noise = add_noise
self.height = 480.0
self.width = 640.0
self.scale = 1000.0
self.ymap = np.array([[j for i in range(640)] for j in range(480)])
self.xmap = np.array([[i for i in range(640)] for j in range(480)])
self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
# 要归一化后才能用这个
self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.sym_obj = [7, 8]
self.obj_dict_name = {v: k for k, v in self.obj_dict.items()}
self.intrinsic_matrix = {
'linemod': np.array([[572.4114, 0., 325.2611],
[0., 573.57043, 242.04899],
[0., 0., 1.]]),
'blender': np.array([[700., 0., 320.],
[0., 700., 240.],
[0., 0., 1.]]),
}
self.ori_iamge = np.zeros((480, 640))
self.gt_pose = torch.zeros((4, 4))
self.root = root
self.backgrounds = []
self.synthetic_all = []
self.real_all = []
self.diameter = []
self.pt = {}
self.extent = {}
self.lf_border = {}
self.meta = {}
self.kps = {}
self.centers = {}
info_yml = open(f'{CURRENT_DIR}/dataset_config/models_info.yml', 'r')
info = yaml.load(info_yml, Loader=yaml.FullLoader)
with open(f'{self.root}/test/test_bboxes/bbox_yolov3_all.json', 'r') as f:
eval_bbox_all = json.load(f)
for item in self.objlist:
if self.mode == 'train':
cls_type = self.obj_dict_name[item]
if item == 4:
cls_type = 'camera'
# real_img_pth = os.path.join(cls_root, "train.txt")
render_root = os.path.join(self.root, 'train', f'{item:06d}')
real_root = os.path.join(self.root, 'test', f'{item:06d}')
real_img_pth = os.path.join(self.root, 'image_set', f"{cls_type}_train.txt")
real_lst = read_lines(real_img_pth)
rnd_paths = sorted(Path(f'{render_root}/rgb').glob('*.png'))
with open(f'{render_root}/scene_gt.json', 'r') as f:
render_gt = json.load(f)
with open(f'{real_root}/scene_gt.json', 'r') as f:
real_gt= json.load(f)
with open(f'{render_root}/scene_gt_info.json', 'r') as f:
render_gt_info = json.load(f)
with open(f'{real_root}/scene_gt_info.json', 'r') as f:
real_gt_info = json.load(f)
with open(f'{render_root}/scene_camera.json', 'r') as f:
render_gt_camera = json.load(f)
with open(f'{real_root}/scene_camera.json', 'r') as f:
real_gt_camera = json.load(f)
self.synthetic_all += [{
'cls_id': item, 'im_id': syn.stem,
'cam_R_m2c': render_gt[f'{int(syn.stem)}'][0]['cam_R_m2c'],
'cam_t_m2c': render_gt[f'{int(syn.stem)}'][0]['cam_t_m2c'],
'bbox': render_gt_info[f'{int(syn.stem)}'][0]['bbox_visib'],
'cam_K': render_gt_camera[f'{int(syn.stem)}']['cam_K'],
# 'depth_scale': render_gt_camera[f'{int(syn.stem)}']['depth_scale'],
'depth_scale': 1000.,
'type': 'syn'
} for syn in rnd_paths]
self.real_all += [{
'cls_id': item, 'im_id': real_id,
'cam_R_m2c': real_gt[f'{int(real_id)}'][0]['cam_R_m2c'],
'cam_t_m2c': real_gt[f'{int(real_id)}'][0]['cam_t_m2c'],
'bbox': real_gt_info[f'{int(real_id)}'][0]['bbox_visib'],
'cam_K': real_gt_camera[f'{int(real_id)}']['cam_K'],
# 'depth_scale': real_gt_camera[f'{int(real_id)}']['depth_scale'],
'depth_scale': 1000.,
'type': 'real'
} for real_id in real_lst]
self.backgrounds += sorted(Path(self.back).glob('*.jpg'))
else:
tst_img_pth = os.path.join(self.root, 'image_set', f"{cls_type}_test.txt")
real_lst = read_lines(tst_img_pth)
real_root = os.path.join(self.root, 'test', f'{item:06d}')
with open(f'{real_root}/scene_gt.json', 'r') as f:
real_gt= json.load(f)
with open(f'{real_root}/scene_gt_info.json', 'r') as f:
real_gt_info = json.load(f)
with open(f'{real_root}/scene_camera.json', 'r') as f:
real_gt_camera = json.load(f)
self.real_all += [{
'cls_id': item, 'im_id': real_id,
'cam_R_m2c': real_gt[f'{int(real_id)}'][0]['cam_R_m2c'],
'cam_t_m2c': real_gt[f'{int(real_id)}'][0]['cam_t_m2c'],
'bbox': real_gt_info[f'{int(real_id)}'][0]['bbox_visib'],
'eval_bbox': eval_bbox_all[f'{int(item)}/{int(real_id)}'][0]['bbox_est'],
'cam_K': real_gt_camera[f'{int(real_id)}']['cam_K'],
# 'depth_scale': real_gt_camera[f'{int(real_id)}'][0]['depth_scale'],
'depth_scale': 1000.,
'type': 'real'
} for real_id in real_lst]
self.pt[item], self.extent[item], self.lf_border[item] = self._load_model('{0}/models/obj_{1}.ply'.format(self.root, '%06d' % item))
self.diameter.append(info[item]['diameter'] / 1000.0)
kps_pattern = os.path.join(CURRENT_DIR, 'dataset_config', 'kps_orb9_fps',
f"{self.obj_name[item]}_8_kps.txt")
corner_pattern = os.path.join(CURRENT_DIR, 'dataset_config', 'kps_orb9_fps',
f"{self.obj_name[item]}_corners.txt")
kps = np.loadtxt(kps_pattern, dtype=np.float32)
corners = np.loadtxt(corner_pattern, dtype=np.float32)
# center = np.mean(corners, axis=0).reshape(1, 3)
center = np.mean(self.pt[item], axis=0).reshape(1, 3)
self.kps[item] = kps
self.centers[item] = center
self.all_lst = self.real_all + self.synthetic_all
# self.all_lst = self.synthetic_all
print(f'len of real {self.mode}: ', len(self.real_all))
print(f'len of synthetic {self.mode}: ', len(self.synthetic_all))
print(f'len of background {self.mode}: ', len(self.backgrounds))
print(f'len of all {self.mode}: ', len(self.all_lst), '\n')
def _load_real_data(self, item):
im_id = item['im_id']
cls_id = item['cls_id']
if item['type'] == 'syn':
cls_root = os.path.join(self.root, 'train', f'{cls_id:06d}')
else:
cls_root = os.path.join(self.root, 'test', f'{cls_id:06d}')
with Image.open(os.path.join(cls_root, "depth/{}.png".format('%06d' % int(im_id)))) as di:
depth = np.array(di)
with open(f'{cls_root}/normal/{int(im_id):06d}-normal.pkl', 'rb') as f_n:
normal = pickle.load(f_n)
with open(f'{cls_root}/xyz/{int(im_id):06d}-coordinate.pkl', 'rb') as f_c:
coordinate = pickle.load(f_c)
with open(f'{cls_root}/xyz/{int(im_id):06d}-region.pkl', 'rb') as f_r:
region = pickle.load(f_r)
# if self.mode == 'eval':
# with Image.open(
# os.path.join(self.root, "segnet_results", f'{int(cls_id):02d}_label/{int(im_id):04d}_label.png')
# ) as li:
# label = np.array(li)
# else:
with Image.open(os.path.join(cls_root, "mask_visib/{}_000000.png".format('%06d' % int(im_id)))) as li:
label = np.array(li)
with Image.open(os.path.join(cls_root, "rgb/{}.png".format('%06d' % int(im_id)))) as ri:
# if self.add_noise:
# ri = self.trancolor(ri)
img = np.array(ri)[:, :, :3]
if item['type'] == 'render':
with Image.open(os.path.join(cls_root, "mask/{}_000000.png".format('%06d' % int(im_id)))) as li:
label_back = np.array(li)
mask_back = ma.getmaskarray(ma.masked_equal(label_back, 0))
seed = random.choice(self.backgrounds)
with Image.open(seed) as bf:
background = np.asarray(bf.convert("RGB"))
back = cv2.resize(np.array(self.trancolor(background)), (self.width, self.height))
img = back * mask_back + img
self.ori_image = img.copy()
cam_scale = item['depth_scale']
target_r = np.resize(np.array(item['cam_R_m2c']), (3, 3))
target_t = np.array(item['cam_t_m2c']) / cam_scale
self.gt_pose = self.rt2matrix(target_r, target_t)
if self.mode == 'eval':
bbox = item['eval_bbox']
else:
bbox = item['bbox']
mask_label = np.ma.getmaskarray(np.ma.masked_equal(label, np.array(255)))
# mask_depth = np.ma.getmaskarray(np.ma.masked_not_equal(depth, 0))
# mask_label = mask_label * mask_depth
return {
'img': img,
'depth': depth,
'normal': normal,
'coordinate': coordinate,
'region': region,
# 'croped_normal': croped_normal,
'mask_label': mask_label,
'cls_id': cls_id,
'K': np.asarray(item['cam_K']).reshape(3, 3),
'bbox': bbox,
'cam_scale': cam_scale,
'target_r': target_r,
'target_t': target_t,
'type': item['type'],
}
def _load_data(self, item):
while True:
ds = self._load_real_data(item)
if ds is None:
print(f'Real data error pass ! {item["type"]}')
item = random.choice(self.all_lst)
else:
break
out_size = self.cfg.Data.OUT_SIZE
input_size = self.cfg.Data.INPUT_SIZE
resize_type = self.cfg.Data.RESIZE_TYPE
rmin, rmax, cmin, cmax = get_square_bbox(ds['bbox'])
img_masked = np.array(ds['img'])[rmin:rmax, cmin:cmax, :]
cam_fx, cam_fy, cam_cx, cam_cy = ds['K'][0, 0], ds['K'][1, 1], ds['K'][0, 2], ds['K'][1, 2]
cls_id = ds['cls_id']
target_r, target_t = ds['target_r'], ds['target_t']
if ds['normal'].shape == (self.height, self.width, 3):
normal_ = ds['normal'][rmin:rmax, cmin:cmax, :]
else:
normal_ = ds['normal']
hole_normal = np.ones((int(self.height), int(self.width), 3))
hole_normal[rmin:rmax, cmin:cmax, :] = normal_
normal = hole_normal * (hole_normal != [1, 1, 1])
if resize_type == 'resize':
resize_normal = cv2.resize(normal, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
else:
resize_normal= crop_resize_by_warp_affine(
normal, [(cmin + cmax) / 2, (rmin + rmax) / 2], int(rmax - rmin), out_size, interpolation=cv2.INTER_NEAREST
)
# croped_normal_ = ds['croped_normal']
# croped_normal = croped_normal_ * (croped_normal_ != [1, 1, 1])
#
# if resize_type == 'resize':
# resize_croped_normal = cv2.resize(croped_normal, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
# else:
# resize_croped_normal= crop_resize_by_warp_affine(
# croped_normal, [(cmin + cmax) / 2, (rmin + rmax) / 2], int(rmax - rmin), 64, interpolation=cv2.INTER_NEAREST
# )
if ds['coordinate'].shape == (self.height, self.width, 3):
coordinate = ds['coordinate'][rmin:rmax, cmin:cmax, :]
else:
coordinate = ds['coordinate']
# 变成[0, 1]
# coordinate_map = np.ones_like(coordinate)
hole_coordinate = np.zeros((int(self.height), int(self.width), 3))
hole_coordinate[rmin:rmax, cmin:cmax, :] = coordinate
coordinate_map = np.zeros_like(hole_coordinate)
lf_border = self.lf_border[cls_id]
extent = self.extent[cls_id]
mask_obj = (hole_coordinate != [0, 0, 0])[..., 0]
mask = (ds['mask_label']*mask_obj).astype(np.float32)
if resize_type == 'resize':
resize_mask = cv2.resize(mask[:, :, None], (out_size, out_size), interpolation=cv2.INTER_NEAREST)
else:
resize_mask = crop_resize_by_warp_affine(
mask[:, :, None], [(cmin + cmax) / 2, (rmin + rmax) / 2], int(rmax - rmin), out_size,
interpolation=cv2.INTER_NEAREST
)
choose = mask[rmin:rmax, cmin:cmax, None].flatten().nonzero()[0]
if len(choose) > self.num_point:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:self.num_point] = 1
np.random.shuffle(c_mask)
# num_points+8
choose = choose[c_mask.nonzero()]
else:
try:
choose = np.pad(choose, (0, self.num_point - len(choose)), 'wrap')
except ValueError:
print('choose error:', item)
return None
choose = np.array([choose])
resize_choose = resize_mask.flatten().nonzero()[0]
if len(resize_choose) > self.num_point:
c_mask = np.zeros(len(resize_choose), dtype=int)
c_mask[:self.num_point] = 1
np.random.shuffle(c_mask)
# num_points+8
resize_choose = resize_choose[c_mask.nonzero()]
else:
try:
resize_choose = np.pad(resize_choose, (0, self.num_point - len(resize_choose)), 'wrap')
except ValueError:
print('choose error:', item)
return None
resize_choose = np.array([resize_choose])
if resize_type == 'resize':
hole_coordinate_map = cv2.resize(hole_coordinate, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
else:
hole_coordinate_map = crop_resize_by_warp_affine(
hole_coordinate, [(cmin + cmax) / 2, (rmin + rmax) / 2], int(rmax - rmin), out_size,
interpolation=cv2.INTER_NEAREST
)
coordinate_choosed = hole_coordinate_map.reshape(-1, 3)[resize_choose[0]].T
coordinate_map[:, :, 0] = (hole_coordinate[:, :, 0] - lf_border[0]) / extent[0]
coordinate_map[:, :, 1] = (hole_coordinate[:, :, 1] - lf_border[1]) / extent[1]
coordinate_map[:, :, 2] = (hole_coordinate[:, :, 2] - lf_border[2]) / extent[2]
coordinate_map = coordinate_map * (hole_coordinate != [0, 0, 0])
if resize_type == 'resize':
resize_coordinate_map = cv2.resize(coordinate_map, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
else:
resize_coordinate_map = crop_resize_by_warp_affine(
coordinate_map, [(cmin + cmax) / 2, (rmin + rmax) / 2], int(rmax - rmin), out_size,
interpolation=cv2.INTER_NEAREST
)
if ds['region'].shape == (self.height, self.width, 3):
region = ds['region'][rmin:rmax, cmin:cmax]
else:
region = ds['region']
hole_region = np.zeros((int(self.height), int(self.width)))
hole_region[rmin:rmax, cmin:cmax] = region
if resize_type == 'resize':
resize_region = cv2.resize(hole_region, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
else:
resize_region = crop_resize_by_warp_affine(
hole_region, [(cmin + cmax) / 2, (rmin + rmax) / 2], int(rmax - rmin), out_size,
interpolation=cv2.INTER_NEAREST
)
ori_kps = self.kps[cls_id] / 1.0
# ori_center = self.centers[cls_id] / 1.0
ori_center = np.array([[0., 0., 0.]])
all_kps = np.concatenate([ori_kps, ori_center], axis=0)
kps = np.dot(ori_kps, target_r.T) + target_t
center = np.dot(ori_center, target_r.T) + target_t
kps = np.concatenate([kps, center], axis=0)
u = kps[:, 0] * cam_fx / kps[:, 2] + cam_cx
v = kps[:, 1] * cam_fy / kps[:, 2] + cam_cy
# [8, 3]
uvd1 = np.stack([u, v, kps[:, 2]], axis=1)
model_points = self.pt[cls_id]
model_points:np.array
target = model_points @ target_r.T + target_t
add_t = np.array([random.uniform(-self.noise_trans, self.noise_trans) for _ in range(3)])
if self.add_noise:
target = np.add(target, add_t)
if resize_type == 'resize':
reize_img = cv2.resize(img_masked/255., (input_size, input_size), interpolation=cv2.INTER_LINEAR)
else:
reize_img = crop_resize_by_warp_affine(
ds['img']/255., [(cmin+cmax)/2, (rmin+rmax)/2], int(rmax-rmin), input_size, interpolation=cv2.INTER_LINEAR
)
kps_scale = float(rmax-rmin) / self.cfg.Data.INPUT_SIZE
resize_scale = float(rmax-rmin) / self.cfg.Data.OUT_SIZE
resize_uvd = uvd1.copy()
resize_uvd[..., 0] -= cmin
resize_uvd[..., 1] -= rmin
resize_uvd[..., :2] /= kps_scale
resize_xmap = torch.LongTensor([[i for i in range(out_size)] for _ in range(out_size)]) / out_size
resize_ymap = torch.LongTensor([[j for _ in range(out_size)] for j in range(out_size)]) / out_size
resize_cam_fx, resize_cam_fy, resize_cam_cx, resize_cam_cy = cam_fx / resize_scale, cam_fy / resize_scale, out_size / 2, out_size / 2
if resize_type == 'resize':
x_map = cv2.resize(self.xmap[rmin:rmax, cmin:cmax], (out_size, out_size), cv2.INTER_LINEAR)/640.
y_map = cv2.resize(self.ymap[rmin:rmax, cmin:cmax], (out_size, out_size), cv2.INTER_LINEAR)/480.
else:
x_map = crop_resize_by_warp_affine(
self.xmap/640., [(cmin+cmax)/2, (rmin+rmax)/2], int(rmax-rmin), out_size, interpolation=cv2.INTER_NEAREST
)
y_map = crop_resize_by_warp_affine(
self.ymap/480., [(cmin+cmax)/2, (rmin+rmax)/2], int(rmax-rmin), out_size, interpolation=cv2.INTER_NEAREST
)
return {
'target': torch.from_numpy(target.astype(np.float32)), # [Np, 3]
'model_points': torch.from_numpy(model_points.astype(np.float32)),
'cls_id': torch.LongTensor([self.objlist.index(cls_id)]),
'choose': torch.LongTensor(choose.astype(np.int32)),
'intrinsic': torch.FloatTensor([cam_fx, cam_fy, cam_cx, cam_cy]),
'uvd1': torch.from_numpy(uvd1), # [9, 3]
'trans_kps': torch.from_numpy(kps.astype(np.float32)),
'kps': torch.from_numpy(ori_kps.astype(np.float32)),
'all_kps': torch.from_numpy(all_kps.astype(np.float32)),
'trans_center': torch.from_numpy(center.astype(np.float32)),
'center': torch.from_numpy(ori_center.astype(np.float32)),
'target_r': torch.from_numpy(target_r.astype(np.float32)),
'target_t': torch.from_numpy(target_t.astype(np.float32)),
'bbox': torch.FloatTensor([rmin, rmax, cmin, cmax]),
'type': ds['type'],
'xmap': torch.from_numpy(x_map.astype(np.float32)).unsqueeze(dim=0),
'ymap': torch.from_numpy(y_map.astype(np.float32)).unsqueeze(dim=0),
'ori_mask': torch.from_numpy(ds['mask_label'].astype(np.float32)),
'ori_depth': torch.from_numpy(ds['depth'].astype(np.float32)),
'ori_coordinate': torch.from_numpy(hole_coordinate.astype(np.float32)),
'coordinate_choosed': torch.from_numpy(coordinate_choosed.astype(np.float32)),
'resize_img': torch.from_numpy(reize_img.astype(np.float32)).permute(2, 0, 1),
'resize_normal': F.normalize(torch.from_numpy(resize_normal).permute(2, 0, 1), p=2, dim=0).float(),
'resize_uvd': torch.from_numpy(resize_uvd),
'resize_scale': torch.tensor(resize_scale),
'kps_scale': torch.tensor(kps_scale),
'resize_xmap': resize_xmap,
'resize_ymap': resize_ymap,
'resize_choose': torch.LongTensor(resize_choose.astype(np.int32)),
'resize_intrinsic': torch.FloatTensor([resize_cam_fx, resize_cam_fy, resize_cam_cx, resize_cam_cy]),
'resize_coordinate': torch.from_numpy((resize_coordinate_map*resize_mask[:, :, None]).astype(np.float32)).permute(2, 0, 1),
'extent': torch.from_numpy(extent), # [3]
'lfborder': torch.from_numpy(lf_border), # [3]
# 'resize_croped_normal': F.normalize(torch.from_numpy(resize_croped_normal).permute(2, 0, 1), p=2, dim=0).float(), # [3]
'resize_mask': torch.from_numpy(resize_mask.astype(np.float32)).unsqueeze(dim=0),
'resize_region': torch.from_numpy(resize_region*resize_mask).long(),
}
def __getitem__(self, index):
item = self.all_lst[index]
ds = None
while ds is None:
ds = self._load_data(item)
item = random.choice(self.all_lst)
if self.ori_image.any():
ds['ori_img'] = self.ori_image
return ds
def __len__(self):
return len(self.all_lst)
@property
def _error_data(self):
cc = torch.tensor([0.])
return {'img': cc, 'flag': False}
def _load_model(self, model_path):
if self.mode == 'eval':
num_pt_mesh = self.num_pt_mesh_large
else:
num_pt_mesh = self.num_pt_mesh_small
model_points = ply_vtx(model_path) / self.scale
xmax, xmin = model_points[:, 0].max(), model_points[:, 0].min()
ymax, ymin = model_points[:, 1].max(), model_points[:, 1].min()
zmax, zmin = model_points[:, 2].max(), model_points[:, 2].min()
extent = np.array([xmax-xmin, ymax-ymin, zmax-zmin])
lf_border = np.array([xmin, ymin, zmin])
dellist = [j for j in range(0, len(model_points))]
dellist = random.sample(dellist, len(model_points) - num_pt_mesh)
model_points = np.delete(model_points, dellist, axis=0)
return model_points, extent, lf_border
@staticmethod
def rt2matrix(r, t):
ext = np.eye(4, 4)
ext[:3, :3] = r
ext[:3, 3] = t
return ext
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
# border_list = [-1] + list(range(8, 680, 8)) + [680]
def read_lines(p):
with open(p, 'r') as f:
return [line.strip() for line in f.readlines()]
def ply_vtx(path):
f = open(path)
assert f.readline().strip() == "ply"
f.readline()
f.readline()
N = int(f.readline().split()[-1])
while f.readline().strip() != "end_header":
continue
pts = []
for _ in range(N):
pts.append(np.float32(f.readline().split()[:3]))
return np.array(pts)
def get_bbox(bbox):
bbx = [bbox[1], bbox[1] + bbox[3], bbox[0], bbox[0] + bbox[2]]
if bbx[0] < 0:
bbx[0] = 0
if bbx[1] >= 480:
bbx[1] = 479
if bbx[2] < 0:
bbx[2] = 0
if bbx[3] >= 640:
bbx[3] = 639
rmin, rmax, cmin, cmax = bbx[0], bbx[1], bbx[2], bbx[3]
r_b = rmax - rmin
for tt in range(len(border_list)):
if border_list[tt] < r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
c_b = cmax - cmin
for tt in range(len(border_list)):
if border_list[tt] < c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > 480:
delt = rmax - 480
rmax = 480
rmin -= delt
if cmax > 640:
delt = cmax - 640
cmax = 640
cmin -= delt
return rmin, rmax, cmin, cmax
def get_square_bbox(bbox, height_px=480, width_px=640):
bbx = [bbox[1], bbox[1] + bbox[3], bbox[0], bbox[0] + bbox[2]]
if bbx[0] < 0:
bbx[0] = 0
if bbx[1] >= 480:
bbx[1] = 479
if bbx[2] < 0:
bbx[2] = 0
if bbx[3] >= 640:
bbx[3] = 639
rmin, rmax, cmin, cmax = bbx[0], bbx[1], bbx[2], bbx[3]
rmax += 1
cmax += 1
r_b = rmax - rmin # h
c_b = cmax - cmin # w
if r_b <= c_b:
r_b = c_b
else:
c_b = r_b
for tt in range(len(border_list)):
if border_list[tt] < r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
for tt in range(len(border_list)):
if border_list[tt] < c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > height_px:
delt = rmax - height_px
rmax = height_px
rmin -= delt
if rmin < 0:
rmax = rmax - rmin
rmin = 0
if rmax >= height_px:
rmax = height_px - 1
if cmax > width_px:
delt = cmax - width_px
cmax = width_px
cmin -= delt
if cmin < 0:
cmax = cmax - cmin
cmin = 0
if cmax >= width_px:
cmax = width_px - 1
m = (rmax - rmin) - (cmax - cmin)
if m > 0:
rmax = rmax - np.floor(m / 2)
rmin = rmin + np.floor(m / 2)
elif m < 0:
cmax = cmax + np.floor(m / 2)
cmin = cmin - np.floor(m / 2)
return int(rmin), int(rmax), int(cmin), int(cmax)
def main():
root_path = '/root/Source/ymy_dataset/lm-bop'
from numpy import array, float32
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
dataset = PoseDataset('train', 500, True, root_path, 0.000, 8, cls_type='ape')
dataloader_test = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=0)
ds_iter = dataloader_test.__iter__()
for _ in range(5):
datas = ds_iter.__next__()
datas = {k:v for k,v in datas.items() if type(v).__name__ == 'Tensor'}
print(datas['cls_id'].size())
def tst():
# p = '/root/Source/ymy_dataset/lm-bop/train/000001/rgb'
# paths = sorted(Path(p).glob('*.png'))
# print(paths[0].stem)
p = '/root/Source/ymy_dataset/lm-bop/train/000001/scene_gt.json'
with open(p) as f:
files = json.load(f)
print(type(files))
if __name__ == '__main__':
# tst()
main()
| yaomy533/pose_estimation | dataset/linemod/lm_bop.py | lm_bop.py | py | 29,087 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.