seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24369028230 | import os
import time
import argparse
from datetime import datetime
from utils.CpuMonitor import CpuMonitor
from utils.GpuMonitor import GpuMonitor
from utils.Recoder import Recoder
from utils.Printer import print_info, print_err, print_warn
def get_args():
    """Build the command-line parser for the recorder and return parsed args."""
    parser = argparse.ArgumentParser()
    # (flag names, add_argument options) — one row per supported option.
    option_specs = [
        (('--cpu-only',),
         {'help': 'Only record CPU temperature', 'action': 'store_true'}),
        (('--sample-interval',),
         {'help': 'Specify the data retrieve interval in seconds. Default is 10 seconds',
          'type': int, 'default': 10}),
        (('--log-file',),
         {'help': 'Record CSV file. Default is ./statistic.csv',
          'type': str, 'default': './statistic.csv'}),
    ]
    for names, options in option_specs:
        parser.add_argument(*names, **options)
    return parser.parse_args()
def main():
    """Entry point: sample CPU (and optionally GPU) temperatures forever.

    Builds the monitors requested on the command line, writes a CSV header,
    then appends one timestamped row per sample interval until the user
    interrupts with Ctrl-C.
    """
    args = get_args()
    sample_interval = args.sample_interval
    log_file = args.log_file
    log = Recoder(log_file)
    cpu_tester = None
    gpu_tester = None
    try:
        if not args.cpu_only:
            print_info("[INFO] Start to record GPU/CPU temperature in every {} sec.".format(sample_interval))
            cpu_tester = CpuMonitor()
            gpu_tester = GpuMonitor()
            log.generate_table_header(cpu_tester.num, gpu_tester.num)
        else:
            print_warn("[WARN] Only collect CPU Temperature.")
            # BUG FIX: a stray "\033[0m" ANSI reset sequence was previously
            # embedded at the end of this message; removed so logs stay clean.
            print_info("[INFO] Start to record CPU temperature in every {} sec.".format(sample_interval))
            cpu_tester = CpuMonitor()
            log.generate_table_header(cpu_tester.num)
        while True:
            # Timestamp without the microsecond part, e.g. "2024-01-01 12:00:00".
            timestamp = str(datetime.now()).split('.')[0]
            data_row = [timestamp]
            data_row.extend(cpu_tester.get_statistics())
            if gpu_tester is not None:
                data_row.extend(gpu_tester.get_statistics())
            log.write_record_file(','.join(data_row))
            time.sleep(sample_interval)
    except KeyboardInterrupt:
        print()
        print_err("[ERROR] Keyboard Interrupted, temperature recording program exit.")
        exit()
# Run the recorder only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| Huang-Junchen/hardware-tester | no_gui.py | no_gui.py | py | 2,093 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.Recoder.Recoder",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "utils.Printer.print_info",
"line_number": 34,
"usage_type": "call"
},
{
"api_name":... |
41587212510 | import os
import numpy as np
import warnings
from equilib import equi2cube
import cv2
import torch
from PIL import Image
def main():
    """Convert one equirectangular image to a horizon-format cubemap on GPU.

    Reads a hard-coded input JPEG, runs equilib's equi2cube on CUDA, and
    writes the result to a hard-coded output path.
    """
    image_path = "./data/images/000001.jpg"
    equi_img = Image.open(image_path)
    img_mode = equi_img.mode
    equi_img = np.asarray(equi_img)
    print("equi_img: ", equi_img.shape)
    # equilib expects channel-first (C, H, W) input.
    equi_img = np.transpose(equi_img, (2, 0, 1))
    print(equi_img.shape)
    # No rotation applied to the viewing direction.
    rots = {
        "roll": 0,
        "pitch": 0,
        "yaw": 0
    }
    mode = "bilinear"
    # mode = "bicubic"
    # mode = "nearest"
    # NOTE(review): requires a CUDA-capable device; fails on CPU-only hosts.
    equi_img_torch = torch.from_numpy(equi_img).to('cuda')
    #cube = equi2cube(equi = equi_img,cube_format="horizon", rots=rots, w_face=3368, z_down=False, mode=mode)
    cube = equi2cube(equi = equi_img_torch,cube_format="horizon", rots=rots, w_face=3368, z_down=False, mode=mode)
    cube = cube.to('cpu').detach().numpy().copy()
    print("cube.shape", cube.shape)
    print("type: ", type(cube))
    print("size: ", cube.size, "shape: ", cube.shape)
    # Back to channel-last (H, W, C) for PIL.
    cube = cube.transpose(1,2,0)
    out_image = Image.fromarray(cube, img_mode)
    out_path = "./data/results/00001.jpg"
    out_image.save(out_path)
if __name__ == "__main__":
main() | motoki/nsworks | src/sphere2cube.py | sphere2cube.py | py | 1,211 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_nu... |
21883804476 |
# coding: utf-8
# In[2]:
CUDA_VISIBLE_DEVICES = 1
# In[3]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# In[4]:
# Build the training arrays from 700 paired images on disk:
# trainA = clean ground-truth frames, trainB = rainy inputs,
# each converted BGR->RGB and resized to 256x256.
trainA = []
trainB = []
for i in range(1,701):
    img = cv2.imread('rain/{}clean.jpg'.format(i))
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    img = cv2.resize(img,(256,256))
    trainA.append(img)
    img = cv2.imread('rain/{}bad.jpg'.format(i))
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    img = cv2.resize(img,(256,256))
    trainB.append(img)
trainA = np.array(trainA)
trainB = np.array(trainB)
# Normalize uint8 pixels from [0, 255] into [-1, 1] (tanh generator range).
trainA = (trainA - 127.5)/127.5
trainB = (trainB - 127.5)/127.5
# In[ ]:
from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, BatchNormalization, Activation, add
from keras.models import Model, model_from_json
from keras.optimizers import Adam
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.utils.vis_utils import plot_model
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), activation='relu', name=None):
    '''
    2D convolution followed by batch normalization and an optional activation.

    Arguments:
        x {keras layer} -- input layer
        filters {int} -- number of filters
        num_row {int} -- filter height
        num_col {int} -- filter width

    Keyword Arguments:
        padding {str} -- mode of padding (default: {'same'})
        strides {tuple} -- stride of convolution operation (default: {(1, 1)})
        activation {str} -- activation function, or None to skip (default: {'relu'})
        name {str} -- name assigned to the activation layer (default: {None})

    Returns:
        [keras layer] -- [output layer]
    '''
    out = Conv2D(filters, (num_row, num_col), strides=strides,
                 padding=padding, use_bias=False)(x)
    out = BatchNormalization(axis=3, scale=False)(out)
    if activation is None:
        return out
    return Activation(activation, name=name)(out)
def trans_conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(2, 2), name=None):
    '''
    2D transposed convolution followed by batch normalization.

    Arguments:
        x {keras layer} -- input layer
        filters {int} -- number of filters
        num_row {int} -- filter height
        num_col {int} -- filter width

    Keyword Arguments:
        padding {str} -- mode of padding (default: {'same'})
        strides {tuple} -- stride of convolution operation (default: {(2, 2)})
        name {str} -- name of the layer (default: {None})

    Returns:
        [keras layer] -- [output layer]
    '''
    upsampled = Conv2DTranspose(filters, (num_row, num_col),
                                strides=strides, padding=padding)(x)
    return BatchNormalization(axis=3, scale=False)(upsampled)
def MultiResBlock(U, inp, alpha = 1.67):
    '''
    MultiRes block: three chained 4x4 convolutions whose concatenation is
    added residually to a 1x1 projection of the input.

    Arguments:
        U {int} -- number of filters in a corresponding UNet stage
        inp {keras layer} -- input layer

    Returns:
        [keras layer] -- [output layer]
    '''
    W = alpha * U
    f_small, f_mid, f_large = int(W*0.167), int(W*0.333), int(W*0.5)
    # 1x1 projection so the residual addition has matching channel counts.
    shortcut = conv2d_bn(inp, f_small + f_mid + f_large, 1, 1,
                         activation=None, padding='same')
    branch_a = conv2d_bn(inp, f_small, 4, 4, activation='relu', padding='same')
    branch_b = conv2d_bn(branch_a, f_mid, 4, 4, activation='relu', padding='same')
    branch_c = conv2d_bn(branch_b, f_large, 4, 4, activation='relu', padding='same')
    out = concatenate([branch_a, branch_b, branch_c], axis=3)
    out = BatchNormalization(axis=3)(out)
    out = add([shortcut, out])
    out = Activation('relu')(out)
    out = BatchNormalization(axis=3)(out)
    return out
def ResPath(filters, length, inp):
    '''
    ResPath: a chain of `length` residual stages, each a 4x4 convolution added
    to a 1x1 projection of its own input.

    Arguments:
        filters {int} -- number of filters per stage
        length {int} -- number of residual stages in the path
        inp {keras layer} -- input layer

    Returns:
        [keras layer] -- [output layer]
    '''
    out = inp
    for _ in range(length):
        shortcut = conv2d_bn(out, filters, 1, 1,
                             activation=None, padding='same')
        out = conv2d_bn(out, filters, 4, 4, activation='relu', padding='same')
        out = add([shortcut, out])
        out = Activation('relu')(out)
        out = BatchNormalization(axis=3)(out)
    return out
def MultiResUnet(height, width, n_channels):
    '''
    MultiResUNet generator: two stacked encoder-decoder passes with skip
    connections, ending in a tanh RGB output.

    Arguments:
        height {int} -- height of image
        width {int} -- width of image
        n_channels {int} -- number of channels in image

    Returns:
        [keras model] -- MultiResUNet model
    '''
    inputs = Input((height, width, n_channels))
    # --- First encoder ---
    mresblock1 = MultiResBlock(32, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(mresblock1)
    mresblock1 = ResPath(32, 4, mresblock1)
    mresblock2 = MultiResBlock(32*2, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(mresblock2)
    mresblock2 = ResPath(32*2, 3, mresblock2)
    mresblock3 = MultiResBlock(32*4, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(mresblock3)
    mresblock3 = ResPath(32*4, 2, mresblock3)
    mresblock4 = MultiResBlock(32*8, pool3)
    # --- First decoder: upsample and fuse with encoder skips ---
    up5 = concatenate([Conv2DTranspose(
        32*4, (2, 2), strides=(2, 2), padding='same')(mresblock4), mresblock3], axis=3)
    mresblock6 = MultiResBlock(32*4, up5)
    up6 = concatenate([Conv2DTranspose(
        32*2, (2, 2), strides=(2, 2), padding='same')(mresblock6), mresblock2], axis=3)
    mresblock7 = MultiResBlock(32*2, up6)
    up7 = concatenate([Conv2DTranspose(
        32, (2, 2), strides=(2, 2), padding='same')(mresblock7), mresblock1], axis=3)
    mresblock8 = MultiResBlock(32, up7)
    g = Conv2DTranspose(3, (4,4), strides=(1,1), padding='same')(mresblock8)
    output1 = Activation('tanh')(g)
    #second encoder-decoder ##############################################################################################
    mresblock10 = MultiResBlock(32, output1)
    pool10 = MaxPooling2D(pool_size=(2, 2))(mresblock10)
    # NOTE(review): pool10 is never consumed; bridge1 below downsamples with a
    # strided Conv2D instead -- confirm this is intended.
    mresblock10 = ResPath(32, 4, mresblock10)
    bridge1 = concatenate([Conv2D(32*2,(2,2),strides=(2,2),padding='same')(mresblock10), mresblock7],axis=3)
    mresblock11 = MultiResBlock(32*2, bridge1)
    pool11 = MaxPooling2D(pool_size=(2, 2))(mresblock11)
    # NOTE(review): pool11 is likewise unused (bridge2 handles downsampling).
    mresblock11 = ResPath(32*2, 3, mresblock11)
    bridge2 = concatenate([Conv2D(32*4,(2,2),strides=(2,2),padding='same')(mresblock11), mresblock6],axis=3)
    mresblock12 = MultiResBlock(32*4, bridge2)
    pool12 = MaxPooling2D(pool_size=(2, 2))(mresblock12)
    mresblock12 = ResPath(32*4, 2, mresblock12)
    bridge3 = concatenate([Conv2D(32*8,(2,2),strides=(2,2),padding='same')(mresblock12), mresblock4],axis=3)
    # NOTE(review): bridge3 is computed but never consumed -- mresblock13 is
    # built from pool12 instead. Possibly a bug; verify against the design.
    mresblock13 = MultiResBlock(32*8, pool12)
    # --- Second decoder: fuses both passes' skip connections ---
    up16 = concatenate([Conv2DTranspose(
        32*4, (2, 2), strides=(2, 2), padding='same')(mresblock13), mresblock12, mresblock3], axis=3)
    mresblock16 = MultiResBlock(32*4, up16)
    up17 = concatenate([Conv2DTranspose(
        32*2, (2, 2), strides=(2, 2), padding='same')(mresblock16), mresblock11, mresblock2], axis=3)
    mresblock17 = MultiResBlock(32*2, up17)
    up18 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(mresblock17), mresblock10, mresblock1], axis=3)
    mresblock18 = MultiResBlock(32, up18)
    g = Conv2DTranspose(3, (4,4), strides=(1,1), padding='same')(mresblock18)
    output = Activation('tanh')(g)
    model = Model(inputs,output)
    return model
def main():
    """Instantiate the 256x256 RGB generator and print its layer summary."""
    generator = MultiResUnet(256, 256, 3)
    generator.summary()


if __name__ == '__main__':
    main()
# plot the model
#plot_model(model, to_file='generator_model_plot.png', show_shapes=True, show_layer_names=True)
# In[ ]:
from keras.layers import Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, add, Concatenate
from keras.models import Model, model_from_json
from keras.optimizers import Adam
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.utils.vis_utils import plot_model
from keras.initializers import RandomNormal
from tensorflow.keras.losses import BinaryCrossentropy
def define_discriminator(image_shape):
    """PatchGAN discriminator over (source, target) image pairs.

    Concatenates both images channel-wise, applies a strided conv stack
    (C64 without BN, then C128, C256 with BN), two stride-1 convs, and a
    sigmoid patch map. Compiled with Adam and half-weighted BCE loss.
    """
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # the two image inputs, merged channel-wise
    in_src_image = Input(shape=image_shape)
    in_target_image = Input(shape=image_shape)
    d = Concatenate()([in_src_image, in_target_image])
    # C64: no batch norm on the first layer
    d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C128 and C256: strided conv + BN + LeakyReLU
    for depth in (128, 256):
        d = Conv2D(depth, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
        d = BatchNormalization()(d)
        d = LeakyReLU(alpha=0.2)(d)
    # second-to-last layer keeps spatial size (stride 1)
    d = Conv2D(256, (4,4), padding='same', kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # one-channel patch output
    d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
    patch_out = Activation('sigmoid')(d)
    model = Model([in_src_image, in_target_image], patch_out)
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss= BinaryCrossentropy(from_logits=True), optimizer=opt, loss_weights=[0.5])
    return model
# Module-level smoke test: building and summarizing the discriminator runs as
# a side effect at import time.
# define image shape
image_shape = (256,256,3)
# create the model
model = define_discriminator(image_shape)
# summarize the model
model.summary()
# plot the model
#plot_model(model, to_file='/content/drive/My Drive/test/discriminator_model_plot.png', show_shapes=True, show_layer_names=True)
# In[ ]:
from tensorflow.keras.losses import BinaryCrossentropy
def define_gan(g_model, d_model, image_shape):
    """Composite pix2pix model: generator output judged by a frozen discriminator.

    The discriminator weights are frozen inside this composite so only the
    generator is updated; losses are adversarial BCE plus L1 (weighted 1:100).
    """
    # freeze the discriminator within the composite model
    d_model.trainable = False
    in_src = Input(shape=image_shape)
    in_target = Input(shape = image_shape)
    # generator transforms the source, discriminator judges (source, generated)
    gen_out = g_model(in_src)
    dis_out = d_model([in_src, gen_out])
    model = Model([in_src,in_target], [dis_out, gen_out])
    model.compile(loss=[BinaryCrossentropy(from_logits=True), 'mae'],
                  optimizer=Adam(lr=0.0002, beta_1=0.5),
                  loss_weights=[1,100])
    return model
# In[ ]:
def generate_real_samples(n_samples, patch_shape):
    """Sample paired images from the module-level trainA/trainB arrays.

    Returns [X1, X2] (clean, rainy) plus all-ones 'real' patch labels of
    shape (n_samples, patch_shape, patch_shape, 1).
    """
    # choose random row indices into the paired training arrays
    ix = np.random.randint(0, trainA.shape[0], n_samples)
    labels = np.ones((n_samples, patch_shape, patch_shape, 1))
    return [trainA[ix], trainB[ix]], labels
# In[1]:
def generate_fake_samples(g_model, samples, patch_shape):
    """Run the generator on `samples` and pair its output with all-zero
    'fake' patch labels of shape (len(output), patch_shape, patch_shape, 1)."""
    fakes = g_model.predict(samples)
    labels = np.zeros((len(fakes), patch_shape, patch_shape, 1))
    return fakes, labels
# In[18]:
# generate samples and save as a plot and save the model
def summarize_performance(step, g_model, d_model, gan_model, n_samples=1):
    """Generate sample output, plot it, write it to disk, and checkpoint the
    generator weights under test1/."""
    # one real pair and its generated counterpart
    [X_realA, X_realB], _ = generate_real_samples(n_samples, 1)
    X_fakeB, _ = generate_fake_samples(g_model, X_realA, 1)
    # rescale from [-1, 1] to [0, 1]; fakes additionally to [0, 255] for cv2
    X_realA = (X_realA + 1) / 2.0
    X_realB = (X_realB + 1) / 2.0
    X_fakeB = 255 * ((X_fakeB + 1) / 2.0)
    # plot the generated target images
    for i in range(n_samples):
        plt.subplot(3, n_samples, 1 + n_samples + i)
        plt.axis('off')
        plt.imshow(X_fakeB[i])
    # persist the first generated image and the generator weights
    filename1 = 'test1/plot_%06d.png' % (step+1)
    cv2.imwrite(filename1, X_fakeB[0])
    filename2 = 'test1/g_model_%06d.h5' % (step+1)
    g_model.save(filename2)
    print('>Saved: %s and %s' % (filename1, filename2))
# In[32]:
def train(d_model, g_model, gan_model, n_epochs=200, n_batch=1, n_patch=32):
    """Pix2pix-style training loop over the module-level trainA/trainB arrays.

    Alternates discriminator updates on real and generated pairs with a
    composite generator update, and checkpoints once per epoch.

    Keyword Arguments:
        n_epochs {int} -- passes over the dataset (default: {200})
        n_batch {int} -- batch size (default: {1})
        n_patch {int} -- spatial size of the patch label maps (default: {32})
    """
    # calculate the number of batches per training epoch
    bat_per_epo = int(len(trainA) / n_batch)
    # calculate the number of training iterations
    n_steps = bat_per_epo * n_epochs
    # manually enumerate epochs
    for i in range(n_steps):
        # select a batch of real samples (X_realA = clean, X_realB = rainy)
        [X_realA, X_realB], y_real = generate_real_samples( n_batch, n_patch)
        # generate a batch of fake samples from the rainy inputs
        X_fakeB, y_fake = generate_fake_samples(g_model, X_realB, n_patch)
        # update discriminator for real samples
        d_loss1 = d_model.train_on_batch([X_realB, X_realA], y_real)
        # update discriminator for generated samples
        d_loss2 = d_model.train_on_batch([X_realB, X_fakeB], y_fake)
        # update the generator through the composite model
        g_loss, _, _ = gan_model.train_on_batch([X_realB,X_realA], [y_fake, X_realA])
        # summarize performance
        print('>%d, d1[%.3f] d2[%.3f] g[%.3f]' % (i+1, d_loss1, d_loss2, g_loss))
        # checkpoint once per epoch
        if (i+1) % (bat_per_epo * 1) == 0:
            summarize_performance(i, g_model,d_model, gan_model)
# In[ ]:
# Script entry: build discriminator, generator and the composite GAN, then
# train end to end (runs at import time).
image_shape = (256,256,3)
# define the models
d_model = define_discriminator(image_shape)
g_model = MultiResUnet(256,256,3)
# define the composite model
gan_model = define_gan(g_model, d_model, image_shape)
# train model
train(d_model, g_model, gan_model)
| programmer-770/Image_Deraining_GANs | multi_res_unet-Copy1.py | multi_res_unet-Copy1.py | py | 13,838 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line... |
42998147166 | from __future__ import annotations
import time
from datetime import date, datetime, timedelta
from logging import getLogger
from time import struct_time
from typing import Any, Callable
import pytz
from .compat import IS_WINDOWS
from .constants import is_date_type_name, is_timestamp_type_name
from .converter import (
ZERO_EPOCH,
SnowflakeConverter,
_adjust_fraction_of_nanoseconds,
_extract_timestamp,
_generate_tzinfo_from_tzoffset,
)
from .sfbinaryformat import SnowflakeBinaryFormat, binary_to_python
from .sfdatetime import SnowflakeDateFormat, SnowflakeDateTime, SnowflakeDateTimeFormat
logger = getLogger(__name__)
def format_sftimestamp(
    ctx: dict[str, Any], value: datetime | struct_time, franction_of_nanoseconds: int
) -> str:
    """Render a timestamp via the column's SnowSQL output format, or str() when
    no format is configured in ctx.

    NOTE(review): "franction_of_nanoseconds" is a typo of
    "fraction_of_nanoseconds"; kept as-is to avoid breaking keyword callers.
    """
    sf_datetime = SnowflakeDateTime(
        datetime=value, nanosecond=franction_of_nanoseconds, scale=ctx.get("scale")
    )
    return ctx["fmt"].format(sf_datetime) if ctx.get("fmt") else str(sf_datetime)
class SnowflakeConverterSnowSQL(SnowflakeConverter):
    """Snowflake Converter for SnowSQL.

    Format data instead of just converting the values into native
    Python objects.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # When True, date/timestamp formats may render years before year 1.
        self._support_negative_year = kwargs.get("support_negative_year", True)

    def _get_format(self, type_name: str) -> str:
        """Gets the output format string for the given Snowflake type name.

        Falls back to TIMESTAMP_OUTPUT_FORMAT for unset timestamp variants
        and to "YYYY-MM-DD" for DATE.
        """
        fmt = None
        if type_name == "DATE":
            fmt = self._parameters.get("DATE_OUTPUT_FORMAT")
            if not fmt:
                fmt = "YYYY-MM-DD"
        elif type_name == "TIME":
            fmt = self._parameters.get("TIME_OUTPUT_FORMAT")
        elif type_name + "_OUTPUT_FORMAT" in self._parameters:
            fmt = self._parameters[type_name + "_OUTPUT_FORMAT"]
            if not fmt:
                fmt = self._parameters["TIMESTAMP_OUTPUT_FORMAT"]
        elif type_name == "BINARY":
            fmt = self._parameters.get("BINARY_OUTPUT_FORMAT")
        return fmt

    #
    # FROM Snowflake to Python objects
    #
    # Note: Callable doesn't implement operator|
    def to_python_method(
        self, type_name: str, column: dict[str, Any]
    ) -> Callable | None:
        """Return a converter callable for the column, or None to skip.

        The converter is looked up by name (_<TYPE>_to_python) with a
        formatting context derived from the column metadata.
        """
        ctx = column.copy()
        if ctx.get("scale") is not None:
            # max_fraction/zero_fill support sub-second padding at this scale.
            ctx["max_fraction"] = int(10 ** ctx["scale"])
            ctx["zero_fill"] = "0" * (9 - ctx["scale"])
        fmt = None
        if is_date_type_name(type_name):
            # Windows cannot use struct_time for the full date range here.
            datetime_class = time.struct_time if not IS_WINDOWS else date
            fmt = SnowflakeDateFormat(
                self._get_format(type_name),
                support_negative_year=self._support_negative_year,
                datetime_class=datetime_class,
            )
        elif is_timestamp_type_name(type_name):
            fmt = SnowflakeDateTimeFormat(
                self._get_format(type_name),
                data_type=type_name,
                support_negative_year=self._support_negative_year,
                datetime_class=SnowflakeDateTime,
            )
        elif type_name == "BINARY":
            fmt = SnowflakeBinaryFormat(self._get_format(type_name))
        logger.debug("Type: %s, Format: %s", type_name, fmt)
        ctx["fmt"] = fmt
        converters = [f"_{type_name}_to_python"]
        for conv in converters:
            try:
                return getattr(self, conv)(ctx)
            except AttributeError:
                pass
        logger.warning("No column converter found for type: %s", type_name)
        return None  # Skip conversion

    def _BOOLEAN_to_python(self, ctx):
        """No conversion for SnowSQL."""
        return lambda value: "True" if value in ("1", "True") else "False"

    def _FIXED_to_python(self, ctx):
        """No conversion for SnowSQL."""
        return None

    def _REAL_to_python(self, ctx):
        """No conversion for SnowSQL."""
        return None

    def _BINARY_to_python(self, ctx):
        """BINARY to a string formatted by BINARY_OUTPUT_FORMAT."""
        return lambda value: ctx["fmt"].format(binary_to_python(value))

    def _DATE_to_python(self, ctx: dict[str, str | None]) -> Callable:
        """Converts DATE to struct_time/date.

        No timezone is attached.
        """

        def conv(value: str) -> str:
            # value is days since epoch; convert to seconds for gmtime.
            return ctx["fmt"].format(time.gmtime(int(value) * (24 * 60 * 60)))

        def conv_windows(value):
            ts = ZERO_EPOCH + timedelta(seconds=int(value) * (24 * 60 * 60))
            return ctx["fmt"].format(date(ts.year, ts.month, ts.day))

        return conv if not IS_WINDOWS else conv_windows

    def _TIMESTAMP_TZ_to_python(self, ctx: dict[str, Any]) -> Callable:
        """Converts TIMESTAMP TZ to datetime.

        The timezone offset is piggybacked.
        """
        scale = ctx["scale"]
        max_fraction = ctx.get("max_fraction")

        def conv0(encoded_value: str) -> str:
            # Encoded as "<epoch_seconds.fraction> <tz_offset_minutes+1440>".
            value, tz = encoded_value.split()
            microseconds = float(value)
            tzinfo = _generate_tzinfo_from_tzoffset(int(tz) - 1440)
            try:
                t = datetime.fromtimestamp(microseconds, tz=tzinfo)
            except OSError as e:
                logger.debug("OSError occurred but falling back to datetime: %s", e)
                t = ZERO_EPOCH + timedelta(seconds=microseconds)
                if pytz.utc != tzinfo:
                    t += tzinfo.utcoffset(t)
                t = t.replace(tzinfo=tzinfo)

            fraction_of_nanoseconds = _adjust_fraction_of_nanoseconds(
                value, max_fraction, scale
            )

            return format_sftimestamp(ctx, t, fraction_of_nanoseconds)

        def conv(encoded_value: str) -> str:
            # scale > 6: trim the fractional part to microsecond precision
            # before float conversion.
            value, tz = encoded_value.split()
            microseconds = float(value[0 : -scale + 6])
            tzinfo = _generate_tzinfo_from_tzoffset(int(tz) - 1440)
            try:
                t = datetime.fromtimestamp(microseconds, tz=tzinfo)
            except (OSError, ValueError) as e:
                logger.debug("OSError occurred but falling back to datetime: %s", e)
                t = ZERO_EPOCH + timedelta(seconds=microseconds)
                if pytz.utc != tzinfo:
                    t += tzinfo.utcoffset(t)
                t = t.replace(tzinfo=tzinfo)

            fraction_of_nanoseconds = _adjust_fraction_of_nanoseconds(
                value, max_fraction, scale
            )

            return format_sftimestamp(ctx, t, fraction_of_nanoseconds)

        return conv if scale > 6 else conv0

    def _TIMESTAMP_LTZ_to_python(self, ctx: dict[str, Any]) -> Callable:
        """Converts TIMESTAMP LTZ to a formatted string in the local timezone."""

        def conv(value: str) -> str:
            t, fraction_of_nanoseconds = self._pre_TIMESTAMP_LTZ_to_python(value, ctx)
            return format_sftimestamp(ctx, t, fraction_of_nanoseconds)

        return conv

    def _TIMESTAMP_NTZ_to_python(self, ctx: dict[str, Any]) -> Callable:
        """Converts TIMESTAMP NTZ to Snowflake Formatted String.

        No timezone info is attached.
        """

        def conv(value: str) -> str:
            microseconds, fraction_of_nanoseconds = _extract_timestamp(value, ctx)
            try:
                t = time.gmtime(microseconds)
            except (OSError, ValueError) as e:
                logger.debug("OSError occurred but falling back to datetime: %s", e)
                t = ZERO_EPOCH + timedelta(seconds=(microseconds))
            return format_sftimestamp(ctx, t, fraction_of_nanoseconds)

        return conv

    # TIME values share the NTZ conversion path.
    _TIME_to_python = _TIMESTAMP_NTZ_to_python
| snowflakedb/snowflake-connector-python | src/snowflake/connector/converter_snowsql.py | converter_snowsql.py | py | 7,534 | python | en | code | 511 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "time.struct_time",
... |
14913237067 | from __future__ import annotations
import datetime
from dataclasses import dataclass, field
import random
from typing import List, Dict, Any
from chess_manager.M import turn_model
MAX_STR_LEN = 122
def check_valid_int(user_int_input: Any) -> bool:
    """Return True when the user's input can be parsed as an int."""
    try:
        int(user_int_input)
    except ValueError:
        return False
    return True
def check_valid_str(user_str_input: Any) -> bool | str:
    """Return True for a non-empty string short enough to store, otherwise an
    error message describing the accepted length range."""
    if 0 < len(user_str_input) < MAX_STR_LEN:
        return True
    return f"You must enter from 1 to {MAX_STR_LEN} char."
# Maps each tournament-form field name to the validator applied to its input.
TOURNAMENT_FORM_VALIDATOR: Dict = {
    'name': check_valid_str,
    'place': check_valid_str,
    'description': check_valid_str,
    'turn_nbr': check_valid_int,
    'player_nbr': check_valid_int
}
def _shuffle_player_list(player_list: List) -> List:
"""Retourne une nouvelle liste mélangée"""
randomised_list = player_list[:]
random.shuffle(randomised_list)
return randomised_list
def _order_player_by_score(full_player_data: List) -> List:
full_player_data.sort(key=lambda individual_player_data: individual_player_data[1], reverse=True)
return full_player_data
def _reset_player_pairing(ordered_player_data: List) -> List:
"""Visite chaque joueur de la liste reçue et en reinitialise le compteur d'adversaires"""
for player_data in ordered_player_data:
player_data[0].clear_player_pairing()
return ordered_player_data
def _make_player_pair(ordered_player_data: List) -> List:
    """
    Takes a list of (player, score) entries and returns a list of
    ((player, score), (player, score)) pairs ordered by decreasing score.
    If every remaining player has already faced every other, all pairing
    histories are reset and pairing restarts recursively.
    """
    player_pairs = list()
    working_list = ordered_player_data[:]
    adversary_index = 0
    current_pairing_player = working_list.pop(0)
    while len(working_list) > 0:
        # Skip adversaries this player has already faced.
        if current_pairing_player[0].has_played_against(working_list[adversary_index][0]):
            adversary_index += 1
            if adversary_index >= len(working_list):
                # Everyone remaining has been faced already: reset and retry.
                print("Cannot make more player pair without player playing each other again")
                return _make_player_pair(_reset_player_pairing(ordered_player_data[:]))
            continue
        # First unplayed adversary found: pair them up and continue.
        player_pairs.append((current_pairing_player, working_list.pop(adversary_index)))
        adversary_index = 0
        if len(working_list) < 1:
            break
        current_pairing_player = working_list.pop(0)
    return player_pairs
@dataclass
class TournamentM:
    """Representation of a chess tournament."""

    name: str
    place: str
    turn_nbr: int = 4  # total number of turns planned for the tournament
    description: str = "No description"
    player_nbr: int = 0
    players: List = field(default_factory=list)  # registered player objects
    turn_list: List = field(default_factory=list)  # turns played so far
    start_date: str | None = None  # "%d/%m/%y %H:%M", set automatically if None
    end_date: str | None = None  # set by end_tournament()
    tournament_id: int = -1  # -1 until persisted

    def __post_init__(self) -> None:
        # Default the start date to "now" when none was supplied.
        if self.start_date is None:
            self.start_date = datetime.datetime.now().strftime("%d/%m/%y %H:%M")

    def register_turn(self, turn: turn_model.TurnM) -> None:
        """Append a turn to the tournament's turn history."""
        self.turn_list.append(turn)

    def get_current_turn_nbr(self) -> int:
        """Number of turns registered so far."""
        return len(self.turn_list)

    @property
    def is_finished(self) -> bool:
        """True once the end date is set, or when the last planned turn is done."""
        if self.end_date is not None:
            return True
        if self.get_current_turn_nbr() < self.turn_nbr:
            return False
        return self.turn_list[-1].finished

    def end_tournament(self) -> None:
        """Stamp the end date (idempotent: a second call is a no-op)."""
        if self.end_date is not None:
            return
        self.end_date = datetime.datetime.now().strftime("%d/%m/%y %H:%M")

    def get_next_turn_player_pair(self) -> List:
        """Pairs for the next turn: by last turn's scores, or a random
        shuffle with zeroed scores for the first turn."""
        if self.get_current_turn_nbr() > 0:
            return _make_player_pair(_order_player_by_score(self.turn_list[-1].get_turn_data()))
        player_list = _shuffle_player_list(self.players)
        player_data = [[player, 0] for player in player_list]
        return _make_player_pair(_order_player_by_score(player_data))

    def from_obj_to_dict(self) -> Dict:
        """Serialize to a plain dict (players/turns reduced to their ids)."""
        return {'name': self.name,
                'place': self.place,
                'turn_nbr': int(self.turn_nbr),
                'description': self.description,
                'player_nbr': int(self.player_nbr),
                'players': [player.player_id for player in self.players],
                'turn_list': [turn.turn_id for turn in self.turn_list],
                'start_date': self.start_date,
                'end_date': self.end_date,
                'tournament_id': self.tournament_id,
                }
| AntoineArchy/Chessmanager | chess_manager/M/tournament_model.py | tournament_model.py | py | 4,751 | python | en | code | null | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 38... |
35099642670 | from django.contrib.auth.models import Group
from mf.crud.models import Dolar, HistoryOperations, Permisology
from mf.user.models import User
from django.utils import timezone
from datetime import date, datetime, timedelta
def convertToDecimalFormat(n):
    """Convert a comma-decimal number string (e.g. "1.234,56") to dot-decimal
    form ("1234.56") by stripping thousands dots and swapping the comma."""
    without_thousands = n.replace('.', '')
    return without_thousands.replace(',', '.')
def get_dollar():
    """Return the two configured dollar exchange rates, creating default rows
    when they do not exist yet.

    Returns:
        dict: {'dolar1': float, 'dolar2': float}
    """
    try:
        dolar1 = Dolar.objects.using('default').get(pk=1)
        dl1 = float(dolar1.dolar)
    except Exception:
        # Row 1 missing: seed it with the default rate and re-read.
        new_dolar1 = Dolar()
        new_dolar1.dolar = '1000000'
        new_dolar1.save()
        dolar1 = Dolar.objects.using('default').get(pk=1)
        dl1 = float(dolar1.dolar)
    try:
        dolar2 = Dolar.objects.using('default').get(pk=2)
        dl2 = float(dolar2.dolar)
    except Exception:
        new_dolar2 = Dolar()
        new_dolar2.dolar = '1200000'
        new_dolar2.save()
        # BUG FIX: this branch previously re-fetched pk=1 and assigned dl1,
        # leaving dl2 unbound and raising NameError when building the result.
        dolar2 = Dolar.objects.using('default').get(pk=2)
        dl2 = float(dolar2.dolar)
    return {
        'dolar1': dl1,
        'dolar2': dl2
    }
def ValidatePermissions(perms, requestGroup):
    """Return True when the given group holds every permission codename in
    `perms`. An empty `perms` or any lookup failure yields False."""
    granted = False
    try:
        group = Group.objects.get(pk=requestGroup.id)
        for codename in perms:
            if not group.permissions.filter(codename=codename).exists():
                granted = False
                break
            granted = True
    except:
        granted = False
    return granted
def RegisterOperation(db, user, action):
    """Append a timestamped entry to the operations history.

    Returns 0 on success, 1 on any failure. (Local renamed from `date`,
    which shadowed the datetime.date import.)
    """
    now = timezone.localtime(timezone.now())
    try:
        entry = HistoryOperations()
        entry.datejoined = now.strftime('%Y-%m-%d | %H:%M:%S %p')
        entry.userSession_id = user
        entry.description = action
        entry.save()
        return 0
    except:
        return 1
def get_q_events_today():
    """Count Permisology events scheduled within the next 7 days.

    Returns 0 when no events match or when any error occurs.
    """
    try:
        window_start = date.today()
        window_end = window_start + timedelta(days=7)
        start_date = window_start.strftime('%Y-%m-%d')
        end_date = window_end.strftime('%Y-%m-%d')
        search = Permisology.objects.all()
        if len(start_date) and len(end_date):
            search = search.filter(day__range=[start_date, end_date])
        count = 0
        for _ in search:
            count += 1
        return count
    except:
        return 0
def get_events_today():
    """List Permisology events scheduled within the next 7 days.

    Returns:
        list[dict]: one entry per event with 'name', 'description' and
        'day' (formatted YYYY-MM-DD).
    """
    # Removed an unused `total` counter from the original implementation.
    data = []
    start = date.today()
    end = start + timedelta(days=7)
    start_date = start.strftime('%Y-%m-%d')
    end_date = end.strftime('%Y-%m-%d')
    search = Permisology.objects.all()
    if len(start_date) and len(end_date):
        search = search.filter(day__range=[start_date, end_date])
    for s in search:
        data.append(
            {
                'name': s.name,
                'description': s.description,
                'day': s.day.strftime('%Y-%m-%d'),
            }
        )
    return data
| isela1998/facebook | app/mf/crud/functions.py | functions.py | py | 2,923 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mf.crud.models.Dolar.objects.using",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mf.crud.models.Dolar.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "mf.crud.models.Dolar",
"line_number": 13,
"usage_type": "name"
},
... |
70970695463 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 11:20:09 2020
@author: KANNAN
"""
from flask import Flask, render_template, request
import emoji
#import sklearn
import pickle
# Load the trained logistic-regression model once at import time.
# NOTE(review): pickle.load can execute arbitrary code from the file; only
# deploy model files from trusted sources.
with open("diabetes_logreg.pkl", "rb") as model_file:
    model = pickle.load(model_file)

app = Flask(__name__)
@app.route('/')
def home():
    """Serve the diabetes-prediction input form."""
    return render_template('Diabetes.html')
def _glucose_flags(glucose):
    """One-hot (prediabetes, diabetes) flags from a glucose reading.

    (0, 0) for 0 < g < 140; (1, 0) for 140 <= g < 200; (0, 1) otherwise.
    Values <= 0 fall into the diabetic bucket, mirroring the original
    branch order.
    """
    if 0 < glucose < 140:
        return 0, 0
    if 140 <= glucose < 200:
        return 1, 0
    return 0, 1


def _blood_pressure_flags(pressure):
    """One-hot (stage-1, stage-2, emergency) hypertension flags."""
    if 0 < pressure < 80:
        return 0, 0, 0
    if 80 <= pressure < 90:
        return 1, 0, 0
    if 90 <= pressure < 120:
        return 0, 1, 0
    return 0, 0, 1


def _bmi_flags(bmi):
    """One-hot (normal, overweight, obese) BMI category flags."""
    if 0 < bmi < 18.5:
        return 0, 0, 0
    if 18.5 <= bmi < 24.9:
        return 1, 0, 0
    if 24.9 <= bmi < 29.9:
        return 0, 1, 0
    return 0, 0, 1


# Mean / standard deviation pairs used to standardise the continuous
# features exactly as during training.
# NOTE(review): presumably computed from the training split of the Pima
# diabetes dataset — confirm against the training notebook.
_PREGNANCIES_MEAN, _PREGNANCIES_STD = 3.874593, 3.443637
_SKIN_MEAN, _SKIN_STD = 29.180782, 8.94289800
_PEDIGREE_MEAN, _PEDIGREE_STD = 0.466471, 0.333203
_AGE_MEAN, _AGE_STD = 33.594463, 12.016168


@app.route('/predict', methods = ["GET", "POST"])
def Diabetes():
    """Render the form (GET) or run the model on the submitted vitals (POST).

    On POST, the raw form fields are bucketed into the same one-hot
    indicator columns and z-scored continuous columns the model was
    trained on, then the prediction is rendered back into the template.
    """
    if request.method == "POST":
        glucose_pre, glucose_dia = _glucose_flags(float(request.form["Glucose"]))
        bp_st1, bp_st2, bp_emer = _blood_pressure_flags(float(request.form["BloodPressure"]))
        bmi_normal, bmi_over, bmi_obese = _bmi_flags(float(request.form["BMI"]))

        insulin = float(request.form["Insulin"])
        insulin_normal = 1 if 100 <= insulin <= 126 else 0

        pregnancies = (float(request.form["Pregnancies"]) - _PREGNANCIES_MEAN) / _PREGNANCIES_STD
        skin_thickness = (float(request.form["SkinThickness"]) - _SKIN_MEAN) / _SKIN_STD
        pedigree = (float(request.form["DiabetesPedigreeFunction"]) - _PEDIGREE_MEAN) / _PEDIGREE_STD
        age = (float(request.form["Age"]) - _AGE_MEAN) / _AGE_STD

        # Feature order must match the column order used at training time.
        prediction = model.predict([[
            pregnancies,
            skin_thickness,
            pedigree,
            age,
            bmi_normal,
            bmi_over,
            bmi_obese,
            bp_st1,
            bp_st2,
            bp_emer,
            glucose_pre,
            glucose_dia,
            insulin_normal
        ]])

        if prediction[0] == 0:
            text = "You are Healthy!!"+"\U0001F603"
        else:
            text = "You have Diabetes"+"\U0001F61E"
        return render_template('Diabetes.html', prediction_text = text)
    return render_template('Diabetes.html')
if __name__ == '__main__':
    # Flask's built-in development server — not intended for production.
    app.run()
| GuruYohesh/ML | Diabetes Prediction/Diabetes_app.py | Diabetes_app.py | py | 3,799 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
... |
28890197379 | """PDB dataset loader."""
import tree
import numpy as np
import torch
import pandas as pd
import logging
import random
import functools as fn
from torch.utils import data
from data import utils as du
from openfold.data import data_transforms
from openfold.np import residue_constants
from openfold.utils import rigid_utils
class PdbDataset(data.Dataset):
    """Dataset of processed PDB chains for a frame-diffusion model.

    Each item is a dict of OpenFold-style features plus diffused
    (noised) rigid-body frames: training samples a random time ``t``,
    evaluation uses ``t = 1`` with a per-index fixed RNG seed.
    """

    def __init__(
            self,
            *,
            data_conf,
            diffuser,
            is_training,
        ):
        self._log = logging.getLogger(__name__)
        self._is_training = is_training
        self._data_conf = data_conf
        self._init_metadata()
        self._diffuser = diffuser

    @property
    def is_training(self):
        return self._is_training

    @property
    def diffuser(self):
        return self._diffuser

    @property
    def data_conf(self):
        return self._data_conf

    def _init_metadata(self):
        """Load the metadata CSV and apply the configured filters."""
        # Process CSV with different filtering criterions.
        filter_conf = self.data_conf.filtering
        pdb_csv = pd.read_csv(self.data_conf.csv_path)
        self.raw_csv = pdb_csv
        if filter_conf.allowed_oligomer is not None and len(filter_conf.allowed_oligomer) > 0:
            pdb_csv = pdb_csv[pdb_csv.oligomeric_detail.isin(
                filter_conf.allowed_oligomer)]
        if filter_conf.max_len is not None:
            pdb_csv = pdb_csv[pdb_csv.modeled_seq_len <= filter_conf.max_len]
        if filter_conf.min_len is not None:
            pdb_csv = pdb_csv[pdb_csv.modeled_seq_len >= filter_conf.min_len]
        if filter_conf.max_helix_percent is not None:
            pdb_csv = pdb_csv[
                pdb_csv.helix_percent < filter_conf.max_helix_percent]
        if filter_conf.max_loop_percent is not None:
            pdb_csv = pdb_csv[
                pdb_csv.coil_percent < filter_conf.max_loop_percent]
        if filter_conf.min_beta_percent is not None:
            pdb_csv = pdb_csv[
                pdb_csv.strand_percent > filter_conf.min_beta_percent]
        if filter_conf.subset is not None:
            pdb_csv = pdb_csv[:filter_conf.subset]
        # Sorting by length lets batching group similarly sized examples.
        pdb_csv = pdb_csv.sort_values('modeled_seq_len', ascending=False)
        self._create_split(pdb_csv)

    def _create_split(self, pdb_csv):
        """Assign ``self.csv``: full CSV for training, a fixed length-stratified
        subsample for evaluation."""
        # Training or validation specific logic.
        if self.is_training:
            self.csv = pdb_csv
            self._log.info(
                f'Training: {len(self.csv)} examples')
        else:
            # Pick `num_eval_lengths` lengths evenly spaced over the observed
            # range, then sample a fixed number of chains at each length.
            all_lengths = np.sort(pdb_csv.modeled_seq_len.unique())
            length_indices = (len(all_lengths) - 1) * np.linspace(
                0.0, 1.0, self._data_conf.num_eval_lengths)
            length_indices = length_indices.astype(int)
            eval_lengths = all_lengths[length_indices]
            eval_csv = pdb_csv[pdb_csv.modeled_seq_len.isin(eval_lengths)]
            # Fix a random seed to get the same split each time.
            eval_csv = eval_csv.groupby('modeled_seq_len').sample(
                self._data_conf.samples_per_eval_length, replace=True, random_state=123)
            eval_csv = eval_csv.sort_values('modeled_seq_len', ascending=False)
            self.csv = eval_csv
            self._log.info(
                f'Validation: {len(self.csv)} examples with lengths {eval_lengths}')

    # NOTE(review): lru_cache on an instance method keys on `self` too, so it
    # keeps every dataset instance (and its cached features) alive for the
    # process lifetime (flake8-bugbear B019). Kept as-is to preserve behavior.
    @fn.lru_cache(maxsize=50000)
    def _process_csv_row(self, processed_file_path):
        """Load one pickled chain, crop to modeled residues, and derive
        OpenFold frame/torsion features."""
        processed_feats = du.read_pkl(processed_file_path)
        processed_feats = du.parse_chain_feats(processed_feats)

        # Only take modeled residues.
        modeled_idx = processed_feats['modeled_idx']
        min_idx = np.min(modeled_idx)
        max_idx = np.max(modeled_idx)
        del processed_feats['modeled_idx']
        processed_feats = tree.map_structure(
            lambda x: x[min_idx:(max_idx+1)], processed_feats)

        # Run through OpenFold data transforms.
        chain_feats = {
            'aatype': torch.tensor(processed_feats['aatype']).long(),
            'all_atom_positions': torch.tensor(processed_feats['atom_positions']).double(),
            'all_atom_mask': torch.tensor(processed_feats['atom_mask']).double()
        }
        chain_feats = data_transforms.atom37_to_frames(chain_feats)
        chain_feats = data_transforms.make_atom14_masks(chain_feats)
        chain_feats = data_transforms.make_atom14_positions(chain_feats)
        chain_feats = data_transforms.atom37_to_torsion_angles()(chain_feats)

        # Re-number residue indices for each chain such that it starts from 1.
        # Randomize chain indices.
        chain_idx = processed_feats["chain_index"]
        res_idx = processed_feats['residue_index']
        new_res_idx = np.zeros_like(res_idx)
        new_chain_idx = np.zeros_like(res_idx)
        all_chain_idx = np.unique(chain_idx).tolist()
        shuffled_chain_idx = np.array(
            random.sample(all_chain_idx, len(all_chain_idx))) - np.min(all_chain_idx) + 1
        for i,chain_id in enumerate(all_chain_idx):
            # `np.int` was removed in NumPy 1.24; the builtin `int` is the
            # documented replacement and is what `np.int` aliased.
            chain_mask = (chain_idx == chain_id).astype(int)
            # (1 - mask) * 1e3 pushes other chains' indices out of the min.
            chain_min_idx = np.min(res_idx + (1 - chain_mask) * 1e3).astype(int)
            new_res_idx = new_res_idx + (res_idx - chain_min_idx + 1) * chain_mask

            # Shuffle chain_index
            replacement_chain_id = shuffled_chain_idx[i]
            new_chain_idx = new_chain_idx + replacement_chain_id * chain_mask

        # To speed up processing, only take necessary features
        final_feats = {
            'aatype': chain_feats['aatype'],
            'seq_idx': new_res_idx,
            'chain_idx': chain_idx,
            'residx_atom14_to_atom37': chain_feats['residx_atom14_to_atom37'],
            'residue_index': processed_feats['residue_index'],
            'res_mask': processed_feats['bb_mask'],
            'atom37_pos': chain_feats['all_atom_positions'],
            'atom37_mask': chain_feats['all_atom_mask'],
            'atom14_pos': chain_feats['atom14_gt_positions'],
            'rigidgroups_0': chain_feats['rigidgroups_gt_frames'],
            'torsion_angles_sin_cos': chain_feats['torsion_angles_sin_cos'],
        }
        return final_feats

    def _create_diffused_masks(self, atom37_pos, rng, row):
        """Sample a spatial-crop diffusion mask around a random seed residue.

        Returns a per-residue float mask selecting residues within a sampled
        CA-distance cutoff of the seed.
        """
        bb_pos = atom37_pos[:, residue_constants.atom_order['CA']]
        dist2d = np.linalg.norm(bb_pos[:, None, :] - bb_pos[None, :, :], axis=-1)

        # Randomly select residue then sample a distance cutoff
        # TODO: Use a more robust diffuse mask sampling method.
        diff_mask = np.zeros_like(bb_pos)
        attempts = 0
        while np.sum(diff_mask) < 1:
            crop_seed = rng.integers(dist2d.shape[0])
            seed_dists = dist2d[crop_seed]
            max_scaffold_size = min(
                self._data_conf.scaffold_size_max,
                seed_dists.shape[0] - self._data_conf.motif_size_min
            )
            scaffold_size = rng.integers(
                low=self._data_conf.scaffold_size_min,
                high=max_scaffold_size
            )
            dist_cutoff = np.sort(seed_dists)[scaffold_size]
            diff_mask = (seed_dists < dist_cutoff).astype(float)
            attempts += 1
            if attempts > 100:
                raise ValueError(
                    f'Unable to generate diffusion mask for {row}')
        return diff_mask

    def __len__(self):
        return len(self.csv)

    def __getitem__(self, idx):
        """Return diffused features for one chain (plus its name in eval mode)."""
        # Sample data example.
        example_idx = idx
        csv_row = self.csv.iloc[example_idx]
        if 'pdb_name' in csv_row:
            pdb_name = csv_row['pdb_name']
        elif 'chain_name' in csv_row:
            pdb_name = csv_row['chain_name']
        else:
            raise ValueError('Need chain identifier.')
        processed_file_path = csv_row['processed_path']
        chain_feats = self._process_csv_row(processed_file_path)

        # Use a fixed seed for evaluation.
        if self.is_training:
            rng = np.random.default_rng(None)
        else:
            rng = np.random.default_rng(idx)

        gt_bb_rigid = rigid_utils.Rigid.from_tensor_4x4(
            chain_feats['rigidgroups_0'])[:, 0]
        # Every residue is diffused; the fixed (motif) mask is its complement.
        diffused_mask = np.ones_like(chain_feats['res_mask'])
        if np.sum(diffused_mask) < 1:
            raise ValueError('Must be diffused')
        fixed_mask = 1 - diffused_mask
        chain_feats['fixed_mask'] = fixed_mask
        chain_feats['rigids_0'] = gt_bb_rigid.to_tensor_7()
        chain_feats['sc_ca_t'] = torch.zeros_like(gt_bb_rigid.get_trans())

        # Sample t and diffuse.
        if self.is_training:
            t = rng.uniform(self._data_conf.min_t, 1.0)
            diff_feats_t = self._diffuser.forward_marginal(
                rigids_0=gt_bb_rigid,
                t=t,
                diffuse_mask=None
            )
        else:
            t = 1.0
            diff_feats_t = self.diffuser.sample_ref(
                n_samples=gt_bb_rigid.shape[0],
                impute=gt_bb_rigid,
                diffuse_mask=None,
                as_tensor_7=True,
            )
        chain_feats.update(diff_feats_t)
        chain_feats['t'] = t

        # Convert all features to tensors.
        final_feats = tree.map_structure(
            lambda x: x if torch.is_tensor(x) else torch.tensor(x), chain_feats)
        final_feats = du.pad_feats(final_feats, csv_row['modeled_seq_len'])
        if self.is_training:
            return final_feats
        else:
            return final_feats, pdb_name
class LengthSampler(data.Sampler):
    """Deterministic sampler yielding every dataset index in CSV order."""

    def __init__(self, *, data_conf, dataset):
        self._data_conf = data_conf
        self._dataset = dataset
        self._data_csv = dataset.csv

    def __iter__(self):
        yield from range(len(self._data_csv))

    def __len__(self):
        return len(self._data_csv)
class TrainSampler(data.Sampler):
    """Sampler that repeats each dataset index ``batch_size`` times,
    reshuffling the index order on every epoch (every ``__iter__`` call).
    """

    def __init__(self, *, data_conf, dataset, batch_size):
        self._data_conf = data_conf
        self._dataset = dataset
        self._data_csv = dataset.csv
        self._indices = list(range(len(self._data_csv)))
        # Record each row's dataset index back onto the CSV.
        self._data_csv['index'] = self._indices
        self._batch_size = batch_size
        self.epoch = 0

    def __iter__(self):
        random.shuffle(self._indices)
        return iter(np.repeat(self._indices, self._batch_size))

    def set_epoch(self, epoch):
        self.epoch = epoch

    def __len__(self):
        return self._batch_size * len(self._indices)
| blt2114/twisted_diffusion_sampler | protein_exp/data/pdb_data_loader.py | pdb_data_loader.py | py | 10,693 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pand... |
16191137869 | import datetime
from django.utils import timezone
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models import Q
from the_mechanic_backend.apps.accounts.models import Store
from the_mechanic_backend.apps.stock.models import Brand, BrandModel, Spare, SpareCustomer, SpareOrder, SpareSold
from the_mechanic_backend.v0.stock import serializers
from the_mechanic_backend.v0.utils import Utils, CustomBaseClass, AppUtils
class BrandList(CustomBaseClass):
    """List existing brands or create a new one."""

    def get(self, request):
        """Return all brands, optionally narrowed by a ``search`` query param."""
        try:
            query = request.GET.get('search', '')
            brands = (self.get_filter_objects(Brand, name__icontains=query)
                      if query else self.get_all_objects(Brand))
            payload = serializers.BrandSerializer(brands, many=True).data
            return Utils.dispatch_success(request, payload)
        except Exception as exc:
            return self.internal_server_error(request, exc)

    def post(self, request):
        """Create a brand.

        Expected body: ``{"name": "Honda"}``
        """
        try:
            serializer = serializers.BrandSerializer(data=request.data)
            if not serializer.is_valid():
                return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)
            serializer.save()
            return Utils.dispatch_success(request, serializer.data)
        except Exception as exc:
            return self.internal_server_error(request, exc)
class BrandModelList(CustomBaseClass):
    """List or create the models of a given brand."""

    def get(self, request, brand_id):
        """Return the models of ``brand_id``, optionally filtered by ``search``."""
        try:
            query = request.GET.get('search', '')
            if query:
                models = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=query)
            else:
                models = self.get_filter_objects(BrandModel, brand=brand_id)
            payload = serializers.BrandModelSerializer(models, many=True).data
            return Utils.dispatch_success(request, payload)
        except Exception as exc:
            return self.internal_server_error(request, exc)

    def post(self, request, brand_id):
        """Create a model under ``brand_id``.

        Expected body: ``{"model_name": "Unicorn"}``
        """
        try:
            payload = request.data
            payload['brand'] = brand_id
            serializer = serializers.AddBrandModelSerializer(data=payload)
            if not serializer.is_valid():
                return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)
            serializer.save()
            return Utils.dispatch_success(request, serializer.data)
        except Exception as exc:
            return self.internal_server_error(request, exc)
class SpareList(CustomBaseClass):
    """List or create spares for one store + brand model."""

    def get(self, request, store_id, brand_model_id):
        """Return spares of a model in a store.

        Query params: ``search`` (matches spare id or name),
        ``out_of_stock`` (truthy restricts to quantity == 0); both may be
        combined.
        """
        try:
            query = request.GET.get('search')
            only_out_of_stock = request.GET.get('out_of_stock')
            spares = self.get_filter_objects(Spare, brand_model=brand_model_id, store=store_id)
            if query:
                spares = spares.filter(Q(spare_id__icontains=query) | Q(spare_name__icontains=query))
            if only_out_of_stock:
                spares = spares.filter(quantity=0)
            payload = serializers.SpareSerializer(spares, many=True).data
            return Utils.dispatch_success(request, payload)
        except Exception as exc:
            return self.internal_server_error(request, exc)

    def post(self, request, store_id, brand_model_id):
        """Create a spare under the given store and brand model.

        Expected body includes spare_name, spare_id, quantity, per_price,
        suppliers and quality_class; brand/store/brand_model are injected
        from the URL.
        """
        try:
            payload = request.data
            brand_model = self.get_object(BrandModel, brand_model_id)
            if not brand_model:
                return self.object_not_found(request)
            payload['brand'] = brand_model.brand.id
            payload['store'] = store_id
            payload['brand_model'] = brand_model_id
            serializer = serializers.AddSpareSerializer(data=payload)
            if not serializer.is_valid():
                return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)
            serializer.save()
            return Utils.dispatch_success(request, serializer.data)
        except Exception as exc:
            return self.internal_server_error(request, exc)
class SpareDetails(CustomBaseClass):
    """Retrieve, update or delete a single spare."""

    def get(self, request, spare_id):
        """Return the requested spare."""
        try:
            spare = self.get_object(Spare, spare_id)
            if not spare:
                return self.object_not_found(request)
            return Utils.dispatch_success(request, serializers.SpareSerializer(spare).data)
        except Exception as exc:
            return self.internal_server_error(request, exc)

    def put(self, request, spare_id):
        """Partially update the requested spare (any subset of its fields)."""
        try:
            spare = self.get_object(Spare, spare_id)
            if not spare:
                return self.object_not_found(request)
            serializer = serializers.AddSpareSerializer(spare, request.data, partial=True)
            if not serializer.is_valid():
                return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)
            serializer.save()
            return Utils.dispatch_success(request, serializer.data)
        except Exception as exc:
            return self.internal_server_error(request, exc)

    def delete(self, request, spare_id):
        """Delete the requested spare."""
        try:
            spare = self.get_object(Spare, spare_id)
            if not spare:
                return self.object_not_found(request)
            spare.delete()
            return Utils.dispatch_success(request, 'SUCCESS')
        except Exception as exc:
            return self.internal_server_error(request, exc)
class SpareSearchList(CustomBaseClass):
    """Search spares across every brand model of one store."""

    def get(self, request, store_id):
        """Return spares of the store.

        Query params: ``search`` (matches spare id or name),
        ``out_of_stock`` (truthy restricts to quantity == 0); both may be
        combined.
        """
        try:
            query = request.GET.get('search')
            only_out_of_stock = request.GET.get('out_of_stock')
            spares = self.get_filter_objects(Spare, store=store_id)
            if query:
                spares = spares.filter(Q(spare_id__icontains=query) | Q(spare_name__icontains=query))
            if only_out_of_stock:
                spares = spares.filter(quantity=0)
            payload = serializers.SpareSerializer(spares, many=True).data
            return Utils.dispatch_success(request, payload)
        except Exception as exc:
            return self.internal_server_error(request, exc)
class SpareOrderList(CustomBaseClass):
    """List past spare orders (paginated) and create new orders."""

    def get(self, request, store_id, *args, **kwargs):
        """Return one page of the store's orders.

        Query params (all optional):
            search     - substring match on order_id (takes precedence)
            start_date - YYYY-MM-DD, inclusive
            end_date   - YYYY-MM-DD, inclusive
            page       - page number, default 1
        """
        try:
            start_date = request.GET.get('start_date')
            end_date = request.GET.get('end_date')
            page = request.GET.get('page', 1)
            search = request.GET.get('search', None)
            if search:
                qs = SpareOrder.objects.filter(store=store_id, order_id__icontains=search)
            else:
                # Bug fix: `qs` used to be assigned only when `start_date`
                # was supplied, raising UnboundLocalError otherwise. Default
                # to every order of the store and narrow by date if given.
                qs = SpareOrder.objects.filter(store=store_id)
                if start_date:
                    start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
                    # +1 day so the end date itself is included in the range.
                    end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
                    qs = qs.filter(order_date__range=[start_date, end_date])
            paginator = Paginator(qs, per_page=10)
            serializer = serializers.SpareOrderHistorySerializer(paginator.page(page), many=True)
            response_data = {
                "data": serializer.data,
                "page": int(page),
                "total_pages": paginator.num_pages
            }
            return Utils.dispatch_success(request, response_data)
        except Exception as e:
            return self.internal_server_error(request, e)

    def post(self, request, store_id, *args, **kwargs):
        """Create a new order for the store.

        Expected body::

            {
                "customer_info": {"name", "email", "phone_number", "address"},
                "order_type": "IN_SOURCE / OUT_SOURCE",
                "bike_number": "...", "labour_charge": 0.0,
                "out_source_charge": 0.0,
                "spares": [{"spare_id", "spare_price_type", "spare_count"}, ...]
            }

        Creates/reuses the customer (keyed on phone number), generates a
        monthly-sequential order id, decrements stock, and returns the order
        ids plus a shareable receipt message.
        """
        try:
            data = request.data
            # Re-use an existing customer by phone number, else create one.
            try:
                customer = SpareCustomer.objects.get(phone_number=data['customer_info']['phone_number'])
            except SpareCustomer.DoesNotExist:
                customer_serializer = serializers.SpareCustomerSerializer(data=data['customer_info'])
                if customer_serializer.is_valid():
                    customer_serializer.save()
                else:
                    return Utils.dispatch_failure(request, "VALIDATION_ERROR", customer_serializer.errors)
                customer = SpareCustomer.objects.get(id=customer_serializer.data['id'])
            # Order id is sequential within the current month: SPOR<yyyymm><5-digit seq>.
            today = datetime.date.today()
            today_order_count = SpareOrder.objects.filter(order_date__year=today.year,
                                                          order_date__month=today.month).count()
            order_id = 'SPOR{}{:05d}'.format(today.strftime("%Y%m"), today_order_count + 1)
            with transaction.atomic():
                store = self.get_object(Store, store_id)
                share_message = f"You're successfully purchased following items from {store.name}, {store.branch}.\n"
                order = SpareOrder(order_id=order_id,
                                   store=store,
                                   order_type=data['order_type'],
                                   customer=customer,
                                   total=0.0,
                                   sold_by=request.user)
                order.save()
                total = 0.0
                spares_to_be_created = []
                for _spare in data['spares']:
                    spare = self.get_object(Spare, _spare['spare_id'])
                    # Unit price depends on which price tier the buyer gets.
                    price_map = {
                        'MRP': spare.mrp_price,
                        'MECHANIC': spare.mechanic_price,
                        'WHOLESALER': spare.wholesaler_price,
                        'CUSTOMER': spare.customer_price,
                    }
                    sold_spare = SpareSold(order=order,
                                           spare=spare,
                                           spare_count=_spare['spare_count'],
                                           spare_name=spare.spare_name,
                                           spare_buying_price=spare.buying_price,
                                           spare_price=price_map[_spare['spare_price_type']],
                                           spare_price_type=_spare['spare_price_type'])
                    spares_to_be_created.append(sold_spare)
                    current_total = float(sold_spare.spare_count * sold_spare.spare_price)
                    total = total + current_total
                    # NOTE(review): no check that enough stock exists —
                    # quantity can go negative here; confirm intended.
                    spare.quantity = spare.quantity - sold_spare.spare_count
                    spare.save()
                    share_message += f"{sold_spare.spare_name} -- {sold_spare.spare_count} x {sold_spare.spare_price} = {current_total}\n"
                SpareSold.objects.bulk_create(spares_to_be_created)
                # NOTE(review): order_type is a non-empty string for both
                # IN_SOURCE and OUT_SOURCE, so this branch always runs —
                # presumably intended to gate on IN_SOURCE only; confirm.
                if order.order_type:
                    order.bike_number = data['bike_number']
                    order.labour_charge = data['labour_charge']
                    order.out_source_charge = data['out_source_charge']
                    total = total + order.labour_charge + order.out_source_charge
                    share_message += f"Labour Charge = {order.labour_charge}\n\n" \
                                     f"Out Source Charge = {order.out_source_charge}\n\n"
                order.total = total
                order.save()
                share_message += f"Grand total = {total}.\n\n" \
                                 f"Order ID: {order_id}\n\n" \
                                 f"Date: {today.strftime('%d-%m-%Y')}\n\nThank you for purchasing with us!"
            return Utils.dispatch_success(request, {'order_id': order.id, 'spareorder_id': order_id, 'share_info': share_message})
        except Exception as e:
            return self.internal_server_error(request, e)
class SparesAccountingView(CustomBaseClass):
    """Sales / profit reporting over spare-part orders."""

    sell_report_type = ['IN_SELL', 'OUT_SELL', 'TOTAL_SELL']
    profit_report_type = ['IN_PROFIT', 'OUT_PROFIT', 'TOTAL_PROFIT']
    IN_SOURCE = ['IN_SELL', 'IN_PROFIT']
    OUT_SOURCE = ['OUT_SELL', 'OUT_PROFIT']

    def get_total(self, qs, report_type):
        """Aggregate the sold spares of the given orders.

        For *_SELL reports the amount is selling price x count; for
        *_PROFIT reports it is buying price x count. IN_*/OUT_* report
        types first narrow the orders to that source type.

        :return: (amount, total item count, number of distinct spares)
        """
        if report_type in self.IN_SOURCE:
            qs = qs.filter(order_type=SpareOrder.IN_SOURCE)
        if report_type in self.OUT_SOURCE:
            qs = qs.filter(order_type=SpareOrder.OUT_SOURCE)
        amount = 0.00
        item_count = 0
        distinct_spares = set()
        for order in qs:
            for sold in SpareSold.objects.filter(order=order):
                if report_type in self.sell_report_type:
                    amount = amount + float(sold.spare_count * sold.spare_price)
                if report_type in self.profit_report_type:
                    amount = amount + float(sold.spare_count * sold.spare_buying_price)
                distinct_spares.add(sold.spare)
                item_count += sold.spare_count
        return amount, item_count, len(distinct_spares)

    def get(self, request):
        """Report endpoint.

        params: ?start_date=2019-01-31&&end_date=2019-12-31&&stores=16&&report_type=TOTAL_SELL
        report_type is one of IN_SELL, OUT_SELL, TOTAL_SELL, IN_PROFIT,
        OUT_PROFIT, TOTAL_PROFIT.
        """
        try:
            stores = [int(x) for x in request.GET.get('stores', '').split(',')]
            start_date = request.GET.get('start_date')
            end_date = request.GET.get('end_date')
            report_type = request.GET.get('report_type')
            if start_date:
                start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
                end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)
            orders = self.get_filter_objects(SpareOrder, store__in=stores, order_date__range=[start_date, end_date])
            if not orders:
                return Utils.dispatch_success(request, "DATA_NOT_FOUND")
            if report_type in self.sell_report_type:
                selling_total, total_items, total_spares = self.get_total(orders, report_type)
                return Utils.dispatch_success(request, {
                    "selling_total": selling_total,
                    "total_items": total_items,
                    "total_spares": total_spares,
                })
            if report_type in self.profit_report_type:
                profit_to_sell = {
                    "IN_PROFIT": "IN_SELL",
                    "OUT_PROFIT": "OUT_SELL",
                    "TOTAL_PROFIT": "TOTAL_SELL",
                }
                buying_total, total_items, total_spares = self.get_total(orders, report_type)
                selling_total, total_items, total_spares = self.get_total(orders, profit_to_sell[report_type])
                difference = selling_total - buying_total
                return Utils.dispatch_success(request, {
                    "selling_total": selling_total,
                    "buying_total": buying_total,
                    "profit_total": abs(difference),
                    "status": "LOSS" if difference < 0 else "PROFIT",
                    "total_items": total_items,
                    "total_spares": total_spares,
                })
            return self.object_not_found(request)
        except Exception as exc:
            return self.internal_server_error(request, exc)
class UrgentSpareList(CustomBaseClass):
    """List spares flagged urgent, and restock (clear) them."""

    def get(self, request, store_id, *args, **kwargs):
        """Return one page of the store's urgent spares."""
        try:
            urgent = self.get_filter_objects(Spare, store=store_id, is_urgent_spare=True)
            page = request.GET.get('page', 1)
            paginator = Paginator(urgent, per_page=10)
            body = {
                "data": serializers.SpareSerializer(paginator.page(page), many=True).data,
                "page": int(page),
                "total_pages": paginator.num_pages,
            }
            return Utils.dispatch_success(request, body)
        except Exception as exc:
            return self.internal_server_error(request, exc)

    def put(self, request, store_id, *args, **kwargs):
        """Restock the listed spares and clear their urgent flag.

        Expected body::

            {"spares": [{"id": 23, "quantity": 20}, ...]}
        """
        try:
            for entry in request.data["spares"]:
                spare = self.get_object(Spare, entry["id"])
                spare.is_urgent_spare = False
                spare.quantity += entry["quantity"]
                spare.save()
            return Utils.dispatch_success(request, 'SUCCESS')
        except Exception as exc:
            return self.internal_server_error(request, exc)
class SpareOrderEmailPdf(CustomBaseClass):
    """Build an order invoice and either download it as a PDF or email it."""

    def get(self, request, order_id, *args, **kwargs):
        """
        Returns PDF of the invoice or email's user
        :param request:
                @param action=email # to send email to customer
                @param action=download # to Download the invoice copy
        :param order_id:
        :param args:
        :param kwargs:
        :return:
        """
        try:
            action = request.GET.get('action')
            order = self.get_object(SpareOrder, order_id)
            # Template context for the invoice renderer.
            data = {}
            store = order.store
            data['store'] = {
                'store_name': store.name.upper(),
                'store_branch': store.branch.upper(),
                # NOTE(review): 'store_type' duplicates store.branch — looks
                # like it was meant to be a different field; confirm.
                'store_type': store.branch,
                # Commas become newlines so the template renders a multi-line address.
                'store_address': store.address.replace(',', '\n'),
                'store_phone': store.phone,
                'store_email': store.email,
                'store_website': store.website,
            }
            customer = order.customer
            data['customer'] = {
                'name': customer.name,
                'email': customer.email,
                'phone_number': customer.phone_number,
                'address': customer.address.replace(',', '\n')
            }
            data['order_id'] = order.order_id
            data['date'] = order.order_date.strftime('%d-%m-%Y %H:%M:%S')
            data['total'] = order.total
            data['type'] = order.order_type
            data['bike_number'] = order.bike_number
            data['labour_charge'] = order.labour_charge
            data['out_source_charge'] = order.out_source_charge
            data['sold_by'] = order.sold_by.first_name
            # Items-only subtotal (grand total minus service charges).
            data['sub_total'] = order.total - order.labour_charge - order.out_source_charge
            # Format dispatch table; only 'pdf' is used by the actions below.
            response = {
                'csv': Utils.generate_csv,
                'xls': Utils.generate_xls,
                'pdf': Utils.generate_pdf
            }
            # Line items: [row number, name, unit price, count, line total].
            data['order'] = []
            for i, order_spare in enumerate(SpareSold.objects.filter(order=order)):
                data['order'].append([i + 1, order_spare.spare_name, order_spare.spare_price, order_spare.spare_count,
                                      order_spare.spare_price * order_spare.spare_count])

            dynamic_data = {
                'pdf_template': 'spare_invoice.html',
                'filename': f'Invoice_{order.order_id}',
                'data': data,
                'action': action
            }
            if action == 'download':
                return response.get('pdf')(**dynamic_data)
            elif action == 'email':
                # Email is silently skipped when the customer has no address.
                if customer.email:
                    AppUtils.send_inovice_email(response.get('pdf')(**dynamic_data), data, f'Invoice_{order.order_id}' )
                return Utils.dispatch_success(request, 'SUCCESS')
            # Unknown/missing action.
            return self.object_not_found(request)
        except Exception as e:
            return self.internal_server_error(request, e)
| muthukumar4999/the-mechanic-backend | the_mechanic_backend/v0/stock/views.py | views.py | py | 23,745 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "the_mechanic_backend.v0.utils.CustomBaseClass",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "the_mechanic_backend.apps.stock.models.Brand",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "the_mechanic_backend.apps.stock.models.Brand",
... |
75129886504 | __all__ = ["Echo"]
from textwrap import dedent
from typing import Any, Dict
from ..imagecrawler import BaseImageCrawler, Image, ImageCollection, ImageCrawlerConfig, ImageCrawlerInfo
class Echo(BaseImageCrawler):
    """A trivial crawler that always yields the single configured image."""

    def __init__(self, *, image_uri: str) -> None:
        super().__init__(image_uri=image_uri)

    @classmethod
    def info(cls) -> ImageCrawlerInfo:
        """Describe this crawler for the registry/UI."""
        long_description = dedent('''
            Not an actual crawler.
            More like an Parrot that is trained to repeat what you tell it to say.
            ''').strip()
        return ImageCrawlerInfo(
            description='"Finds" the same image ... again ... and again.',
            long_description=long_description,
            config={'image_uri': 'the URI of the image to "find"'},
            # intentionally ships without an icon
        )

    @classmethod
    def check_config(cls, config: Dict[str, Any]) -> ImageCrawlerConfig:
        """Validate the raw config dict and return a typed config.

        :raises TypeError: if ``image_uri`` is not exactly a ``str``
        :raises ValueError: if ``image_uri`` is empty
        """
        image_uri = config['image_uri']
        # Deliberately an exact type check (not isinstance) to reject
        # str subclasses, matching the original contract.
        if type(image_uri) is not str:
            raise TypeError(f'image_uri {image_uri!r} is not str')
        if not image_uri:
            raise ValueError(f'image_uri {image_uri!r} is empty')
        return ImageCrawlerConfig(image_uri=image_uri)

    def is_exhausted(self) -> bool:
        """Never exhausts — the same image can be "found" forever."""
        return False

    def _reset(self) -> None:  # pragma: no cover
        pass

    def _crawl(self) -> ImageCollection:
        """Return a collection holding the single configured image."""
        uri = self.get_config()['image_uri']
        collection = ImageCollection()
        collection.add(
            Image(
                uri=uri,
                source=uri,
                is_generic=True,
                this_is_a_dummy=True,
            )
        )
        return collection
| k4cg/nichtparasoup | python-package/src/nichtparasoup/imagecrawlers/echo.py | echo.py | py | 1,743 | python | en | code | 40 | github-code | 36 | [
{
"api_name": "imagecrawler.BaseImageCrawler",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "imagecrawler.ImageCrawlerInfo",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 18,
"usage_type": "call"
},
{
"api_na... |
37634246750 | # A transformation sequence from word beginWord to word endWord using a dictionary wordList is a sequence of words such that:
# The first word in the sequence is beginWord.
# The last word in the sequence is endWord.
# Only one letter is different between each adjacent pair of words in the sequence.
# Every word in the sequence is in wordList.
# Given two words, beginWord and endWord, and a dictionary wordList, return the number of words in the shortest transformation sequence from beginWord to endWord, or 0 if no such sequence exists.
# Example 1:
# Input: beginWord = "hit", endWord = "cog", wordList = ["hot","dot","dog","lot","log","cog"]
# Output: 5
# Explanation: One shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog" with 5 words.
# Example 2:
# Input: beginWord = "hit", endWord = "cog", wordList = ["hot","dot","dog","lot","log"]
# Output: 0
# Explanation: The endWord "cog" is not in wordList, therefore there is no possible transformation.
# Constraints:
# 1 <= beginWord.length <= 10
# endWord.length == beginWord.length
# 1 <= wordList.length <= 5000
# wordList[i].length == beginWord.length
# beginWord, endWord, and wordList[i] consist of lowercase English letters.
# beginWord != endWord
# All the strings in wordList are unique.
from collections import deque
class Solution:
    def ladderLength(self, beginWord: str, endWord: str, wordList: list[str]) -> int:
        """Length of the shortest transformation sequence, or 0 if none.

        BFS over an adjacency structure keyed by wildcard patterns
        ("h*t" links "hat"/"hot"/...), so neighbours are found in O(L)
        per word instead of O(N*L) pairwise comparisons.

        Note: the annotation used to be ``List[str]`` without importing
        ``typing.List``, which raised NameError when the class was defined;
        the builtin generic ``list[str]`` needs no import.
        """
        # pattern -> all dictionary words matching it
        graph = {}
        for word in wordList:
            for i in range(len(word)):
                graph.setdefault(word[:i] + '*' + word[i + 1:], []).append(word)
        # BFS frontier of (word, sequence length so far).
        queue = deque([(beginWord, 1)])
        visited = {beginWord}
        while queue:
            current, length = queue.popleft()
            if current == endWord:
                return length
            for i in range(len(current)):
                pattern = current[:i] + '*' + current[i + 1:]
                for neighbour in graph.get(pattern, []):
                    if neighbour not in visited:
                        visited.add(neighbour)
                        queue.append((neighbour, length + 1))
        return 0
| sunnyyeti/Leetcode-solutions | 127 Word Ladder.py | 127 Word Ladder.py | py | 2,240 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 40,
"usage_type": "call"
}
] |
3761949834 | import cv2
import numpy as np
img2 = cv2.imread("images.jpg")
img1 = cv2.imread("new quantum.PNG")
rows, cols, channels = img2.shape # Reading image details
roi = img1[0:rows, 0:cols]
img2g = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # converting to grayscale
# defining mask , makes our logo black and background white
ret, mask = cv2.threshold(img2g, 220, 255, cv2.THRESH_BINARY_INV)
# cv2.imshow('mask',mask)
mask_inv = cv2.bitwise_not(mask) # defining non_masked area
# adds our inv_mask to region of image of main image
img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
img_fg = cv2.bitwise_and(img2, img2, mask=mask) # our logo after adding mask
dst = img_bg+img_fg # we get our logo with our original image background
# swaps the region of image original image to logo with same backgound
img1[0:rows, 0:cols] = dst
cv2.imshow('res', img1)
cv2.imwrite('result.jpg', img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| AirbotsBetaProject/Day-6 | D6-04Dheeraj/Source Code/adding_logo_with_no_background.py | adding_logo_with_no_background.py | py | 929 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_numbe... |
36278744993 | import re
from pprint import pprint
import csv
from decorator import to_log
if __name__ == '__main__':
    # Read the CSV address book into contacts_list (list of row-lists).
    with open("phonebook_raw.csv", encoding='utf-8') as f:
        rows = csv.reader(f, delimiter=",")
        contacts_list = list(rows)
    # Normalize phone numbers (column 5) to +7(XXX)XXX-XX-XX, keeping an
    # optional extension ("доб. NNNN" in the source data).
    phone_num = r'(\+7|8)\s?\(?(\w{3})\)?\s?-?(\w{3})-?(\w{2})-?(\w{2})(\s?)(\(?((доб\.)\s(\w{4}))\)?)?'
    num_format = r'+7(\2)\3-\4-\5\6\9\10'
    i = 1  # start at 1 to skip the header row
    # NOTE(review): this is an alias, not a copy -- form_list and
    # contacts_list are the same object, so the loop mutates both.
    form_list = contacts_list
    while i < len(contacts_list):
        form_list[i][5] = re.sub(phone_num, num_format, contacts_list[i][5])
        i += 1
    # pprint(form_list)
    # NOTE(review): hard-coded absolute Windows path -- breaks on other machines.
    @to_log(path=r"C:\Users\olesy\PycharmProjects\regex_decorated\log.txt")
    def arrange_names(some_list):
        """Split space-joined surname/name fields into separate columns.

        Mutates ``some_list`` in place (``new_list`` is an alias) and
        returns it.  NOTE(review): row range 1..8 is hard-coded -- verify
        it matches the actual number of data rows.
        """
        new_list = some_list
        for n in range(0, 2):
            for i in range(1, 9):
                if re.search(r'\s', some_list[i][n]):
                    name = some_list[i][n].split(' ')
                    if n == 0:
                        if some_list[i][1] != '':
                            name.extend(some_list[i][3:])
                            new_list[i] = name
                        else:
                            name.extend(some_list[i][2:])
                            new_list[i] = name
                    else:
                        new_list[i][n] = name[0]
                        new_list[i][n + 1] = name[1]
                else:
                    pass
        return new_list
    arrange_names(form_list)
| OysterLover/regex_decorated | main.py | main.py | py | 1,633 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "decorator.to_log",
"line_number": 28,
... |
31619170189 | import pandas as pd
import xml.etree.ElementTree as ET
# Load the XML data
tree = ET.parse('statement_short.xml')
root = tree.getroot()
# Define a function to extract data from the XML elements
def extract_data(elem):
    """Recursively convert an XML element's children into a nested dict.

    Leaf children (no sub-elements) map tag -> text; children with
    sub-elements map tag -> the recursively extracted dict.  If several
    children share a tag, the last one wins.
    """
    return {
        child.tag: (child.text if len(child) == 0 else extract_data(child))
        for child in elem
    }
# Extract the data from the XML via the manual recursive walk above.
data = []
for elem in root:
    data.append(extract_data(elem))
df = pd.DataFrame(data)
# Same file parsed directly by pandas, for comparison with the manual walk.
df1 = pd.read_xml('statement_short.xml')
# Display the DataFrame
# NOTE(review): only df1 is printed; the hand-built df above is never used.
print(df1)
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pand... |
10387196560 | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))  # directory containing this setup.py
# Get the long description from the README file.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='pysnippets',
    version='0.1.0',
    description='Scattered python snippets',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/gongzhitaao/snippets/pysnippets',
    author='gongzhitaao',
    author_email='zhitaao.gong@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    keywords='snippets',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[],  # no runtime dependencies
    extras_require={
        'test': ['pytest'],  # install with: pip install pysnippets[test]
    },
)
| gongzhitaao/snippets | pysnippets/setup.py | setup.py | py | 1,010 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
34588733658 | import os
import atexit
import secrets
import unittest
from functools import wraps
import augpathlib as aug
from sxpyr import sxpyr
from pyontutils.utils import Async, deferred # TODO -> asyncd in future
from pyontutils.utils_fast import isoformat
from sparcur import exceptions as exc
from sparcur.utils import GetTimeNow, log
from sparcur.paths import PennsieveCache, LocalPath, Path
from sparcur.backends import PennsieveRemote
from .common import test_organization, test_dataset, _pid
from .common import skipif_ci, skipif_no_net
import pytest
class _TestOperation:
    """Shared fixture: binds a fresh Pennsieve remote/cache pair to a
    per-process scratch directory (``test-operation-<pid>``) and populates
    the test dataset before each test."""
    _cache_class = PennsieveCache
    _remote_class = PennsieveRemote
    @classmethod
    def tearDownClass(cls):
        # Remove the scratch directory once the whole class is done.
        base = aug.AugmentedPath(__file__).parent / f'test-operation-{_pid}'
        if base.exists():
            base.popd() # in case we were inside it pop back out first
            base.rmtree()
    def setUp(self):
        # Fresh Cache subclass per test so flavour binding does not leak
        # state between tests.
        class Cache(self._cache_class):
            pass
        Cache._bind_flavours()
        # Recreate the scratch directory from a clean slate.
        base = aug.AugmentedPath(__file__).parent / f'test-operation-{_pid}'
        if base.exists():
            base.popd() # in case we were inside it pop back out first
            base.rmtree()
        base.mkdir()
        base.pushd()
        self.Remote = self._remote_class._new(Cache._local_class, Cache)
        self.Remote.init(test_organization)
        self.anchor = self.Remote.dropAnchor(base)
        self.root = self.anchor.remote
        self.project_path = self.anchor.local
        list(self.root.children) # populate datasets
        self.test_base = [
            p for p in self.project_path.children
            if p.cache.id == test_dataset][0]
        list(self.test_base.rchildren) # populate test dataset
        asdf = self.root / 'lol' / 'lol' / 'lol (1)'
        # In-memory PathLike that yields ~100 kB of b'0'; only used by the
        # commented-out upload experiment below.
        class Fun(os.PathLike):
            name = 'hohohohohoho'
            def __fspath__(self):
                return ''
            @property
            def size(self):
                return aug.FileSize(len(b''.join(self.data)))
            @property
            def data(self):
                for i in range(100):
                    yield b'0' * 1000
        #wat = asdf.bfobject.upload(Fun(), use_agent=False)
        #breakpoint()
@skipif_ci
@skipif_no_net
class TestDelete(_TestOperation, unittest.TestCase):
    def test_0(self):
        """Smoke test: only exercises the _TestOperation setUp machinery."""
        assert True
    def test_1_case(self):
        # this is an old scenario that happens because of how the old system worked
        # local working directory | x
        # local cache directory | o
        # remote | o
        # TODO(review): scenario is documented but not yet implemented.
        pass
def make_rand(n, width=80):
    """Return roughly ``n`` bytes of random hex text split into
    newline-terminated lines of at most ``width`` characters.

    NOTE(review): the inline comments below record known off-by-one
    failures (width 71; ``make_rand(102, 101)``), so the output length is
    approximate, not guaranteed to equal ``n`` -- confirm before relying
    on exact sizes.
    """
    lw = width + 1  # line width including the newline
    hex_lw = lw // 2  # token_hex(k) yields 2*k characters
    n_lines = n // lw
    n_accounted = n_lines * lw
    h_lines = n // (hex_lw * 2)
    h_accounted = h_lines * (hex_lw * 2)
    ldiff = n_lines - h_lines
    adiff = n_accounted - h_accounted
    accounted = n_accounted
    missing = n - accounted  # bytes left over after the full lines
    hex_missing = (missing + 1) // 2
    diff = hex_missing * 2 - missing
    hexstart = width % 2 # almost there fails on 71
    # also fails len(make_rand(102, 101)) - 102
    log.debug((adiff, ldiff, missing, diff))
    string = '\n'.join([secrets.token_hex(hex_lw)[hexstart:]
                        for i in range(n_lines)]
                       + [secrets.token_hex(hex_missing)[diff:-1] + '\n'])
    return string.encode()
@skipif_ci
@skipif_no_net
@pytest.mark.skip('VERY SLOW')
class TestFilenames(_TestOperation, unittest.TestCase):
_evil_names = (
# '......................', # this breaks the agent with infinite timeout
'!@#$%^&*()[]{}`~;:,',
'(╯°□°)╯︵ ┻━┻)',
'הָיְתָהtestالصفحات التّحول',
'הָיְתָהtestالصفحا تالتّحول', # check bucket names
'᚛ᚄᚓᚐᚋᚒᚄ ᚑᚄᚂᚑᚏᚅ᚜',
'Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮',
# '𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌', # this breaks the agent with ERRORED
'evil file space',
'evil_file underscore',
'evil-file dash',
'evil%20file percent 20',
'hello%20world%20%60~%21%40%23%24%25%5E%26%2A%28%29%5B%5D%7B%7D%27',
# the problem is that we don't know whether we can actually
# decode a file name, and wthe database stores the encoded filename
'hello%20world',
'hello%20world~',
)
@property
def _more_evil_names(self):
# class scope strikes back! LOL PYTHON
return [
name
for char in
('\x07',
#'/', # have to do this in a different way on unix
'\\',
'|',
'!',
'@',
'#',
'$',
'%',
'^',
'&',
'*',
'(',
')',
'[',
']',
'{',
'}',
"'",
'`',
'~',
';',
':',
',',
'"',
'?',
'<',
'>',
)
for name in
(f'prefix{char}',
f'prefix{char}suffix',
f'{char}suffix',)]
@staticmethod
def _op(test_folder, fsize, name):
test_file_a = test_folder / (name + '.ext')
test_file_b = test_folder / (name + '.txe')
test_folder_i = test_folder / name
for _f in (test_file_a, test_file_b):
if _f.exists() or _f.is_broken_symlink():
msg = (f'bad test environment: file/link already exists: {_f}')
raise FileExistsError(msg)
# FIXME maybe don't straight fail here, but instead
# don't upload and just compare the existing name?
# the fact that we get an error is a sign that the
# name matches actually ... so not getting an error
# in subsequent runs is bad ... for test_base at least
test_file_a.data = iter((make_rand(fsize),))
test_file_b.data = iter((make_rand(fsize),))
try:
remote_a = test_file_a.upload()
name_a = remote_a.bfobject.name
except Exception as e:
name_a = e
try:
remote_b = test_file_b.upload()
name_b = remote_b.bfobject.name
except Exception as e:
name_b = e
try:
remote_i = test_folder_i.mkdir_remote()
name_i = remote_i.bfobject.name
except Exception as e:
name_i = e
return name_a, name_b, name_i
def test_filenames_more_evil(self):
return self.test_filenames_evil(self._more_evil_names)
def test_filenames_evil(self, names=_evil_names):
# XXX warning slow!
now = GetTimeNow()
local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}'
remote = local.mkdir_remote()
try:
# FIXME consider going back to self.test_base instead of local here
test_folder = local / 'pandora'
test_folder.mkdir_remote()
test_folder.__class__.upload = Path.upload
fsize = 1024 # needed for uniqueish hashes colloisions will still happen
# FIXME this pretty clearly reveals a need for
# batching to multiplex the fetch ... SIGH
results = Async(rate=10)(deferred(self._op)(test_folder, fsize, name) for name in names)
#results = []
#for name in names:
# name_a, name_b, name_i = self._op(test_folder, fsize, name)
# results.append((name_a, name_b, name_i))
finally:
remote.rmdir(force=True)
# FIXME crumple fails in refresh since we use rmdir
# instead of rmtree (for safety)
#remote.cache.refresh() # FIXME
@skipif_ci
@skipif_no_net
class TestUpdate(_TestOperation, unittest.TestCase):
    """Upload/replace behaviour against the live test dataset."""
    @pytest.mark.skip('the question has been answered')
    def test_process_filesize_limit(self):
        # so the 1 mb size file works, eventually, something else is wrong
        test_folder = self.test_base / 'hrm'
        test_folder.mkdir_remote()
        test_folder.__class__.upload = Path.upload
        # try 1 MiB .. 4 MiB in 1 MiB steps
        for i in range(1024 ** 2, 5 * 1024 ** 2, 1024 ** 2):
            test_file = test_folder / f'size-{i}'
            if test_file.is_broken_symlink():
                test_file.remote.bfobject.package.delete()
                test_file.unlink()
                test_file = test_folder / f'size-{i}' # remove stale cache
            test_file.data = iter((make_rand(i),))
            remote = test_file.upload()
    def test_upload_noreplace(self):
        """Uploading twice with replace=False should create two packages."""
        for i in range(2):
            test_file = self.test_base / 'dataset_description.csv'
            test_file.data = iter((make_rand(100),))
            # FIXME temp sandboxing for upload until naming gets sorted
            test_file.__class__.upload = Path.upload
            # create some noise
            remote = test_file.upload(replace=False)
            print(remote.bfobject.package.name)
    def test_upload_noreplace_fail(self):
        """Re-uploading unchanged data with replace=False must raise."""
        # some persistent state from other tests is causing this to fail
        test_file = self.test_base / 'dataset_description.csv'
        test_file.data = iter((make_rand(100),))
        # FIXME temp sandboxing for upload until naming gets sorted
        test_file.__class__.upload = Path.upload
        test_file.upload(replace=False)
        try:
            test_file.upload(replace=False)
            assert False, 'should have failed'
        except exc.FileHasNotChangedError:
            pass
    def test_upload_replace(self):
        """Default upload (replace=True) should succeed unconditionally."""
        test_file = self.test_base / 'dataset_description.csv'
        test_file.data = iter((make_rand(100),))
        # FIXME temp sandboxing for upload until naming gets sorted
        test_file.__class__.upload = Path.upload
        test_file.upload()
@skipif_ci
@skipif_no_net
class TestClone(_TestOperation, unittest.TestCase):
    """dropAnchor behaviour for various target-directory states."""
    # TODO test a variety of clone scenarios
    # and consider whether testing for and
    # existing root should be done in dropAnchor
    def setUp(self):
        super().setUp()
        # Secondary clone target alongside the main project path.
        self.alt_project_path = self.project_path.parent / 'alt' / self.project_path.name
        if self.alt_project_path.parent.exists():
            self.alt_project_path.parent.rmtree()
        self.alt_project_path.mkdir(parents=True)
    def _do_target(self, target, expect_error_type=None):
        """Drop an anchor at target.parent, optionally expecting an error."""
        class Cache(self._cache_class):
            pass
        Cache._bind_flavours()
        BFR = self._remote_class._new(LocalPath, Cache)
        BFR.init(test_organization)
        if expect_error_type:
            try:
                anchor = BFR.dropAnchor(target.parent)
                raise AssertionError(f'should have failed with a {expect_error_type}')
            except expect_error_type as e:
                pass
        else:
            anchor = BFR.dropAnchor(target.parent)
    def test_1_in_project(self):
        target = self.project_path / 'some-new-folder'
        target.mkdir()
        self._do_target(target) # FIXME succeeds for now, but probably should not?
    def test_2_project_top_level(self):
        target = self.project_path
        self._do_target(target, exc.DirectoryNotEmptyError)
    def test_3_existing_empty(self):
        target = self.alt_project_path
        self._do_target(target)
    def test_4_existing_has_folder(self):
        target = self.alt_project_path
        child = target / 'a-folder'
        child.mkdir(parents=True)
        self._do_target(target, exc.DirectoryNotEmptyError)
    def test_5_existing_has_file(self):
        target = self.alt_project_path
        child = target / 'a-file'
        child.touch()
        self._do_target(target, exc.DirectoryNotEmptyError)
    def test_6_existing_has_local_data_dir(self):
        target = self.alt_project_path
        child = target / self.anchor._local_data_dir
        child.mkdir()
        self._do_target(target, exc.DirectoryNotEmptyError)
@skipif_ci
@skipif_no_net
class TestMkdirRemote(_TestOperation, unittest.TestCase):
    """mkdir_remote for dataset- and collection-level paths."""
    def test_mkdir_remote_parents_false(self):
        now = GetTimeNow()
        local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' / 'some-folder'
        try:
            remote = local.mkdir_remote()
            raise AssertionError('Should have failed since parents=False')
        except FileNotFoundError:
            pass
    def test_0_mkdir_remote_will_be_dataset(self):
        now = GetTimeNow()
        local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}'
        remote = local.mkdir_remote()
        remote.rmdir()
        remote.cache.refresh() # reminder that remotes are a snapshot in time, NOT dynamic
        assert not local.exists(), f'should have been deleted {remote}'
    def test_1_mkdir_remote_will_be_collection(self):
        now = GetTimeNow()
        local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}' / 'some-folder'
        remote = local.mkdir_remote(parents=True)
        parent = remote.parent
        try:
            parent.rmdir() # should fail here
            try:
                remote.rmdir() # insurance
            except BaseException as e:
                log.exception(e)
            finally:
                # NOTE(review): '{parent}' is not interpolated -- the
                # f-string prefix is missing on this message.
                raise AssertionError('remote parent should NOT have rmdired {parent}')
        except exc.PathNotEmptyError:
            pass
        try:
            remote.rmdir()
            remote.cache.refresh()
            assert not local.exists(), f'should have been deleted {remote}'
        finally:
            lparent = parent.local
            parent.cache.refresh() # we just removed the child so the parent is stale
            parent.rmdir()
            parent.cache.refresh()
            assert not lparent.exists(), f'should have been deleted {parent}'
class TestMoveFolder(_TestOperation, unittest.TestCase):
    """Reparenting of folders and files inside a scratch dataset."""
    def test_reparent(self):
        # TODO this is nowhere near complete with respect to synchronization
        # but it is sufficient to test the components needed for sync
        now = GetTimeNow()
        local = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}'
        remote = local.mkdir_remote()
        try:
            # FIXME consider going back to self.test_base instead of local here
            test_folder_1 = local / 'dire-1'
            test_folder_1.mkdir_remote()
            test_folder_2 = local / 'dire-2'
            test_folder_2.mkdir_remote()
            list(remote.cache.children) # XXX populate with remote data
            # Move dire-2 under dire-1, then a freshly uploaded file back
            # under dire-2.
            test_folder_2.remote.reparent(test_folder_1.cache_id)
            test_folder_1.__class__.upload = Path.upload
            fsize = 1024
            test_file_1 = test_folder_1 / 'file-1.ext'
            test_file_1.data = iter((make_rand(fsize),))
            test_file_1.upload()
            test_file_1.remote.reparent(test_folder_2.cache_id)
        finally:
            remote.rmdir(force=True)
            # FIXME crumple fails in refresh since we use rmdir
            # instead of rmtree (for safety)
            #remote.cache.refresh() # FIXME
class _ChangesHelper:
_local_only = True
def _make_ops(self):
ops = tuple()
# dirs
dataset = 'project/dataset'
ops += (
#(0, 'mkdir', 'project'), # don't actually need this since we replace it when building paths
(0, 'mkdir', dataset),
(0, 'mkdir', 'project/dataset/dire-1'),
(0, 'mkdir', 'project/dataset/dire-2'),
(0, 'mkdir', 'project/dataset/dire-6'),
)
# sources
d3_1 = 'project/dataset/dire-1/dire-3-1-rn'
d3_2 = 'project/dataset/dire-1/dire-3-2-rp'
d3_3 = 'project/dataset/dire-1/dire-3-3-np'
f1_0 = 'project/dataset/dire-1/file-1-0.ext'
f1_1 = 'project/dataset/dire-1/file-1-1-rn.ext'
f1_2 = 'project/dataset/dire-1/file-1-2-rp.ext'
f1_3 = 'project/dataset/dire-1/file-1-3-np.ext'
l1_0 = 'project/dataset/dire-1/link-1-0.ext'
l1_1 = 'project/dataset/dire-1/link-1-1-rn.ext'
l1_2 = 'project/dataset/dire-1/link-1-2-rp.ext'
l1_3 = 'project/dataset/dire-1/link-1-3-np.ext'
# targets
# TODO need variants of all of these where we lose the metadata probably?
ops += (
(0, 'mkdir', d3_1),
(0, 'mkdir', d3_2),
(0, 'mkdir', d3_3),
(0, 'mkfile', f1_0), # nochange
(0, 'mkfile', f1_1),
(0, 'mkfile', f1_2),
(0, 'mkfile', f1_3),
(0, 'mklink', l1_0), # nochange
(0, 'mklink', l1_1),
(0, 'mklink', l1_2),
(0, 'mklink', l1_3),
# moves: renames, reparents, rename_reparent
(1, 'rename', d3_1, 'project/dataset/dire-1/dire-3-1-rn-r'), # rn
(1, 'rename', d3_2, 'project/dataset/dire-2/dire-3-2-rp'), # rp
(1, 'rename', d3_3, 'project/dataset/dire-2/dire-3-3-np-r'), # rnp
(1, 'rename', f1_1, 'project/dataset/dire-1/file-1-1-rn-r.ext'), # rn
(1, 'rename', f1_2, 'project/dataset/dire-2/file-1-2-rp.ext'), # rp
(1, 'rename', f1_3, 'project/dataset/dire-2/file-1-3-np-r.ext'), # rnp
(1, 'rename', l1_1, 'project/dataset/dire-1/link-1-1-rn-r.ext'), # rn
(1, 'rename', l1_2, 'project/dataset/dire-2/link-1-2-rp.ext'), # rp
(1, 'rename', l1_3, 'project/dataset/dire-2/link-1-3-np-r.ext'), # rnp
# add
(1, 'mkdir', 'project/dataset/dire-6/dire-7-add'),
(1, 'mkfile', 'project/dataset/dire-6/file-4-add.ext'),
(2, 'mklink', 'project/dataset/dire-6/link-4-add.ext'), # XXX this causes an error because it looks like the index is out of synx
)
# change (only applies to files)
f5_1 = 'project/dataset/dire-6/file-5-1-cd_.ext'
f5_2 = 'project/dataset/dire-6/file-5-2-c_m.ext'
f5_3 = 'project/dataset/dire-6/file-5-3-c_x.ext'
f5_4 = 'project/dataset/dire-6/file-5-4-cdm.ext'
f5_5 = 'project/dataset/dire-6/file-5-5-cdx.ext'
# file_id change ? should be impossible ...
ops += (
(0, 'mkfile', f5_1),
(0, 'mkfile', f5_2),
(0, 'mkfile', f5_3),
(0, 'mkfile', f5_4),
(0, 'mkfile', f5_5),
# TODO probably also change size
(1, 'change', f5_1, True, False), # data
(1, 'change', f5_2, False, True), # metadata
(1, 'change', f5_3, False, None), # no metadata # can handle this from objects cache
(1, 'change', f5_4, True, True), # data metadata
(1, 'change', f5_5, True, None), # data no metadata
)
# remove
d9 = 'project/dataset/dire-6/dire-9-rem'
f6 = 'project/dataset/dire-6/file-6-rem.ext'
l6 = 'project/dataset/dire-6/link-6-rem.ext'
ops += (
(0, 'mkdir', d9),
(0, 'mkfile', f6),
(0, 'mklink', l6),
(1, 'remove', d9),
(1, 'remove', f6),
(1, 'remove', l6),
)
# build the indexes so we can do the diff
ops += (
(0.5, 'index', dataset),
)
return ops
def setUp(self):
# TODO construct the template we need
super().setUp()
self.Path = self.Remote._local_class
#sigh = list(self.project_path.remote.children)
#[s.rmdir(force=True) for s in sigh if '2023' in s.name]
#breakpoint()
#raise ValueError('cleanup tearDown failure mess')
# TODO expected outcome after stage probably
if self._local_only:
def populate_cache(p, change=False):
# FIXME TODO change=True case may need special handling
# and probably represents a strange case of some kind
return p._cache_class.fromLocal(p)
def norm_path(p):
return self.project_path / p.replace('project/', '')
else:
# XXX WARNING extremely slow due to sequentially creating each remote file
now = GetTimeNow()
local_dataset = self.project_path / f'test-dataset-{now.START_TIMESTAMP_LOCAL_FRIENDLY}'
remote_dataset = local_dataset.mkdir_remote()
#self._test_dataset = remote_dataset
# tearDown fails to trigger if failure happens in setUp which is useless
# so use atexit instead
atexit.register(lambda : remote_dataset.rmdir(force=True))
def populate_cache(p, change=False):
# FIXME TODO change=True case may need special handling
# and probably represents a strange case of some kind
if change:
return
remote = p.create_remote()
return remote.cache
def norm_path(p):
return local_dataset / p.replace('project/dataset', '').strip('/')
def mkdir(d, add=False):
if not self._local_only and d == local_dataset:
# FIXME HACK
return
d.mkdir()
if not add:
cache = populate_cache(d)
d._cache = cache
#d._cache_class.fromLocal(d)
def mkfile(f, add=False):
f.data = iter((make_rand(100),))
if not add:
cache = populate_cache(f)
f._cache = cache
#f._cache_class.fromLocal(f)
def mklink(l):
try:
l.data = iter((make_rand(100),))
# issue with id and parent_id not being set so use fromLocal since it does it correctly
#meta = f.meta # TODO checksum probably?
#symlink = meta.as_symlink(local_name=l.name)
#cache = l._cache_class.fromLocal(l)
cache = populate_cache(l)
symlink = cache.meta.as_symlink(local_name=l.name)
finally:
l.unlink()
l.symlink_to(symlink)
def rename(path, target):
path.rename(target)
def change(f, data, metadata):
if data:
f.data = iter((make_rand(100),))
if metadata is None or metadata:
# must set xattrs to nothing if
# we want to change metadata otherwise
# PrimaryCache._meta_updater will go haywire and
# ... try to delete the file ... and it will
# actually delete it instead of crumpling it
# so definitely a FIXME very dangerous lose your work
# kind of scenario between that _meta_updater and
# BFPNCache._actually_crumple and change of BFPNCache.crumple
[f.delxattr(k) for k in f.xattrs()]
if f.xattrs():
breakpoint()
if metadata:
if not f.exists():
raise FileNotFoundError(f)
try:
populate_cache(f, change=True)
#f._cache_class.fromLocal(f)
except Exception as e:
breakpoint()
raise e
def remove(path):
if path.is_dir():
path.rmdir()
else:
path.unlink()
def index(ds):
if self._local_only:
caches = [l.cache for l in ds.rchildren] # XXX reminder, NEVER use ds.cache.rchildren that will pull
class fakeremote:
def __init__(self, id, name, parent_id, file_id, updated, local):
self.id = id
self.name = name
self._name = name
self.parent_id = parent_id
self.updated = updated
self._lol_local = local
if file_id is not None:
self.file_id = file_id
def is_dir(self):
return self._lol_local.is_dir()
for c in caches:
# FIXME causes other issues ... even while trying to avoid init issues
# we should not have to do this
cmeta = c.meta
c._remote = fakeremote(
cmeta.id, cmeta.name, cmeta.parent_id, cmeta.file_id,
cmeta.updated, c.local)
else:
# this is safe at this stage since everything should match upstream
caches = [c.cache for c in local_dataset.rchildren]
ds._generate_pull_index(ds, caches)
fops = {
'mkdir': mkdir,
'mkfile': mkfile,
'mklink': mklink,
'rename': rename,
'change': change,
'remove': remove,
'index': index,
}
def make_closure(stage, op, obj, args):
f = fops[op]
if stage > 0 and op in ('mkdir', 'mkfile'):
kwargs=dict(add=True)
else:
kwargs = {}
@wraps(f)
def inner():
f(path, *args, **kwargs)
return inner
def cargs(args):
for a in args:
if isinstance(a, str) and a.startswith('project/'):
yield norm_path(a)
else:
yield a
ops = self._make_ops()
pops = [(stage, op, norm_path(s), *cargs(args)) for stage, op, s, *args in ops]
init = set([path for stage, op, path, *args in pops if stage == 0])
test = set([p for stage, op, path, *args in pops if stage >= 1 for p in (path, *args) if isinstance(p, self.project_path.__class__)])
nochange = init - test
add_rename_reparent = test - init
change_remove = test - add_rename_reparent
cs = [(stage, path, make_closure(stage, op, path, args)) for stage, op, path, *args in pops]
scs = sorted(cs, key=(lambda abc: (abc[0], len(abc[1].parts))))
will_fails = []
for stage, path, fun in scs:
if stage > 1:
will_fails.append(fun)
else:
fun()
self._will_fails = will_fails
self.dataset = pops[0][-1]
class TestChanges(_ChangesHelper, _TestOperation, unittest.TestCase):
    def test_changes(self):
        """Exercise the diff pipeline and print the result as an s-expression.

        NOTE(review): the trailing ``breakpoint()`` will halt any
        unattended test run, and ``dateparser`` is imported but unused --
        this test reads as work in progress.
        """
        from dateutil import parser as dateparser
        dataset = self.dataset
        dataset_id, id_name, parent_children, name_id, updated_transitive = dataset._read_indexes()
        # XXX updated_transitive from _read_indexes is a string because that is what
        # _transitive_changes needs internally and then transforms to a datetime object
        # when it returns, therefore we don't fiddle with the types here
        #tc = dataset._transitive_changes()
        # XXX see sparcur.simple.utils
        dataset_id, updated_cache_transitive, diff = dataset.diff()
        blob = {
            'dataset-id': dataset_id.id,
            'updated-transitive': updated_transitive,
            'diff': diff,
        }
        pl = sxpyr.python_to_sxpr(blob, str_as_string=True)
        sxpr = pl._print(sxpyr.configure_print_plist(newline_keyword=False))
        print(sxpr)
        #pl = sxpyr.python_to_sxpr(diff, str_as_string=True)
        #sxpr = pl._print(sxpyr.configure_print_plist(newline_keyword=False))
        breakpoint()
class _WorkflowHelper:
    """Mixin driving the diff -> push-list -> manifest -> push workflow."""
    def _do_workflow(self, paths_to_add):
        """Run the four-step push workflow for the given relative paths.

        Skips the final remote push when ``self._local_only`` is set;
        otherwise a Pennsieve agent error downgrades the test to a skip.
        """
        # push button, receive bacon
        # 0. asumme there are changes to a dataset
        # 1. click upload button in main window (python get diff)
        # argv-simple-diff -> sparcur.simple.utils for-racket diff -> path_dataset.diff()
        # 2. select specific paths for upload (python nothing)
        # racket side selects the list of files to push (push_list) that goes into paths.sxpr
        # which the python side then reads in the next step
        # 3. click confirm selection checkbox (python generate manifest)
        # (ensure-directory! (push-dir)) -> updated_transitive push_id
        # write-push-paths -> {:user-cache-path}/{dataset-uuid}/{updated-transitive}/{push-id}/paths.sxpr -> push_list
        # argv-simple-make-push-manifest -> sparcur.simple.utils for-racket make-push-manifest -> path_dataset.make_mush_manifest()
        # 4. click push selected to remote (python push from manifest)
        # argv-simple-push -> sparcur.simple.utils for-racket push -> path_dataset.push_from_manifest()
        # 5. TODO I think that after remote changes are made we probably want to create
        # a modified index file that notes the changes so that incremental changes
        # do not have to be pulled again ... of course if upstream has changed we are
        # back in the usual world of pain ...
        path_dataset = self.dataset # given
        dataset_id = path_dataset.cache.identifier
        # 1
        __dataset_id, updated_transitive, diff = path_dataset.diff()
        # 2
        # write the push_list to paths.sxpr
        push_id = path_dataset._write_push_list(dataset_id, updated_transitive, diff, paths_to_add)
        # 3
        path_dataset.make_push_manifest(dataset_id, updated_transitive, push_id)
        # 4
        if not self._local_only: # this fails without remote
            import pennsieve.api.agent
            try:
                path_dataset.push_from_manifest(dataset_id, updated_transitive, push_id)
            except pennsieve.api.agent.AgentError as e:
                log.exception(e)
                pytest.skip('pennsieve error')
class TestWorkflow(_ChangesHelper, _WorkflowHelper, _TestOperation, unittest.TestCase):
    """End-to-end push workflow over the _ChangesHelper fixture (local only)."""
    def test_workflow(self):
        # splitting d r l lets us test incremental changes
        # (directories, regular files, links pushed in separate passes)
        paths_to_add_d = [
            'dire-1/dire-3-1-rn-r', # rn
            'dire-2/dire-3-2-rp', # rp
            'dire-2/dire-3-3-np-r', # rnp
        ]
        paths_to_add_r = [
            'dire-1/file-1-1-rn-r.ext', # rn
            'dire-2/file-1-2-rp.ext', # rp
            'dire-2/file-1-3-np-r.ext', # rnp
        ]
        paths_to_add_l = [
            'dire-1/link-1-1-rn-r.ext', # rn
            'dire-2/link-1-2-rp.ext', # rp
            'dire-2/link-1-3-np-r.ext', # rnp
        ]
        paths_to_add_2 = [
            'dire-6/file-4-add.ext', # should error for now
        ]
        self._do_workflow(paths_to_add_d)
        self._do_workflow(paths_to_add_r)
        self._do_workflow(paths_to_add_l)
        try:
            self._do_workflow(paths_to_add_2)
            raise AssertionError('should have failed due to forbidden ops')
        except ValueError:
            pass
class TestWithRemoteWorkflow(TestWorkflow):
    # Re-run the full workflow against the real remote (slow path).
    _local_only = False
class TestRemote(_TestOperation, unittest.TestCase):
    """Instantiation behaviour of remote/cache paths that do not exist."""
    def test_remote_path_does_not_exist(self):
        # Constructing the path must not raise even though it has no remote.
        new_thing = self.root / 'does not exist'
    @pytest.mark.skip('Not ready.')
    def test_cache_path_does_not_exist(self):
        """ This should not produce an error.
        Path objects should be able to be instantiated without
        po.exists() -> True at a point in time prior to instantiation.
        """
        new_thing = self.anchor / 'does not exist'
    def __test_cache_path_fake_id(self):
        # Name-mangled (double underscore) so the test runner never picks
        # this up; kept as a sketch of faking a remote id.
        # FIXME the right way to do this is
        np = self.project_path / 'new-path'
        np.mkdir()
        npm = np.meta
        # bad way
        # NOTE(review): PathMeta below is not among the visible imports of
        # this file -- confirm it resolves before reviving this test.
        class FakeBase(aug.RemotePath):
            def __init__(self, id, name, cache=None):
                super().__init__(id, cache)
                self.name = name
                now = GetTimeNow()
                self.created = now._start_time
                self.updated = now._start_time
                self.checksum = 'lolnone'
                self.chunksize = 4096
                self.file_id = 'asdfasdfasdf'
            @property
            def meta(self):
                return PathMeta(size=self.size,
                                created=self.created,
                                updated=self.updated,
                                checksum=self.checksum,
                                chunksize=self.chunksize,
                                id=self.id,
                                file_id=self.file_id)
            @property
            def parent(self):
                return None
            def _parts_relative_to(self, remote, cache_parent=None):
                return [self.name]
        # This takes way too many steps :/
        Fake = FakeBase._new(LocalPath, aug.CachePath)
        fake = Fake('lol', 'double lol')
        self.anchor / fake
| SciCrunch/sparc-curation | test/test_delete.py | test_delete.py | py | 32,914 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "sparcur.paths.PennsieveCache",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sparcur.backends.PennsieveRemote",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "augpathlib.AugmentedPath",
"line_number": 26,
"usage_type": "call"
},
{... |
15154019133 | import pandas as pd
from textblob import TextBlob
import multiprocessing as mp
import time
def calc(review):
    """Classify one review as "Positive", "Negative" or "Neutral".

    The label is the sign of the TextBlob sentiment polarity of the
    review's string form (NaN/None values are stringified first).
    """
    score = TextBlob(str(review)).sentiment.polarity
    if score == 0:
        return "Neutral"
    return "Positive" if score > 0 else "Negative"
df = pd.read_csv('googleplaystore_user_reviews.csv')
reviews = df.Translated_Review
# NOTE(review): multiprocessing at module top level needs an
# `if __name__ == '__main__':` guard on spawn-based platforms
# (Windows / recent macOS) or workers will re-execute this file.
pool = mp.Pool(processes=8)
start = time.time()
# Score all reviews in parallel; the list comprehension just materializes
# the column (passing `reviews` directly would work the same way).
new_labels = pool.map(calc, [rev for rev in reviews])
end = time.time()
print(end - start)  # wall-clock seconds for the parallel pass
pool.close()
df['new labels'] = new_labels
df.to_csv("new_data.csv")
| philipsFarraj/ParallelProject | project.py | project.py | py | 626 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "textblob.TextBlob",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
8827426863 | from __future__ import absolute_import, division, print_function
import os
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'README.rst')) as f:
README = f.read()
setup(name='marv',
version='3.2.0',
description='MARV framework',
long_description=README,
classifiers=[
'Development Status :: 5 - Production/Stable',
"Framework :: Flask",
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Operating System :: POSIX :: Linux', # for now
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only', # for now
'Programming Language :: Python :: Implementation :: CPython', # for now
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
'Topic :: Scientific/Engineering',
],
author='Ternaris',
author_email='team@ternaris.com',
url='https://github.com/ternaris/marv',
license='AGPLv3+',
packages=[
'marv',
'marv.app',
'marv.tests',
'marv_detail',
'marv_node',
'marv_node.testing',
'marv_node.tests',
'marv_nodes',
'marv_pycapnp',
'marv_pycapnp.tests',
'marv_store',
'marv_webapi',
],
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose'],
install_requires=['Flask-Cors',
'Flask-SQLAlchemy',
'PyJWT',
'bcrypt',
'configparser',
'cython',
'Jinja2>=2.7.3',
'requests-oauthlib',
'pycapnp-for-marv',
'marv-cli'],
extras_require={
'testing': ['coverage',
'ipdb',
'ipdbplugin',
'ipython',
'mock',
'nose',
'testfixtures'],
},
entry_points={'marv_cli': ['marv = marv.cli']})
| ternaris/marv | setup.py | setup.py | py | 2,262 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
20271187203 | '''
This module provides management methods for the pygame screen
'''
import sys
import pygame
class MetaGame(type):
    """Metaclass backing the Game class.

    Lifts the underscore-prefixed class attributes ``_clock``, ``_screen``
    and ``_scenes`` into properties defined on the class itself, so they
    behave as read/write "classproperties" on Game. ``clock`` is read-only;
    ``screen`` and ``scenes`` also expose setters.
    """

    @property
    def clock(cls):
        """The shared game clock (read-only)."""
        return cls._clock

    @property
    def screen(cls):
        """The active game screen."""
        return cls._screen

    @screen.setter
    def screen(cls, value):
        cls._screen = value

    @property
    def scenes(cls):
        """The stack of active scenes."""
        return cls._scenes

    @scenes.setter
    def scenes(cls, value):
        cls._scenes = value
class Game(object, metaclass=MetaGame):
    '''
    Manage the pygame screen and the main event/render loop.

    All state is class-level; MetaGame exposes _clock/_screen/_scenes as
    properties on the class itself.
    '''
    _clock = None         # pygame.time.Clock, created in init()
    _screen = None        # screen object; assumed to provide resize()/flip()/debug — TODO confirm
    _scenes = None        # scene stack; assumed to provide peek()/pop() — TODO confirm
    _fps_unlocked = False # when True, tick() runs uncapped
    _max_fps = 0          # frame cap passed to clock.tick(); 0 = unlimited
    @classmethod
    def init(cls, title='pygame', max_fps=0):
        '''
        Initialize pygame, the window caption and the game clock.

        :param title: window caption text
        :param max_fps: frame cap for the main loop (0 = uncapped)
        '''
        # pre_init before pygame.init() so the mixer is set up mono (channels=1)
        pygame.mixer.pre_init(channels=1)
        pygame.init()
        # set window caption
        pygame.display.set_caption(title)
        # initialize game clock
        cls._clock = pygame.time.Clock()
        cls._max_fps = max_fps
    @classmethod
    def main(cls):
        '''
        Run the main loop until the scene stack is empty or QUIT is received.

        Each iteration dispatches pending events to the top scene, updates it,
        flips the display buffers and ticks the clock. A scene signals that it
        is finished by raising StopIteration, which pops it off the stack.
        '''
        while cls._scenes:
            # get the scene on top of the scene stack
            scene = cls._scenes.peek()
            try:
                # process events
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        return
                    elif event.type == pygame.VIDEORESIZE:
                        cls.screen.resize(event.dict['size'])
                    elif event.type == pygame.KEYUP and event.key == pygame.K_HASH:
                        # '#' toggles the debug overlay
                        cls.screen.debug.enabled = not cls.screen.debug.enabled
                    elif event.type == pygame.KEYUP and event.key == pygame.K_EXCLAIM:
                        # '!' toggles the frame-rate cap
                        cls._fps_unlocked = not cls._fps_unlocked
                    else:
                        scene.on_event(event)
                # update the scenegraph objects and redraw
                scene.update()
                # flip the buffers at the given maximum refresh rate
                cls.screen.flip()
                cls._clock.tick(0 if cls._fps_unlocked else cls._max_fps)
            except StopIteration:
                # the current scene finished; drop it and continue with the next
                Game.scenes.pop()
    @classmethod
    def quit(cls):
        '''
        Terminate the process immediately with exit status 0.
        '''
        sys.exit(0)
| oaken-source/pyablo | pyablo/game.py | game.py | py | 2,780 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pygame.mixer.pre_init",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pygame.display.se... |
34005743410 | from torch.utils.data import *
from imutils import paths
import numpy as np
import random
import cv2
import os
CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
'苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
'桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
'新',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'I', 'O', '-'
]
CHARS_DICT = {char:i for i, char in enumerate(CHARS)}
class LPRDataLoader(Dataset):
    """Dataset of licence-plate crops whose labels are encoded in the file name.

    Image paths are gathered recursively from every directory in ``img_dir``,
    shuffled once, resized to ``imgSize`` (width, height) and normalised to
    roughly [-1, 1] channels-first. The plate text is the part of the file
    name before the first '-'/'_', mapped to indices via CHARS_DICT.
    """
    def __init__(self, img_dir, imgSize, lpr_max_len, PreprocFun=None):
        self.img_dir = img_dir
        self.img_paths = []
        # Collect image paths from each directory in the list.
        for i in range(len(img_dir)):
            self.img_paths += [el for el in paths.list_images(img_dir[i])]
        # Shuffle once so consecutive samples do not come from the same folder.
        random.shuffle(self.img_paths)
        self.img_size = imgSize
        self.lpr_max_len = lpr_max_len
        if PreprocFun is not None:
            self.PreprocFun = PreprocFun
        else:
            # Default preprocessing: float32, centred, channels-first.
            self.PreprocFun = self.transform
    def __len__(self):
        # One sample per collected image path.
        return len(self.img_paths)
    def __getitem__(self, index):
        filename = self.img_paths[index]
        # NOTE: local name `Image` shadows the common PIL class name; here it
        # is just the numpy array cv2 returns (H, W, C, BGR order).
        Image = cv2.imread(filename)
        height, width, _ = Image.shape
        # cv2.resize takes (width, height), matching the img_size ordering.
        if height != self.img_size[1] or width != self.img_size[0]:
            Image = cv2.resize(Image, self.img_size)
        Image = self.PreprocFun(Image)
        # Plate string = basename up to the first '-' or '_'.
        basename = os.path.basename(filename)
        imgname, suffix = os.path.splitext(basename)
        imgname = imgname.split("-")[0].split("_")[0]
        label = list()
        for c in imgname:
            # one_hot_base = np.zeros(len(CHARS))
            # one_hot_base[CHARS_DICT[c]] = 1
            label.append(CHARS_DICT[c])
        # 8-character plates must pass the D/F sanity check below.
        if len(label) == 8:
            if self.check(label) == False:
                print(imgname)
                assert 0, "Error label ^~^!!!"
        return Image, label, len(label)
    def transform(self, img):
        """Normalise to float32 in roughly [-1, 1] and move channels first."""
        img = img.astype('float32')
        img -= 127.5
        img *= 0.0078125  # 0.0078125 == 1/128
        img = np.transpose(img, (2, 0, 1))
        return img
    def check(self, label):
        # An 8-char plate must have 'D' or 'F' at index 2 or as its last
        # character; otherwise the label is rejected as malformed.
        if label[2] != CHARS_DICT['D'] and label[2] != CHARS_DICT['F'] \
                and label[-1] != CHARS_DICT['D'] and label[-1] != CHARS_DICT['F']:
            print("Error label, Please check!")
            return False
        else:
            return True
| sirius-ai/LPRNet_Pytorch | data/load_data.py | load_data.py | py | 2,544 | python | en | code | 759 | github-code | 36 | [
{
"api_name": "imutils.paths.list_images",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "imutils.paths",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
... |
39803390183 | from homepageapp.models import ModelsNewSQL02Model
from django.conf import settings
from django.http import JsonResponse
from django.core.files.storage import FileSystemStorage
import os
from django.shortcuts import get_object_or_404, render
from django.core.paginator import Paginator
# repairOrder model was added on 11/5/2022. Deleted on 11/18/2022
from django.utils.translation import gettext_lazy as _
from django.contrib import messages
from django.utils import timezone
from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
import json
from django.contrib.auth.mixins import LoginRequiredMixin
from appointments.forms import AppointmentCreationForm, AppointmentImagesForm
from appointments.forms import AppointmentImagesForm, AppointmentImageFormSet
from appointments.models import AppointmentRequest, AppointmentImages
from django.views.generic import CreateView, FormView, TemplateView
from django.views.generic import DetailView
from django.views.generic import ListView
from formtools.wizard.views import WizardView
from django.urls import reverse_lazy
import calendar
from formtools.wizard.views import SessionWizardView
from appointments.models import APPT_STATUS_CANCELLED, APPT_STATUS_NOT_SUBMITTED
# 2023-04-10
def appointment_create_view_for_customer(request):
# form = AppointmentCreationForm(request.POST or None)
if request.method == 'POST':
# form = AppointmentCreationForm(request.POST)
form = AppointmentCreationForm(request.POST, request.FILES)
image_formset = AppointmentImageFormSet(
request.POST, request.FILES, user=request.user)
image_form = AppointmentImagesForm(
request.POST, request.FILES, user=request.user)
# form = AppointmentCreationForm(request.POST)
if form.is_valid(): # and image_formset.is_valid()
# form.save()
form.save(commit=False)
appointment_data = form.cleaned_data
# appointment_data.user = request.user
appointment_data = json.dumps(appointment_data, default=str)
request.session['appointment_data'] = appointment_data
# request.session['images'] = [image_form.cleaned_data for image_form in image_formset]
# request.session['submitted_form'] = json.dumps(form, default=dict)[0]
# json.dumps(my_dictionary, indent=4, sort_keys=True, default=str)
# appointment = form.save(commit=False)
# appointment.appointment_requested_datetime = timezone.now()
# appointment.save()
# kwargs = {'appointment': appointment}
# TODO: Send email to customer about service request status
return redirect('appointments:appointment-preview-view')
else:
print(form.errors) # print out the form errors
# return redirect('appointment_preview', args=[appointment.appointment_id])
else:
form = AppointmentCreationForm
image_formset = AppointmentImageFormSet(
queryset=AppointmentImages.objects.none())
image_form = AppointmentImagesForm()
# context = {'form': form}
context = {'form': form, 'image_formset': image_formset,
'image_form': image_form}
return render(request, 'appointments/10_appointment_create.html', context)
def appointment_preview_view(request):
# appointment = kwargs.get('appointment', None)
appointment_data = request.session.get('appointment_data')
images = request.session.get('images')
# submitted_form = request.session.get('submitted_form')
if not appointment_data:
return redirect('appointments:appointment-create-view')
# 2024-04-10 using json.loads to load back the appointment_data.
# otherwise appointment_data will be
appointment_data = json.loads(appointment_data)
images = json.loads(images)
# if request.method == 'GET':
form = AppointmentCreationForm(appointment_data)
appointment = AppointmentRequest(**appointment_data)
context = {'form': form,
'appointment': appointment,
}
if request.method == 'POST':
appointment.appointment_status = APPT_STATUS_NOT_SUBMITTED
appointment.save()
messages.success(
request, 'Appointment has been submitted successfuly.')
request.session.pop('appointment_data')
return redirect('appointments:appointment-success-view')
return render(request, 'appointments/20_appointment_preview.html', context)
# form = AppointmentCreationForm(request.POST)
# if form.is_valid():
# appointment = AppointmentRequest(form.fields)
# appointment.save()
# messages.success(request, 'Appointment has been submitted successfuly.')
# request.session.pop('appointment_data')
# # send_appointment_confirmation_email(appointment)
# return redirect('appointments:appointment-success')
# return redirect('appointment_success')
# form = AppointmentCreationForm(initial=kwargs)
# return render(request, 'appointments/02-appointment-preview.html', {'form': form})
# elif 'confirm' in request.POST:
# form = AppointmentCreationForm(request.POST)
# if form.is_valid():
# appointment = form.save(commit=False)
# appointment.appointment_status = 'C'
# appointment.save()
# # Send confirmation email -- pending
# # 2023-04-10
# # subject = 'Appointment Confirmed'
# # html_message = render_to_string('appointment_confirmation_email.html', {'appointment': appointment})
# # plain_message = strip_tags(html_message)
# # from_email = 'Your Company <noreply@yourcompany.com>'
# # to_email = appointment.appointment_email
# # send_mail(subject, plain_message, from_email, [to_email], html_message=html_message)
# # else:
# # return redirect('appointment-create-view')
# # form = AppointmentCreationForm()
# context = {'form': form}
# return render(request, 'appointments/02-appointment-preview.html', context)
# return redirect('appointment-create-view')
def appointment_success(request):
return render(request, 'appointments/30_appointment_creation_success.html')
# version 2 of appointment creation.
class AppointmentCreateView(SessionWizardView):
# def get_template_names(self):
# return ['appointments/12_appointment_create_v2_step_1.html', 'appointments/13_appointment_create_v2_step_2.html']
template_name = 'appointments/11_appointment_create_v2.html'
file_storage = FileSystemStorage(location=os.path.join(
settings.DEFAULT_FILE_STORAGE, 'appointment_images'))
form_list = [
('upload images', AppointmentImageFormSet),
('new_appointment', AppointmentCreationForm),
]
success_url = reverse_lazy('appointments:appointment-preview-view')
def done(self, form_list, **kwargs):
image_formset, appointment_form = form_list
appointment_form.save(commit=False)
appointment_data = appointment_form.cleaned_data
images = image_formset.save(commit=False)
appointment_data = json.dumps(appointment_data, default=str)
images = json.dumps(images, default=str)
self.request.session['appointment_data'] = appointment_data
self.request.session['images'] = images
# talent_data = {}
# talent_data.update(form.cleaned_data)
# # # Create the talent record
# # talent = TalentsModel.objects.create(**talent_data)
# talent = TalentsModel(**talent_data)
# Get the current user
# Add a success message
# messages.success(self.request, "Talent created successfully.")
# return redirect("talent_management:talent_list", {'talent': talent})
return redirect('appointments:appointment-preview-view')
class AppointmentPreviewView(FormView):
template_name = 'appointments/20_appointment_preview.html'
# form_class = AppointmentCreationForm
success_url = reverse_lazy('appointments:appointment-success-view')
def form_valid(self, form):
self.request.session['appointment_data'] = self.request.POST
return super().form_valid(form)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['data'] = self.request.session.get('appointment_data', {})
return kwargs
class AppointmentSuccessView(TemplateView):
template_name = 'appointments/30_appointment_creation_success.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
appointment_data = self.request.session.get('appointment_data', {})
if appointment_data:
appointment = AppointmentRequest(**appointment_data)
appointment.save()
self.request.session['appointment_data'] = None
context['appointment'] = appointment
return context
class AppointmentListView(LoginRequiredMixin, ListView):
model = AppointmentRequest
context_object_name = 'appointments'
template_name = 'appointments/50_appointment_list.html'
login_url = reverse_lazy('internal_users:internal_user_login')
def get_queryset(self):
# `__` double undestore..more researched are needed.
qs = AppointmentRequest.objects.prefetch_related(
'appointment_repair_order').exclude(appointment_status=APPT_STATUS_CANCELLED).all()
# qs=qs.filter(appointment_status=APPT_STATUS_CANCELLED)
# select_related('repair_order_customer').prefetch_related('repair_order_customer__addresses')
# repair order phase defines the WIP (work-in-progress) caegory. 6 means invoice.
return qs
class AppointmentDetailView(DetailView):
model = AppointmentRequest
context_object_name = 'appointment'
template_name = 'appointments/60_appointment_detail.html'
# login_url = '/users/login'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = AppointmentImagesForm()
return context
def post(self, request, *args, **kwargs):
appointment = self.get_object()
form = AppointmentImagesForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.appointment = appointment
image.save()
return self.get(request, *args, **kwargs)
# def post(self, request, *args, **kwargs):
# talent = self.get_object()
# form = TalentDocumentsForm(request.POST, request.FILES)
# if form.is_valid():
# document = form.save(commit=False)
# document.talent = talent
# document.save()
# return self.get(request, *args, **kwargs)
# def get_queryset(self):
# queryset = super().get_queryset()
# return queryset.filter(user=self.request.user)
class AppointmentDetailByConfirmationIdView(AppointmentDetailView):
def get_queryset(self):
queryset = AppointmentRequest.objects.filter(
appointment_confirmation_id=self.args['appointment_confirmation_id'])
return queryset.filter(user=self.request.user)
def appointment_get_vehicle_models(request, make_id):
models = ModelsNewSQL02Model.objects.filter(
make_id=make_id).all().order_by('model_name')
model_dict_list = list(models.values('model_id', 'model_name'))
model_tuple_list = [(model.pk, model.model_name) for model in models]
# return JsonResponse(model_tuple_list, safe=False)
return JsonResponse(model_dict_list, safe=False)
def appointment_image_list(request, pk):
appointment = AppointmentRequest.objects.get(pk=pk)
images = AppointmentImages.objects.filter(
image_is_active=True).filter(appointment=appointment).all()
return render(request, 'appointments/70_appointment_image_list.html', {'images': images, 'appointment': appointment})
def appointment_image_soft_delete(request, image_id):
image = get_object_or_404(AppointmentImages, image_id=image_id)
image.image_is_active = False
image.save()
messages.add_message(request, messages.INFO,
"Image selected has been deleted.")
return redirect('appointment:appointment_image_list')
| zjgcainiao/new_place_at_76 | appointments/views.py | views.py | py | 12,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "appointments.forms.AppointmentCreationForm",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "appointments.forms.AppointmentImageFormSet",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "appointments.forms.AppointmentImagesForm",
"line_number... |
70973655465 | import torch
import torch.nn as nn
class ChannelAttention(nn.Module):
    """CBAM-style channel attention.

    Squeezes spatial information with both average- and max-pooling, pushes
    each (N, C, 1, 1) descriptor through a shared bottleneck MLP (implemented
    as 1x1 convolutions), sums the two results, and returns a per-channel
    sigmoid gate of shape (N, C, 1, 1).
    """

    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Bug fix: the reduction factor was hard-coded to 16, silently
        # ignoring the `ratio` argument. Use the parameter instead.
        self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False),
                                nn.ReLU(),
                                nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Shared MLP applied to both pooled descriptors, then summed.
        avg_out = self.fc(self.avg_pool(x))
        max_out = self.fc(self.max_pool(x))
        out = avg_out + max_out
        return self.sigmoid(out)
class SpatialAttention(nn.Module):
    """CBAM-style spatial attention.

    Builds a 2-channel map from the channel-wise mean and max, convolves it
    down to a single channel, and returns a sigmoid gate of shape (N, 1, H, W).
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        # Two input channels (mean + max), one output channel; the padding
        # keeps the spatial size unchanged.
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        pooled = torch.cat(
            [x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True)[0]],
            dim=1,
        )
        return self.sigmoid(self.conv1(pooled))
class conv_block(nn.Module):
    """Two 3x3 Conv -> InstanceNorm -> ReLU stages followed by CBAM-style
    channel and spatial attention gating; spatial size is preserved."""
    def __init__(self, in_c, out_c):
        super().__init__()
        self.conv1 = nn.Conv2d(in_c, out_c, kernel_size=3, padding=1)
        self.in1 = nn.InstanceNorm2d(out_c)
        self.conv2 = nn.Conv2d(out_c, out_c, kernel_size=3, padding=1)
        self.in2 = nn.InstanceNorm2d(out_c)
        self.relu = nn.ReLU()
        self.ca = ChannelAttention(out_c)
        self.sa = SpatialAttention()
    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.in1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.in2(x)
        x = self.relu(x)
        # Multiplicative attention: channel gate first, then spatial gate.
        x = self.ca(x) * x
        x = self.sa(x) * x
        return x
class encoder_block(nn.Module):
    """conv_block followed by 2x2 max-pooling; returns both the pre-pool
    features (used as the skip connection) and the pooled output."""
    def __init__(self, in_c, out_c):
        super().__init__()
        self.conv = conv_block(in_c, out_c)
        self.pool = nn.MaxPool2d((2, 2))
    def forward(self, inputs):
        x = self.conv(inputs)
        p = self.pool(x)
        # x: skip-connection features; p: downsampled features.
        return x, p
class decoder_block(nn.Module):
    """2x upsampling via transposed convolution, concatenation with the
    encoder skip features, then a conv_block."""
    def __init__(self, in_c, out_c):
        super().__init__()
        self.up = nn.ConvTranspose2d(in_c, out_c, kernel_size=2, stride=2, padding=0)
        # After concatenation the channel count doubles, hence out_c + out_c.
        self.conv = conv_block(out_c + out_c, out_c)
    def forward(self, inputs, skip):
        x = self.up(inputs)
        # `axis` is the NumPy-style alias torch.cat accepts for `dim`
        # (concatenate along the channel dimension).
        x = torch.cat([x, skip], axis=1)
        x = self.conv(x)
        return x
class build_unet(nn.Module):
    """U-Net with CBAM attention in every conv_block.

    Four-stage encoder (64 -> 512 channels), a four-conv_block bottleneck
    growing 512 -> 1024, a symmetric decoder with skip connections, and a
    final 1x1 convolution producing a single-channel output map.
    """
    def __init__(self):
        super().__init__()
        """ Encoder """
        self.e1 = encoder_block(3, 64)
        self.e2 = encoder_block(64, 128)
        self.e3 = encoder_block(128, 256)
        self.e4 = encoder_block(256, 512)
        """ Bottleneck """
        # Channel count grows gradually across four chained conv_blocks.
        self.b1 = conv_block(512, 640)
        self.b2 = conv_block(640, 768)
        self.b3 = conv_block(768, 896)
        self.b4 = conv_block(896, 1024)
        """ Decoder """
        self.d1 = decoder_block(1024, 512)
        self.d2 = decoder_block(512, 256)
        self.d3 = decoder_block(256, 128)
        self.d4 = decoder_block(128, 64)
        """ Classifier """
        # 1x1 conv down to one output channel (raw logits, no activation).
        self.outputs = nn.Conv2d(64, 1, kernel_size=1, padding=0)
    def forward(self, inputs):
        """ Encoder """
        s1, p1 = self.e1(inputs)
        s2, p2 = self.e2(p1)
        s3, p3 = self.e3(p2)
        s4, p4 = self.e4(p3)
        """ Bottleneck """
        b = self.b1(p4)
        b = self.b2(b)
        b = self.b3(b)
        b = self.b4(b)
        """ Decoder """
        # Each decoder stage consumes the matching encoder skip features.
        d1 = self.d1(b, s4)
        d2 = self.d2(d1, s3)
        d3 = self.d3(d2, s2)
        d4 = self.d4(d3, s1)
        outputs = self.outputs(d4)
        return outputs
| AAleka/Cycle-CBAM-and-CBAM-UNet | UNet/model.py | model.py | py | 3,806 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
14991911664 | from os import environ
import uuid
import logging
import json
# file_name = environ('log_file_name')
file_name = 'app.log'
class ModelLog:
    """Thin wrapper around the stdlib ``logging`` module that appends
    JSON-encoded status records to ``file_name`` and reads them back."""
    def __init__(self):
        # Configuring the root logger is a side effect of instantiation.
        ModelLog.load_create()
    @staticmethod
    def request_uid(use_case):
        """
        Return a unique id: *use_case*, an underscore, then the last 20 hex
        characters of a random UUID4.

        NOTE(review): ``hex[12:]`` keeps 20 of uuid4's 32 hex chars; if 12
        random chars were intended, ``hex[:12]`` was probably meant — verify.
        """
        # str() is redundant here: the concatenation is already a str.
        return str(use_case + "_" + uuid.uuid4().hex[12:])
    @staticmethod
    def load_create():
        """Configure the root logger to append '%(asctime)s ->message' lines
        to ``file_name`` at INFO level, and log a startup marker."""
        logging.basicConfig(filename=('%s' % file_name), level=logging.INFO,
                            format='%(asctime)s ->%(message)s')
        logging.info('Started')
    @staticmethod
    def read_logs(uid):
        """Return every JSON record in the log whose 'name' field equals *uid*.

        Lines whose payload (text after the last '->') is not valid JSON are
        skipped; the parse error is printed rather than raised.
        """
        history = []
        with open(file_name) as reader:
            lines = reader.readlines()
            for line in lines:
                try:
                    val = json.loads(line.split('->')[-1])
                    if uid == val.get('name'):
                        history.append(val)
                except Exception as e:
                    print(e)
        return history
    @staticmethod
    def write_log(name, status, **kwargs):
        """Append one JSON record containing *name*, *status* and any extra
        keyword fields to the log via logging.info."""
        val = dict(name=name, status=status)
        val.update(kwargs)
        stringify = json.dumps(val)
        logging.info(stringify)
custom_log = ModelLog()
| ahmadaneeque/my-code | kubectl_docker/model-update-framework/re-train/mlog.py | mlog.py | py | 1,357 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "uuid.uuid4",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"... |
44007123128 | """
Django default settings for medfinder project.
Crate a local.py in this same folder to set your local settings.
"""
import requests
from os import path
from django.utils.translation import ugettext_lazy as _
import environ
import datetime
import django_heroku
root = environ.Path(__file__) - 3
env = environ.Env(DEBUG=(bool, False), )
environ.Env.read_env(env_file=root('.env'))
BASE_DIR = root()
dirname = path.dirname
BASE_DIR = dirname(dirname(dirname(path.abspath(__file__))))
DEBUG = env('DEBUG')
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', [])
SECRET_KEY = env('SECRET_KEY')
SITE_ID = env('SITE_ID')
LOCAL_APPS = (
'auth_ex',
'medfinder',
'medications',
'public',
'epidemic',
'historic',
'vaccinefinder',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.gis',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'activity_log',
'corsheaders',
'django_celery_beat',
'django_s3_storage',
'health_check',
'localflavor',
'phonenumber_field',
'rest_registration',
'rest_framework',
'rest_framework_swagger',
'tinymce',
) + LOCAL_APPS
AUTH_USER_MODEL = 'auth_ex.User'
LOGIN_REDIRECT_URL = '/admin/'
# --- STATIC FILES ---
STATIC_URL = '/static/'
STATIC_ROOT = env('STATIC_ROOT', default=(root - 1)('static'))
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# --- MEDIA ---
MEDIA_URL = '/media/'
MEDIA_ROOT = env('MEDIA_ROOT', default=(root - 1)('media'))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
)
}
},
]
MIDDLEWARE = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sites.middleware.CurrentSiteMiddleware',
'activity_log.middleware.ActivityLogMiddleware',
)
ROOT_URLCONF = 'medfinder.urls'
WSGI_APPLICATION = 'medfinder.wsgi.application'
USE_TZ = True
TIME_ZONE = 'UTC'
# --- CORS RULES ---
CORS_ORIGIN_ALLOW_ALL = True
# --- ACTIVITY LOG ---
ACTIVITYLOG_METHODS = ('POST',)
NDC_DATABASE_URL = env('NDC_DATABASE_URL', default='')
CENSUS_API_KEY = env('CENSUS_API_KEY', default='')
GOOGLE_MAP_API_KEY = env('GOOGLE_MAP_API_KEY', default='')
# --- LANGUAGES ---
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en-us'
# --- FILE UPLOAD ---
DATA_UPLOAD_MAX_MEMORY_SIZE = 104857600 # 100 * 1024 * 1024 # i.e. 100 MB
FILE_UPLOAD_MAX_MEMORY_SIZE = 104857600 # 100 * 1024 * 1024 # i.e. 100 MB
FILE_UPLOAD_PERMISSIONS = None
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# --- DATABASE ---
# --- POSTGRESQL
DATABASES = {
'default': env.db(
default='postgis://postgres:postgres@postgres:5432/postgres'),
'vaccinedb': {
'CONN_MAX_AGE': 3600,
'ENGINE': 'django.db.backends.mysql',
'HOST': env('VACCINEFINDER_HOST', default=''),
'NAME': env('VACCINEFINDER_NAME', default=''),
'PASSWORD': env('VACCINEFINDER_PASSWORD', default=''),
'USER': env('VACCINEFINDER_USER', default=''),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# --- S3 SETTINGS ---
S3_STORAGE_ENABLE = env.bool('S3_STORAGE_ENABLE', default=False)
if S3_STORAGE_ENABLE:
DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage'
STATICFILES_STORAGE = 'django_s3_storage.storage.StaticS3Storage'
AWS_REGION = env('AWS_REGION')
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_S3_BUCKET_NAME = env('AWS_S3_BUCKET_NAME')
AWS_S3_BUCKET_NAME_STATIC = env('AWS_S3_BUCKET_NAME_STATIC')
AWS_S3_BUCKET_AUTH = env.bool('AWS_S3_BUCKET_AUTH', default=False)
AWS_S3_MAX_AGE_SECONDS = 60 * 60 * 24 * 365 # 1 year.
# --- DJANGO COMPRESSOR ---
# STATICFILES_FINDERS += ('compressor.finders.CompressorFinder',)
# --- DJANGO REGISTRATION REDUX ---
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = False
# --- CELERY ---
CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='redis://redis:6379/')
CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 60 * 24
# --- CACHE ---
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{}1".format(CELERY_BROKER_URL),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# CELERY_BEAT_SCHEDULE = {
# 'import_existing_medications': {
# 'task': 'medications.tasks.import_existing_medications',
# 'schedule': crontab(day_of_month=15),
# 'relative': True,
# },
# }
# DEBUG TOOLBAR
ENABLE_DEBUG_TOOLBAR = env.bool(
'DEBUG',
default=False,
)
# --- DJANGO REST FRAMEWORK ---
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
}
# --- JWT ---
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1),
'JWT_AUTH_HEADER_PREFIX': 'Token',
}
# --- REST REGISTRATION ---
FRONTEND_URL = env('FRONTEND_URL', default='localhost:3000')
REST_REGISTRATION = {
'REGISTER_VERIFICATION_ENABLED': False,
'REGISTER_EMAIL_VERIFICATION_ENABLED': False,
'VERIFICATION_FROM_EMAIL': 'no-reply@example.com',
'RESET_PASSWORD_VERIFICATION_URL':
'{}/reset-password'.format(FRONTEND_URL),
'USER_HIDDEN_FIELDS': (
'is_active',
'is_staff',
'is_superuser',
'user_permissions',
'groups',
'date_joined',
'secret',
),
}
# EMAIL information
EMAIL_ENABLE = env.bool('EMAIL_ENABLE', default=True)
EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', default=True)
EMAIL_HOST = env('EMAIL_HOST', default='')
EMAIL_HOST_USER = env('EMAIL_HOST_USER', default='')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', default='')
EMAIL_PORT = env('EMAIL_PORT', default=587)
EMAIL_BACKEND = env(
'EMAIL_BACKEND',
default='django.core.mail.backends.smtp.EmailBackend',
)
FROM_EMAIL = env(
'FROM_EMAIL',
default='no-reply@example.com'
)
DEFAULT_FROM_EMAIL = env(
'DEFAULT_FROM_EMAIL',
default='webmaster@localhost',
)
if ENABLE_DEBUG_TOOLBAR:
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('172.18.0.1', '127.0.0.1', 'localhost')
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': lambda *x: True,
}
# ---PHONENUMBER FIELD ---
PHONENUMBER_DEFAULT_REGION = 'US'
DECIMAL_SEPARATOR = '.'
# --- STATES, COUNTIES & ZIPCODES ---
US_STATES_DATABASE = env(
'US_STATES_DATABASE',
default='https://raw.githubusercontent.com/PublicaMundi/MappingAPI/'
'master/data/geojson/us-states.json',
)
# Use the {}_{} to format with the correspondent state code and name
US_ZIPCODES_DATABASE = env(
'US_ZIPCODES_DATABASE',
default='https://raw.githubusercontent.com/OpenDataDE/'
'State-zip-code-GeoJSON/master/{}_{}_zip_codes_geo.min.json',
)
US_COUNTIES_DATABASE = env(
'US_COUNTIES_DATABASE',
default='http://eric.clst.org/assets/wiki/uploads/'
'Stuff/gz_2010_us_050_00_500k.json',
)
GEOJSON_GEOGRAPHIC_CONTINENTAL_CENTER_US = {
"type": "Point",
"coordinates": [-98.579561, 39.828194],
}
ZOOM_US = 3
ZOOM_STATE = 7
ZOOM_ZIPCODE = 13
# --- SENTRY ---
RAVEN_DSN = env('RAVEN_DSN', default='')
if RAVEN_DSN:
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
RAVEN_CONFIG = {
'dsn': RAVEN_DSN,
}
CELERYD_HIJACK_ROOT_LOGGER = False
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
# To capture more than ERROR, change to WARNING, INFO, etc.
'level': 'INFO',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', # noqa
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'celery': {
'level': 'ERROR',
'handlers': ['sentry', 'console'],
'propagate': False,
},
},
}
# Activate Django-Heroku: patches DATABASES, ALLOWED_HOSTS, static files and
# logging from the Heroku environment.
django_heroku.settings(locals())
# django-heroku overwrites the database engine; restore the PostGIS backend
# required by django.contrib.gis.
DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
# NOTE(review): "TYNYMCE" looks like a typo of "TINYMCE" — django-tinymce
# reads TINYMCE_JS_URL, so this setting may have no effect; verify before
# renaming (other code could reference the misspelled name).
TYNYMCE_JS_URL=path.join(MEDIA_URL, 'js/tiny_mce')
| ninjadevtrack/medifiner-api | medfinder/settings/default.py | default.py | py | 11,988 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "environ.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "environ.Env",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "environ.Env.read_env",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "environ.Env",
"line... |
# Imports the library needed to access the photo
from PIL import Image

# Load the source picture (change the filename here to process another image)
# and make sure it carries an alpha channel before we inspect transparency.
money = Image.open("money.png")
money = money.convert("RGBA")
pixels = money.load()

# Walk every pixel of the image, column by column.
width, height = money.size
for col in range(width):
    for row in range(height):
        # An alpha value of 0 means the pixel is fully transparent...
        if pixels[col, row][3] == 0:
            # ...so paint it opaque pink (RGB 255,192,203, alpha 255).
            pixels[col, row] = (255, 192, 203, 255)

# Persist the recolored result under a new name.
money.save("pinkbackgroundedmoney.png")
| koanarec/recolorimagepython | recolor.py | recolor.py | py | 730 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 5,
"usage_type": "name"
}
] |
27770460762 | import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fastapi_basic')))
from fastapi.testclient import TestClient
from app import app
from controllers.models.test import Calculate_Data
from services.auth import service_auth
client = TestClient(app)
access_token = service_auth.gen_token('johndoe', 'secret')
def test_not_authorization():
    """A request without an Authorization header must be rejected with 401."""
    payload = Calculate_Data()
    response = client.post(
        "/calculate_profit",
        headers={'Content-type': 'application/json'},
        json=payload.dict(),
    )
    print('Result response :', response.json())
    assert response.status_code == 401
def test_calculate_profit():
    """A request carrying a valid bearer token must succeed with 200."""
    payload = Calculate_Data()
    auth_headers = {
        'Content-type': 'application/json',
        'Authorization': f'Bearer {access_token}',
    }
    response = client.post(
        "/calculate_profit",
        headers=auth_headers,
        json=payload.dict(),
    )
    print('Result response :', response.json())
    assert response.status_code == 200
| jinybear/fastapi_basic | tests/test_controllers.py | test_controllers.py | py | 1,030 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
73335007463 | from datetime import datetime
from .setup import config, logger
def bibcodes():
    """Return the list of canonical bibcodes from the classic file.

    Reads one bibcode per line from the file named by the
    CLASSIC_CANONICAL_FILE config entry.  On any read failure the error is
    logged and an empty list is returned (best-effort behaviour preserved).
    """
    try:
        with open(config.get('CLASSIC_CANONICAL_FILE'), "r") as f:
            bibcodes = [line.strip() for line in f]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; the best-effort contract is unchanged.
        logger.exception("Unable to retreive bibcodes from classic")
        return []
    else:
        return bibcodes
def compare(classic_bibcodes, db_bibcodes, solr_bibcodes):
    """Compare bibcode lists against classic.

    Returns a tuple (results, batch):
      * results maps 'extra_in_db' / 'missing_in_db' / 'extra_in_solr' /
        'missing_in_solr' to set sizes; a count is only reported when both
        the classic list and the corresponding list are non-empty.
      * batch maps timestamp-prefixed names ("YYYYMMDD_HHMM_<name>") to the
        actual bibcode collections.

    Zenodo records ("zndo" in the bibcode) are filtered from the "extra"
    collections because they have no classic counterpart.
    """
    results = {}
    batch = {}
    now = datetime.utcnow()
    prefix = "{:04}{:02}{:02}_{:02}{:02}".format(now.year, now.month, now.day, now.hour, now.minute)
    # ROBUSTNESS FIX: bind all four collections up front so `batch` is always
    # well-formed even when the classic list is empty (previously these names
    # were only assigned inside conditional branches).
    extra_in_db = set()
    missing_in_db = set()
    extra_in_solr = set()
    missing_in_solr = set()
    if len(classic_bibcodes) > 0:
        classic_bibcodes = set(classic_bibcodes)
        if len(db_bibcodes) > 0:
            db_bibcodes = set(db_bibcodes)
            extra_in_db = db_bibcodes.difference(classic_bibcodes)
            extra_in_db = [e for e in extra_in_db if "zndo" not in e]  # Filter out non-classic Zenodo records
            missing_in_db = classic_bibcodes.difference(db_bibcodes)
            results['extra_in_db'] = len(extra_in_db)
            results['missing_in_db'] = len(missing_in_db)
        if len(solr_bibcodes) > 0:
            solr_bibcodes = set(solr_bibcodes)
            extra_in_solr = solr_bibcodes.difference(classic_bibcodes)
            extra_in_solr = [e for e in extra_in_solr if "zndo" not in e]  # Filter out non-classic Zenodo records
            missing_in_solr = classic_bibcodes.difference(solr_bibcodes)
            results['extra_in_solr'] = len(extra_in_solr)
            results['missing_in_solr'] = len(missing_in_solr)
    batch.update({
        "{}_extra_in_db".format(prefix): extra_in_db,
        "{}_missing_in_db".format(prefix): missing_in_db,
        "{}_extra_in_solr".format(prefix): extra_in_solr,
        "{}_missing_in_solr".format(prefix): missing_in_solr,
    })
    return results, batch
| adsabs/ADSStatsCollector | statscollector/classic.py | classic.py | py | 2,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setup.config.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setup.config",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "setup.logger.exception",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "setup.logger",
... |
15491681290 | # App Libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import base64
# Replication strategy library
from Asian_Option_CRR import *
# Input of rep strat descriptions
from inputDescriptions import list_input
# Creating the app object from Dash library
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], #theme for modern-looking buttons, sliders, etc
external_scripts=['https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/MathJax.js?config=TeX-MML-AM_CHTML', "./assets/mathjax.js"], #usage of LaTeX in the app
meta_tags=[{"content": "width=device-width"}] #content gets adapted to user device width
)
server = app.server
# Author parameters shown in the header / About popover.
# BUG FIX: bg_color previously ended with a stray trailing comma, which made
# it a one-element tuple instead of a string and produced an invalid CSS
# 'background' value wherever it was used.
bg_color = "#506784"
font_color = "#F3F6FA"
author = "Michel Vanderhulst"
emailAuthor = "michelvanderhulst@hotmail.com"
supervisor = "Prof. Frédéric Vrins"
emailSupervisor = "frederic.vrins@uclouvain.be"
# Logo image paths and the pages they link to.
logo1path = "./pictures/1200px-Louvain_School_of_Management_logo.svg.png"
logo1URL = "https://uclouvain.be/en/faculties/lsm"
logo2path = "./pictures/1280px-NovaSBE_Logo.svg.png"
logo2URL = "https://www2.novasbe.unl.pt/en/"
# Creating the app header
def header():
return html.Div(
id='app-page-header',
children=[
html.Div(children=[html.A(id='lsm-logo',
children=[html.Img(style={'height':'6%', 'width':'6%'}, src='data:image/png;base64,{}'.format(base64.b64encode(open(f"{logo1path}", 'rb').read()).decode()))],
href=f"{logo1URL}",
target="_blank", #open link in new tab
style={"margin-left":"10px"}
),
html.Div(children=[html.H5("Asian option replication strategy app"),
html.H6("Cox-Ross-Rubinstein model")
],
style={"display":"inline-block", "font-family":'sans-serif','transform':'translateY(+32%)', "margin-left":"10px"}),
html.Div(children=[dbc.Button("About", id="popover-target", outline=True, style={"color":"white", 'border': 'solid 1px white'}),
dbc.Popover(children=[dbc.PopoverHeader("About"),
dbc.PopoverBody([f"{author}",
f"\n {emailAuthor}",
html.Hr(),
f"This app was built for my Master's Thesis, under the supervision of {supervisor} ({emailSupervisor})."]),
],
id="popover",
is_open=False,
target="popover-target"),
],
style={"display":"inline-block","font-family":"sans-serif","marginLeft":"55%", "margin-right":"10px"}),
html.A(id="nova-logo",
children=[html.Img(style={"height":"9%","width":"9%"}, src="data:image/png;base64,{}".format(base64.b64encode(open(f"{logo2path}","rb").read()).decode()))],
href=f"{logo2URL}",
target="_blank",
style={}
)
]
,style={"display":"inline-block"}),
],
style={
'background': bg_color,
'color': font_color,
"padding-bottom": "10px",
"padding-top":"-10px"
}
)
# Creating the app body
def body():
return html.Div(children=[
html.Div(id='left-column', children=[
dcc.Tabs(
id='tabs', value='About this App',
children=[
dcc.Tab(
label='About this App',
value='About this App',
children=html.Div(children=[
html.Br(),
html.H4('What is this app?', style={"text-align":"center"}),
html.P(f"""This app computes the replication strategy of Asian options on a set of given inputs, in the Cox-Ross-Rubinstein framework"""),
html.P(f"""The goal is to showcase that under the Cox-Ross-Rubinstein model assumptions (see "Model" tab), the price \(V_0\) given by the pricing formula is "arbitrage-free".
Indeed, we show that in this case, it is possible to build a strategy that"""),
html.Ul([html.Li("Can be initiated with \(V_0\) cash at time \(0\)."),
html.Li('Is self-financing (i.e., no need to "feed" the strategy with extra cash later'),
html.Li("Will deliver exactly the payoff of the option at maturity")
]),
html.Hr(),
html.P(["""
The considered options are Asian options paying \(\psi(T)\) at maturity \(T\) where \(\psi(X)\) is the payoff function. Defining \(S_{ave}(T)\) as the underlying asset average price, we have \
that for a call, the payoff function is \(\psi(T)=max(0,S_{ave}(T)-K)\) and for a put \(\psi(S_T)=max(0,K-S_{ave}(T))\) where K is the strike price."""]),
html.Hr(),
html.P("""Read more about options: https://en.wikipedia.org/wiki/Option_(finance)"""),
])
),
dcc.Tab(
label="Model",
value="Model",
children=[html.Div(children=[
html.Br(),
html.H4("Model assumptions", style={"text-align":"center"}),
"Its main assumptions are:",
html.Ul([html.Li("Does not consider dividends and transaction costs"),
html.Li("The volatility and risk-free rate are assumed constant"),
html.Li("Fraction of shares can be traded"),
html.Li("The underlying asset can only either go 'up' by a fixed factor \(u<1\) or 'down' by \(0<d<1\)."),
html.Li("The log-returns are independent at all periods")]),
html.Hr(),
html.H4("Underlying asset dynamics", style={"text-align":"center"}),
html.P([
"""
Under CRR, the underlying asset follows a geometric random walk with drift \(\mu\delta\) and volatility \(\sigma\sqrt{\delta}\). The probability to go \
'up' and 'down' are respectively \(p\) and \(q=1-p\) (under \(\mathcal{P}\)).The stock price at period \(i\) can be modeled as a function of a binomial \
random variable, and the constant 'up' and 'down' factors computed: $$u=e^{\mu\delta+\sigma\sqrt{\delta}}$$ $$d=e^{\mu\delta-\sigma\sqrt{\delta}}$$ \
The \(\mathcal{Q}\)-probability allowing the discounted stock price to be a martingale amounts to the \(\\tilde{p}\) value (under \(\mathcal{Q}\)) \
that leads to the martingale property: \(\\tilde{p}=\\frac{e^{r}-d}{u-d}\).
"""]),
html.Hr(),
html.H4("Option price", style={"text-align":"center"}),
html.P(["""
With the CRR, the stock tree and the option intrinsic value are easily computed at all nodes. Under the pricing measure \(\mathcal{Q}\), \
the option price of a node is simply the discounted value of the two children nodes. The price tree is therefore filled backwards, starting from the leaves (i.e. the payoff).\
The pricing formula is thus $$V_i=e^{-r\\delta}(V_{i+1}\\tilde{p}+V_{i+1}\\tilde{q})$$
"""]),
html.Hr(),
html.H4("Academic references", style={"text-align":"center"}),
html.Ul([html.Li("Vrins, F. (2020). Course notes for LLSM2225: Derivatives Pricing. (Financial Engineering Program, Louvain School of Management, Université catholique de Louvain)"),
html.Li("Shreve, S. E. (2004). Stochastic Calculus for Finance I The Binomial Asset Pricing Model (2nd ed.). Springer Finance.")
]),
])]),
#
#
dcc.Tab(
label="Appr-oach",
value="Methodology",
children=[html.Div(children=[
html.Br(),
html.H4("Methodology followed", style={"text-align":"center"}),
html.P([
"""
To prove that the risk-neutral price is arbitrage-free, let us try to perfectly replicate it with a strategy. If the strategy is successfull, then
the price is unique and therefore arbitrage-free. For an Asian option, we will also denote with \(s_0\) the stock price at time 0 and \(Y_n=\sum_{k=0}^{n}s_k\) the sum of the
stock prices between times zero and n. From there, the payoff at time 3 will be \((\\frac{1}{4}Y_{3}-K)^+\) with strike K. Then, let \(V_n(s,y)\) be the price of
the Asian option at node n if \(s_n=s\) and \(Y_n=y\).
"""]),
html.Hr(),
html.H4("Replicating portfolio", style={"text-align":"center"}),
html.P([
"""
Let us start a replication strategy based on the option price: \(\Pi_{0} = V_{0}(s,y)\). The portfolio is composed of a cash account and a equity account.
At each period, the number of shares to hold is given by $$\Delta_{n}(s,y) = \\frac{V_{n+1}(us, y + us)-V_{n+1}(ds, y + ds)}{(u-d)s}$$
The initial amount of cash will be \(c_{0} = \Pi_{0} - \Delta_{0}(s,y)s_{0}\). At each node, a portfolio rebalancing is needed to ensure that the portfolio value is
equal to the option price. Before the rebalancing, \(\Delta\) is the same from node to node, the cash account grew at the risk-free rate \(c_{n}=c_{n-1}e^{r}\),
and the portfolio is the sum of both equity and cash positions $$\Pi_{n} = c_{n}+\Delta_{n}(s,y)s_{n}$$
The rebalancing is done by updating the shares to hold $$\Delta_{n}(s,y) = \\frac{V_{n+1}(us, y + us)-V_{n+1}(ds, y + ds)}{(u-d)s}$$ and ensuring that the value
of the strategy before and after the rebalancing is the same $$c_{n}=\pi_{n}-(\Delta_{n-1}-\Delta_{n})s_{n}$$
The tree is computed forward, and will at all times replicate with option price. At the end of it we obtain the option payoff.
"""]),
])]),
#
#
dcc.Tab(
label='Input',
value='Input',
children=html.Div(children=[
html.Br(),
#
html.P(
"""
Hover your mouse over any input to get its definition.
"""
),
dcc.Dropdown(
id='CallOrPut',
options=[{'label':'Asian Call option', 'value':"Call"},
{'label':'Asian Put option', 'value':"Put"}],
value='Call'),
#
html.Br(),
#
html.Div(children=[html.Label('Spot price', title=list_input["Spot price"], style={'font-weight': 'bold', "text-align":"left", "width":"25%",'display': 'inline-block'} ),
dcc.Input(id="S", value=100, type='number', style={"width":"16%", 'display': 'inline-block'}),
html.P("",id="message_S", style={"font-size":12, "color":"red", "padding":5, 'width': '55%', "text-align":"left", 'display': 'inline-block'})
]
),
html.Div(children=[html.Label("Strike", title=list_input["Strike"], style={'font-weight': 'bold',"text-align":"left", "width":"25%",'display': 'inline-block'} ),
dcc.Input(id="K", value=100, type='number', style={"width":"16%", 'display': 'inline-block'}),
html.P("",id="message_K", style={"font-size":12, "color":"red", "padding":5, 'width': '55%', "text-align":"left", 'display': 'inline-block'})
],
),
html.Div(children=[html.Label("Drift", title=list_input["Drift"], style={'font-weight': 'bold', 'display': 'inline-block'}),
html.Label(id="drift", style={'display': 'inline-block'}),
]),
#
dcc.Slider(id='mu', min=-0.30, max=0.30, value=0.10, step=0.01, marks={-0.30: '-30%', 0:"0%", 0.30: '30%'}),
#
html.Div([html.Label('Volatility', title=list_input["Volatility"], style={'font-weight': 'bold', "display":"inline-block"}),
html.Label(id="sigma", style={"display":"inline-block"}),]),
#
dcc.Slider(id='vol', min=0, max=0.5, step=0.01, value=0.15, marks={0:"0%", 0.25:"25%", 0.50:"50%"}),
#
html.Div([html.Label('Risk-free rate', title=list_input["Risk-free rate"], style={'font-weight': 'bold', "display":"inline-block"}),
html.Label(id="riskfree", style={"display":"inline-block"}),]),
dcc.Slider(id='Rf', min=0, max=0.1, step=0.01, value=0.05, marks={0:"0%", 0.05:"5%", 0.1:"10%"}),
#
html.Div([html.Label('Maturity', title=list_input["Maturity"], style={'font-weight':'bold', "display":"inline-block"}),
html.Label(id="matu", style={"display":"inline-block"}),]),
dcc.Slider(id='T', min=0.25, max=5,
marks={0.25:"3 months", 3:"3 years", 5:"5 years"}, step=0.25, value=3),
#
html.Br(),
html.Div(children=[html.Label('Tree periods: ', title=list_input["Tree periods"], style={'font-weight': 'bold', "text-align":"left", "width":"30%",'display': 'inline-block'} ),
dcc.Input(id="tree_periods", value=3, type='number', style={"width":"16%", 'display': 'inline-block'}),
html.P("",id="message_tree", style={"font-size":12, "color":"red", "padding":5, 'width': '40%', "text-align":"left", 'display': 'inline-block'})
],
),
])),
],),], style={'float': 'left', 'width': '25%', 'margin':"30px"}),
])
# Creating the app graphs
def graphs():
    """Lay out the six output charts in three rows of two panels each."""
    def _panel(caption, graph_id, width):
        # One chart panel: a markdown caption above its dcc.Graph placeholder.
        return html.Div(children=[dcc.Markdown(children=caption),
                                  dcc.Graph(id=graph_id)],
                        style={"float": "right", "width": width, "display": "inline-block"})

    rows = [
        html.Div([_panel(''' #### Cumulative sum of stock''', 'option_intrinsic', "45%"),
                  _panel(''' #### Stock simulation (GRW) ''', 'stock_simul', "55%")]),
        html.Div([_panel(''' #### Option price''', 'option_price', "45%"),
                  _panel(''' #### Portfolio after rebalancing''', 'port_details', "55%")]),
        html.Div([_panel(''' #### Cash account after rebalancing''', 'cash_acc', "45%"),
                  _panel(''' #### Shares held after rebalancing''', 'nbr_shares', "55%")]),
    ]
    return html.Div(id='right-column',
                    children=[html.Br()] + rows,
                    style={'float': 'right', 'width': '70%'})
# Building together the app layout: header, body and graphs
# Root layout: client-side store + header + input column + chart column.
app.layout = html.Div(
    id='main_page',
    children=[
        # dcc.Store keeps the computed replication strategy in the browser so
        # the six graph callbacks can share one computation.
        dcc.Store(id='memory-output'),
        header(),
        body(),
        graphs(),
    ],
)
# App interactivity 1: calling the replication strategy everytime the user changes an input
@app.callback(
    Output('memory-output', 'data'),
    [Input('CallOrPut', 'value'),
     Input("S","value"),
     Input("K", "value"),
     Input("Rf", "value"),
     Input("T","value"),
     Input("mu","value"),
     Input("vol", "value"),
     Input("tree_periods", "value"),])
def get_rep_strat_data(CallOrPut, S, K, Rf,T,mu,vol,tree_periods):
    """Recompute the CRR replication strategy whenever any input changes.

    Delegates to RepStrat_Asian_Option_CRR (wildcard-imported from
    Asian_Option_CRR) and stores the full result tuple in dcc.Store
    'memory-output' so all graph callbacks share a single computation.
    """
    nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = RepStrat_Asian_Option_CRR(CallOrPut, S, K, Rf, T, mu, vol, tree_periods)
    return nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown
# App interactivity 2: plot of stock simulation + CRR u, d, probUp & probDown values
@app.callback(
Output('stock_simul', 'figure'),
[Input('memory-output', 'data'),])
def graph_stock_simul(data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data
return{
'layout': go.Layout(
title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
#margin={"t":15},
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
# showlegend=False,
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
legend=dict(
x=0,
y=0.8,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'),
),
'data': [
go.Scatter(
x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=stocksLabel,
showlegend=False,
hoverinfo='none',
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Up factor: {u}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Down factor: {d}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Prob up: {probUp}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Prob down: {probDown}'
),
],
}
# App interactivity 3: plot of portfolio (cash + equity accounts)
@app.callback(
Output('port_details', 'figure'),
[Input('memory-output', 'data'),])
def graph_portf_details(data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data
return{
'layout': go.Layout(
title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,} # numbers below}
),
'data': [
go.Scatter(
x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=portfolioLabel,
hoverinfo='none',
),
],
}
# App interactivity 4: plot of number of shares to hold at all nodes
@app.callback(
Output('nbr_shares', 'figure'),
[Input('memory-output', 'data'),])
def graph_nbr_of_shares(data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data
return{
'layout': go.Layout(
title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
), xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,} # numbers below}
),
'data': [
go.Scatter(
x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=nbrofsharesLabel,
hoverinfo='none',
),
],
}
# App interactivity 5: cash account
@app.callback(
Output('cash_acc', 'figure'),
[Input('memory-output', 'data'),])
def graph_cash_account(data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data
return{
'layout': go.Layout(
title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,} # numbers below}
),
'data': [
go.Scatter(
x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=cashLabel,
hoverinfo='none',
),
],
}
# App interactivity 6: option price through risk-neutral valuation
@app.callback(
Output('option_price', 'figure'),
[Input('memory-output', 'data'),])
def graph_option_pricee(data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data
return{
'layout': go.Layout(
title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,} # numbers below}
),
'data': [
go.Scatter(
x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=optionpriceLabel,
hoverinfo='none',
),
],
}
# App interactivity 7: cumulative sum of stock price for the asian option average
@app.callback(
Output('option_intrinsic', 'figure'),
[Input('memory-output', 'data'),])
def graph_option_cumsum(data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown = data
return{
'layout': go.Layout(
title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,} # numbers below}
),
'data': [
go.Scatter(
x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=intrinsicLabel,
hoverinfo='none',
),
],
}
# App input checks
@app.callback(Output('message_S', 'children'),
              [Input('S', 'value')])
def check_input_S(S):
    """Show an error message under the spot-price input when it is negative."""
    if S >= 0:
        return ""
    return f'Cannot be lower than 0.'
@app.callback(Output('message_K', 'children'),
              [Input('K', 'value')])
def check_input_K(K):
    """Show an error message under the strike input when it is negative."""
    return f'Cannot be lower than 0.' if K < 0 else ""
@app.callback(Output('message_tree', 'children'),
              [Input('tree_periods', 'value')])
def check_input_tree_periods(tree_periods):
    """Show an error message when the number of tree periods is below 1.

    BUG FIX: this was a second function named `check_input_K`, silently
    shadowing the strike-price validator of the same name in the module
    namespace (Dash had already registered both callbacks, but the duplicate
    name was a latent bug and made tracebacks ambiguous).  Nothing calls the
    function by name, so renaming it is safe.
    """
    if tree_periods < 1:
        return f'Cannot be lower than 1.'
    else:
        return ""
# App input visuals
@app.callback(Output('drift', 'children'),
              [Input('mu', 'value')])
def display_value(value):
    """Echo the drift slider value as a percentage label."""
    return ': {}%'.format(int(value * 100))
@app.callback(Output('sigma', 'children'),
              [Input('vol', 'value')])
def display_value2(value):
    """Echo the volatility slider value as a percentage label."""
    return ': {}%'.format(int(value * 100))
@app.callback(Output('riskfree', 'children'),
              [Input('Rf', 'value')])
def display_value3(value):
    """Echo the risk-free-rate slider value as a percentage label."""
    return ': {}%'.format(int(value * 100))
@app.callback(Output('matu', 'children'),
              [Input('T', 'value')])
def display_value4(value):
    """Echo the maturity in months (below one year) or in years."""
    if value in (0.25, 0.5, 0.75):
        return f": {int(value*12)} months"
    if value == 1:
        return f': {value} year'
    return f': {value} years'
# Opens the "About" button top right
@app.callback(
    Output("popover", "is_open"),
    [Input("popover-target", "n_clicks")],
    [State("popover", "is_open")],
)
def toggle_popover(n, is_open):
    """Flip the About popover whenever its button has been clicked."""
    return (not is_open) if n else is_open
# Main function, runs the app
if __name__ == '__main__':
app.run_server(debug=True) | MichelVanderhulst/web-app-asian-option-crr | app.py | app.py | py | 33,229 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dash.Dash",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.themes",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "dash_html_components.Div",
"line_number": 38,
"usage_type": "call"
},
{
"api_name... |
37466207832 | from concurrent import futures
import sys
import grpc
import summary_api_pb2
import summary_api_pb2_grpc
import pandas as pd
from summary_statistics import calculate_frequency
class DocumentSummarizer(summary_api_pb2_grpc.DocumentSummarizerServicer):
    """gRPC servicer computing frequency statistics for one document column."""

    def SummarizeDocument(self, request, context):
        """Handle a SummarizeDocument RPC.

        Loads the document either from the inlined JSON bytes in
        request.document.content or, when that is empty, from the CSV at
        request.document.source.http_uri.  The table is then aggregated on
        the column named by request.params_for_aggregation and the frequency
        table is returned as UTF-8 JSON bytes.

        Raises ValueError when the remote CSV cannot be read, when the
        aggregation column is missing, or when the column has at least
        int(request.exclude) distinct categories.
        """
        if request.document.content != b'':
            data = pd.read_json(request.document.content.decode())
        else:
            try:
                data = pd.read_csv(request.document.source.http_uri, index_col=0)
                # NOTE(review): caches the fetched table back onto the incoming
                # request message (mutates the request) -- confirm intended.
                request.document.content = data.to_json().encode('utf-8')
            except Exception as ex:
                raise ValueError("Cannot load html ({})".format(ex))
        if request.params_for_aggregation not in data.columns:
            raise ValueError("Column ({}) is not in list ({})".format(request.params_for_aggregation,','.join(data.columns)))
        # Aggregate only when the category count is strictly below `exclude`.
        if len(data[request.params_for_aggregation].unique()) < int(request.exclude):
            temp_df = calculate_frequency(data, request.params_for_aggregation)
        else:
            raise ValueError("number of categories ({}) more than threshold ({})!".format((len(data[request.params_for_aggregation].unique())), int(request.exclude)) )
        return summary_api_pb2.SummarizeDocumentReply(content=temp_df.reset_index(drop=True).to_json().encode('utf-8'))
def serve(port):
    """Start the DocumentSummarizer gRPC server on localhost and block.

    `port` may be an int or a numeric string; it is only interpolated into
    the bind address.
    """
    worker_pool = futures.ThreadPoolExecutor(max_workers=10)
    server = grpc.server(worker_pool)
    summary_api_pb2_grpc.add_DocumentSummarizerServicer_to_server(DocumentSummarizer(), server)
    server.add_insecure_port("localhost:{}".format(port))
    print("start server listening on {}".format(port))
    server.start()
    server.wait_for_termination()
if __name__ == '__main__':
    # Port may be given as the first CLI argument (kept as a string; it is
    # only interpolated into the bind address), defaulting to 50052.
    port = sys.argv[1] if len(sys.argv) > 1 else 50052
    serve(port)
| doralaura24/visma | summary-statistics-service/summary/server.py | server.py | py | 1,856 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "summary_api_pb2_grpc.DocumentSummarizerServicer",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_json",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
... |
4738737155 | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import math
import time
import multiprocessing
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.functional as F
import copy
class conGraphConvolutionlayer(Module):
    """One graph-convolution layer: out = adj @ (input @ W) (+ bias).

    Same contract as the reference GCN layer (Kipf & Welling); parameters
    are initialised uniformly in [-1/sqrt(out_features), 1/sqrt(out_features)].
    """

    def __init__(self, in_features, out_features, bias=True):
        super(conGraphConvolutionlayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Weight is created first, bias second: keep this order so seeded
        # initialisation draws the same random numbers as before.
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-draw weight (and bias) uniformly in [-bound, bound]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Project the node features with W, then propagate along adj."""
        projected = torch.mm(input, self.weight)
        propagated = torch.spmm(adj, projected)
        if self.bias is None:
            return propagated
        return propagated + self.bias

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class conGCN(nn.Module):
    """Two-branch graph convolutional network with a fully connected head.

    One GCN branch runs on the expression-graph adjacency (adjs[0]) and one
    on the spatial-graph adjacency (adjs[1]); both start from the same node
    feature matrix.  Their final hidden states are concatenated and fed to a
    fully connected head ending in a log-softmax over `nout1` classes.
    Hidden layers are created dynamically with exec(), producing attributes
    named cgc{i}_exp / cgc{i}_sp (depth `common_hid_layers_num`) and
    gc_out11{i} / bn_out11{i} (depth `fcnn_hid_layers_num`).
    """

    def __init__(self, nfeat,
                 nhid,
                 common_hid_layers_num,
                 fcnn_hid_layers_num,
                 dropout,
                 nout1,
                 ):
        super(conGCN, self).__init__()
        self.nfeat = nfeat                                  # input feature dimension
        self.nhid = nhid                                    # hidden width, shared by all layers
        self.common_hid_layers_num = common_hid_layers_num  # GCN depth per branch (after input layer)
        self.fcnn_hid_layers_num = fcnn_hid_layers_num      # hidden depth of the FCNN head
        self.nout1 = nout1                                  # number of output classes
        self.dropout = dropout                              # dropout probability for F.dropout
        # NOTE(review): nn.Module already manages `training` (and train()/eval()
        # still toggle it); this explicit assignment is redundant but harmless.
        self.training = True
        ## The beginning layer (one per branch)
        self.gc_in_exp = conGraphConvolutionlayer(nfeat, nhid)
        self.bn_node_in_exp = nn.BatchNorm1d(nhid)
        self.gc_in_sp = conGraphConvolutionlayer(nfeat, nhid)
        self.bn_node_in_sp = nn.BatchNorm1d(nhid)
        ## common hidden layers, created dynamically for each branch
        if self.common_hid_layers_num > 0:
            for i in range(self.common_hid_layers_num):
                exec('self.cgc{}_exp = conGraphConvolutionlayer(nhid, nhid)'.format(i+1))
                exec('self.bn_node_chid{}_exp = nn.BatchNorm1d(nhid)'.format(i+1))
                exec('self.cgc{}_sp = conGraphConvolutionlayer(nhid, nhid)'.format(i+1))
                exec('self.bn_node_chid{}_sp = nn.BatchNorm1d(nhid)'.format(i+1))
        ## FCNN head (input width 2*nhid because the two branches are concatenated)
        self.gc_out11 = nn.Linear(2*nhid, nhid, bias=True)
        self.bn_out1 = nn.BatchNorm1d(nhid)
        if self.fcnn_hid_layers_num > 0:
            for i in range(self.fcnn_hid_layers_num):
                exec('self.gc_out11{} = nn.Linear(nhid, nhid, bias=True)'.format(i+1))
                exec('self.bn_out11{} = nn.BatchNorm1d(nhid)'.format(i+1))
        self.gc_out12 = nn.Linear(nhid, nout1, bias=True)

    def forward(self, x, adjs):
        """Run both GCN branches on (x, adjs) and classify each node.

        Args:
            x: node-feature matrix of shape (num_nodes, nfeat).
            adjs: pair [adj_exp, adj_sp] of (sparse) adjacency matrices.

        Returns:
            (log_probs, gc_list): log-softmax scores of shape
            (num_nodes, nout1), and a dict of the graph-conv/linear layers
            keyed by their attribute names.
        """
        self.x = x
        ## input layer, each branch on its own adjacency
        self.x_exp = self.gc_in_exp(self.x, adjs[0])
        self.x_exp = self.bn_node_in_exp(self.x_exp)
        self.x_exp = F.elu(self.x_exp)
        self.x_exp = F.dropout(self.x_exp, self.dropout, training=self.training)
        self.x_sp = self.gc_in_sp(self.x, adjs[1])
        self.x_sp = self.bn_node_in_sp(self.x_sp)
        self.x_sp = F.elu(self.x_sp)
        self.x_sp = F.dropout(self.x_sp, self.dropout, training=self.training)
        ## common hidden layers
        if self.common_hid_layers_num > 0:
            for i in range(self.common_hid_layers_num):
                exec("self.x_exp = self.cgc{}_exp(self.x_exp, adjs[0])".format(i+1))
                exec("self.x_exp = self.bn_node_chid{}_exp(self.x_exp)".format(i+1))
                self.x_exp = F.elu(self.x_exp)
                self.x_exp = F.dropout(self.x_exp, self.dropout, training=self.training)
                exec("self.x_sp = self.cgc{}_sp(self.x_sp, adjs[1])".format(i+1))
                exec("self.x_sp = self.bn_node_chid{}_sp(self.x_sp)".format(i+1))
                self.x_sp = F.elu(self.x_sp)
                self.x_sp = F.dropout(self.x_sp, self.dropout, training=self.training)
        ## FCNN head on the concatenated branch outputs
        self.x1 = torch.cat([self.x_exp, self.x_sp], dim=1)
        self.x1 = self.gc_out11(self.x1)
        self.x1 = self.bn_out1(self.x1)
        self.x1 = F.elu(self.x1)
        self.x1 = F.dropout(self.x1, self.dropout, training=self.training)
        if self.fcnn_hid_layers_num > 0:
            for i in range(self.fcnn_hid_layers_num):
                exec("self.x1 = self.gc_out11{}(self.x1)".format(i+1))
                exec("self.x1 = self.bn_out11{}(self.x1)".format(i+1))
                self.x1 = F.elu(self.x1)
                self.x1 = F.dropout(self.x1, self.dropout, training=self.training)
        self.x1 = self.gc_out12(self.x1)
        ## expose the trainable layers by name for downstream inspection
        gc_list = {}
        gc_list['gc_in_exp'] = self.gc_in_exp
        gc_list['gc_in_sp'] = self.gc_in_sp
        if self.common_hid_layers_num > 0:
            for i in range(self.common_hid_layers_num):
                exec("gc_list['cgc{}_exp'] = self.cgc{}_exp".format(i+1, i+1))
                exec("gc_list['cgc{}_sp'] = self.cgc{}_sp".format(i+1, i+1))
        gc_list['gc_out11'] = self.gc_out11
        if self.fcnn_hid_layers_num > 0:
            # BUG FIX: the original had no loop here and reused the stale `i`
            # left over from the common-layer section, so at most one (and
            # possibly the wrong) FCNN layer ended up in gc_list, and a
            # NameError was possible when common_hid_layers_num == 0.
            for i in range(self.fcnn_hid_layers_num):
                exec("gc_list['gc_out11{}'] = self.gc_out11{}".format(i+1, i+1))
        gc_list['gc_out12'] = self.gc_out12
        return F.log_softmax(self.x1, dim=1), gc_list
def conGCN_train(model,
                 train_valid_len,
                 test_len,
                 feature,
                 adjs,
                 label,
                 epoch_n,
                 loss_fn,
                 optimizer,
                 train_valid_ratio = 0.9,
                 scheduler = None,
                 early_stopping_patience = 5,
                 clip_grad_max_norm = 1,
                 load_test_groundtruth = False,
                 print_epoch_step = 1,
                 cpu_num = -1,
                 GCN_device = 'CPU'
                ):
    """Full-batch training loop for a conGCN model.

    Data layout (inferred from the indexing below -- TODO confirm with caller):
    rows [0, test_len) of ``feature``/``label`` are the test set; rows
    [test_len, test_len + train_valid_len) are the train+validation pool,
    split by ``train_valid_ratio`` (train first, validation after).

    :param model: module whose forward ``model(x, adjs)`` returns a tuple
        ``(output, paras)`` where ``paras`` is a dict of layer references.
    :param train_valid_len: number of train+validation samples.
    :param test_len: number of leading test samples.
    :param feature: node feature tensor (cast to float before forward).
    :param adjs: list of adjacency tensors, one per model branch.
    :param label: target tensor, indexed like ``feature``.
    :param epoch_n: maximum number of epochs.
    :param loss_fn: callable ``loss_fn(pred, target)``.
    :param optimizer: torch optimizer over ``model.parameters()``.
    :param train_valid_ratio: fraction of the pool used for training.
    :param scheduler: optional LR scheduler; ``step()`` is tried first, and
        ``step(metrics=...)`` is the fallback (ReduceLROnPlateau-style).
    :param early_stopping_patience: stop after this many epochs without
        validation improvement; <= 0 disables early stopping.
    :param clip_grad_max_norm: max norm for gradient clipping.
    :param load_test_groundtruth: if True, also evaluate/report test loss.
    :param print_epoch_step: log every N epochs.
    :param cpu_num: torch thread count; -1 means use all detected cores.
    :param GCN_device: 'CPU' forces CPU; any other value uses CUDA when available.
    :return: (final epoch output moved to CPU, per-epoch loss list
        ``[train, val, test-or-None]``, model moved to CPU)
    """
    # --- device selection ---------------------------------------------------
    if GCN_device == 'CPU':
        device = torch.device("cpu")
        print('Use CPU as device.')
    else:
        if torch.cuda.is_available():
            device = torch.device("cuda")
            print('Use GPU as device.')
        else:
            # Requested GPU but none available: silently fall back to CPU.
            device = torch.device("cpu")
            print('Use CPU as device.')
    # --- CPU thread configuration -------------------------------------------
    if cpu_num == -1:
        cores = multiprocessing.cpu_count()
        torch.set_num_threads(cores)
    else:
        torch.set_num_threads(cpu_num)
    # Move model and all tensors to the selected device.
    model = model.to(device)
    adjs = [adj.to(device) for adj in adjs]
    feature = feature.to(device)
    label = label.to(device)
    time_open = time.time()
    # Train/validation split indices within the pool (offset by test_len below).
    train_idx = range(int(train_valid_len*train_valid_ratio))
    valid_idx = range(len(train_idx), train_valid_len)
    best_val = np.inf
    clip = 0           # early-stopping counter (reset to 1 on improvement)
    loss = []          # per-epoch [train, val, test-or-None] loss values
    para_list = []     # deep-copied layer dicts, one snapshot per epoch
    for epoch in range(epoch_n):
        try:
            # Best-effort cache clear; harmless no-op failure on CPU-only runs.
            torch.cuda.empty_cache()
        except:
            pass
        optimizer.zero_grad()
        output1, paras = model(feature.float(), adjs)
        # Train/validation rows sit after the leading test_len rows.
        loss_train1 = loss_fn(output1[list(np.array(train_idx)+test_len)], label[list(np.array(train_idx)+test_len)].float())
        loss_val1 = loss_fn(output1[list(np.array(valid_idx)+test_len)], label[list(np.array(valid_idx)+test_len)].float())
        if load_test_groundtruth == True:
            loss_test1 = loss_fn(output1[:test_len], label[:test_len].float())
            loss.append([loss_train1.item(), loss_val1.item(), loss_test1.item()])
        else:
            loss.append([loss_train1.item(), loss_val1.item(), None])
        # Periodic progress report.
        if epoch % print_epoch_step == 0:
            print("******************************************")
            print("Epoch {}/{}".format(epoch+1, epoch_n),
                  'loss_train: {:.4f}'.format(loss_train1.item()),
                  'loss_val: {:.4f}'.format(loss_val1.item()),
                  end = '\t'
                  )
            if load_test_groundtruth == True:
                print("Test loss= {:.4f}".format(loss_test1.item()), end = '\t')
            print('time: {:.4f}s'.format(time.time() - time_open))
        # Snapshot this epoch's layers (deep copy so later updates don't alias).
        para_list.append(paras.copy())
        for i in paras.keys():
            para_list[-1][i] = copy.deepcopy(para_list[-1][i])
        if early_stopping_patience > 0:
            # Compare at 4-decimal precision so tiny fluctuations don't reset patience.
            if torch.round(loss_val1, decimals=4) < best_val:
                best_val = torch.round(loss_val1, decimals=4)
                best_paras = paras.copy()
                best_loss = loss.copy()
                clip = 1
                for i in paras.keys():
                    best_paras[i] = copy.deepcopy(best_paras[i])
            else:
                clip += 1
                if clip == early_stopping_patience:
                    # NOTE(review): break happens before backward/step, so the
                    # final epoch's gradients are never applied (appears intentional).
                    break
        else:
            best_loss = loss.copy()
            best_paras = None
        # NOTE(review): best_paras/best_loss are tracked but never returned or
        # restored into the model -- confirm whether that is intended.
        loss_train1.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=clip_grad_max_norm)
        optimizer.step()
        if scheduler != None:
            try:
                scheduler.step()
            except:
                # Plateau-style schedulers require the monitored metric.
                scheduler.step(metrics = loss_val1)
    print("***********************Final Loss***********************")
    print("Epoch {}/{}".format(epoch+1, epoch_n),
          'loss_train: {:.4f}'.format(loss_train1.item()),
          'loss_val: {:.4f}'.format(loss_val1.item()),
          end = '\t'
          )
    if load_test_groundtruth == True:
        print("Test loss= {:.4f}".format(loss_test1.item()), end = '\t')
    print('time: {:.4f}s'.format(time.time() - time_open))
    torch.cuda.empty_cache()
    return output1.cpu(), loss, model.cpu()
| luoyuanlab/stdgcn | STdGCN/GCN.py | GCN.py | py | 9,989 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.nn.modules.module.Module",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.parameter.Parameter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 22,
"usage_type": "call"
},
{
"api... |
18567314308 |
# scrape British Columbia acupuncturists
#
# Flat Selenium script: searches the CTCMA public registry for province 'BC',
# expands the results grid to `number_of_rows` rows in a single page, then
# opens each practitioner's profile popup (rendered inside an iframe) and
# appends name/address/phone/email/company as tab-separated lines to out.txt.
number_of_rows = 2090 # set this before running
delim = "\t"  # NOTE(review): defined but unused -- the fh.write below hard-codes "\t"

from urllib.request import urlopen  # NOTE(review): imported but never used
import time

def SetHomeDirectory():
    # Change CWD so out.txt lands on the author's desktop (machine-specific path).
    import os
    os.chdir("C:\\Users\\Matt Scandale\\OneDrive - Council of Better Business Bureaus, Inc\\Desktop")

SetHomeDirectory()

iURL = "https://portal.ctcma.bc.ca/public"

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
driver.implicitly_wait(10)  # wait up to 10 s for elements to appear
driver.get(iURL)
print("Page title:", driver.title)

# ASP.NET control IDs on the registry page (brittle: break if the site changes).
id_province_field = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Sheet0_Input5_TextBox1"
id_submit_button = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Sheet0_SubmitButton"
id_page_size = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Grid1_ctl00_ctl02_ctl00_ChangePageSizeTextBox"
id_change_page_size = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Grid1_ctl00_ctl02_ctl00_ChangePageSizeLinkButton"
id_results_grid = "ctl01_TemplateBody_WebPartManager1_gwpciPublicRegistry_ciPublicRegistry_ResultsGrid_Grid1_ctl00"

out_filename = "out.txt"
print("writing to " + out_filename)
fh = open(out_filename, "w", encoding="utf8")

time.sleep(1)
# Fill the province filter and submit the search via injected JavaScript.
driver.execute_script("document.getElementById('" + id_province_field + "').value = 'BC'")
province = driver.find_element(by=By.ID, value=id_province_field)
print("Province: " + province.get_attribute('value'))
time.sleep(1)
driver.execute_script("document.getElementById('" + id_submit_button + "').click()")
time.sleep(1)
# Grow the grid page size so every record fits on one page.
driver.execute_script("document.getElementById('" + id_page_size + "').value = '" + str(number_of_rows) + "'")
time.sleep(1)
driver.execute_script("document.getElementById('" + id_change_page_size + "').click()")

# THIS IS ABSOLUTELY CRITICAL TO GET ALL ROWS!
print("waiting for all rows to load")
time.sleep(15)

results_grid = driver.find_element(by=By.ID, value=id_results_grid)
# NOTE(review): Telerik grids typically alternate "rgRow"/"rgAltRow" classes;
# selecting only "rgRow" may miss alternate rows -- verify the printed count.
rows = results_grid.find_elements(by=By.CLASS_NAME, value="rgRow")
print("Rows:", len(rows))

# Collect the profile link: first anchor of the first cell of each row.
links = []
for row in rows:
    cells = row.find_elements(by=By.XPATH, value="td")
    cell = cells[0]
    anchors = cell.find_elements(by=By.XPATH, value="a")
    anchor = anchors[0]
    links.append(anchor)
print("Links:", len(links))

row_number = 0
for link in links:
    row_number += 1
    time.sleep(3)
    # Leave whatever iframe the previous iteration switched into.
    driver.switch_to.default_content()
    link.click()
    #iframe = driver.find_element_by_xpath("/html/body/form/div[1]/table/tbody/tr[" + str(xcount) + "]/td[2]/iframe")
    iframes = driver.find_elements(by=By.TAG_NAME, value="iframe")
    print("Frames:", len(iframes))
    framecount = 0
    for iframe in iframes:
        framecount += 1
        if framecount == 1:
            # The profile popup appears to render in the first iframe -- confirm.
            print("switching context to frame " + str(framecount))
            driver.switch_to.frame(iframe)
    time.sleep(2)
    #panel = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_ContentPanel"]')
    #html = panel.get_attribute('innerHTML')
    person = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciNewContactMiniProfileCommon_ciNewContactMiniProfileCommon_contactName_fullName"]')
    person_value = person.get_attribute('innerHTML')
    address = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciAddress_ciAddress__address"]')
    address_value = address.get_attribute('innerHTML')
    phone = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciAddress_ciAddress__phoneNumber"]')
    phone_value = phone.get_attribute('innerHTML')
    email = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciAddress_ciAddress__email"]')
    email_value = email.get_attribute('innerHTML')
    company_value = "-"  # placeholder when no institute element exists
    try:
        company = driver.find_element(by=By.XPATH, value='//*[@id="ctl00_TemplateBody_WebPartManager1_gwpciNewContactMiniProfileCommon_ciNewContactMiniProfileCommon_contactName_institute"]')
        company_value = company.get_attribute('innerHTML')
    except:
        pass  # institute missing for some profiles; keep the "-" placeholder
    print(person_value, address_value, phone_value, email_value, company_value)
    fh.write(
        person_value + "\t" +
        address_value + "\t" +
        phone_value + "\t" +
        email_value + "\t" +
        company_value + "\t" +
        "\n"
    )
    #close_buttons = driver.find_elements_by_xpath("/html/body/form/div[1]/table/tbody/tr[1]/td[2]/table/tbody/tr/td[3]/ul/li[3]/a")
    #close_buttons = driver.find_elements_by_class_name("rwCloseButton")
    #for close_button in close_buttons:
    #    close_button.click()
fh.close()
driver.close()
print("end")
| mscandale-iabbb/research_public | sample_iabbb_bots/scrape_bc_acupuncturists_shared.py | scrape_bc_acupuncturists_shared.py | py | 4,974 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "time.sleep",
... |
4440001992 | import re
import six
import time
import inspect
import importlib
from .dataType import *
import spannerorm.base_model
from datetime import date
from .relation import Relation
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
class Helper(object):
    """Static introspection/validation helpers for spannerorm model classes."""

    @classmethod
    def is_property(cls, v):
        """
        Check whether *v* is a ``property`` object.

        Used as an ``inspect.getmembers`` predicate.

        :type v: object
        :param v: candidate class member
        :rtype: bool
        :return: True when *v* is a property
        """
        return isinstance(v, property)
@classmethod
def is_attr(cls, v):
"""
Check is model attr
:type: object
:param v:
"""
return isinstance(v, StringField) | isinstance(v, IntegerField) | isinstance(v, BoolField) \
| isinstance(v, IntegerField) | isinstance(v, FloatField) | isinstance(v, BytesField) \
| isinstance(v, DateField) | isinstance(v, TimeStampField) | isinstance(v, EnumField)
    @classmethod
    def is_relational_attr(cls, v):
        """
        Check whether *v* is a model relational attribute.

        Used as an ``inspect.getmembers`` predicate.

        :type v: object
        :param v: candidate class member
        :rtype: bool
        :return: True when *v* is a Relation
        """
        return isinstance(v, Relation)
    @classmethod
    def get_model_prop_by_name(cls, model_cls, prop_name):
        """
        Return the model property object named *prop_name*.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :type prop_name: str
        :param prop_name: property name
        :rtype: property | None
        :return: the matching property, or None when not found
        """
        for key, prop in inspect.getmembers(model_cls, Helper.is_property):
            if key == prop_name:
                return prop
        return None

    @classmethod
    def get_model_props_value_by_key(cls, model_obj, prop_name):
        """
        Return the current value of *model_obj*'s property named *prop_name*.

        :type model_obj: spannerorm.base_model.BaseModel
        :param model_obj: model instance
        :type prop_name: str
        :param prop_name: property name
        :rtype: object | None
        :return: the property value, or None when no such property exists
        """
        # Properties are defined on the class, but the value is read from the instance.
        for key, value in inspect.getmembers(model_obj.__class__, Helper.is_property):
            if key == prop_name:
                return model_obj.__getattribute__(key)
        return None

    @classmethod
    def get_model_props(cls, model_cls):
        """
        Return all properties of the model class.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :rtype: dict
        :return: {property_name: property object}
        """
        model_props = {}
        for key, value in inspect.getmembers(model_cls, Helper.is_property):
            model_props[key] = value
        return model_props

    @classmethod
    def get_model_attrs(cls, model_cls):
        """
        Return all field attributes of the model class.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :rtype: dict
        :return: {attribute_name: field object}; names use the '_name' convention
        """
        attrs = {}
        for key, value in inspect.getmembers(model_cls, Helper.is_attr):
            attrs[key] = value
        return attrs

    @classmethod
    def get_model_relations_attrs(cls, model_cls):
        """
        Return all relational attributes of the model class.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :rtype: dict
        :return: {attribute_name: Relation object}
        """
        attrs = {}
        for key, value in inspect.getmembers(model_cls, Helper.is_relational_attr):
            attrs[key] = value
        return attrs

    @classmethod
    def get_model_props_key_value(cls, model_obj):
        """
        Return every property of the instance as {name: current value}.

        :type model_obj: spannerorm.base_model.BaseModel
        :param model_obj: model instance
        :rtype: dict
        :return: {property_name: property value}
        """
        model_props = {}
        for key, value in inspect.getmembers(model_obj.__class__, Helper.is_property):
            model_props[key] = model_obj.__getattribute__(key)
        return model_props
    @classmethod
    def model_attr_by_prop(cls, model_cls, prop):
        """
        Return the field attribute backing a model property.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :type prop: property
        :param prop: model class property
        :rtype: DataType
        :return: backing field attribute
        :raises TypeError: when *prop* is not a property, or no backing attribute exists
        """
        if isinstance(prop, property) is False:
            raise TypeError('Invalid object property')
        # The backing attribute is resolved from the property getter's name.
        return Helper.model_attr_by_prop_name(model_cls, prop.fget.__name__)

    @classmethod
    def model_attr_by_prop_name(cls, model_cls, prop_name):
        """
        Return the field attribute backing the property named *prop_name*.

        Convention: property ``foo`` is backed by attribute ``_foo``.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :type prop_name: str
        :param prop_name: model class property name
        :rtype: DataType
        :return: backing field attribute
        :raises TypeError: when no such backing attribute exists
        """
        model_attr_name = '_' + prop_name
        model_attrs = Helper.get_model_attrs(model_cls)
        if model_attr_name not in model_attrs:
            raise TypeError('Criteria model property {} not exist'.format(model_attr_name))
        return model_attrs.get(model_attr_name)

    @classmethod
    def model_relational_attr_by_prop(cls, model_cls, prop):
        """
        Return the Relation attribute backing a model property.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :type prop: property
        :param prop: model class property
        :rtype: Relation
        :return: backing relational attribute
        :raises TypeError: when *prop* is not a property, or no backing attribute exists
        """
        if isinstance(prop, property) is False:
            raise TypeError('Invalid object property')
        return Helper.model_relational_attr_by_prop_name(model_cls, prop.fget.__name__)

    @classmethod
    def model_relational_attr_by_prop_name(cls, model_cls, prop_name):
        """
        Return the Relation attribute backing the property named *prop_name*.

        Convention: property ``foo`` is backed by attribute ``_foo``.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :type prop_name: str
        :param prop_name: model class property name
        :rtype: Relation
        :return: backing relational attribute
        :raises TypeError: when no such backing attribute exists
        """
        model_attr_name = '_' + prop_name
        model_attrs = Helper.get_model_relations_attrs(model_cls)
        if model_attr_name not in model_attrs:
            raise TypeError('Criteria model property {} not exist'.format(model_attr_name))
        return model_attrs.get(model_attr_name)

    @classmethod
    def get_db_columns(cls, model_cls):
        """
        Return the database column names of all field attributes.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :rtype: list
        :return: ``db_column`` of each field attribute (unordered, per dict iteration)
        """
        model_attrs = Helper.get_model_attrs(model_cls)
        columns = []
        for attr_name in model_attrs:
            attr = model_attrs.get(attr_name)
            columns.append(attr.db_column)
        return columns
    @classmethod
    def validate_model_prop(cls, model_cls, prop):
        """
        Validate the current value of the field attribute backing *prop*.

        Dispatches to the type-specific validator matching the field's class.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :type prop: property
        :param prop: model class property
        :rtype: dict
        :return: {'is_valid': bool, 'error_msg': str}
        :raises TypeError: when *prop* is not a property
        """
        if isinstance(prop, property) is False:
            raise TypeError('Invalid object property')
        model_attr_name = '_' + prop.fget.__name__
        model_attrs = Helper.get_model_attrs(model_cls)
        if model_attr_name in model_attrs:
            attr = model_attrs.get(model_attr_name)
            if isinstance(attr, IntegerField) or isinstance(attr, FloatField):
                return Helper.validate_number_field(attr.value, max_value=attr.max_value, min_value=attr.min_value,
                                                    null=attr.null)
            elif isinstance(attr, StringField):
                return Helper.validate_string_field(attr.value, max_length=attr.max_length, reg_exr=attr.reg_exr,
                                                    null=attr.null)
            elif isinstance(attr, BoolField):
                return Helper.validate_bool_field(attr.value, null=attr.null)
            elif isinstance(attr, TimeStampField):
                return Helper.validate_timestamp_field(attr.value, null=attr.null)
            elif isinstance(attr, DateField):
                return Helper.validate_date_field(attr.value, null=attr.null)
            elif isinstance(attr, EnumField):
                return Helper.validate_enum_field(attr.value, enum_list=attr.enum_list, null=attr.null)
        # Field types without a dedicated validator (e.g. BytesField), and
        # properties without a backing attribute, are treated as valid.
        return {
            'is_valid': True,
            'error_msg': None
        }
@classmethod
def validate_number_field(cls, value, max_value=None, min_value=None, null=True):
"""
Validate number field value
:type value: int | float
:param value: integer value
:type max_value: int
:param max_value: max allow number value
:type min_value: int
:param min_value: min allow integer value
:type null: bool
:param null: is allow None value
:rtype: dict
:return: {'is_valid':bool, 'error_msg':str}
"""
is_valid = True
error_msg = None
if null is False and value is None:
is_valid = False
error_msg = 'Property value should not be None'
if value is not None:
if isinstance(value, int) is False and isinstance(value, float) is False:
is_valid = False
error_msg = 'Data type should be <int>'
if max_value is not None and value > max_value:
is_valid = False
error_msg = 'Max allow value: {}'.format(max_value)
if min_value is not None and value < min_value:
is_valid = False
error_msg = 'Min allow value: {}'.format(min_value)
return {
'is_valid': is_valid,
'error_msg': error_msg
}
@classmethod
def validate_string_field(cls, value, max_length=None, reg_exr=None, null=True):
"""
Validate string field value
:type max_length: int
:param max_length: max allow string lenght
:type reg_exr: str
:param reg_exr: regex pattern
:type null: bool
:param null: is allow None value
:rtype: dict
:return: {'is_valid':bool, 'error_msg':str}
"""
is_valid = True
error_msg = None
if null is False and (value is None or (value is not None and str(value).strip() == '')):
is_valid = False
error_msg = 'Data should not be None or empty'
if value is not None:
if isinstance(value, six.string_types) is False:
is_valid = False
error_msg = 'Data type should be <str>'
if max_length is not None and len(value) > max_length:
is_valid = False
error_msg = 'Max allow string length: {}'.format(max_length)
if reg_exr is not None:
pattern = re.compile(reg_exr)
if pattern.match(value) is None:
is_valid = False
error_msg = 'String should match regex pattern: {}'.format(reg_exr)
return {
'is_valid': is_valid,
'error_msg': error_msg
}
@classmethod
def validate_bool_field(cls, value, null=True):
"""
Validate bool field value
:type value: bool
:param value:
:type null: bool
:param null: is allow None value
:rtype: dict
:return: {'is_valid':bool, 'error_msg':str}
"""
is_valid = True
error_msg = None
if null is False and value is None:
is_valid = False
error_msg = 'Data should not be None'
if value is not None and isinstance(value, bool) is False:
is_valid = False
error_msg = 'Data type should be <bool>'
return {
'is_valid': is_valid,
'error_msg': error_msg
}
@classmethod
def validate_timestamp_field(cls, value, null=True):
"""
Validate timestamp field value
:type value: int | float
:param value:
:type null: bool
:param null: is allow None value
:rtype: dict
:return: {'is_valid':bool, 'error_msg':str}
"""
is_valid = True
error_msg = None
if null is False and value is None:
is_valid = False
error_msg = 'Data should not be None'
if value is not None and isinstance(value, int) is False and isinstance(value, float) is False:
is_valid = False
error_msg = 'Data type should be <float> or <int> timestamp'
return {
'is_valid': is_valid,
'error_msg': error_msg
}
@classmethod
def validate_date_field(cls, value, null=True):
"""
Validate enum field value
:type value: date
:param value:
:type null: bool
:param null: is allow None value
:rtype: dict
:return: {'is_valid':bool, 'error_msg':str}
"""
is_valid = True
error_msg = None
if null is False and value is None:
is_valid = False
error_msg = 'Data should not be None'
if value is not None and isinstance(value, date) is False:
is_valid = False
error_msg = 'Data type should be <datetime.date>'
return {
'is_valid': is_valid,
'error_msg': error_msg
}
@classmethod
def validate_enum_field(cls, value, enum_list, null=True):
"""
Validate enum field value
:type value: object
:param value:
:type enum_list: list
:param enum_list: list of allow value
:type null: bool
:param null: is allow None value
:rtype: dict
:return: {'is_valid':bool, 'error_msg':str}
"""
is_valid = True
error_msg = None
if null is False and value is None:
is_valid = False
error_msg = 'Data should not be None'
if value is not None:
if value in enum_list is False:
is_valid = False
error_msg = 'Data value should be from list: {}'.format(enum_list)
return {
'is_valid': is_valid,
'error_msg': error_msg
}
    @classmethod
    def get_model_props_details(cls, model_cls):
        """
        Return a description of every field-backed property of the model.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :rtype: dict
        :return: {property_name: detail dict from :meth:`get_prop_details`}
        """
        model_props = Helper.get_model_props(model_cls)
        model_attrs = Helper.get_model_attrs(model_cls)
        props_details = {}
        for prop_name in model_props:
            # Only properties with a '_name' backing field attribute are described.
            model_attr_name = '_' + prop_name
            if model_attr_name in model_attrs:
                attr = model_attrs.get(model_attr_name)
                props_details.update({
                    prop_name: Helper.get_prop_details(attr)
                })
        return props_details

    @classmethod
    def get_relation_props_details(cls, model_cls):
        """
        Return a description of every relation-backed property of the model.

        :type model_cls: spannerorm.base_model.BaseModel
        :param model_cls: model class
        :rtype: dict
        :return: {property_name: detail dict from :meth:`get_relation_pop_detail`}
        """
        model_props = Helper.get_model_props(model_cls)
        model_relation_attrs = Helper.get_model_relations_attrs(model_cls)
        props_details = {}
        for prop_name in model_props:
            # Only properties with a '_name' backing Relation attribute are described.
            model_attr_name = '_' + prop_name
            if model_attr_name in model_relation_attrs:
                attr = model_relation_attrs.get(model_attr_name)
                props_details.update({
                    prop_name: Helper.get_relation_pop_detail(attr)
                })
        return props_details
    @classmethod
    def get_prop_details(cls, attr):
        """
        Return a serializable description of a field attribute.

        :type attr: DataType
        :param attr: field attribute
        :rtype: dict
        :return: common keys (db_column, null, default_value) plus a
            'data_type' tag and type-specific constraint keys
        """
        details = {
            'db_column': attr.db_column,
            'null': attr.null,
            'default_value': attr.default
        }
        # Independent isinstance checks (not elif): each matching type adds
        # its own keys; the last match's 'data_type' wins.
        if isinstance(attr, IntegerField):
            details.update({
                'data_type': 'IntegerField',
                'max_value': attr.max_value,
                'min_value': attr.min_value
            })
        if isinstance(attr, FloatField):
            details.update({
                'data_type': 'FloatField',
                'max_value': attr.max_value,
                'min_value': attr.min_value,
                'decimal_places': attr.decimal_places
            })
        if isinstance(attr, StringField):
            details.update({
                'data_type': 'StringField',
                'max_length': attr.max_length,
                'reg_exr': attr.reg_exr
            })
        if isinstance(attr, EnumField):
            details.update({
                'data_type': 'EnumField',
                'enum_list': attr.enum_list
            })
        if isinstance(attr, DateField):
            details.update({
                'data_type': 'DateField'
            })
        if isinstance(attr, TimeStampField):
            details.update({
                'data_type': 'TimeStampField'
            })
        if isinstance(attr, BoolField):
            details.update({
                'data_type': 'BoolField'
            })
        if isinstance(attr, BytesField):
            details.update({
                'data_type': 'BytesField'
            })
        return details
@classmethod
def get_relation_pop_detail(cls, attr):
"""
Return model relation attr field details
:type attr: Relation
:param attr:
:rtype: dict
:return:
"""
return {
'relation_type': attr.relation_type,
'relation_name': attr.relation_name,
'join_on': attr.join_on,
'refer_to': attr.refer_to
}
    @classmethod
    def init_model_with_default(cls, model_class):
        """
        Instantiate *model_class* and copy each field's default into its value.

        :type model_class: spannerorm.base_model.BaseModel
        :param model_class: model class (instantiated with no arguments)
        :rtype: spannerorm.base_model.BaseModel
        :return: the initialized model instance
        """
        model_object = model_class()
        model_attrs = Helper.get_model_attrs(model_object)
        for attr_name in model_attrs:
            attr = model_attrs.get(attr_name)
            # NOTE(review): if these field objects are class-level attributes,
            # assigning .value here mutates state shared by all instances --
            # confirm fields are per-instance before relying on this.
            if attr.default is not None:
                attr.value = attr.default
        return model_object
@classmethod
def model_cls_by_module_name(cls, prop_module_name):
"""
import module by name & return model
:type prop_module_name: str
:param prop_module_name:
:rtype: spannerorm.base_model.BaseModel
:return:
"""
prop_module = importlib.import_module(prop_module_name)
for name, model in inspect.getmembers(prop_module):
if inspect.isclass(model) and prop_module_name == model.__module__:
return model
return None
@classmethod
def process_result_set(cls, results):
data = []
for row in results:
row_data = {}
for col in row:
index = 0
for field in results.fields:
if isinstance(row[index], six.string_types):
value = str(row[index])
elif isinstance(row[index], DatetimeWithNanoseconds):
value = time.mktime(row[index].timetuple())
else:
value = row[index]
field_name = str(field.name)
row_data[field_name] = value
index += 1
data.append(row_data)
return data
| sijanonly/spanner-orm | spannerorm/helper.py | helper.py | py | 19,932 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "relation.Relation",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "inspect.getmembers",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "inspect.getmembers",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "inspect.g... |
71251001 | import torch
import torch.nn as nn
import torch.utils.data as Data
from torch.autograd import Variable
import torch.nn.functional as F
import time
import numpy as np
assert F
import csv,random
from imblearn.over_sampling import SMOTE
class Datamanager():
def __init__(self):
self.dataset = {}
self.threshold = 1e-10 ## The threshold for classifying the label of the sample after simoid function
self.normalize = {} ## Record the mean and standard deviation for testing set normalization
self.over_sample = False ## Determine whether copy the positive (attribute #4 = 2) samples
self.over_sample_rate = 150 ## Number of copies
self.down_sample = False ## Determine whether delete the negative (attribute #4 = 1) samples
self.down_sample_rate = 2 ## Make number of negative samples = down_sample_rate * Number of positive samples
self.smote = True ## Determine whether use SMOTE to generate minor class samples source: https://imbalanced-learn.org/en/stable/generated/imblearn.over_sampling.SMOTE.html
self.weighted_loss = True ## Determine whether adjust the weight of BCE loss function
self.weighted_loss_rate = 0.1 ## Weight of negative samples in loss function ( 1 - weighted_loss_rate for positive samples)
def get_data(self,name,file_name,b_size,args,shuf=True):
with open(file_name,newline='') as csvfile:
rows = csv.reader(csvfile) ## Read file
data = [] ## Store the data from file
for row in rows:
data.append(row)
data = data[2:]
data = np.array(data)
if name == 'train' :
data = np.delete(data,2,0) ## Missing data => remove
data = np.delete(data,0,0) ## Positive(attribute #4 = 2) outlier => remove
data = np.delete(data,4,0)
data = np.delete(data,5,0)
data = np.delete(data,6,0)
data = np.delete(data,11,0)
data = np.delete(data,2542,0)
if name == 'test' :
data = np.delete(data,1103,0)
data = np.delete(data,1102,0)
data = np.delete(data,699,0)
data = np.delete(data,5,0)
data = np.delete(data,176,1) ## These columns have std = 0 => remove
data = np.delete(data,167,1)
data = np.delete(data,166,1)
data = np.delete(data,165,1)
data = np.delete(data,5,1)
data = np.delete(data,4,1)
data = np.delete(data,166,1)
data = np.delete(data,165,1)
data = np.delete(data,164,1)
for i in range(data.shape[1]):
if i == 3 :
for j in range(data.shape[0]): ## Transform label of attribute #4 '2' to 1(positive), '1' to 0(negative)
if data[j][i] == '1':
data[j][i] = 0
elif data[j][i] == '2':
data[j][i] = 1
else:
print('error target')
elif data[0][i] == 'TRUE' or data[0][i] == 'FALSE': # ## Transform label 'TRUE' to 1, 'Negative' to 0
for j in range(data.shape[0]):
if data[j][i] == 'TRUE':
data[j][i] = 1.0
elif data[j][i] == 'FALSE':
data[j][i] = 0.0
else:
print(j,i,data[j][i])
print('other type')
mean = data[:,i].astype(np.double).mean() ## Normalization. Record mean and standard deviation
std = data[:,i].astype(np.double).std()
if(std == 0):
print(i)
data[:,i] = (data[:,i].astype(np.double) - mean) / std
self.normalize[i] = [mean,std]
else:
if name == 'train': ## Normalization. Record mean and standard deviation
mean = data[:,i].astype(np.double).mean()
std = data[:,i].astype(np.double).std()
if(std == 0):
print(i)
data[:,i] = (data[:,i].astype(np.double) - mean) / std
self.normalize[i] = [mean,std]
else:
data[:,i] = (data[:,i].astype(np.double) - self.normalize[i][0]) / self.normalize[i][1]
if name == 'train' :
np.random.shuffle(data)
Y = data[:int(data.shape[0]*0.9),3].reshape(-1,1).astype(np.double) ## Split training and validation set, and extract attribute #4 as targets
Y_val = data[int(data.shape[0]*0.9):,3].reshape(-1,1).astype(np.double)
X = np.delete(data,3,1).astype(np.double)[:int(data.shape[0]*0.9),:]
X_val = np.delete(data,3,1).astype(np.double)[int(data.shape[0]*0.9):,:]
if self.over_sample or self.down_sample or self.smote:
count_0 = 0
count_1 = 0
count_1_list = []
for i in range(Y.shape[0]):
if Y[i][0] == 0:
count_0 = count_0 + 1
else:
count_1 = count_1 + 1
count_1_list.append(i)
print('count_0:',count_0)
print('count_1:',count_1)
if self.over_sample: ## Copy the positive (attribute #4 = 2) samples
ori_one_X , ori_one_Y = X[count_1_list] , Y[count_1_list]
for i in range(self.over_sample_rate):
noise = np.random.normal(0, 0.3, ori_one_X.shape)
add_one_X = ori_one_X + noise
X = np.concatenate((X,add_one_X),axis = 0)
Y = np.concatenate((Y,ori_one_Y),axis = 0)
if self.down_sample: ## Delete the negative (attribute #4 = 1) samples
number = int(count_0 - count_1 * (self.over_sample_rate + 1) * self.down_sample_rate)
while(number > 0):
for i in range(Y.shape[0]):
if Y[i][0] == 0:
X = np.delete(X,i,0)
Y = np.delete(Y,i,0)
number = number - 1
break
if self.smote: ## Use SMOTE to generate minor class(positive) samples
sm = SMOTE(sampling_strategy = 1)
X, Y = sm.fit_resample(X, Y)
Y = Y.reshape(-1,1)
count_0 = 0
count_1 = 0
for i in range(Y.shape[0]):
if Y[i][0] == 0:
count_0 = count_0 + 1
else:
count_1 = count_1 + 1
#print('count_0:',count_0)
#print('count_1:',count_1)
#print(X.shape)
#print(Y.shape)
X,Y = torch.from_numpy(X).cuda(),torch.from_numpy(Y).cuda() ## Convert numpy array to tensor for Pytorch
train_dataset = Data.TensorDataset(data_tensor=X[:], target_tensor=Y[:]) ## Wrap up the input/target tensor into TensorDataset source: https://pytorch.org/docs/stable/data.html
self.dataset['train'] = Data.DataLoader(dataset=train_dataset, batch_size=b_size, shuffle=shuf) ## Put the TensorDataset in Dataloader (stored in a dictionary), shuffling the samples source: https://pytorch.org/docs/stable/data.html
X_val,Y_val = torch.from_numpy(X_val).cuda(),torch.from_numpy(Y_val).cuda()
val_dataset = Data.TensorDataset(data_tensor=X_val[:], target_tensor=Y_val[:])
self.dataset['val'] = Data.DataLoader(dataset=val_dataset, batch_size=b_size, shuffle=shuf)
elif name == 'test': ## Process the testing set
Y = data[:,3].reshape(-1,1).astype(np.double)
X = np.delete(data,3,1).astype(np.double)
X,Y = torch.from_numpy(X).cuda(),torch.from_numpy(Y).cuda()
train_dataset = Data.TensorDataset(data_tensor=X[:], target_tensor=Y[:])
self.dataset['test'] = Data.DataLoader(dataset=train_dataset, batch_size=b_size, shuffle=shuf) ## Put the TensorDataset in Dataloader (stored in a dictionary), not shuffling the samples source: https://pytorch.org/docs/stable/data.html
def train(self,model,trainloader,epoch): ## Train the model
model.train() ## Set to training mode
optimizer = torch.optim.Adam(model.parameters()) ## Use Adam optimizer to optimize all DNN parameters source: https://pytorch.org/docs/stable/optim.html
loss_func = nn.BCELoss() ## Use binary cross entropoy for model evaluation source: https://pytorch.org/docs/stable/nn.html
total_loss = 0 ## Calculate total loss in a epoch
t1_p1 = 0 ## Confusion matrix initialization
t1_p0 = 0
t0_p1 = 0
t0_p0 = 0
for batch_index, (x, y) in enumerate(trainloader): ## Process a batch of data in each timestep
x, y= Variable(x).cuda(), Variable(y).cuda()
output = model(x) ## Use present model to forecast the the result
if self.weighted_loss: ## Adjust the weight of BCE loss functional source: https://pytorch.org/docs/stable/nn.html
weight = np.empty([len(x)])
for i in range(len(x)):
weight[i] = self.weighted_loss_rate
arr = np.where(y.data == 1)
weight[arr[0].tolist()] = 1 - self.weighted_loss_rate
weight = torch.from_numpy(weight).cuda().double().view(len(x),1)
loss_func = nn.BCELoss(weight = weight)
loss = loss_func(output,y)
optimizer.zero_grad() ## Set the gradient in the previous time step to zero
loss.backward() ## Back propagate source: https://pytorch.org/docs/stable/optim.html
optimizer.step() ## Gradient descent source: https://pytorch.org/docs/stable/autograd.html
if batch_index % 4 == 0: ## Print model status source: https://pytorch.org/docs/stable/optim.html
print('\rTrain Epoch: {} | [{}/{} ({:.0f}%)]\t '.format(
epoch, batch_index * len(x), len(trainloader.dataset),
100. * batch_index / len(trainloader)),end='')
total_loss+= loss.data[0]*len(x) ## Sum up batch loss
pred = np.empty([len(x),1]) ## Calculate confusion matrix
output = output.cpu().data.numpy()
for i in range(len(x)):
if(output[i] > self.threshold):
pred[i,0] = 1
else:
pred[i,0] = 0
y = y.cpu().data.numpy()
for i in range(pred.shape[0]):
if y[i] == 1 and pred[i] == 1:
t1_p1 = t1_p1 + 1
elif y[i] == 1 and pred[i] == 0:
t1_p0 = t1_p0 + 1
elif y[i] == 0 and pred[i] == 1:
t0_p1 = t0_p1 + 1
elif y[i] == 0 and pred[i] == 0:
t0_p0 = t0_p0 + 1
total_loss/= len(trainloader.dataset)
print('Total loss: {:.4f}'.format(total_loss)) ## Print model status
print('t1_p1: ',t1_p1 , 't0_p1: ',t0_p1 )
print('t1_p0: ',t1_p0 , 't0_p0: ',t0_p0 )
return total_loss
    def val(self,model,name,valloader): ## Evaluate the model on a held-out set
        """Run one evaluation pass over *valloader* and print the results.

        Uses legacy (PyTorch <= 0.3) APIs: ``Variable(..., volatile=True)``
        disables autograd, ``.data[0]`` extracts a scalar loss, and
        ``size_average=False`` sums (rather than averages) the BCE loss.
        NOTE(review): requires CUDA — every batch is moved with ``.cuda()``.

        :param model: trained nn.Module producing sigmoid probabilities
        :param name: label used in the printed report (e.g. 'Val')
        :param valloader: DataLoader yielding (x, y) batches
        :return: average loss over the whole dataset (float)
        """
        model.eval() ## Set to evaluation mode (disables dropout)
        val_loss = 0 ## Accumulates the summed loss over all batches
        t1_p1 = 0 ## Confusion matrix initialization (true label _ / predicted _)
        t1_p0 = 0
        t0_p1 = 0
        t0_p0 = 0
        for x, y in valloader:
            x, y = Variable(x, volatile=True).cuda(), Variable(y,volatile=True).cuda()
            output = model(x) ## Use present model to forecast the result
            val_loss += F.binary_cross_entropy(output, y, size_average=False).data[0] ## Sum up batch loss
            pred = np.empty([len(x),1]) ## Hard predictions via self.threshold
            # NOTE: ``output`` is rebound from a tensor to a numpy array here.
            output = output.cpu().data.numpy()
            for i in range(len(x)):
                if(output[i] > self.threshold):
                    pred[i,0] = 1
                else:
                    pred[i,0] = 0
            # ``y`` is likewise rebound to a numpy array for the tally below.
            y = y.cpu().data.numpy()
            for i in range(pred.shape[0]):
                if y[i] == 1 and pred[i] == 1:
                    t1_p1 = t1_p1 + 1
                elif y[i] == 1 and pred[i] == 0:
                    t1_p0 = t1_p0 + 1
                elif y[i] == 0 and pred[i] == 1:
                    t0_p1 = t0_p1 + 1
                elif y[i] == 0 and pred[i] == 0:
                    t0_p0 = t0_p0 + 1
        val_loss /= len(valloader.dataset) ## Summed loss -> per-sample average
        print(name , ' set: Average loss: {:.4f}'.format(val_loss)) ## Print model status
        print('t1_p1: ',t1_p1 , 't0_p1: ',t0_p1 )
        print('t1_p0: ',t1_p0 , 't0_p0: ',t0_p0 )
        return val_loss
class DNN(nn.Module): ## Fully-connected feed-forward network
    """Multi-layer perceptron whose layer widths come from ``args.unit``.

    Hidden layers are Linear -> ReLU -> Dropout(0.2); the output layer is
    Linear -> Dropout(0.2) -> Sigmoid (dropout *after* the final linear is
    unusual but kept as written).
    """
    def __init__(self,args):
        super(DNN, self).__init__()
        print(args.unit)  # echo the configured layer widths
        self.den=nn.ModuleList()
        for i in range(1,len(args.unit)-1): ## Set up hidden layers
            self.den.append( nn.Sequential(
                nn.Linear(args.unit[i-1], args.unit[i]), ## Source: https://pytorch.org/docs/stable/nn.html
                nn.ReLU(),
                nn.Dropout(0.2)
            ))
        # Output block: maps the last hidden width to the output width.
        self.den.append( nn.Sequential(
            nn.Linear(args.unit[-2], args.unit[-1]),
            nn.Dropout(0.2),
            nn.Sigmoid(),
        ))
    def forward(self, x): ## Apply each block in order
        for i in self.den:
            x = i(x)
return x | b04901056/dsa2017 | qualcomm/nn.py | nn.py | py | 17,265 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.functional",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_nu... |
72640523303 | #! /usr/bin/env python3
"""
--- besspin.py is the main BESSPIN-Tool-Suite program. All documentation and features are based on
solely executing this file. Please do not execute any other file.
--- usage: besspin.py [-h] [-c CONFIGFILE | -cjson CONFIGFILESERIALIZED]
[-w WORKINGDIRECTORY] [-l LOGFILE] [-d]
[-ep {devHost,ciOnPrem,ciAWS,awsProd,awsDev}] [-job JOBID]
BESSPIN (Balancing Evaluation of System Security Properties with Industrial Needs)
optional arguments:
-h, --help show this help message and exit
-c CONFIGFILE, --configFile CONFIGFILE
Overwrites the default config file: ./config.ini
-cjson CONFIGFILESERIALIZED, --configFileSerialized CONFIGFILESERIALIZED
Overwrites and augments the default production
settings
-w WORKINGDIRECTORY, --workingDirectory WORKINGDIRECTORY
Overwrites the default working directory: ./workDir/
-l LOGFILE, --logFile LOGFILE
Overwrites the default logFile: ./${workDir}/besspin.log
-d, --debug Enable debugging mode.
-ep {devHost,ciOnPrem,ciAWS,awsProd,awsDev}, --entrypoint {devHost,ciOnPrem,ciAWS,awsProd,awsDev}
The entrypoint
-job JOBID, --jobId JOBID
The job ID in production mode.
--- Defaults:
workingDirectory = ./workDir
configFile = ./config.ini
logFile = ./${workDir}/besspin.log
logging level = INFO
entrypoint = devHost
"""
try:
from besspin.base.utils.misc import *
from besspin.base.config import loadConfiguration, genProdConfig
from besspin.target.launch import startBesspin, endBesspin, resetTarget
from besspin.cyberPhys.launch import startCyberPhys, endCyberPhys
from besspin.base.utils import aws
from besspin.base.threadControl import createBesspinLocks
import logging, argparse, os, shutil, atexit, signal
except Exception as exc:
try:
#check we're in nix
import sys
if (sys.executable.split('/')[1] != 'nix'):
print (f"(Error)~ Please run within a nix shell. [Run <nix-shell> in besspin directory].")
else:
raise
except:
print(f"(Error)~ <{exc.__class__.__name__}>: {exc}")
print(f"(Info)~ End of BESSPIN! [Exit code -1:Fatal]")
exit(-1)
def main (xArgs):
    """Set up the working directory, log file, and configuration, then
    launch BESSPIN in the selected mode (cyberPhys / production / default).

    :param xArgs: argparse.Namespace from the CLI parser in ``__main__``.
    """
    # BUG FIX: ``re`` is used below for jobId validation but is absent from
    # this module's explicit import list (it may or may not be re-exported
    # by the ``misc`` star-import). Import it locally so the validation
    # cannot raise NameError.
    import re
    # Create working Directory
    repoDir = os.path.abspath(os.path.dirname(__file__))
    if (xArgs.workingDirectory):
        workDir = os.path.abspath(xArgs.workingDirectory)
    else:
        workDir = os.path.join(repoDir,'workDir')
    if (os.path.isdir(workDir)): # already exists, delete
        try:
            shutil.rmtree(workDir)
        except Exception as exc:
            print(f"(Error)~ Failed to delete <{workDir}>.\n{formatExc(exc)}.")
            exitBesspin(EXIT.Configuration, preSetup=True)
    try:
        os.mkdir(workDir)
    except Exception as exc:
        print(f"(Error)~ Failed to create the working directory <{workDir}>.\n{formatExc(exc)}.")
        exitBesspin(EXIT.Files_and_paths, preSetup=True)
    # Check log file (created eagerly so logging failures surface early)
    if (xArgs.logFile):
        logFile = os.path.abspath(xArgs.logFile)
    else:
        logFile = os.path.join(workDir,'besspin.log')
    try:
        fLog = open(logFile,'w')
        fLog.close()
    except Exception as exc:
        print(f"(Error)~ Failed to create the log file <{logFile}>.\n{formatExc(exc)}.")
        exitBesspin(EXIT.Files_and_paths, preSetup=True)
    # Entrypoint defaults to the developer host
    if(xArgs.entrypoint is None):
        xArgs.entrypoint = 'devHost'
    # Check jobId is valid, if provided
    if(xArgs.jobId):
        if(not(re.match("^[A-Za-z0-9-_+.]+$", xArgs.jobId))):
            print("(Error)~ Provided jobId contained invalid character(s). It must match regex '[A-Za-z0-9-_+.]'")
            exitBesspin(EXIT.Files_and_paths, preSetup=True)
    # setup the logging
    logLevel = logging.DEBUG if (xArgs.debug) else logging.INFO
    logging.basicConfig(filename=logFile,filemode='w',format='%(asctime)s: (%(levelname)s)~ %(message)s',datefmt='%I:%M:%S %p',level=logLevel)
    printAndLog(f"Welcome to BESSPIN!")
    #Prepare the peaceful exit
    setSetting('trash',trashCanObj())
    atexit.register(exitPeacefully,getSetting('trash'))
    # Store critical settings
    setSetting('repoDir', repoDir)
    setSetting('workDir', workDir)
    setSetting('logFile', logFile)
    setSetting('debugMode', xArgs.debug)
    setSetting('besspinEntrypoint',xArgs.entrypoint)
    setSetting('prodJobId', xArgs.jobId)
    # Load all configuration and setup settings
    setupEnvFile = os.path.join(repoDir,'besspin','base','utils','setupEnv.json')
    setSetting('setupEnvFile', setupEnvFile)
    if (xArgs.configFile):
        configFile = os.path.abspath(xArgs.configFile)
    elif (xArgs.configFileSerialized):
        # Production mode: deserialize the provided settings into an ini file
        configFile = os.path.join(workDir,'production.ini')
        genProdConfig (xArgs.configFileSerialized, configFile)
        printAndLog(f"Configuration deserialized successfully to <{configFile}>.")
    else:
        configFile = os.path.join(repoDir,'config.ini')
        printAndLog(f"Using the default configuration in <{configFile}>.")
    setSetting('configFile', configFile)
    #Load the config file(s)
    loadConfiguration(configFile)
    #Create the global semaphores/Locks
    createBesspinLocks()
    #launch the tool
    if (isEqSetting('mode','cyberPhys')):
        startCyberPhys()
        endCyberPhys()
    else:
        xTarget = startBesspin()
        instruction = None
        if (isEqSetting('mode','production')):
            def sendSuccessMsgToPortal (nodeSuffix, reasonSuffix):
                # Helper: report a success event back to the AWS portal queue.
                aws.sendSQS(getSetting(f'{getSetting("besspinEntrypoint")}SqsQueueTX'), logAndExit, 'success',
                            getSetting('prodJobId'), f"{getSetting('prodJobId')}-{nodeSuffix}",
                            reason=f'besspin-production-{reasonSuffix}',
                            hostIp=aws.getInstanceIp(logAndExit),
                            fpgaIp=getSetting('productionTargetIp')
                        )
                printAndLog(f"Sent {reasonSuffix} message to the SQS queue.")
            # Notify portal that we have deployed successfully
            sendSuccessMsgToPortal('DEPLOY','deployment')
            # Wait for portal to instruct us to do something
            while (instruction != 'termination'):
                instruction = aws.pollPortalIndefinitely (getSetting(f'{getSetting("besspinEntrypoint")}S3Bucket'), xTarget.process, logAndExit)
                if (instruction == 'deadProcess'):
                    warnAndLog ("The main process is dead. Will exit without a notice from Portal.")
                    break
                printAndLog(f"Received {instruction} notice from Portal.")
                if (instruction == 'reset'):
                    # execute reset flow
                    xTarget = resetTarget(xTarget)
                    # Notify portal that we have reset successfully
                    sendSuccessMsgToPortal('RESET','reset')
        endBesspin(xTarget,(instruction=='deadProcess'))
    exitBesspin(EXIT.Success)
if __name__ == '__main__':
    # Reading the bash arguments (see the module docstring for usage)
    xArgParser = argparse.ArgumentParser (description='BESSPIN (Balancing Evaluation of System Security Properties with Industrial Needs)')
    # -c and -cjson are mutually exclusive: a config file XOR serialized settings
    xGroupConfig = xArgParser.add_mutually_exclusive_group(required=False)
    xGroupConfig.add_argument ('-c', '--configFile', help='Overwrites the default config file: ./config.ini')
    xGroupConfig.add_argument ('-cjson', '--configFileSerialized', help='Overwrites and augments the default production settings')
    xArgParser.add_argument ('-w', '--workingDirectory', help='Overwrites the default working directory: ./workDir/')
    xArgParser.add_argument ('-l', '--logFile', help='Overwrites the default logFile: ./${workDir}/besspin.log')
    xArgParser.add_argument ('-d', '--debug', help='Enable debugging mode.', action='store_true')
    xArgParser.add_argument ('-ep', '--entrypoint', choices=['devHost','ciOnPrem','ciAWS','awsProd','awsDev'], help='The entrypoint')
    xArgParser.add_argument ('-job', '--jobId', help='The job ID in production mode.')
    xArgs = xArgParser.parse_args()
    #Trapping the signals so the tool can exit gracefully on Ctrl-C / kill
    signalsToCatch = [signal.SIGINT, signal.SIGTERM]
    for xSignal in signalsToCatch:
        signal.signal(xSignal,exitOnInterrupt)  # exitOnInterrupt comes from the misc star-import
    main(xArgs)
| GaloisInc/BESSPIN-Tool-Suite | besspin.py | besspin.py | py | 8,521 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "sys.executable.split",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sys.executable",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
72002642984 | import os.path
from django.http import HttpResponse
from cmis_storage.storage import CMISStorage
def get_file(request, path):
    """
    Return a file stored in the CMIS-compatible content management system
    as an attachment download.

    :param request: The incoming HTTP request (unused beyond routing)
    :param path: The full path of the file within the CMS
    :return: HttpResponse carrying the file bytes with a
        Content-Disposition attachment header
    """
    _, filename = os.path.split(path)
    storage = CMISStorage()
    stream = storage.open_stream(path)
    response = HttpResponse()
    # BUG FIX: quote the filename — unquoted names containing spaces or
    # semicolons produce an invalid Content-Disposition header.
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    response.write(stream.read())
    return response
| JoseTomasTocino/cmis_storage | cmis_storage/views.py | views.py | py | 547 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.path.split",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "cmis_storage.storage.CMI... |
30382531462 | import os
import random
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import TensorDataset
import torch.nn as nn
import torch.nn.functional as F
import os
from transformers import logging
import warnings
import yaml
from pathlib import Path
# ``logging`` here is transformers' logging shim, not the stdlib module.
logging.set_verbosity_warning()
# Dataset locations for the SST-2 sentiment task (relative to the CWD).
task_path = './tasks/'
train_path = './SST2_train.tsv'
val_path = './SST2_dev.tsv'
test_path = './SST2_test.tsv'
# Prefer GPU when available; all helpers below fall back to CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_config(config_path='/root/configs/config.yaml'):
    """Load the YAML run configuration.

    Generalized: the previously hard-coded location is now the default of
    ``config_path``, so existing callers are unaffected while tests and
    other deployments can point elsewhere.

    :param config_path: path to the YAML configuration file
    :return: parsed configuration dict, or {} when YAML parsing fails.
        A missing file still raises OSError, as before.
    """
    with open(Path(config_path), "r") as config_file:
        try:
            run_config = yaml.safe_load(config_file)
        except yaml.YAMLError as exc:
            print(exc)
            return {}
    return run_config
def transform_dataset(transformer, dataset, dataloader_flag = False):
    """Run a dataset's features through an sklearn-style transformer.

    With ``transformer=None`` the dataset passes through untouched
    (optionally wrapped in a DataLoader with batch_size=128, shuffle=True).
    Otherwise the features are flattened to 2-D, fed to
    ``transformer.fit_transform``, and repacked into a TensorDataset that
    also carries ``.features`` / ``.targets`` attributes (its DataLoader
    uses batch_size=512, shuffle=False).
    """
    if transformer is None:
        if dataloader_flag:
            return torch.utils.data.DataLoader(dataset,
                            batch_size = 128, shuffle = True)
        return dataset
    # Flatten each sample to a single row for the transformer.
    flat = dataset.features.reshape(dataset.features.shape[0], -1)
    transformed = torch.tensor(transformer.fit_transform(flat)).float()
    labels = dataset.targets.clone().detach()
    repacked = TensorDataset(transformed, labels)
    repacked.features = transformed
    repacked.targets = labels
    if dataloader_flag:
        return torch.utils.data.DataLoader(repacked,
                            batch_size = 512, shuffle = False)
    return repacked
def create_feature_loader(model, data, file_path, batch_size = 512, shuffle= False):
    """Build a DataLoader over extracted features, caching them on disk.

    If ``file_path`` already exists the feature dataset is loaded from it;
    otherwise ``model.get_feature_dataset`` recomputes (and is expected to
    persist) it.
    """
    if not os.path.isfile(file_path):
        print("reconstruct")
        feature_dataset = model.get_feature_dataset(data, file_path)
    else:
        print("loading from dataset")
        feature_dataset = torch.load(file_path)
    return torch.utils.data.DataLoader(feature_dataset,
                    batch_size=batch_size, shuffle = shuffle)
| XiaoyanAmy/HLT_Coursework | src/util.py | util.py | py | 2,186 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "transformers.logging.set_verbosity_warning",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "transformers.logging",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 23,
"usage_type": "call"
},
{
"api_... |
14859359219 |
'''
Created on 2017��3��23��
@author: gb
'''
import sys
import os
import jieba
import gensim,logging
import sys
def savefile(savepath,content):
    """Write *content* to *savepath* as text.

    Uses a ``with`` block so the handle is closed even if the write raises
    (the original leaked the handle on error).
    """
    with open(savepath,"w") as fp:
        fp.write(content)
def readfile(path):
    """Return the full text content of *path*.

    Uses a ``with`` block so the handle is closed even if the read raises
    (the original leaked the handle on error).
    """
    with open(path,"r") as fp:
        return fp.read()
# Segment every document under corpus_path with jieba and mirror the
# category-directory layout into seg_path.
# NOTE(review): if this file is saved as jieba.py, ``import jieba`` may
# import this module itself instead of the jieba package — confirm.
corpus_path="train_corpus_small/"
seg_path="train_corpus_seg/"
catelist=os.listdir(corpus_path)  # one sub-directory per category
print(catelist)
for mydir in catelist:
    class_path=corpus_path+mydir+"/"
    seg_dir=seg_path+mydir+'/'
    if not os.path.exists(seg_dir):
        os.makedirs(seg_dir)
    file_list=os.listdir(class_path)
    for file_path in file_list:
        fullname=class_path+file_path
        content=readfile(fullname).strip()
        # Drop Windows line breaks before segmentation.
        content=content.replace("\r\n","").strip()
        content_seg=jieba.cut(content)
        # Persist the segmented text as space-separated tokens.
        savefile(seg_dir+file_path," ".join(content_seg))
print("jieba end")
| guob1l/gensimW2V | jieba.py | jieba.py | py | 966 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number... |
3169544582 | # ported from uniborg thanks to @s_n_a_p_s , @r4v4n4 , @spechide and @PhycoNinja13b
#:::::Credit Time::::::
# 1) Coded By: @s_n_a_p_s
# 2) Ported By: @r4v4n4 (Noodz Lober)
# 3) End Game Help By: @spechide
# 4) Better Colour Profile Pic By @PhycoNinja13b
import asyncio
import base64
import os
import random
import shutil
import time
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from pySmartDL import SmartDL
from telethon.errors import FloodWaitError
from telethon.tl import functions
from . import AUTONAME, BOTLOG, BOTLOG_CHATID, DEFAULT_BIO
from .sql_helper.globals import addgvar, delgvar, gvarstatus
# Module-level defaults for the auto-profile features.
# NOTE(review): ``Config``, ``bot``, ``admin_cmd`` etc. are not imported in
# this file — presumably injected by the userbot framework; confirm.
DEFAULTUSERBIO = DEFAULT_BIO or " рЌ»рЌЕрЈєрјбрЈєрЉјрЈђ рЈърЈєрЈдрЌ┤ рјбрЈєрЌ░рЌ┤ "
CHANGE_TIME = Config.CHANGE_TIME  # seconds between profile updates
DEFAULTUSER = AUTONAME or Config.ALIVE_NAME
FONT_FILE_TO_USE = "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf"
# Scratch image paths under ./userbot/ used by the update loops below.
autopic_path = os.path.join(os.getcwd(), "userbot", "original_pic.png")
digitalpic_path = os.path.join(os.getcwd(), "userbot", "digital_pic.png")
autophoto_path = os.path.join(os.getcwd(), "userbot", "photo_pfp.png")
digitalpfp = Config.DIGITAL_PIC or "https://telegra.ph/file/aeaebe33b1f3988a0b690.jpg"
@bot.on(admin_cmd(pattern="autopic ?(.*)"))
async def autopic(event):
    """``.autopic [angle]`` — enable the rotating-profile-pic loop."""
    if event.fwd_from:
        return
    if Config.DEFAULT_PIC is None:
        return await edit_delete(
            event,
            "**Error**\nFor functing of autopic you need to set DEFAULT_PIC var in Heroku vars",
            parse_mode=parse_pre,
        )
    # Fetch DEFAULT_PIC to the local scratch path (busy-waits on the download).
    downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=False)
    downloader.start(blocking=False)
    while not downloader.isFinished():
        pass
    input_str = event.pattern_match.group(1)
    if input_str:
        try:
            input_str = int(input_str)
        except ValueError:
            input_str = 60  # non-numeric argument falls back to 60 degrees
    else:
        if gvarstatus("autopic_counter") is None:
            addgvar("autopic_counter", 30)
    if gvarstatus("autopic") is not None and gvarstatus("autopic") == "true":
        return await edit_delete(event, f"`Autopic is already enabled`")
    addgvar("autopic", True)
    if input_str:
        addgvar("autopic_counter", input_str)
    await edit_delete(event, f"`Autopic has been started by my Master`")
    await autopicloop()
@bot.on(admin_cmd(pattern="digitalpfp$"))
async def main(event):
    """``.digitalpfp`` — enable the digital-clock profile-pic loop."""
    if event.fwd_from:
        return
    downloader = SmartDL(digitalpfp, digitalpic_path, progress_bar=False)
    downloader.start(blocking=False)
    while not downloader.isFinished():
        pass
    if gvarstatus("digitalpic") is not None and gvarstatus("digitalpic") == "true":
        return await edit_delete(event, f"`Digitalpic is already enabled`")
    addgvar("digitalpic", True)
    await edit_delete(event, f"`digitalpfp has been started by my Master`")
    await digitalpicloop()
@bot.on(admin_cmd(pattern="bloom$"))
async def autopic(event):
    """``.bloom`` — enable the random-colour profile-pic loop.

    NOTE(review): reuses the name ``autopic`` (shadows the handler above);
    harmless for dispatch since registration happens via the decorator.
    """
    if event.fwd_from:
        return
    if Config.DEFAULT_PIC is None:
        return await edit_delete(
            event,
            "**Error**\nFor functing of bloom you need to set DEFAULT_PIC var in Heroku vars",
            parse_mode=parse_pre,
        )
    downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=True)
    downloader.start(blocking=False)
    while not downloader.isFinished():
        pass
    if gvarstatus("bloom") is not None and gvarstatus("bloom") == "true":
        return await edit_delete(event, f"`Bloom is already enabled`")
    addgvar("bloom", True)
    await edit_delete(event, f"`Bloom has been started by my Master`")
    await bloom_pfploop()
@bot.on(admin_cmd(pattern="autoname$"))
async def _(event):
    """``.autoname`` — enable the time-in-name loop."""
    if event.fwd_from:
        return
    if gvarstatus("autoname") is not None and gvarstatus("autoname") == "true":
        return await edit_delete(event, f"`Autoname is already enabled`")
    addgvar("autoname", True)
    await edit_delete(event, "`AutoName has been started by my Master `")
    await autoname_loop()
@bot.on(admin_cmd(pattern="autobio$"))
async def _(event):
    """``.autobio`` — enable the time-in-bio loop."""
    if event.fwd_from:
        return
    if gvarstatus("autobio") is not None and gvarstatus("autobio") == "true":
        return await edit_delete(event, f"`Autobio is already enabled`")
    addgvar("autobio", True)
    await edit_delete(event, "`Autobio has been started by my Master `")
    await autobio_loop()
@bot.on(admin_cmd(pattern="end (.*)"))
async def _(event): # sourcery no-metrics
    """``.end <feature>`` — stop one of the auto-profile loops.

    Clears the feature's global flag (the loop polls it and exits) and
    restores the profile: re-uploads the cached original picture, deletes
    the generated one, or resets name/bio to the defaults.
    """
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    if input_str == "autopic":
        if gvarstatus("autopic") is not None and gvarstatus("autopic") == "true":
            delgvar("autopic")
            if os.path.exists(autopic_path):
                # Restore the original (un-rotated) picture.
                file = await event.client.upload_file(autopic_path)
                try:
                    await event.client(functions.photos.UploadProfilePhotoRequest(file))
                    os.remove(autopic_path)
                except BaseException:
                    return
            return await edit_delete(event, "`Autopic has been stopped now`")
        return await edit_delete(event, "`Autopic haven't enabled`")
    if input_str == "digitalpfp":
        if gvarstatus("digitalpic") is not None and gvarstatus("digitalpic") == "true":
            delgvar("digitalpic")
            # Remove the generated digital-clock picture entirely.
            await event.client(
                functions.photos.DeletePhotosRequest(
                    await bot.get_profile_photos("me", limit=1)
                )
            )
            return await edit_delete(event, "`Digitalpfp has been stopped now`")
        return await edit_delete(event, "`Digitalpfp haven't enabled`")
    if input_str == "bloom":
        if gvarstatus("bloom") is not None and gvarstatus("bloom") == "true":
            delgvar("bloom")
            if os.path.exists(autopic_path):
                file = await event.client.upload_file(autopic_path)
                try:
                    await event.client(functions.photos.UploadProfilePhotoRequest(file))
                    os.remove(autopic_path)
                except BaseException:
                    return
            return await edit_delete(event, "`Bloom has been stopped now`")
        return await edit_delete(event, "`Bloom haven't enabled`")
    if input_str == "autoname":
        if gvarstatus("autoname") is not None and gvarstatus("autoname") == "true":
            delgvar("autoname")
            # Reset the first name back to the configured default.
            await event.client(
                functions.account.UpdateProfileRequest(first_name=DEFAULTUSER)
            )
            return await edit_delete(event, "`Autoname has been stopped now`")
        return await edit_delete(event, "`Autoname haven't enabled`")
    if input_str == "autobio":
        if gvarstatus("autobio") is not None and gvarstatus("autobio") == "true":
            delgvar("autobio")
            # Reset the bio back to the configured default.
            await event.client(
                functions.account.UpdateProfileRequest(about=DEFAULTUSERBIO)
            )
            return await edit_delete(event, "`Autobio has been stopped now`")
        return await edit_delete(event, "`Autobio haven't enabled`")
async def autopicloop():
    """Background loop: rotate the cached picture by ``counter`` degrees,
    stamp the current time/date on it, and upload it as the profile photo
    every CHANGE_TIME seconds while the 'autopic' global flag is "true".
    """
    AUTOPICSTART = gvarstatus("autopic") == "true"
    if AUTOPICSTART and Config.DEFAULT_PIC is None:
        if BOTLOG:
            return await bot.send_message(
                BOTLOG_CHATID,
                "**Error**\n`For functing of autopic you need to set DEFAULT_PIC var in Heroku vars`",
            )
        return
    # BUG FIX: ``counter`` was only bound when the stored value parsed as an
    # int, so a missing/garbage "autopic_counter" caused a NameError inside
    # the loop. Default to 30 (matching the handler's initial value).
    counter = 30
    if gvarstatus("autopic") is not None:
        try:
            counter = int(gvarstatus("autopic_counter"))
        except Exception as e:
            LOGS.warn(str(e))
    while AUTOPICSTART:
        if not os.path.exists(autopic_path):
            # Re-download the base picture if the cache was removed.
            downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=False)
            downloader.start(blocking=False)
            while not downloader.isFinished():
                pass
        shutil.copy(autopic_path, autophoto_path)
        im = Image.open(autophoto_path)
        im.rotate(counter, expand=False).save(autophoto_path, "PNG")
        current_time = datetime.now().strftime(" Time: %H:%M \n Date: %d.%m.%y ")
        img = Image.open(autophoto_path)
        drawn_text = ImageDraw.Draw(img)
        fnt = ImageFont.truetype(FONT_FILE_TO_USE, 30)
        drawn_text.text((150, 250), current_time, font=fnt, fill=(124, 252, 0))
        img.save(autophoto_path)
        file = await bot.upload_file(autophoto_path)
        try:
            await bot(functions.photos.UploadProfilePhotoRequest(file))
            os.remove(autophoto_path)
            # NOTE(review): this doubles the angle each cycle rather than
            # advancing by a fixed step — kept as originally written.
            counter += counter
            await asyncio.sleep(CHANGE_TIME)
        except BaseException:
            return
        AUTOPICSTART = gvarstatus("autopic") == "true"
async def digitalpicloop():
    """Background loop: render the current HH:MM onto the digital-clock
    base picture and upload it while the 'digitalpic' flag is "true".
    Deletes the previous upload (after the first cycle) to avoid piling up
    profile photos.
    """
    DIGITALPICSTART = gvarstatus("digitalpic") == "true"
    i = 0  # counts cycles; previous photo is only deleted when i > 0
    while DIGITALPICSTART:
        if not os.path.exists(digitalpic_path):
            downloader = SmartDL(digitalpfp, digitalpic_path, progress_bar=False)
            downloader.start(blocking=False)
            while not downloader.isFinished():
                pass
        shutil.copy(digitalpic_path, autophoto_path)
        Image.open(autophoto_path)
        current_time = datetime.now().strftime("%H:%M")
        img = Image.open(autophoto_path)
        drawn_text = ImageDraw.Draw(img)
        # Obfuscated font path: the base64 decodes to
        # b"userbot/helpers/styles/digital.ttf"; the [2:36] slice strips the
        # b'...' wrapper from its repr().
        cat = str(base64.b64decode("dXNlcmJvdC9oZWxwZXJzL3N0eWxlcy9kaWdpdGFsLnR0Zg=="))[
            2:36
        ]
        fnt = ImageFont.truetype(cat, 200)
        drawn_text.text((350, 100), current_time, font=fnt, fill=(124, 252, 0))
        img.save(autophoto_path)
        file = await bot.upload_file(autophoto_path)
        try:
            if i > 0:
                await bot(
                    functions.photos.DeletePhotosRequest(
                        await bot.get_profile_photos("me", limit=1)
                    )
                )
            i += 1
            await bot(functions.photos.UploadProfilePhotoRequest(file))
            os.remove(autophoto_path)
            await asyncio.sleep(CHANGE_TIME)
        except BaseException:
            return
        DIGITALPICSTART = gvarstatus("digitalpic") == "true"
async def bloom_pfploop():
    """Background loop: fill the picture with a random colour, stamp the
    time/date in the complementary colour, and upload it while the 'bloom'
    flag is "true".
    """
    BLOOMSTART = gvarstatus("bloom") == "true"
    if BLOOMSTART and Config.DEFAULT_PIC is None:
        if BOTLOG:
            return await bot.send_message(
                BOTLOG_CHATID,
                "**Error**\n`For functing of bloom you need to set DEFAULT_PIC var in Heroku vars`",
            )
        return
    while BLOOMSTART:
        if not os.path.exists(autopic_path):
            downloader = SmartDL(Config.DEFAULT_PIC, autopic_path, progress_bar=False)
            downloader.start(blocking=False)
            while not downloader.isFinished():
                pass
        # RIP Danger zone Here no editing here plox
        # Random fill colour (R,G,B) and its complement (FR,FG,FB) for text.
        R = random.randint(0, 256)
        B = random.randint(0, 256)
        G = random.randint(0, 256)
        FR = 256 - R
        FB = 256 - B
        FG = 256 - G
        shutil.copy(autopic_path, autophoto_path)
        image = Image.open(autophoto_path)
        image.paste((R, G, B), [0, 0, image.size[0], image.size[1]])
        image.save(autophoto_path)
        current_time = datetime.now().strftime("\n Time: %H:%M:%S \n \n Date: %d/%m/%y")
        img = Image.open(autophoto_path)
        drawn_text = ImageDraw.Draw(img)
        fnt = ImageFont.truetype(FONT_FILE_TO_USE, 60)
        ofnt = ImageFont.truetype(FONT_FILE_TO_USE, 250)
        drawn_text.text((95, 250), current_time, font=fnt, fill=(FR, FG, FB))
        drawn_text.text((95, 250), " Ъўѕ", font=ofnt, fill=(FR, FG, FB))
        img.save(autophoto_path)
        file = await bot.upload_file(autophoto_path)
        try:
            await bot(functions.photos.UploadProfilePhotoRequest(file))
            os.remove(autophoto_path)
            await asyncio.sleep(CHANGE_TIME)
        except BaseException:
            return
        BLOOMSTART = gvarstatus("bloom") == "true"
async def autoname_loop():
    """Background loop: embed the current time/date in the first name while
    the 'autoname' flag is "true"; backs off on Telegram flood waits.
    """
    AUTONAMESTART = gvarstatus("autoname") == "true"
    while AUTONAMESTART:
        DM = time.strftime("%d-%m-%y")
        HM = time.strftime("%H:%M")
        name = f"Рїџ№ИЈ {HM}||Рђ║ {DEFAULTUSER} Рђ╣||ЪЊЁ {DM}"
        LOGS.info(name)
        try:
            await bot(functions.account.UpdateProfileRequest(first_name=name))
        except FloodWaitError as ex:
            LOGS.warning(str(ex))
            await asyncio.sleep(ex.seconds)  # honor Telegram's cool-down
        await asyncio.sleep(CHANGE_TIME)
        AUTONAMESTART = gvarstatus("autoname") == "true"
async def autobio_loop():
    """Background loop: embed the current time/date in the bio while the
    'autobio' flag is "true"; backs off on Telegram flood waits.
    """
    AUTOBIOSTART = gvarstatus("autobio") == "true"
    while AUTOBIOSTART:
        DMY = time.strftime("%d.%m.%Y")
        HM = time.strftime("%H:%M:%S")
        bio = f"ЪЊЁ {DMY} | {DEFAULTUSERBIO} | Рїџ№ИЈ {HM}"
        LOGS.info(bio)
        try:
            await bot(functions.account.UpdateProfileRequest(about=bio))
        except FloodWaitError as ex:
            LOGS.warning(str(ex))
            await asyncio.sleep(ex.seconds)
        await asyncio.sleep(CHANGE_TIME)
        AUTOBIOSTART = gvarstatus("autobio") == "true"
# Restart any loops whose flag survived a reboot: each coroutine checks its
# own global flag and returns immediately when the feature is disabled.
bot.loop.create_task(autopicloop())
bot.loop.create_task(digitalpicloop())
bot.loop.create_task(bloom_pfploop())
bot.loop.create_task(autoname_loop())
bot.loop.create_task(autobio_loop())
# Register the in-chat help text for this plugin.
CMD_HELP.update(
    {
        "autoprofile": """**Plugin : **`autoprofile`
Рђб **Syntax : **`.autopic angle`
Рђб **Function : **__Rotating image along with the time on it with given angle if no angle is given then doesnt rotate. You need to set __`DEFAULT_PIC`__ in heroku__
Рђб **Syntax : **`.digitalpfp`
Рђб **Function : **__Your profile pic changes to digitaltime profile picutre__
Рђб **Syntax : **`.bloom`
Рђб **Function : **__Random colour profile pics will be set along with time on it. You need to set__ `DEFAULT_PIC`__ in heroku__
Рђб **Syntax : **`.autoname`
Рђб **Function : **__for time along with name, you must set __`AUTONAME`__ in the heroku vars first for this to work__
Рђб **Syntax : **`.autobio`
Рђб **Function : **__for time along with your bio, Set __`DEFAULT_BIO`__ in the heroku vars first__
Рђб **Syntax : **`.end function`
Рђб **Function : **__To stop the given functions like autopic ,difitalpfp , bloom , autoname and autobio__
**Рџа№ИЈDISCLAIMERРџа№ИЈ**
__USING THIS PLUGIN CAN RESULT IN ACCOUNT BAN. WE ARE NOT RESPONSIBLE FOR YOUR BAN.__
"""
    }
)
| rockzy77/catusertbot77 | userbot/plugins/autoprofile.py | autoprofile.py | py | 14,466 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number":... |
72516589224 | # settings.py
import os
from os.path import join, dirname
from dotenv import load_dotenv
# Load key/value pairs from the .env file next to this script into the
# process environment, then read them back.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# Accessing variables.
# NOTE(review): os.getenv returns None when a key is missing, which makes
# the string concatenations below raise TypeError — confirm .env is required.
NUM_REQUESTS = os.getenv('NUM_REQUESTS')
URL = os.getenv('URL')
SECRET = os.getenv('SECRET')
SLEEP_SECONDS = os.getenv('SLEEP_SECONDS')
# Using variables.
print('Num Requests: ' + NUM_REQUESTS)
print('URL: ' + URL)
print('Secret: ' + SECRET)
print('Sleep Seconds: ' + SLEEP_SECONDS)
| blainemincey/generateApiRequests | settings.py | settings.py | py | 470 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_nu... |
43288963064 | from ctypes import *
import sys
import pytest
@pytest.fixture
def dll(sofile):
    # Load the compiled test library (sofile fixture is defined elsewhere);
    # use_errno=True enables get_errno/set_errno round-tripping in test_errno.
    return CDLL(str(sofile), use_errno=True)
def test_char_result(dll):
    # A c_char restype converts the C return value to a 1-byte bytes object.
    f = dll._testfunc_i_bhilfd
    f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
    f.restype = c_char
    result = f(0, 0, 0, 0, 0, 0)
    assert result == b'\x00'
def test_boolresult(dll):
    # A c_bool restype converts 0/nonzero to the Python False/True singletons.
    f = dll._testfunc_i_bhilfd
    f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
    f.restype = c_bool
    false_result = f(0, 0, 0, 0, 0, 0)
    assert false_result is False
    true_result = f(1, 0, 0, 0, 0, 0)
    assert true_result is True
def test_unicode_function_name(dll):
    # Functions can be looked up by (unicode) name via indexing as well.
    f = dll[u'_testfunc_i_bhilfd']
    f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
    f.restype = c_int
    result = f(1, 2, 3, 4, 5.0, 6.0)
    assert result == 21
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_int
x = sys.maxint * 2
result = f(x, x, x, x, 0, 0)
assert result == -8
def test_convert_pointers(dll):
f = dll.deref_LP_c_char_p
f.restype = c_char
f.argtypes = [POINTER(c_char_p)]
#
s = c_char_p(b'hello world')
ps = pointer(s)
assert f(ps) == b'h'
assert f(s) == b'h' # automatic conversion from char** to char*
################################################################
def test_call_some_args(dll):
f = dll.my_strchr
f.argtypes = [c_char_p]
f.restype = c_char_p
result = f(b"abcd", ord("b"))
assert result == b"bcd"
def test_variadic_sum(dll):
f = dll.variadic_sum
f.argtypes = [c_long]
f.restype = c_long
result = f(3, 13, 38, 100)
assert result == 13 + 38 + 100
result = f(2, 13, 38)
assert result == 13 + 38
@pytest.mark.pypy_only
def test_keepalive_buffers(monkeypatch, dll):
import gc
f = dll.my_strchr
f.argtypes = [c_char_p]
f.restype = c_char_p
#
orig__call_funcptr = f._call_funcptr
def _call_funcptr(funcptr, *newargs):
gc.collect()
gc.collect()
gc.collect()
return orig__call_funcptr(funcptr, *newargs)
monkeypatch.setattr(f, '_call_funcptr', _call_funcptr)
#
result = f(b"abcd", ord("b"))
assert result == b"bcd"
def test_caching_bug_1(dll):
# the same test as test_call_some_args, with two extra lines
# in the middle that trigger caching in f._ptr, which then
# makes the last two lines fail
f = dll.my_strchr
f.argtypes = [c_char_p, c_int]
f.restype = c_char_p
result = f(b"abcd", ord("b"))
assert result == b"bcd"
result = f(b"abcd", ord("b"), 42)
assert result == b"bcd"
def test_argument_conversion_and_checks(dll):
#This test is designed to check for segfaults if the wrong type of argument is passed as parameter
strlen = dll.my_strchr
strlen.argtypes = [c_char_p, c_int]
strlen.restype = c_char_p
assert strlen(b"eggs", ord("g")) == b"ggs"
# Should raise ArgumentError, not segfault
with pytest.raises(ArgumentError):
strlen(0, 0)
with pytest.raises(ArgumentError):
strlen(False, 0)
def test_union_as_passed_value(dll):
class UN(Union):
_fields_ = [("x", c_short),
("y", c_long)]
dll.ret_un_func.restype = UN
dll.ret_un_func.argtypes = [UN]
A = UN * 2
a = A()
a[1].x = 33
u = dll.ret_un_func(a[1])
assert u.y == 33 * 10000
@pytest.mark.pypy_only
def test_cache_funcptr(dll):
    # The underlying function pointer object is cached across calls.
    tf_b = dll.tf_b
    tf_b.restype = c_byte
    tf_b.argtypes = (c_byte,)
    assert tf_b(-126) == -42
    ptr = tf_b._ptr
    assert ptr is not None
    assert tf_b(-126) == -42
    assert tf_b._ptr is ptr
def test_custom_from_param(dll):
    # A user-defined argtype's from_param() hook receives the raw argument
    # and supplies the converted value.
    class A(c_byte):
        @classmethod
        def from_param(cls, obj):
            seen.append(obj)
            return -126
    tf_b = dll.tf_b
    tf_b.restype = c_byte
    tf_b.argtypes = (c_byte,)
    tf_b.argtypes = [A]
    seen = []
    assert tf_b("yadda") == -42
    assert seen == ["yadda"]
@pytest.mark.xfail(reason="warnings are disabled")
def test_warnings(dll):
    # Calling a function without declared argtypes should warn.
    import warnings
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as w:
        dll.get_an_integer()
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert "C function without declared arguments called" in str(w[0].message)
@pytest.mark.xfail
def test_errcheck(dll):
    # errcheck hook sees the raw result and the original args tuple.
    import warnings
    def errcheck(result, func, args):
        assert result == -42
        assert type(result) is int
        arg, = args
        assert arg == -126
        assert type(arg) is int
        return result
    #
    tf_b = dll.tf_b
    tf_b.restype = c_byte
    tf_b.argtypes = (c_byte,)
    tf_b.errcheck = errcheck
    assert tf_b(-126) == -42
    del tf_b.errcheck
    with warnings.catch_warnings(record=True) as w:
        dll.get_an_integer.argtypes = []
        dll.get_an_integer()
        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert "C function without declared return type called" in str(w[0].message)
    with warnings.catch_warnings(record=True) as w:
        dll.get_an_integer.restype = None
        dll.get_an_integer()
        assert len(w) == 0
    warnings.resetwarnings()
def test_errno(dll):
    # With use_errno=True, errno round-trips through the C call (the test
    # function reads 42 and stores 43).
    test_errno = dll.test_errno
    test_errno.restype = c_int
    set_errno(42)
    res = test_errno()
    n = get_errno()
    assert (res, n) == (42, 43)
    set_errno(0)
    assert get_errno() == 0
def test_issue1655(dll):
    """paramflag-generated output pointers can be post-processed by errcheck."""
    def ret_list_p(icount):
        # Build an errcheck hook that treats args[icount] as the element
        # count and expands the returned int pointer into a Python list.
        def sz_array_p(obj, func, args):
            assert ('.LP_c_int object' in repr(obj) or
                    '.LP_c_long object' in repr(obj))
            assert repr(args) in ("('testing!', c_int(4))",
                                  "('testing!', c_long(4))")
            assert args[icount].value == 4
            return [obj[i] for i in range(args[icount].value)]
        return sz_array_p

    get_data_prototype = CFUNCTYPE(POINTER(c_int),
                                   c_char_p, POINTER(c_int))
    # paramflags: 1 = input parameter, 2 = output parameter.
    get_data_paramflag = ((1,), (2,))
    get_data_signature = ('test_issue1655', dll)
    get_data = get_data_prototype(get_data_signature, get_data_paramflag)
    # Without errcheck the single output parameter is returned directly.
    assert get_data(b'testing!') == 4
    get_data.errcheck = ret_list_p(1)
    assert get_data(b'testing!') == [-1, -2, -3, -4]
def test_issue2533(tmpdir):
    """Writing through a POINTER(POINTER(c_int)) obtained via cffi works.

    Builds a tiny cffi extension exposing ``int **fetchme(void)``, casts its
    raw address into a ctypes function pointer, and checks that assigning
    ``g[0]`` (which used to crash) stores a new pointer value.
    """
    import cffi
    ffi = cffi.FFI()
    ffi.cdef("int **fetchme(void);")
    ffi.set_source("_x_cffi", """
    int **fetchme(void)
    {
        static int a = 42;
        static int *pa = &a;
        return &pa;
    }
    """)
    ffi.compile(verbose=True, tmpdir=str(tmpdir))
    import sys
    # Make the freshly-built extension importable, then restore sys.path.
    sys.path.insert(0, str(tmpdir))
    try:
        from _x_cffi import ffi, lib
    finally:
        sys.path.pop(0)
    # Convert the cffi function address into a plain integer ...
    fetchme = ffi.addressof(lib, 'fetchme')
    fetchme = int(ffi.cast("intptr_t", fetchme))
    # ... and rebuild a callable from it on the ctypes side.
    FN = CFUNCTYPE(POINTER(POINTER(c_int)))
    ff = cast(fetchme, FN)
    g = ff()
    assert g.contents.contents.value == 42
    h = c_int(43)
    g[0] = pointer(h)  # used to crash here
    assert g.contents.contents.value == 43
| mozillazg/pypy | extra_tests/ctypes_tests/test_functions.py | test_functions.py | py | 7,258 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.maxint",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "gc.collect",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_... |
16874206716 | from utils.rabbit_controller import RabbitMqController
from utils.tools import Tools
from utils.verifier import Verifier
from utils.watcher import Watcher
if __name__ == "__main__":
"""
This is the SAAS Client!
Results will be under the server http://3.73.75.114:5000/
1. Get all md5 of /bin
2. Watch changes in loop
3. Every change get list of events
4. Go over the event- find the file
5. Check if the file changed
6. Check if its cryptominer -> by strings/ filename.
7. Check if the file is in a md5 database of malicious feed
8. do ps -a
9. Check if there is a reverse_shell
10. do tcpdump, check if there is malicious ip / domain
11. If one of the above is true: Write it to the screen.
12. EVERY 1 MIN add cpu usage for a map of ps. Check if something is weird (cryptominer). After 30min delete
last one.
"""
verifier = Verifier()
tools = Tools()
watcher = Watcher()
files_dict = tools.get_md5()
resources = []
alerts = []
while True:
controller = RabbitMqController()
alerts.extend(verifier.verify_malware_dict(files_dict, is_send_to_rabbit=True))
for alert in alerts:
controller.send_alert(alert)
alerts = []
controller.connection.close()
events = watcher.watch()
alert_from_verifier, resources = verifier.verify_resources(resources, is_send_to_rabbit=True)
alerts.extend(alert_from_verifier)
alerts.extend(verifier.verify_filesystem_event(events, is_send_to_rabbit=True))
alerts.extend(verifier.verify_cryptominer(events, is_send_to_rabbit=True))
alerts.extend(verifier.verify_reverse_shell(events, is_send_to_rabbit=True))
alerts.extend(verifier.verify_request(is_send_to_rabbit=True))
files_dict = tools.get_md5()
| Oreldm/RuntimeDefender | saas/client.py | client.py | py | 1,910 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.verifier.Verifier",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.tools.Tools",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.watcher.Watcher",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "utils... |
38790064496 | """
This script shows how pedestrian detections and robot data can be converted to a spatio-temporal grid
The output data of this script can then be used to train a CoPA-Map model
"""
import pandas as pd
from copa_map.model.Gridifier import Gridifier, GridParams
from copa_map.model.InitInducing import InducingInitializer
from copa_map.util.occ_grid import OccGrid
from copa_map.util import util as ut
from copa_map.util import fov
from os.path import join, exists
from copy import copy
import zipfile
# Location of the bundled example data and the CSV files expected inside it.
data_folder = join(ut.abs_path(), "data")
csv_names = ["atc_10days_path_pedest_train.csv",
             "atc_10days_path_robot_train.csv",
             "atc_4days_path_pedest_test.csv",
             "atc_4days_path_robot_test.csv"]
# Extract the csv files from the zipped file
# Only unpack the archive when at least one of the CSV files is missing.
all_csv_exist = all(exists(join(data_folder, name)) for name in csv_names)
if not all_csv_exist:
    print("Extracting csv files from zip file...")
    with zipfile.ZipFile(join(data_folder, 'atc_rob_path_ped_detections.zip'), 'r') as zip_ref:
        zip_ref.extractall(data_folder)
# Create pd dataframes from csv files
# The csv files with pedestrian data (*_pedest_*) will be converted to a dataframe of the following form:
# [pos_x, pos_y, tidx_bin, t] x [n_det]
# pos_x: First spatial dim of detected pedestrian
# pos_y: Second spatial dim of detected pedestrian
# tidx_bin: The bin where the detection falls into. The data is already pre binned to a bin size of one hour
# t: timestamp of the detection
# The csv files with robot data (*_robot_*) will be converted to a dataframe of the following form:
# [robot_x, robot_y, delta_t, tidx_bin, t] x [n_rob]
# The dataframe then contains the positions of the robot
# robot_x: First spatial dim of the robot
# robot_y: Second spatial dim of the robot
# delta_t: Dwell/rest time of the robot at the corresponding spatial location
# tidx_bin: The bin where the robot position falls into. The data is already pre binned to a bin size of one hour
# t: timestamp of the robot pose
# Pedestrian detections and robot poses for the training period.
df_data_train = pd.read_csv(join(data_folder, csv_names[0]), index_col=0)
df_rob_train = pd.read_csv(join(data_folder, csv_names[1]), index_col=0)
# Also create for test data, since we only want to test at locations that were visited during training
# The values of the robot's poses correspond to the values from the training data, but the timestamps were adjusted
df_data_test = pd.read_csv(join(data_folder, csv_names[2]), index_col=0)
df_rob_test = pd.read_csv(join(data_folder, csv_names[3]), index_col=0)
# Read the occupancy map to define the location of the grid
occ_map = OccGrid.from_ros_format(path_yaml=join(data_folder, "atc_map.yaml"))
# Overwrite default params of the grid with these params
# For all default params, see class definition of GridParams class
params_grid_train = GridParams(cell_resolution=0.5,
                               origin=occ_map.orig, rotation=occ_map.rotation, width=occ_map.width,
                               height=occ_map.height, rate_min=1e-5, bin_size=3600)
# Test grid shares the training grid geometry (shallow copy of the params).
params_grid_test = copy(params_grid_train)
print("Creating grid for training data")
gf_train = Gridifier(occ_map=occ_map, fov=fov.Circle(r=3.5), params=params_grid_train)
gf_train.setup_data(df_data_train, df_rob_train)
print("Creating grid for test data")
gf_test = Gridifier(occ_map=occ_map, fov=fov.Circle(r=3.5), params=params_grid_test, create_gt=True)
gf_test.setup_data(df_data_test, df_rob_test)
# Create the inducing points by clustering
# There are two methods implemented:
# 3D-KMeans: Clustering over the complete input matrix X with targets as weights
# 2D-KMeans: Do separate clustering steps for each time bin. The number of clusters for every bin follows from
# (number of spatial cells of the bin)/(number of all datapoints) * (number of all inducing points)
print("Clustering for initial inducing point selection...")
init_inducing = InducingInitializer(X=gf_train.get_input_points(), Y_all=gf_train.get_observations(), alpha=0.02)
init_inducing.get_init_inducing(method="2D-KMeans")
# Output file locations for the follow-up training script.
path_train = join(data_folder, "grid_atc_50cm_60min_train_xy.csv")
path_test = join(data_folder, "grid_atc_50cm_60min_test_xy.csv")
path_inducing = join(data_folder, "grid_atc_50cm_60min_train_z.csv")
print("Saving training data to " + str(path_train))
gf_train.output_to_text(path_train)
print("Saving inducing point data to " + str(path_inducing))
init_inducing.output_to_text(path_inducing)
print("Saving test data to " + str(path_test))
gf_test.output_to_text(path_test)
print("All data saved. Run 02_copa_map_atc_train.py to train the model.")
# Optional visual sanity check of the training grid.
plot = True
if plot:
    print("Plotting training data")
    gf_train.plot()
| MarvinStuede/copa-map | src/copa_map/examples/01_atc_gridify_data.py | 01_atc_gridify_data.py | py | 4,685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "copa_map.util.util.abs_path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "copa_map.util.util",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.e... |
9328318313 | import pandas as pd
import matplotlib.pyplot as plt
def gerar_grafico_tempos(caminho_entrada):
    """Plot mean and median correction time from a ';'-separated CSV.

    Reads the first row of the 'Média' and 'Mediana' columns, draws a bar
    chart with the value printed above each bar, saves the figure as
    'grafico_tempos_correcao.png' (300 dpi) and shows it.
    """
    dados = pd.read_csv(caminho_entrada, delimiter=';')

    rotulos = ['Média', 'Mediana']
    valores = [dados['Média'].iloc[0], dados['Mediana'].iloc[0]]

    plt.bar(rotulos, valores, color=['blue', 'green'])
    plt.title('Média e Mediana do Tempo de Correção')
    plt.ylabel('Horas')

    # Annotate each bar with its numeric value slightly above the top.
    for posicao, valor in enumerate(valores):
        plt.text(posicao, valor + 5, str(valor), ha='center', va='bottom', fontweight='bold')

    plt.tight_layout()
    plt.savefig('grafico_tempos_correcao.png', dpi=300)
    plt.show()
gerar_grafico_tempos("result_correction_time.csv")
| ClaudioJansen/GitHub-Script | Tis 06/correction_time/graphic_generator.py | graphic_generator.py | py | 645 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyp... |
8298790752 | # from https://github.com/iskandr/fancyimpute
import numpy as np
from sklearn.utils.extmath import randomized_svd
from sklearn.utils import check_array
import warnings
# Machine epsilon of float32; used as a near-zero guard in convergence checks.
F32PREC = np.finfo(np.float32).eps
from joblib import Memory
# Disk cache so repeated rank searches on identical inputs are not recomputed.
memory = Memory('cache_dir', verbose=0)
def soft_impute_rank(X_obs, n_folds=5, max_rank=10):
    """Cross-validate SoftImpute at a fixed truncation rank.

    For each fold, hides ~20% of the observed entries of ``X_obs``, imputes
    the matrix with SoftImpute truncated at ``max_rank`` and records the MAE
    on the hidden entries.

    Parameters
    ----------
    X_obs : np.ndarray
        2-D matrix with missing entries encoded as NaN.
    n_folds : int
        Number of random hold-out folds (must be >= 1).
    max_rank : int
        Truncation rank passed to SoftImpute.

    Returns
    -------
    tuple
        ``(U, l_mae)``: the left SVD factor fitted on the *last* fold and
        the list of per-fold hold-out MAEs.
    """
    if n_folds < 1:
        raise ValueError("n_folds must be >= 1")
    # The observation mask is independent of the fold; compute it once.
    obs_mask = ~np.isnan(X_obs)
    l_mae = []
    for _ in range(n_folds):
        # Randomly hide ~20% of the observed entries as a test set.
        test_mask = np.array(np.random.binomial(np.ones_like(obs_mask), obs_mask * .2), dtype=bool)
        X_obs_train = X_obs.copy()
        X_obs_train[test_mask] = np.nan
        si = SoftImpute(max_rank=max_rank, verbose=False)
        X_obs_imp = si.fit_transform(X_obs_train)
        mae_test = np.mean(np.abs(X_obs[test_mask] - X_obs_imp[test_mask]))
        l_mae.append(mae_test)
    return si.U, l_mae
@memory.cache
def get_U_softimpute(X_obs, list_rank=None, boxplot=False, n_folds=3):
    """Return the left factor U_hat chosen with a SoftImpute strategy.

    The truncation rank is cross-selected (w.r.t. hold-out MAE) among the
    candidates in ``list_rank``.

    Parameters
    ----------
    X_obs : np.ndarray
        Matrix with missing entries encoded as NaN; must contain NaNs.
    list_rank : list of int, optional
        Candidate ranks. Defaults to [1..10, 20, 30, 100].
    boxplot : bool
        When True, show a swarmplot of the per-rank fold MAEs (requires
        seaborn/matplotlib).
    n_folds : int
        Number of hold-out folds per candidate rank.

    Returns
    -------
    np.ndarray
        Left factor of the best-scoring SoftImpute fit.
    """
    assert np.sum(np.isnan(X_obs)) > 0, 'X_obs do not contains any nan in "get_U_softimpute"'
    if list_rank is None:
        # Default grid: fine-grained at small ranks, coarse above.
        list_rank = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 100]
    best_mae = float('inf')
    best_U = None
    best_rank = None
    ll_mae = []
    for max_rank in list_rank:
        U, l_mae = soft_impute_rank(X_obs, n_folds=n_folds, max_rank=max_rank)
        ll_mae.append(l_mae)
        if np.mean(l_mae) < best_mae:
            best_mae = np.mean(l_mae)
            best_U = U
            best_rank = max_rank
    if boxplot:
        # Imported lazily: plotting libs were previously only imported in the
        # __main__ block, so boxplot=True raised NameError outside it.
        import seaborn as sns
        import matplotlib.pyplot as plt
        sns.swarmplot(data=np.array(ll_mae).T)
        plt.xticks(ticks=np.arange(len(list_rank)), labels=[str(x) for x in list_rank])
        plt.xlabel('SVD rank')
        plt.ylabel('MAE on test fold')
        plt.show()
    print('-get_U_softimpute, best_rank=', best_rank)
    print('-get_U_softimpute, best_mae=', best_mae)
    return best_U
def masked_mae(X_true, X_pred, mask):
    """Mean absolute error between X_true and X_pred over entries where mask is True."""
    abs_err = np.abs(X_true[mask] - X_pred[mask])
    return np.mean(abs_err)
def generate_random_column_samples(column):
    """Draw fill values for a column's missing (NaN) entries.

    Samples from a normal distribution matching the observed mean/std;
    degenerates to the plain mean when the spread is ~0 and to zeros when
    every entry is missing.
    """
    missing = np.isnan(column)
    n_missing = np.sum(missing)
    if n_missing == len(column):
        # Nothing observed: fall back to zeros for the whole column.
        return np.zeros_like(column)

    mu = np.nanmean(column)
    sigma = np.nanstd(column)
    if np.isclose(sigma, 0):
        # (Nearly) constant column: replicate the mean per missing slot.
        return np.array([mu] * n_missing)
    return np.random.randn(n_missing) * sigma + mu
class Solver(object):
    """Base class for matrix-completion solvers (vendored from fancyimpute).

    Handles input validation, initial filling of missing entries, optional
    normalization and min/max clipping; subclasses implement ``solve``.
    """
    def __init__(
            self,
            fill_method="zero",
            min_value=None,
            max_value=None,
            normalizer=None):
        # Strategy used to initialize NaN entries; see fill() for choices.
        self.fill_method = fill_method
        # Optional global bounds applied to every imputed value in clip().
        self.min_value = min_value
        self.max_value = max_value
        # Optional object with fit_transform()/inverse_transform()
        # (e.g. a BiScaler) applied around the solver.
        self.normalizer = normalizer

    def __repr__(self):
        return str(self)

    def __str__(self):
        """Render as ``ClassName(field=value, ...)`` for scalar/str fields."""
        field_list = []
        for (k, v) in sorted(self.__dict__.items()):
            if v is None or isinstance(v, (float, int)):
                field_list.append("%s=%s" % (k, v))
            elif isinstance(v, str):
                field_list.append("%s='%s'" % (k, v))
        return "%s(%s)" % (
            self.__class__.__name__,
            ", ".join(field_list))

    def _check_input(self, X):
        """Raise ValueError unless X is a 2-D array."""
        if len(X.shape) != 2:
            raise ValueError("Expected 2d matrix, got %s array" % (X.shape,))

    def _check_missing_value_mask(self, missing):
        """Warn when nothing is missing; fail when everything is missing."""
        if not missing.any():
            warnings.simplefilter("always")
            warnings.warn("Input matrix is not missing any values")
        if missing.all():
            raise ValueError("Input matrix must have some non-missing values")

    def _fill_columns_with_fn(self, X, missing_mask, col_fn):
        """Replace missing entries column-by-column with col_fn(column)."""
        for col_idx in range(X.shape[1]):
            missing_col = missing_mask[:, col_idx]
            n_missing = missing_col.sum()
            if n_missing == 0:
                continue
            col_data = X[:, col_idx]
            fill_values = col_fn(col_data)
            if np.all(np.isnan(fill_values)):
                # Column had no observed values at all: fall back to zero.
                fill_values = 0
            X[missing_col, col_idx] = fill_values

    def fill(
            self,
            X,
            missing_mask,
            fill_method=None,
            inplace=False):
        """
        Parameters
        ----------
        X : np.array
            Data array containing NaN entries

        missing_mask : np.array
            Boolean array indicating where NaN entries are

        fill_method : str
            "zero": fill missing entries with zeros
            "mean": fill with column means
            "median" : fill with column medians
            "min": fill with min value per column
            "random": fill with gaussian samples according to mean/std of column

        inplace : bool
            Modify matrix or fill a copy
        """
        X = check_array(X, force_all_finite=False)

        if not inplace:
            X = X.copy()

        if not fill_method:
            # Fall back to the strategy chosen at construction time.
            fill_method = self.fill_method

        if fill_method not in ("zero", "mean", "median", "min", "random"):
            raise ValueError("Invalid fill method: '%s'" % (fill_method))
        elif fill_method == "zero":
            # replace NaN's with 0
            X[missing_mask] = 0
        elif fill_method == "mean":
            self._fill_columns_with_fn(X, missing_mask, np.nanmean)
        elif fill_method == "median":
            self._fill_columns_with_fn(X, missing_mask, np.nanmedian)
        elif fill_method == "min":
            self._fill_columns_with_fn(X, missing_mask, np.nanmin)
        elif fill_method == "random":
            self._fill_columns_with_fn(
                X,
                missing_mask,
                col_fn=generate_random_column_samples)
        return X

    def prepare_input_data(self, X):
        """
        Check to make sure that the input matrix and its mask of missing
        values are valid. Returns X and missing mask.
        """
        X = check_array(X, force_all_finite=False)
        if X.dtype != "f" and X.dtype != "d":
            # Promote integer/other dtypes so NaN handling works.
            X = X.astype(float)

        self._check_input(X)
        missing_mask = np.isnan(X)
        self._check_missing_value_mask(missing_mask)
        return X, missing_mask

    def clip(self, X):
        """
        Clip values to fall within any global or column-wise min/max constraints
        """
        X = np.asarray(X)
        if self.min_value is not None:
            X[X < self.min_value] = self.min_value
        if self.max_value is not None:
            X[X > self.max_value] = self.max_value
        return X

    def project_result(self, X):
        """
        First undo normalization and then clip to the user-specified min/max
        range.
        """
        X = np.asarray(X)
        if self.normalizer is not None:
            X = self.normalizer.inverse_transform(X)
        return self.clip(X)

    def solve(self, X, missing_mask):
        """
        Given an initialized matrix X and a mask of where its missing values
        had been, return a completion of X.
        """
        raise ValueError("%s.solve not yet implemented!" % (
            self.__class__.__name__,))

    def fit_transform(self, X, y=None):
        """
        Fit the imputer and then transform input `X`

        Note: all imputations should have a `fit_transform` method,
        but only some (like IterativeImputer in sklearn) also support inductive
        mode using `fit` or `fit_transform` on `X_train` and then `transform`
        on new `X_test`.
        """
        X_original, missing_mask = self.prepare_input_data(X)
        observed_mask = ~missing_mask
        X = X_original.copy()
        if self.normalizer is not None:
            X = self.normalizer.fit_transform(X)
        # Initialize missing entries so the solver starts from a full matrix.
        X_filled = self.fill(X, missing_mask, inplace=True)
        if not isinstance(X_filled, np.ndarray):
            raise TypeError(
                "Expected %s.fill() to return NumPy array but got %s" % (
                    self.__class__.__name__,
                    type(X_filled)))
        X_result = self.solve(X_filled, missing_mask)
        if not isinstance(X_result, np.ndarray):
            raise TypeError(
                "Expected %s.solve() to return NumPy array but got %s" % (
                    self.__class__.__name__,
                    type(X_result)))
        # Undo normalization / apply clipping, then restore observed entries.
        X_result = self.project_result(X=X_result)
        X_result[observed_mask] = X_original[observed_mask]
        return X_result

    def fit(self, X, y=None):
        """
        Fit the imputer on input `X`.

        Note: all imputations should have a `fit_transform` method,
        but only some (like IterativeImputer in sklearn) also support inductive
        mode using `fit` or `fit_transform` on `X_train` and then `transform`
        on new `X_test`.
        """
        raise ValueError(
            "%s.fit not implemented! This imputation algorithm likely "
            "doesn't support inductive mode. Only fit_transform is "
            "supported at this time." % (
                self.__class__.__name__,))

    def transform(self, X, y=None):
        """
        Transform input `X`.

        Note: all imputations should have a `fit_transform` method,
        but only some (like IterativeImputer in sklearn) also support inductive
        mode using `fit` or `fit_transform` on `X_train` and then `transform`
        on new `X_test`.
        """
        raise ValueError(
            "%s.transform not implemented! This imputation algorithm likely "
            "doesn't support inductive mode. Only %s.fit_transform is "
            "supported at this time." % (
                self.__class__.__name__, self.__class__.__name__))
class SoftImpute(Solver):
    """
    Implementation of the SoftImpute algorithm from:
    "Spectral Regularization Algorithms for Learning Large Incomplete Matrices"
    by Mazumder, Hastie, and Tibshirani.
    """
    def __init__(
            self,
            shrinkage_value=None,
            convergence_threshold=0.001,
            max_iters=100,
            max_rank=None,
            n_power_iterations=1,
            init_fill_method="zero",
            min_value=None,
            max_value=None,
            normalizer=None,
            verbose=True):
        """
        Parameters
        ----------
        shrinkage_value : float
            Value by which we shrink singular values on each iteration. If
            omitted then the default value will be the maximum singular
            value of the initialized matrix (zeros for missing values) divided
            by 100.

        convergence_threshold : float
            Minimum ration difference between iterations (as a fraction of
            the Frobenius norm of the current solution) before stopping.

        max_iters : int
            Maximum number of SVD iterations

        max_rank : int, optional
            Perform a truncated SVD on each iteration with this value as its
            rank.

        n_power_iterations : int
            Number of power iterations to perform with randomized SVD

        init_fill_method : str
            How to initialize missing values of data matrix, default is
            to fill them with zeros.

        min_value : float
            Smallest allowable value in the solution

        max_value : float
            Largest allowable value in the solution

        normalizer : object
            Any object (such as BiScaler) with fit() and transform() methods

        verbose : bool
            Print debugging info
        """
        Solver.__init__(
            self,
            fill_method=init_fill_method,
            min_value=min_value,
            max_value=max_value,
            normalizer=normalizer)
        self.shrinkage_value = shrinkage_value
        self.convergence_threshold = convergence_threshold
        self.max_iters = max_iters
        self.max_rank = max_rank
        self.n_power_iterations = n_power_iterations
        self.verbose = verbose
        # Factors of the most recent thresholded SVD (set by _svd_step) and
        # the final MAE on observed entries (set at the end of solve()).
        self.U = None
        self.V = None
        self.S = None
        self.mae_obs = None

    def _converged(self, X_old, X_new, missing_mask):
        """Return True when the relative change of the imputed entries is
        below ``convergence_threshold``."""
        # check for convergence
        old_missing_values = X_old[missing_mask]
        new_missing_values = X_new[missing_mask]
        difference = old_missing_values - new_missing_values
        ssd = np.sum(difference ** 2)
        old_norm = np.sqrt((old_missing_values ** 2).sum())
        # edge cases: near-zero previous norm makes the ratio meaningless
        if old_norm == 0 or (old_norm < F32PREC and np.sqrt(ssd) > F32PREC):
            return False
        else:
            return (np.sqrt(ssd) / old_norm) < self.convergence_threshold

    def _svd_step(self, X, shrinkage_value, max_rank=None):
        """
        Returns reconstructed X from low-rank thresholded SVD and
        the rank achieved.
        """
        if max_rank:
            # if we have a max rank then perform the faster randomized SVD
            (U, s, V) = randomized_svd(
                X,
                max_rank,
                n_iter=self.n_power_iterations)
        else:
            # perform a full rank SVD using ARPACK
            (U, s, V) = np.linalg.svd(
                X,
                full_matrices=False,
                compute_uv=True)
        # Soft-threshold the spectrum and drop components shrunk to zero.
        s_thresh = np.maximum(s - shrinkage_value, 0)
        rank = (s_thresh > 0).sum()
        s_thresh = s_thresh[:rank]
        U_thresh = U[:, :rank]
        V_thresh = V[:rank, :]
        S_thresh = np.diag(s_thresh)
        X_reconstruction = np.dot(U_thresh, np.dot(S_thresh, V_thresh))
        # Keep the factors so callers can inspect the last decomposition.
        self.U, self.S, self.V = U_thresh, S_thresh, V_thresh
        return X_reconstruction, rank

    def _max_singular_value(self, X_filled):
        """Largest singular value of the filled matrix (rank-1 randomized SVD)."""
        # quick decomposition of X_filled into rank-1 SVD
        _, s, _ = randomized_svd(
            X_filled,
            1,
            n_iter=5)
        return s[0]

    def solve(self, X, missing_mask):
        """Iterate soft-thresholded SVDs until the imputed entries converge."""
        X = check_array(X, force_all_finite=False)

        X_init = X.copy()

        X_filled = X
        observed_mask = ~missing_mask
        max_singular_value = self._max_singular_value(X_filled)
        if self.verbose:
            print("[SoftImpute] Max Singular Value of X_init = %f" % (
                max_singular_value))

        if self.shrinkage_value:
            shrinkage_value = self.shrinkage_value
        else:
            # totally hackish heuristic: keep only components
            # with at least 1/50th the max singular value
            shrinkage_value = max_singular_value / 50.0

        for i in range(self.max_iters):
            X_reconstruction, rank = self._svd_step(
                X_filled,
                shrinkage_value,
                max_rank=self.max_rank)
            X_reconstruction = self.clip(X_reconstruction)

            # print error on observed data
            if self.verbose:
                mae = masked_mae(
                    X_true=X_init,
                    X_pred=X_reconstruction,
                    mask=observed_mask)
                print(
                    "[SoftImpute] Iter %d: observed MAE=%0.6f rank=%d" % (
                        i + 1,
                        mae,
                        rank))
            # Convergence is judged *before* copying the new imputed values.
            converged = self._converged(
                X_old=X_filled,
                X_new=X_reconstruction,
                missing_mask=missing_mask)
            X_filled[missing_mask] = X_reconstruction[missing_mask]
            if converged:
                break

        if self.verbose:
            print("[SoftImpute] Stopped after iteration %d for lambda=%f" % (
                i + 1,
                shrinkage_value))

        # Record the final training-set (observed-entry) MAE for callers.
        self.mae_obs = masked_mae(
            X_true=X_init,
            X_pred=X_reconstruction,
            mask=observed_mask)
        return X_filled
if __name__ == '__main__':
    # Smoke test: simulate a low-rank matrix, ampute entries, then run the
    # rank-selection routine with the diagnostic boxplot enabled.
    from generate_data import gen_lrmf, gen_dlvm
    from generate_data import ampute
    import matplotlib.pyplot as plt
    import seaborn as sns
    Z, X, w, y, ps = gen_lrmf(d=3)
    X_obs = ampute(X)
    print('boxplot of get_U_softimpute with gen_lrmf(d=3)')
    U = get_U_softimpute(X_obs, boxplot=True)
{
"api_name": "numpy.finfo",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "joblib.Memory",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_nu... |
42843816488 | import pytest
import logging
import json
from work_order_tests.work_order_tests import work_order_get_result_params, \
work_order_request_params
from automation_framework.work_order_submit.work_order_submit_utility \
import verify_work_order_signature, decrypt_work_order_response
from automation_framework.utilities.request_args import TestStep
from automation_framework.utilities.workflow import validate_response_code
logger = logging.getLogger(__name__)
def test_work_order_both_in_out_Data_DataEncryptionKey_null_echo(setup_config):
    """ Testing work order request by passing
    null in encrypteddataencryption in indata or both
    in indata and outdata. """
    # input file name
    request = 'work_order_tests/input' \
              '/work_order_both_in_out_Data_EncryptionKey_null_echo.json'
    work_order_response, generic_params = work_order_request_params(
        setup_config, request)
    err_cd, work_order_get_result_response = work_order_get_result_params(
        work_order_response[:2], generic_params)
    # The result must carry a valid worker signature.
    assert (verify_work_order_signature(work_order_get_result_response,
                                        generic_params[0])
            is TestStep.SUCCESS.value)
    # The response payload must decrypt with the submitted keys.
    assert (decrypt_work_order_response(work_order_get_result_response,
                                        work_order_response[3],
                                        work_order_response[4])[0]
            is TestStep.SUCCESS.value)
    # WorkOrderGetResult API Response validation with key parameters
    assert (validate_response_code(work_order_get_result_response) is
            TestStep.SUCCESS.value)
def test_work_order_with_empty_indata_outdata(setup_config):
    """Submit a work order whose inData and outData sections are empty.

    The WorkOrderGetResult response must still carry a valid response code.
    """
    request = ('work_order_tests/input'
               '/work_order_with_empty_indata_outdata.json')
    submit_response, generic_params = work_order_request_params(
        setup_config, request)
    err_cd, get_result_response = work_order_get_result_params(
        submit_response[:6], generic_params)
    # WorkOrderGetResult API response validation with key parameters.
    assert validate_response_code(get_result_response) is TestStep.SUCCESS.value
| manojsalunke85/avalon0.6_automaiton | tests/validation_suite/work_order_tests/get/test_work_order_submit_get_outData.py | test_work_order_submit_get_outData.py | py | 2,427 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "work_order_tests.work_order_tests.work_order_request_params",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "work_order_tests.work_order_tests.work_order_get_result_params",
... |
25810655781 |
from datetime import datetime
import unittest
from mongoengine import Document, StringField, IntField
from eve.exceptions import SchemaException
from eve.utils import str_to_date, config
from eve_mongoengine import EveMongoengine
from tests import BaseTest, Eve, SimpleDoc, ComplexDoc, LimitedDoc, WrongDoc, SETTINGS
class TestMongoengineFix(unittest.TestCase):
    """
    Test fixing mongoengine classes for Eve's purposes.
    """
    def create_app(self, *models):
        """Build an Eve test client with the given mongoengine models."""
        app = Eve(settings=SETTINGS)
        app.debug = True
        ext = EveMongoengine(app)
        ext.add_model(models)
        return app.test_client()

    def assertDateTimeAlmostEqual(self, d1, d2, precission='minute'):
        """
        Used for testing datetime, which cannot (or we do not want to) be
        injected into tested object. Omits second and microsecond part.
        """
        self.assertEqual(d1.year, d2.year)
        self.assertEqual(d1.month, d2.month)
        self.assertEqual(d1.day, d2.day)
        self.assertEqual(d1.hour, d2.hour)
        self.assertEqual(d1.minute, d2.minute)

    def _test_default_values(self, app, cls, updated_name='updated',
                             created_name='created'):
        # test updated and created fields if they are correctly generated
        now = datetime.utcnow()
        d = cls(a="xyz", b=29)
        updated = getattr(d, updated_name)
        created = getattr(d, created_name)
        self.assertEqual(type(updated), datetime)
        self.assertEqual(type(created), datetime)
        self.assertDateTimeAlmostEqual(updated, now)
        self.assertDateTimeAlmostEqual(created, now)
        d.save()
        # test real returned values
        json_data = app.get('/simpledoc/').get_json()
        created_attr = app.application.config['DATE_CREATED']
        created_str = json_data[config.ITEMS][0][created_attr]
        date_created = str_to_date(created_str)
        self.assertDateTimeAlmostEqual(now, date_created)
        # Clean up so other tests see an empty collection.
        d.delete()

    def test_default_values(self):
        # The default '_updated'/'_created' db fields must map both ways.
        app = self.create_app(SimpleDoc)
        self.assertEqual(SimpleDoc._db_field_map['updated'], '_updated')
        self.assertEqual(SimpleDoc._reverse_db_field_map['_updated'], 'updated')
        self.assertEqual(SimpleDoc._db_field_map['created'], '_created')
        self.assertEqual(SimpleDoc._reverse_db_field_map['_created'], 'created')
        self._test_default_values(app, SimpleDoc)

    def test_wrong_doc(self):
        # Registering an incompatible document class must raise TypeError.
        with self.assertRaises(TypeError):
            self.create_app(WrongDoc)

    def test_nondefault_last_updated_field(self):
        # redefine to get entirely new class
        class SimpleDoc(Document):
            a = StringField()
            b = IntField()
        sett = SETTINGS.copy()
        sett['LAST_UPDATED'] = 'last_change'
        app = Eve(settings=sett)
        app.debug = True
        ext = EveMongoengine(app)
        ext.add_model(SimpleDoc)
        client = app.test_client()
        with app.app_context():  # to get current app's config
            self._test_default_values(client, SimpleDoc, updated_name='last_change')

    def test_nondefault_date_created_field(self):
        # redefine to get entirely new class
        class SimpleDoc(Document):
            a = StringField()
            b = IntField()
        sett = SETTINGS.copy()
        sett['DATE_CREATED'] = 'created_at'
        app = Eve(settings=sett)
        app.debug = True
        ext = EveMongoengine(app)
        ext.add_model(SimpleDoc)
        app = app.test_client()
        self._test_default_values(app, SimpleDoc, created_name='created_at')
| MongoEngine/eve-mongoengine | tests/test_mongoengine_fix.py | test_mongoengine_fix.py | py | 3,618 | python | en | code | 39 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tests.Eve",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tests.SETTINGS",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "eve_mongoengine.EveMo... |
4897651596 | '''
Assignments
1)Write a Python program to sort (ascending and descending) a dictionary by value. [use sorted()]
2)Write a Python program to combine two dictionary adding values for common keys.
d1 = {'a': 100, 'b': 200, 'c':300}
d2 = {'a': 300, 'b': 200, 'd':400}
Sample output: Counter({'a': 400, 'b': 400, 'd': 400, 'c': 300})
'''
#ques 1
# Sample dictionaries for question 1.
d1 = {'a': 100, 'b': 200, 'c': 300}
d2 = {'a': 300, 'b': 200, 'd': 400}


def sorted_dict_increasing(d1):
    """Print the dict sorted by value ascending, then descending.

    Returns the descending (key, value) pairs, matching the original
    behaviour.
    """
    ascending = sorted(d1.items(), key=lambda item: item[1])
    print(ascending)
    descending = sorted(d1.items(), key=lambda item: item[1], reverse=True)
    print(descending)
    return descending


sorted_dict_increasing(d1)
# Ques 2
from collections import Counter

# Sample dictionaries for question 2.
d1 = {'a': 100, 'b': 200, 'c': 300}
d2 = {'a': 300, 'b': 200, 'd': 400}


def dict_combine(d1, d2):
    """Merge two dicts, summing the values of keys present in both."""
    combined = Counter(d1) + Counter(d2)
    print(combined)
    return combined


dict_combine(d1, d2)
{
"api_name": "collections.Counter",
"line_number": 26,
"usage_type": "call"
}
] |
15057824639 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import home_logs.utils.unique
class Migration(migrations.Migration):
    """Initial schema for the property app.

    Creates House, Sensor, SensorKind, Space and SpaceKind, then wires the
    relations between them. Auto-generated by Django; edit with care.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='House',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'MyHome', max_length=50)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # uuid is generated once via home_logs.utils.unique.get and
                # never editable afterwards.
                ('uuid', models.CharField(default=home_logs.utils.unique.get, editable=False, max_length=50, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Sensor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=50, null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='SensorKind',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Space',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'MyRoom', max_length=50)),
                ('uuid', models.CharField(default=home_logs.utils.unique.get, editable=False, max_length=50, unique=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('x_length', models.DecimalField(decimal_places=2, default=0, help_text=b'Using meters', max_digits=6)),
                ('y_length', models.DecimalField(decimal_places=2, default=0, help_text=b'Using meters', max_digits=6)),
            ],
        ),
        migrations.CreateModel(
            name='SpaceKind',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # Relations are added after all models exist.
        migrations.AddField(
            model_name='space',
            name='kind',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='property.SpaceKind'),
        ),
        migrations.AddField(
            model_name='space',
            name='sensors',
            field=models.ManyToManyField(blank=True, related_name='spaces', to='property.Sensor'),
        ),
        migrations.AddField(
            model_name='sensor',
            name='kind',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='property.SensorKind'),
        ),
        migrations.AddField(
            model_name='house',
            name='spaces',
            field=models.ManyToManyField(blank=True, to='property.Space'),
        ),
    ]
| tsaklidis/LogingAPI | home_logs/property/migrations/0001_initial.py | 0001_initial.py | py | 3,232 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 17,
"usage_type": "call"
},
... |
16514949152 | from django.conf.urls import include, url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^about', views.mail_form),
url(r'^api', views.api),
url(r'^score/today', views.today),
url(r'^score/(?P<date_id>\d{8})/$', views.feedjson),
url(r'^admin/', include(admin.site.urls)),
url(r'^scraper/update', views.update),
url(r'^scraper/refresh', views.refresh),
] | h2r4t/npbapi | npbapi/urls.py | urls.py | py | 439 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
17895710010 | r"""Train toy segmenter model on cityscapes.
"""
# pylint: enable=line-too-long
import ml_collections
batch_size = 128
_CITYSCAPES_TRAIN_SIZE_SPLIT = 146
# Model spec.
STRIDE = 4
mlp_dim = 2
num_heads = 1
num_layers = 1
hidden_size = 1
target_size = (128, 128)
def get_config(runlocal=''):
"""Returns the configuration for Cityscapes segmentation."""
runlocal = bool(runlocal)
config = ml_collections.ConfigDict()
config.experiment_name = 'cityscapes_segmenter_toy_model'
# Dataset.
config.dataset_name = 'cityscapes'
config.dataset_configs = ml_collections.ConfigDict()
config.dataset_configs.target_size = target_size
config.dataset_configs.train_split = 'train[:5%]'
config.dataset_configs.dataset_name = '' # name of ood dataset to evaluate
# Model.
config.model_name = 'segvit'
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = (STRIDE, STRIDE)
config.model.backbone = ml_collections.ConfigDict()
config.model.backbone.type = 'vit'
config.model.backbone.mlp_dim = mlp_dim
config.model.backbone.num_heads = num_heads
config.model.backbone.num_layers = num_layers
config.model.backbone.hidden_size = hidden_size
config.model.backbone.dropout_rate = 0.1
config.model.backbone.attention_dropout_rate = 0.0
config.model.backbone.classifier = 'gap'
# Decoder
config.model.decoder = ml_collections.ConfigDict()
config.model.decoder.type = 'linear'
# Training.
config.trainer_name = 'segvit_trainer'
config.optimizer = 'adam'
config.optimizer_configs = ml_collections.ConfigDict()
config.l2_decay_factor = 0.0
config.max_grad_norm = 1.0
config.label_smoothing = None
config.num_training_epochs = ml_collections.FieldReference(2)
config.batch_size = batch_size
config.rng_seed = 0
config.focal_loss_gamma = 0.0
# Learning rate.
config.steps_per_epoch = _CITYSCAPES_TRAIN_SIZE_SPLIT // config.get_ref(
'batch_size')
# setting 'steps_per_cycle' to total_steps basically means non-cycling cosine.
config.lr_configs = ml_collections.ConfigDict()
config.lr_configs.learning_rate_schedule = 'compound'
config.lr_configs.factors = 'constant * cosine_decay * linear_warmup'
config.lr_configs.warmup_steps = 0
config.lr_configs.steps_per_cycle = config.get_ref(
'num_training_epochs') * config.get_ref('steps_per_epoch')
config.lr_configs.base_learning_rate = 1e-4
# model and data dtype
config.model_dtype_str = 'float32'
config.data_dtype_str = 'float32'
# init not included
# Logging.
config.write_summary = True
config.write_xm_measurements = True # write XM measurements
config.xprof = False # Profile using xprof.
config.checkpoint = True # Do checkpointing.
config.checkpoint_steps = 5 * config.get_ref('steps_per_epoch')
config.debug_train = False # Debug mode during training.
config.debug_eval = False # Debug mode during eval.
config.log_eval_steps = 1 * config.get_ref('steps_per_epoch')
# Evaluation.
config.eval_mode = False
config.eval_configs = ml_collections.ConfigDict()
config.eval_configs.mode = 'standard'
config.eval_covariate_shift = True
config.eval_label_shift = True
if runlocal:
config.count_flops = False
return config
def get_sweep(hyper):
return hyper.product([])
| google/uncertainty-baselines | experimental/robust_segvit/configs/cityscapes/toy_model.py | toy_model.py | py | 3,338 | python | en | code | 1,305 | github-code | 36 | [
{
"api_name": "ml_collections.ConfigDict",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ml_collections.ConfigDict",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ml_collections.ConfigDict",
"line_number": 38,
"usage_type": "call"
},
{
"api... |
21346552302 | from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.contrib import messages
from django.conf import settings
# Create your views here.
# Paypal email id[to donate] :- testingofshopkproject2@gmail.com & password :- Shopk@4994
def home(request):
return render(request, 'home.html')
def donate(request):
if request.method == 'POST':
try:
transaction_completed = True
if transaction_completed:
user_email = request.user.email
subject = 'Donation Confirmation'
message = 'Thank you for your donation!'
from_email = settings.EMAIL_HOST_USER
recipient_list = [user_email]
send_mail(subject, message, from_email, recipient_list, fail_silently=False)
messages.success(request, 'Donation successful. Check your email for confirmation.')
else:
messages.error(request, 'Transaction failed.')
return redirect('donate')
except Exception as e:
messages.error(request, 'An error occurred during the transaction: ' + str(e))
return render(request, 'donate.html')
| Kiran4949/Donation | app/views.py | views.py | py | 1,234 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.EMAIL_HOST_USER",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 27,
"usage_type": "name"
},
{... |
8649453711 | """
============================
Author:柠檬班-木森
Time:2020/5/6 10:03
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
def work1():
"""课堂派登录"""
driver = webdriver.Chrome()
driver.implicitly_wait(30)
driver.get('https://www.ketangpai.com/User/login.html')
driver.find_element(By.XPATH, '//input[@placeholder="邮箱/账号/手机号"]').send_keys('11234')
driver.find_element(By.CSS_SELECTOR, 'input[placeholder="密码"]').send_keys('123454321')
driver.find_element(By.XPATH, '//a[text()="登录"]').click()
time.sleep(5)
driver.quit()
def work2():
"""腾讯客服页面操作"""
driver = webdriver.Chrome()
driver.get('https://kf.qq.com/product/weixin.html')
WebDriverWait(driver, 10, 0.5).until(
EC.element_to_be_clickable((By.XPATH, "//a[text()='手机版微信']"))).click()
WebDriverWait(driver, 10, 0.5).until(
EC.element_to_be_clickable((By.XPATH, "//a[text()='微信群']"))).click()
WebDriverWait(driver, 10, 0.5).until(
EC.element_to_be_clickable((By.XPATH, "//a[text()='微信群创建及设置方法']"))).click()
time.sleep(5)
driver.quit()
def work3():
"""艺龙搜索操作"""
driver = webdriver.Chrome()
driver.implicitly_wait(30)
driver.get('http://www.elong.com/')
city = driver.find_element(By.XPATH, "//div[@id='domesticDiv']//dl[dt/text()='目的地']//input")
city.clear()
# 输入长沙市
city.send_keys('长沙市')
# 等待搜索的元素加载出来,然后再进行选择
# time.sleep(1)
WebDriverWait(driver, 30, 0.5).until(
EC.element_to_be_clickable((By.XPATH, '//ul[@method="cityData"]/li[@data=0]'))).click()
# 输入区域
driver.find_element(By.XPATH, '//input[@placeholder="如位置\酒店名\品牌"]').send_keys('麓谷')
# 点击搜索
driver.find_element(By.XPATH, '//span[@data-bindid="search"]').click()
time.sleep(5)
driver.close()
driver.quit()
def work4():
"""12306"""
driver = webdriver.Chrome()
driver.implicitly_wait(30)
driver.get('https://www.12306.cn/index/')
# 点击往返
WebDriverWait(driver, 10, 0.5).until(
EC.element_to_be_clickable((By.XPATH, '//div[@class="search-tab-hd"]//a[text()="往返"]'))).click()
# 输入起始地
start_addr = driver.find_element(By.ID, "fromStationFanText")
start_addr.click()
# start_addr.clear()
start_addr.send_keys('长沙')
driver.find_element(By.ID, "citem_0").click()
# # 输入终止地
driver.find_element(By.ID, "toStationFanText").send_keys('上海')
driver.find_element(By.ID, "citem_0").click()
# # 点击高铁
WebDriverWait(driver, 10, 0.5).until(
EC.element_to_be_clickable((By.XPATH, '//li[@id="isHigh"]/i'))).click()
# # # 点击搜索
driver.find_element(By.XPATH, '//a[@id="search_two"]').click()
time.sleep(10)
driver.quit()
if __name__ == '__main__':
# work1()
# work2()
# work3()
work4()
| huchaoyang1991/py27_web | web_06day(鼠标和下拉选择框)/task_05day.py | task_05day.py | py | 3,284 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 22,
"usage_type": "attribute"
},
... |
17964305010 | from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import chi2
import numpy as np
import argparse
import sys
import os
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import AdaBoostClassifier
import csv
from sklearn.model_selection import KFold
from scipy import stats
import warnings
#to ignore convergence warning
warnings.filterwarnings('ignore')
def accuracy( C ):
''' Compute accuracy given Numpy array confusion matrix C. Returns a floating point value '''
total = np.sum(C)
correct=0
for i in range(len(C)):
correct = correct+C[i][i]
if total != 0:
acc = correct/total
else:
acc = 0
return acc
def recall( C ):
''' Compute recall given Numpy array confusion matrix C. Returns a list of floating point values '''
lst1 = []
lst2 = []
result = []
for i in range(len(C)):
lst1.append(C[i][i])
lst2.append(np.sum(C[i]))
for i in range(len(C)):
if lst2[i] !=0:
result.append(lst1[i]/lst2[i])
else:
result.append(0)
return result
def precision( C ):
''' Compute precision given Numpy array confusion matrix C. Returns a list of floating point values '''
lst1 = []
lst2 = []
result = []
for i in range(len(C)):
lst1.append(C[i][i])
lst2.append(np.sum(C[:,i]))
for i in range(len(C)):
if lst2[i] != 0 :
result.append(lst1[i]/lst2[i])
else:
result.append(0.0)
return result
def class31(filename):
''' This function performs experiment 3.1
Parameters
filename : string, the name of the npz file from Task 2
Returns:
X_train: NumPy array, with the selected training features
X_test: NumPy array, with the selected testing features
y_train: NumPy array, with the selected training classes
y_test: NumPy array, with the selected testing classes
i: int, the index of the supposed best classifier
'''
print("Doing class31")
iBest = 0
max_acc = 0
line1 = [1]
line2 = [2]
line3 = [3]
line4 = [4]
line5 = [5]
#load file and array
data = np.load(filename)
data = data['arr_0']
#get x and y then split them by using train_test_split
X, y = data[:,0:-1], data[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#1.SVC: support vector machine with a linear kernel.
clf_linear = SVC(kernel='linear',max_iter=1000)
clf_linear.fit(X_train, y_train)
#Perform classification on samples in X.
y_pred_linear = clf_linear.predict(X_test)
svc_linear_matrix = confusion_matrix(y_test, y_pred_linear)
iBest = 1
max_acc = accuracy(svc_linear_matrix)
#2.SVC: support vector machine with a radial basis function (γ = 2) kernel.
clf_rbf = SVC(kernel='rbf',gamma=2,max_iter=1000)
clf_rbf.fit(X_train, y_train)
y_pred_rbf = clf_rbf.predict(X_test)
svc_rbf_matrix = confusion_matrix(y_test, y_pred_rbf)
if accuracy(svc_rbf_matrix) > max_acc:
iBest = 2
max_acc = accuracy(svc_rbf_matrix)
#3.RandomForestClassifier: with a maximum depth of 5, and 10 estimators.
clf_forest = RandomForestClassifier(n_estimators=10, max_depth=5)
clf_forest.fit(X_train,y_train)
y_pred_forest = clf_forest.predict(X_test)
forest_matrix = confusion_matrix(y_test, y_pred_forest)
if accuracy(forest_matrix) > max_acc:
iBest = 3
max_acc = accuracy(forest_matrix)
#4.MLPClassifier: A feed-forward neural network, with α = 0.05.
clf_mlp = MLPClassifier(alpha=0.05)
clf_mlp.fit(X_train,y_train)
y_pred_mlp = clf_mlp.predict(X_test)
mlp_matrix = confusion_matrix(y_test,y_pred_mlp)
if accuracy(mlp_matrix) > max_acc:
iBest = 4
max_acc = accuracy(mlp_matrix)
#5.AdaBoostClassifier: with the default hyper-parameters.
clf_ada = AdaBoostClassifier()
clf_ada.fit(X_train,y_train)
y_pred_ada = clf_ada.predict(X_test)
ada_matrix = confusion_matrix(y_test,y_pred_ada)
if accuracy(ada_matrix) > max_acc:
iBest = 5
max_acc = accuracy(ada_matrix)
#save result to a csv file
line1.append(accuracy(svc_linear_matrix))
line2.append(accuracy(svc_rbf_matrix))
line3.append(accuracy(forest_matrix))
line4.append(accuracy(mlp_matrix))
line5.append(accuracy(ada_matrix))
line1 = line1 + recall(svc_linear_matrix)
line2 = line2 + recall(svc_rbf_matrix)
line3 = line3 + recall(forest_matrix)
line4 = line4 + recall(mlp_matrix)
line5 = line5 + recall(ada_matrix)
line1 = line1 + precision(svc_linear_matrix)
line2 = line2 + precision(svc_rbf_matrix)
line3 = line3 + precision(forest_matrix)
line4 = line4 + precision(mlp_matrix)
line5 = line5 + precision(ada_matrix)
for i in range(len(svc_linear_matrix)):
line1 = line1 + list(svc_linear_matrix[i])
line2 = line2 + list(svc_rbf_matrix[i])
line3 = line3 + list(forest_matrix[i])
line4 = line4 + list(mlp_matrix[i])
line5 = line5 + list(ada_matrix[i])
with open( 'a1_3.1.csv', 'w', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(line1)
writer.writerow(line2)
writer.writerow(line3)
writer.writerow(line4)
writer.writerow(line5)
print(iBest)
print("Class31 done!")
return (X_train, X_test, y_train, y_test,iBest)
def class32(X_train, X_test, y_train, y_test,iBest):
''' This function performs experiment 3.2
Parameters:
X_train: NumPy array, with the selected training features
X_test: NumPy array, with the selected testing features
y_train: NumPy array, with the selected training classes
y_test: NumPy array, with the selected testing classes
i: int, the index of the supposed best classifier (from task 3.1)
Returns:
X_1k: numPy array, just 1K rows of X_train
y_1k: numPy array, just 1K rows of y_train
'''
accuracies=[]
print("Doing class32")
if iBest == 1:
clf = SVC(kernel='linear',max_iter=1000)
if iBest == 2:
clf = SVC(kernel='rbf',gamma=2,max_iter=1000)
if iBest == 3:
clf = RandomForestClassifier(n_estimators=10, max_depth=5)
if iBest == 4:
clf = MLPClassifier(alpha=0.05)
if iBest == 5:
clf = AdaBoostClassifier()
for i in [1000,5000,10000,15000,20000]:
if i == 1000:
X_1k = X_train[:i]
y_1k = y_train[:i]
new_X_test = X_test[:i]
new_y_test = y_test[:i]
new_X_train = X_train[:i]
new_y_train = y_train[:i]
clf.fit(new_X_train, new_y_train)
y_pred = clf.predict(new_X_test)
matrix = confusion_matrix(new_y_test,y_pred)
accuracies.append(accuracy(matrix))
with open( 'a1_3.2.csv', 'w', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(accuracies)
print("Class32 done!")
return (X_1k, y_1k)
def class33(X_train, X_test, y_train, y_test, i, X_1k, y_1k):
''' This function performs experiment 3.3
Parameters:
X_train: NumPy array, with the selected training features
X_test: NumPy array, with the selected testing features
y_train: NumPy array, with the selected training classes
y_test: NumPy array, with the selected testing classes
i: int, the index of the supposed best classifier (from task 3.1)
X_1k: numPy array, just 1K rows of X_train (from task 3.2)
y_1k: numPy array, just 1K rows of y_train (from task 3.2)
'''
print("Doing class33")
k=[5,10,20,30,40,50]
line1 =[]
line2 =[]
line3 =[]
line4 =[]
line5 =[]
line6 =[]
line7 =[]
line8 =[]
#3.3.1
# I tried to use cols_32k = selector.get_support(indices=True)
# Then get selector.pvalues_[cols_32k]
# I found that result is same as what I do (sort pvalues then convert it to a list and find first K p values)
for j in k:
selector = SelectKBest(f_classif, j)
new_X = selector.fit_transform(X_train, y_train)
pp = np.sort(selector.pvalues_)
pp = pp.tolist()
if j == 5:
line1.append(5)
line1 = line1 + pp[:j]
# 3.3.2
if i == 1:
clf = SVC(kernel='linear',max_iter=1000)
if i == 2:
clf = SVC(kernel='rbf',gamma=2,max_iter=1000)
if i == 3:
clf = RandomForestClassifier(n_estimators=10, max_depth=5)
if i == 4:
clf = MLPClassifier(alpha=0.05)
if i == 5:
clf = AdaBoostClassifier()
# for 1K part
new_X = selector.fit_transform(X_1k, y_1k)
clf.fit(new_X,y_1k)
y_pred_1 = clf.predict(selector.transform(X_test))
matrix_1 = confusion_matrix(y_test,y_pred_1)
line7.append(accuracy(matrix_1))
# 3.3.3 (a) get index of 5 features
cols_1k = selector.get_support(indices=True)
print(cols_1k)
# for 32K part
new_X = selector.fit_transform(X_train, y_train)
clf.fit(new_X,y_train)
y_pred_32 = clf.predict(selector.transform(X_test))
matrix_32 = confusion_matrix(y_test,y_pred_32)
line7.append(accuracy(matrix_32))
#3.3.3 (a) get index of 5 features
cols_32k = selector.get_support(indices=True)
print(cols_32k)
#3.3.3(a) find common features
line8.append(list(set(cols_1k) & set(cols_32k)))
if j == 10:
line2.append(10)
line2 = line2 + pp[:j]
if j == 20:
line3.append(20)
line3 = line3 + pp[:j]
if j == 30:
line4.append(30)
line4 = line4 + pp[:j]
if j == 40:
line5.append(40)
line5 = line5 + pp[:j]
if j == 50:
line6.append(50)
line6 = line6 + pp[:j]
with open( 'a1_3.3.csv', 'w', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(line1)
writer.writerow(line2)
writer.writerow(line3)
writer.writerow(line4)
writer.writerow(line5)
writer.writerow(line6)
writer.writerow(line7)
writer.writerow(line8)
print("Class33 done!")
def class34( filename, i ):
''' This function performs experiment 3.4
Parameters
filename : string, the name of the npz file from Task 2
i: int, the index of the supposed best classifier (from task 3.1)
'''
print("Doing class34")
print(i)
X_train_list = []
y_train_list = []
X_test_list = []
y_test_list = []
fold_1 = []
fold_2 = []
fold_3 = []
fold_4 = []
fold_5 = []
p_values = []
#read data and use Kfold to make 5 folds.
data = np.load(filename)
data = data['arr_0']
X, y = data[:,0:-1], data[:,-1]
kf = KFold(n_splits=5,shuffle=True)
for train_index, test_index in kf.split(X):
X_train_list.append(X[train_index])
X_test_list.append(X[test_index])
y_train_list.append(y[train_index])
y_test_list.append(y[test_index])
for k in range(5):
accuracy_list = []
X_train = X_train_list[k]
X_test = X_test_list[k]
y_train = y_train_list[k]
y_test = y_test_list[k]
#1.for clf linear
clf_linear = SVC(kernel='linear',max_iter=1000)
clf_linear.fit(X_train, y_train)
y_pred_linear = clf_linear.predict(X_test)
matrix_linear = confusion_matrix(y_test, y_pred_linear)
accuracy_list.append(accuracy(matrix_linear))
#2.for clf rbf
clf_rbf = SVC(kernel='rbf',gamma=2,max_iter=1000)
clf_rbf.fit(X_train, y_train)
y_pred_rbf = clf_rbf.predict(X_test)
matrix_rbf = confusion_matrix(y_test, y_pred_rbf)
accuracy_list.append(accuracy(matrix_rbf))
#3.for forest
clf_forest = RandomForestClassifier(n_estimators=10, max_depth=5)
clf_forest.fit(X_train,y_train)
y_pred_forest = clf_forest.predict(X_test)
forest_matrix = confusion_matrix(y_test, y_pred_forest)
accuracy_list.append(accuracy(forest_matrix))
#4.for MLP
clf_mlp = MLPClassifier(alpha=0.05)
clf_mlp.fit(X_train,y_train)
y_pred_mlp = clf_mlp.predict(X_test)
mlp_matrix = confusion_matrix(y_test,y_pred_mlp)
accuracy_list.append(accuracy(mlp_matrix))
#5.for AdaBoost
clf_ada = AdaBoostClassifier()
clf_ada.fit(X_train,y_train)
y_pred_ada = clf_ada.predict(X_test)
ada_matrix = confusion_matrix(y_test,y_pred_ada)
accuracy_list.append(accuracy(ada_matrix))
if k == 0:
fold_1 = accuracy_list
if k == 1:
fold_2 = accuracy_list
if k == 2:
fold_3 = accuracy_list
if k == 3:
fold_4 = accuracy_list
if k == 4:
fold_5 = accuracy_list
matrix = np.array([fold_1,fold_2,fold_3,fold_4,fold_5])
a=matrix[:,i-1]
for k in range(5):
if k != i-1 :
b = matrix[:,k]
S = stats.ttest_rel(a, b)
p_values.append(S.pvalue)
with open( 'a1_3.4.csv', 'w', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(fold_1)
writer.writerow(fold_2)
writer.writerow(fold_3)
writer.writerow(fold_4)
writer.writerow(fold_5)
writer.writerow(p_values)
print("Class34 done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="the input npz file from Task 2", required=True)
args = parser.parse_args()
#TODO : complete each classification experiment, in sequence.
X_train, X_test, y_train, y_test,iBest = class31(args.input)
X_1k, y_1k = class32(X_train, X_test, y_train, y_test,iBest)
class33(X_train, X_test, y_train, y_test, iBest, X_1k, y_1k)
class34(args.input, iBest)
| Yangnnnn/Identifying-political-persuasion-on-Reddit | code/a1_classify.py | a1_classify.py | py | 15,122 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_num... |
70003672425 | import asyncio, config, aiohttp
import logging
from .utils import instance_tools
log = logging.getLogger()
class StatHandler:
def __init__(self, bot):
self.bot = bot
self.has_started = 0
async def postloop(self):
if not self.has_started == 1:
self.has_started = 1
while self.has_started:
log.info("Getting all servers.")
log.info("Attempting to update server count.")
i = instance_tools.InstanceTools(self.bot.instances, self.bot.redis)
guilds = await i.get_all_guilds()
log.info("Servers: %s" % guilds)
if self.bot.instance == 0:
async with aiohttp.ClientSession() as cs:
x = await cs.post(
"https://discordbots.org/api/bots/310039170792030211/stats",
json={
"server_count": int(guilds),
"shard_count": self.bot.shard_count
},
headers={
"Authorization": config.dbots_key
}
)
log.info("Posted to discordbots.org, {}".format(await x.json()))
x = await cs.post(
"https://discord.bots.gg/api/v1/bots/310039170792030211/stats",
json={
"guildCount": int(guilds),
"shardCount": self.bot.shard_count
},
headers={
"Authorization": config.dpw_key
}
)
log.info("Posted to discord.bots.gg, {}".format(await x.json()))
await cs.post(
"https://discord.services/api/bots/310039170792030211",
json={
"guild_count": int(guilds)
},
headers={
"Authorization": config.ds_key
}
)
log.info("Posted to discord.services, {}".format(await x.json()))
await cs.post(
"https://lbots.org/api/v1/bots/310039170792030211/stats",
json={
"guild_count": int(guilds),
"shard_count": self.bot.shard_count
},
headers={
"Authorization": config.lbots_key
}
)
log.info("Posted to lbots.org, {}".format(await x.json()))
await asyncio.sleep(1800)
async def on_ready(self):
self.bot.loop.create_task(self.postloop())
def setup(bot):
bot.add_cog(StatHandler(bot))
| harumaki4649/nekobot | modules/unused/stat_handler.py | stat_handler.py | py | 3,136 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "utils.instance_tools.InstanceTools",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "utils.instance_tools",
"line_number": 21,
"usage_type": "name"
},
{
"api_name... |
70876552105 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 13 09:12:14 2022
@author: Santiago Pinzon-Cortes
@contact: sanpinzoncor@unal.edu.co
"""
"""
Functions modified by functions made by Natalia Gomez-Perez (BGS) ngp@nerc.ac.uk
The functions are readers for data from WDC database and INTERMAGNET database
"""
# Libraries
import pandas as pd
import numpy as np
import os
from datetime import datetime
#from dateutil.rrule import rrule, MONTHLY
from dateutil import rrule
# Clean data
def clean_data(Bh):
return [i for i, element in enumerate(np.diff(Bh)) if (abs(element)<250 and abs(element)>0)]
#WDC data reader
def ReadYear_OBS(ye,obs):
datei = datetime(ye,1,1)
datef = datetime(ye+1,1,1)
datea = []
for i in rrule.rrule(rrule.HOURLY, dtstart=datei, until=datef):
a = datea.append(i)
datea = datea[:-1]
wdc_hourpath = '/Users/santiagopinzon/Mac/articles/Dst_proxies/DATA/WDC/'
filepath = os.path.join(os.path.dirname(wdc_hourpath),'wdc_'+obs.upper())
filename = obs.lower()+str(ye)+'.wdc'
full = os.path.join(filepath,filename)
if os.path.exists(full):
#number of chars on data from WDC:
nc=4
with open(full,'r') as csv_file:
df1 = pd.read_csv(csv_file,names=['data'])
if ye>=2000:
df1['YYYY'] = df1.data.str[3:5].astype(int)+2000
else:
df1['YYYY'] = df1.data.str[3:5].astype(int)+1900
df1['MM'] = df1.data.str[5:7].astype(int)
df1['DD'] = df1.data.str[8:10].astype(int)
df1['Component'] = df1.data.str[7].astype(str)
df1['Val'] = df1.data.str[16:21].astype(str)
df1['Vdata'] = df1.data.str[16:].astype(str)
df1['list'] = None
df1['length'] = df1['Vdata'].apply(len)
#print(df.length[0])
X = []
Y = []
H = []
#F = []
#if df1.Component[0]=='X':
for j in range(len(df1.Vdata)):
line = df1.Vdata[j]
#df.list[j] = [line[i:i+nc] for i in range(0, len(line), nc)]
l = np.array([int(line[i:i+nc]) for i in range(0, len(line), nc)])
element = df1.Component[j]
tabB = l[0]
x = l[1:-1]
#dfx = pd.DataFrame()
#dfx2 = pd.DataFrame()
magnitude = x+(tabB*100)
if element=='H':
H.extend(magnitude)
elif element=='X':
X.extend(magnitude)
elif element=='Y':
Y.extend(magnitude)
else: continue
#print(H)
H1 = np.array(H)
X1 = np.array(X)
Y1 = np.array(Y)
H2 = np.sqrt((X1**2)+(Y1**2))
#df1.insert(0,'t',[datetime(df.)])
if len(H1)!=0:
df = pd.DataFrame({'t':datea,'H':H1})
else:
df = pd.DataFrame({'t':datea,'H':H2})
df.index = df.t
df2 = df.copy()
df2['Hc'] = df2['H'].values
df2 = df2.drop(columns=['H'])
cleanh = clean_data(np.asarray(df2['Hc']))
df2 = df2.iloc[cleanh]
dfc = pd.concat((df, df2), axis=1)
dfc = dfc.drop(columns=['H','t'])
return dfc
#Intermagnet Data Reader
def read_Intermagnet(day, obs):
"""
Function to read Intermagnet data.
Input:
day: date
obs: Observatory's IAGA code
Output:
df: Observatory's Dataframe
"""
import datetime as dt
IAGApath = ('/Users/santiagopinzon/Mac/articles/Dst_proxies/DATA/IAGA/')
filepath = (os.path.join(os.path.dirname(IAGApath),day.strftime("%Y"),'IAGA_'+obs.upper()))
filename=obs.lower()+day.strftime("%Y%m%ddmin.min")
full=os.path.join(filepath,filename)
"""
Review observatory format
"""
ofile = open(full)
rfile = ofile.read()
sfile = rfile.split('\n')
hn = sfile.index(' # value of horizontal intensity. |')
hn = hn+1
#if obs.upper()=='HER':
# if day>dt.datetime(2013,12,31):
# hn=24
# else:
# hn=25
# else:
# if day>dt.datetime(2014,12,31):
# hn = 24
# else:
# hn=25
my_parser = lambda x,y : dt.datetime.strptime(x+' '+y,"%Y-%m-%d %H:%M:%S.%f")
df = pd.read_csv(full, sep='\s+',skiprows=hn,
#header=hn,
parse_dates={'DateTime':[0,1]},
date_parser=my_parser, index_col=0)
df=df.where(df<99999.00)
if obs.upper()+'X' in df.columns:
df[obs.upper()+'H']=df.apply(lambda row: -(np.sqrt(row[obs.upper()+'X']**2 + row[obs.upper()+'Y']**2)), axis=1)
df = df.drop(columns='|')
return df
| sanhbk/Dst-index-proxies-LDi | Readers.py | Readers.py | py | 5,270 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.diff",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dateutil.rrule.rrule",... |
1411710184 | import requests
class YaDisk:
base_url = 'https://cloud-api.yandex.net/v1/disk/'
def __init__(self, token):
self.token = token
def get_headers(self):
return {
'Content-Type': 'application/json',
'Authorization': f'OAuth {self.token}'
}
def create_folder(self, folder_name):
yandex_api_url = f'{self.base_url}resources'
headers = self.get_headers()
params = {
'path': folder_name
}
response = requests.put(yandex_api_url, headers=headers, params=params)
if response.status_code == 201:
return True
elif response.status_code == 409:
print('The folder already exists on Yandex.Disk.')
return True
else:
print('Failed to create folder on Yandex.Disk.')
def upload_photo(self, folder_name, file_name, file_url):
yandex_api_url = f'{self.base_url}resources/upload'
headers = self.get_headers()
params = {
'path': f'{folder_name}/{file_name}',
'url': file_url,
'overwrite': False
}
response = requests.post(yandex_api_url, headers=headers, params=params)
data = response.json()
if 'href' in data:
return True
elif response.status_code == 409:
print(f'The file {file_name} already exists on Yandex.Disk.')
else:
print(f'Failed to upload photo to Yandex.Disk: {data}')
| kanadass/photo_backup_cw | ya_disk.py | ya_disk.py | py | 1,510 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.put",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 42,
"usage_type": "call"
}
] |
37298905972 | # -*- coding: utf-8 -*-
import json
import requests
def get_proxy():
response = requests.get('')
res = json.loads(response.text)
if res['code'] == 0:
try:
ip = res['data']['IP']
port = res['data']['PORT']
proxy = {}
proxy['http'] = ip+":"+port
return proxy
except Exception as e:
return None
| LogicJake/bilibili_user | function.py | function.py | py | 393 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 6,
"usage_type": "call"
}
] |
5547014999 | """
Voting 12/05/2023.
Lido V2 (Shapella-ready) protocol upgrade
1. Update `WithdrawalVault` proxy implementation
2. Call `ShapellaUpgradeTemplate.startUpgrade()`
3. Publish new `Lido` implementation in Lido app APM repo
4. Update `Lido` implementation
5. Publish new `NodeOperatorsRegistry` implementation in NodeOperatorsRegistry app APM repo
6. Update `NodeOperatorsRegistry` implementation
7. Publish new `LidoOracle` implementation in LidoOracle app APM repo
8. Update `LidoOracle` implementation to `LegacyOracle`
9. Create new role `STAKING_ROLE_ROLE` and assign to `StakingRouter`
10. Call `ShapellaUpgradeTemplate.finishUpgrade()`
11. Revoke `MANAGE_FEE` role from `Voting`
12. Revoke `MANAGE_WITHDRAWAL_KEY` role from `Voting`
13. Revoke `MANAGE_PROTOCOL_CONTRACTS_ROLE` role from `Voting`
14. Revoke `SET_EL_REWARDS_VAULT_ROLE` role from `Voting`
15. Revoke `SET_EL_REWARDS_WITHDRAWAL_LIMIT_ROLE` role from `Voting`
16. Revoke `DEPOSIT_ROLE` role from old `DepositSecurityModule`
17. Revoke `BURN_ROLE` role from `SelfOwnedStETHBurner`
18. Revoke `ADD_NODE_OPERATOR_ROLE` role from `Voting`
19. Revoke `SET_NODE_OPERATOR_ACTIVE_ROLE` role from `Voting`
20. Revoke `SET_NODE_OPERATOR_NAME_ROLE` role from `Voting`
21. Revoke `SET_NODE_OPERATOR_ADDRESS_ROLE` role from `Voting`
22. Revoke `REPORT_STOPPED_VALIDATORS_ROLE` role from `Voting`
23. Revoke `MANAGE_MEMBERS` role from `Voting`
24. Revoke `MANAGE_QUORUM` role from `Voting`
25. Revoke `SET_BEACON_SPEC` role from `Voting`
26. Revoke `SET_REPORT_BOUNDARIES` role from `Voting`
27. Revoke `SET_BEACON_REPORT_RECEIVER` role from `Voting`
28. Grant `MANAGE_TOKEN_URI_ROLE` role to `Voting`
29. Set `WithdrawalQueueERC721` baseUri to `https://wq-api.lido.fi/v1/nft`
30. Revoke `MANAGE_TOKEN_URI_ROLE` role from `Voting`
31. Fund Gas Funder multisig 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 for deposits with 50 stETH
"""
import time
from typing import Dict, Tuple, Optional
from brownie.network.transaction import TransactionReceipt
from brownie import ShapellaUpgradeTemplate # type: ignore
from utils.agent import agent_forward
from utils.finance import make_steth_payout
from utils.voting import bake_vote_items, confirm_vote_script, create_vote
from utils.repo import (
add_implementation_to_lido_app_repo,
add_implementation_to_nor_app_repo,
add_implementation_to_oracle_app_repo,
)
from utils.kernel import update_app_implementation
from utils.config import (
get_deployer_account,
get_is_live,
contracts,
STAKING_ROUTER,
WITHDRAWAL_VAULT,
WITHDRAWAL_VAULT_IMPL,
SELF_OWNED_STETH_BURNER,
get_priority_fee,
)
from utils.permissions import (
encode_oz_grant_role,
encode_oz_revoke_role,
encode_permission_create,
encode_permission_revoke,
)
# noinspection PyUnresolvedReferences
from utils.brownie_prelude import *
# Content URI: https://github.com/lidofinance/lido-dao/blob/b70881f026096790308d7ac9e277ad7f609c7117/apps/lido/README.md
update_lido_app = {
"new_address": "0x17144556fd3424EDC8Fc8A4C940B2D04936d17eb",
"content_uri": "0x697066733a516d525358415a724632785235726762556445724456364c47746a7151315434415a677336796f586f734d516333",
"id": "0x3ca7c3e38968823ccb4c78ea688df41356f182ae1d159e4ee608d30d68cef320",
"version": (4, 0, 0),
}
# Content URI: https://github.com/lidofinance/lido-dao/blob/b70881f026096790308d7ac9e277ad7f609c7117/apps/node-operators-registry/README.md
update_nor_app = {
"new_address": "0x8538930c385C0438A357d2c25CB3eAD95Ab6D8ed",
"content_uri": "0x697066733a516d54346a64693146684d454b5576575351316877786e33365748394b6a656743755a7441684a6b6368526b7a70",
"id": "0x7071f283424072341f856ac9e947e7ec0eb68719f757a7e785979b6b8717579d",
"version": (4, 0, 0),
}
# Content URI: https://github.com/lidofinance/lido-dao/blob/b70881f026096790308d7ac9e277ad7f609c7117/apps/lidooracle/README.md
update_oracle_app = {
"new_address": "0xa29b819654cE6224A222bb5f586920105E2D7E0E",
"content_uri": "0x697066733a516d575461635041557251614376414d5663716e5458766e7239544c666a57736861736334786a536865717a3269",
"id": "0x8b47ba2a8454ec799cd91646e7ec47168e91fd139b23f017455f3e5898aaba93",
"version": (4, 0, 0),
}
WITHDRAWAL_QUEUE_ERC721_BASE_URI = "https://wq-api.lido.fi/v1/nft"
def encode_template_start_upgrade(template_address: str) -> Tuple[str, str]:
template = ShapellaUpgradeTemplate.at(template_address)
return template.address, template.startUpgrade.encode_input()
def encode_template_finish_upgrade(template_address: str) -> Tuple[str, str]:
template = ShapellaUpgradeTemplate.at(template_address)
return template.address, template.finishUpgrade.encode_input()
def encode_withdrawal_vault_proxy_update(vault_proxy_address: str, implementation: str) -> Tuple[str, str]:
proxy = interface.WithdrawalVaultManager(vault_proxy_address)
return proxy.address, proxy.proxy_upgradeTo.encode_input(implementation, b"")
def encode_withdrawal_queue_base_uri_update(withdrawal_queue_address: str, base_uri: str) -> Tuple[str, str]:
withdrawal_queue = interface.WithdrawalQueueERC721(withdrawal_queue_address)
return withdrawal_queue.address, withdrawal_queue.setBaseURI.encode_input(base_uri)
def start_vote(tx_params: Dict[str, str], silent: bool) -> Tuple[int, Optional[TransactionReceipt]]:
"""Prepare and run voting."""
voting = contracts.voting
node_operators_registry = contracts.node_operators_registry
lido = contracts.lido
legacy_oracle = contracts.legacy_oracle
withdrawal_queue = contracts.withdrawal_queue
call_script_items = [
# 1)
encode_withdrawal_vault_proxy_update(WITHDRAWAL_VAULT, WITHDRAWAL_VAULT_IMPL),
# 2)
encode_template_start_upgrade(contracts.shapella_upgrade_template),
# 3)
add_implementation_to_lido_app_repo(
update_lido_app["version"], update_lido_app["new_address"], update_lido_app["content_uri"]
),
# 4)
update_app_implementation(update_lido_app["id"], update_lido_app["new_address"]),
# 5)
add_implementation_to_nor_app_repo(
update_nor_app["version"], update_nor_app["new_address"], update_nor_app["content_uri"]
),
# 6)
update_app_implementation(update_nor_app["id"], update_nor_app["new_address"]),
# 7)
add_implementation_to_oracle_app_repo(
update_oracle_app["version"], update_oracle_app["new_address"], update_oracle_app["content_uri"]
),
# 8)
update_app_implementation(update_oracle_app["id"], update_oracle_app["new_address"]),
# 9)
encode_permission_create(STAKING_ROUTER, node_operators_registry, "STAKING_ROUTER_ROLE", manager=voting),
# 10)
encode_template_finish_upgrade(contracts.shapella_upgrade_template),
# 11)
encode_permission_revoke(lido, "MANAGE_FEE", revoke_from=voting),
# 12)
encode_permission_revoke(lido, "MANAGE_WITHDRAWAL_KEY", revoke_from=voting),
# 13)
encode_permission_revoke(lido, "MANAGE_PROTOCOL_CONTRACTS_ROLE", revoke_from=voting),
# 14)
encode_permission_revoke(lido, "SET_EL_REWARDS_VAULT_ROLE", revoke_from=voting),
# 15)
encode_permission_revoke(lido, "SET_EL_REWARDS_WITHDRAWAL_LIMIT_ROLE", revoke_from=voting),
# 16)
encode_permission_revoke(lido, "DEPOSIT_ROLE", revoke_from=contracts.deposit_security_module_v1),
# 17)
encode_permission_revoke(lido, "BURN_ROLE", revoke_from=SELF_OWNED_STETH_BURNER),
# 18)
encode_permission_revoke(node_operators_registry, "ADD_NODE_OPERATOR_ROLE", revoke_from=voting),
# 19)
encode_permission_revoke(node_operators_registry, "SET_NODE_OPERATOR_ACTIVE_ROLE", revoke_from=voting),
# 20)
encode_permission_revoke(node_operators_registry, "SET_NODE_OPERATOR_NAME_ROLE", revoke_from=voting),
# 21)
encode_permission_revoke(node_operators_registry, "SET_NODE_OPERATOR_ADDRESS_ROLE", revoke_from=voting),
# 22)
encode_permission_revoke(node_operators_registry, "REPORT_STOPPED_VALIDATORS_ROLE", revoke_from=voting),
# 23)
encode_permission_revoke(legacy_oracle, "MANAGE_MEMBERS", revoke_from=voting),
# 24)
encode_permission_revoke(legacy_oracle, "MANAGE_QUORUM", revoke_from=voting),
# 25)
encode_permission_revoke(legacy_oracle, "SET_BEACON_SPEC", revoke_from=voting),
# 26)
encode_permission_revoke(legacy_oracle, "SET_REPORT_BOUNDARIES", revoke_from=voting),
# 27)
encode_permission_revoke(legacy_oracle, "SET_BEACON_REPORT_RECEIVER", revoke_from=voting),
# 28)
agent_forward([encode_oz_grant_role(withdrawal_queue, "MANAGE_TOKEN_URI_ROLE", grant_to=voting)]),
# 29)
encode_withdrawal_queue_base_uri_update(withdrawal_queue, base_uri=WITHDRAWAL_QUEUE_ERC721_BASE_URI),
# 30)
agent_forward([encode_oz_revoke_role(withdrawal_queue, "MANAGE_TOKEN_URI_ROLE", revoke_from=voting)]),
# 31)
make_steth_payout(
target_address="0x5181d5D56Af4f823b96FE05f062D7a09761a5a53",
steth_in_wei=50 * (10**18),
reference="Fund Gas Funder multisig"
)
]
vote_desc_items = [
"1) Update `WithdrawalVault` proxy implementation",
"2) Call `ShapellaUpgradeTemplate.startUpgrade()",
"3) Publish new implementation in Lido app APM repo",
"4) Updating implementation of Lido app",
"5) Publishing new implementation in Node Operators Registry app APM repo",
"6) Updating implementation of Node Operators Registry app",
"7) Publishing new implementation in Oracle app APM repo",
"8) Updating implementation of Oracle app",
"9) Create permission for STAKING_ROUTER_ROLE of NodeOperatorsRegistry assigning it to StakingRouter",
"10) Finish upgrade by calling `ShapellaUpgradeTemplate.finishUpgrade()`",
"11) Revoke `MANAGE_FEE` role from `Voting`",
"12) Revoke `MANAGE_WITHDRAWAL_KEY` role from `Voting`",
"13) Revoke `MANAGE_PROTOCOL_CONTRACTS_ROLE` role from `Voting`",
"14) Revoke `SET_EL_REWARDS_VAULT_ROLE` role from `Voting`",
"15) Revoke `SET_EL_REWARDS_WITHDRAWAL_LIMIT_ROLE` role from `Voting`",
"16) Revoke `DEPOSIT_ROLE` role from old `DepositSecurityModule`",
"17) Revoke `BURN_ROLE` role from `SelfOwnedStETHBurner`",
"18) Revoke `ADD_NODE_OPERATOR_ROLE` role from `Voting`",
"19) Revoke `SET_NODE_OPERATOR_ACTIVE_ROLE` role from `Voting",
"20) Revoke `SET_NODE_OPERATOR_NAME_ROLE` role from `Voting`",
"21) Revoke `SET_NODE_OPERATOR_ADDRESS_ROLE` role from `Voting`",
"22) Revoke `REPORT_STOPPED_VALIDATORS_ROLE` role from `Voting`",
"23) Revoke `MANAGE_MEMBERS` role from `Voting`",
"24) Revoke `MANAGE_QUORUM` role from `Voting`",
"25) Revoke `SET_BEACON_SPEC` role from `Voting`",
"26) Revoke `SET_REPORT_BOUNDARIES` role from `Voting`",
"27) Revoke `SET_BEACON_REPORT_RECEIVER` role from `Voting`",
"28) Grant `MANAGE_TOKEN_URI_ROLE` role to `Voting`",
"29) Set `WithdrawalQueueERC721` baseUri to `https://wq-api.lido.fi/v1/nft`",
"30) Revoke `MANAGE_TOKEN_URI_ROLE` role from `Voting`",
"31) Fund Gas Funder multisig 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 for deposits with 50 stETH"
]
vote_items = bake_vote_items(vote_desc_items, call_script_items)
return confirm_vote_script(vote_items, silent) and list(create_vote(vote_items, tx_params))
def main():
tx_params = {"from": get_deployer_account()}
if get_is_live():
tx_params["max_fee"] = "300 gwei"
tx_params["priority_fee"] = get_priority_fee()
vote_id, _ = start_vote(tx_params=tx_params, silent=False)
vote_id >= 0 and print(f"Vote created: {vote_id}.")
time.sleep(5) # hack for waiting thread #2.
| lidofinance/scripts | archive/scripts/upgrade_shapella.py | upgrade_shapella.py | py | 12,033 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "brownie.ShapellaUpgradeTemplate.at",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "brownie.ShapellaUpgradeTemplate",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 102,
"usage_type": "name"
},
{
... |
15905142289 | import random
import time
import tweepy
import pandas
import logging
from config import consumer_key, consumer_secret, access_token, access_token_secret
#Declare variables
timer = 10800 # three hours
df = pandas.read_csv('quotes.csv', delimiter='*')
index = df.index
number_of_rows = len(index)
logger = logging.getLogger('MotivatorBot')
hdlr = logging.FileHandler('./motivatorbot.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
# logger.setLevel(logging.info)
# authenticate the consumer key and secret
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
# authentication of access token and secret
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
# Get a follower and add return their tag to add to the string to be sent
def getUser():
follows = []
for follower in tweepy.Cursor(api.followers).items():
follows.append(follower.screen_name)
# Random number to choose user
follow_rand = random.randint(0,(len(follows)-1))
followName = follows[follow_rand]
preString = "Hey @{f} ".format(f=followName)
return preString
# Get a quote from th CSV
def getQuote():
quote_picker = random.randint(0,(number_of_rows - 1))
quote = df['quote'][quote_picker]
author = df['author'][quote_picker]
quoteFormat = "{quote} -{author} ".format(quote=quote, author=author)
return quoteFormat
# This is the main loop.
while True:
print('starting Loop')
quoteString = ''
userChoose = random.randint(0,3)
if userChoose <1:
quoteString += getUser()
quoteString += getQuote()
sleepSecs = random.randint(600,timer)
print(quoteString)
print(sleepSecs)
#Try the API Call. One possible error is 187, which is an error coming from Twitter that limits tweeting the same tweet out multiple times.
# I can't find any documentation from Twitter on the timeframe tht they're looking for, so what I'm doing is going back to the start of
# the loop if I see an error, because the liklihood that it will come back with another quote that will violate Twitter's policies is slim
# to none, and even if it does it will just keep trying quotes until it gets something that works.
try:
api.update_status(status = quoteString)
logging.info('SUCCESS! - %s', quoteString)
except tweepy.TweepError as e:
logging.error(e.reason)
print('Error Code', e.api_code)
print('Reason ', e.reason)
continue
time.sleep(sleepSecs)
| dannymccaslin/MotivatorBot | motivator.py | motivator.py | py | 2,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.Formatt... |
495133127 | from collections import namedtuple
from dagster import check
from dagster.core.definitions.logger import LoggerDefinition
from dagster.core.definitions.pipeline import PipelineDefinition
class InitLoggerContext(
namedtuple('InitLoggerContext', 'logger_config pipeline_def logger_def run_id')
):
'''Logger-specific initialization context.
An instance of this class is made available as the first argument to the ``logger_fn`` decorated
by :py:func:`@logger <logger>` or set on a :py:class:`LoggerDefinition`.
Users should not instantiate this class.
Attributes:
logger_config (Any): The configuration data provided by the environment config. The
schema for this data is defined by ``config_field`` on the :py:class:`LoggerDefinition`
pipeline_def (PipelineDefinition): The pipeline definition currently being executed.
logger_def (LoggerDefinition): The logger definition for the logger being constructed.
run_id (str): The ID for this run of the pipeline.
'''
def __new__(cls, logger_config, pipeline_def, logger_def, run_id):
return super(InitLoggerContext, cls).__new__(
cls,
logger_config,
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition),
check.inst_param(logger_def, 'logger_def', LoggerDefinition),
check.str_param(run_id, 'run_id'),
)
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/execution/context/logger.py | logger.py | py | 1,419 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dagster.check.inst_param",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "dagster.core.definitions.pipeline.PipelineDefinition",
"line_number": 30,
"usage_type": "a... |
38214358765 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 3:15 2019
@author: deepnikajain
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table
from penquins import Kowalski
from Coadding.keypairs import get_keypairs
DEFAULT_AUTHs = get_keypairs()
DEFAULT_AUTH_kowalski = DEFAULT_AUTHs[1]
def query_kowal(name, lowjd, upjd):
k = Kowalski(username=DEFAULT_AUTH_kowalski[0], password=DEFAULT_AUTH_kowalski[1], verbose=False)
qu = {"query_type": "general_search",
"query": "db['ZTF_alerts'].find({'objectId': {'$eq': '%s'}, 'candidate.jd': {'$gt': '%d'}, \
'candidate.jd': {'$lt': '%d'}, {'candidate.jd': 1, 'candidate.fid': 1, \
'candidate.programid': 1, 'candidate.field': 1, 'candidate.magzpsci': 1, 'candidate.magzpsciunc': 1,})"%(name, lowjd, upjd)}
r = k.query(query=qu)
return r
def get_kdata(targetdir, name, r):
if 'result_data' in list(r.keys()):
rdata = r['result_data']
rrdata = rdata['query_result']
n = len(rrdata)
jd = []
fid = []
programid = []
fieldid = []
mag = []
magzp = []
magzp_unc = []
for i in range(n):
if rrdata[i]['candidate']['programid'] == 1:
jd.append(rrdata[i]['candidate']['jd'])
fid.append(rrdata[i]['candidate']['fid'])
programid.append(rrdata[i]['candidate']['programid'])
fieldid.append(rrdata[i]['candidate']['field'])
# mag.append(rrdata[i]['candidate']['magpsf'])
magzp.append(rrdata[i]['candidate']['magzpsci'])
magzp_unc.append(rrdata[i]['candidate']['magzpsciunc'])
jd = np.array(jd)
fid = np.array(fid)
programid = np.array(programid)
fieldid = np.array(fieldid)
mag = np.array(mag)
magzp = np.array(magzp)
magzp_unc = np.array(magzp_unc)
k_data = Table([jd, fid, programid, fieldid, mag, magzp, magzp_unc], \
names = ['jdobs', 'fid', 'programid', 'fieldid', 'mag', 'magzp', 'magzp_unc'])
kdata = k_data.to_pandas()
kdata.to_csv(targetdir + 'data/kowalski_data_' + name + '.csv', index = False, encoding = 'utf8')
else:
print(('Kowalski query is not succesful for %s'%name))
kdata = kdata.drop_duplicates()
kdata.sort_values(['jdobs'], inplace = True)
p = kdata.shape
if 'result_data' in list(r.keys()):
rdata = r['result_data']
rrdata = rdata['query_result']
n = len(rrdata)
jd = []
fid = []
programid = []
fieldid = []
mag = []
magzp = []
magzp_unc = []
for i in range(n):
if rrdata[i]['prv_candidates'] != None:
m = len(rrdata[i]['prv_candidates'])
for j in range(m):
if 'magzpsci' in list(rrdata[i]['prv_candidates'][j].keys()):
if rrdata[i]['prv_candidates'][j]['magpsf'] != None:
if rrdata[i]['prv_candidates'][j]['programid'] == 1:
jd.append (rrdata[i]['prv_candidates'][j]['jd'])
fid.append (rrdata[i]['prv_candidates'][j]['fid'])
programid.append (rrdata[i]['prv_candidates'][j]['programid'])
fieldid.append (rrdata[i]['prv_candidates'][j]['field'])
# mag.append (rrdata[i]['prv_candidates'][j]['magpsf'])
magzp.append (rrdata[i]['prv_candidates'][j]['magzpsci'])
magzp_unc.append (rrdata[i]['prv_candidates'][j]['magzpsciunc'])
jd = np.array(jd)
fid = np.array(fid)
programid = np.array(programid)
fieldid = np.array(fieldid)
mag = np.array(mag)
magzp = np.array(magzp)
magzp_unc = np.array(magzp_unc)
k_data = Table([jd, fid, programid, fieldid, mag, magzp, magzp_unc], \
names = ['jdobs', 'fid', 'programid', 'fieldid', 'mag', 'magzp', 'magzp_unc'])
kdata1 = k_data.to_pandas()
kdata1.to_csv(targetdir + 'data/kowalski_data1_' + name + '.csv', index = False, encoding = 'utf8')
else:
print(('Kowalski query is not succesful for %s'%name))
kdata1 = kdata1.drop_duplicates()
kdata1.sort_values(['jdobs'], inplace = True)
q = kdata1.shape
kdata = kdata.append(kdata1)
kdata = kdata.drop_duplicates()
kdata.sort_values(['jdobs'], inplace = True)
return p, q, kdata
| Deepnika/Assembling-lightcurves-SLSNe | query_kowalski.py | query_kowalski.py | py | 4,748 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Coadding.keypairs.get_keypairs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "penquins.Kowalski",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.ar... |
72166511464 | # -*- coding: utf-8 -*-
from instrument import Instrument
import instruments
import numpy
import types
import logging
class virtual_period(Instrument):
'''
This is the driver to handle period.
'''
def __init__(self, name, pulser):
'''
Initialize the virtual instruments
Input:
name : Name of the virtual instruments
pulser : Name given to the pulser
Output:
None
'''
Instrument.__init__(self, name, tags=['virtual'])
self.add_parameter('period', units='ns', flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET, type=types.FloatType)
self.add_parameter('cooling_time', units='ns', flags=Instrument.FLAG_GETSET, type=types.FloatType)
self.add_parameter('origin', units='ns', flags=Instrument.FLAG_GETSET, type=types.FloatType)
# Defining some stuff
self._instruments = instruments.get_instruments()
self._pulser = self._instruments.get(pulser)
self._cooling_time = 1e3 #in [ns]
self._origin = 0. #in [ns]
self.get_all()
def get_all(self):
'''
Get all parameters of the virtual device
Input:
None
Output:
None
'''
self.get_period()
#########################################################
#
#
# Period
#
#
#########################################################
def do_set_period(self, period):
'''
set the period of the instrument
Input:
period (float): period of the pulser[ns]
Output:
None
'''
logging.info(__name__+' : set the period of the pulser')
self._pulser.set_period(period)
def do_get_period(self):
'''
Get the period of the instrument
Input:
None
Output:
period (float): period of the pulser[ns]
'''
logging.info(__name__+' : Get the period of the pulser')
return float(self._pulser.get_period())
#########################################################
#
#
# cooling time
#
#
#########################################################
def do_set_cooling_time(self, cooling_time=1e3):
'''
Set the cooling_time of the pulser
Input:
cooling_time (float): cooling_time of the pulser [ns]
Output:
None
'''
logging.info(__name__+' : Set the cooling_time of the pulser')
self._cooling_time = cooling_time
period1 = self.get_period()
period = self.get_origin() + self._pulser.get_chA_width()
period = max(period, period1) #added by Remy
self.set_period(period + cooling_time)
def do_get_cooling_time(self):
'''
Get the cooling time
Input:
None
Output:
period (float): cooling time [ns]
'''
logging.info(__name__+' : Get the cooling time')
return float(self._cooling_time)
#########################################################
#
#
# Origin
#
#
#########################################################
def do_set_origin(self, origin=1e3):
'''
Set the origin of the pulses
Input:
origin (float): origin of the pulses [ns]
Output:
None
'''
logging.info(__name__+' : Set the origin of the pulses')
self._origin = origin
oldPeriod = self.get_period()
cooling_time = self.get_cooling_time()
periodA = origin + self._pulser.get_chA_width() + cooling_time
periodC = self._pulser.get_chC_delay() + self._pulser.get_chC_width() + cooling_time
periodD = self._pulser.get_chD_delay() + self._pulser.get_chD_width() + cooling_time
newPeriod = max(periodA, periodC, periodD)
#If the new period is longer than the old,
#We set first the period and next we change the delaies
if newPeriod > oldPeriod:
self.set_period(newPeriod)
boardDelay = self._pulser.get_chB_delay() - self._pulser.get_chA_delay()
self._pulser.set_chA_delay(origin)
self._pulser.set_chB_delay(origin + boardDelay)
else:
boardDelay = self._pulser.get_chB_delay() - self._pulser.get_chA_delay()
self._pulser.set_chA_delay(origin)
self._pulser.set_chB_delay(origin + boardDelay)
self.set_period(newPeriod)
def do_get_origin(self):
'''
Get the origin of the pulses
Input:
None
Output:
period (float): origin of the pulses [ns]
'''
logging.info(__name__+' : Get the origin of the pulses')
return float(self._origin)
| QCoherence/python_drivers | virtual_period.py | virtual_period.py | py | 5,286 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "instrument.Instrument",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "instrument.Instrument.__init__",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "instrument.Instrument",
"line_number": 27,
"usage_type": "name"
},
{
"api_nam... |
34342251813 | # import asyncio
import time
from evdev import InputDevice, categorize, ecodes
source_device = None
target_device = None
# Init dev reference
while source_device is None and target_device is None:
try:
source_device = InputDevice('/dev/input/event1')
target_device = InputDevice('/dev/hidg0')
except Exception as err:
print ("No device - waiting...")
time.sleep (10)
# # Async helper
# async def helper(source_device, target_device):
# async for ev in source_device.async_read_loop():
# print(categorize(ev))
# target_device.write_event(ev)
# # Loop waiting for keystroke
# loop = asyncio.get_event_loop()
# loop.run_until_complete(helper(source_device, target_device))
for ev in source_device.async_read_loop():
print(categorize(ev))
target_device.write_event(ev) | c4software/raspberry-pi-hid-proxy | sample.py | sample.py | py | 804 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "evdev.InputDevice",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "evdev.InputDevice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "evdev.categorize",
... |
72448270504 | import numpy as np
import tensornetwork as tn
np_vec = np.array([[0], [1]])
tn_vec1 = tn.Node(np_vec)
tn_vec2 = tn.Node(np_vec)
# Contracting the first index gives matrix with 1 element
tn_outer = tn.contract(tn_vec1[0] ^ tn_vec2[0])
# Contracting the second index gives matrix
tn_outer1 = tn.contract(tn_vec1[1] ^ tn_vec2[1])
tn_outer2 = tn.contract(tn_vec2[1] ^ tn_vec1[1])
# Matrix multiplication
np_mat1 = np.array([[1, 2], [3, 4]])
np_mat2 = np.array([[0, 1], [1, 0]])
tn_mat1 = tn.Node(np_mat1)
tn_mat2 = tn.Node(np_mat2)
# Multiplying by contracting the edges
mat12 = tn.contract(tn_mat1[1] ^ tn_mat2[0])
# Vector multiplication
vec1 = tn.contract(mat12[0] ^ tn_vec1[0]) # Picks 2nd row.
vec2 = tn.contract(mat12[1] ^ tn_vec1[0]) # Picks 2nd column.
# Quantum gate simulation
qubit = tn.Node(np.array([[0], [1]]))
z = tn.Node(np.array([[1, 0], [0, -1]]))
# _ = z[1] ^ qubit[0]
val = tn.contract(qubit[0] ^ z[1])
# val = qubit @ z
| Zshan0/TensorSimulations | src/dummy/matrix.py | matrix.py | py | 953 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tensornetwork.Node",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tensornetwork.Node",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensornetwork.contract... |
73497717544 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch, numpy as np, os
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
PAD_TOKEN = 50256
# Constants that should not be changed due to some hard-coded values in the original ConvNext-base model
VOCAB_SIZE, N_EMDB, N_POSITIONS = 50257, 768, 1024
class GRN(nn.Module):
""" GRN (Global Response Normalization) layer
"""
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
def forward(self, x):
Gx = torch.norm(x, p=2, dim=(1,2), keepdim=True)
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
return self.gamma * (x * Nx) + self.beta + x
class LayerNorm(nn.Module):
""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class Block(nn.Module):
""" Generator Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
"""
def __init__(self, dim, drop_path=0.):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.grn = GRN(4 * dim)
self.pwconv2 = nn.Linear(4 * dim, dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
class Generator(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, depths=[3, 3, 20, 3], dims=[128, 256, 512, 1024], drop_path_rate=0., head_init_scale=1.):
super().__init__()
# Stem and 3 intermediate downsampling conv layers
self.downsample_layers = nn.ModuleList()
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
# 4 feature resolution stages, each consisting of multiple residual blocks
self.stages = nn.ModuleList()
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.wte = nn.Embedding(VOCAB_SIZE, N_EMDB)
self.wpe = nn.Embedding(N_POSITIONS, N_EMDB)
# Final norm layer (not used in this implementation)
# self.norm = nn.LayerNorm(dims[-1], eps=1e-6)
self.pre_head = nn.ConvTranspose1d(dims[-1], 1024, kernel_size=12, stride=12)
self.head = nn.Linear(self.wte.weight.shape[1], self.wte.weight.shape[0], bias=False)
self.head.weight = self.wte.weight
self.softmax = nn.Softmax(dim=1)
self.apply(self._init_weights)
def pad_indices(self, indices):
if indices.shape[1] < N_POSITIONS:
indices = torch.nn.functional.pad(indices, (0, N_POSITIONS - indices.shape[1]), value=50256)
else:
indices = indices[-N_POSITIONS:]
return indices
def build_image(self, patches):
# patches: (B, 1024, 3, 16, 16)
image = torch.zeros((patches.shape[0], 3, 256, 256)).cuda()
for i in range(16):
for j in range(16):
image[:, :, i*16:(i+1)*16, j*16:(j+1)*16] = patches[:, i*16+j, :, :, :]
return image
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
# Check if bias is present (ignore final logit output layer)
if m.bias is not None:
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
# global average pooling, (N, C, H, W) -> (N, C) (not used in this implementation)
# return self.norm(x.mean([-2, -1]))
# [B, 1024, 8, 8] -> [B, 1024, 64]
x = x.view(x.shape[0], x.shape[1], -1)
x = self.pre_head(x)
return x
def forward(self, input_ids):
# Reverse the order of the tokens
input_ids = torch.flip(input_ids, [1])
# Padd with 50256
input_ids = self.pad_indices(input_ids)
# Prepare the position ids / embeddings
position_ids = torch.arange(0, input_ids.size(-1) + 0, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
# Embeddings and position encoding
x = self.wte(input_ids) + self.wpe(position_ids)
# Reshape to (B, 1024, 3, 16, 16)
x = torch.reshape(x, (x.shape[0], x.shape[1], 3, 16, 16))
# Build an image from the patches
x = self.build_image(x)
# Run though the convnet
x = self.forward_features(x)
# Output logits
x = self.head(x)
return x[:, -1] | Aveygo/WordsAreWorth16x16Pixels | models/pacing_model.py | pacing_model.py | py | 8,183 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
71859082343 | import logging
import os
from io import StringIO
import boto3
import pandas as pd
from botocore.exceptions import ClientError
AWS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET = os.environ.get("AWS_SECRET_ACCESS_KEY")

bucket = "kiwi-bot"
# Object key inside the bucket. This assignment was previously commented out
# while `key` was still used in the get_object() call below, which raised a
# NameError at runtime.
key = "ordersDB.csv"
prefix = "data/"
filename = "https://kiwi-bot.s3.us-east-2.amazonaws.com/ordersDB.csv"
# Raw string: "\d" / "\o" in a normal string are invalid escape sequences.
filepath = r"kiwi_bot\data\order.csv"

# NOTE(review): the public URL above points at us-east-2 while the client is
# created for us-east-1 -- confirm which region the bucket actually lives in.
s3 = boto3.client(
    "s3",
    region_name="us-east-1",
    aws_access_key_id=AWS_KEY_ID,
    aws_secret_access_key=AWS_SECRET,
)

# Stream the CSV object straight from S3 into a DataFrame, then persist a
# local copy.
read_file = s3.get_object(Bucket=bucket, Key=key)
df = pd.read_csv(read_file["Body"])
df.to_csv(r"kiwi_bot\data\order2.csv")
print("File successfully loaded")
| edward0rtiz/clustering-demand-scm | lib/data_engine/s3_get_object.py | s3_get_object.py | py | 717 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_nu... |
2201109453 | import numpy as np
import subprocess
import os
import sys
from Bio import SeqIO
from Bio import PDB
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import PPBuilder
from joblib import Parallel, delayed
import multiprocessing as mp
import time
import re
from joblib import Parallel, delayed
import concurrent.futures
class energy_calculation:
    """FoldX + Rosetta energy pipeline for one TCR:pMHC model PDB.

    The pipeline repairs and analyses the complex with FoldX, relaxes and
    scores the complex / TCR / pMHC halves with Rosetta, and writes all
    per-residue and global energies into a single numpy feature array.
    """

    def __init__(self, model_filename, model_dir):
        self.model_filename = model_filename
        self.model_dir = model_dir
        # "12528_model.pdb" -> "12528"
        self.model_ID = model_filename.replace("_model.pdb", "")
        self.model_path = model_dir + model_filename
        self.origin = "positives"
        self.partition = "1"
        self.model_output_dir = f"/home/projects/ht3_aim/people/idamei/results/energy_calc_full_output/{self.partition}/{self.origin}/{self.model_ID}/"
        #self.model_output_dir = f"/home/projects/ht3_aim/people/idamei/full_output_test/"
        self.numpy_output_dir = f"/home/projects/ht3_aim/people/idamei/results/energy_output_arrays/{self.partition}/{self.origin}/"
        #self.numpy_output_dir = f"/home/projects/ht3_aim/people/idamei/numpy_output_test/"
        # Positives are binders (label 1); any other origin is non-binding.
        if self.origin == "positives":
            self.binder = 1
        else:
            self.binder = 0

    def pipeline(self):
        """Run every step in order, logging failures to stderr without aborting."""
        startstart_time = time.time()
        print("Start " + self.model_filename)
        # Make output directory
        os.makedirs(self.model_output_dir, exist_ok=True)
        os.chdir(self.model_output_dir)
        # Get PDB features (chains, numbering, sequences)
        self.extract_pdb_features()
        # Split pdb into TCR and pMHC halves
        try:
            self.splitPDB()
        except Exception as err:
            print("Splitting PDB failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Run FoldX
        try:
            start_time = time.time()
            self.run_foldx()
            runtime = (time.time() - start_time) / 60
            print("FoldX took {} min".format(runtime))
        except Exception as err:
            print("FoldX failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Extract foldX energies
        try:
            self.extract_foldx_energies()
        except Exception as err:
            print("Extracting foldX energies failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Run Rosetta on the complex and on each half
        try:
            start_time = time.time()
            self.rosetta_scorefile_path_complex, self.rosetta_per_res_scorefile_path_complex = self.run_rosetta(self.model_path)
            runtime = (time.time() - start_time) / 60
            print("Rosetta for complex took {} min".format(runtime))
            start_time = time.time()
            self.rosetta_scorefile_path_tcr, self.rosetta_per_res_scorefile_path_tcr = self.run_rosetta(self.tcr_path)
            runtime = (time.time() - start_time) / 60
            print("Rosetta for TCR took {} min".format(runtime))
            start_time = time.time()
            self.rosetta_scorefile_path_pmhc, self.rosetta_per_res_scorefile_path_pmhc = self.run_rosetta(self.pmhc_path)
            runtime = (time.time() - start_time) / 60
            print("Rosetta for pMHC took {} min".format(runtime))
        except Exception as err:
            print("Rosetta failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Extract Rosetta energies
        try:
            self.rosetta_overall_scores_complex, self.rosetta_per_res_scores_complex = self.extract_rosetta_energies(
                self.rosetta_scorefile_path_complex,
                self.rosetta_per_res_scorefile_path_complex)
            self.rosetta_overall_scores_tcr, self.rosetta_per_res_scores_tcr = self.extract_rosetta_energies(
                self.rosetta_scorefile_path_tcr,
                self.rosetta_per_res_scorefile_path_tcr)
            self.rosetta_overall_scores_pmhc, self.rosetta_per_res_scores_pmhc = self.extract_rosetta_energies(
                self.rosetta_scorefile_path_pmhc,
                self.rosetta_per_res_scorefile_path_pmhc)
        except Exception as err:
            print("Extracting Rosetta energies failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Create the numpy feature array
        try:
            self.create_output()
        except Exception as err:
            print("Creating output failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        runtime = (time.time() - startstart_time) / 60
        print("{} took {} min".format(self.model_ID, runtime))

    def run_foldx(self):
        """Run FoldX RepairPDB (if not already done) then AnalyseComplex."""
        # RepairPDB -- skipped when a repaired PDB already exists
        if not os.path.exists(self.model_filename.replace(".pdb", "_Repair.pdb")):
            repair_command = "foldx --command=RepairPDB --pdb={} --ionStrength=0.05 --pH=7 --water=CRYSTAL --vdwDesign=2 --out-pdb=1 --pdbHydrogens=false --output-dir={}".format(self.model_filename, self.model_output_dir)
            subprocess.run(repair_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_dir)
        # AnalyseComplex on the repaired structure
        repaired_pdb_path = self.model_filename.replace(".pdb", "_Repair.pdb")
        analyse_command = "foldx --command=AnalyseComplex --pdb={} --output-dir={}".format(repaired_pdb_path,
                                                                                          self.model_output_dir)
        subprocess.run(analyse_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)

    def extract_foldx_energies(self):
        """Parse pairwise chain interaction energies from the FoldX .fxout file."""
        self.interaction_file_path = self.model_output_dir + "Interaction_" + self.model_filename.replace(".pdb",
                                                                                                          "_Repair_AC.fxout")
        foldx_output = open(self.interaction_file_path, "r")
        foldx_interaction_energies = dict()
        for line in foldx_output:
            # Data lines start with "./"; columns are tab-separated.
            if line.startswith("./"):
                splitted_line = line.split("\t")
                group1 = splitted_line[1]
                group2 = splitted_line[2]
                interaction_energy = splitted_line[6]
                foldx_interaction_energies[group1 + group2] = float(interaction_energy)
        foldx_output.close()
        self.foldx_interaction_energies = foldx_interaction_energies

    def run_rosetta(self, infile):
        """Relax `infile` with Rosetta, then score it globally and per residue.

        Returns (scorefile_path, per_residue_scorefile_path).
        """
        # Relaxation
        rosetta_relax_command = "relax.default.linuxgccrelease \
                                 -ignore_unrecognized_res \
                                 -nstruct 1 \
                                 -s {} \
                                 -out:path:pdb {}".format(infile, self.model_output_dir)
        subprocess.run(rosetta_relax_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
        # Scoring -- the relaxed structure gets a "_0001" suffix
        result = re.search(r'/([^/]+)$', infile)
        infilename = result.group(1)
        relaxed_pdb_path = self.model_output_dir + infilename.replace(".pdb", "_0001.pdb")
        rosetta_scorefile_path = self.model_output_dir + infilename + "_score.sc"
        rosetta_score_command = "score_jd2.linuxgccrelease \
                                 -in:file:s {} \
                                 -out:file:scorefile {}".format(relaxed_pdb_path, rosetta_scorefile_path)
        subprocess.run(rosetta_score_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
        # Per residue scoring
        rosetta_per_res_scorefile_path = self.model_output_dir + infilename + "_per_residue_score.sc"
        rosetta_per_res_score_command = "per_residue_energies.linuxgccrelease \
                                         -in:file:s {} \
                                         -out:file:silent {}".format(relaxed_pdb_path, rosetta_per_res_scorefile_path)
        subprocess.run(rosetta_per_res_score_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
        return rosetta_scorefile_path, rosetta_per_res_scorefile_path

    def extract_rosetta_energies(self, rosetta_scorefile_path, rosetta_per_res_scorefile_path):
        """Read the global score line and the per-residue score table.

        Returns (overall_scores: list of 24 floats,
                 per_res_scores: dict chain -> {residue number -> 20 floats}).
        """
        # Rosetta overall energies: skip two header lines, read the data line.
        # SCORE: total_score score dslf_fa13 fa_atr fa_dun fa_elec fa_intra_rep fa_intra_sol_xover4 fa_rep fa_sol hbond_bb_sc hbond_lr_bb hbond_sc hbond_sr_bb linear_chainbreak lk_ball_wtd omega overlap_chainbreak p_aa_pp pro_close rama_prepro ref time yhh_planarity description
        rosetta_scorefile = open(rosetta_scorefile_path, "r")
        rosetta_scorefile.readline()
        rosetta_scorefile.readline()
        line = rosetta_scorefile.readline()
        splitted_line = line.strip().split()
        rosetta_overall_scores = splitted_line[1:-1]  # 24 elements
        rosetta_overall_scores = [float(x) for x in rosetta_overall_scores]
        rosetta_scorefile.close()
        # Rosetta per residue energies
        # SCORE: pose_id pdb_id fa_atr fa_rep fa_sol fa_intra_rep fa_intra_sol_xover4 lk_ball_wtd fa_elec pro_close hbond_sr_bb hbond_lr_bb hbond_bb_sc hbond_sc dslf_fa13 omega fa_dun p_aa_pp yhh_planarity ref rama_prepro score description
        rosetta_per_res_scorefile = open(rosetta_per_res_scorefile_path, "r")
        rosetta_per_res_scores = {"M": {}, "P": {}, "A": {}, "B": {}}
        for line in rosetta_per_res_scorefile:
            splitted_line = line.strip().split()
            if splitted_line[1] == "pose_id":
                continue
            pdb_id = splitted_line[2]  # e.g. "123A": position + chain letter
            chain = pdb_id[-1]
            position = int(pdb_id[:-1])
            rosetta_per_res_scores[chain][position] = [float(x) for x in splitted_line[3:-1]]  # 20 elements
        # Bug fix: this used to call rosetta_scorefile.close() a second time,
        # leaking the per-residue file handle.
        rosetta_per_res_scorefile.close()
        return rosetta_overall_scores, rosetta_per_res_scores

    def create_output(self):
        """Assemble the per-residue feature matrix and save it as .npy.

        Columns (146 total):
          0-19    one-hot amino acid
          20-23   chain indicator (M, P, TCRA, TCRB)
          24-43   Rosetta per-residue energies, complex
          44-63   Rosetta per-residue energies, pMHC or TCR half
          64-69   FoldX pairwise interaction energies (MP, MA, MB, PA, PB, AB)
          70-93   Rosetta total energies, complex
          94-117  Rosetta total energies, TCR
          118-141 Rosetta total energies, pMHC
          142     binder label
          143-145 one-hot origin (10X / swapped / positives)
        """
        output_array = np.zeros(shape=(self.total_length, 146))
        k1 = 0  # chain counter
        k2 = 0  # residue counter across the whole complex
        for chain in self.sequences:
            sequence = self.sequences[chain]
            k1 += 1
            k3 = 0  # residue counter within the current chain
            for aminoacid in sequence:
                number = self.numbering[chain][k3]
                output_array[k2, 0:20] = self.oneHot(aminoacid)
                if chain == "M":
                    output_array[k2, 20:24] = np.array([1, 0, 0, 0])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["M"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_pmhc["M"][number]
                if chain == "P":
                    output_array[k2, 20:24] = np.array([0, 1, 0, 0])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["P"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_pmhc["P"][number]
                if chain == "A":
                    output_array[k2, 20:24] = np.array([0, 0, 1, 0])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["A"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_tcr["A"][number]
                if chain == "B":
                    output_array[k2, 20:24] = np.array([0, 0, 0, 1])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["B"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_tcr["B"][number]
                output_array[k2, 64:70] = list(self.foldx_interaction_energies.values())
                output_array[k2, 70:94] = self.rosetta_overall_scores_complex
                output_array[k2, 94:118] = self.rosetta_overall_scores_tcr
                output_array[k2, 118:142] = self.rosetta_overall_scores_pmhc
                output_array[k2, 142] = self.binder
                if self.origin == "tenx_negatives":
                    output_array[k2, 143:146] = np.array([1, 0, 0])
                elif self.origin == "swapped_negatives":
                    # NOTE(review): identical to the else branch below; swapped
                    # negatives were probably meant to be [0, 1, 0] -- verify.
                    output_array[k2, 143:146] = np.array([0, 0, 1])
                else:
                    output_array[k2, 143:146] = np.array([0, 0, 1])
                k2 += 1
                k3 += 1
        np.save(file=self.numpy_output_dir + self.model_ID + ".npy", arr=output_array)

    def extract_pdb_features(self):
        """Read chain names, residue numbering and sequences from the model PDB."""
        # Chain names and residue numbering from the raw ATOM records.
        pdb_file = open(self.model_path, "r")
        numbering = {"M": [], "P": [], "A": [], "B": []}
        chain_names = []
        old_number = 0
        old_chain = ""
        for line in pdb_file:
            splitted_line = line.split()
            if splitted_line[0] != "ATOM":
                continue
            chain = splitted_line[4]
            if chain != old_chain:
                chain_names.append(chain)
                old_chain = chain
            new_number = splitted_line[5]
            if new_number != old_number:
                numbering[chain].append(int(new_number))
                old_number = new_number
        # Bug fix: the file handle was previously never closed.
        pdb_file.close()
        # Sequences; peptide order is assumed to match chain order in the file.
        structure = PDBParser().get_structure('', self.model_path)
        ppb = PPBuilder()
        chain_sequences = {}
        i = 0
        for pp in ppb.build_peptides(structure):
            chain_name = chain_names[i]
            chain_sequences[chain_name] = str(pp.get_sequence())
            i += 1
        self.chain_names = chain_names
        self.sequences = chain_sequences
        self.numbering = numbering
        self.length_A = len(chain_sequences["A"])
        self.length_B = len(chain_sequences["B"])
        self.length_M = len(chain_sequences["M"])
        self.length_P = len(chain_sequences["P"])
        self.total_length = self.length_P + self.length_M + self.length_B + self.length_A

    @staticmethod
    def oneHot(residue):
        """Return a length-20 one-hot vector for a standard amino acid.

        Unknown residue codes map to the all-zero vector.
        """
        mapping = dict(zip("ACDEFGHIKLMNPQRSTVWY", range(20)))
        if residue in "ACDEFGHIKLMNPQRSTVWY":
            return np.eye(20)[mapping[residue]]
        else:
            return np.zeros(20)

    @staticmethod
    def selectChain(ifn, ofn, chainID):
        """Saves selected chains from PDB in a new PDB"""
        parser = PDB.PDBParser()
        structure = parser.get_structure('x', ifn)

        class ChainSelector():
            """Biopython Select-style filter keeping only `chainID` chains."""

            def __init__(self, chainID=chainID):
                self.chainID = chainID

            def accept_chain(self, chain):
                if chain.get_id() in self.chainID:
                    return 1
                return 0

            def accept_model(self, model):
                return 1

            def accept_residue(self, residue):
                return 1

            def accept_atom(self, atom):
                return 1

        sel = ChainSelector(chainID)
        io = PDB.PDBIO()
        io.set_structure(structure)
        io.save(ofn, sel)

    def splitPDB(self):
        """Write the TCR (chains A+B) and pMHC (chains M+P) halves to disk."""
        self.tcr_path = self.model_output_dir + "TCR.pdb"
        self.pmhc_path = self.model_output_dir + "pMHC.pdb"
        self.selectChain(ifn=self.model_path, ofn=self.tcr_path, chainID=["A", "B"])
        self.selectChain(ifn=self.model_path, ofn=self.pmhc_path, chainID=["M", "P"])
def worker(model_filename):
    """Run the full energy-calculation pipeline for one model PDB file."""
    instance = energy_calculation(model_filename, model_dir)
    instance.pipeline()


origin = "positives"
partition = "1"
model_dir = f"/home/projects/ht3_aim/people/idamei/results/models/{partition}/{origin}/"

# List candidate model files. os.listdir avoids spawning an external `ls`
# process and parsing its whitespace-separated stdout; sorted() reproduces
# ls's alphabetical ordering.
models = sorted(os.listdir(model_dir))
# Drop directory entries that are not model PDBs.
for non_model in ("molecules", "rotabase.txt"):
    if non_model in models:
        models.remove(non_model)

#pool = mp.Pool(40)
#pool.map(worker, [model for model in models])
#pool.close()
# Previously processed slices, kept for reference: models[108:118], models[118:]
models_slice = ["12528_model.pdb"]
Parallel(n_jobs=20)(delayed(worker)(model) for model in models_slice)
| alschaap/master-thesis | scripts/energy_calc_pipeline.py | energy_calc_pipeline.py | py | 16,561 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 49,
... |
70183485543 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from decouple import config
def main():
    """Run administrative tasks with the configuration matching ENVIROMENT."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'senda.settings')
    enviroment = config("ENVIROMENT")
    # Map the environment name to a django-configurations class; anything
    # unrecognized falls back to the local development configuration.
    configuration_for = {
        "staging": 'Staging',
        "preview": 'Preview',
        "production": 'Production',
    }
    DJANGO_CONFIGURATION = configuration_for.get(enviroment, 'Development')
    os.environ.setdefault('DJANGO_CONFIGURATION', DJANGO_CONFIGURATION)
    from django.conf import settings
    try:
        from configurations.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| UNPSJB/SendaAlquiler | backend/manage.py | manage.py | py | 1,129 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.setdefault",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "decouple.config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ.setd... |
25282880574 | import entrypoints
import mimetypes
def open(*args, **kwargs):
    """
    Dispatch to a compatible PIMS reader.

    The first positional argument is assumed to be a file buffer, filename,
    or filename glob; it selects the reader, and every argument is then
    forwarded to the reader's constructor.
    """
    reader_cls = _dispatch(args[0])
    return reader_cls(*args, **kwargs)
def _dispatch(file):
    """Return the reader class registered for *file*'s MIME type.

    Raises DispatchError when the MIME type cannot be determined or when no
    reader entrypoint is registered for it.
    """
    # Ensure mimetypes is initialized, pulling from the operating system's
    # MIME type registry.
    mimetypes.init()
    if isinstance(file, str):
        # Strings are treated as filenames or filename globs.
        name = file
    else:
        # Otherwise expect a file buffer carrying a 'name' attribute.
        # Non-file-based buffers (e.g. StringIO) have no name; peeking at the
        # first bytes to infer a type from the file signature would be a
        # future alternative that works on any buffer.
        try:
            name = file.name
        except AttributeError:
            raise DispatchError(
                "Expected a filename or file buffer with a 'name' attribute.")
    mimetype, _ = mimetypes.guess_type(name)
    if mimetype is None:
        raise DispatchError(f"Could not detect MIME type of {file}")
    try:
        entrypoint = entrypoints.get_single('TBD.readers', mimetype)
    except entrypoints.NoSuchEntryPoint:
        raise DispatchError(f"No PIMS reader found for MIME type {mimetype}")
    return entrypoint.load()
class PIMSError(Exception):
    """Base class for all exceptions raised directly by PIMS."""
    ...
class DispatchError(PIMSError):
    """Raised when no suitable PIMS reader can be selected for an input."""
    ...
| danielballan/pims2-prototype | pims/__init__.py | __init__.py | py | 1,750 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "mimetypes.init",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "entrypoints.... |
6267092868 | # -*- coding: utf-8 -*-
import scrapy
import re
import datetime
from scrapy.http import Request
from urllib import parse
from ..items import JobBoleArticleItem
from ..utils.common import get_md5
class JobboleSpider(scrapy.Spider):
    """Crawl jobbole.com article listings and extract article detail fields."""

    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com']
    #start_urls = ['http://blog.jobbole.com/licai/zq/164942.html']

    def parse(self, response: scrapy.http.TextResponse):
        """
        1. Extract article URLs from the listing page and hand them to scrapy
           for download; each downloaded detail page goes to parse_detail.
        2. Extract the next listing page URL and hand it back to parse.
        """
        # Schedule the next AJAX listing page (pages 1..10).
        next_page = response.meta.get("next_page", 0) + 1
        if next_page <= 10:
            next_url = f"http://blog.jobbole.com/kaifadou/snews-getajax.php?next={next_page}"
            yield Request(url=next_url, meta={"next_page": next_page}, callback=self.parse)
        # Parse every article link on the listing page and schedule the
        # detail pages for download.
        post_nodes = response.css(".zhicheng_news_list a")
        for post_node in post_nodes:
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            # parse.urljoin handles both relative and absolute hrefs.
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={"front_image_url": parse.urljoin(response.url, image_url)},
                          callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract the concrete article fields from a detail page."""
        article_item = JobBoleArticleItem()
        front_image_url = response.meta.get("front_image_url", "")
        title = response.css(".ship_wrap h2::text").extract()[0]
        create_date = response.css(".meta span::text").extract()[0]
        content = response.css(".wen_article p::text").extract()
        content = "".join(content)
        article_item["url_object_id"] = get_md5(response.url)
        article_item["title"] = title
        try:
            # Parse the scraped date string into a date. Bug fix: the original
            # called strftime on a str (which always raised AttributeError),
            # so every article silently fell back to today's date.
            create_date = datetime.datetime.strptime(create_date, "%Y/%m/%d").date()
        except Exception:
            create_date = datetime.datetime.now().date()
        article_item["create_date"] = create_date
        article_item["url"] = response.url
        article_item["front_image_url"] = [front_image_url]
        article_item["content"] = content
        yield article_item
| jasonxu510/scrapypro | ArticleSpider/ArticleSpider/spiders/jobbole.py | jobbole.py | py | 3,319 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "scrapy.http",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "scrapy.http.Request",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scrapy.http.R... |
44448619471 | from pathlib import Path
import sqlite3
from datetime import datetime
from pymongo import ASCENDING
import pandas as pd
from utils.many_utils import PATH, get_collection
def inserisci_scadenza(
    st,
    nome,
    data_scadenza,
    dettagli,
    importo,
    stato,
    categoria,
    frequenza,
    notifiche=False,
):
    """Insert a new deadline row into the user's SQLite database.

    `notifiche` is effectively "how many days before the deadline the user
    wants to be notified"; `stato` == "Completata" marks the row as done.
    """
    db_path = Path(PATH, f"utente_{st.session_state['user']}.db")
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    row = (
        nome,
        datetime.now().strftime("%Y-%m-%d"),  # data_inserimento
        data_scadenza,
        None,  # data_completamento: not completed yet
        dettagli,
        importo,
        categoria,
        frequenza,  # e.g. monthly / yearly / weekly
        notifiche,
        1 if stato == "Completata" else 0,  # completata flag
    )
    cursor.execute(
        """
        INSERT INTO scadenze (nome, data_inserimento, data_scadenza, data_completamento, dettagli, importo, categoria, frequenza, notifiche, completata)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        row,
    )
    conn.commit()
    conn.close()
def get_scadenze(st):
    """Return every deadline for the current user as a pandas DataFrame.

    Rows are ordered by ascending due date (both in SQL and again in the
    returned frame).
    """
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()
    c.execute("SELECT * FROM scadenze ORDER BY data_scadenza")
    scadenze = c.fetchall()
    cols = [
        "id",
        "nome",
        "data_inserimento",
        "data_scadenza",
        "data_completamento",
        "dettagli",
        "importo",
        "categoria",
        "frequenza",
        "notifiche",
        "completata",
    ]
    conn.close()
    # (A leftover no-argument st.write() debug call was removed here.)
    return pd.DataFrame(scadenze, columns=cols).sort_values(
        "data_scadenza", ascending=True
    )
def aggiorna_stato_scadenza(st, id_scadenza):
    """Mark the deadline with the given id as completed today.

    Bug fix: the first statement passed `(id_scadenza)` as SQL parameters,
    which is just a parenthesized scalar, not a 1-tuple; sqlite3 then either
    raises or, for string ids, binds each character separately. The correct
    form is `(id_scadenza,)`.
    """
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()
    c.execute("UPDATE scadenze SET completata = 1 WHERE id = ?", (id_scadenza,))
    c.execute(
        "UPDATE scadenze SET data_completamento = ? WHERE id = ?",
        (datetime.strftime(datetime.now(), "%Y-%m-%d"), id_scadenza),
    )
    conn.commit()
    conn.close()
def elimina_scadenza(st, id_scadenza):
    """Delete the deadline with the given id from the user's database.

    Bug fix: SQL parameters must be a 1-tuple `(id_scadenza,)`; the original
    `(id_scadenza)` is just a parenthesized scalar.
    """
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()
    c.execute("DELETE FROM scadenze WHERE id = ?;", (id_scadenza,))
    conn.commit()
    conn.close()
def genera_body_scadenza(s):
    """Render one deadline record (a dict-like row) as a multi-line string."""
    done_label = "Si" if s["completata"] == 1 else "No"
    return f"\
ID: {s['_id']} - {s['nome']}, \n\
Categoria: {s['categoria']},\n\
Inserita il: {s['data_inserimento']}\n\
Scadenza: {s['data_scadenza']},\n\
Dettagli: {s['dettagli']},\n\
Importo: {s['importo']},\n\
Completata: {done_label}\n\
Data completamento: {s['data_completamento']}\
"
######### Mongo
def get_scadenze_mongo(st, mongo_uri):
    """Return every deadline for the current user from MongoDB as a DataFrame."""
    collection = get_collection(
        st.session_state["user"],
        "scadenze",
        mongo_uri,
        mongo_db="solexiv_db",
    )
    documents = list(collection.find().sort("data_scadenza", ASCENDING))
    columns = [
        "_id",
        "nome",
        "data_inserimento",
        "data_scadenza",
        "data_completamento",
        "dettagli",
        "importo",
        "categoria",
        "frequenza",
        "notifiche",
        "completata",
    ]
    frame = pd.DataFrame(documents, columns=columns)
    # Sort again client-side to guarantee ascending due-date order.
    return frame.sort_values("data_scadenza", ascending=True)
def inserisci_scadenza_mongo(
    st,
    nome,
    data_scadenza,
    dettagli,
    importo,
    stato,
    categoria,
    frequenza,
    notifiche=False,
):
    """Insert a new deadline document into the user's MongoDB collection.

    Always returns True and shows a toast on success; `stato` == "Completata"
    marks the document as done.
    """
    collection = get_collection(
        st.session_state["user"],
        "scadenze",
        st.session_state["mongo_uri"],
        mongo_db="solexiv_db",
    )
    document = {
        "nome": nome,
        "data_inserimento": datetime.now().strftime("%Y-%m-%d"),
        "data_scadenza": data_scadenza.strftime("%Y-%m-%d"),
        "data_completamento": None,
        "dettagli": dettagli,
        "importo": importo,
        "categoria": categoria,
        "frequenza": frequenza,
        "notifiche": notifiche,
        "completata": stato == "Completata",
    }
    collection.insert_one(document)
    st.toast("Scadenza inserita correttamente.")
    return True
def aggiorna_stato_scadenza_mongo(st, id_scadenza):
    """Mark the MongoDB deadline with the given _id as completed today."""
    collection = get_collection(
        st.session_state["user"],
        "scadenze",
        st.session_state["mongo_uri"],
        mongo_db="solexiv_db",
    )
    document = collection.find_one({"_id": id_scadenza})
    if document is None:
        st.error("Scadenza non trovata.")
        return
    document["completata"] = True
    document["data_completamento"] = datetime.now().strftime("%Y-%m-%d")
    collection.update_one({"_id": id_scadenza}, {"$set": document})
    st.toast("Stato della scadenza aggiornato correttamente.")
def elimina_scadenza_mongo(st, id_scadenza):
    """Delete the MongoDB deadline with the given _id, reporting the outcome."""
    collection = get_collection(
        st.session_state["user"],
        "scadenze",
        st.session_state["mongo_uri"],
        mongo_db="solexiv_db",
    )
    outcome = collection.delete_one({"_id": id_scadenza})
    if outcome.deleted_count > 0:
        st.toast("Scadenza eliminata correttamente.")
    else:
        st.error("Scadenza non trovata.")
| piopy/solexiv | src/logica_applicativa/Scadenze.py | Scadenze.py | py | 6,066 | python | it | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "utils.many_utils.PATH",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "datetime.datet... |
15974641193 | # *-* coding: utf-8 *-*
"""
Created on mar 23 fév 2021 09:14:21 UTC
@author: vekemans
"""
import math as mt
import numpy as np
from math import pi as π
from numpy.fft import fft,fftshift,ifft
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
nfig = 1
def dft(func, start, end, N, order=1):
    """
    Differentiate `func` spectrally on [start, end) with N sample points.

    Returns (x, u_dot): the equispaced sample points and the `order`-th
    derivative of `func` at them, obtained by multiplying the (shifted)
    Fourier coefficients by (i*k)**order and transforming back.
    """
    h = (end - start) / N
    x = np.arange(start, end, step=h)
    # Forward transform, zero frequency shifted to the center so the
    # wavenumbers line up with k = -N/2 .. N/2-1.
    u_hat = fftshift(fft(func(x)))
    k = np.arange(-N / 2, N / 2)
    # Differentiation in Fourier space: each mode picks up (i*k)**order.
    u_dot = ifft(fftshift((1j * k) ** order * u_hat)).real
    return x, u_dot
def gaussian(x, μ=0, σ=1):
    """Normal probability density with mean μ and standard deviation σ."""
    normalization = 1 / mt.sqrt(2 * π * σ ** 2)
    return normalization * np.exp(-(x - μ) ** 2 / (2 * σ ** 2))
def dgaussian(x, μ=0, σ=1):
    """First derivative of the Gaussian density with mean μ and std σ.

    Chain rule: d/dx exp(-(x-μ)²/(2σ²)) brings down the factor -2(x-μ)/(2σ²).
    """
    return -2 * (x - μ) / (2 * σ ** 2) * gaussian(x, μ=μ, σ=σ)
if __name__=='__main__':
    # Convergence study: max error of the spectral first derivative of a
    # Gaussian for N = 2**2 .. 2**14 sample points on [0, 2π).
    m = np.arange(2,15)
    Nvec = 2**m
    error = np.empty(m.shape)
    μ = π
    σ = 0.5
    f = lambda x: gaussian(x, μ=μ,σ=σ)
    for (i,N) in enumerate(Nvec):
        x,u_dot = dft(f, 0,2*π,N, order=1)
        # Analytic derivative, for comparison against the spectral one.
        uprime = -2*(x-μ)/(2*σ**2) * f(x)
        error[i] = np.abs(uprime-u_dot).max()
    # Plot the Gaussian and its derivative (at the last, finest N).
    fig = plt.figure(nfig)
    plt.plot(x,f(x), 'k-', label=r'$u$')
    plt.plot(x,uprime, 'k--', label=r"$u'$")
    plt.xlabel(r'$x$')
    # plt.title('Gaussian function and its derivative')
    plt.legend()
    plt.tight_layout()
    fig.savefig('../figs/gaussian_function.pdf')
    nfig += 1
    # Log-log error plot: spectral accuracy shows as super-algebraic decay.
    fig = plt.figure(nfig)
    plt.loglog(Nvec,error, 'k-o', markersize=4, lw=0)
    plt.xlabel(r'$N$')
    plt.ylabel('Error')
    # plt.title('Convergence of spectral method for differentiation')
    plt.grid(which='major', linestyle='--', linewidth=0.5)
    plt.grid(which='minor', linestyle=':', linewidth=0.25)
    plt.tight_layout()
    fig.savefig('../figs/spectral_convergence.pdf')
    nfig += 1
    # Variable-coefficient advection u_t + c(x) u_x = 0 with a Gaussian
    # initial condition, time-stepped with leapfrog and spectral u_x.
    N = 2**8
    x = np.arange(0,2*π, step=2*π/N)
    u = gaussian(x, μ=π,σ=σ)
    # u = np.exp(-100*(x-1)**2)
    c = 0.2 + np.power(np.sin(x-1),2)
    tmax = 8.
    tplot = .02
    plotgap = int(tplot/0.001)  # sub-steps between stored snapshots
    dt = tplot/plotgap
    Nplots = int(tmax/tplot)
    data = np.zeros((Nplots,N))
    data[0,:] = u; u_old = u
    time = np.arange(Nplots)/Nplots * tmax
    for i in range(Nplots):
        for n in range(plotgap):
            # Spectral derivative of the current state.
            u_hat = fftshift(fft(u))
            u_dot_hat = (1j*np.arange(-N/2,N/2)) * u_hat
            u_dot = ifft(fftshift(u_dot_hat)).real
            # Leapfrog step (forward Euler for the very first step).
            u_new = u_old - 2*dt*c*u_dot if i>0 else u_old - dt*c*u_dot
            # NOTE(review): `u` itself is never advanced to `u_new` here,
            # so the derivative is always taken at the initial state --
            # looks like a leapfrog bookkeeping bug; verify against the
            # intended scheme (u_old = u; u = u_new).
            u_old = u
        data[i,:] = u_new
    # Animated snapshot sequence, saved as a GIF.
    fig = plt.figure(nfig)
    u_plt, = plt.plot([], [], 'k-')
    u_txt = plt.text(π, 0.9, '', ha='center', fontsize=10)
    plt.xlim([x[0],x[-1]])
    plt.ylim([0,1])
    plt.xlabel(r'$x$', fontsize=10)
    plt.ylabel(r'$u$', fontsize=10)
    # plt.title(r'$u_t + c(x) u_x = 0 \qquad u^0 = \mathcal{N}(\pi,0.5)$', fontsize=12)
    plt.tight_layout()
    nfig += 1
    def animate(t):
        # Frame t: show the stored snapshot and the simulation clock.
        u_plt.set_data(x, data[t,:])
        u_txt.set_text('Current time : t = %.2f [s]' %(t*tplot))
        return u_plt,u_txt,
    anim = animation.FuncAnimation(fig, animate, Nplots, interval=100*tplot, blit=True)
    writer = animation.PillowWriter(fps=25)
    anim.save('../figs/gaussian_animation.gif', writer=writer)
    # Space-time "waterfall" view of all snapshots in 3D.
    fig = plt.figure(nfig)
    ax = plt.axes(projection='3d')
    for i in range(Nplots):
        t = time[i]*np.ones(x.shape)
        ax.plot3D(x,t, data[i,:], color='tab:blue')
    # X,T = np.meshgrid(x,time)
    # print(x.shape,time.shape,X.shape,data.shape)
    # ax.plot_surface(X,T, data, cmap='w')
    ax.set_xlim([x[0],x[-1]])
    ax.set_ylim([0,tmax])
    ax.set_zlim([0,4])
    ax.set_xlabel('X-axis')
    ax.set_ylabel('Time-axis')
    ax.set_zlabel(r'$u$')
    ax.view_init(40,-70)
    fig.tight_layout()
    fig.savefig('../figs/gaussian_convection.pdf')
    nfig += 1
    # -- Show figures
    plt.show()
| abbarn/lmeca2300 | homeworks/fft3.py | fft3.py | py | 4,093 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.fft.fftshift",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.fft.fft",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"lin... |
14575147176 | from collections import defaultdict
from sys import stdin
def rec(graph, v, visited, visited2):
    """Count start->end cave paths where one small cave may be entered twice.

    `visited` holds small caves seen once, `visited2` the (at most one) small
    cave seen twice; both sets are restored before returning so siblings in
    the recursion see a clean state.
    """
    if v == 'end':
        return 1
    small = v.islower()
    if small:
        target = visited2 if v in visited else visited
        target.add(v)
    total = 0
    for neighbor in graph[v]:
        # Big caves are always allowed; a small cave may be re-entered only
        # while no cave has been doubled yet, and 'start' never.
        allowed = (
            not neighbor.islower()
            or (neighbor != 'start'
                and (neighbor not in visited or not visited2))
        )
        if allowed:
            total += rec(graph, neighbor, visited, visited2)
    if small:
        if v in visited2:
            visited2.remove(v)
        else:
            visited.remove(v)
    return total
# Build an undirected adjacency map from "a-b" edge lines read on stdin.
graph = defaultdict(set)
for line in stdin:
    a, b = line.strip().split('-')
    graph[a].add(b)
    graph[b].add(a)
# Count all start->end paths (one small cave may be visited twice).
print(rec(graph, 'start', set(), set()))
| vfolunin/archives-solutions | Advent of Code/2021/12.2.py | 12.2.py | py | 758 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 32,
"usage_type": "name"
}
] |
570245906 | import logging
import math
from .geomsmesh import geompy
from .triedreBase import triedreBase
O, OX, OY, OZ = triedreBase()
def ellipsoideDefaut(minRad, allonge):
    """Build the ellipsoid block containing the crack.

    The block is centred at the origin and contains the elliptical torus of
    the crack.

    @param minRad: minor radius
    @param allonge: ratio major radius / minor radius
    @return ellipsoid (geomObject)
    """
    logging.info("start")
    # Radius-2 sphere, rotated a quarter turn around OY, then partitioned by
    # a large face so the resulting solids can be re-fused into one shape.
    sphere = geompy.MakeRotation(geompy.MakeSphereR(2), OY, math.pi / 2.0)
    cut_face = geompy.MakeFaceHW(100, 100, 3)
    partition = geompy.MakePartition([sphere], [cut_face], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
    pieces = geompy.ExtractShapes(partition, geompy.ShapeType["SOLID"], True)
    fused = pieces[0]
    for piece in pieces[1:]:
        fused = geompy.MakeFuse(fused, piece)
    # Anisotropic scaling; the Y factor caps the ellipsoid's elongation.
    ellipsoide = geompy.MakeScaleAlongAxes(fused, O, minRad, minRad * (allonge + 2.0) / 2.0, minRad)
    #geompy.addToStudy( ellipsoide, 'ellipsoide' )
    return ellipsoide
| luzpaz/occ-smesh | src/Tools/blocFissure/gmu/ellipsoideDefaut.py | ellipsoideDefaut.py | py | 1,044 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "triedreBase.triedreBase",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "geomsmesh.geompy.MakeSphereR",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "geom... |
71778077224 | import xml.etree.ElementTree as Tree
import unittest
import os
# Clear Screen
def clear_screen():
    """Clear the terminal with the platform-appropriate shell command."""
    if os.name == 'posix':  # Linux / macOS
        command = 'clear'
    elif os.name in ('nt', 'dos', 'ce'):  # Windows
        command = 'CLS'
    else:
        # Unknown platform: do nothing, matching the original behavior.
        return
    os.system(command)
class ETLTool:
    """Interactive menu-driven XML extract/transform/load helper for a
    product catalog file."""
    # Constructor
    def __init__(self):
        # Parsed ElementTree; stays None until parse_xml() succeeds.
        self.tree = None
    # Function to read an XML file and load the elements.
    # Since the presence of the file is tested here, we don"t test it elsewhere.
    def parse_xml(self, file_path):
        """Parse *file_path* into self.tree; print (not raise) on failure."""
        try:
            self.tree = Tree.parse(file_path)
        except Exception as e:
            # Deliberate best-effort: report the error and leave
            # self.tree unchanged so the menu keeps working.
            print(f"Error: {e}")
    # Function to save the XML file after data manipulation.
    def save_xml(self, save_path):
        """Write the current tree to *save_path*; no-op when nothing is loaded."""
        if self.tree is not None:
            self.tree.write(save_path)
# Modified price of items in a category.
def modify_price(self, category, increase):
root = self.tree.getroot()
for prod in root.findall(f"./product[@category='{category}']"):
price = float(prod.find("price").text)
new_price = price * (1 + increase/100)
prod.find("price").text = str(new_price)
# Rename a category to another one.
def rename_category(self, old_name, new_name):
root = self.tree.getroot()
for prod in root.findall(f"./product[@category='{old_name}']"):
prod.set("category", new_name)
# Removes products below a certain rating.
def remove_products(self, category, min_rating):
root = self.tree.getroot()
for prod in root.findall(f"./product[@category='{category}']"):
if float(prod.find("rating").text) < min_rating:
root.remove(prod)
    # Outputs the report on the CLI
    def generate_report(self):
        """Print per-category product counts and summed prices to stdout."""
        report = {}
        root = self.tree.getroot()
        for prod in root.findall("product"):
            category = prod.attrib["category"]
            price = float(prod.find("price").text)
            # Initialise the bucket on first sight of the category.
            report[category] = report.get(category, {"count": 0, "total_price": 0})
            report[category]["count"] += 1
            report[category]["total_price"] += price
        for k, v in report.items():
            print(f"\nCategory: {k}")
            print(f"Total Product Count: {v['count']}")
            print(f"Total Price: {v['total_price']}")
# Runs the UI for the tool as a menu based interface.
def run_tool(self):
while True:
clear_screen()
print("\nWelcome to the Product ETL Tool")
print("1: Load XML File")
print("2: Increase Price By Percent")
print("3: Rename Category")
print("4: Remove Products Below Minimum Rating")
print("5: Save Changes to New File")
print("6: Generate Report on CLI")
print("7: Exit")
select = input("Enter your choice here (Number): ")
LOAD_ERR = "Please load an XML file first by selecting option 1."
# Menu based CLI
if select == "1":
file_path = input("Enter the relative path to the XML file: ")
self.parse_xml(file_path)
elif select == "2":
# Check if tree is filled with a loaded XML. If not, we first need to load it (1)
if self.tree is None:
print(LOAD_ERR)
break
category = input("Enter the category name: ")
percentage = float(input("Enter the percentage increase (number only): "))
self.modify_price(category, percentage)
elif select == "3":
if self.tree is None:
print(LOAD_ERR)
break
old_name = input("Enter the current category name: ")
new_name = input("Enter the new category name: ")
self.rename_category(old_name, new_name)
elif select == "4":
if self.tree is None:
print(LOAD_ERR)
break
category = input("Enter the category name: ")
min_rating = float(input("Enter the minimum rating: "))
self.remove_products(category, min_rating)
elif select == "5":
if self.tree is None:
print(LOAD_ERR)
break
save_path = input("Enter the path to save the XML file: ")
self.save_xml(save_path)
elif select == "6":
if self.tree is None:
print(LOAD_ERR)
break
self.generate_report()
elif select == "7":
print("\nGoodbye!")
break
input("\nClick any key to proceed") # Breakpoint before next menu appears
if __name__ == "__main__":
# Init new tool
etl_tool = ETLTool()
# Run the tool
etl_tool.run_tool() | rkaushik29/xml_etl | etl_tool.py | etl_tool.py | py | 5,037 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.name",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 10,
... |
12730428499 | """
The main, core program. A CLI program.
For now: whilst this is just a CLI program, you must edit any key settings with the file:
C:\Potts' Software\Fortnite Exploit\fe-sett.ini
Changing this file is permitted, I'm not the best at efficient programming. Work away.
Written by Elliot Potts,
https://www.elliotpotts.me/
"""
import os
import time
import keyboard
import ConfigParser
local_settings = {
'hotkey': None,
'audio_mute': None,
'space_delay': None,
'bool_temp': None
}
cPar = ConfigParser.SafeConfigParser()
def spacer(amount):
    """Pause briefly, then print *amount* blank lines to separate output.

    Fix: the original printed ``"" * amount`` — the empty string repeated
    is still the empty string, so exactly one blank line appeared no
    matter what *amount* was. Repeat a newline instead.
    """
    time.sleep(1)
    print("\n" * amount, end="")
def settingsDigest():
    """Load (or create) fe-sett.ini and populate local_settings.

    On a failed read the file is deleted and the program exits; defaults
    are regenerated on the next launch. (Python 2 module: ConfigParser.)
    """
    if os.path.isdir("C:\Potts' Software\Fortnite Exploit"):
        pass
    else:
        os.makedirs("C:\Potts' Software\Fortnite Exploit")
    os.chdir("C:\Potts' Software\Fortnite Exploit")
    if os.path.isfile("fe-sett.ini"):
        try:
            settRead = cPar.read("fe-sett.ini")
            local_settings['hotkey'] = cPar.get("settings", "Hotkey")
            local_settings['space_delay'] = cPar.get("settings", "ParachuteDelay")
        except:
            # Corrupt/unreadable config: remove it and bail out.
            print(" [-] Error reading configuration file. Regenerating...")
            os.remove("fe-sett.ini")
            spacer(3)
            print(" [-] Configuration file removed. It will be regenerated when the application is next launched.")
            quit()
    else:
        # First run: write defaults, then read them back.
        cfgFile = open("fe-sett.ini", "w")
        cPar.add_section("settings")
        cPar.set("settings", "Hotkey", "V")
        cPar.set("settings", "ParachuteDelay", "0.5")
        cPar.write(cfgFile)
        cfgFile.close()
        settRead = cPar.read("fe-sett.ini")
        local_settings['hotkey'] = cPar.get("settings", "Hotkey")
        local_settings['space_delay'] = cPar.get("settings", "ParachuteDelay")
settingsDigest()
def startProgram(mode):
    """Run the exploit loop until CTRL+C.

    mode 1 = spam space while the hotkey is held;
    mode 2 = press space, then wait the configured delay between presses.
    """
    print("Program activated. Press {} to activate glitch.".format(str(local_settings['hotkey'])))
    print("Press CTRL+C to TERMINATE the program. Release {} to stop the glitch.".format(str(local_settings['hotkey'])))
    spacer(2)
    if mode == 1:
        while True:
            # print("DBG: ACTV")
            if keyboard.is_pressed(local_settings['hotkey']):
                print("Activation key is pressed. Exploitin'.")
                keyboard.press_and_release('space')
            else:
                # print("DBG: ACTV3")
                pass
    elif mode == 2:
        while True:
            if keyboard.is_pressed(local_settings['hotkey']):
                print("Activation key is pressed. Exploitin'.")
                keyboard.press_and_release('space')
                # Configurable wait between deploys (fe-sett.ini).
                time.sleep(float(local_settings['space_delay']))
    else:
        print(" [-] Invalid function call. Report this. Terminating.")
        quit()
def main():
    """Show the menu and dispatch on the user's choice (Python 2: raw_input)."""
    spacer(10)
    print("""Your settings are as follows:
1). Your hotkey is set to: {},
2). Your timed delay setting is: {}
You may change these values manually or by using the menu function.""".format(str(local_settings['hotkey']),
                                                                              str(local_settings['space_delay'])))
    spacer(10)
    print("""Please chose a menu option to get started:
1). Modify Hotkey
2). Display settings
--------------------------------------
3). Start program with spam mode (can be either really useful or really bad, play around)
4). Start program with timed mode (custom wait period between each glider deploy, see the config file/settings)
--------------------------------------
5). Quit program""")
    try:
        getMenuChoice = int(raw_input(" >> Enter your choice: "))
    except ValueError:
        print("You have entered an invalid input.")
        main()
    # NOTE(review): if the ValueError branch ran, getMenuChoice is unbound
    # here once the recursive main() returns — likely NameError; confirm.
    if getMenuChoice == 1:
        print("Your hotkey is currently: {}, enter what you would like it to be changed to and press enter.".format(str(
            local_settings['hotkey']
        )))
        getHotKeyChange = raw_input("Enter your hotkey: ")
        # Persist the new hotkey and re-read it into local_settings.
        cfgFile = open(r"C:\Potts' Software\Fortnite Exploit\fe-sett.ini", "w")
        cPar.set("settings", "Hotkey", getHotKeyChange)
        cPar.write(cfgFile)
        cfgFile.close()
        settRead = cPar.read(r"C:\Potts' Software\Fortnite Exploit\fe-sett.ini")
        local_settings['hotkey'] = cPar.get("settings", "Hotkey")
        print("Your hotkey has been changed to {} successfully.".format(str(local_settings['hotkey'])))
        print("Returning to the main menu...")
        spacer(10)
        main()
    elif getMenuChoice == 2:
        spacer(10)
        main()
    elif getMenuChoice == 3:
        startProgram(1)
    elif getMenuChoice == 4:
        startProgram(2)
    elif getMenuChoice == 5:
        print("Program is being terminated via menu choice. Bye!")
        spacer(10)
        quit()
    else:
        print(" [-] Invalid input. Chose a number from the menu. Restarting.")
        main()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print(" [-] The program has been manually terminated via the keyboard...")
| vbuckgartuit/Fornite-Parachute-Exploit | cli/main.py | main.py | py | 5,224 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ConfigParser.SafeConfigParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
40729675478 | from typing import Dict, List, Optional
from fuzzly.models.post import PostId, PostIdValidator
from fuzzly.models.tag import TagGroupPortable
from fuzzly.models.user import UserPortable
from pydantic import BaseModel
class LookupRequest(BaseModel) :
    """Request body carrying an optional tag name to look up."""
    tag: Optional[str]
class TagsRequest(BaseModel) :
    """Request to apply a list of tags to a post."""
    _post_id_converter = PostIdValidator
    post_id: PostId
    tags: List[str]
class RemoveInheritance(BaseModel) :
    """Request to break a parent->child tag inheritance link."""
    parent_tag: str
    child_tag: str
class InheritRequest(RemoveInheritance) :
    """Inheritance request; optionally deprecates the child tag."""
    deprecate: Optional[bool] = False
class UpdateRequest(BaseModel) :
    """Partial update of a tag; every field is optional."""
    name: Optional[str]
    group: Optional[TagGroupPortable]
    owner: Optional[str]
    description: Optional[str]
    deprecated: Optional[bool] = None
class TagPortable(str) :
    """A tag name carried as a plain string subtype."""
    pass
class TagGroups(Dict[TagGroupPortable, List[TagPortable]]) :
    """Mapping of tag group -> list of tag names in that group."""
    pass
class Tag(BaseModel) :
    """Public tag representation returned by the API."""
    tag: str
    owner: Optional[UserPortable]
    group: TagGroupPortable
    deprecated: bool
    inherited_tags: List[TagPortable]
    description: Optional[str]
    count: int
class InternalTag(BaseModel) :
    """Internal tag record (owner as numeric id, group as raw string)."""
    tag: str
    owner: Optional[int]
    group: str
    deprecated: bool
    inherited_tags: List[str]
    description: Optional[str]
| kheina-com/tagger | models.py | models.py | py | 1,142 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "fuzzly.models.po... |
14017935134 | '''
this module makes helix curve
'''
import math
import maya.cmds as cmds
def helix(radius, pitch, sr, sp, ncvs,*args):
    '''
    create helix curve

    Builds a degree-3 curve whose points spiral around the Y axis; the
    radius and pitch are multiplied by sr / sp at every step, so values
    other than 1.0 make the helix taper or stretch. *args absorbs the
    extra argument Maya passes to button callbacks.
    '''
    deg = 3
    # NOTE(review): spas and knots are computed but never used.
    spas = ncvs - deg
    knots = ncvs + deg -1
    points = []
    points.append((radius, 0, 0.5))
    #cmds.joint(p=(0,0,0))
    # d == 1 keeps every sample; raise it to decimate the curve.
    d = 1
    for i in range(ncvs):
        radius = radius*sr
        pitch = pitch*sp
        x = radius * math.cos(i)
        y = pitch * i
        z = -radius * math.sin(i)
        if i%d == 0:
            points.append((x, y, z))
    cmds.curve(d=3, p=points)
def do_helix(*args):
    """Button callback: read the UI fields and build the helix curve."""
    radius = cmds.floatField("radius", value=True, q=True)
    pitch = cmds.floatField("pitch", value=True, q=True)
    sr = cmds.floatField("sr", value=True, q=True)
    sp = cmds.floatField("sp", value=True, q=True)
    ncv = cmds.intField("ncv", value=True, q=True)
    helix(radius, pitch, sr, sp, ncv)
def do_helix_UI():
    '''
    main rondom copy function

    Builds a two-column Maya window with the helix parameters and a
    DoHelix button wired to do_helix.
    '''
    cmds.window()
    cmds.rowColumnLayout(numberOfColumns=2)
    cmds.text(label="radius")
    cmds.floatField("radius", value=3)
    cmds.text(label="pitch")
    cmds.floatField("pitch", value=0.4)
    cmds.text(label="sr")
    cmds.floatField("sr", value=1.0)
    cmds.text(label="sp")
    cmds.floatField("sp", value=1.0)
    cmds.text(label="ncv")
    cmds.intField("ncv", value=20)
    cmds.text(label="execute")
    cmds.button(label="DoHelix", command=do_helix)
    cmds.showWindow()
# Build the window immediately when the script is run.
do_helix_UI()
{
"api_name": "math.cos",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "maya.cmds.curve",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 26,
... |
27787353861 | #v.2.0.0
import json, os, time
import resources.config_server as config
from resources.lib.xlogger import Logger
from resources.lib.blasters import *
from resources.lib.websocket_server import WebsocketServer
class Main:
    """IguanaIR blaster websocket server.

    Fix: incoming messages longer than 200 characters were truncated
    BEFORE json.loads(), corrupting any long command payload. The
    truncation now applies only to the logged copy.
    """

    def __init__( self, thepath ):
        """Start IguanaIR Blaster Server."""
        self.ROOTPATH = os.path.dirname( thepath )
        self.LW = Logger( logfile=os.path.join( self.ROOTPATH, 'data', 'logs', 'server.log' ),
                          numbackups=config.Get( 'logbackups' ), logdebug=config.Get( 'debug' ) )
        self.LW.log( ['script started'], 'info' )
        self.WAIT_BETWEEN = config.Get( 'wait_between' )
        # Simple busy flag so only one IR command sequence runs at a time.
        self.CMDRUNNING = False
        self.SERVER = WebsocketServer( config.Get( 'ws_port' ), host=config.Get( 'ws_ip' ) )
        self.SERVER.set_fn_new_client( self._new_client )
        self.SERVER.set_fn_client_left( self._client_left )
        self.SERVER.set_fn_message_received( self._message_received )
        self.SERVER.run_forever()
        self.LW.log( ['script finished'], 'info' )

    def _new_client( self, client, server ):
        """Log a new websocket client connection."""
        self.LW.log( ['Client connected'] )

    def _client_left( self, client, server ):
        """Log a websocket client disconnect."""
        self.LW.log( ['Client disconnected'] )

    def _message_received(self, client, server, message ):
        """Parse a JSON command message and forward it to the blaster."""
        # Truncate only the LOGGED copy; the full message is still parsed.
        logmsg = message if len(message) <= 200 else message[:200] + '..'
        self.LW.log( ['Client said: %s' % logmsg] )
        jm = json.loads( message )
        blaster = self._pick_blaster( jm )
        if not blaster:
            self.LW.log( ['invalid blaster type configured in settings, not sending any commands'], 'info' )
        else:
            # Wait for any in-flight command sequence to finish.
            while self.CMDRUNNING:
                time.sleep( 1 )
                self.LW.log( ['checking to see if previous command has completed'], 'info' )
            self.CMDRUNNING = True
            self.LW.log( ['sending commands on to %s' % jm.get( 'blaster' )], 'info' )
            loglines = blaster.SendCommands( jm.get( 'commands' ) )
            self.LW.log( loglines )
            self.CMDRUNNING = False

    def _pick_blaster( self, jm ):
        """Return the blaster implementation named in the message, or None."""
        if jm.get( 'blaster' ) == 'iguanair':
            return iguanair.Blaster( keypath=os.path.join( self.ROOTPATH, 'data', 'keys' ), key_ext=config.Get( 'key_ext' ),
                                     path_to_igc=config.Get( 'path_to_IGC' ), irc=jm.get( 'irc' ), wait_between=self.WAIT_BETWEEN )
        else:
            return None
| pkscout/iguana-blaster | resources/lib/server.py | server.py | py | 2,443 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "resources.lib.xlogger.Logger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.joi... |
74552235943 | import json
import datetime, time
import itertools
import pyverdict
import decimal
import os
import multiprocessing
from multiprocessing import Queue
from common import util
import pandas as pd
import numpy as np
import queue
import threading
from threading import Thread
#logger = logging.getLogger("idebench")
class IDEBenchDriver:
# def init(self, options, schema, driver_arg):
# pass
#
# def workflow_start(self):
# print("workflow start")
# pass
#
# def workflow_end(self):
# #os.system("/usr/local/pgsql/bin/pg_ctl stop -D ~/xdb_data")
# #os.system('sudo -b bash -c "echo 1 > /proc/sys/vm/drop_caches"')
# #os.system("/usr/local/pgsql/bin/pg_ctl start -D ~/xdb_data")
# pass
#def can_execute_online(self, sql_statement):
# return (not " or " in sql_statement.lower()) and (not " AVG(" in sql_statement)
def verdictdbedit(self, sql_statement):
sql_statement=sql_statement.replace('FROM movies','FROM public.movies_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
sql_statement=sql_statement.replace('FROM flights','FROM public.flights_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
sql_statement=sql_statement.replace('FROM weather','FROM public.weather_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
#print("SQL:",sql_statement)
return sql_statement.lower()
    def create_connection(self):
        """Open a pyverdict connection to the Postgres evaluation database."""
        connection = pyverdict.postgres(host=self.config['host'], user='crossfilter', password=self.config['password'], port=self.config['port'], dbname='crossfilter-eval-db')
        connection.set_loglevel("ERROR")
        return connection
    def init(self, options, schema, driver_arg):
        """Reset driver state and load VerdictDB settings from JSON config."""
        self.time_of_latest_request = 0
        self.isRunning = False
        # LIFO: the newest request is served first; stale ones are dropped.
        self.requests = queue.LifoQueue()
        # NOTE(review): the config file is read twice from two different
        # relative paths (cwd vs. module-relative) — confirm intended.
        with open("verdictdb.config.json","r") as f:
            self.verdictdbconfig = json.load(f)
        self.config = json.load(open(os.path.join(os.path.dirname(__file__),'..','verdictdb.config.json')))
    def execute_vizrequest(self, viz_request, options, schema, result_queue):
        """Run one visualization query via VerdictDB and post the result.

        On any query error an empty result is posted instead of raising,
        so the benchmark keeps running.
        """
        viz = viz_request.viz
        sql_statement = viz.get_computed_filter_as_sql(schema)
        #calculate connection time
        # get a connection from the pool - block if non is available
        # connection = self.pool.get()
        connection=self.conn
        viz_request.start_time = util.get_current_ms_time()
        try:
            editedSqlStatement = self.verdictdbedit(sql_statement)
            #print(editedSqlStatement)
            data = connection.sql(editedSqlStatement)
        except Exception as e:
            # Best-effort: record an empty result and keep the worker alive.
            print(e, flush=True)
            viz_request.result = {}
            viz_request.margins = {}
            viz_request.end_time = util.get_current_ms_time()
            result_queue.put(viz_request)
            return
        viz_request.end_time = util.get_current_ms_time()
        # put connection back in the queue so the next thread can use it.
        #cursor.close()
        #connection.close()
        #connection=self.create_connection()
        #self.pool.put(connection)
        # Convert the result frame into {bin_key: value}.
        # NOTE(review): the loop variable `i` is reused (shadowed) by the
        # two inner loops, and bin_width is computed but unused — confirm.
        results = {}
        for i, row in data.iterrows():
            keys = []
            if row[0] is None:
                continue
            for i, bin_desc in enumerate(viz_request.viz.binning):
                if "width" in bin_desc:
                    bin_width = bin_desc["width"]
                    keys.append(str(int(row[0])))
                else:
                    keys.append(str(row[0]).strip())
            key = ",".join(keys)
            row = list(row)
            # Decimal values are not JSON-friendly; coerce to float.
            for i, r in enumerate(row):
                if isinstance(r, decimal.Decimal):
                    row[i] = float(r)
            results[key] = row[1]
        viz_request.result = results
        #viz_request.margins = margins
        viz_request.margins = {}
        result_queue.put(viz_request)
        print("delivering...")
    def process_request(self, viz_request, options, schema, result_queue):
        """Enqueue a request; the worker thread started in workflow_start()
        consumes it from the LIFO queue."""
        self.requests.put((viz_request, options, schema, result_queue))
    def process(self):
        """Worker loop: pop the newest request and execute it.

        Requests older than the newest one already processed are marked
        dropped (no longer needed by the UI) and returned unexecuted.
        Runs until workflow_end() clears isRunning, then closes the
        shared connection.
        """
        # while the workflow is running, pop the latest request from the stack and execute it
        while self.isRunning:
            try:
                request = self.requests.get(timeout=1)
                viz_request = request[0]
                options = request[1]
                schema = request[2]
                result_queue = request[3]
                # only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
                if viz_request.expected_start_time < self.time_of_latest_request:
                    viz_request.dropped = True
                    result_queue.put(viz_request)
                    continue
                self.time_of_latest_request = viz_request.expected_start_time
                self.execute_vizrequest(viz_request, options, schema, result_queue)
            except Exception as e:
                # ignore queue-empty exceptions
                print(e, flush=True)
                pass
        self.conn.close()
    def workflow_start(self):
        """Open the shared DB connection and start the worker thread."""
        # pool a number of db connections
        self.isRunning = True
        #self.pool = queue.Queue()
        #for i in range(1):
        #    conn = self.create_connection()
        #    self.pool.put(conn)
        self.conn=self.create_connection()
        thread = Thread(target = self.process)
        thread.start()
    def workflow_end(self):
        """Signal the worker loop to stop; it closes the connection itself."""
        self.isRunning = False
        # close all db connections at the end of a workflow
        #for i in range(self.pool.qsize()):
        #    conn = self.pool.get(timeout=1)
        #    conn.close()
# def process_request(self, viz_request, options, schema, out_q):
# print("processsing..." + str(viz_request.operation_id))
# if viz_request.viz.binning:
# sql_statement = viz_request.viz.get_computed_filter_as_sql(schema)
# sql_statement = sql_statement.replace(schema.get_fact_table_name(), "%s_%s%s" % (
# schema.get_fact_table_name(), options.settings_size, "n" if options.settings_normalized else ""))
# #if self.can_execute_online(sql_statement):
# # sql_statement = sql_statement.replace("SELECT ", "SELECT ONLINE ")
# # sql_statement += " WITHTIME %s CONFIDENCE 95" % options.settings_time_requirement
# # sql_statement += " REPORTINTERVAL %s;" % options.settings_time_requirement
# # connection, cursor = self.create_connection(options.settings_time_requirement + 20)
#
# #connection, cursor = self.create_connection(options.settings_time_requirement)
# #calculate connection time
# t1=util.get_current_ms_time()
# connection, cursor = self.create_connection()
# t2=util.get_current_ms_time()
# viz_request.connection_time=t2-t1
# viz_request.start_time = util.get_current_ms_time()
# try:
# data = connection.sql(self.verdictdbedit(sql_statement))
# except:
# viz_request.result = {}
# viz_request.margins = {}
# viz_request.timedout = True
# viz_request.end_time = util.get_current_ms_time()
# out_q.put(viz_request)
# return
# #data = connection.sql(self.verdictdbedit(sql_statement))
# #data=connection.sql(sql_statement)
#
# viz_request.end_time = util.get_current_ms_time()
# connection.close()
#
# results = {}
# margins = {}
| leibatt/crossfilter-benchmark-public | drivers/verdictdb.py | verdictdb.py | py | 7,794 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pyverdict.postgres",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "queue.LifoQueue",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_nu... |
138761933 | import json
def json_read(path):
    '''
    Parses file of json type.

    Returns the deserialized object from the UTF-8 file at *path*.
    '''
    with open(path, 'r', encoding='utf-8') as source:
        return json.load(source)
def conti_with_count():
    '''
    Group entries from names.json by the continent given in continent.json.

    Returns a dict mapping each continent to the list of corresponding
    values from names.json.
    '''
    names = json_read('names.json')
    continents = json_read('continent.json')
    grouped = {}
    for code, continent in continents.items():
        grouped.setdefault(continent, []).append(names[code])
    return grouped
| AndriiTurko/homeworks_programming | json_make_dict.py | json_make_dict.py | py | 534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
}
] |
20981029783 | """
Created on Mon Feb 10 03:29:54 2020
@author: Luthfi (lsaif.github.com)
"""
from flask import Flask, render_template, request
import csv
import re
# Build the lithology abbreviation -> full term dictionary from the CSV
# (first row is a header and is skipped).
with open('litho_dict.csv', newline='') as infile:
    reader = csv.reader(infile)
    next(reader)
    litholist = dict(reader)
def translate(desc, transdict):
    """Translate a mudlog description token-by-token via *transdict*.

    Lookups are case-insensitive on the token; unknown tokens pass
    through unchanged. Trailing periods become commas in the output.
    """
    def lookup(token):
        return transdict.get(token.lower(), token)

    # First pass: whole space-separated words.
    first_pass = ' '.join(lookup(word) for word in desc.split(' '))
    # Second pass: re-split on non-word characters (keeping separators)
    # so abbreviations glued to punctuation are still translated.
    second_pass = ''.join(lookup(token) for token in re.split(r'(\W)', first_pass))
    return second_pass.replace('.', ',')
app = Flask(__name__)
@app.route('/')
def homepage():
    """Serve the translator's main page."""
    return render_template('index.html')
@app.route('/', methods=['GET','POST'])
def mudlog_translator():
    """Translate the posted lithology description and render the result.

    NOTE(review): a GET request falls through and returns None, which
    Flask treats as an error — confirm whether GET should re-render the
    form instead.
    """
    if request.method == "POST":
        lithodesc = request.form.get('lithology_description')
        print (translate(lithodesc,litholist))
        result = (translate(lithodesc,litholist))
        return render_template('index.html', result = result)
if __name__ == "__main__":
app.run(debug=True) | luthfigeo/MudLog-Translator | MudLogTranslator.py | MudLogTranslator.py | py | 1,117 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_num... |
15056294029 | import os
import sqlite3
import subprocess as sp
import sys
from pathlib import Path
db_path = Path(Path.home() / '.mypycheck.sqlite3')
def _create_files_table(con: sqlite3.Connection) -> None:
con.execute('''CREATE TABLE IF NOT EXISTS files (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
timestamp DOUBLE NOT NULL
);''')
try:
con.execute('''CREATE UNIQUE INDEX idx_files_name
ON files (name);''')
except:
pass
def _check(file: str, stdout: int=-1, stderr: int=-1) -> None:
    """Run `mypy --strict` on *file* unless its cached mtime is unchanged.

    Raises sp.CalledProcessError when mypy reports errors. On success the
    file's mtime is stored in the sqlite cache so later calls skip it.
    """
    path = Path(file).resolve(strict=True)
    try:
        connection = sqlite3.connect(db_path)
        _create_files_table(connection)
    except:
        # Corrupt or incompatible cache file: recreate it from scratch.
        if db_path.exists():
            os.remove(db_path)
        connection = sqlite3.connect(db_path)
        _create_files_table(connection)
    cursor = connection.execute("SELECT name,timestamp FROM files WHERE name = ?", (str(path), ))
    row = cursor.fetchone()
    mtime = path.stat().st_mtime
    if row is not None and row[1] >= mtime:
        # Cache hit: the file has not changed since its last clean check.
        # NOTE(review): this early return leaves `connection` open.
        return
    if stdout < 0:
        stdout = sys.stdout.fileno()
    if stderr < 0:
        stderr = sys.stderr.fileno()
    # Throws sp.CalledProcessError on failed check
    sp.check_call(['mypy', file, '--strict'], stdout=stdout, stderr=stderr)
    connection.execute("INSERT OR REPLACE INTO files (name, timestamp) VALUES (?, ?);", (str(path), mtime))
    connection.commit()
    connection.close()
def check(file: str) -> None:
    """Type-check *file* with mypy; exit with status 1 on failure.

    Files under site-packages are third-party and are skipped entirely.
    """
    if '/site-packages/' in str(file):
        return
    try:
        _check(file)
    except sp.CalledProcessError:
        # mypy already printed its diagnostics; just signal failure.
        # (sys.exit instead of the site-provided exit(); the bound `err`
        # was unused.)
        sys.exit(1)
def clean() -> None:
    """Delete the mtime cache so every file is re-checked on the next run."""
    if db_path.exists():
        os.remove(db_path)
def check_main() -> None:
    """CLI entry point: `mypycheck [--clean] target`."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('target')
    parser.add_argument('--clean', action='store_true')
    args = parser.parse_args()
    if args.clean:
        # Wipe the cache first, then check the target from scratch.
        clean()
    check(args.target)
| dlsloan/mypycheck | src/mypycheck/__init__.py | __init__.py | py | 2,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.home",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlite3.Connection",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
... |
7135815082 | # -*- coding: utf-8 -*-
# ***************************************************
# * File : main.py
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2023-04-11
# * Version : 0.1.041123
# * Description : description
# * Link : link
# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)
# ***************************************************
# python libraries
import os
import sys
from typing import List, Optional, Callable, Iterable
import torch
from torch import nn
from gluonts.torch.model.predictor import PyTorchPredictor
from gluonts.torch.distributions import StudentTOutput
from gluonts.model.forecast_generator import DistributionForecastGenerator
import pytorch_lightning as pl
# global variable
LOGGING_LABEL = __file__.split('/')[-1][:-3]
def mean_abs_scaling(context, min_scale = 1e-5):
    """Per-series scale for normalization: mean absolute value over the
    time axis, floored at *min_scale* to avoid division by zero.

    Args:
        context: tensor of shape (batch, context_length).
        min_scale: lower bound for the returned scale.

    Returns:
        Tensor of shape (batch, 1), broadcastable against *context*.
    """
    # Reduce over the time dimension only (dim=1). The previous full
    # reduction ``.mean()`` produced a 0-d tensor, on which
    # ``unsqueeze(1)`` raises IndexError for batched 2-d input.
    return context.abs().mean(1).clamp(min_scale, None).unsqueeze(1)
class FeedForwardNetwork(nn.Module):
    def __init__(self, prediction_length: int, context_length: int,
                 hidden_dimensions: List[int], batch_norm: bool = False,
                 distr_output: Callable = StudentTOutput(),
                 scaling: Callable = mean_abs_scaling) -> None:
        """Feed-forward probabilistic forecaster.

        Args:
            prediction_length: number of future steps to predict.
            context_length: number of past steps fed to the network.
            hidden_dimensions: sizes of the hidden layers (non-empty).
            batch_norm: insert BatchNorm1d after each hidden layer.
            distr_output: output distribution head.
                NOTE(review): the default instance is created once at
                class-definition time and shared by all instances that
                use the default — confirm intended.
            scaling: callable mapping context -> per-series scale.
        """
        super(FeedForwardNetwork, self).__init__()
        # ------------------------------
        # Parameters
        # ------------------------------
        # check params
        assert prediction_length > 0
        assert context_length > 0
        assert len(hidden_dimensions) > 0
        # params init
        self.prediction_length = prediction_length  # forecast horizon
        self.context_length = context_length  # conditioning window length
        self.hidden_dimensions = hidden_dimensions  # hidden layer sizes
        self.distr_output = distr_output  # distribution head
        self.batch_norm = batch_norm  # whether to apply BatchNormalization
        # ------------------------------
        # Layers
        # ------------------------------
        # layer1: input scaling
        self.scaling = scaling
        # layer2: hidden stack
        modules = []
        dimensions = [context_length] + hidden_dimensions[:-1]  # dimensions=[0, 1, 2, ..., n]
        for in_size, out_size in zip(dimensions[:-1], dimensions[1:]):
            # layer2.1: linear + activation
            modules += [
                self.__make_linear(in_size, out_size),
                nn.ReLU()
            ]
            # layer2.2: optional normalization
            if batch_norm:
                modules.append(nn.BatchNorm1d(out_size))
        # layer3: expand to one hidden vector per predicted step
        modules.append(
            self.__make_linear(dimensions[-1], prediction_length * hidden_dimensions[-1])
        )
        # layer4: output
        self.nn = nn.Sequential(*modules)
        self.args_proj = self.distr_output.get_args_proj(hidden_dimensions[-1])
    @staticmethod
    def __make_linear(dim_in, dim_out):
        """Linear layer with small uniform weight init and zero bias."""
        linear = nn.Linear(dim_in, dim_out)
        torch.nn.init.uniform_(linear.weight, -0.07, 0.07)
        torch.nn.init.zeros_(linear.bias)
        return linear
    def forward(self, context):
        """Scale the context, run the network, and return distribution args.

        Returns (distr_args, loc, scale) where loc is all-zeros and scale
        is the per-series scale produced by self.scaling.
        """
        # data scaling
        scale = self.scaling(context)
        scaled_context = context / scale
        # network output
        nn_out = self.nn(scaled_context)
        nn_out_reshaped = nn_out.reshape(-1, self.prediction_length, self.hidden_dimensions[-1])
        # student t distribution output
        distr_args = self.args_proj(nn_out_reshaped)
        return distr_args, torch.zeros_like(scale), scale
    def get_predictor(self, input_transform, batch_size = 32, device = None):
        """Wrap this network in a GluonTS PyTorchPredictor for inference."""
        return PyTorchPredictor(
            prediction_length = self.prediction_length,
            input_names = ["past_target"],
            prediction_net = self,
            batch_size = batch_size,
            input_transform = input_transform,
            forecast_generator = DistributionForecastGenerator(self.distr_output),
            device = device,
        )
class LightningFeedForwardNetwork(FeedForwardNetwork, pl.LightningModule):
    """PyTorch-Lightning wrapper adding the training loop to the network."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def training_step(self, batch, batch_idx):
        """Negative log-likelihood of the targets under the predicted distribution."""
        context = batch["past_target"]
        target = batch["future_target"]
        assert context.shape[-1] == self.context_length
        assert target.shape[-1] == self.prediction_length
        distr_args, loc, scale = self(context)
        distr = self.distr_output.distribution(distr_args, loc, scale)
        # loss function: NLL, averaged over the batch
        loss = -distr.log_prob(target)
        return loss.mean()

    def configure_optimizers(self):
        """Plain Adam with a fixed learning rate."""
        optimizer = torch.optim.Adam(self.parameters(), lr = 1e-3)
        return optimizer
# 测试代码 main 函数
def main():
from gluonts.dataset.repository.datasets import get_dataset
dataset = get_dataset("electricity")
if __name__ == "__main__":
main()
| wangzhefeng/tsproj | models/todo/FeedForwardNetwork.py | FeedForwardNetwork.py | py | 4,869 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line... |
32793914547 | import torch
import torch.nn as nn
from conf import device
class Encoder(nn.Module):
    """Single-step GRU encoder: embed one token id and advance the GRU."""

    def __init__(self, vocab_size, hidden_size=256):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        # Token-id -> dense vector lookup.
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        # Shape the embedded token as (seq_len=1, batch=1, hidden_size).
        embedded = self.embedding(input).view(1, 1, -1)
        return self.gru(embedded, hidden)

    def init_hidden(self):
        # `device` is imported from conf at module level.
        return torch.zeros(1, 1, self.hidden_size, dtype=torch.float32, device=device)
| junix/gen_poem | encoder.py | encoder.py | py | 622 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
3419488617 | from spectractor import parameters
from spectractor.config import set_logger
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
class LogBook:
"""Class to load_image and analyse observation logbook csv files."""
    def __init__(self, logbook="./tests/data/ctiofulllogbook_jun2017_v5.csv"):
        """Load and initialise the logbook

        Parameters
        ----------
        logbook: str
            Path to the logbook. Must be a CSV file.

        Examples
        ----------

        >>> logbook = LogBook('./tests/data/ctiofulllogbook_jun2017_v5.csv')
        >>> assert logbook.df is not None
        >>> print(logbook.logbook)
        ./tests/data/ctiofulllogbook_jun2017_v5.csv
        >>> print(logbook.df['disperser'][:2])
        0    Ron400
        1    Ron400
        Name: disperser, dtype: object
        >>> logbook = LogBook('./log.csv')
        """
        self.my_logger = set_logger(self.__class__.__name__)
        self.logbook = logbook
        # Missing file: log an error and leave self.df unset.
        if not os.path.isfile(logbook):
            self.my_logger.error('CSV logbook file {} not found.'.format(logbook))
            return
        # self.csvfile = open(self.logbook, 'rU', encoding='latin-1')
        # self.reader = csv.DictReader(self.csvfile, delimiter=';', dialect=csv.excel_tab)
        # Semicolon-separated, comma decimal mark, latin-1 encoded CSV.
        self.df = pd.read_csv(self.logbook, sep=";", decimal=",", encoding='latin-1', header='infer')
        self.df['date'] = pd.to_datetime(self.df.date)
def search_for_image(self, filename):
"""
Look for an image file name in the logbook and load_image properties:
- Obj-posXpix and Obj-posYpix: the [x0,y0] guessed pixel position in the image
- Dx and Dy: the x and y windows in pixel to search for the target; set XWINDOW and YWINDOW variables in parameters.py
- object: the name of the target
Parameters
----------
filename: str
the fits image file name (not the path, only the file name.)
Returns
-------
disperser_label: str
the name of the disperser
target: str
the name of the target
xpos: int
the x position of the target (in pixel)
ypos: int
the y position of the target (in pixel)
Examples
--------
>>> logbook = LogBook('./tests/data/ctiofulllogbook_jun2017_v5.csv')
>>> disperser_label, target, xpos, ypos = logbook.search_for_image("unknown_file.fits")
>>> print(disperser_label, target, xpos, ypos)
None None None None
>>> disperser_label, target, xpos, ypos = logbook.search_for_image("reduc_20170605_028.fits")
>>> print(disperser_label, target, xpos, ypos)
HoloPhAg PNG321.0+3.9 814 585
>>> disperser_label, target, xpos, ypos = logbook.search_for_image("reduc_20170608_119.fits")
>>> print(disperser_label, target, xpos, ypos)
None HD205905 None None
>>> disperser_label, target, xpos, ypos = logbook.search_for_image("reduc_20170630_001.fits")
>>> print(disperser_label, target, xpos, ypos)
None bias None None
"""
disperser_label = None
target = None
xpos = None
ypos = None
skip = False
try:
row = self.df.loc[(self.df['file'] == filename)].iloc[0]
target = row['object']
if row['object'] == 'bias' or row['object'] == 'flat' or row['object'] == 'zero':
self.my_logger.error(
'Fits file %s in logbook %s has flag %s. Skip file.' % (filename, self.logbook, target))
skip = True
if row['skip'] == 'skip':
self.my_logger.error('Fits file %s in logbook has flag "skip". Skip file.' % filename)
skip = True
if np.isnan(row['Obj-posXpix']):
self.my_logger.error(
'Fits file %s in logbook %s has no target x position. Skip file.' % (filename, self.logbook))
skip = True
if np.isnan(row['Obj-posYpix']):
self.my_logger.error(
'Fits file %s in logbook %s has no target y position. Skip file.' % (filename, self.logbook))
skip = True
if not np.isnan(row['Dx']):
parameters.XWINDOW = int(row['Dx'])
parameters.XWINDOW_ROT = int(row['Dx'])
if not np.isnan(row['Dy']):
parameters.YWINDOW = int(row['Dy'])
parameters.YWINDOW_ROT = int(row['Dy'])
if not skip:
xpos = int(row['Obj-posXpix'])
ypos = int(row['Obj-posYpix'])
disperser_label = row['disperser']
except IndexError:
if target is None and skip is False:
self.my_logger.error('Fits file %s not found in logbook %s.' % (filename, self.logbook))
return disperser_label, target, xpos, ypos
def plot_columns_vs_date(self, column_names):
"""Plot of the column property with respect to the dates.
Parameters
----------
column_names: list, str
List of column names to plot versus time from the log book.
Examples
--------
>>> logbook = LogBook('./tests/data/ctiofulllogbook_jun2017_v5.csv')
>>> logbook.plot_columns_vs_date(['seeing'])
>>> logbook.plot_columns_vs_date(['P', 'T'])
"""
if isinstance(column_names, str):
column_names = [column_names]
self.df.plot(x='date', y=column_names)
if parameters.DISPLAY:
plt.show()
if parameters.PdfPages:
parameters.PdfPages.savefig()
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| LSSTDESC/Spectractor | spectractor/logbook.py | logbook.py | py | 5,793 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "spectractor.config.set_logger",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pandas.read... |
6790152771 | import csv
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from exo_accounts.models import EmailAddress
from exo_certification.tasks import HubspotCertificationDealSyncTask
from ...models import ExOCertification, CertificationRequest
class Command(BaseCommand):
    """Import a CSV of coach emails and create approved, free L2A certification requests.

    Each CSV row must hold the email address in its second column and a
    ``%Y-%m-%d`` date in its third column. Newly created requests are synced
    to Hubspot and then approved; pre-existing requests are only reported.
    """

    def add_arguments(self, parser):
        # Only the first path given with -f/--file is processed in handle().
        parser.add_argument(
            '-f', '--file', nargs='+', type=str, help='CSV file')

    def handle(self, *args, **kwargs):
        path = kwargs.get('file')[0]
        certification = ExOCertification.objects.get(level='L2A')
        with open(path) as csvfile:
            spamreader = csv.reader(csvfile, delimiter=',')
            for row in spamreader:
                # Bug fix: reset per row so the error report in the except
                # handler can never raise NameError (first row) or show a
                # stale address from a previous row when row[1] itself fails.
                str_email = '<unknown>'
                try:
                    str_email = row[1]
                    email = EmailAddress.objects.get(email=str_email)
                    user = email.user
                    data = {
                        'price': 0,
                        'created': datetime.strptime(row[2], '%Y-%m-%d'),
                        'status': settings.EXO_CERTIFICATION_REQUEST_STATUS_CH_DRAFT,
                        'user': user,
                        'requester_email': str_email,
                        'requester_name': user.full_name,
                    }
                    certification_request, created = CertificationRequest.objects.get_or_create(
                        user=user,
                        certification=certification,
                        defaults=data,
                    )
                    if created:
                        # Push the new deal to Hubspot synchronously, then approve it.
                        HubspotCertificationDealSyncTask().s(pk=certification_request.pk).apply()
                        certification_request.refresh_from_db()
                        self.stdout.write(
                            self.style.SUCCESS('email [{}]: Successfully created {}'.format(
                                str_email, certification_request)
                            )
                        )
                        certification_request.status = settings.EXO_CERTIFICATION_REQUEST_STATUS_CH_APPROVED
                        certification_request.save(update_fields=['status'])
                    else:
                        self.stdout.write(
                            self.style.WARNING('email [{}]: Already exists CertRequest {}'.format(
                                str_email, certification_request)
                            )
                        )
                except Exception as exc:
                    # Broad on purpose: one bad row must not abort the whole import.
                    self.stdout.write(
                        self.style.ERROR('email [{}]: Errored {}'.format(str_email, exc)))
| tomasgarzon/exo-services | service-exo-core/exo_certification/management/commands/generate_certification_requests_free_coaches.py | generate_certification_requests_free_coaches.py | py | 2,647 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.management.BaseCommand",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.ExOCertification.objects.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.ExOCertification.objects",
"line_number": 21,
"usage_type":... |
20465273392 | # -*- coding: utf-8 -*-
# @Project : CrawlersTools
# @Time : 2022/6/21 17:06
# @Author : MuggleK
# @File : proxy.py
import httpx
from loguru import logger
def get_proxies(proxy_url=None, http2=False):
    """Fetch one proxy address from ``proxy_url`` and build a proxy mapping.

    :param proxy_url: endpoint returning a single ``host:port`` proxy as text
    :param http2: when True, return the ``scheme://``-keyed mapping
        (httpx style) instead of the ``http``/``https``-keyed one
    :return: proxy dict, or None when ``proxy_url`` is empty or the fetch fails
    """
    if not proxy_url:
        return
    protocol = 'http://'
    try:
        address = protocol + httpx.get(proxy_url).text.strip()
    except Exception as err:
        logger.error(f'获取代理失败:{err}')
        return
    if http2:
        return {protocol: address, 'https://': address}
    return {"http": address, "https": address}
| MuggleK/CrawlersTools | CrawlersTools/requests/proxy.py | proxy.py | py | 697 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "httpx.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "loguru.logger.error",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 29,
"usage_type": "name"
}
] |
2403591134 | import time
import pygame
from pygame.locals import *
import random
from os import environ
# Class for creating a window structure for the simulation
class WindowStructure:
    """Base class that owns a pygame window placed at a fixed screen position."""

    def __init__(self, win_pos=(0, 0), state=0):
        # NOTE(review): the tuple is unpacked as (y, x); this matches the
        # order used in the SDL_VIDEO_WINDOW_POS string below — confirm.
        self.y_origin, self.x_origin = win_pos
        self.state = state

    def create_window(self):
        """Open the pygame display at the stored position with the stored flags."""
        environ['SDL_VIDEO_WINDOW_POS'] = "{},{}".format(self.y_origin, self.x_origin)
        self.screen = pygame.display.set_mode(flags=self.state)
        self.clock = pygame.time.Clock()
# Class for running the simulation
class Simulation(WindowStructure):
    """Runs the visual-stimulus experiment inside the window opened by WindowStructure."""

    def __init__(self, win_pos, control_system, state, num_sprites, direction, velocity, shift_direction,
                 shift_time, black_screen_duration, pre_experiment_duration, trial_duration):
        super().__init__(win_pos, state)
        self.create_window()
        self.num_sprites = num_sprites
        # direction is one of 'forward', 'backward' or 'hidden' (no sprites drawn).
        self.direction = direction
        self.velocity = velocity
        self.shift_direction = shift_direction
        # Seconds between automatic direction flips when shift_direction is set.
        self.shift_time = shift_time
        # 1 or 2: selects how the external multiprocessing flag gates the loops.
        self.control_system = control_system
        self.black_screen_duration = black_screen_duration
        self.pre_experiment_duration = pre_experiment_duration
        self.trial_duration = trial_duration

    def run_experiment(self, flag):
        """Run the full protocol: black screen, white pre-phase, then four trials."""
        pygame.event.set_allowed([QUIT])
        self.display_black_screen()
        self.pre_experiment()
        # self.simple_loop_direction(flag)
        # self.simple_loop_pause(flag)
        # NOTE(review): this toggle pattern alternates 'hidden' with
        # 'forward'/'backward' trials; the asymmetric conditionals look
        # intentional but are worth confirming against the protocol design.
        self.direction = 'hidden' if self.direction == 'forward' else 'backward'
        self.move_your_swarm(flag)
        self.direction = 'hidden' if self.direction == 'backward' else 'forward'
        self.move_your_swarm(flag)
        self.direction = 'hidden' if self.direction == 'forward' else 'backward'
        self.move_your_swarm(flag)
        self.direction = 'hidden' if self.direction == 'backward' else 'forward'
        self.move_your_swarm(flag)
        print(" -------- END OF THE SIMULATION --------")
        exit(0)

    def display_black_screen(self):
        """Show a black screen for black_screen_duration seconds."""
        self.screen.fill((0, 0, 0))
        pygame.display.update()
        time.sleep(self.black_screen_duration)

    # Method to create an animation with given parameters
    def create_animation(self):
        """Build and return an Animation configured from this simulation's parameters."""
        # NOTE(review): this local sprite group is discarded — Animation
        # creates its own sprites in its constructor; the loop below looks redundant.
        all_sprites_list = pygame.sprite.Group()
        for _ in range(self.num_sprites):
            stimuli = SpriteObj(self.screen.get_size(), self.direction, self.velocity)
            all_sprites_list.add(stimuli)
        return Animation(self.screen, self.screen.get_size(), self.num_sprites, self.direction, self.velocity,
                         self.shift_direction)

    # Main loop for running the simulation
    def simple_loop_pause(self, flag):
        """Trial loop that pauses/freezes the animation depending on the flag value."""
        start_time = time.time()
        counter = 0
        shift_counter = 0
        animation = self.create_animation()
        trial_duration_counter = time.time()
        # Warm-up: advance 1200 frames before timing the trial.
        for i in range(1200):
            animation.run_logic()
            animation.display_frame()
        # time.sleep(1)
        while time.time() - trial_duration_counter < self.trial_duration:
            shift_counter += 1
            # Automatic direction flip every shift_time seconds (at 120 FPS).
            if animation.shift_direction and shift_counter > self.shift_time * 120:
                animation.change_direction()
                shift_counter = 0
            # Control system 2 freezes while the flag is set; system 1 while it is clear.
            if self.control_system == 2:
                if flag.value:
                    animation.display_frame()
                    time.sleep(1)
                    continue
            elif self.control_system == 1:
                if not flag.value:
                    animation.display_frame()
                    #time.sleep(1)
                    continue
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    exit()
            animation.run_logic()
            animation.display_frame()
            self.clock.tick(120)
            counter += 1
            # Print the measured FPS roughly once per second.
            if (time.time() - start_time) > 1:
                print("FPS: ", int(counter / (time.time() - start_time)))
                counter = 0
                start_time = time.time()

    def simple_loop_direction(self, flag):
        """Trial loop that flips the motion direction whenever the flag value changes."""
        start_time = time.time()
        trial_duration_counter = time.time()
        counter = 0
        shift_counter = 0
        animation = self.create_animation()
        prev_flag_value = flag.value
        while time.time() - trial_duration_counter < self.trial_duration:
            shift_counter += 1
            if animation.shift_direction and shift_counter > self.shift_time * 120:
                animation.change_direction()
                shift_counter = 0
            # Edge detection on the shared flag: each transition flips direction.
            if prev_flag_value != flag.value:
                print(f"Flag value changed from {prev_flag_value} to {flag.value}")
                animation.change_direction()
                prev_flag_value = flag.value
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    exit()
            animation.run_logic()
            animation.display_frame()
            self.clock.tick(120)
            counter += 1
            if (time.time() - start_time) > 1:
                print("FPS: ", int(counter / (time.time() - start_time)))
                counter = 0
                start_time = time.time()

    # Method to perform pre-experiment tasks
    def pre_experiment(self):  # 20 white, black, white
        """Show a white screen for pre_experiment_duration seconds before the trials."""
        self.screen.fill((255, 255, 255))
        pygame.display.update(self.screen.get_rect())
        time.sleep(self.pre_experiment_duration)

    def move_your_swarm(self, flag):
        """Run one free-running trial (no flag gating) for trial_duration seconds."""
        start_time = time.time()
        counter = 0
        animation = self.create_animation()
        trial_duration_counter = time.time()
        # Warm-up frames before timing the trial.
        for i in range(1200):
            animation.run_logic()
            animation.display_frame()
        while time.time() - trial_duration_counter < self.trial_duration:
            animation.run_logic()
            animation.display_frame()
            self.clock.tick(120)
            counter += 1
            if (time.time() - start_time) > 1:
                print("FPS: ", int(counter / (time.time() - start_time)))
                counter = 0
                start_time = time.time()
# Class for creating sprite objects
class SpriteObj(pygame.sprite.Sprite):
    """A single moving stimulus sprite wrapping a horizontally scrolling image."""

    # Default stimulus image. NOTE(review): machine-specific absolute path kept
    # for backward compatibility — should move to a relative/configurable path.
    DEFAULT_IMAGE = 'D:\AmirA21\Desktop\Yossef\Visual-Based_Collective-Motion\Simulator_ver_02\images\image_for_peer_recognition_test.png'

    def __init__(self, screen_size, direction, velocity, image_data=None):
        super().__init__()
        self.width, self.height = screen_size
        self.direction = direction
        # 'forward' sprites use the horizontally flipped image.
        self.heading = 'original'
        if self.direction == 'forward':
            self.heading = 'flipped'
        self.velocity = velocity
        self.image = None
        # Bug fix: the original unconditionally overwrote ``image_data`` with
        # the hard-coded path; an explicitly supplied image is now honoured.
        self.image_data = image_data if image_data is not None else self.DEFAULT_IMAGE
        self.create_surface()
        self.reset_pos()

    def create_surface(self):
        """Load ``self.image_data`` into ``self.image`` (no-op when it is None)."""
        if self.image_data is not None:
            self.image = pygame.image.load(self.image_data).convert_alpha()
            self.generate_surface()

    def generate_surface(self):
        """Finalise the sprite surface: either a default black circle or the loaded image."""
        if self.image_data is None:
            # Fallback: 45x45 white square with a black circle.
            self.image_data = (45, 45)
            width, height = 45, 45
            self.image = pygame.Surface([width, height], pygame.SRCALPHA)
            self.image.fill((255, 255, 255))
            pygame.draw.circle(self.image, (0, 0, 0), (22.5, 22.5), 22.5)
        else:
            # White is treated as transparent in the loaded image.
            self.image.set_colorkey((255, 255, 255))
            if self.heading == 'flipped':
                self.image = pygame.transform.flip(self.image, True, False)
        self.rect = self.image.get_rect()

    def reset_pos(self):
        """Respawn the sprite off-screen on the side it will enter from."""
        self.rect.y = random.randrange(0, self.height)
        if self.direction == "forward":
            self.rect.x = random.randrange(self.width, self.width * 2)
        else:
            self.rect.x = random.randrange(self.width * -1, 0)

    def update(self):
        """Advance the sprite one frame; wrap around and fix heading mismatches."""
        if self.direction == "forward":
            self.rect.x += -self.velocity
            if self.rect.x > self.width * 2 or self.rect.x < 0:
                self.reset_pos()
        else:
            self.rect.x += self.velocity
            if self.rect.x < self.width * -1 or self.rect.x > self.width:
                self.reset_pos()
        # NOTE(review): flip() toggles *both* heading and direction, so this
        # self-correction also reverses motion — confirm this is intended.
        expected_heading = 'flipped' if self.direction == 'forward' else 'original'
        if self.heading != expected_heading:
            self.flip()

    def flip(self):
        """Flips the direction and heading of the sprite."""
        self.direction = 'backward' if self.direction == 'forward' else 'forward'
        self.heading = 'flipped' if self.heading == 'original' else 'original'
        self.image = pygame.transform.flip(self.image, True, False)
class Animation(object):
    """Owns the sprite group and draws/updates one animation frame at a time."""

    def __init__(self, screen, screen_size, num_sprites, direction, velocity, shift_direction):
        self.screen = screen
        self.screen_size = screen_size
        self.direction = direction
        self.velocity = velocity
        self.num_sprites = num_sprites
        self.shift_direction = shift_direction
        self.all_sprites_list = pygame.sprite.Group()
        self.create_sprites()

    def create_sprites(self):
        """Populate the group with ``num_sprites`` stimulus sprites."""
        size = self.screen.get_size()
        self.all_sprites_list.add(
            *(SpriteObj(size, self.direction, self.velocity) for _ in range(self.num_sprites))
        )

    def change_direction(self):
        """Flip the motion direction of every sprite in the group."""
        for stimulus in self.all_sprites_list:
            stimulus.flip()

    def display_frame(self):
        """Clear to white, draw the sprites (unless hidden) and update the display."""
        self.screen.fill((255, 255, 255))
        if self.direction != 'hidden':
            self.all_sprites_list.draw(self.screen)
        pygame.display.update()

    def run_logic(self):
        """Advance every sprite by one frame."""
        self.all_sprites_list.update()

    @staticmethod
    def reduce_brightness(screen, alpha_value):
        """Darken the whole screen by blitting a translucent black overlay."""
        shade = pygame.Surface(screen.get_size(), pygame.SRCALPHA)
        shade.fill((0, 0, 0, alpha_value))
        screen.blit(shade, (0, 0))
| Time2bImmortal/Heart_of_the_swarm | Simulator_ver_02/VirtualSimulation.py | VirtualSimulation.py | py | 11,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Cl... |
8594392164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Scripts to plot Figure 2, co-occurrence and spearman rho for top 1000 videos in terms of views and watch times.
Usage: python plot_intersection_spearman_top1000.py
Time: ~2M
"""
from __future__ import division, print_function
import os
from scipy import stats
import operator
import matplotlib.pyplot as plt
def plot_intersection(ax, view_rank_dict, watch_rank_dict, color, linestyle, label):
    """Plot the co-occurrence rate of the top-n videos by view rank and watch rank.

    For n = 50, 60, ..., 1000 the y value is |top-n(view) ∩ top-n(watch)| / n.
    """
    top_views = [vid for vid, _ in
                 sorted(view_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]]
    top_watch = [vid for vid, _ in
                 sorted(watch_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]]
    sizes = list(range(50, 1001, 10))
    rates = [len(set(top_views[:n]) & set(top_watch[:n])) / n for n in sizes]
    ax.plot(sizes, rates, color=color, linestyle=linestyle, label=label)
def plot_spearman(ax, view_rank_dict, watch_rank_dict, color, linestyle, label):
    """Plot Spearman's rho between view and watch ranks over the union of the top-n videos."""
    top_views = [vid for vid, _ in
                 sorted(view_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]]
    top_watch = [vid for vid, _ in
                 sorted(watch_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]]
    sizes = []
    rhos = []
    # iterate from 50 to 1000, with gap 10
    for n in range(50, 1001, 10):
        union = list(set(top_views[:n]) | set(top_watch[:n]))
        view_rank = [view_rank_dict[vid] for vid in union]
        watch_rank = [watch_rank_dict[vid] for vid in union]
        sizes.append(n)
        rhos.append(stats.spearmanr(view_rank, watch_rank)[0])
    ax.plot(sizes, rhos, color=color, linestyle=linestyle, label=label)
if __name__ == '__main__':
    # == == == == == == == == Part 1: Set up experiment parameters == == == == == == == == #
    # setting parameters
    view_rank_dict = {}
    watch_rank_dict = {}
    music_view_rank_dict = {}
    music_watch_rank_dict = {}
    news_view_rank_dict = {}
    news_watch_rank_dict = {}

    # == == == == == == == == Part 2: Load dataset == == == == == == == == #
    input_doc = '../../production_data/new_tweeted_dataset_norm/'
    for subdir, _, files in os.walk(input_doc):
        for f in files:
            with open(os.path.join(subdir, f), 'r') as fin:
                fin.readline()  # skip the header line
                for line in fin:
                    # Tab-separated rows: vid, then metrics; columns 7/8 of the
                    # remainder are the 30-day view and watch counts.
                    vid, dump = line.rstrip().split('\t', 1)
                    view30 = float(dump.split('\t')[7])
                    watch30 = float(dump.split('\t')[8])
                    view_rank_dict[vid] = view30
                    watch_rank_dict[vid] = watch30
                    # File-name prefix encodes the category: 10 = Music, 25 = News.
                    if f.startswith('10'):
                        music_view_rank_dict[vid] = view30
                        music_watch_rank_dict[vid] = watch30
                    if f.startswith('25'):
                        news_view_rank_dict[vid] = view30
                        news_watch_rank_dict[vid] = watch30
            print('>>> Loading data: {0} done!'.format(os.path.join(subdir, f)))

    # Figure (a): co-occurrence rate of the two top-n lists.
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    plot_intersection(ax1, view_rank_dict, watch_rank_dict, color='r', linestyle='-', label='ALL')
    plot_intersection(ax1, music_view_rank_dict, music_watch_rank_dict, color='k', linestyle='--', label='Music')
    plot_intersection(ax1, news_view_rank_dict, news_watch_rank_dict, color='k', linestyle=':', label='News')
    ax1.set_ylim([0, 1])
    ax1.set_xlabel('top $n$ videos', fontsize=18)
    ax1.set_ylabel('Intersection size', fontsize=18)
    ax1.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax1.tick_params(axis='both', which='major', labelsize=16)
    ax1.legend(loc='lower right', handlelength=1, frameon=False, fontsize=20)
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    ax1.set_title('(a) co-occurrence rate', fontsize=18)
    plt.tight_layout()
    plt.show()

    # Figure (b): Spearman's rank correlation over the same top-n ranges.
    fig = plt.figure()
    ax2 = fig.add_subplot(1, 1, 1)
    plot_spearman(ax2, view_rank_dict, watch_rank_dict, color='r', linestyle='-', label='ALL')
    plot_spearman(ax2, music_view_rank_dict, music_watch_rank_dict, color='k', linestyle='--', label='Music')
    plot_spearman(ax2, news_view_rank_dict, news_watch_rank_dict, color='k', linestyle=':', label='News')
    ax2.set_ylim([-1, 1])
    ax2.set_xlabel('top $n$ videos', fontsize=18)
    ax2.set_ylabel("Spearman's $\\rho$", fontsize=18)
    ax2.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
    ax2.tick_params(axis='both', which='major', labelsize=16)
    ax2.legend(loc='lower right', handlelength=1, frameon=False, fontsize=20)
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.set_title("(b) Spearman's $\\rho$", fontsize=18)
    plt.tight_layout()
    plt.show()
| avalanchesiqi/yt-longevity | engagement_plots/plot_intersection_spearman_top1000.py | plot_intersection_spearman_top1000.py | py | 5,027 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "operator.itemgetter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "operator.... |
40155422506 | import os, sys, time, datetime
# Additional packages
import numpy as np
# ARL Env
from dVRK.PSM_cartesian_ddpg_env import PSMCartesianDDPGEnv
# Stable baselines algorithms
from stable_baselines.ddpg.policies import MlpPolicy
from stable_baselines import HER, DDPG
from stable_baselines.common.noise import NormalActionNoise
from stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec
from stable_baselines.common.callbacks import CheckpointCallback
def redirect_stdout(filepath: str = None):
    """Redirect the output stream to a file. Also redirect error output stream.

    Log files are created under ``<cwd>/.logs``; when *filepath* is omitted a
    timestamped name is generated. Errors go to ``<name>_err.txt``, and an
    existing log is never clobbered (a time suffix is appended instead).
    """
    # Bug fix: ``now`` was previously only bound inside the ``filepath is
    # None`` branch, raising NameError when a caller-supplied path already
    # existed and the time-suffix fallback below was taken.
    now = datetime.datetime.now()
    cdir = os.getcwd()
    basepath = os.path.join(cdir, '.logs')
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    if filepath is None:
        filepath = 'log_' + now.strftime("%Y_%m_%d-%H_%M_%S.txt")
    filepath = os.path.join(basepath, filepath)
    err_filepath = filepath[:-4] + '_err.txt'
    if os.path.exists(filepath):
        # Avoid overwriting an existing log of the same name.
        filepath = filepath[:-4]
        filepath += now.strftime("_%H_%M_%S") + '.txt'
    sys.stdout = open(filepath, 'w')
    sys.stderr = open(err_filepath, 'w')
    print("Began logging")
    return
def main(env: PSMCartesianDDPGEnv):
    """Train a DDPG agent on the given dVRK cartesian environment and save it.

    Writes TensorBoard logs and periodic checkpoints to
    ./ddpg_dvrk_tensorboard/ and the final model to ./ddpg_robot_env.
    """
    # the noise objects for DDPG
    n_actions = env.action.action_space.shape[0]
    param_noise = None
    # Temporally correlated exploration noise on every action dimension.
    action_noise = OrnsteinUhlenbeckActionNoise(
        mean=np.zeros(n_actions),
        sigma=float(0.5) * np.ones(n_actions)
    )

    model = DDPG(
        MlpPolicy,
        env,
        gamma=0.95,
        verbose=1,
        nb_train_steps=300,
        nb_rollout_steps=150,
        param_noise=param_noise,
        batch_size=128,
        action_noise=action_noise,
        random_exploration=0.05,
        normalize_observations=True,
        tensorboard_log="./ddpg_dvrk_tensorboard/",
        observation_range=(-1.5,
                           1.5),
        critic_l2_reg=0.01
    )

    # Checkpoint every 100k steps so training can be resumed (see NOTE below).
    model.learn(
        total_timesteps=4000000,
        log_interval=100,
        callback=CheckpointCallback(save_freq=100000,
                                    save_path="./ddpg_dvrk_tensorboard/")
    )
    model.save("./ddpg_robot_env")

    # NOTE:
    # If continuing learning from previous checkpoint,
    # Comment above chunk of code {model=DDPG(''') till model.save("./her_robot_env")} and uncomment below lines:
    # Replace the XXXXX below with the largest number present in (rl_model_) directory ./ddpg_dvrk_tensorboard/
    # remaining_training_steps = 4000000 - XXXXX
    # model_log_dir = './ddpg_dvrk_tensorboard/rl_model_XXXXX_steps.zip'
    # model = DDPG.load(model_log_dir, env=env)
    # # Reset the model
    # env.reset()
    # model.learn(remaining_training_steps, log_interval=100,
    #             callback=CheckpointCallback(save_freq=100000, save_path="./ddpg_dvrk_tensorboard/"))
    # model.save("./ddpg_robot_env")
def load_model(eval_env):
    """Load the saved DDPG policy and evaluate it for 20 episodes of up to 400 steps.

    Prints how often the goal was reached and the average step count of the
    successful episodes.
    """
    model = DDPG.load('./ddpg_robot_env', env=eval_env)
    successes = 0
    steps_per_success = []
    for _ in range(20):
        obs = eval_env.reset()
        steps_taken = 0
        for _ in range(400):
            action, _ = model.predict(obs)
            obs, reward, done, _ = eval_env.step(action)
            steps_taken += 1
            if done:
                steps_per_success.append(steps_taken)
                successes += 1
                print("----------------It reached terminal state -------------------")
                break
    print(
        "Robot reached the goal position successfully ",
        successes,
        " times and the Average step count was ",
        np.average(np.array(steps_per_success))
    )
if __name__ == '__main__':
    # redirect_stdout()
    root_link_name = 'baselink'
    # Environment configuration: joint limits are 7-DOF (3 prismatic-like +
    # 4 rotary values); workspace limits are cartesian bounds in metres.
    env_kwargs = {
        'action_space_limit': 0.05,
        'goal_position_range': 0.05,
        'position_error_threshold': 0.01,
        'goal_error_margin': 0.0075,
        'joint_limits':
            {
                'lower_limit': np.array([-0.2,
                                         -0.2,
                                         0.1,
                                         -1.5,
                                         -1.5,
                                         -1.5,
                                         -1.5]),
                'upper_limit': np.array([0.2,
                                         0.2,
                                         0.24,
                                         1.5,
                                         1.5,
                                         1.5,
                                         1.5])
            },
        'workspace_limits':
            {
                'lower_limit': np.array([-0.04,
                                         -0.03,
                                         -0.2]),
                'upper_limit': np.array([0.03,
                                         0.04,
                                         -0.091])
            },
        'enable_step_throttling': False,
        'steps_to_print': 10000
    }
    # Training
    ambf_env = PSMCartesianDDPGEnv(**env_kwargs)
    time.sleep(5)  # give the AMBF simulator time to come up before connecting
    ambf_env.make(root_link_name)
    ambf_env.reset()
    main(env=ambf_env)
    ambf_env.ambf_client.clean_up()
    # Evaluate learnt policy
    eval_env = PSMCartesianDDPGEnv(**env_kwargs)
    time.sleep(5)
    eval_env.make(root_link_name)
    eval_env.reset()
    load_model(eval_env=eval_env)
    eval_env.ambf_client.clean_up()
| WPI-AIM/ambf_rl | scripts/dVRK/PSM_cartesian_ddpg_algorithm.py | PSM_cartesian_ddpg_algorithm.py | py | 4,942 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number... |
6320198920 | import pathlib2
import os
import wx
from option_loader import OptHandle
class EditFrame(wx.Frame):
    """Frame listing every option/hotkey pair with a button to change each hotkey."""

    def __init__(self, opt_instance: OptHandle):
        super().__init__(parent=None, title="Edit Panel")
        self.opt_handle = opt_instance
        self.setup_panel()
        self.SetSize(0, 0, 500, 750)
        self.Center()

    def setup_panel(self):
        """Build the option grid: a label, the current hotkey and a change button per option."""
        panel = wx.Panel(self)
        panel.BackgroundColour = wx.Colour(50, 100, 255)
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel.SetSizer(sizer)
        # NOTE(review): wx.FlexGridSizer(rows, cols, gap) — 2 rows by N cols
        # looks transposed for a one-row-per-option layout; confirm the
        # intended grid shape against the running UI.
        flex_grid = wx.FlexGridSizer(2, len(self.opt_handle.dict), 2)
        sizer.Add(flex_grid, wx.SizerFlags().Center().Expand())
        # Cleanup: a 14-line dead triple-quoted string (commented-out layout
        # code evaluated and discarded every iteration) was removed here.
        for key, value in self.opt_handle.dict.items():
            flex_grid.Add(wx.StaticText(panel, label=str(key)), wx.SizerFlags().Center())
            tmp_sizer = wx.BoxSizer()
            tmp_sizer.Add(wx.StaticText(panel, label=str(value)), wx.SizerFlags().Center())
            button = wx.Button(panel, label="Change Hotkey")
            tmp_sizer.Add(button)
            flex_grid.Add(tmp_sizer, wx.SizerFlags().Right())
# Launch the edit panel standalone with the default option set.
if __name__ == "__main__":
    app = wx.App()
    frame = EditFrame(OptHandle())
    frame.Show()
    app.MainLoop()
| Nickiel12/Church-Programs | WX-StreamController/edit_gui.py | edit_gui.py | py | 2,034 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wx.Frame",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "option_loader.OptHandle",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "wx.Panel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "wx.Colour",
"line_nu... |
1151371916 | import json
from pprint import pprint
# Load the purchase records and print the total RP amount spent.
with open('rpPurchases.json') as f:
    data = json.load(f)

# Idiom: sum the amounts directly instead of building an intermediate list
# of (amount, paymentType) tuples and accumulating in a second loop.
rp = sum(purchase["amount"] for purchase in data)
print(rp)
| kickass9797/Testing | rand/omg.py | omg.py | py | 239 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
6044172734 | from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
from models import Entry
# Legacy Django URLconf (pre-1.10 ``patterns()`` style with string view paths).
urlpatterns = patterns(
    '',
    # Blog entry CRUD and folder (tag) management.
    url(r'^(?P<slug>\S+)/copy/$', views.copy_blog, name='blog_copy'),
    url(r'^folders/info/', views.folderview, name='folder_view'),
    url(r'^folders/$', views.TagList.as_view(), name='folders'),
    url(r'^new-folder/$', views.TagCreate.as_view(), name='tag_new'),
    url(r'^(?P<slug>\S+)/edit/$', views.BlogUpdate.as_view(), name='blog_edit'),
    url(r'^new-user/$', views.UserCreate.as_view(), name='user_new'),
    url(r'^new-post/$', views.EntryCreate.as_view(), name='entry_new'),
    url(r'^(?P<slug>\S+)/delete/$', views.EntryDelete.as_view(), name='blog_delete'),
    url(r'^tag-delete/(?P<slug>\S+)$', views.TagDelete.as_view(), name='tag_delete'),
    url(r'^tag-edit/(?P<slug>\S+)$', views.TagUpdate.as_view(), name='tag_edit'),
    # Search and authentication.
    url(r'^search-form/$', views.search_form, name='search_form'),
    url(r'^search/$', views.search, name='search'),
    url(r'^login$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='user_login'),
    url(r'^logout$', 'django.contrib.auth.views.logout_then_login', {'login_url': reverse_lazy('user_login')}, name='user_logout'),
    # Index views with different orderings of the Entry queryset.
    url(r'^$', views.BlogIndex.as_view(), name="index"),
    url(r'^order_by_title/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('title')), name='by_title'),
    url(r'^order_by_slug/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('slug')), name='by_slug'),
    url(r'^order_by_body/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('body')), name='by_body'),
    url(r'^date_desc/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('created')), name='by_date_desc'),
    url(r'^date_ascd/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('-created')), name='index'),
    # Decryption helpers and the catch-all entry detail view (keep last).
    url(r'^(?P<slug>\S+)/decrypt_form/$', views.decrypt_form, name="decrypt_form"),
    url(r'/decrypt/$', views.decrypt, name="decrypt"),
    url(r'^(?P<slug>\S+)$', views.BlogDetail.as_view(), name="entry_detail"),
)
| lowmanb/cs3240-f14-team01 | blog/urls.py | urls.py | py | 2,245 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dja... |
41831305861 | from typing import Optional
from cartes import COEUR, COULEURS, CarteBelote, CarteSetBelote, Couleur, Pli
class Annonce:
    """A bid in a belote auction: a trump colour, a target score and the bidder.

    ``score_a_faire`` must be one of VALID_SCORES: 80..160 in steps of 10,
    0 (no bid, see ANNONCE_NULLE), or the special 1000/2000 bids.
    """

    VALID_SCORES = list(range(80, 170, 10)) + [0, 1000, 2000]

    def __init__(self, atout, score_a_faire, joueur):
        if int(score_a_faire) not in self.VALID_SCORES:
            raise ValueError("score_a_faire non valide")
        if not isinstance(atout, Couleur):
            raise ValueError("couleur non valide")
        self.atout = atout
        self.score_a_faire = score_a_faire
        self.joueur = joueur

    def __lt__(self, other):
        # Bug fix: the original returned None (not False) when the score was
        # not strictly lower, which breaks e.g. ``(a < b) is False`` checks.
        return self.score_a_faire < other.score_a_faire
# Sentinel "no bid" announce (score 0) used as the starting point of an auction.
ANNONCE_NULLE = Annonce(atout=COEUR, score_a_faire=0, joueur=None)
def poser_question(question, reponses_possibles):
    """Keep prompting with *question* until the answer is in *reponses_possibles*."""
    answer = None
    while answer not in reponses_possibles:
        answer = input(question)
    return answer
class Joueur:
    def __init__(self, nom: str):
        self.nom: str = nom
        # Belote bonus flag: adds 20 points in _total_points when True.
        self.belote: bool = False
        self.main: CarteSetBelote = CarteSetBelote()  # cards currently in hand
        # Whether the player still has to speak during the bidding phase.
        self.doit_annoncer: bool = True
        self.equipe: Optional[Equipe] = None  # set when the team is created
        self.plis: list[Pli] = []  # tricks won so far this round
    def __repr__(self) -> str:
        """A player is represented by their name."""
        return self.nom
def _reinit(self):
self.doit_annoncer = True
self.belote = False
self.main = CarteSetBelote()
self.plis = []
@property
def _total_points(self):
score = 0
for pli in self.plis:
score += pli._points
if self.belote:
score += 20
return score
    def _afficher_main(self):
        """Print the player's current hand to stdout."""
        print(self.main)
    def _annoncer(self, meilleure_annonce):
        """Interactively ask the player whether to outbid ``meilleure_annonce``.

        Returns the new ``Annonce`` or None when the player passes. The player
        is marked as having spoken (``doit_annoncer = False``) either way.
        """
        self._afficher_main()
        reponse = poser_question(f"Souhaitez-vous annoncer {self} ?", ["o", "n"])
        if reponse == "o":
            # Pick a trump colour by name among the known colours.
            couleur = poser_question("Couleur ?", list(map(lambda x: x.nom, COULEURS)))
            couleur = list(filter(lambda x: x.nom == couleur, COULEURS))[0]
            # Only scores strictly above the current best bid are offered.
            scores_possibles = list(
                map(
                    lambda x: str(x),
                    filter(
                        lambda x: x > meilleure_annonce.score_a_faire,
                        Annonce.VALID_SCORES,
                    ),
                )
            )
            score = poser_question("Score ?", scores_possibles)
            annonce = Annonce(atout=couleur, score_a_faire=int(score), joueur=self)
        else:
            annonce = None
        self.doit_annoncer = False
        return annonce
def _demander_melanger(self) -> bool:
reponse = input(f"Souhaitez-vous mélanger {self.nom}? [o/n]: ")
if reponse == "o":
return True
return False
def _ajouter_carte_en_main(self, carte):
self.main.append(carte)
def _faire_annonce(self) -> bool:
reponse = input(f"Souhaitez-vous faire une annonce {self.nom}? [o/n]: ")
if reponse == "o":
return True
return False
def _couleur_demandee_en_main(self, couleur_demandee) -> bool:
return (
len(
list(
filter(
lambda x: x.couleur.forme == couleur_demandee.couleur.forme,
self.main,
)
)
)
> 0
)
def _atout_en_main(self) -> bool:
return True if True in [carte.atout for carte in self.main] else False
def _meilleur_atout_en_main(self, other_atout: CarteBelote) -> bool:
if other_atout.atout is False:
raise ValueError("Vous devez appeler cette fonction avec un autre atout")
meilleurs_atouts_en_main = list(
filter(lambda x: x.atout and x > other_atout, self.main)
)
if meilleurs_atouts_en_main:
return True
return False
def _jouer_carte(self, pli: Pli, couleur_atout: Couleur):
premiere_carte_jouee = pli[0] if pli else None
print(f"À toi de jouer {self}")
self._afficher_main()
while True:
index_carte = 999
while index_carte not in range(0, len(self.main)):
index_carte = input("Index de la carte à jouer: ")
try:
index_carte = int(index_carte)
except Exception:
...
carte_a_jouer = self.main[index_carte]
if premiere_carte_jouee is not None:
carte_gagnante = pli._carte_la_plus_forte(couleur_atout=couleur_atout)
if carte_a_jouer.couleur.forme != premiere_carte_jouee.couleur.forme:
couleur_en_main: bool = self._couleur_demandee_en_main(
couleur_demandee=premiere_carte_jouee
)
if couleur_en_main:
print(
f"Vous possèder du {premiere_carte_jouee.couleur.forme} "
"en main. "
f"Vous ne pouvez pas jouer du {carte_a_jouer.couleur.forme}"
)
continue
else:
if self._atout_en_main():
if (
carte_a_jouer.atout is False
and carte_gagnante.joueur.equipe != self.equipe
):
print("Vous devez couper !")
continue
else:
if premiere_carte_jouee.atout and carte_a_jouer.atout:
if carte_a_jouer < premiere_carte_jouee:
if self._meilleur_atout_en_main(other_atout=carte_gagnante):
print("Vous avez un atout supérieur en main")
if self.belote and carte_a_jouer.atout:
if carte_a_jouer.valeur in ("D", "R"):
atouts_en_main = list(filter(lambda x: x.atout is True, self.main))
valeurs_atouts = list(map(lambda x: x.valeur, atouts_en_main))
if "D" in valeurs_atouts or "R" in valeurs_atouts:
print("Belote")
else:
print("Rebelote")
self.main.pop(index_carte)
return carte_a_jouer
class Equipe:
    """A two-player team with a running score."""

    def __init__(self, joueur1, joueur2):
        """Bind both players to this team and start the score at zero."""
        self.joueur1 = joueur1
        self.joueur2 = joueur2
        # Back-reference so each player knows which team he belongs to.
        joueur1.equipe = self
        joueur2.equipe = self
        self.score = 0

    def __repr__(self) -> str:
        return f"{self.joueur1.nom} & {self.joueur2.nom}"

    @property
    def joueurs(self) -> tuple:
        """Both members, in creation order."""
        return (self.joueur1, self.joueur2)
| slim0/pyContree | joueurs.py | joueurs.py | py | 6,836 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "cartes.Couleur",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "cartes.COEUR",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "cartes.CarteSetBelote",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.Optional... |
8589141283 | from forum.models import Post, Comment
from django import forms
from tinymce.widgets import TinyMCE
class PostForm(forms.ModelForm):
    """Form for creating/editing a forum Post.

    `author`, `slug` and `course` are excluded: the view fills them in,
    they are never user-editable.
    """

    class Meta:
        model = Post
        exclude = ['author', 'slug', 'course']

    # Declared explicitly to control the widget attributes (placeholder,
    # required flag) of the model's title field.
    title = forms.CharField(
        label='Title',
        max_length=50,
        widget=forms.TextInput(
            attrs={
                'required': 'True',
                'placeholder': 'Enter post title'
            }))
    # Rich-text body edited through the TinyMCE widget.
    content = forms.CharField(widget=TinyMCE())
class CommentForm(forms.ModelForm):
    """Form for replying to a Post.

    `author`, `post` and `course` are excluded: the view fills them in.
    """

    class Meta:
        model = Comment
        exclude = ['author', 'post', 'course']

    # Rich-text reply body edited through the TinyMCE widget.
    content = forms.CharField(label='Message', widget=TinyMCE())
| rafidirg/forum-saas-kowan | forum/forms.py | forms.py | py | 689 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "forum.models.Post",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.... |
27097993038 | import logging
class CustomFormatter(logging.Formatter):
    """Logging Formatter that wraps the level name and message in ANSI
    colour codes for terminal output, one colour per severity."""

    cyan = "\u001b[36m"
    green = "\u001b[32m"
    yellow = "\u001b[33m"
    red = "\u001b[35m"
    bold_red = "\u001b[31m"
    reset = "\u001b[0m"

    # Non-INFO records also carry the source file name and line number.
    debug_format = "%(asctime)s - %(name)s - {colour}%(levelname)s - %(message)s\u001b[0m (%(filename)s:%(lineno)d)"
    norm_format = "%(asctime)s - %(name)s - {colour}%(levelname)s - %(message)s\u001b[0m"

    FORMATS = {
        logging.DEBUG: debug_format.format(colour=cyan),
        logging.INFO: norm_format.format(colour=green),
        logging.WARNING: debug_format.format(colour=yellow),
        logging.ERROR: debug_format.format(colour=red),
        logging.CRITICAL: debug_format.format(colour=bold_red)
    }

    def format(self, record):
        """Render *record* with the colour format matching its level."""
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt, datefmt="%Y-%m-%d %H:%M:%S")
        return formatter.format(record)


def get_logger(module_name, output_file, terminal_level=logging.DEBUG, file_level=logging.DEBUG):
    """Return a logger named *module_name* with a coloured terminal handler
    and a plain file handler writing to *output_file*.

    :param module_name: logger name (usually ``__name__``)
    :param output_file: path of the log file
    :param terminal_level: minimum level echoed to the terminal
    :param file_level: minimum level written to the file
    :returns: the configured ``logging.Logger``
    """
    logger = logging.getLogger(module_name)
    if logger.handlers:
        # Bug fix: getLogger returns a process-wide singleton, so without
        # this guard every call stacked another pair of handlers and every
        # message was logged multiple times.
        return logger
    logger.setLevel(logging.DEBUG)

    terminal_handler = logging.StreamHandler()
    terminal_handler.setLevel(terminal_level)
    terminal_handler.setFormatter(CustomFormatter())
    logger.addHandler(terminal_handler)

    file_handler = logging.FileHandler(output_file)
    file_handler.setLevel(file_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
| Beatson-Institute-Digital-Pathology/reinhard-wsi-normalisation | reinhard_wsi/logging.py | logging.py | py | 1,711 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.Formatter",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.WA... |
18394694315 | import os
import numpy as np
import pytest
from numpy.testing import assert_array_equal
import tiledb
def test_schema_evolution(tmp_path):
    """End-to-end schema evolution: add an attribute, then drop one,
    verifying reads before/after each evolve and after consolidation."""
    ctx = tiledb.default_ctx()
    se = tiledb.ArraySchemaEvolution(ctx)
    uri = str(tmp_path)
    attrs = [
        tiledb.Attr(name="a1", dtype=np.float64),
        tiledb.Attr(name="a2", dtype=np.int32),
    ]
    dims = [tiledb.Dim(domain=(0, 3), dtype=np.uint64)]
    domain = tiledb.Domain(*dims)
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, sparse=False)
    tiledb.Array.create(uri, schema)

    data1 = {
        "a1": np.arange(5, 9),
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
    }
    with tiledb.open(uri, "w") as A:
        A[:] = data1
    with tiledb.open(uri) as A:
        res = A[:]
        assert_array_equal(res["a1"], data1["a1"])
        assert_array_equal(res["a2"], data1["a2"])
        assert "a3" not in res.keys()

    # Adding the same attribute twice to one evolution must fail.
    newattr = tiledb.Attr("a3", dtype=np.int8)
    se.add_attribute(newattr)
    with pytest.raises(tiledb.TileDBError) as excinfo:
        se.add_attribute(newattr)
    assert "Input attribute name is already there" in str(excinfo.value)
    assert "tiledb/schema_evolution.cc" in str(excinfo.value)

    se.array_evolve(uri)
    data2 = {
        "a1": np.arange(5, 9),
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
        "a3": np.random.randint(0, 255, size=4).astype(np.int8),
    }
    with tiledb.open(uri, "w") as A:
        A[:] = data2

    def test_it():
        # Reads must see the evolved schema, both pre- and post-consolidation.
        with tiledb.open(uri) as A:
            res = A[:]
            assert_array_equal(res["a1"], data2["a1"])
            assert_array_equal(res["a2"], data2["a2"])
            assert_array_equal(res["a3"], data2["a3"])

    test_it()
    tiledb.consolidate(uri)
    test_it()

    # A fresh evolution object is required for the drop.
    se = tiledb.ArraySchemaEvolution(ctx)
    se.drop_attribute("a1")
    se.array_evolve(uri)
    data3 = {
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
        "a3": np.random.randint(0, 255, size=4).astype(np.int8),
    }

    def test_it2():
        with tiledb.open(uri) as A:
            res = A[:]
            assert "a1" not in res.keys()
            assert_array_equal(res["a2"], data3["a2"])
            assert_array_equal(res["a3"], data3["a3"])

    with tiledb.open(uri, "w") as A:
        A[:] = data3
    test_it2()
    tiledb.consolidate(uri)
    test_it2()
def test_schema_evolution_timestamp(tmp_path):
    """An evolution performed with an explicit timestamp must write a
    schema file carrying that timestamp in its name."""
    ctx = tiledb.default_ctx()
    se = tiledb.ArraySchemaEvolution(ctx)
    vfs = tiledb.VFS()
    uri = str(tmp_path)
    schema_uri = os.path.join(uri, "__schema")
    attrs = [tiledb.Attr(name="a1", dtype=np.float64)]
    domain = tiledb.Domain(tiledb.Dim(domain=(0, 3), dtype=np.uint64))
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, sparse=False)
    tiledb.Array.create(uri, schema)

    def get_schema_timestamps(schema_uri):
        # Schema file names embed the timestamp as the third "_"-separated
        # field; enumeration directories are skipped.
        schema_files = filter(lambda x: "__enumerations" not in x, vfs.ls(schema_uri))
        return [int(os.path.basename(file).split("_")[2]) for file in schema_files]

    assert 123456789 not in get_schema_timestamps(schema_uri)
    newattr = tiledb.Attr("a2", dtype=np.int8)
    se.timestamp(123456789)
    se.add_attribute(newattr)
    se.array_evolve(uri)
    assert 123456789 in get_schema_timestamps(schema_uri)
def test_schema_evolution_with_enmr(tmp_path):
    """Evolving with an enumeration-labelled attribute: the enumeration must
    exist before the attribute can be added, and cannot be dropped while an
    attribute still references it."""
    ctx = tiledb.default_ctx()
    se = tiledb.ArraySchemaEvolution(ctx)
    uri = str(tmp_path)
    attrs = [
        tiledb.Attr(name="a1", dtype=np.float64),
        tiledb.Attr(name="a2", dtype=np.int32),
    ]
    dims = [tiledb.Dim(domain=(0, 3), dtype=np.uint64)]
    domain = tiledb.Domain(*dims)
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, sparse=False)
    tiledb.Array.create(uri, schema)
    data1 = {
        "a1": np.arange(5, 9),
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
    }
    with tiledb.open(uri, "w") as A:
        A[:] = data1
    with tiledb.open(uri) as A:
        assert not A.schema.has_attr("a3")

    # Attribute references enumeration "e3" which does not exist yet.
    newattr = tiledb.Attr("a3", dtype=np.int8, enum_label="e3")
    se.add_attribute(newattr)
    with pytest.raises(tiledb.TileDBError) as excinfo:
        se.array_evolve(uri)
    assert " Attribute refers to an unknown enumeration" in str(excinfo.value)
    se.add_enumeration(tiledb.Enumeration("e3", True, np.arange(0, 8)))
    se.array_evolve(uri)

    se = tiledb.ArraySchemaEvolution(ctx)
    with tiledb.open(uri) as A:
        assert A.schema.has_attr("a3")
        assert A.attr("a3").enum_label == "e3"

    # Dropping the enumeration while a3 still uses it must fail.
    se.drop_enumeration("e3")
    with pytest.raises(tiledb.TileDBError) as excinfo:
        se.array_evolve(uri)
    assert "Unable to drop enumeration" in str(excinfo.value)
    se.drop_attribute("a3")
    se.array_evolve(uri)
    with tiledb.open(uri) as A:
        assert not A.schema.has_attr("a3")
@pytest.mark.parametrize(
    "type,data",
    (
        ("int", [0]),
        ("bool", [True, False]),
        ("str", ["abc", "defghi", "jk"]),
        ("bytes", [b"abc", b"defghi", b"jk"]),
    ),
)
def test_schema_evolution_extend_enmr(tmp_path, type, data):
    """Extending an enumeration via schema evolution must be visible on
    re-open, for every supported value dtype.

    NOTE(review): the parameter name `type` shadows the builtin; renaming
    would change the pytest parametrize id mapping, so it is kept.
    """
    uri = str(tmp_path)
    enmr = tiledb.Enumeration("e", True, dtype=type)
    attrs = [tiledb.Attr(name="a", dtype=int, enum_label="e")]
    domain = tiledb.Domain(tiledb.Dim(domain=(0, 3), dtype=np.uint64))
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, enums=[enmr])
    tiledb.Array.create(uri, schema)
    with tiledb.open(uri) as A:
        assert A.schema.has_attr("a")
        assert A.attr("a").enum_label == "e"
        assert A.enum("e") == enmr

    # extend() returns a new Enumeration; the evolution applies it in place.
    se = tiledb.ArraySchemaEvolution()
    updated_enmr = enmr.extend(data)
    se.extend_enumeration(updated_enmr)
    se.array_evolve(uri)
    with tiledb.open(uri) as A:
        assert A.schema.has_attr("a")
        assert A.attr("a").enum_label == "e"
        assert A.enum("e") == updated_enmr
def test_schema_evolution_extend_check_bad_type():
    """Extending an enumeration with values of a mismatched dtype must raise,
    while matching values are accepted."""
    # dtype -> values of that dtype (insertion order fixes the attempt order).
    cases = {
        str: ["a", "b"],
        int: [1, 2, 3],
        bool: [True, False],
    }
    for dtype, good_values in cases.items():
        enmr = tiledb.Enumeration("e", True, dtype=dtype)
        for other_dtype, bad_values in cases.items():
            if other_dtype is dtype:
                continue
            with pytest.raises(tiledb.TileDBError):
                enmr.extend(bad_values)
        # Values of the declared dtype still extend cleanly.
        enmr.extend(good_values)
| TileDB-Inc/TileDB-Py | tiledb/tests/test_schema_evolution.py | test_schema_evolution.py | py | 6,667 | python | en | code | 165 | github-code | 36 | [
{
"api_name": "tiledb.default_ctx",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tiledb.ArraySchemaEvolution",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tiledb.Attr",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.floa... |
11504622560 | """
This module contains the Distribution class which defines a standard
interface for distributions It also provides several implemented
distributions, which inherit from Distribution Any user-specified
distributions should inherit from Distribution
"""
import numpy as np
from .utils import overrides, package_path
import os
from scipy import stats
import pickle
class Distribution(object):
    """
    Interface/abstract class for distributions.
    Any user-specified distributions should be defined by inheriting from this class and
    overriding the appropriate methods.
    """

    def __init__(self, ndims=2, nbatch=100):
        """ Creates a Distribution object
        :param ndims: the dimension of the state space for this distribution
        :param nbatch: the number of sampling particles to run simultaneously
        :returns: a Distribution object
        :rtype: Distribution
        """
        # distribution dimensions
        self.ndims = ndims
        # number of sampling particles to use
        self.nbatch = nbatch
        # TensorflowDistributions require some special treatment
        # this attribute is to be used instead of isinstance, as that would require
        # tensorflow to be imported globally
        if not hasattr(self, 'backend'):
            self.backend = 'numpy'
        # true iff being sampled with a jump process
        self.mjhmc = None
        # number of times energy op has been called
        self.E_count = 0
        # number of times gradient op has been called
        self.dEdX_count = 0
        # only set to true when I have a bias initialization and am being burned in
        # to generate and cache a fair initialization for continuous samplers
        self.generation_instance = False
        # so some distributions may modify the default
        if not hasattr(self, 'max_n_particles'):
            self.max_n_particles = None
        # set the state fairly. calls out to a cache
        self.init_X()

    def E(self, X):
        """Energy of state X; counts one evaluation per particle (column)."""
        self.E_count += X.shape[1]
        return self.E_val(X)

    def E_val(self, X):
        """
        Subclasses should implement this with the correct energy function
        """
        raise NotImplementedError()

    def dEdX(self, X):
        """Energy gradient at X; counts one evaluation per particle (column)."""
        self.dEdX_count += X.shape[1]
        return self.dEdX_val(X)

    def dEdX_val(self, X):
        """
        Subclasses should implement this with the correct energy gradient function
        """
        raise NotImplementedError()

    def __hash__(self):
        """ Subclasses should implement this as the hash of the tuple of all parameters
        that effect the distribution, including ndims. This is very important!!
        nbatch should not be part of the hash!! Including it will break everything
        As an example, see how this is implemented in Gaussian

        :returns: a hash of the relevant parameters of self
        :rtype: int
        """
        raise NotImplementedError()

    def init_X(self):
        """
        Sets self.Xinit to a good initial value
        """
        # TODO: make production ready by adding global flag to disable
        # research options like this
        self.cached_init_X()

    def cached_init_X(self):
        """ Sets self.Xinit to cached (serialized) initial states for continuous-time samplers, generated by burn in
        *For use with continuous-time samplers only*

        :returns: None
        :rtype: none
        """
        # Cache key is (class name, parameter hash) -- see __hash__.
        distr_name = type(self).__name__
        distr_hash = hash(self)
        file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
        file_prefix = '{}/initializations'.format(package_path())
        if file_name in os.listdir(file_prefix):
            # Cache hit: take the first nbatch cached particles for the
            # appropriate sampler family.
            with open('{}/{}'.format(file_prefix, file_name), 'rb') as cache_file:
                mjhmc_endpt, _, _, control_endpt = pickle.load(cache_file)
            if self.mjhmc:
                self.Xinit = mjhmc_endpt[:, :self.nbatch]
            else:
                self.Xinit = control_endpt[:, :self.nbatch]
        else:
            # Cache miss: burn in with the maximum particle count, write the
            # cache, then recurse once to load it.
            from mjhmc.misc.gen_mj_init import MAX_N_PARTICLES, cache_initialization
            # modify this object so it can be used by gen_mj_init
            old_nbatch = self.nbatch
            self.nbatch = self.max_n_particles or MAX_N_PARTICLES
            self.generation_instance = True
            # must rebuild now that nbatch is changed
            if self.backend == 'tensorflow':
                self.build_graph()
            # start with biased initializations
            try:
                self.gen_init_X()
            except NotImplementedError:
                # completely arbitrary choice
                self.Xinit = np.random.randn(self.ndims, self.nbatch)
            # generate and cache fair initialization
            cache_initialization(self)
            # reconstruct this object using fair initialization
            self.nbatch = old_nbatch
            self.generation_instance = False
            # must rebuild now that nbatch is changed back
            if self.backend == 'tensorflow':
                self.build_graph()
            self.cached_init_X()

    def gen_init_X(self):
        """ Sets self.Xinit to generated initial states for the sampling particles
        *For use with discrete-time samplers only*

        :returns: None
        :rtype: None
        """
        raise NotImplementedError()

    def reset(self):
        """
        resets the object. returns self for convenience
        """
        self.E_count = 0
        self.dEdX_count = 0
        # generation instances keep their burn-in state
        if not self.generation_instance:
            self.init_X()
        return self

    def __call__(self, X):
        """
        Convenience method for NUTS compatibility
        returns -E, -dEdX
        """
        rshp_X = X.reshape(len(X), 1)
        E = float(self.E(rshp_X))
        dEdX = self.dEdX(rshp_X).T[0]
        return -E, -dEdX

    def load_cache(self):
        """ Loads and returns the cached fair initializations and
        estimated variances associated with this
        distribution. Throws an error if the cache does not exist

        :returns: the loaded cache: (fair_initialization, emc_var_estimate, true_var_estimate)
        :rtype: (np.ndarray, float, float)
        """
        distr_name = type(self).__name__
        distr_hash = hash(self)
        file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
        file_prefix = '{}/initializations'.format(package_path())
        # NOTE(review): opened in text mode here but 'rb' in cached_init_X;
        # pickle.load requires binary mode on Python 3 -- confirm and fix.
        with open('{}/{}'.format(file_prefix, file_name)) as cache_file:
            return pickle.load(cache_file)
class LambdaDistribution(Distribution):
    """ An `anonymous' distribution object for quick
    experimentation. Due to the initialization time that is required
    at first run it, one shouldn't use this object in the
    long-term. Rather create your own distribution class that inherits
    from Distribution.

    You should give your LambdaDistribution objects a name. Use a
    descriptive name, and use the same for functionally equivalent
    LambdaDistributions - the hash of the name is used to label the
    initialization information which is generated at first run time of
    a new distribution. This requirement is a side effect of the
    unfortunate fact that there is no computable hash function which
    assigns functionally identical programs to the same number.
    """
    #pylint: disable=too-many-arguments
    def __init__(self, energy_func=None, energy_grad_func=None, init=None, name=None):
        """ Creates an anonymous distribution object.

        :param energy_func: function specifying the energy
        :param energy_grad_func: function specifying gradient of the energy
        :param init: fair initialization for this distribution. array of shape (ndims, nbatch)
        :param name: name of this distribution. use the same name for
           functionally identical distributions
        :returns: an anonymous distribution object
        :rtype: LambdaDistribution
        """
        self.energy_func = energy_func
        self.energy_grad_func = energy_grad_func
        self.init = init
        # TODO: raise warning if name is not passed
        # Bug fix: the original called np.random() -- the module itself is
        # not callable and raised TypeError.
        self.name = name or str(np.random.rand())
        super(LambdaDistribution, self).__init__(ndims=init.shape[0], nbatch=init.shape[1])

    @overrides(Distribution)
    def E_val(self, X):
        # Bug fix: the original was copy-pasted from Gaussian and used a
        # nonexistent self.J, ignoring the user-supplied energy function.
        return self.energy_func(X)

    @overrides(Distribution)
    def dEdX_val(self, X):
        # Bug fix: same copy-paste defect as E_val; delegate to the
        # user-supplied gradient function.
        return self.energy_grad_func(X)

    @overrides(Distribution)
    def gen_init_X(self):
        self.Xinit = self.init

    @overrides(Distribution)
    def __hash__(self):
        # nbatch is deliberately excluded: Distribution.__hash__ documents
        # that including it breaks the initialization cache.
        return hash((self.ndims, self.name))
class Gaussian(Distribution):
    """The "ill conditioned Gaussian" benchmark from the LAHMC paper.

    Diagonal precision matrix with per-dimension precisions log-spaced
    over 10**log_conditioning decades.
    """

    def __init__(self, ndims=2, nbatch=100, log_conditioning=6):
        """:param log_conditioning: log10 of the condition number"""
        # Per-dimension precisions, from 10**-log_conditioning up to 1.
        self.conditioning = 10**np.linspace(-log_conditioning, 0, ndims)
        self.J = np.diag(self.conditioning)
        self.description = '%dD Anisotropic Gaussian, %g self.conditioning'%(ndims, 10**log_conditioning)
        super(Gaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        """Quadratic energy x^T J x / 2 per particle, shape (1, nbatch)."""
        quad_form = np.sum(X*np.dot(self.J,X), axis=0)
        return quad_form.reshape((1,-1))/2.

    @overrides(Distribution)
    def dEdX_val(self, X):
        """Gradient (J + J^T) x / 2 of the quadratic energy."""
        return np.dot(self.J,X)/2. + np.dot(self.J.T,X)/2.

    @overrides(Distribution)
    def gen_init_X(self):
        """Draw exact samples: per-dimension std is 1/sqrt(precision)."""
        std_dev = 1./np.sqrt(self.conditioning).reshape((-1,1))
        self.Xinit = std_dev * np.random.randn(self.ndims,self.nbatch)

    @overrides(Distribution)
    def __hash__(self):
        # nbatch deliberately excluded (see Distribution.__hash__).
        cond_key = hash(tuple(self.conditioning))
        return hash((self.ndims, cond_key))
class RoughWell(Distribution):
    """The "rough well" benchmark from the LAHMC paper.

    A broad quadratic well (width scale1) superposed with a
    short-wavelength cosine ripple (wavelength scale2).
    """

    def __init__(self, ndims=2, nbatch=100, scale1=100, scale2=4):
        """:param scale1: width of the smooth well
        :param scale2: wavelength of the rough ripple
        """
        self.scale1 = scale1
        self.scale2 = scale2
        self.description = '{} Rough Well'.format(ndims)
        super(RoughWell, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        """Energy per particle: quadratic well plus cosine ripple."""
        well = (X**2) / (2*self.scale1**2)
        ripple = np.cos(X*2*np.pi/self.scale2)
        return np.sum(well + ripple, axis=0).reshape((1,-1))

    @overrides(Distribution)
    def dEdX_val(self, X):
        """Gradient of E_val."""
        ripple_slope = np.sin(X*2*np.pi/self.scale2)
        return X/self.scale1**2 + -ripple_slope*2*np.pi/self.scale2

    @overrides(Distribution)
    def gen_init_X(self):
        """Initialize from the smooth-well Gaussian; the ripple is ignored."""
        self.Xinit = self.scale1 * np.random.randn(self.ndims, self.nbatch)

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.scale1, self.scale2))
class MultimodalGaussian(Distribution):
    """Mixture of two unit-variance Gaussian modes separated along the
    first state-space axis."""

    def __init__(self, ndims=2, nbatch=100, separation=3):
        """:param separation: half-distance between the two modes along axis 0"""
        # Bug fix: keep the scalar -- __hash__ reads self.separation, which
        # the original never stored, so hashing raised AttributeError.
        self.separation = separation
        self.sep_vec = np.array([separation] * nbatch +
                                [0] * (ndims - 1) * nbatch).reshape(ndims, nbatch)
        # separated along first axis
        self.sep_vec[0] += separation
        super(MultimodalGaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        """Negative log of the (unnormalized) sum of the two modes."""
        # allows for partial batch size
        trim_sep_vec = self.sep_vec[:, :X.shape[1]]
        return -np.log(np.exp(-np.sum((X + trim_sep_vec)**2, axis=0)) +
                       np.exp(-np.sum((X - trim_sep_vec)**2, axis=0)))

    @overrides(Distribution)
    def dEdX_val(self, X):
        # allows for partial batch size
        trim_sep_vec = self.sep_vec[:, :X.shape[1]]
        common_exp = np.exp(np.sum(4 * trim_sep_vec * X, axis=0))
        # floating point hax
        return ((2 * ((X - trim_sep_vec) * common_exp + trim_sep_vec + X)) /
                (common_exp + 1))

    @overrides(Distribution)
    def init_X(self):
        # The two sep_vec shifts cancel: this is just the sum of two standard
        # normal draws (variance 2), as the original author noted.
        self.Xinit = ((np.random.randn(self.ndims, self.nbatch) + self.sep_vec) +
                      (np.random.randn(self.ndims, self.nbatch) - self.sep_vec))

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.separation))
class TestGaussian(Distribution):
    """Simple isotropic Gaussian used as a default sampler test case."""

    def __init__(self, ndims=2, nbatch=100, sigma=1.):
        """:param sigma: standard deviation of every dimension"""
        self.sigma = sigma
        super(TestGaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        """Isotropic quadratic energy ||x||^2 / (2 sigma^2), shape (1, nbatch)."""
        sq_norm = np.sum(X**2, axis=0).reshape((1, -1))
        return sq_norm / (2. * self.sigma ** 2)

    @overrides(Distribution)
    def dEdX_val(self, X):
        """Gradient x / sigma^2 of the isotropic energy."""
        return X/self.sigma**2

    @overrides(Distribution)
    def gen_init_X(self):
        """Draw standard-normal particles (note: sigma is not applied here,
        matching the original behavior)."""
        self.Xinit = np.random.randn(self.ndims, self.nbatch)

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.sigma))
#pylint: disable=too-many-instance-attributes
class ProductOfT(Distribution):
    """ Provides the product of T experts distribution
    """
    #pylint: disable=too-many-arguments
    def __init__(self, ndims=36, nbasis=36, nbatch=100, lognu=None, W=None, b=None):
        """ Product of T experts, assumes a fixed W that is sparse and alpha that is

        :param ndims: dimensionality of the state space
        :param nbasis: number of experts (initializer requires nbasis == ndims)
        :param nbatch: number of sampling particles
        :param lognu: optional log degrees-of-freedom, one per expert
        :param W: optional receptive fields, shape (ndims, nbasis)
        :param b: optional per-expert biases
        """
        # awkward hack to import theano in poe only
        try:
            import theano.tensor as T
            import theano
            self.theano = theano
            self.T = T
        except Exception:
            # Bug fix: was a bare `except:`; catching Exception avoids
            # swallowing SystemExit/KeyboardInterrupt.
            raise ImportError("Theano could not be imported")
        if ndims != nbasis:
            raise NotImplementedError("Initializer only works for ndims == nbasis")
        self.ndims = ndims
        self.nbasis = nbasis
        self.nbatch = nbatch
        if W is None:
            W = np.eye(ndims, nbasis)
        self.weights = self.theano.shared(np.array(W, dtype='float32'), 'W')
        if lognu is None:
            pre_nu = np.random.rand(nbasis,) * 2 + 2.1
        else:
            pre_nu = np.exp(lognu)
        self.nu = self.theano.shared(np.array(pre_nu, dtype='float32'), 'nu')
        if b is None:
            b = np.zeros((nbasis,))
        self.bias = self.theano.shared(np.array(b, dtype='float32'), 'b')
        # Compile the symbolic energy and its gradient; the compiled theano
        # functions shadow the E_val/dEdX_val methods on this instance.
        state = T.matrix()
        energy = self.E_def(state)
        gradient = T.grad(T.sum(energy), state)
        #@overrides(Distribution)
        self.E_val = self.theano.function([state], energy, allow_input_downcast=True)
        #@overrides(Distribution)
        self.dEdX_val = self.theano.function([state], gradient, allow_input_downcast=True)
        super(ProductOfT,self).__init__(ndims,nbatch)
        self.backend = 'theano'

    def E_def(self,X):
        """
        energy for a POE with student's-t expert in terms of:
                samples [# dimensions]x[# samples] X
                receptive fields [# dimensions]x[# experts] W
                biases [# experts] b
                degrees of freedom [# experts] nu
        """
        rshp_b = self.bias.reshape((1,-1))
        rshp_nu = self.nu.reshape((1, -1))
        alpha = (rshp_nu + 1.)/2.
        energy_per_expert = alpha * self.T.log(1 + ((self.T.dot(X.T, self.weights) + rshp_b) / rshp_nu) ** 2)
        energy = self.T.sum(energy_per_expert, axis=1).reshape((1, -1))
        return energy

    @overrides(Distribution)
    def gen_init_X(self):
        # hack to remap samples from a generic product of experts to
        # the model we are actually going to generate samples from
        Zinit = np.zeros((self.ndims, self.nbatch))
        # Bug fix: xrange is Python 2 only; range works on both.
        for ii in range(self.ndims):
            Zinit[ii] = stats.t.rvs(self.nu.get_value()[ii], size=self.nbatch)
        Yinit = Zinit - self.bias.get_value().reshape((-1, 1))
        self.Xinit = np.dot(np.linalg.inv(self.weights.get_value()), Yinit)

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims,
                     self.nbasis,
                     hash(tuple(self.nu.get_value())),
                     hash(tuple(self.weights.get_value().ravel())),
                     hash(tuple(self.bias.get_value().ravel()))))
| rueberger/MJHMC | mjhmc/misc/distributions.py | distributions.py | py | 16,295 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "utils.package_path",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "mjhmc.misc.gen_mj_init.M... |
23497344492 | from io import BytesIO
from PIL import Image
from uuid import uuid4
from django.core.files import File
JPEG_IMAGE_QUALITY = 100
def crop_image(image):
    """Crop *image* to a square anchored at the top-left corner and return
    it as a Django File holding JPEG data with a random name."""
    im = Image.open(image)
    # The square's side is the image's shortest dimension.
    side = min(im.height, im.width)
    im = im.convert("RGB").crop((0, 0, side, side))
    buffer = BytesIO()
    im.save(buffer, "JPEG", quality=JPEG_IMAGE_QUALITY)
    return File(buffer, _generate_random_file_name())
def _generate_random_file_name():
    """Return a unique JPEG file name derived from a random UUID."""
    return "{}.jpg".format(uuid4())
| adrianeriksen/photographic | photographic/photos/utils.py | utils.py | py | 610 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.core.files.File",
"lin... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.