blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c80b169d597c9c063958437671ceecd7b71750f5 | efb5914d7e4cb5e1f150f05b2047475c5f742a03 | /article/models.py | 09b65d6d82d963d44bf273d8eddfb48b108b1a82 | [] | no_license | fengyaowu/python_blog | 89271d0ab95e452984a46988e2a75546ab976854 | fed9321e6f9d0daef905da96c404153dee3c63fd | refs/heads/master | 2020-12-25T10:35:56.113671 | 2016-06-20T13:22:45 | 2016-06-20T13:22:45 | 61,433,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from django.db import models
# Create your models here.
class Article(models.Model) :
    """A single blog post."""
    title = models.CharField(max_length = 100)  # post title
    category = models.CharField(max_length = 50, blank = True)  # post tag/category
    date_time = models.DateTimeField(auto_now_add = True)  # set once, when the post is created
    content = models.TextField(blank = True, null = True)  # post body text
    def __str__(self):
        return self.title
    # def __unicode__(self) :
    #     return self.title
    class Meta:  # newest posts first
        ordering = ['-date_time']
| [
"245522482@qq.com"
] | 245522482@qq.com |
d6886e124c6a5e23cfe5c3167ad569f20e55a369 | 3cedb583e9f3dfcdf16aeba56a0b3ff7c6213e99 | /python-codes/m2_curso_em_video_estruturas_de_controle/ex048.0.py | 7c2fd6a5930cb1648fc94bfe1920cee6b20f008b | [
"MIT"
] | permissive | lucasportella/learning-python | 0f39ae2389db6d07b5b8c14ebe0c24f1e93c77c5 | a9449dffd489e7e1f1619e3acef86bc2c64f0f14 | refs/heads/master | 2022-12-26T15:04:12.806300 | 2020-10-14T23:17:47 | 2020-10-14T23:17:47 | 260,685,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | soma = 0
somaPA = 0  # running sum of the matching terms (the arithmetic progression)
print('Todos os números ímpares múltiplos de 3 até 500:')
# Step 2 keeps only odd numbers; the inner test keeps multiples of 3.
for cont in range(1,501,2):
    if cont % 3 == 0:
        soma += 1  # term counter ('soma' is initialised just before this block)
        somaPA += cont
        print(cont,end=' ')
print('\n Número de repetições:', soma)
print('Soma da PA:', somaPA)
| [
"lucasportellaagu@gmail.com"
] | lucasportellaagu@gmail.com |
6221fa9402eb869ad7d946b60149e5551b64ed3d | e190e06b7670b1b81c2e949deebbf85edc4322c1 | /python/recurssion/power_sum.py | 9b72ee1dae4f0dcef892983302ae2cf5a69add8f | [] | no_license | sridattayalla/Programming | ecb6dc337fa05c8c5bcbbc83e1e6c054c99061e0 | f37b1beaef478ae18cebc05e08195e3fe782e204 | refs/heads/master | 2020-04-14T08:25:11.838889 | 2019-01-10T14:45:26 | 2019-01-10T14:45:26 | 163,735,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
exp_op = int(input())
N = int(input())
def fun(total, i, power=None):
    """Count the ways to write ``total`` as a sum of distinct ``power``-th
    powers of integers >= ``i`` (the "Power Sum" problem).

    ``power`` defaults to the module-level ``N`` read from stdin, so the
    original call site ``fun(exp_op, 1)`` keeps working unchanged; passing
    it explicitly makes the function usable (and testable) standalone.
    """
    if power is None:
        power = N  # backward-compatible fallback to the global exponent
    if total == 0:
        return 1  # exact decomposition found
    s = 0
    j = i
    # Try every base whose power still fits in the remaining total.
    while not (j ** power) > total:
        s += fun(total - j ** power, j + 1, power)
        j += 1
    return s
print(fun(exp_op, 1)) | [
"sridatta555@gmail.com"
] | sridatta555@gmail.com |
bdfc67134e68f38d32f6dacdf3e2b988157cdd84 | 592cfd8644c8783a11bb794b882d8cd026223810 | /test_Model/SALDR_U_test.py | 028d4ec87fafe9101c528f45c9274df29a39b6d5 | [] | no_license | wikiloa/SALDR | 34c5c1a1fd4c727f0306d4c55007a052f3453350 | 818ce267df8f376a1a2ce6f25b39eec37126071b | refs/heads/main | 2023-04-24T13:34:29.475560 | 2021-05-13T03:15:09 | 2021-05-13T03:15:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,232 | py | # -*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Reshape, Conv2D, add, LeakyReLU, ReLU, Lambda, dot
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation
import scipy.io as sio
import numpy as np
import math
import time
import os
# Enable GPU
tf.compat.v1.reset_default_graph()
os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # pin this job to GPU #2
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of all at once
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)
# image params: each CSI sample is a 2 x 32 x 32 "image" (real/imag channels first)
img_height = 32
img_width = 32
img_channels = 2
img_total = img_height * img_width * img_channels  # 2048
# network params
envir = 'indoor'  # 'indoor' or 'outdoor'
B = 3  # quantization bits; set B <= 0 to disable quantization
CR = 8  # compression ratio
encoded_dim1 = 256  # compress rate=1/8
encoded_dim2 = 128  # compress rate=1/16
encoded_dim3 = 64  # compress rate=1/32
encoded_dim4 = 32  # compress rate=1/64
batchsize = 200
limit1 = 50
limit2 = 100
limit3 = 150
def Num2Bit(Num, B):
    """Expand a tensor of integer quantization levels into its B-bit binary
    representation.

    Num: (batch, dim) tensor whose values fit in B bits — TODO confirm range.
    Returns a float32 tensor of shape (batch, dim * B) holding 0/1 bits.
    """
    Num_ = Num.numpy()
    # unpackbits: (batch, dim) uint8 -> (batch, dim*8) -> (batch, dim, 8);
    # keep only the B least-significant bits -> (batch, dim*B).
    bit = (np.unpackbits(np.array(Num_, np.uint8), axis=1)
           .reshape(-1, Num_.shape[1], 8)[:, :, 8 - B:]).reshape(-1, Num_.shape[1] * B)
    # ndarray.astype returns a *copy*; the original discarded the result and
    # relied on tf.convert_to_tensor's dtype argument to cast. Assign it so
    # the cast is explicit.
    bit = bit.astype(np.float32)
    return tf.convert_to_tensor(bit, dtype=tf.float32)
def Bit2Num(Bit, B):
    """Inverse of Num2Bit: fold each group of B bits back into one level.

    Bit: (batch, dim * B) tensor of 0/1 values.
    Returns a float32 tensor of shape (batch, dim).
    """
    Bit_ = Bit.numpy()
    # ndarray.astype returns a copy — the original call discarded it, leaving
    # Bit_ at its incoming dtype. Assign the result to make the cast real.
    Bit_ = Bit_.astype(np.float32)
    Bit_ = np.reshape(Bit_, [-1, int(Bit_.shape[1] / B), B])
    num = np.zeros(shape=np.shape(Bit_[:, :, 1]))
    # Accumulate bits MSB-first: bit i carries weight 2**(B-1-i).
    for i in range(B):
        num = num + Bit_[:, :, i] * 2 ** (B - 1 - i)
    return tf.cast(num, dtype=tf.float32)
@tf.custom_gradient
def QuantizationOp(x, B):
    """Quantize x (expected in [0, 1] — fed through sigmoid upstream) to B-bit
    codewords and emit the packed bit stream.

    Rounding/bit-packing has no useful gradient, so the custom gradient
    passes dy through unchanged (straight-through estimator).
    """
    step = tf.cast((2 ** B), dtype=tf.float32)
    # Map [0, 1] floats onto integer levels 0 .. 2**B - 1.
    result = tf.cast((tf.round(x * step - 0.5)), dtype=tf.float32)
    result = tf.py_function(func=Num2Bit, inp=[result, B], Tout=tf.float32)
    def custom_grad(dy):
        # Identity gradient for both inputs (x and B).
        grad = dy
        return grad, grad
    return result, custom_grad
class QuantizationLayer(tf.keras.layers.Layer):
    """Keras wrapper around QuantizationOp (uniform B-bit quantizer)."""
    def __init__(self, B, **kwargs):
        self.B = B  # number of quantization bits
        super(QuantizationLayer, self).__init__()
    def call(self, x):
        return QuantizationOp(x, self.B)
    def get_config(self):
        # Implement get_config to enable serialization. This is optional.
        base_config = super(QuantizationLayer, self).get_config()
        base_config['B'] = self.B
        return base_config
@tf.custom_gradient
def DequantizationOp(x, B):
    """Map a packed B-bit stream back to floats at the quantization-bin
    centres in [0, 1]. Gradients pass straight through, mirroring
    QuantizationOp.
    """
    x = tf.py_function(func=Bit2Num, inp=[x, B], Tout=tf.float32)
    step = tf.cast((2 ** B), dtype=tf.float32)
    # +0.5 reconstructs at the centre of each bin rather than its lower edge.
    result = tf.cast((x + 0.5) / step, dtype=tf.float32)
    def custom_grad(dy):
        grad = dy
        return grad, grad
    return result, custom_grad
class DeuantizationLayer(tf.keras.layers.Layer):
    """Keras wrapper around DequantizationOp.

    The misspelled name ("Deuantization") is kept because the decoder code
    below instantiates the class under this exact name.
    """
    def __init__(self, B,**kwargs):
        self.B = B  # number of quantization bits
        super(DeuantizationLayer, self).__init__()
    def call(self, x):
        return DequantizationOp(x, self.B)
    def get_config(self):
        base_config = super(DeuantizationLayer, self).get_config()
        base_config['B'] = self.B
        return base_config
class UnfoldLayer(tf.keras.layers.Layer):
    """Extract every Kh x Kw sliding patch from a channels-first input.

    Output shape: (batch, C * Kh * Kw, number_of_patches), analogous to
    torch.nn.Unfold.
    """
    def __init__(self, Kh, Kw, stride=1, **kwargs):
        self.Kh = Kh
        self.Kw = Kw
        self.stride = stride
        super(UnfoldLayer, self).__init__(**kwargs)

    def call(self, x, **kwargs):
        return self.Unfold(x)

    def Unfold(self, x):
        """Gather the patches by repeated slicing + concat.

        NOTE(review): the Python double loop with per-patch tf.concat builds
        a large graph; acceptable for 32x32 inputs but slow to trace.
        """
        _, C, H, W = x.shape
        Co = C * self.Kw * self.Kh
        # Seed with the first patch so tf.concat always has something to
        # append to; the duplicated seed column is sliced off at the end.
        x_out = x[:, :, 0:self.Kh, 0:self.Kw]
        x_out = tf.reshape(x_out, [-1, Co, 1])
        for i in range(H - (self.Kh - 1)):
            for j in range(W - (self.Kw - 1)):
                Hstart = i * self.stride
                Hend = Hstart + self.Kh
                Wstart = j * self.stride
                Wend = Wstart + self.Kw
                xi = x[:, :, Hstart:Hend, Wstart:Wend]
                xi = tf.reshape(xi, [-1, Co, 1])
                x_out = tf.concat([x_out, xi], axis=2)
        x_out = x_out[:, :, 1:]  # drop the duplicated seed patch
        return x_out

    def get_config(self):
        # Serialize every constructor argument, not just Kw: the original
        # version dropped Kh and stride, so a model rebuilt from its config
        # could not reconstruct this layer correctly.
        base_config = super(UnfoldLayer, self).get_config()
        base_config['Kh'] = self.Kh
        base_config['Kw'] = self.Kw
        base_config['stride'] = self.stride
        return base_config
def SA_block(y, k, rel_planes, mid_planes, out_planes):
    """Self-attention block over a k x k neighbourhood (channels-first).

    y1 and y2 form the pairwise relation features (y2 is gathered from a
    symmetric-padded k x k window via UnfoldLayer), y3 carries the values;
    the learned weights multiply y3, get projected to ``out_planes``
    channels, and are added back to the input through a residual connection.
    """
    # pre: pre-activation (BN + ReLU) before the three 1x1 branches
    shortcut = y
    y = BatchNormalization()(y)
    y = ReLU()(y)
    # y1: per-pixel relation features, flattened to (rel_planes, 1, H*W)
    y1 = Conv2D(rel_planes, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                kernel_initializer='he_normal', name='SA_y1conv')(y)
    y1 = Reshape((rel_planes, 1, -1,))(y1)
    # y2: neighbourhood relation features
    y2 = Conv2D(rel_planes, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                kernel_initializer='he_normal', name='SA_y2conv')(y)
    # pad by 1 on each spatial side so every pixel has a full k x k window
    y2 = Lambda(lambda e: tf.pad(e, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC'))(y2)
    y2 = UnfoldLayer(Kh=k, Kw=k, stride=1)(y2)
    y2 = Lambda(lambda e: tf.expand_dims(e, axis=2))(y2)
    # y3: the value branch
    y3 = Conv2D(mid_planes, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                kernel_initializer='he_normal', name='SA_y3conv')(y)
    y3 = BatchNormalization()(y3)
    y3 = ReLU()(y3)
    # cat y12: concatenate centre (y1) and neighbourhood (y2) relations
    y12 = keras.layers.concatenate([y1, y2], axis=1)
    # convW: two BN-ReLU-1x1 stages turn the relations into attention weights
    y12 = BatchNormalization()(y12)
    y12 = ReLU()(y12)
    y12 = Conv2D(mid_planes, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                 kernel_initializer='he_normal', name='SA_conw1')(y12)
    y12 = BatchNormalization()(y12)
    y12 = ReLU()(y12)
    y12 = Conv2D(mid_planes, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                 kernel_initializer='he_normal', name='SA_conw2')(y12)
    y12 = Reshape((mid_planes, y3.shape[2], y3.shape[3],))(y12)
    # multi: weight the value branch, project, then residual-add
    y = tf.multiply(y12, y3)
    y = BatchNormalization()(y)
    y = ReLU()(y)
    y = Conv2D(out_planes, (3, 3), padding='same', use_bias=False, data_format='channels_first',
               kernel_initializer='he_normal', name='SA_multi_conv')(y)
    y = add([shortcut, y])
    y = BatchNormalization()(y)
    y = ReLU()(y)
    return y
def residual_block_decoded(y, n):
    """Decoder refinement block ``n``: three Conv2D(3x3) + BatchNorm +
    LeakyReLU stages narrowing the channel count 32 -> 16 -> 8.

    Layer names follow the original ``resi<n>_<kind><stage>`` scheme so
    pre-trained weights still load by name.
    """
    for stage, filters in enumerate((32, 16, 8), start=1):
        y = Conv2D(filters, (3, 3), padding='same', data_format="channels_first",
                   name='resi%s_conv%d' % (n, stage))(y)
        y = BatchNormalization(name='resi%s_bn%d' % (n, stage))(y)
        y = LeakyReLU(name='resi%s_leakyrelu%d' % (n, stage))(y)
    return y
# Bulid the autoencoder model of SALDR
def residual_network(x):
    """Full SALDR autoencoder graph.

    Encoder: a self-attention-refined conv stack compresses the 2x32x32 CSI
    image, then four chained Dense bottlenecks produce codes for CR 1/8,
    1/16, 1/32 and 1/64, each optionally quantized to B bits.
    Decoder: picks the bottleneck matching the global ``CR``, then five
    densely-connected residual blocks reconstruct the CSI image.
    """
    ip = x
    # encoder Net
    x1 = Conv2D(16, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                kernel_initializer='he_normal', name='conv00')(ip)
    x1 = BatchNormalization(name='encoder_bn0')(x1)
    x1 = LeakyReLU(name='encoder_leakyrulu0')(x1)
    x1 = Conv2D(16, (1, 1), padding='same', use_bias=False, data_format='channels_first',
                kernel_initializer='he_normal', name='conv01')(x1)  #
    x1 = SA_block(x1, k=3, rel_planes=4, mid_planes=8, out_planes=16)
    x1 = Conv2D(32, (3, 3), padding='same', data_format="channels_first", name='encoder_conv1')(x1)
    x1 = BatchNormalization()(x1)
    x1 = LeakyReLU()(x1)
    x1 = Conv2D(16, (3, 3), padding='same', data_format="channels_first", name='encoder_conv2')(x1)
    x1 = BatchNormalization()(x1)
    x1 = LeakyReLU()(x1)
    x1 = Conv2D(8, (3, 3), padding='same', data_format="channels_first", name='encoder_conv3')(x1)
    x1 = BatchNormalization()(x1)
    x1 = LeakyReLU()(x1)
    x1 = Conv2D(2, (3, 3), padding='same', data_format="channels_first", name='encoder_conv4')(x1)  #
    # x = tf.add(x1, ip) # concate or add
    # Skip connection: concatenate the conv features with the raw input.
    x = keras.layers.concatenate([x1, ip], axis=1)
    x = Conv2D(2, (1, 1), padding='same', data_format="channels_first", name='encoder_conv5')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Reshape((img_total,), name='encoder_reshape1')(x)
    # Chained bottlenecks: each lower-rate code is derived from the previous one.
    encoded1 = Dense(encoded_dim1, activation='linear', name='encoder_cr8_dense')(x)
    # sigmoid: Limit the output to [0, 1] for quantization
    encoded1 = Activation('sigmoid')(encoded1)
    encoded2 = Dense(encoded_dim2, activation='linear', name='encoder_cr16_dense')(encoded1)
    encoded2 = Activation('sigmoid')(encoded2)
    encoded3 = Dense(encoded_dim3, activation='linear', name='encoder_cr32_dense')(encoded2)
    encoded3 = Activation('sigmoid')(encoded3)
    encoded4 = Dense(encoded_dim4, activation='linear', name='encoder_cr64_dense')(encoded3)
    encoded4 = Activation('sigmoid')(encoded4)
    if B > 0:
        encoded1 = QuantizationLayer(B)(encoded1)
        encoded2 = QuantizationLayer(B)(encoded2)
        encoded3 = QuantizationLayer(B)(encoded3)
        encoded4 = QuantizationLayer(B)(encoded4)
    # decoder
    if B > 0:
        decoder1 = DeuantizationLayer(B)(encoded1)
        decoder1 = Reshape((encoded_dim1,))(decoder1)
        decoder2 = DeuantizationLayer(B)(encoded2)
        decoder2 = Reshape((encoded_dim2,))(decoder2)
        decoder3 = DeuantizationLayer(B)(encoded3)
        decoder3 = Reshape((encoded_dim3,))(decoder3)
        decoder4 = DeuantizationLayer(B)(encoded4)
        decoder4 = Reshape((encoded_dim4,))(decoder4)
    else:
        decoder1 = encoded1
        decoder2 = encoded2
        decoder3 = encoded3
        decoder4 = encoded4
    # Only the branch matching the configured compression ratio is decoded.
    if CR == 8:
        x = Dense(img_total, activation='linear', name='decoder_cr8_dense')(decoder1)
    if CR == 16:
        x = Dense(img_total, activation='linear', name='decoder_cr16_dense')(decoder2)
    if CR == 32:
        x = Dense(img_total, activation='linear', name='decoder_cr32_dense')(decoder3)
    if CR == 64:
        x = Dense(img_total, activation='linear', name='decoder_cr64_dense')(decoder4)
    x = Reshape((2, 32, 32,), name='decoder_reshape2')(x)
    ip = x
    # Dense connectivity: every block sees the outputs of all earlier blocks.
    x1 = ip
    x = residual_block_decoded(ip, 0)
    x2 = x
    x = keras.layers.concatenate([x1, x2], axis=1, name='resi_concate1')
    x = residual_block_decoded(x, 1)
    x3 = x
    x = keras.layers.concatenate([x1, x2, x3], axis=1, name='resi_concate2')
    x = residual_block_decoded(x, 2)
    x4 = x
    x = keras.layers.concatenate([x1, x2, x3, x4], axis=1, name='resi_concate3')
    x = residual_block_decoded(x, 3)
    x5 = x
    x = keras.layers.concatenate([x1, x2, x3, x4, x5], axis=1, name='resi_concate4')
    x = residual_block_decoded(x, 4)
    x6 = x
    x = keras.layers.concatenate([x1, x2, x3, x4, x5, x6], axis=1, name='resi_concate5')
    # sigmoid output matches the [0, 1]-shifted CSI representation.
    x = Conv2D(2, (3, 3), activation='sigmoid', padding='same', data_format="channels_first", name='decoder_convo')(x)
    return x
# Build the model, restore trained weights, then evaluate NMSE on the test set.
image_tensor = Input(shape=(img_channels, img_height, img_width))
network_output = residual_network(image_tensor)
autoencoder = Model(inputs=[image_tensor], outputs=[network_output])
adam = keras.optimizers.Adam(lr=0.0001)
autoencoder.compile(optimizer=adam, loss='mse')
print(autoencoder.summary())
outfile = '../SALDR_result/SALDR_U_indoor.h5'  # pre-trained weight checkpoint
autoencoder.load_weights(outfile, by_name=True)
# Data loading
if envir == 'indoor':
    mat = sio.loadmat('../data/DATA_Htrainin.mat')
    x_train = mat['HT']  # array 100000*2048
    mat = sio.loadmat('../data/DATA_Hvalin.mat')
    x_val = mat['HT']  # array 30000*2048
    mat = sio.loadmat('../data/DATA_Htestin.mat')
    x_test = mat['HT']  # array 20000*2048
elif envir == 'outdoor':
    mat = sio.loadmat('../data/DATA_Htrainout.mat')
    x_train = mat['HT']  # array
    mat = sio.loadmat('../data/DATA_Hvalout.mat')
    x_val = mat['HT']  # array
    mat = sio.loadmat('../data/DATA_Htestout.mat')
    x_test = mat['HT']  # array
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_test = x_test.astype('float32')
x_train = np.reshape(x_train, (
    len(x_train), img_channels, img_height, img_width))  # adapt this if using `channels_first` image data format
x_val = np.reshape(x_val, (
    len(x_val), img_channels, img_height, img_width))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (
    len(x_test), img_channels, img_height, img_width))  # adapt this if using `channels_first` image data format
# Testing data
tStart = time.time()
x_hat = autoencoder.predict(x_test)
tEnd = time.time()
print("It cost %f sec" % ((tEnd - tStart) / x_test.shape[0]))  # calculate the time of recontribute the CSI for
# every channel matrix
x_test_real = np.reshape(x_test[:, 0, :, :], (len(x_test), -1))  # 20000*1024
x_test_imag = np.reshape(x_test[:, 1, :, :], (len(x_test), -1))
x_test_C = x_test_real - 0.5 + 1j * (x_test_imag - 0.5)  # recover complex; the dataset is presumably stored shifted by +0.5 into [0, 1] - TODO confirm
x_hat_real = np.reshape(x_hat[:, 0, :, :], (len(x_hat), -1))
x_hat_imag = np.reshape(x_hat[:, 1, :, :], (len(x_hat), -1))
x_hat_C = x_hat_real - 0.5 + 1j * (x_hat_imag - 0.5)
# ###############################################################################################
power = np.sum(abs(x_test_C) ** 2, axis=1)
mse = np.sum(abs(x_test_C - x_hat_C) ** 2, axis=1)
print("In " + envir + " environment")
print("NMSE of CR:%s is" % CR, 10 * math.log10(np.mean(mse / power)))
| [
"noreply@github.com"
] | noreply@github.com |
26b3004e71f04e82258a624cffc6fefb66233471 | 471a7c00d3ec517f4d8097623d47bed99972388a | /util/message_parser.py | 2c368617fcf9cfd850897ea638024c56ec151d3d | [] | no_license | KeithMaxwellZ/QQBot-Mirai | 0d7bd39c047ba27e038f16f95b3bc382b9a88013 | ae5fca302c1864968e69d622624a64df05856493 | refs/heads/master | 2023-06-17T18:59:46.655652 | 2021-07-04T07:17:22 | 2021-07-04T07:17:22 | 381,435,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | from p_var import COMMAND, ENV
def test_command():
    """Return the built-in "TEST" command.

    The handler ignores its arguments and replies with a single "SUCCESS"
    plain-text segment; the ``True`` flag asks the caller to @-mention the
    sender in the reply.
    """
    def _reply_success(_, __, *args):
        reply = [{"type": "Plain", "text": "SUCCESS"}]
        return reply, True

    return [("TEST", _reply_success)]
class ParserManager:
    """Routes incoming chat messages: command messages (prefixed with
    ``COMMAND``) are dispatched to handlers registered in ``ENV['cmd']``;
    everything else may be forwarded to the ``ENV['cm']`` matcher."""

    def __init__(self):
        # Register the built-in commands alongside whatever is already loaded.
        ENV['cmd'].extend(test_command())

    def parse(self, msg: str, rd: dict):
        """Parse one message and build the reply.

        msg: raw message text; rd: raw event dict (sender id, group, ...).
        Returns a list of message segments to send back (possibly empty).
        """
        res = []
        if msg[0] == COMMAND:
            print(msg)
            # ``str.split(' ')`` yields '' (never ' ') for runs of spaces, so
            # the original cleanup loop comparing ``c[i] == " "`` never
            # matched and empty tokens leaked into the handler arguments.
            # Filter the empty pieces out directly instead.
            c = [part for part in msg.split(' ') if part != '']
            raw = c[0][1:]  # command name without the prefix character
            qq = int(rd['sender']['id'])
            res = []
            matched = False
            for i in ENV['cmd']:
                if raw == i[0]:
                    matched = True
                    # Handlers return (segments, mention_sender_flag).
                    tr, at = i[1](rd, tuple(c))
                    if tr:
                        if at:
                            res.extend([{'display': '', 'target': qq, 'type': 'At'},
                                        {"type": "Plain", "text": "\n"}])
                        res.extend(tr)
                    break
            if not matched:
                res.extend([{'display': '', 'target': qq, 'type': 'At'},
                            {"type": "Plain", "text": "\n"},
                            {"type": "Plain", "text": "未知指令\n"}])
        else:
            # Non-command message: optionally run the configured matcher.
            if 'ACTV_cm' in ENV and ENV['ACTV_cm']:
                tr = ENV['cm'].search(rd['sender']['group']['id'], msg)
                res.extend(tr)
        return res
| [
"keithzhang021@outlook.com"
] | keithzhang021@outlook.com |
0e32f524995c1548bc7ebefdf82ad162bf3602eb | 194e7f2ea97a25915530c34c88d811fecb4cb49e | /将pandas填充到QTableView并根据条件更改颜色.py | b03231d6ca3bb85621a0e6cfb540e54725788494 | [] | no_license | webclinic017/TDXPystock | 77471d6b7896b5e39d8bceb109af24b3215374f4 | 94b974858697b0495dec7fb125443d786b0ff52a | refs/heads/master | 2023-08-24T18:15:08.407012 | 2021-10-16T09:58:22 | 2021-10-16T09:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,951 | py |
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.Qt import *
import pandas as pd
import sys
class PandasModel(QtCore.QAbstractTableModel):
    """Qt table model exposing a pandas DataFrame to a QTableView.

    Values below zero in columns 3 and 4 are painted green, all cells are
    centre-aligned, and clicking a header sorts the frame in place.
    """

    def __init__(self, df=None, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent=parent)
        # The original default was ``df=pd.DataFrame()`` — a mutable default
        # evaluated once at import and shared across instances. Use None as
        # the sentinel and build a fresh frame per instance instead.
        self._df = pd.DataFrame() if df is None else df

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """Column name (horizontal) or index label (vertical) for headers."""
        if role != QtCore.Qt.DisplayRole:
            return QtCore.QVariant()
        if orientation == QtCore.Qt.Horizontal:
            try:
                return self._df.columns.tolist()[section]
            except (IndexError, ):
                return QtCore.QVariant()
        elif orientation == QtCore.Qt.Vertical:
            try:
                return self._df.index.tolist()[section]
            except (IndexError, ):
                return QtCore.QVariant()

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Cell text plus per-role styling (green for negatives in cols 3/4,
        centred alignment)."""
        current_column = index.column()
        current_row = index.row()
        if index.isValid():
            if role == QtCore.Qt.ForegroundRole:
                # Columns 3/4 hold signed percentage strings; negatives are
                # drawn green (the local market convention for a fall).
                if current_column == 3 or current_column == 4:
                    it = self._df.iloc[current_row, current_column]
                    if float(it) < 0:
                        return QtGui.QBrush(QtCore.Qt.green)
            if role == Qt.TextAlignmentRole:
                return Qt.AlignCenter
            if role == QtCore.Qt.DisplayRole:
                return str(self._df.iloc[index.row(), index.column()])
            if role != QtCore.Qt.DisplayRole:
                return QtCore.QVariant()
        if not index.isValid():
            return QtCore.QVariant()

    def setData(self, index, value, role):
        """Write ``value`` back into the DataFrame cell behind ``index``."""
        row = self._df.index[index.row()]
        col = self._df.columns[index.column()]
        if hasattr(value, 'toPyObject'):
            # PyQt4 gets a QVariant
            value = value.toPyObject()
        else:
            # PySide gets an unicode
            dtype = self._df[col].dtype
            if dtype != object:
                value = None if value == '' else dtype.type(value)
        # DataFrame.set_value() was removed in pandas 1.0; ``.at`` is the
        # supported fast scalar setter with the same semantics.
        self._df.at[row, col] = value
        return True

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self._df.index)

    def columnCount(self, parent=QtCore.QModelIndex()):
        return len(self._df.columns)

    def sort(self, column, order):
        """Sort the backing frame by the clicked column and notify the view."""
        colname = self._df.columns.tolist()[column]
        self.layoutAboutToBeChanged.emit()
        self._df.sort_values(colname, ascending=order == QtCore.Qt.AscendingOrder, inplace=True)
        self._df.reset_index(inplace=True, drop=True)
        # NOTE(review): stores the sort state on the Main window class defined
        # later in this file — tight coupling, kept for compatibility.
        Main.sortorder = order
        Main.orderbycolunm = colname
        self.layoutChanged.emit()
class Main(QMainWindow):
    """Demo window: shows a two-row stock DataFrame in a styled QTableView."""
    # Last sort state, written back here by PandasModel.sort() (class-level).
    sortorder = 1
    orderbycolunm = ''
    def __init__(self, parent=None):
        super().__init__()
        self.setWindowTitle('根据条件改变颜色')
        self.tableView = QTableView()
        # Sample data: code, name, price, daily change %, 3-day change %.
        pddata = pd.DataFrame(
            {'代码': ['000625', '000628'], '名称': ['长安汽车', '高薪发展'], '价格': ['16.37', '8.77'], '涨幅': ['-1.32', '-2.34'],
             '3日涨幅': ['-1', '-4.12']})
        self.model = PandasModel(pddata)
        self.tableView.setModel(self.model)
        self.initui()
        self.tableView.show()
    def initui(self):
        """Apply sizing, sorting and the dark red-on-black stylesheet."""
        self.tableView.setFixedSize(900, 600)
        self.tableView.setSortingEnabled(True)
        self.tableView.horizontalHeader().setStyleSheet(
            "QHeaderView::section {""color: red;padding-left: 2px;border: 1px solid #6c6c6c;background-color:rgb(16, 0, 25);font: bold 12pt};");
        # self.ui.tableView_bankMonitor.horizontalHeader().setFont(font)
        self.tableView.verticalHeader().setStyleSheet("QHeaderView::section {"
                                                      "color: red;padding-left: 2px;border: 1px solid #6c6c6c;background-color:rgb(16, 0, 25)}");
        self.tableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch);  # auto-size columns to contents
        self.tableView.horizontalHeader().setStretchLastSection(True);  # last column fills remaining width
        self.tableView.setStyleSheet(
            "QTableView{border: 1px;color: yellow;background-color: rgb(16, 0, 25)};tableView QTableCornerButton::section {border: 1px solid gray; background-color: rgb(16, 0, 25);}")
if __name__ == "__main__":
app = QApplication(sys.argv)
runmain = Main()
# main.show()
sys.exit(app.exec_()) | [
"newhackerman@163.com"
] | newhackerman@163.com |
d0f1683880dd97dbf57a8ff8ca500f4470b5aa9f | a42ed872908291bbfc5ae2f68968edc4c47edfcf | /lesson_16/choices_test.py | 8c1ac0adaa1e1dc47e02cf3b312e2d5874927c43 | [] | no_license | antonplkv/itea_advanced_august | b87f48cc48134ce1a73e167a5c834322792d0167 | 265c124e79747df75b58a1fd8c5d13605c1041b2 | refs/heads/master | 2023-01-02T23:31:39.050216 | 2020-10-28T19:15:01 | 2020-10-28T19:15:01 | 291,792,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import mongoengine as me
me.connect('testtetsttetst')
class Task(me.Document):
LOW_PRIORITY = 1
MEDIUM_PRIORITY = 2
HIGH_PRIORITY = 3
PRIORITIES = (
(LOW_PRIORITY, 'Низкий приоритет'),
(MEDIUM_PRIORITY, 'Средний приоритет'),
(HIGH_PRIORITY, 'Высокий приоритет')
)
INSIDE_CATEGORY = 1
OUTSIDE_CATEGORY = 2
CATEGORIES = (
(INSIDE_CATEGORY, 'Проблема в магазине'),
(OUTSIDE_CATEGORY, 'Проблема на складе'),
)
priority = me.IntField(choices=PRIORITIES)
category = me.IntField(choices=CATEGORIES) | [
"polyakov.anton@ukr.net"
] | polyakov.anton@ukr.net |
1d76a739d45168f20e05f2036d432c109bc00d75 | cbeef32eb6058803feb5cb4ff27cf33dc66930a7 | /venv/bin/pip3.7 | 6141025d6533c9164719875a94983801510093e3 | [] | no_license | NgyAnthony/python-course | e0a1b92c6a358f0225f136da0fb5b4cfbd495ae9 | 1737d513637c2ad797fe6612e98d2b5aec8d7d62 | refs/heads/master | 2020-03-25T03:10:14.517841 | 2018-08-19T21:03:04 | 2018-08-19T21:03:04 | 143,326,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | 7 | #!/Users/Anthony/GitHub/python-course/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools-generated launcher (see the EASY-INSTALL header): strip the
    # "-script.py"/".exe" suffix from argv[0], then hand control to pip's
    # console entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"nguyen.anthony@protonmail.com"
] | nguyen.anthony@protonmail.com |
e123eb5ce78814907f4c0576ae6dc701c3f31bc2 | c105570f12f1d56087ffb831f5d34cd763d6c90b | /top/api/rest/WlbWaybillIQuerydetailRequest.py | e31f3da969397463b9eaf5940b3a4f8b3c5c3026 | [] | no_license | wjianwei126/Alinone | 01607423833d7736b2fd3c77e9e21f63c69b4e4c | 80144d4657cb049d651c09647eb245405240f12f | refs/heads/master | 2020-12-07T05:14:58.746777 | 2015-05-06T12:48:33 | 2015-05-06T12:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | '''
Created by auto_sdk on 2014-11-09 14:51:18
'''
from top.api.base import RestApi
class WlbWaybillIQuerydetailRequest(RestApi):
    """Taobao Open Platform request for ``taobao.wlb.waybill.i.querydetail``.

    Generated by the SDK tool (see the header comment); attributes mirror
    the API's request parameters.
    """
    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Waybill detail query payload object, to be set by the caller.
        self.waybill_detail_query_request = None

    def getapiname(self):
        """API method name used when dispatching this request."""
        return 'taobao.wlb.waybill.i.querydetail'
| [
"rapospectre@0163.com"
] | rapospectre@0163.com |
7f5de79bf22a129c02a35fdd8c9a11b23ddd65a3 | 64db5b9df7daf92cf6229fe8b91898d79539a147 | /b1.py | 6b8f325c8273cdd3d4c342ddc03e52626b8d1d73 | [] | no_license | Trieu210/Duc-session5-C4T8 | c463a23176236a126df9a7fa5556d71ce6375742 | 3c9794a101718b858f63b2f484c6a8b519925ef2 | refs/heads/master | 2020-03-29T13:57:06.707392 | 2018-09-25T13:24:38 | 2018-09-25T13:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | x = int(input('Pls type a month u like'))
# Map the month number to a season. The original chain used strict
# inequalities (e.g. ``2 < x < 5``), so months 2, 5, 8 and 11 matched no
# branch and printed nothing; inclusive upper bounds close those gaps while
# keeping the same winter/spring/summer/autumn grouping (12, 1, 2 = winter).
if x <= 2:
    print('winter')
elif x <= 5:
    print('spring')
elif x <= 8:
    print('summer')
elif x <= 11:
    print('autumn')
elif x < 13:
    print("winter")
| [
"vanoss010@gmail.com"
] | vanoss010@gmail.com |
2d36654f0aab37213e7a98c059b27cb7667d61cd | 4fe2212150dd474f1914424d6fada4f0e2bf2999 | /View/SystemMonitor/login/migrations/0001_initial.py | c89d15f6156fef1d98bc7b1758bdc2841bd3256a | [] | no_license | mn3711698/SystemMonitor | c05b5e2df5d1482ea3c48c64c4de074cbcc3b646 | 03def90ecf375313243a182dd15fcc18f19f771b | refs/heads/master | 2020-04-19T22:03:40.115395 | 2017-03-29T13:29:31 | 2017-03-29T13:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the ``login`` app: a ``user`` profile table plus a
    ``login`` table holding each user's password, linked by foreign key.
    (Auto-generated by Django; edit with care.)"""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='login',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('passwd', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='user',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('user_num', models.CharField(max_length=8)),
                ('user_type', models.IntegerField()),
                ('name', models.CharField(max_length=20)),
                ('email', models.CharField(max_length=50)),
            ],
        ),
        # Added after both CreateModel ops because ``user`` is created second.
        migrations.AddField(
            model_name='login',
            name='user_id',
            field=models.ForeignKey(to='login.user'),
        ),
    ]
| [
"zhoupans_mail@163.com"
] | zhoupans_mail@163.com |
872de0991877dfef1b21247ab755ef7a130f0585 | 3f3f0d63aa36aec52f1bc3c1d8a8a0cebc5d6418 | /MatchData.py | 7240aab78339dbf69300d562506bc96685a90823 | [] | no_license | Hosstell/bet | 2f04b52986c3fb786274b1524bdb9fb51503b508 | 48e6d0fdc3380c718cde516261dcbc1d1920112f | refs/heads/master | 2021-06-13T18:23:10.809455 | 2019-09-06T16:35:19 | 2019-09-06T16:35:19 | 201,738,105 | 0 | 0 | null | 2021-06-02T00:28:24 | 2019-08-11T08:27:08 | Python | UTF-8 | Python | false | false | 8,965 | py | #coding:utf-8
from selenium import webdriver
import time
def add_to_file(set_game, coeff, result):
    """Append one settled bet to ``bets.log`` as ``"<result>: <coeff> - <set_game>"``.

    Uses a ``with`` block so the handle is closed even if the write raises
    (the original opened/closed the file manually and leaked it on error).
    """
    line = '{}: {} - {}\n'.format(result, coeff, set_game)
    with open('bets.log', 'a') as file:
        file.write(line)
class MatchData:
def __init__(self, link):
self.data = {}
self.MINIMAL_COUNT_SCORES = 2
self.max_update_time = 5
self.last_update_time = None
self.bets = []
self.AMOUNT = 50
options = webdriver.ChromeOptions()
options.add_argument('--window-size=1000,500')
self.window = webdriver.Chrome(chrome_options=options)
self.window.set_window_position(0, 0)
self.window.get(link)
self.update_data()
def update_data(self):
try:
self.__update_data()
self.last_update_time = time.time()
except:
pass
def is_active_game(self):
status_text = self.window.find_element_by_class_name('info-status').text
return 'сет' in status_text
def __update_data(self):
self.data["scores"] = self.__get_information_of_games()
self.data["probability"] = self.__get_probability_ball_by_players()
def __get_information_of_games(self):
data = []
set = self.__get_current_set()
filling = self.get_initial_filing()
for i in range(set):
self.__click_to_set(i + 1)
statistics = self.window.find_element_by_id("tab-mhistory-{0}-history".format(i + 1))
statistics = [elem.text for elem in statistics.find_elements_by_class_name("fifteen")]
scores = [j.text for j in self.window.find_elements_by_class_name("match-history-score") if j.text != ""]
game = []
for j in range(len(statistics)):
player = ["left", "right"][j % 2 == ("left" == filling[i])]
score = statistics[j]
score = self.__get_only_balls(score)
if j < len(scores):
game.append({
"player": player,
"score": score,
"table": scores[j]
})
else:
game.append({
"player": player,
"score": score,
"table": "live"
})
data.append(game)
return data
def __get_only_balls(self, elem):
elem = elem.replace(u"БП", u"")
elem = elem.replace(u" ", u"")
elem = elem.replace(u"СП", u"")
elem = elem.replace(u"M\u041f", u"")
return elem
def __get_current_set(self):
table = self.window.find_elements_by_class_name('scoreboard')
current_set = sum(map(lambda x: int(x.text), table))
if self.is_active_game():
current_set += 1
return current_set
# return int(self.window.find_element_by_class_name("mstat").text[0])
def __get_current_game(self):
try:
scores = self.window.find_element_by_class_name("mstat").text.split("\n")[1]
games = scores.split("(")[0].split(":")
game = int(games[0]) + int(games[1]) + 1
return game
except:
return 0
def get_current_set_and_game(self):
return (self.__get_current_set(), self.__get_current_game())
# Возращает первые подачи в сетах
def get_initial_filing(self):
mas = []
history = self.window.find_element_by_id('match-history-content')
for i in range(1, 6, 1):
name_class = "tab-mhistory-{0}-history".format(i)
pages = history.find_elements_by_id(name_class)
if len(pages):
servers = pages[0].find_elements_by_class_name("server")
if (servers[0].find_elements_by_class_name("icon-box") != []):
mas.append("left")
else:
mas.append("right")
return mas
def __click_to_set(self, set):
classname = "mhistory-{0}-history".format(set)
self.window.find_element_by_id(classname).find_element_by_tag_name('a').click()
def __get_probability_ball_by_players(self):
return {
"left": self.__get_probability_ball_by("left"),
"right": self.__get_probability_ball_by("right")
}
def __get_probability_ball_by(self, player):
left, right = 0, 0
scores = [u"15", u"30", u"40"]
for set in self.data["scores"]:
for game in set:
if game["player"] == player:
left_str = [point.split(u":")[0] for point in game["score"].split(u",")]
right_str = [point.split(u":")[1] for point in game["score"].split(u",")]
left_player = sum([1 for score in scores if score in left_str]) + left_str.count(u"A") + right_str[:-1].count(u"A")
right_player = sum([1 for score in scores if score in right_str]) + right_str.count(u"A") + left_str[:-1].count(u"A")
if left_player > right_player:
left_player += 1
else:
right_player += 1
left += left_player
right += right_player
if left + right > self.MINIMAL_COUNT_SCORES:
if player == "left":
return left / (right+left)
else:
return right / (right + left)
def get_probability_ball(self, player):
self.__update_data_if_needed()
return self.data["probability"][player]
def get_probability_ball_by_player(self, player):
self.__update_data_if_needed()
return self.data["probability"][player]
def __update_data_if_needed(self):
if self.__check_time_last_update():
self.update_data()
def __check_time_last_update(self):
return time.time() - self.last_update_time > self.max_update_time
def get_probability_by_set_game(self, set, game, player):
    """Probability that *player* wins the given game of the given set.
    Determines who serves that game (the server alternates every game,
    starting with the set's initial server) and maps the server's
    serve-win probability onto the requested player.
    """
    ball_player_in_game = self.get_initial_filing()[int(set)-1]
    if int(game) % 2 == 0:
        # Even-numbered games are served by the other side.
        ball_player_in_game = ['left', 'right'][ball_player_in_game == 'left']
    probability = self.get_probability_ball(ball_player_in_game)
    if player == ball_player_in_game:
        return probability
    else:
        return 1 - probability
def who_is_winner_in_set_game(self, set, game):
    """Return 'left'/'right' for the winner of game *game* in set *set*.
    Both indices are 1-based.  Returns None when the requested set/game has
    not been recorded yet or the score string is malformed.  The original
    bare ``except: pass`` is narrowed to the exceptions the lookups can
    actually raise, and the fall-through None is made explicit.
    """
    try:
        set_balls = self.data["scores"][set-1]
        scores_in_game = set_balls[game-1]['score']
        scores = [u"15", u"30", u"40"]
        # Per-side point columns of "a:b,c:d,..." score strings.
        left_str = [point.split(u":")[0] for point in scores_in_game.split(u",")]
        right_str = [point.split(u":")[1] for point in scores_in_game.split(u",")]
        left_player = sum([1 for score in scores if score in left_str]) + left_str.count(u"A") + right_str[:-1].count(u"A")
        right_player = sum([1 for score in scores if score in right_str]) + right_str.count(u"A") + left_str[:-1].count(u"A")
        return ['right', 'left'][left_player > right_player]
    except (IndexError, KeyError):
        # Game not played yet / data not scraped: report "no winner".
        return None
def ball_player_set_game(self, set, game):
    """Return which side ('left'/'right') serves game *game* of set *set*.
    Both indices are 1-based; the set's first server alternates with the
    opponent every game.  Returns None when the set has no recorded games.
    The original bare ``except`` is narrowed to the exceptions the data
    lookup can actually raise.
    """
    try:
        start_ball_player = self.data["scores"][set-1][0]['player']
    except (IndexError, KeyError):
        # Set not started / data missing.
        return None
    next_ball_player = ['right', 'left'][start_ball_player == 'right']
    return next_ball_player if game % 2 == 0 else start_ball_player
def make_bet(self, set_game, player, coeff):
    """Record a pending bet unless one already exists for this set/game."""
    already_placed = any(bet['set_game'] == set_game for bet in self.bets)
    if not already_placed:
        new_bet = {
            'set_game': set_game,
            'player': player,
            'coeff': coeff,
            'status': False,
        }
        self.bets.append(new_bet)
def check_bets(self):
    """Settle pending bets for games that now have a known winner.
    For each unsettled bet, looks up the game result; wins are logged via
    add_to_file with the payout (AMOUNT * coefficient), losses with 0.
    Bets whose game is still unfinished stay pending.
    """
    for bet in self.bets:
        if bet['status']:
            continue  # already settled
        winner_in_game = self.who_is_winner_in_set_game(*bet['set_game'])
        if not winner_in_game:
            continue  # game not finished yet
        if winner_in_game == bet['player']:
            add_to_file(bet['set_game'], bet['coeff'], self.AMOUNT*bet['coeff'])
        else:
            add_to_file(bet['set_game'], bet['coeff'], 0)
        bet['status'] = True
def get_names(self):
    """Return the player names scraped from the page header."""
    # Every other element holds a name; the first element is skipped.
    name_elements = self.window.find_elements_by_class_name('participant-imglink')[1::2]
    return [element.text for element in name_elements]
def __del__(self):
    """Close the browser window and shut the WebDriver down when collected."""
    self.window.close()
    self.window.quit()
if __name__ == '__main__':
    # Manual smoke test: scrape a single live match page.
    match = MatchData('https://www.myscore.ru/match/bT72dnmk/#match-summary')
    print(match.get_names())
    # print(match.ball_player_set_game(2, 1))
    # print(match.get_probability_by_set_game(2, 2, 'right'))
    # print(match.get_probability_by_set_game(2, 2, 'left'))
| [
"st.andrey-serov@yandex.ru"
] | st.andrey-serov@yandex.ru |
c2c10b270410119bce4a476fa68496d932d8b585 | d60e8f913f2bbef8f9aa8ff9c2874d2a16def2c1 | /venv/bin/eb | 82d098d82fedc573f31efafc368914507f9748b5 | [
"Apache-2.0"
] | permissive | Justin-Bee/SER401_Trynkit | f8fc2dc23d7a0aa6655d37e57dc82a184de3278a | 3bf826121b2115f67271d88aa253075d71e81a90 | refs/heads/master | 2022-12-10T01:01:57.428388 | 2020-04-14T17:53:54 | 2020-04-14T17:53:54 | 208,181,054 | 0 | 2 | Apache-2.0 | 2022-12-08T06:51:17 | 2019-09-13T02:19:51 | Python | UTF-8 | Python | false | false | 419 | #!/home/jbee/PycharmProjects/SER401_Trynkit/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'awsebcli==3.15.3','console_scripts','eb'
# Auto-generated setuptools console-script wrapper for the AWS EB CLI.
__requires__ = 'awsebcli==3.15.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix so argv[0] reads "eb".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('awsebcli==3.15.3', 'console_scripts', 'eb')()
    )
| [
"jbee.appz@gmail.com"
] | jbee.appz@gmail.com | |
8dc79e0dd5147c35f465679e469e100e180fb330 | 4b056544003cffc3155c64af649535df1bcd1f25 | /crawler/shurikenCrawler/shurikenCrawler/spiders/knef.py | 1a843f628940b23bc93da2c59b3df94e1215bbd3 | [] | no_license | ilcapone/Shuriken | 9a304eb0a102f19a19d552196725bbd5fd26c3a2 | 7473dbdc50423b26519b7f495d6e3f138c7e6316 | refs/heads/master | 2021-01-22T21:27:15.284213 | 2017-06-23T08:38:02 | 2017-06-23T08:38:02 | 85,427,637 | 0 | 0 | null | 2017-04-02T19:58:17 | 2017-03-18T19:44:48 | Perl | UTF-8 | Python | false | false | 897 | py | import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from shurikenCrawler.items import UrlKnef
from datetime import datetime
class Knef(CrawlSpider):
    """Broad-crawl spider that records every outgoing link it encounters.
    Starts at the URL passed on the command line (``-a startUrl=...``) and
    follows all links; each crawled page emits one UrlKnef item per
    extracted link, carrying the link URL, a timestamp and the referring
    page URL.
    """
    name = 'knef'
    allowed_domains = []

    def __init__(self, startUrl=None, *args, **kwargs):
        super(Knef, self).__init__(*args, **kwargs)
        self.start_urls = [startUrl]

    rules = (Rule(LinkExtractor(allow=()), callback='parse_item', follow=True),)

    def parse_item(self, response):
        # Yield one item per extracted link.  The original code built an
        # item for every link but returned only the last one, silently
        # dropping all the others.
        for link in LinkExtractor(allow=(), deny=self.allowed_domains).extract_links(response):
            item = UrlKnef()
            item['url'] = link.url
            item['time'] = str(datetime.now())
            item['urlComes'] = response.url
            self.logger.info('Link url search: %s', link.url)
            yield item
"juanzumbido@hotmail.com"
] | juanzumbido@hotmail.com |
10f47f80b7c9c1ebf1af1c941dbe2dbbc69c281d | a5b4384d1eaef17875499a3f721fedb91afa9fba | /usr/app/wsgi/tests/test_identification.py | 4b2a91ae1772c81ee9d77f68ffac95c017821d1e | [] | no_license | wizardsofindustry/quantum-usr | 85f609b8c08264d69204f696bea0446df19f0eb6 | d49a3dcdf4df2ce31324f5ec98ae5c7130e01cbb | refs/heads/master | 2021-07-18T06:51:16.034613 | 2018-11-23T19:20:23 | 2018-11-23T19:25:16 | 136,974,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | import unittest
import ioc
import sq.test
import sq.lib.x509
from ....infra import orm
from ..endpoints import IdentificationEndpoint
@sq.test.integration
class X509SubjectIdentificationTestCase(sq.test.SystemTestCase):
    """Integration tests for identifying a Subject through the
    IdentificationEndpoint using different principal types."""
    # Global Subject ID that the test principals are associated with.
    gsid = "00000000-0000-0000-0000-000000000000"
    metadata = orm.Relation.metadata
    def setUp(self):
        """Load the test certificate and associate it with the test Subject."""
        super(X509SubjectIdentificationTestCase, self).setUp()
        self.endpoint = IdentificationEndpoint()
        self.service = ioc.require('SubjectIdentificationService')
        with open('dev/usr.testing.crt', 'rb') as f:
            self.pem = f.read()
        self.crt = sq.lib.x509.Certificate.frompem(self.pem)
        self.service.associate(self.gsid,
            {'type': 'x509', 'crt': bytes.hex(self.pem)})
    # NOTE(review): skipped -- no email principal is associated in setUp,
    # so this request cannot currently succeed.
    @unittest.skip
    def test_subject_is_identified_by_email(self):
        """Identify a Subject by email."""
        request = sq.test.request_factory(
            method='POST',
            json=[{
                'type': 'email',
                'email': "cochise.ruhulessin@wizardsofindustry.net"
            }]
        )
        response = self.run_callable(self.loop, self.endpoint.handle, request)
    def test_subject_is_identified_by_x509(self):
        """Identify a Subject by X.509 certificate."""
        dto = {
            'type': 'x509',
            'crt': bytes.hex(self.pem)
        }
        request = self.request_factory(method='POST', json=dto)
        response = self.run_callable(self.loop, self.endpoint.handle, request)
        self.assertEqual(response.status_code, 200)
    def test_unknown_principal_type_returns_404(self):
        """An unrecognised principal type must yield a 404 response."""
        dto = {
            'type': 'foo',
            'crt': bytes.hex(self.pem)
        }
        request = self.request_factory(method='POST', json=dto)
        response = self.run_callable(self.loop, self.endpoint.handle, request)
        self.assertEqual(response.status_code, 404)
#pylint: skip-file
| [
"cochise.ruhulessin@wizardsofindustry.net"
] | cochise.ruhulessin@wizardsofindustry.net |
668b21490b70115fd45ed0b58eb91c441cd93947 | 1d1d97c7759d9fd0446dfaa103b9c6fb17e7341a | /backups/migrations/0024_auto_20210314_1254.py | 3d691d8460f81eaf50a98b42a5e978b591fa308b | [] | no_license | sulaimanchakkiyathil/travport | 032c69d51a7b8e3312042aed343f9a321e0902f5 | 7828c6f52ce86a479eda3488ce1c5dce7cb6dee1 | refs/heads/master | 2023-04-12T09:36:31.280748 | 2021-05-06T05:23:52 | 2021-05-06T05:23:52 | 364,793,044 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # Generated by Django 3.0.8 on 2021-03-14 07:24
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``dob`` and ``passport_expiry`` columns from the
    customer registration table."""
    dependencies = [
        ('dashboard', '0023_auto_20210314_1252'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='cust_registration_table',
            name='dob',
        ),
        migrations.RemoveField(
            model_name='cust_registration_table',
            name='passport_expiry',
        ),
    ]
| [
"suluc4u@gmail.com"
] | suluc4u@gmail.com |
5c430d9445c2f61964997f22bfa2ba75c98eba92 | a183ffb47d54e47dedd40536a64a39b994869977 | /inversion.py | 314513e0737a22970030fd675a13c51937ee5023 | [] | no_license | luissergiovaldivia/Python | bf9ada210e8ba521126d231a05729cf1f07b98c8 | 6a3d30baa6a0a320ba4cb1b38825f7c3a7edc2c7 | refs/heads/main | 2023-02-14T16:48:10.300129 | 2021-01-09T15:15:09 | 2021-01-09T15:15:09 | 328,156,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | cadena = input('introduce una cadena : ')
inversion = ' '
for caracter in cadena:
inversion = caracter + inversion
print('Su inversion es:', inversion) | [
"i.h.m.p.servicio@gmail.com"
] | i.h.m.p.servicio@gmail.com |
579e5cbdefc699b964e62dbd7f5acf9f3aee439e | 36fbba7b0823e04062c41f26385ed71da5b4f4d4 | /tests/test_pipelines/test_bottom_up_pipelines.py | f687ecb2a385b85dc28988e7ed7937410b446a76 | [
"Apache-2.0"
] | permissive | cherryjm/mmpose | 2fcd4504a0a0d46f6a4dce6d0be1141fdead6bb5 | b0acfc423da672e61db75e00df9da106b6ead574 | refs/heads/master | 2023-06-12T10:15:45.964450 | 2021-05-07T06:06:31 | 2021-05-07T06:06:31 | 346,599,724 | 1 | 0 | Apache-2.0 | 2021-03-11T06:22:06 | 2021-03-11T06:22:05 | null | UTF-8 | Python | false | false | 12,297 | py | import copy
import os.path as osp
import numpy as np
import pytest
import xtcocotools
from xtcocotools.coco import COCO
from mmpose.datasets.pipelines import (BottomUpGenerateHeatmapTarget,
BottomUpGeneratePAFTarget,
BottomUpGenerateTarget,
BottomUpGetImgSize,
BottomUpRandomAffine,
BottomUpRandomFlip, BottomUpResizeAlign,
LoadImageFromFile)
def _get_mask(coco, anno, img_id):
img_info = coco.loadImgs(img_id)[0]
m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
for obj in anno:
if obj['iscrowd']:
rle = xtcocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
m += xtcocotools.mask.decode(rle)
elif obj['num_keypoints'] == 0:
rles = xtcocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
for rle in rles:
m += xtcocotools.mask.decode(rle)
return m < 0.5
def _get_joints(anno, ann_info, int_sigma):
num_people = len(anno)
if ann_info['scale_aware_sigma']:
joints = np.zeros((num_people, ann_info['num_joints'], 4),
dtype=np.float32)
else:
joints = np.zeros((num_people, ann_info['num_joints'], 3),
dtype=np.float32)
for i, obj in enumerate(anno):
joints[i, :ann_info['num_joints'], :3] = \
np.array(obj['keypoints']).reshape([-1, 3])
if ann_info['scale_aware_sigma']:
# get person box
box = obj['bbox']
size = max(box[2], box[3])
sigma = size / 256 * 2
if int_sigma:
sigma = int(np.ceil(sigma))
assert sigma > 0, sigma
joints[i, :, 3] = sigma
return joints
def _check_flip(origin_imgs, result_imgs):
"""Check if the origin_imgs are flipped correctly."""
h, w, c = origin_imgs.shape
for i in range(h):
for j in range(w):
for k in range(c):
if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:
return False
return True
def test_bottomup_pipeline():
    """End-to-end check of the bottom-up data pipeline transforms (image
    loading, random flip, random affine, target generation and multi-scale
    resize) on a real COCO sample, including expected validation errors."""
    data_prefix = 'tests/data/coco/'
    ann_file = osp.join(data_prefix, 'test_coco.json')
    coco = COCO(ann_file)
    ann_info = {}
    ann_info['flip_pairs'] = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                              [11, 12], [13, 14], [15, 16]]
    ann_info['flip_index'] = [
        0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15
    ]
    ann_info['use_different_joint_weights'] = False
    ann_info['joint_weights'] = np.array([
        1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5,
        1.5
    ],
                                         dtype=np.float32).reshape((17, 1))
    ann_info['image_size'] = np.array(512)
    ann_info['heatmap_size'] = np.array([128, 256])
    ann_info['num_joints'] = 17
    ann_info['num_scales'] = 2
    ann_info['scale_aware_sigma'] = False
    ann_ids = coco.getAnnIds(785)
    anno = coco.loadAnns(ann_ids)
    mask = _get_mask(coco, anno, 785)
    # Only keep annotated people with keypoints (or non-crowd objects).
    anno = [
        obj for obj in anno if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0
    ]
    joints = _get_joints(anno, ann_info, False)
    mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]
    joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]
    results = {}
    results['dataset'] = 'coco'
    results['image_file'] = osp.join(data_prefix, '000000000785.jpg')
    results['mask'] = mask_list
    results['joints'] = joints_list
    results['ann_info'] = ann_info
    transform = LoadImageFromFile()
    results = transform(copy.deepcopy(results))
    assert results['img'].shape == (425, 640, 3)
    # test HorizontalFlip
    random_horizontal_flip = BottomUpRandomFlip(flip_prob=1.)
    results_horizontal_flip = random_horizontal_flip(copy.deepcopy(results))
    assert _check_flip(results['img'], results_horizontal_flip['img'])
    random_horizontal_flip = BottomUpRandomFlip(flip_prob=0.)
    results_horizontal_flip = random_horizontal_flip(copy.deepcopy(results))
    assert (results['img'] == results_horizontal_flip['img']).all()
    # mask/joints must be lists of length num_scales, otherwise AssertionError
    results_copy = copy.deepcopy(results)
    results_copy['mask'] = mask_list[0]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_horizontal_flip(
            copy.deepcopy(results_copy))
    results_copy = copy.deepcopy(results)
    results_copy['joints'] = joints_list[0]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_horizontal_flip(
            copy.deepcopy(results_copy))
    results_copy = copy.deepcopy(results)
    results_copy['joints'] = joints_list[:1]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_horizontal_flip(
            copy.deepcopy(results_copy))
    results_copy = copy.deepcopy(results)
    results_copy['mask'] = mask_list[:1]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_horizontal_flip(
            copy.deepcopy(results_copy))
    # test TopDownAffine
    random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'short', 0)
    results_affine_transform = random_affine_transform(copy.deepcopy(results))
    assert results_affine_transform['img'].shape == (512, 512, 3)
    random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'short',
                                                   40)
    results_affine_transform = random_affine_transform(copy.deepcopy(results))
    assert results_affine_transform['img'].shape == (512, 512, 3)
    # scale-aware sigma adds a 4th joint channel and must still work
    results_copy = copy.deepcopy(results)
    results_copy['ann_info']['scale_aware_sigma'] = True
    joints = _get_joints(anno, results_copy['ann_info'], False)
    results_copy['joints'] = \
        [joints.copy() for _ in range(results_copy['ann_info']['num_scales'])]
    results_affine_transform = random_affine_transform(results_copy)
    assert results_affine_transform['img'].shape == (512, 512, 3)
    results_copy = copy.deepcopy(results)
    results_copy['mask'] = mask_list[0]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_affine_transform(
            copy.deepcopy(results_copy))
    results_copy = copy.deepcopy(results)
    results_copy['joints'] = joints_list[0]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_affine_transform(
            copy.deepcopy(results_copy))
    results_copy = copy.deepcopy(results)
    results_copy['joints'] = joints_list[:1]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_affine_transform(
            copy.deepcopy(results_copy))
    results_copy = copy.deepcopy(results)
    results_copy['mask'] = mask_list[:1]
    with pytest.raises(AssertionError):
        results_horizontal_flip = random_affine_transform(
            copy.deepcopy(results_copy))
    random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5], 'long', 40)
    results_affine_transform = random_affine_transform(copy.deepcopy(results))
    assert results_affine_transform['img'].shape == (512, 512, 3)
    # only 'short'/'long' scale types are accepted
    with pytest.raises(ValueError):
        random_affine_transform = BottomUpRandomAffine(30, [0.75, 1.5],
                                                       'short-long', 40)
        results_affine_transform = random_affine_transform(
            copy.deepcopy(results))
    # test BottomUpGenerateTarget
    generate_multi_target = BottomUpGenerateTarget(2, 30)
    results_generate_multi_target = generate_multi_target(
        copy.deepcopy(results))
    assert 'targets' in results_generate_multi_target
    assert len(results_generate_multi_target['targets']
               ) == results['ann_info']['num_scales']
    # test BottomUpGetImgSize when W > H
    get_multi_scale_size = BottomUpGetImgSize([1])
    results_get_multi_scale_size = get_multi_scale_size(copy.deepcopy(results))
    assert 'test_scale_factor' in results_get_multi_scale_size['ann_info']
    assert 'base_size' in results_get_multi_scale_size['ann_info']
    assert 'center' in results_get_multi_scale_size['ann_info']
    assert 'scale' in results_get_multi_scale_size['ann_info']
    assert results_get_multi_scale_size['ann_info']['base_size'][1] == 512
    # test BottomUpResizeAlign
    transforms = [
        dict(type='ToTensor'),
        dict(
            type='NormalizeTensor',
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]),
    ]
    resize_align_multi_scale = BottomUpResizeAlign(transforms=transforms)
    results_copy = copy.deepcopy(results_get_multi_scale_size)
    results_resize_align_multi_scale = resize_align_multi_scale(results_copy)
    assert 'aug_data' in results_resize_align_multi_scale['ann_info']
    # test BottomUpGetImgSize when W < H
    results_copy = copy.deepcopy(results)
    results_copy['img'] = np.random.rand(640, 425, 3)
    results_get_multi_scale_size = get_multi_scale_size(results_copy)
    assert results_get_multi_scale_size['ann_info']['base_size'][0] == 512
def test_BottomUpGenerateHeatmapTarget():
    """Check that BottomUpGenerateHeatmapTarget produces one heatmap target
    per pyramid scale for a real COCO sample."""
    data_prefix = 'tests/data/coco/'
    ann_file = osp.join(data_prefix, 'test_coco.json')
    coco = COCO(ann_file)
    ann_info = {}
    ann_info['heatmap_size'] = np.array([128, 256])
    ann_info['num_joints'] = 17
    ann_info['num_scales'] = 2
    ann_info['scale_aware_sigma'] = False
    ann_ids = coco.getAnnIds(785)
    anno = coco.loadAnns(ann_ids)
    mask = _get_mask(coco, anno, 785)
    # Only keep annotated people with keypoints (or non-crowd objects).
    anno = [
        obj for obj in anno if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0
    ]
    joints = _get_joints(anno, ann_info, False)
    mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]
    joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]
    results = {}
    results['dataset'] = 'coco'
    results['image_file'] = osp.join(data_prefix, '000000000785.jpg')
    results['mask'] = mask_list
    results['joints'] = joints_list
    results['ann_info'] = ann_info
    generate_heatmap_target = BottomUpGenerateHeatmapTarget(2)
    results_generate_heatmap_target = generate_heatmap_target(results)
    assert 'target' in results_generate_heatmap_target
    assert len(results_generate_heatmap_target['target']
               ) == results['ann_info']['num_scales']
def test_BottomUpGeneratePAFTarget():
    """Check BottomUpGeneratePAFTarget against a hand-computed PAF map.
    Two people are placed on diagonal limbs of a 5x5 grid; the expected
    part-affinity fields are unit vectors (sqrt(2)/2 components) along
    each limb, averaged where the two limbs overlap.  The leftover debug
    ``print`` from the original version is removed.
    """
    ann_info = {}
    ann_info['skeleton'] = [[1, 2], [3, 4]]
    ann_info['heatmap_size'] = np.array([5])
    ann_info['num_joints'] = 4
    ann_info['num_scales'] = 1

    mask = np.ones((5, 5), dtype=bool)
    joints = np.array([[[1, 1, 2], [3, 3, 2], [0, 0, 0], [0, 0, 0]],
                       [[1, 3, 2], [3, 1, 2], [0, 0, 0], [0, 0, 0]]])

    mask_list = [mask.copy() for _ in range(ann_info['num_scales'])]
    joints_list = [joints.copy() for _ in range(ann_info['num_scales'])]

    results = {}
    results['dataset'] = 'coco'
    results['mask'] = mask_list
    results['joints'] = joints_list
    results['ann_info'] = ann_info

    generate_paf_target = BottomUpGeneratePAFTarget(1)
    results_generate_paf_target = generate_paf_target(results)
    sqrt = np.sqrt(2) / 2
    assert (results_generate_paf_target['target'] == np.array(
        [[[sqrt, sqrt, 0, sqrt, sqrt], [sqrt, sqrt, sqrt, sqrt, sqrt],
          [0, sqrt, sqrt, sqrt, 0], [sqrt, sqrt, sqrt, sqrt, sqrt],
          [sqrt, sqrt, 0, sqrt, sqrt]],
         [[sqrt, sqrt, 0, -sqrt, -sqrt], [sqrt, sqrt, 0, -sqrt, -sqrt],
          [0, 0, 0, 0, 0], [-sqrt, -sqrt, 0, sqrt, sqrt],
          [-sqrt, -sqrt, 0, sqrt, sqrt]],
         [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0]],
         [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0]]],
        dtype=np.float32)).all()
| [
"noreply@github.com"
] | noreply@github.com |
3ecb8306c4120d34f4d50837b65d730ed957c23e | df04a39be0cb31fa66a084afa2a4c161839d8d88 | /core/ajax.py | 735c1792f36a713211835a1a5e8c3cfca7979354 | [
"Apache-2.0"
] | permissive | skyle97/Watcher3 | 58e95febbd81608e9de8ce5486c62c0ec45958d7 | 3eaee90069caee3a7fbff096184de33ad97fe7f3 | refs/heads/master | 2021-01-19T17:10:32.147456 | 2017-08-21T02:43:34 | 2017-08-21T02:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,912 | py | import json
import logging
import os
import threading
import time
import cherrypy
import datetime
import core
from core import config, library, searchresults, searcher, snatcher, version, movieinfo, notification, plugins
from core.providers import torrent, newznab
from core.downloaders import nzbget, sabnzbd, transmission, qbittorrent, deluge, rtorrent, blackhole
from core.helpers import Conversions
from core.rss import predb
import backup
from gettext import gettext as _
logging = logging.getLogger(__name__)
class Errors():
    ''' Namespace for common error messages used in AJAX responses '''
    # {} placeholders are filled with str.format() at the call site.
    database_write = _('Unable to write to database.')
    database_read = _('Unable to read {} details from database.')
    tmdb_not_found = _('Unable to find {} on TheMovieDB.')
class Ajax(object):
''' These are all the methods that handle ajax post/get requests from the browser.
Except in special circumstances, all should return an 'ajax-style response', which is a
dict with a response key to indicate success, and additional keys for expected data output.
For example {'response': False, 'error': 'something broke'}
{'response': True, 'results': ['this', 'is', 'the', 'output']}
'''
def __init__(self):
    # Shared service singletons used by the AJAX handlers below.
    self.tmdb = movieinfo.TMDB()
    self.config = config.Config()
    self.metadata = library.Metadata()
    self.predb = predb.PreDB()
    self.searcher = searcher.Searcher()
    self.score = searchresults.Score()
    self.snatcher = snatcher.Snatcher()
    self.version = version.Version()
@cherrypy.expose
@cherrypy.tools.json_out()
def library(self, sort_key, sort_direction, limit=50, offset=0):
    ''' Get a page of movies from the library
    sort_key (str): column name to sort by
    sort_direction (str): direction to sort [ASC, DESC]
    limit: int number of movies to get <optional - default 50>
    offset: int list index postition to start slice <optional - default 0>
    Gets a slice of at most `limit` movies from the library, starting at
    `offset`, sorted by sort_key.  Movies marked Finished are hidden when
    the hidefinished server option is enabled.
    Returns list of dicts of movies
    '''
    return core.sql.get_user_movies(sort_key, sort_direction.upper(), limit, offset, hide_finished=core.CONFIG['Server']['hidefinished'])
@cherrypy.expose
@cherrypy.tools.json_out()
def search_tmdb(self, search_term):
    ''' Search tmdb for movies
    search_term (str): title and year of movie (Movie Title 2016)
    Returns str json-encoded list of dicts that contain tmdb's data.
    '''
    results = self.tmdb.search(search_term)
    if not results:
        # Not an error: an empty list is a valid "no matches" response.
        logging.info('No Results found for {}'.format(search_term))
    return results
@cherrypy.expose
@cherrypy.tools.json_out()
def get_search_results(self, imdbid, quality=None):
    ''' Gets search results for movie
    imdbid (str): imdb id #
    quality (str): quality profile for movie <optional - default None>
    Passes request to sql.get_search_results() then filters out unused download methods.
    Returns dict ajax-style response
    '''
    results = core.sql.get_search_results(imdbid, quality=quality)
    # Hide results the user cannot snatch with the enabled clients.
    if not core.CONFIG['Downloader']['Sources']['usenetenabled']:
        results = [res for res in results if res.get('type') != 'nzb']
    if not core.CONFIG['Downloader']['Sources']['torrentenabled']:
        results = [res for res in results if res.get('type') != 'torrent']
    if not results:
        # Include when the next scheduled search will run.
        return {'response': False, 'next': Conversions.human_datetime(core.NEXT_SEARCH)}
    else:
        for i in results:
            i['size'] = Conversions.human_file_size(i['size'])
        return {'response': True, 'results': results}
@cherrypy.expose
def get_trailer(self, title, year):
    ''' Gets trailer embed url from youtube
    title (str): title of movie
    year (str/int): year of movie release
    Returns str
    '''
    return movieinfo.trailer('{} {}'.format(title, year))
@cherrypy.expose
@cherrypy.tools.json_out()
def add_wanted_movie(self, data):
    ''' Adds movie to library
    data (str): json-formatted dict of known movie data
    Calls library.Manage.add_movie to add to library.
    Returns dict ajax-style response
    '''
    movie = json.loads(data)
    # full_metadata=False: remaining metadata is fetched later on demand.
    return core.manage.add_movie(movie, full_metadata=False)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_settings(self, data):
    ''' Saves settings to config file
    data (dict): of Section with nested dict of keys and values:
    {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
    All dicts must contain the full tree or data will be lost.
    Fires off additional methods if neccesary, ie scheduler restart/reloads
    Returns dict ajax-style response
    '''
    logging.info('Saving settings.')
    data = json.loads(data)
    # Only persist sections that actually changed.
    save_data = {}
    for key in data:
        if data[key] != core.CONFIG[key]:
            save_data[key] = data[key]
    if not save_data:
        return {'response': True, 'message': _('Settings saved.')}
    try:
        self.config.write(save_data)
    except (SystemExit, KeyboardInterrupt):
        # Never swallow interpreter shutdown / Ctrl-C.
        raise
    except Exception as e:
        logging.error('Writing config.', exc_info=True)
        return {'response': False, 'error': _('Unable to write to config file.')}
    return {'response': True, 'message': _('Settings saved.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def remove_movie(self, imdbid):
    ''' Removes movie
    imdbid (str): imdb id #
    Returns dict ajax-style response
    '''
    return core.manage.remove_movie(imdbid)
@cherrypy.expose
@cherrypy.tools.json_out()
def delete_movie_file(self, imdbid):
    ''' Deletes movie file for imdbid
    imdbid (str): imdb id #
    Removes only the finished file from disk; the library entry is kept.
    Returns dict ajax-style response
    '''
    logging.info('Deleting file for {}.'.format(imdbid))
    f = core.sql.get_movie_details('imdbid', imdbid).get('finished_file')
    try:
        logging.debug('Finished file for {} is {}'.format(imdbid, f))
        os.unlink(f)
        return {'response': True, 'message': _('Deleted movie file {}.').format(f)}
    except Exception as e:
        # Covers missing file, permissions, and f being None.
        logging.error('Unable to delete file {}'.format(f), exc_info=True)
        return {'response': False, 'error': str(e)}
@cherrypy.expose
@cherrypy.tools.json_out()
def search(self, imdbid):
    ''' Search indexers for specific movie.
    imdbid (str): imdb id #
    Gets movie data from database and sends to searcher.search()
    Returns dict ajax-style response
    '''
    movie = core.sql.get_movie_details("imdbid", imdbid)
    if not movie:
        return {'response': False, 'error': Errors.database_read.format(imdbid)}
    else:
        success = self.searcher.search(imdbid, movie['title'], movie['year'], movie['quality'])
        # Re-read: the search may have changed the movie's status.
        status = core.sql.get_movie_details("imdbid", imdbid)['status']
        if success:
            results = core.sql.get_search_results(imdbid, movie['quality'])
            for i in results:
                i['size'] = Conversions.human_file_size(i['size'])
            return {'response': True, 'results': results, 'movie_status': status, 'next': Conversions.human_datetime(core.NEXT_SEARCH)}
        else:
            return {'response': False, 'error': Errors.database_read.format(imdbid), 'movie_status': status}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_download(self, year, guid, kind):
    ''' Sends search result to downloader manually
    year (str): release year of the movie
    guid (str): download link for nzb/magnet/torrent file.
    kind (str): type of download (torrent, magnet, nzb)
    Returns dict ajax-style response
    '''
    torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
    usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
    if kind == 'nzb' and not usenet_enabled:
        return {'response': False, 'error': _('Link is NZB but no Usent client is enabled.')}
    elif kind in ('torrent', 'magnet') and not torrent_enabled:
        return {'response': False, 'error': _('Link is Torrent/Magnet but no Torrent client is enabled.')}
    # Check the lookup result BEFORE converting to dict --
    # get_single_search_result returns a falsy value for an unknown guid
    # and the original dict(None) raised TypeError before the check.
    result = core.sql.get_single_search_result('guid', guid)
    if result:
        data = dict(result)
        data['year'] = year
        return self.snatcher.download(data)
    else:
        return {'response': False, 'error': Errors.database_read.format(kind)}
@cherrypy.expose
@cherrypy.tools.json_out()
def mark_bad(self, guid, imdbid, cancel_download=False):
    ''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
    guid (str): guid of download to mark
    imdbid (str): imdb id # of movie
    cancel_download (bool): send command to download client to cancel download
    Returns dict ajax-style response
    '''
    # Keep the original row so we can later cancel the download using the
    # client/downloadid recorded when the release was snatched.
    sr_orig = core.sql.get_single_search_result('guid', guid)
    sr = core.manage.searchresults(guid, 'Bad')
    core.manage.markedresults(guid, 'Bad', imdbid=imdbid)
    if sr:
        response = {'response': True, 'message': _('Marked release as Bad.')}
    else:
        response = {'response': False, 'error': Errors.database_write}
    # Recompute and report the movie's status after the mark.
    response['movie_status'] = core.manage.movie_status(imdbid)
    if not response['movie_status']:
        response['error'] = (Errors.database_write)
        response['response'] = False
    if cancel_download:
        cancelled = False
        # Only releases that were actually snatched have a client download.
        if sr_orig.get('status') != 'Snatched':
            return response
        client = sr_orig['download_client'] if sr_orig else None
        downloadid = sr_orig['downloadid'] if sr_orig else None
        if not client:
            logging.info('Download client not found, cannot cancel download.')
            return response
        elif client == 'NZBGet':
            cancelled = nzbget.Nzbget.cancel_download(downloadid)
        elif client == 'SABnzbd':
            cancelled = sabnzbd.Sabnzbd.cancel_download(downloadid)
        elif client == 'QBittorrent':
            cancelled = qbittorrent.QBittorrent.cancel_download(downloadid)
        elif client == 'DelugeRPC':
            cancelled = deluge.DelugeRPC.cancel_download(downloadid)
        elif client == 'DelugeWeb':
            cancelled = deluge.DelugeWeb.cancel_download(downloadid)
        elif client == 'Transmission':
            cancelled = transmission.Transmission.cancel_download(downloadid)
        elif client == 'rTorrentSCGI':
            cancelled = rtorrent.rTorrentSCGI.cancel_download(downloadid)
        elif client == 'rTorrentHTTP':
            cancelled = rtorrent.rTorrentHTTP.cancel_download(downloadid)
        if not cancelled:
            response['response'] = False
            response['error'] = response.get('error', '') + _(' Could not remove download from client.')
    return response
@cherrypy.expose
def notification_remove(self, index):
    ''' Removes notification from core.notification
    index (str/int): index of notification to remove
    'index' will be of type string since it comes from ajax request.
    Therefore we convert to int here before passing to Notification
    Simply calls Notification module.
    Does not return
    '''
    notification.remove(int(index))
    return
@cherrypy.expose
@cherrypy.tools.json_out()
def update_check(self):
    ''' Manually check for updates
    Returns list:
        [0] dict ajax-style response
        [1] dict of core notifications
    '''
    response = self.version.manager.update_check()
    if response['status'] == 'current':
        # Show an informational toast instead of the notification list.
        n = [[{'message': _('No updates available.')}, {'type': 'primary'}]]
        return [response, n]
    else:
        return [response, core.NOTIFICATIONS]
@cherrypy.expose
@cherrypy.tools.json_out()
def test_downloader_connection(self, mode, data):
    ''' Test connection to downloader.
    mode (str): which downloader to test.
    data (dict): connection information (url, port, login, etc)
    Executes staticmethod in the chosen downloader's class.
    Returns dict ajax-style response
    '''
    data = json.loads(data)
    # Dispatch table keeps the mode->tester mapping in one place and lets
    # us reject an unknown mode cleanly -- the original if/elif chain left
    # `test` undefined and raised NameError for unrecognised modes.
    testers = {
        'sabnzbd': sabnzbd.Sabnzbd.test_connection,
        'nzbget': nzbget.Nzbget.test_connection,
        'blackhole': blackhole.Base.test_connection,
        'transmission': transmission.Transmission.test_connection,
        'delugerpc': deluge.DelugeRPC.test_connection,
        'delugeweb': deluge.DelugeWeb.test_connection,
        'qbittorrent': qbittorrent.QBittorrent.test_connection,
        'rtorrentscgi': rtorrent.rTorrentSCGI.test_connection,
        'rtorrenthttp': rtorrent.rTorrentHTTP.test_connection,
    }
    if mode not in testers:
        return {'response': False, 'error': _('Unknown downloader.')}
    test = testers[mode](data)
    response = {}
    if test is True:
        response['response'] = True
        response['message'] = _('Connection successful.')
    else:
        # test_connection returns an error string on failure.
        response['response'] = False
        response['error'] = test
    return response
@cherrypy.expose
def server_status(self, mode):
    ''' Check or modify status of CherryPy server_status
    mode (str): command or request of state
    Restarts or Shuts Down server in separate thread.
        Delays by one second to allow browser to redirect.
    If mode == 'online', asks server for status.
        (ENGINE.started, ENGINE.stopped, etc.)
    Returns nothing for mode == restart || shutdown
    Returns str server state if mode == online
    '''
    if mode == 'restart':
        # Timer gives the HTTP response time to reach the browser first.
        threading.Timer(1, core.restart).start()
        return
    elif mode == 'shutdown':
        threading.Timer(1, core.shutdown).start()
        return
    elif mode == 'online':
        return str(cherrypy.engine.state)
@cherrypy.expose
def update_server(self, mode):
    ''' Starts and executes update process.
    mode (str): 'set_true' or 'update_now'
    This method has two major functions based on mode
        set_true:
            Sets core.UPDATING to True, the browser should then automatically redirect
                the user to the update page that calls update_server('update_now')
        update_now:
            Starts update process:
                * Stops task scheduler to cancel all Timers
                * Waits for in-process tasks to finish. Yields to browser a list of
                    currently-running tasks every 1.5 seconds
                * Yields updating message to browser. Calls update method
                * Sets core.UPDATING to False
                * Yields response from update method to browser
                If False, starts scheduler plugin again to get back to a normal state
                If True, calls restart method. Browser is responsible for redirecting
                    afer the server is back up.
    Streams dict ajax-style responses (one json document per yield).
    '''
    if mode == 'set_true':
        core.UPDATING = True
        return json.dumps({'response': True})
    if mode == 'update_now':
        logging.info('Update process started.')
        core.scheduler_plugin.stop()
        # Poll until every scheduled task has finished, reporting progress.
        active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
        while len(active_tasks) > 0:
            yield json.dumps({'response': True, 'status': 'waiting', 'active_tasks': active_tasks})
            active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
            time.sleep(1.5)
        yield json.dumps({'response': True, 'status': 'updating'})
        update_status = version.Version().manager.execute_update()
        core.UPDATING = False
        if update_status is False:
            logging.error('Update Failed.')
            yield json.dumps({'response': False, 'error': _('Unable to complete update.')})
            # Resume normal operation after a failed update.
            core.scheduler_plugin.restart()
        elif update_status is True:
            yield json.dumps({'response': True, 'status': 'complete'})
            self.server_status('restart')
    else:
        return json.dumps({'response': False})
# Streaming generator response; gzip must be off for incremental delivery.
update_server._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def update_movie_options(self, quality, status, imdbid):
    ''' Updates quality settings for individual title
    quality (str): name of new quality
    status (str): management state ('Automatic', 'Disabled')
    imdbid (str): imdb identification number

    Returns dict ajax-style response
    '''
    success = {'response': True, 'message': _('Movie settings updated.')}

    logging.info('Updating quality profile to {} for {}.'.format(quality, imdbid))
    if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', imdbid):
        return {'response': False, 'error': Errors.database_write}

    logging.info('Updating status to {} for {}.'.format(status, imdbid))
    if status == 'Automatic':
        # Reset to Waiting, then let movie_status() derive the real state
        # from the database.
        if not core.sql.update('MOVIES', 'status', 'Waiting', 'imdbid', imdbid):
            return {'response': False, 'error': Errors.database_write}
        new_status = core.manage.movie_status(imdbid)
        if not new_status:
            return {'response': False, 'error': Errors.database_write}
        success['status'] = new_status
        return success
    elif status == 'Disabled':
        if not core.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
            return {'response': False, 'error': Errors.database_write}
        success['status'] = 'Disabled'
        return success
    else:
        # The original fell off the end here and returned None (rendered
        # as json null); report the invalid status explicitly.
        return {'response': False, 'error': _('Invalid status.')}
@cherrypy.expose
def get_log_text(self, logfile):
    ''' Gets log file contents
    logfile (str): name of log file to read

    logfile should be a filename only, not a path to the file.

    Returns str
    '''
    # This is a web-exposed endpoint taking a filename: strip any
    # directory components so a crafted request (e.g. '../../etc/passwd')
    # cannot read files outside the log directory.
    logfile = os.path.basename(logfile)

    logging.info('Dumping log file {} to text.'.format(logfile))

    with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
        # Newest lines first.
        log_text = ''.join(reversed(f.readlines()))

    return log_text
@cherrypy.expose
@cherrypy.tools.json_out()
def indexer_test(self, indexer, apikey, mode):
    ''' Tests connection to newznab/torznab indexer
    indexer (str): url of indexer
    apikey (str): indexer's api key
    mode (str): 'newznab' or 'torznab'

    Returns dict ajax-style response
    '''
    testers = {'newznab': newznab.NewzNab.test_connection,
               'torznab': torrent.Torrent.test_connection}

    tester = testers.get(mode)
    if tester is None:
        return {'response': False, 'error': _('Invalid test mode.')}
    return tester(indexer, apikey)
@cherrypy.expose
@cherrypy.tools.json_out()
def get_plugin_conf(self, folder, conf):
    ''' Renders a plugin's config file for display
    folder (str): folder to read config file from
    conf (str): filename of config file (ie 'my_plugin.conf')

    Returns string (empty string if the file cannot be read)
    '''
    conf_path = os.path.join(core.PLUGIN_DIR, folder, conf)
    logging.info('Reading plugin config {}'.format(conf_path))

    try:
        with open(conf_path) as f:
            parsed = json.load(f)
    except Exception:
        logging.error("Unable to read config file.", exc_info=True)
        return ''

    return plugins.render_config(parsed)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_plugin_conf(self, folder, filename, config):
    ''' Writes a plugin's config file to disk
    folder (str): folder to store config file
    filename (str): filename of config file (ie 'my_plugin.conf')
    config (str): json data to store in conf file

    Returns dict ajax-style response
    '''
    conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, filename)
    logging.info('Saving plugin config as {}'.format(conf_file))

    parsed = json.loads(config)

    try:
        with open(conf_file, 'w') as output:
            json.dump(parsed, output, indent=2)
    except Exception as e:
        return {'response': False, 'error': str(e)}

    return {'response': True, 'message': _('Settings saved.')}
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive):
    ''' Calls library to scan directory for movie files
    directory (str): directory to scan
    minsize (str/int): minimum file size in mb, coerced to int
    recursive (bool): whether or not to search subdirs

    Finds all files larger than minsize in directory.
    Removes all movies from gathered list that are already in library.

    If error, yields {'error': reason} and stops iteration.
    If movie has all metadata, yields:
        {'response': 'complete', 'movie': {<metadata>}}
    If missing imdbid or resolution, yields:
        {'response': 'incomplete', 'movie': {<known metadata>}}
    All metadata dicts include 'progress': [n, total].

    Yields json-encoded dict ajax-style responses
    '''
    recursive = json.loads(recursive)
    minsize = int(minsize)

    files = core.library.ImportDirectory.scan_dir(directory, minsize, recursive)
    if files.get('error'):
        yield json.dumps({'error': files['error']})
        # FIX: 'raise StopIteration()' inside a generator has raised
        # RuntimeError since PEP 479 (Python 3.7+); a bare return is the
        # correct way to end iteration.
        return

    library = [i['imdbid'] for i in core.sql.get_user_movies()]
    files = files['files']
    length = len(files)

    if length == 0:
        yield json.dumps({'response': None})
        return

    logging.info('Parsing {} directory scan results.'.format(length))
    for index, path in enumerate(files):
        logging.info('Gathering metatadata for {}'.format(path))
        metadata = {}
        response = {'progress': [index + 1, length]}

        try:
            metadata = self.metadata.from_file(path)

            if not metadata.get('imdbid'):
                metadata['imdbid'] = ''
                logging.info('IMDB unknown for import {}'.format(metadata['title']))
                response['response'] = 'incomplete'
            elif metadata['imdbid'] in library:
                logging.info('{} ({}) already in library, ignoring.'.format(metadata['title'], path))
                response['response'] = 'in_library'
            elif not metadata.get('resolution'):
                logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
                response['response'] = 'incomplete'
            else:
                logging.info('All data found for import {}'.format(metadata['title']))
                response['response'] = 'complete'

            if response['response'] == 'complete':
                # poster_path is saved and restored around convert_to_db —
                # presumably the conversion drops it (TODO confirm).
                p = metadata.get('poster_path')
                metadata = self.metadata.convert_to_db(metadata)
                metadata['poster_path'] = p
                metadata['size'] = os.path.getsize(path)
                metadata['human_size'] = Conversions.human_file_size(metadata['size'])
                metadata['finished_file'] = path

            if response['response'] == 'in_library':
                metadata = {'title': metadata['title']}

            response['movie'] = metadata

            yield json.dumps(response)
        except Exception:
            logging.warning('Error gathering metadata.', exc_info=True)
            yield json.dumps({'response': 'incomplete', 'movie': metadata})
            continue
scan_library_directory._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def import_dir(self, movies, corrected_movies):
    ''' Imports list of movies from directory scan
    movies (str): json list of dicts of movie info ready to import
    corrected_movies (str): json list of dicts of user-corrected movie info

    corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]

    Iterates through corrected_movies and attempts to gather metadata
    again via TMDB before import. Each imported movie gets a simulated
    search result so it scores and displays like a normal download.

    Yields json-encoded dict ajax-style responses
    '''
    logging.info('Adding directory scan movies to library.')
    today = str(datetime.date.today())

    movie_data = json.loads(movies)
    corrected_movies = json.loads(corrected_movies)

    fake_results = []   # simulated search results for imported movies
    success = []        # movies successfully added to the database

    length = len(movie_data) + len(corrected_movies)
    progress = 1

    if corrected_movies:
        logging.info('{} corrected movies, gathering metadata.'.format(len(corrected_movies)))
        for data in corrected_movies:
            tmdbdata = self.tmdb._search_tmdbid(data['tmdbid'])
            if tmdbdata:
                tmdbdata = tmdbdata[0]
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                movie_data.append(data)
            else:
                logging.error('Unable to find {} on TMDB.'.format(data['tmdbid']))
                yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(data['tmdbid'])})
                progress += 1

    logging.info('Adding {} directory scan movies to library.'.format(len(movie_data)))
    for movie in movie_data:
        if movie.get('imdbid'):
            movie['status'] = 'Disabled'
            movie['predb'] = 'found'
            movie['origin'] = 'Directory Import'
            movie['finished_date'] = today
            movie['id'] = movie['tmdbid']

            response = core.manage.add_movie(movie, full_metadata=True)
            if response['response'] is True:
                fake_results.append(searchresults.generate_simulacrum(movie))
                yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                progress += 1
                success.append(movie)
                continue
            else:
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
                progress += 1
                continue
        else:
            logging.error('Unable to find {} on TMDB.'.format(movie['title']))
            logging.debug(movie)
            # FIX: this previously formatted the error with data['title'];
            # 'data' belongs to the corrected_movies loop above, so it was
            # a NameError when corrected_movies was empty (and the wrong
            # movie's title otherwise). Use the current 'movie'.
            yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['title'])})
            progress += 1

    fake_results = self.score.score(fake_results, imported=True)

    # Copy each simulated result's score onto the matching library row.
    for i in success:
        score = None
        for r in fake_results:
            if r['imdbid'] == i['imdbid']:
                score = r['score']
                break
        if score:
            core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

    core.sql.write_search_results(fake_results)
import_dir._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def list_files(self, current_dir, move_dir):
    ''' Lists subdirectories of a path
    current_dir (str): base path
    move_dir (str): child path to read

    Joins and normalizes paths:
        ('/home/user/movies', '..') becomes /home/user

    Returns dict ajax-style response
    '''
    base = current_dir.strip()
    child = move_dir.strip()

    new_path = os.path.normpath(os.path.join(base, child))

    try:
        # Only visible directories, not files or dot-dirs.
        subdirs = [entry for entry in os.listdir(new_path)
                   if os.path.isdir(os.path.join(new_path, entry))
                   and not entry.startswith('.')]
    except Exception as e:
        logging.error('Error listing directory.', exc_info=True)
        return {'error': str(e)}

    return {'new_path': new_path, 'list': subdirs}
@cherrypy.expose
@cherrypy.tools.json_out()
def update_metadata(self, imdbid, tmdbid=None):
    ''' Re-downloads metadata for imdbid
    imdbid (str): imdbid of movie
    tmdbid (str): tmdbid of movie <optional - default None>

    If tmdbid is None the metadata helper resolves it (database first,
    then the TMDB api); failures come back in the returned dict.

    Returns dict ajax-style response
    '''
    result = self.metadata.update(imdbid, tmdbid)

    if result['response'] is not True:
        # Pass the helper's error dict straight through.
        return result
    return {'response': True, 'message': _('Metadata updated.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_kodi_movies(self, url):
    ''' Gets list of movies from a Kodi server
    url (str): url of kodi server

    Delegates entirely to the Kodi import library.

    Returns dict ajax-style response
    '''
    kodi_response = library.ImportKodiLibrary.get_movies(url)
    return kodi_response
@cherrypy.expose
def import_kodi_movies(self, movies):
    ''' Imports list of movies from a Kodi library
    movies (str): json-formatted list of dicts of movies

    Iterates through movies, gathers the TMDB record for each, and adds
    it to the library as a Disabled (already-downloaded) movie. Each
    imported movie gets a simulated search result so it scores and
    displays like a normal download.

    Yields json-encoded dict ajax-style responses
    '''
    movies = json.loads(movies)

    fake_results = []   # simulated search results for imported movies
    success = []        # movies successfully added to the database

    length = len(movies)
    progress = 1

    logging.info('Adding {} Kodi movies to library.'.format(length))
    for movie in movies:
        # Kodi supplies the imdbid; fetch the TMDB record for the rest.
        tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])
        if not tmdb_data or not tmdb_data.get('id'):
            yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
            progress += 1
            continue
        movie['id'] = tmdb_data['id']
        movie['size'] = 0
        # Imported movies are already downloaded, so disable searching.
        movie['status'] = 'Disabled'
        movie['predb'] = 'found'
        movie['finished_file'] = movie.get('finished_file', '').strip()
        movie['origin'] = 'Kodi Import'

        response = core.manage.add_movie(movie)
        if response['response'] is True:
            fake_results.append(searchresults.generate_simulacrum(movie))
            yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
            progress += 1
            success.append(movie)
            continue
        else:
            yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
            progress += 1
            continue

    fake_results = self.score.score(fake_results, imported=True)

    # Copy each simulated result's score onto the matching library row.
    for i in success:
        score = None
        for r in fake_results:
            if r['imdbid'] == i['imdbid']:
                score = r['score']
                break

        if score:
            core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

    core.sql.write_search_results(fake_results)
import_kodi_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def upload_plex_csv(self, file_input):
    ''' Receives upload of csv from browser
    file_input (file): csv file to read

    Reads/parses csv file into a usable dict.

    Returns dict ajax-style response
    '''
    try:
        upload = file_input.file
        csv_text = upload.read().decode('utf-8')
        upload.close()
    except Exception as e:
        logging.error('Unable to parse Plex CSV', exc_info=True)
        return {'response': False, 'error': str(e)}

    if not csv_text:
        # Empty upload: nothing to import, but not an error.
        return {'response': True, 'complete': [], 'incomplete': []}
    return library.ImportPlexLibrary.read_csv(csv_text)
@cherrypy.expose
def import_plex_csv(self, movies, corrected_movies):
    ''' Imports list of movies generated by csv import
    movies (str): json list of dicts of movie info ready to import
    corrected_movies (str): json list of dicts of user-corrected movie info

    Iterates through corrected_movies and attempts to gather metadata
    again before import. Each imported movie gets a simulated search
    result so it scores and displays like a normal download.

    Yields json-encoded dict ajax-style responses
    '''
    movie_data = json.loads(movies)
    corrected_movies = json.loads(corrected_movies)

    fake_results = []   # simulated search results for imported movies
    success = []        # movies successfully added to the database

    length = len(movie_data) + len(corrected_movies)
    progress = 1

    if corrected_movies:
        logging.info('Adding {} Plex movies to library.'.format(len(corrected_movies)))
        for movie in corrected_movies:
            tmdbdata = self.tmdb._search_imdbid(movie['imdbid'])
            if tmdbdata:
                # NOTE(review): this indexes the result ([0]) while
                # import_kodi_movies treats _search_imdbid's result as a
                # dict — confirm the helper's return type.
                tmdbdata = tmdbdata[0]
                movie['year'] = tmdbdata['release_date'][:4]
                movie.update(tmdbdata)
                movie_data.append(movie)
            else:
                logging.error(Errors.tmdb_not_found.format(movie['imdbid']))
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
                progress += 1

    logging.info('Adding {} Plex movies to library.'.format(length))
    for movie in movie_data:
        logging.info('Importing Plex movie {} {}'.format(movie.get('title', ''), movie.get('year', '')))

        # fm: whether the TMDB lookup already returned full metadata.
        fm = False
        if not movie.get('imdbid') and movie.get('tmdbid'):
            tmdb_data = self.tmdb._search_tmdbid(movie['tmdbid'])
            if tmdb_data:
                movie.update(tmdb_data[0])
                fm = True
            else:
                yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['tmdbid'])})
                progress += 1
                continue

        if movie.get('imdbid'):
            # Imported movies are already downloaded, so disable searching.
            movie['status'] = 'Disabled'
            movie['predb'] = 'found'
            movie['origin'] = 'Plex Import'
            if not movie.get('id'):
                tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])
                if tmdb_data:
                    movie.update(tmdb_data[0])
                else:
                    yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
                    progress += 1
                    continue

            response = core.manage.add_movie(movie, full_metadata=fm)
            if response['response'] is True:
                fake_results.append(searchresults.generate_simulacrum(movie))
                yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
                progress += 1
                success.append(movie)
                continue
            else:
                yield json.dumps({'response': False, 'progress': [progress, length], 'error': response['error'], 'title': movie['title']})
                progress += 1
                continue
        else:
            logging.error(Errors.tmdb_not_found.format(movie['title']))
            yield json.dumps({'response': False, 'progress': [progress, length], 'error': _('Unable to find IMDB ID for {} on TheMovieDB.').format(movie['title']), 'title': movie['title']})
            progress += 1
            continue

    if fake_results:
        fake_results = self.score.score(fake_results, imported=True)

    # Copy each simulated result's score onto the matching library row.
    for i in success:
        score = None
        for r in fake_results:
            if r['imdbid'] == i['imdbid']:
                score = r['score']
                break
        if score:
            core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

    if fake_results:
        core.sql.write_search_results(fake_results)
import_plex_csv._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_cp_movies(self, url, apikey):
    ''' Gets movies from CP server
    url (str): url to cp server
    apikey (str): cp api key

    Reads/parses cp api response.

    Returns dict ajax-style response
    '''
    full_url = '{}/api/{}/movie.list/'.format(url, apikey)

    # Default to plain http when the caller omitted the scheme.
    if not full_url.startswith('http'):
        full_url = 'http://{}'.format(full_url)

    return library.ImportCPLibrary.get_movies(full_url)
@cherrypy.expose
def import_cp_movies(self, wanted, finished):
    ''' Imports movies from CP list to library
    wanted (str): json list of dicts of wanted movies
    finished (str): json list of dicts of finished movies

    Wanted movies are added as-is; finished movies are added Disabled
    with a simulated search result so they score and display like a
    normal download.

    Yields json-encoded dict ajax-style responses
    '''
    wanted = json.loads(wanted)
    finished = json.loads(finished)

    fake_results = []   # simulated search results for finished movies
    success = []        # finished movies added to the database

    length = len(wanted) + len(finished)
    progress = 1

    logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(wanted)))
    for movie in wanted:
        response = core.manage.add_movie(movie, full_metadata=True)
        if response['response'] is True:
            yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
            progress += 1
            continue
        else:
            yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
            progress += 1
            continue

    # FIX: this log line previously said 'Wanted' (copy/paste of the one
    # above) even though it reports the finished list.
    logging.info('Adding {} Finished CouchPotato movies to library.'.format(len(finished)))
    for movie in finished:
        movie['predb'] = 'found'
        movie['status'] = 'Disabled'
        movie['origin'] = 'CouchPotato Import'

        response = core.manage.add_movie(movie, full_metadata=True)
        if response['response'] is True:
            fake_results.append(searchresults.generate_simulacrum(movie))
            yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
            progress += 1
            success.append(movie)
            continue
        else:
            yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
            progress += 1
            continue

    fake_results = self.score.score(fake_results, imported=True)

    # Copy each simulated result's score onto the matching library row.
    for i in success:
        score = None
        for r in fake_results:
            if r['imdbid'] == i['imdbid']:
                score = r['score']
                break
        if score:
            core.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

    core.sql.write_search_results(fake_results)
import_cp_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_backlog_search(self, movies):
    ''' Bulk manager action for backlog search
    movies (str): json list of dicts of movies, must contain key imdbid

    Yields one json-encoded dict ajax-style response per movie
    '''
    requested = json.loads(movies)
    logging.info('Performing bulk backlog search for {} movies.'.format(len(requested)))

    # Search using the library's own rows for the requested ids.
    ids = [entry['imdbid'] for entry in requested]
    targets = [row for row in core.sql.get_user_movies() if row['imdbid'] in ids]

    for index, movie in enumerate(targets, start=1):
        logging.info("Performing backlog search for {} {}.".format(movie['title'], movie['year']))

        found = self.searcher.search(movie['imdbid'], movie['title'], movie['year'], movie['quality'])
        if found:
            yield json.dumps({'response': True, 'index': index})
        else:
            yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': index})
manager_backlog_search._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_update_metadata(self, movies):
    ''' Bulk manager action for metadata update
    movies (str): json list of dicts of movies, must contain keys imdbid and tmdbid

    Yields one json-encoded dict ajax-style response per movie
    '''
    targets = json.loads(movies)
    logging.info('Performing bulk metadata update for {} movies.'.format(len(targets)))

    for index, movie in enumerate(targets, start=1):
        result = self.metadata.update(movie.get('imdbid'), movie.get('tmdbid'))
        if result['response'] is False:
            yield json.dumps({'response': False, 'error': result['error'], 'imdbid': movie['imdbid'], 'index': index})
        else:
            yield json.dumps({'response': True, 'index': index})
manager_update_metadata._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_quality(self, movies, quality):
    ''' Bulk manager action to change movie quality profile
    movies (str): json list of dicts of movies, must contain key imdbid
    quality (str): quality to set movies to

    Yields one json-encoded dict ajax-style response per movie
    '''
    targets = json.loads(movies)
    logging.info('Setting quality to {} for: {}'.format(quality, ', '.join(m['imdbid'] for m in targets)))

    for index, movie in enumerate(targets, start=1):
        written = core.sql.update('MOVIES', 'quality', quality, 'imdbid', movie['imdbid'])
        if written:
            yield json.dumps({'response': True, 'index': index})
        else:
            yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': index})
manager_change_quality._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_reset_movies(self, movies):
    ''' Bulk manager action to reset movies
    movies (str): json list of dicts of movies, must contain key imdbid

    Removes all search results, then overwrites the library row with the
    fresh-movie defaults.

    Yields one json-encoded dict ajax-style response per movie
    '''
    targets = json.loads(movies)
    logging.info('Resetting status for {} movies.'.format(len(targets)))

    # Column values that return a movie to its just-added state.
    db_reset = {'quality': 'Default',
                'status': 'Waiting',
                'finished_date': None,
                'finished_score': None,
                'backlog': 0,
                'finished_file': None,
                'predb': None,
                'predb_backlog': None
                }

    for index, movie in enumerate(targets, start=1):
        imdbid = movie['imdbid']
        logging.debug('Resetting {}'.format(imdbid))

        if not core.sql.purge_search_results(imdbid):
            yield json.dumps({'response': False, 'error': _('Unable to purge search results.'), 'imdbid': imdbid, 'index': index})
            continue

        if not core.sql.update_multiple('MOVIES', db_reset, imdbid=imdbid):
            yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': index})
            continue

        yield json.dumps({'response': True, 'index': index})
manager_reset_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_remove_movies(self, movies):
    ''' Bulk action to remove movies
    movies (str): json list of dicts of movies, must contain key imdbid

    Yields one json-encoded dict ajax-style response per movie
    '''
    targets = json.loads(movies)
    logging.info('Removing {} movies from library.'.format(len(targets)))

    for index, movie in enumerate(targets, start=1):
        result = self.remove_movie(movie['imdbid'])
        if result['response'] is False:
            yield json.dumps({'response': False, 'error': result['error'], 'imdbid': movie['imdbid'], 'index': index})
        else:
            yield json.dumps({'response': True, 'index': index})
manager_remove_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def generate_stats(self):
    ''' Gets library stats for graphing page

    Returns dict of library stats
    '''
    stats = core.manage.get_stats()
    return stats
@cherrypy.expose
@cherrypy.tools.json_out()
def create_backup(self):
    ''' Creates backup zip file ./watcher.zip

    Returns dict ajax-style response
    '''
    target = os.path.join(core.PROG_PATH, 'watcher.zip')
    logging.info('Creating backup of Watcher as {}'.format(target))

    try:
        backup.backup(require_confirm=False)
    except Exception as e:
        logging.error('Unable to create backup.', exc_info=True)
        return {'response': False, 'error': str(e)}

    return {'response': True, 'message': _('Backup created as {}').format(target)}
@cherrypy.expose
@cherrypy.tools.json_out()
def restore_backup(self, fileUpload):
    ''' Restores backup from an uploaded zip
    fileUpload (file): zip file uploaded from the browser

    Writes the upload to a uniquely-named temporary zip in PROG_PATH,
    restores from it, then schedules a restart in 3 seconds so the
    restored configuration takes effect.

    Returns dict ajax-style response
    '''
    logging.info('Restoring backup from uploaded zip.')
    n = datetime.datetime.today().microsecond
    tmp_zip = os.path.join(core.PROG_PATH, 'restore_{}.zip'.format(n))

    try:
        with open(tmp_zip, 'wb') as f:
            f.seek(0)
            f.write(fileUpload.file.read())
        logging.info('Restore zip temporarily stored as {}.'.format(tmp_zip))

        backup.restore(require_confirm=False, file=tmp_zip)
    except Exception as e:
        logging.error('Unable to restore backup.', exc_info=True)
        # Include the reason; the original returned only {'response': False}.
        return {'response': False, 'error': str(e)}
    finally:
        # Always remove the temporary zip — the original leaked it when
        # restore raised.
        if os.path.isfile(tmp_zip):
            logging.info('Removing temporary zip {}'.format(tmp_zip))
            os.unlink(tmp_zip)

    threading.Timer(3, core.restart).start()
    return {'response': True}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_task_execute(self, name):
    ''' Calls task's now() function to execute task immediately
    name (str): name of scheduled task to run

    Response includes core.NOTIFICATIONS so the browser can display any
    notifications generated during the task.

    Returns dict ajax-style response
    '''
    try:
        logging.info('Manually executing task {}.'.format(name))
        task = core.scheduler_plugin.task_list[name]
        task.now()
        return {'response': True,
                'message': _('Finished task {}.').format(name),
                'last_execution': task.last_execution,
                'notifications': core.NOTIFICATIONS}
    except Exception as e:
        return {'response': False, 'error': str(e)}
| [
"nosmokingbandit@gmail.com"
] | nosmokingbandit@gmail.com |
879079013be7911a134b9b62a20ff2df3d5483f6 | 7d852b8d7b8a6ad7fc9c39957e1097509d08e607 | /cf/test/create_test_files.py | e3fd0f5939be75bd460a289c3d6ae479c19c9f7f | [
"MIT"
] | permissive | AJamesPhillips/cf-python | ca0a7ca8681fe928f069d5809bf067d064265e38 | 4631bc4ba3c0cb51dcd18905116440007e291e6b | refs/heads/master | 2020-09-20T10:04:38.336267 | 2019-11-27T14:07:53 | 2019-11-27T14:07:53 | 224,445,029 | 0 | 0 | MIT | 2019-11-27T14:08:11 | 2019-11-27T14:08:10 | null | UTF-8 | Python | false | false | 23,397 | py | import datetime
import os
import unittest
import numpy
import netCDF4
import cf
def _make_contiguous_file(filename):
    '''Write a small CF-1.7 discrete-sampling-geometry (timeSeries) netCDF
    file using contiguous ragged-array packing, for use as test input.

    4 stations with 3, 7, 5 and 9 observations respectively (24 obs in
    total); 'row_size' is the count variable mapping each station to its
    contiguous run of observations.

    Returns the filename.
    '''
    n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')

    n.Conventions = 'CF-1.7'
    n.featureType = 'timeSeries'

    # Dimensions: 4 stations, 24 obs, 8-char names, 2-element cell bounds.
    station = n.createDimension('station', 4)
    obs = n.createDimension('obs' , 24)
    name_strlen = n.createDimension('name_strlen', 8)
    bounds = n.createDimension('bounds', 2)

    # Per-station longitude with cell bounds.
    lon = n.createVariable('lon', 'f8', ('station',))
    lon.standard_name = "longitude"
    lon.long_name = "station longitude"
    lon.units = "degrees_east"
    lon.bounds = "lon_bounds"
    lon[...] = [-23, 0, 67, 178]

    lon_bounds = n.createVariable('lon_bounds', 'f8', ('station', 'bounds'))
    lon_bounds[...] = [[-24, -22],
                       [ -1, 1],
                       [ 66, 68],
                       [177, 179]]

    lat = n.createVariable('lat', 'f8', ('station',))
    lat.standard_name = "latitude"
    lat.long_name = "station latitude"
    lat.units = "degrees_north"
    lat[...] = [-9, 2, 34, 78]

    alt = n.createVariable('alt', 'f8', ('station',))
    alt.long_name = "vertical distance above the surface"
    alt.standard_name = "height"
    alt.units = "m"
    alt.positive = "up"
    alt.axis = "Z"
    alt[...] = [0.5, 12.6, 23.7, 345]

    # Station names as 2-d char arrays (NETCDF3_CLASSIC has no string type).
    station_name = n.createVariable('station_name', 'S1',
                                    ('station', 'name_strlen'))
    station_name.long_name = "station name"
    station_name.cf_role = "timeseries_id"
    station_name[...] = numpy.array([[x for x in 'station1'],
                                     [x for x in 'station2'],
                                     [x for x in 'station3'],
                                     [x for x in 'station4']])

    station_info = n.createVariable('station_info', 'i4', ('station',))
    station_info.long_name = "some kind of station info"
    station_info[...] = [-10, -9, -8, -7]

    # Ragged-array count variable: number of obs per station (sums to 24).
    row_size = n.createVariable('row_size', 'i4', ('station',))
    row_size.long_name = "number of observations for this station"
    row_size.sample_dimension = "obs"
    row_size[...] = [3, 7, 5, 9]

    # Observation times, one contiguous run per station.
    time = n.createVariable('time', 'f8', ('obs',))
    time.standard_name = "time"
    time.long_name = "time of measurement"
    time.units = "days since 1970-01-01 00:00:00"
    time.bounds = "time_bounds"
    time[ 0: 3] = [-3, -2, -1]
    time[ 3:10] = [1, 2, 3, 4, 5, 6, 7]
    time[10:15] = [0.5, 1.5, 2.5, 3.5, 4.5]
    time[15:24] = range(-2, 7)

    # Symmetric half-day bounds around each time value.
    time_bounds = n.createVariable('time_bounds', 'f8', ('obs', 'bounds'))
    time_bounds[..., 0] = time[...] - 0.5
    time_bounds[..., 1] = time[...] + 0.5

    humidity = n.createVariable('humidity', 'f8', ('obs',), fill_value=-999.9)
    humidity.standard_name = "specific_humidity"
    humidity.coordinates = "time lat lon alt station_name station_info"
    humidity[ 0: 3] = numpy.arange(0, 3)
    humidity[ 3:10] = numpy.arange(1, 71, 10)
    humidity[10:15] = numpy.arange(2, 502, 100)
    humidity[15:24] = numpy.arange(3, 9003, 1000)

    # Temperature is humidity shifted by 273.15 (easy to verify in tests).
    temp = n.createVariable('temp', 'f8', ('obs',), fill_value=-999.9)
    temp.standard_name = "air_temperature"
    temp.units = "Celsius"
    temp.coordinates = "time lat lon alt station_name station_info"
    temp[...] = humidity[...] + 273.15

    n.close()

    return filename
def _make_indexed_file(filename):
    '''Write a small CF-1.7 discrete-sampling-geometry (timeSeries) netCDF
    file using indexed ragged-array packing, for use as test input.

    Same stations and data values as the contiguous file, but the
    observations are interleaved: each obs carries a 'stationIndex'
    identifying the station it belongs to.

    Returns the filename.
    '''
    n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')

    n.Conventions = 'CF-1.7'
    n.featureType = 'timeSeries'

    # 'obs' is unlimited here, unlike the contiguous file's fixed 24.
    station = n.createDimension('station', 4)
    obs = n.createDimension('obs' , None)
    name_strlen = n.createDimension('name_strlen', 8)
    bounds = n.createDimension('bounds', 2)

    lon = n.createVariable('lon', 'f8', ('station',))
    lon.standard_name = "longitude"
    lon.long_name = "station longitude"
    lon.units = "degrees_east"
    lon.bounds = "lon_bounds"
    lon[...] = [-23, 0, 67, 178]

    lon_bounds = n.createVariable('lon_bounds', 'f8', ('station', 'bounds'))
    lon_bounds[...] = [[-24, -22],
                       [ -1, 1],
                       [ 66, 68],
                       [177, 179]]

    lat = n.createVariable('lat', 'f8', ('station',))
    lat.standard_name = "latitude"
    lat.long_name = "station latitude"
    lat.units = "degrees_north"
    lat[...] = [-9, 2, 34, 78]

    alt = n.createVariable('alt', 'f8', ('station',))
    alt.long_name = "vertical distance above the surface"
    alt.standard_name = "height"
    alt.units = "m"
    alt.positive = "up"
    alt.axis = "Z"
    alt[...] = [0.5, 12.6, 23.7, 345]

    # Station names as 2-d char arrays (NETCDF3_CLASSIC has no string type).
    station_name = n.createVariable('station_name', 'S1',
                                    ('station', 'name_strlen'))
    station_name.long_name = "station name"
    station_name.cf_role = "timeseries_id"
    station_name[...] = numpy.array([[x for x in 'station1'],
                                     [x for x in 'station2'],
                                     [x for x in 'station3'],
                                     [x for x in 'station4']])

    station_info = n.createVariable('station_info', 'i4', ('station',))
    station_info.long_name = "some kind of station info"
    station_info[...] = [-10, -9, -8, -7]

    #row_size[...] = [3, 7, 5, 9]

    # Index variable: which station each interleaved obs belongs to
    # (stations 0..3 appear 3, 7, 5 and 9 times respectively).
    stationIndex = n.createVariable('stationIndex', 'i4', ('obs',))
    stationIndex.long_name = "which station this obs is for"
    stationIndex.instance_dimension= "station"
    stationIndex[...] = [3, 2, 1, 0, 2, 3, 3, 3, 1, 1, 0, 2,
                         3, 1, 0, 1, 2, 3, 2, 3, 3, 3, 1, 1]

    # Per-station time series, consumed in order as each station's obs appear.
    t = [[-3, -2, -1],
         [1, 2, 3, 4, 5, 6, 7],
         [0.5, 1.5, 2.5, 3.5, 4.5],
         range(-2, 7)]

    time = n.createVariable('time', 'f8', ('obs',))
    time.standard_name = "time";
    time.long_name = "time of measurement"
    time.units = "days since 1970-01-01 00:00:00"
    time.bounds = "time_bounds"
    # ssi[k] counts how many values of station k's series have been used.
    ssi = [0, 0, 0, 0]
    for i, si in enumerate(stationIndex[...]):
        time[i] = t[si][ssi[si]]
        ssi[si] += 1

    # Symmetric half-day bounds around each time value.
    time_bounds = n.createVariable('time_bounds', 'f8', ('obs', 'bounds'))
    time_bounds[..., 0] = time[...] - 0.5
    time_bounds[..., 1] = time[...] + 0.5

    humidity = n.createVariable('humidity', 'f8', ('obs',), fill_value=-999.9)
    humidity.standard_name = "specific_humidity"
    humidity.coordinates = "time lat lon alt station_name station_info"
    # Same per-station humidity series as the contiguous file, interleaved.
    h = [numpy.arange(0, 3),
         numpy.arange(1, 71, 10),
         numpy.arange(2, 502, 100),
         numpy.arange(3, 9003, 1000)]
    ssi = [0, 0, 0, 0]
    for i, si in enumerate(stationIndex[...]):
        humidity[i] = h[si][ssi[si]]
        ssi[si] += 1

    # Temperature is humidity shifted by 273.15 (easy to verify in tests).
    temp = n.createVariable('temp', 'f8', ('obs',), fill_value=-999.9)
    temp.standard_name = "air_temperature"
    temp.units = "Celsius"
    temp.coordinates = "time lat lon alt station_name station_info"
    temp[...] = humidity[...] + 273.15

    n.close()

    return filename
def _make_indexed_contiguous_file(filename):
    """Create a CF-1.6 DSG timeSeriesProfile sample file using the
    indexed-contiguous ragged-array representation: profiles are tied to
    stations via 'stationIndex', and observations are packed contiguously
    per profile via 'row_size'.  Returns the filename written.
    """
    n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
    n.Conventions = 'CF-1.6'
    n.featureType = "timeSeriesProfile"
    # 3 stations
    station = n.createDimension('station', 3)
    # 58 profiles spread over the 3 stations, each at a different time
    profile = n.createDimension('profile', 58)
    obs = n.createDimension('obs' , None)
    name_strlen = n.createDimension('name_strlen', 8)
    bounds = n.createDimension('bounds', 2)
    lon = n.createVariable('lon', 'f8', ('station',))
    lon.standard_name = "longitude"
    lon.long_name = "station longitude"
    lon.units = "degrees_east"
    lon.bounds = "lon_bounds"
    lon[...] = [-23, 0, 67]
    lon_bounds = n.createVariable('lon_bounds', 'f8', ('station', 'bounds'))
    lon_bounds[...] = [[-24, -22],
                       [ -1, 1],
                       [ 66, 68]]
    lat = n.createVariable('lat', 'f8', ('station',))
    lat.standard_name = "latitude"
    lat.long_name = "station latitude"
    lat.units = "degrees_north"
    lat[...] = [-9, 2, 34]
    alt = n.createVariable('alt', 'f8', ('station',))
    alt.long_name = "vertical distance above the surface"
    alt.standard_name = "height"
    alt.units = "m"
    alt.positive = "up"
    alt.axis = "Z"
    alt[...] = [0.5, 12.6, 23.7]
    station_name = n.createVariable('station_name', 'S1',
                                    ('station', 'name_strlen'))
    station_name.long_name = "station name"
    station_name.cf_role = "timeseries_id"
    station_name[...] = numpy.array([[x for x in 'station1'],
                                     [x for x in 'station2'],
                                     [x for x in 'station3']])
    # NOTE: ('profile') is a bare string, not a tuple; netCDF4 accepts a
    # single dimension name given as a string.
    profile = n.createVariable('profile', 'i4', ('profile'))
    profile.cf_role = "profile_id"
    profile[...] = numpy.arange(58) + 100
    station_info = n.createVariable('station_info', 'i4', ('station',))
    station_info.long_name = "some kind of station info"
    station_info[...] = [-10, -9, -8]
    stationIndex = n.createVariable('stationIndex', 'i4', ('profile',))
    stationIndex.long_name = "which station this profile is for"
    stationIndex.instance_dimension= "station"
    stationIndex[...] = [2, 1, 0, 2, 1, 1, 0, 2,
                         1, 0, 1, 2, 2, 1, 1,
                         2, 1, 0, 2, 1, 1, 0, 2,
                         1, 0, 1, 2, 2, 1, 1,
                         2, 1, 0, 2, 1, 1, 0, 2,
                         1, 0, 1, 2, 2, 1, 1,
                         2, 1, 0, 2, 1, 1, 0, 2,
                         1, 0, 1, 2, 2]
    # station N has list(stationIndex[...]).count(N) profiles
    row_size = n.createVariable('row_size', 'i4', ('profile',))
    row_size.long_name = "number of observations for this profile"
    row_size.sample_dimension = "obs"
    row_size[...] = [1, 4, 1, 3, 2, 2, 3, 3, 1, 2, 2, 3, 2, 2, 2, 2, 1, 2, 1, 3, 3, 2, 1,
                     3, 1, 3, 2, 3, 1, 3, 3, 2, 2, 2, 1, 1, 1, 3, 1, 1, 2, 1, 1, 3, 3, 2,
                     2, 2, 2, 1, 2, 3, 3, 3, 2, 3, 1, 1]  # sum = 118
    time = n.createVariable('time', 'f8', ('profile',))
    time.standard_name = "time"
    time.long_name = "time"
    time.units = "days since 1970-01-01 00:00:00"
    time.bounds = "time_bounds"
    # Each station gets its own start time; successive profiles at a
    # station are one day apart.
    t0 = [3, 0, -3]
    ssi = [0, 0, 0]
    for i, si in enumerate(stationIndex[...]):
        time[i] = t0[si] + ssi[si]
        ssi[si] += 1
    time_bounds = n.createVariable('time_bounds', 'f8', ('profile', 'bounds'))
    time_bounds[..., 0] = time[...] - 0.5
    time_bounds[..., 1] = time[...] + 0.5
    z = n.createVariable('z', 'f8', ('obs',))
    z.standard_name = "altitude"
    z.long_name = "height above mean sea level"
    z.units = "km"
    z.axis = "Z"
    z.positive = "up"
    z.bounds = "z_bounds"
    # z0 = [1, 0, 3]
    # i = 0
    # for s, r in zip(stationIndex[...], row_size[...]):
    #     z[i:i+r] = z0[s] + numpy.sort(numpy.random.uniform(0, numpy.random.uniform(1, 2), r))
    #     i += r
    # Pre-generated values (from the commented-out random recipe above),
    # frozen so the sample file is reproducible.
    data = [3.51977705293769, 0.521185292100177, 0.575154265863394,
            1.08495843717095, 1.37710968624395, 2.07123455611723, 3.47064474274781,
            3.88569849023813, 4.81069254279537, 0.264339600625496, 0.915704970094182,
            0.0701532210336895, 0.395517651420933, 1.00657582854276,
            1.17721374303641, 1.82189345615046, 3.52424307197668, 3.93200473199559,
            3.95715099603671, 1.57047493027102, 1.09938982652955, 1.17768722826975,
            0.251803399458277, 1.59673486865804, 4.02868944763605, 4.03749228832264,
            4.79858281590985, 3.00019933315412, 3.65124061660449, 0.458463542157766,
            0.978678197083262, 0.0561560792556281, 0.31182013232255,
            3.33350065357286, 4.33143904011861, 0.377894196412131, 1.63020681064712,
            2.00097025264771, 3.76948048424458, 0.572927165845568, 1.29408313557905,
            1.81296270533192, 0.387142669131077, 0.693459187515738, 1.69261930636298,
            1.38258797228361, 1.82590759889566, 3.34993297710761, 0.725250730922501,
            1.38221693486728, 1.59828555215646, 1.59281225554253, 0.452340646918555,
            0.976663373825433, 1.12640496317618, 3.19366847375422, 3.37209133117904,
            3.40665008236976, 3.53525896684001, 4.10444186715724, 0.14920937817654,
            0.0907197953552753, 0.42527916794473, 0.618685137936187,
            3.01900591447357, 3.37205542289986, 3.86957342976163, 0.17175098751914,
            0.990040375014957, 1.57011428605984, 2.12140567043994, 3.24374743730506,
            4.24042441581785, 0.929509749153725, 0.0711997786817564,
            2.25090028461898, 3.31520955860746, 3.49482624434274, 3.96812568493549,
            1.5681807261767, 1.79993011515465, 0.068325990211909, 0.124469638352167,
            3.31990436971169, 3.84766748039389, 0.451973490541035, 1.24303219956085,
            1.30478004656262, 0.351892459787624, 0.683685812990457,
            0.788883736575568, 3.73033428872491, 3.99479807507392, 0.811582011950481,
            1.2241242448019, 1.25563109687369, 2.16603674712822, 3.00010622131408,
            3.90637137662453, 0.589586644805982, 0.104656387266266,
            0.961185900148304, 1.05120351477824, 1.29460917520233, 2.10139985693684,
            3.64252693587415, 3.91197236350995, 4.56466622863717, 0.556476687600461,
            0.783717448678148, 0.910917550635007, 1.59750076220451, 1.97101264162631,
            0.714693043642084, 0.904381625638779, 1.03767817888021, 4.10124675852254,
            3.1059214185543]
    data = numpy.around(data, 2)
    z[...] = data
    z_bounds = n.createVariable('z_bounds', 'f8', ('obs', 'bounds'))
    z_bounds[..., 0] = z[...] - 0.01
    z_bounds[..., 1] = z[...] + 0.01
    humidity = n.createVariable('humidity', 'f8', ('obs',), fill_value=-999.9)
    humidity.standard_name = "specific_humidity"
    humidity.coordinates = "time lat lon alt z station_name station_info profile"
    # Derive humidity and temp from the same base data so the fields are
    # trivially related; the exact values are arbitrary test fixtures.
    data *= 10
    data = numpy.around(data, 2)
    humidity[...] = data
    temp = n.createVariable('temp', 'f8', ('obs',), fill_value=-999.9)
    temp.standard_name = "air_temperature"
    temp.units = "Celsius"
    temp.coordinates = "time lat lon alt z station_name station_info profile"
    # NOTE(review): 2731.5 (not 273.15) is added to the already-x10 data;
    # presumably intentional for the fixture -- confirm if values matter.
    data += 2731.5
    data = numpy.around(data, 2)
    temp[...] = data
    n.close()
    return filename
# Create the DSG sample files once at import time; each helper returns the
# name of the file it wrote.
contiguous_file = _make_contiguous_file('DSG_timeSeries_contiguous.nc')
indexed_file = _make_indexed_file('DSG_timeSeries_indexed.nc')
indexed_contiguous_file = _make_indexed_contiguous_file('DSG_timeSeriesProfile_indexed_contiguous.nc')
def _make_external_files():
    """Create the four sample files used by the external-variables tests:
    a parent file that references an external 'areacella' cell measure,
    the external file that supplies it, a combined file containing both,
    and a parent-like file where the external variable is simply missing.

    Returns the four file paths as a tuple.
    """
    def _pp(filename, parent=False, external=False, combined=False, external_missing=False):
        """Write one sample file; the boolean flags select which variables
        (coordinates/data vs. the external 'areacella') it contains.
        """
        nc = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
        nc.createDimension('grid_latitude', 10)
        nc.createDimension('grid_longitude', 9)
        nc.Conventions = 'CF-1.7'
        if parent:
            # Only the parent file declares the external variable by name.
            nc.external_variables = 'areacella'
        if parent or combined or external_missing:
            grid_latitude = nc.createVariable(dimensions=('grid_latitude',),
                                              datatype='f8',
                                              varname='grid_latitude')
            grid_latitude.setncatts({'units': 'degrees', 'standard_name': 'grid_latitude'})
            grid_latitude[...] = range(10)
            grid_longitude = nc.createVariable(dimensions=('grid_longitude',),
                                               datatype='f8',
                                               varname='grid_longitude')
            grid_longitude.setncatts({'units': 'degrees', 'standard_name': 'grid_longitude'})
            grid_longitude[...] = range(9)
            latitude = nc.createVariable(dimensions=('grid_latitude', 'grid_longitude'),
                                         datatype='i4',
                                         varname='latitude')
            latitude.setncatts({'units': 'degree_N', 'standard_name': 'latitude'})
            latitude[...] = numpy.arange(90).reshape(10, 9)
            longitude = nc.createVariable(dimensions=('grid_longitude', 'grid_latitude'),
                                          datatype='i4',
                                          varname='longitude')
            longitude.setncatts({'units': 'degreeE', 'standard_name': 'longitude'})
            longitude[...] = numpy.arange(90).reshape(9, 10)
            eastward_wind = nc.createVariable(dimensions=('grid_latitude', 'grid_longitude'),
                                              datatype='f8',
                                              varname=u'eastward_wind')
            eastward_wind.coordinates = u'latitude longitude'
            eastward_wind.standard_name = 'eastward_wind'
            eastward_wind.cell_methods = 'grid_longitude: mean (interval: 1 day comment: ok) grid_latitude: maximum where sea'
            eastward_wind.cell_measures = 'area: areacella'
            eastward_wind.units = 'm s-1'
            eastward_wind[...] = numpy.arange(90).reshape(10, 9) - 45.5
        if external or combined:
            # The external cell-measure variable itself.
            areacella = nc.createVariable(dimensions=('grid_longitude', 'grid_latitude'),
                                          datatype='f8',
                                          varname='areacella')
            areacella.setncatts({'units': 'm2', 'standard_name': 'cell_area'})
            areacella[...] = numpy.arange(90).reshape(9, 10) + 100000.5
        nc.close()
    #--- End: def
    parent_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'parent.nc')
    external_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'external.nc')
    combined_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'combined.nc')
    external_missing_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                         'external_missing.nc')
    _pp(parent_file          , parent=True)
    _pp(external_file        , external=True)
    _pp(combined_file        , combined=True)
    _pp(external_missing_file, external_missing=True)
    return parent_file, external_file, combined_file, external_missing_file
# Create the external-variable sample files once at import time.
(parent_file,
 external_file,
 combined_file,
 external_missing_file) = _make_external_files()
def _make_gathered_file(filename):
    """Create a CF-1.6 sample file with compression-by-gathering: the
    'list1'/'list2'/'list3' list variables gather 1, 2 and 3 dimensions
    respectively, and temp1/temp2/temp3 are data variables compressed
    along those gathered axes.  Returns the filename written.
    """
    def _jj(shape, list_values):
        """Return a masked array of `shape` in which only the flattened
        positions named in `list_values` are unmasked (each set to its own
        flattened index)."""
        array = numpy.ma.masked_all(shape)
        for i, (index, x) in enumerate(numpy.ndenumerate(array)):
            if i in list_values:
                array[index] = i
        return array
    #--- End: def
    n = netCDF4.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
    n.Conventions = 'CF-1.6'
    time = n.createDimension('time' , 2)
    height = n.createDimension('height' , 3)
    lat = n.createDimension('lat' , 4)
    lon = n.createDimension('lon' , 5)
    p = n.createDimension('p' , 6)
    list1 = n.createDimension('list1', 4)
    list2 = n.createDimension('list2', 9)
    list3 = n.createDimension('list3', 14)
    # Dimension coordinate variables.  NOTE: the names below shadow the
    # Dimension objects above; both expose .size, which is all that is used.
    time = n.createVariable('time', 'f8', ('time',))
    time.standard_name = "time"
    time.units = "days since 2000-1-1"
    time[...] = [31, 60]
    height = n.createVariable('height', 'f8', ('height',))
    height.standard_name = "height"
    height.units = "metres"
    height.positive = "up"
    height[...] = [0.5, 1.5, 2.5]
    lat = n.createVariable('lat', 'f8', ('lat',))
    lat.standard_name = "latitude"
    lat.units = "degrees_north"
    lat[...] = [-90, -85, -80, -75]
    p = n.createVariable('p', 'i4', ('p',))
    p.long_name = "pseudolevel"
    p[...] = [1, 2, 3, 4, 5, 6]
    # Auxiliary coordinate variables
    aux0 = n.createVariable('aux0', 'f8', ('list1',))
    aux0.standard_name = "longitude"
    aux0.units = "degrees_east"
    aux0[...] = numpy.arange(list1.size)
    aux1 = n.createVariable('aux1', 'f8', ('list3',))
    aux1[...] = numpy.arange(list3.size)
    aux2 = n.createVariable('aux2', 'f8', ('time', 'list3', 'p'))
    aux2[...] = numpy.arange(time.size * list3.size * p.size).reshape(time.size, list3.size, p.size)
    aux3 = n.createVariable('aux3', 'f8', ('p', 'list3', 'time'))
    aux3[...] = numpy.arange(p.size * list3.size * time.size).reshape(p.size, list3.size, time.size)
    aux4 = n.createVariable('aux4', 'f8', ('p', 'time', 'list3'))
    aux4[...] = numpy.arange(p.size * time.size * list3.size).reshape(p.size, time.size, list3.size)
    aux5 = n.createVariable('aux5', 'f8', ('list3', 'p', 'time'))
    aux5[...] = numpy.arange(list3.size * p.size * time.size).reshape(list3.size, p.size, time.size)
    aux6 = n.createVariable('aux6', 'f8', ('list3', 'time'))
    aux6[...] = numpy.arange(list3.size * time.size).reshape(list3.size, time.size)
    aux7 = n.createVariable('aux7', 'f8', ('lat',))
    aux7[...] = numpy.arange(lat.size)
    aux8 = n.createVariable('aux8', 'f8', ('lon', 'lat',))
    aux8[...] = numpy.arange(lon.size * lat.size).reshape(lon.size, lat.size)
    aux9 = n.createVariable('aux9', 'f8', ('time', 'height'))
    aux9[...] = numpy.arange(time.size * height.size).reshape(time.size, height.size)
    # List variables (the gathering indices; see CF 'compress' attribute)
    list1 = n.createVariable('list1', 'i', ('list1',))
    list1.compress = "lon"
    list1[...] = [0, 1, 3, 4]
    list2 = n.createVariable('list2', 'i', ('list2',))
    list2.compress = "lat lon"
    list2[...] = [0, 1, 5, 6, 13, 14, 17, 18, 19]
    list3 = n.createVariable('list3', 'i', ('list3',))
    list3.compress = "height lat lon"
    array = _jj((3, 4, 5),
                [0, 1, 5, 6, 13, 14, 25, 26, 37, 38, 48, 49, 58, 59])
    list3[...] = array.compressed()
    # Data variables
    temp1 = n.createVariable('temp1', 'f8', ('time', 'height', 'lat', 'list1', 'p'))
    temp1.long_name = "temp1"
    temp1.units = "K"
    temp1.coordinates = "aux0 aux7 aux8 aux9"
    temp1[...] = numpy.arange(2*3*4*4*6).reshape(2, 3, 4, 4, 6)
    temp2 = n.createVariable('temp2', 'f8', ('time', 'height', 'list2', 'p'))
    temp2.long_name = "temp2"
    temp2.units = "K"
    temp2.coordinates = "aux7 aux8 aux9"
    temp2[...] = numpy.arange(2*3*9*6).reshape(2, 3, 9, 6)
    temp3 = n.createVariable('temp3', 'f8', ('time', 'list3', 'p'))
    temp3.long_name = "temp3"
    temp3.units = "K"
    temp3.coordinates = "aux0 aux1 aux2 aux3 aux4 aux5 aux6 aux7 aux8 aux9"
    temp3[...] = numpy.arange(2*14*6).reshape(2, 14, 6)
    n.close()
    return filename
# Create the gathered sample file once at import time.
gathered = _make_gathered_file('gathered.nc')

if __name__ == '__main__':
    # Record the run date and environment before launching the test suite.
    print('Run date:', datetime.datetime.utcnow())
    print(cf.environment(display=False))
    print()
    unittest.main(verbosity=2)
| [
"d.c.hassell@reading.ac.uk"
] | d.c.hassell@reading.ac.uk |
dfabb3bfc2330933469c77fd8c5db27ab3cce713 | 875d28c91341759de2fba250377955dbea432c9f | /flaskTutorial/flaskblog/models.py | 3319cf433330417c9c3ce6f7a64bfb7a2e3d06ae | [] | no_license | ja-vu/pythonProjects | 908ed141dd2c74c3eb045897aa474d60ace9b20b | f69bce815d81844e451f932854a3017b3f604949 | refs/heads/master | 2021-05-22T22:06:03.784246 | 2020-11-19T12:39:29 | 2020-11-19T12:39:29 | 253,117,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from flaskblog import db, login_manager, app
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from datetime import datetime
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the User for the id stored in the session."""
    uid = int(user_id)
    return User.query.get(uid)
class User(db.Model, UserMixin):
    """Account model; UserMixin supplies the Flask-Login integration."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Filename of the avatar image stored on disk (not the image itself).
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    password = db.Column(db.String(60), nullable=False)  # hashed, 60 chars
    posts = db.relationship('Post', backref='author', lazy=True)

    def get_reset_token(self, expires_sec=1800):
        """Return a signed password-reset token valid for `expires_sec` seconds."""
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User for a valid reset token, or None if the token is
        invalid, tampered with, or expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        # Was a bare `except:` -- narrow it so KeyboardInterrupt/SystemExit
        # are not swallowed; bad signature/expiry still returns None.
        except Exception:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
    """A blog post authored by a User (see the 'user.id' foreign key)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # Note: default is the callable datetime.utcnow (evaluated per insert).
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)

    def __repr__(self):
        # Trailing export junk ("| [") removed from the original source line.
        return f"Post('{self.title}', '{self.date_posted}')"
"jaames.vu@gmail.com"
] | jaames.vu@gmail.com |
dc43154b2aa5893cfa927b7f35ef1427d65f1a3b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/conda_conda/conda-master/conda/common/configuration.py | c1cc2a3d474edce74a64aadc4aa2fa940792cd7f | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 34,143 | py | # -*- coding: utf-8 -*-
"""
A generalized application configuration utility.
Features include:
- lazy eval
- merges configuration files
- parameter type validation, with custom validation
- parameter aliases
Easily extensible to other source formats, e.g. json and ini
Limitations:
- at the moment only supports a "flat" config structure; no nested data structures
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod
from collections import Mapping, defaultdict
from glob import glob
from itertools import chain
from logging import getLogger
from os import environ, stat
from os.path import basename, join
from stat import S_IFDIR, S_IFMT, S_IFREG
from enum import Enum
from .compat import (isiterable, iteritems, itervalues, odict, primitive_types, string_types,
text_type, with_metaclass)
from .constants import EMPTY_MAP, NULL
from .yaml import yaml_load
from .. import CondaError, CondaMultiError
from .._vendor.auxlib.collection import AttrDict, first, frozendict, last, make_immutable
from .._vendor.auxlib.exceptions import ThisShouldNeverHappenError
from .._vendor.auxlib.path import expand
from .._vendor.auxlib.type_coercion import TypeCoercionError, typify_data_structure
try:
from cytoolz.dicttoolz import merge
from cytoolz.functoolz import excepts
from cytoolz.itertoolz import concat, concatv, unique
except ImportError:
from .._vendor.toolz.dicttoolz import merge
from .._vendor.toolz.functoolz import excepts
from .._vendor.toolz.itertoolz import concat, concatv, unique
try:
from ruamel_yaml.comments import CommentedSeq, CommentedMap
from ruamel_yaml.scanner import ScannerError
except ImportError: # pragma: no cover
from ruamel.yaml.comments import CommentedSeq, CommentedMap # pragma: no cover
from ruamel.yaml.scanner import ScannerError
log = getLogger(__name__)
def pretty_list(iterable, padding='  '):  # TODO: move elsewhere in conda.common
    """Render each element of `iterable` as an indented '- item' line.
    A non-iterable argument is treated as a single-element list."""
    items = iterable if isiterable(iterable) else [iterable]
    return '\n'.join("%s- %s" % (padding, element) for element in items)
def pretty_map(dictionary, padding='  '):
    """Render a mapping as indented 'key: value' lines, one per entry."""
    lines = ("%s%s: %s" % (padding, k, v) for k, v in iteritems(dictionary))
    return '\n'.join(lines)
class LoadError(CondaError):
    """Raised when a configuration file cannot be parsed; records the file
    location (path, line, column) of the failure."""

    def __init__(self, message, filepath, line, column):
        self.filepath = filepath
        self.line = line
        self.column = column
        formatted = ("Load Error: in %s on line %s, column %s. %s"
                     % (filepath, line, column, message))
        super(LoadError, self).__init__(formatted)
class ConfigurationError(CondaError):
    """Base class for all configuration-related errors in this module."""
    pass
class ValidationError(ConfigurationError):
    """Raised when a parameter value fails type or custom validation."""

    def __init__(self, parameter_name, parameter_value, source, msg=None, **kwargs):
        self.parameter_name = parameter_name
        self.parameter_value = parameter_value
        self.source = source
        # Fixed: the original wrote super(ConfigurationError, self), which
        # names the wrong class and skips ConfigurationError in the MRO;
        # py2-style super() must name the class it appears in.
        super(ValidationError, self).__init__(msg, **kwargs)
class MultipleKeysError(ValidationError):
    """Raised when a single source file defines a parameter under more than
    one of its aliased key names."""

    def __init__(self, source, keys, preferred_key):
        self.source = source
        self.keys = keys
        # pretty_list() does not end with a newline, so one is inserted after
        # the "%s" (the original message ran the last key and the final
        # sentence together on one line).
        msg = ("Multiple aliased keys in file %s:\n"
               "%s\n"
               "Must declare only one. Prefer '%s'" % (source, pretty_list(keys), preferred_key))
        super(MultipleKeysError, self).__init__(preferred_key, None, source, msg=msg)
class InvalidTypeError(ValidationError):
    """Raised when a parameter's merged value has a type outside the
    parameter's declared valid types."""

    def __init__(self, parameter_name, parameter_value, source, wrong_type, valid_types, msg=None):
        self.wrong_type = wrong_type
        self.valid_types = valid_types
        if msg is None:
            msg = ("Parameter %s = %r declared in %s has type %s.\n"
                   "Valid types: %s." % (parameter_name, parameter_value,
                                         source, wrong_type, pretty_list(valid_types)))
        super(InvalidTypeError, self).__init__(parameter_name, parameter_value, source, msg=msg)
class InvalidElementTypeError(InvalidTypeError):
    """Raised when one element of a sequence/map parameter has an invalid
    type; index_or_key identifies the offending element."""

    def __init__(self, parameter_name, parameter_value, source, wrong_type,
                 valid_types, index_or_key):
        # Sequences report "at index N"; maps report "for key K".
        qualifier = "at index" if isinstance(index_or_key, int) else "for key"
        msg = ("Parameter %s declared in %s has invalid element %r %s %s.\n"
               "Valid element types:\n"
               "%s." % (parameter_name, source, parameter_value, qualifier,
                        index_or_key, pretty_list(valid_types)))
        super(InvalidElementTypeError, self).__init__(parameter_name, parameter_value, source,
                                                      wrong_type, valid_types, msg=msg)
class CustomValidationError(ValidationError):
    """Raised when a parameter's user-supplied validation callable rejects a
    value; custom_message is the string the callable returned."""

    def __init__(self, parameter_name, parameter_value, source, custom_message):
        msg = ("Parameter %s = %r declared in %s is invalid.\n"
               "%s" % (parameter_name, parameter_value, source, custom_message))
        super(CustomValidationError, self).__init__(parameter_name, parameter_value, source,
                                                    msg=msg)
class MultiValidationError(CondaMultiError, ConfigurationError):
    """Aggregates several validation errors so they can all be reported in
    one raise (see raise_errors)."""
    def __init__(self, errors, *args, **kwargs):
        super(MultiValidationError, self).__init__(errors, *args, **kwargs)
def raise_errors(errors):
    """Raise the collected validation errors.

    No errors -> return True; exactly one -> raise it directly; several ->
    raise a single MultiValidationError wrapping them all.
    """
    if not errors:
        return True
    if len(errors) == 1:
        raise errors[0]
    raise MultiValidationError(errors)
class ParameterFlag(Enum):
    """Markers ('!final', '!top', '!bottom') that control how a parameter
    value is treated when merging configuration sources."""
    final = 'final'
    top = 'top'
    bottom = 'bottom'

    def __str__(self):
        return str(self.value)

    @classmethod
    def from_name(cls, name):
        """Look up a flag by member name, e.g. 'final'."""
        return cls[name]

    @classmethod
    def from_value(cls, value):
        """Look up a flag by its string value."""
        return cls(value)

    @classmethod
    def from_string(cls, string):
        """Parse a flag from marker text like '#!final'; None if the input
        is not a string or names no known flag."""
        try:
            stripped = string.strip('!#')
        except AttributeError:
            return None
        try:
            return cls.from_value(stripped)
        except ValueError:
            return None
@with_metaclass(ABCMeta)
class RawParameter(object):
    """One key/value pair read from a single configuration source, before
    merging and validation.  Subclasses adapt a particular source format
    (YAML file, environment variable, argparse namespace)."""

    def __init__(self, source, key, raw_value):
        self.source = source
        self.key = key
        self._raw_value = raw_value

    def __repr__(self):
        return text_type(vars(self))

    @abstractmethod
    def value(self, parameter_obj):
        # Return the raw value interpreted for the given Parameter descriptor.
        raise NotImplementedError()

    @abstractmethod
    def keyflag(self):
        # Return the ParameterFlag attached to the key itself, or None.
        raise NotImplementedError()

    @abstractmethod
    def valueflags(self, parameter_obj):
        # Return per-element flags for sequence/map values.
        raise NotImplementedError()

    @classmethod
    def make_raw_parameters(cls, source, from_map):
        """Wrap every entry of `from_map` in an instance of `cls`, keyed by
        parameter name; EMPTY_MAP when the source has no entries."""
        if from_map:
            return dict((key, cls(source, key, from_map[key])) for key in from_map)
        return EMPTY_MAP
class EnvRawParameter(RawParameter):
    """A parameter sourced from an APPNAME_* environment variable."""
    source = 'envvars'

    def value(self, parameter_obj):
        # Sequence parameters declare a string_delimiter; split the env var
        # text on it.  Primitive parameters use the text before "!important".
        if hasattr(parameter_obj, 'string_delimiter'):
            string_delimiter = getattr(parameter_obj, 'string_delimiter')
            # TODO: add stripping of !important, !top, and !bottom
            raw_value = self._raw_value
            if string_delimiter in raw_value:
                value = raw_value.split(string_delimiter)
            else:
                value = [raw_value]
            return tuple(v.strip() for v in value)
        else:
            return self.__important_split_value[0].strip()

    def keyflag(self):
        # A "!important" suffix in the env var marks the key as final.
        return ParameterFlag.final if len(self.__important_split_value) >= 2 else None

    def valueflags(self, parameter_obj):
        # Sequence values carry no per-element flags from the environment.
        if hasattr(parameter_obj, 'string_delimiter'):
            string_delimiter = getattr(parameter_obj, 'string_delimiter')
            # TODO: add stripping of !important, !top, and !bottom
            return tuple('' for _ in self._raw_value.split(string_delimiter))
        else:
            return self.__important_split_value[0].strip()

    @property
    def __important_split_value(self):
        # [text] or [text, remainder] depending on a "!important" marker.
        return self._raw_value.split("!important")

    @classmethod
    def make_raw_parameters(cls, appname):
        """Collect every APPNAME_* environment variable, stripping the
        prefix and lower-casing the remainder to form the parameter name."""
        keystart = "{0}_".format(appname.upper())
        raw_env = dict((k.replace(keystart, '', 1).lower(), v)
                       for k, v in iteritems(environ) if k.startswith(keystart))
        return super(EnvRawParameter, cls).make_raw_parameters(EnvRawParameter.source, raw_env)
class ArgParseRawParameter(RawParameter):
    """A parameter sourced from parsed command-line arguments."""
    source = 'cmd_line'

    def value(self, parameter_obj):
        return make_immutable(self._raw_value)

    def keyflag(self):
        # Command-line values never carry a key-level flag.
        return None

    def valueflags(self, parameter_obj):
        # None for primitives; an empty flag tuple for collection parameters.
        return None if isinstance(parameter_obj, PrimitiveParameter) else ()

    @classmethod
    def make_raw_parameters(cls, args_from_argparse):
        return super(ArgParseRawParameter, cls).make_raw_parameters(ArgParseRawParameter.source,
                                                                    args_from_argparse)
class YamlRawParameter(RawParameter):
    """A parameter sourced from a YAML file.  Parameter flags are read from
    YAML comments (e.g. '#!final') via ruamel's comment attributes."""
    # this class should encapsulate all direct use of ruamel.yaml in this module

    def __init__(self, source, key, raw_value, keycomment):
        self._keycomment = keycomment
        super(YamlRawParameter, self).__init__(source, key, raw_value)

    def value(self, parameter_obj):
        self.__process(parameter_obj)
        return self._value

    def keyflag(self):
        return ParameterFlag.from_string(self._keycomment)

    def valueflags(self, parameter_obj):
        self.__process(parameter_obj)
        return self._valueflags

    def __process(self, parameter_obj):
        # Lazily convert the ruamel node into an immutable value plus its
        # per-element flags; cached via the presence of self._value.
        if hasattr(self, '_value'):
            return
        elif isinstance(self._raw_value, CommentedSeq):
            valuecomments = self._get_yaml_list_comments(self._raw_value)
            self._valueflags = tuple(ParameterFlag.from_string(s) for s in valuecomments)
            self._value = tuple(self._raw_value)
        elif isinstance(self._raw_value, CommentedMap):
            valuecomments = self._get_yaml_map_comments(self._raw_value)
            self._valueflags = dict((k, ParameterFlag.from_string(v))
                                    for k, v in iteritems(valuecomments) if v is not None)
            self._value = frozendict(self._raw_value)
        elif isinstance(self._raw_value, primitive_types):
            self._valueflags = None
            self._value = self._raw_value
        else:
            raise ThisShouldNeverHappenError()  # pragma: no cover

    @staticmethod
    def _get_yaml_key_comment(commented_dict, key):
        # ruamel stores the end-of-line comment for a key at ca.items[key][2].
        try:
            return commented_dict.ca.items[key][2].value.strip()
        except (AttributeError, KeyError):
            return None

    @staticmethod
    def _get_yaml_list_comments(value):
        # One comment string (or None) per list element, in element order.
        items = value.ca.items
        raw_comment_lines = tuple(excepts((AttributeError, KeyError, TypeError),
                                          lambda q: items.get(q)[0].value.strip() or None,
                                          lambda _: None  # default value on exception
                                          )(q)
                                  for q in range(len(value)))
        return raw_comment_lines

    @staticmethod
    def _get_yaml_map_comments(rawvalue):
        # Comment string (or None) for each map key.
        return dict((key, excepts(KeyError,
                                  lambda k: rawvalue.ca.items[k][2].value.strip() or None,
                                  lambda _: None  # default value on exception
                                  )(key))
                    for key in rawvalue)

    @classmethod
    def make_raw_parameters(cls, source, from_map):
        if from_map:
            return dict((key, cls(source, key, from_map[key],
                                  cls._get_yaml_key_comment(from_map, key)))
                        for key in from_map)
        return EMPTY_MAP

    @classmethod
    def make_raw_parameters_from_file(cls, filepath):
        """Parse a YAML file into raw parameters; raises LoadError with the
        file location when the YAML is malformed."""
        with open(filepath, 'r') as fh:
            try:
                ruamel_yaml = yaml_load(fh)
            except ScannerError as err:
                mark = err.problem_mark
                raise LoadError("Invalid YAML", filepath, mark.line, mark.column)
        return cls.make_raw_parameters(filepath, ruamel_yaml) or EMPTY_MAP
def load_file_configs(search_path):
    """Load configuration from every existing path in `search_path`.

    Each path may be a single .yml/.yaml/condarc file or a directory of
    such files.  Non-existent paths are silently skipped.
    """
    # returns an ordered map of filepath and dict of raw parameter objects
    def _file_yaml_loader(fullpath):
        assert fullpath.endswith((".yml", ".yaml")) or "condarc" in basename(fullpath), fullpath
        yield fullpath, YamlRawParameter.make_raw_parameters_from_file(fullpath)

    def _dir_yaml_loader(fullpath):
        # Sorted so later (alphabetically greater) files take precedence
        # in the merge performed downstream.
        for filepath in sorted(concatv(glob(join(fullpath, "*.yml")),
                                       glob(join(fullpath, "*.yaml")))):
            yield filepath, YamlRawParameter.make_raw_parameters_from_file(filepath)

    # map a stat result to a file loader or a directory loader
    _loader = {
        S_IFREG: _file_yaml_loader,
        S_IFDIR: _dir_yaml_loader,
    }

    def _get_st_mode(path):
        # stat the path for file type, or None if path doesn't exist
        try:
            return S_IFMT(stat(path).st_mode)
        except OSError:
            return None

    expanded_paths = tuple(expand(path) for path in search_path)
    stat_paths = (_get_st_mode(path) for path in expanded_paths)
    load_paths = (_loader[st_mode](path)
                  for path, st_mode in zip(expanded_paths, stat_paths)
                  if st_mode is not None)
    raw_data = odict(kv for kv in chain.from_iterable(load_paths))
    return raw_data
@with_metaclass(ABCMeta)
class Parameter(object):
    """Descriptor base class for one configuration parameter.

    On attribute access it gathers matching raw parameters from every
    source on the owning Configuration instance, merges them (subclass
    policy via _merge), typifies and validates the result, and caches it.
    """
    _type = None           # the merged value's required python type
    _element_type = None   # element type for collection parameters

    def __init__(self, default, aliases=(), validation=None):
        self._name = None
        self._names = None
        self.default = default
        self.aliases = aliases
        self._validation = validation

    def _set_name(self, name):
        # this is an explicit method, and not a descriptor/setter
        # it's meant to be called by the Configuration metaclass
        self._name = name
        self._names = frozenset(x for x in chain(self.aliases, (name, )))
        return name

    @property
    def name(self):
        if self._name is None:
            # The Configuration metaclass should call the `_set_name` method.
            raise ThisShouldNeverHappenError()  # pragma: no cover
        return self._name

    @property
    def names(self):
        # The primary name plus all declared aliases.
        if self._names is None:
            # The Configuration metaclass should call the `_set_name` method.
            raise ThisShouldNeverHappenError()  # pragma: no cover
        return self._names

    def _raw_parameters_from_single_source(self, raw_parameters):
        # while supporting parameter name aliases, we enforce that only one definition is given
        # per data source
        keys = self.names & frozenset(raw_parameters.keys())
        matches = {key: raw_parameters[key] for key in keys}
        numkeys = len(keys)
        if numkeys == 0:
            return None, None
        elif numkeys == 1:
            return next(itervalues(matches)), None
        elif self.name in keys:
            # Multiple aliases present; prefer the primary name but still
            # report the conflict as an error.
            return matches[self.name], MultipleKeysError(raw_parameters[next(iter(keys))].source,
                                                         keys, self.name)
        else:
            return None, MultipleKeysError(raw_parameters[next(iter(keys))].source,
                                           keys, self.name)

    def _get_all_matches(self, instance):
        # a match is a raw parameter instance
        matches = []
        multikey_exceptions = []
        for filepath, raw_parameters in iteritems(instance.raw_data):
            match, error = self._raw_parameters_from_single_source(raw_parameters)
            if match is not None:
                matches.append(match)
            if error:
                multikey_exceptions.append(error)
        return matches, multikey_exceptions

    @abstractmethod
    def _merge(self, matches):
        # Subclass policy for combining matches from multiple sources.
        raise NotImplementedError()

    def __get__(self, instance, instance_type):
        # strategy is "extract and merge," which is actually just map and reduce
        # extract matches from each source in SEARCH_PATH
        # then merge matches together
        if self.name in instance._cache_:
            return instance._cache_[self.name]

        matches, errors = self._get_all_matches(instance)
        try:
            result = typify_data_structure(self._merge(matches) if matches else self.default,
                                           self._element_type)
        except TypeCoercionError as e:
            errors.append(CustomValidationError(self.name, e.value, "<<merged>>", text_type(e)))
        else:
            errors.extend(self.collect_errors(instance, result))
        # Raises if any errors were collected; otherwise a no-op.
        raise_errors(errors)
        instance._cache_[self.name] = result
        return result

    def collect_errors(self, instance, value, source="<<merged>>"):
        """Validate a Parameter value.

        Args:
            instance (Configuration): The instance object to which the Parameter descriptor is
                attached.
            value: The value to be validated.

        Returns:
            list: validation errors found (empty when the value is valid).
        """
        errors = []
        if not isinstance(value, self._type):
            errors.append(InvalidTypeError(self.name, value, source, type(value),
                                           self._type))
        elif self._validation is not None:
            result = self._validation(value)
            if result is False:
                errors.append(ValidationError(self.name, value, source))
            elif isinstance(result, string_types):
                errors.append(CustomValidationError(self.name, value, source, result))
        return errors

    def _match_key_is_important(self, raw_parameter):
        return raw_parameter.keyflag() is ParameterFlag.final

    def _first_important_matches(self, matches):
        # Truncate the match list just after the first '!final' match;
        # everything after it is ignored during merging.
        idx = first(enumerate(matches), lambda x: self._match_key_is_important(x[1]),
                    apply=lambda x: x[0])
        return matches if idx is None else matches[:idx+1]

    @staticmethod
    def _str_format_flag(flag):
        return " #!%s" % flag if flag is not None else ''

    @staticmethod
    def _str_format_value(value):
        if value is None:
            return 'None'
        return value

    @classmethod
    def repr_raw(cls, raw_parameter):
        # NOTE(review): declared as a classmethod here, but subclasses
        # override it as an instance method -- confirm intended usage.
        raise NotImplementedError()
class PrimitiveParameter(Parameter):
    """Parameter type for a Configuration class that holds a single python primitive value.

    The python primitive types are str, int, float, complex, bool, and NoneType. In addition,
    python 2 has long and unicode types.
    """

    def __init__(self, default, aliases=(), validation=None, parameter_type=None):
        """
        Args:
            default (Any): The parameter's default value.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value. Returning
                `None` also indicates a valid value.
            parameter_type (type or Tuple[type]): Type-validation of parameter's value. If None,
                type(default) is used.
        """
        self._type = type(default) if parameter_type is None else parameter_type
        self._element_type = self._type
        super(PrimitiveParameter, self).__init__(default, aliases, validation)

    def _merge(self, matches):
        # A '!final' match wins outright; otherwise the last source wins.
        important_match = first(matches, self._match_key_is_important, default=None)
        if important_match is not None:
            return important_match.value(self)

        last_match = last(matches, lambda x: x is not None, default=None)
        if last_match is not None:
            return last_match.value(self)
        # _merge is only called with a non-empty match list (see __get__).
        raise ThisShouldNeverHappenError()  # pragma: no cover

    def repr_raw(self, raw_parameter):
        # Render "key: value" plus any key flag, e.g. "foo: bar  #!final".
        return "%s: %s%s" % (raw_parameter.key,
                             self._str_format_value(raw_parameter.value(self)),
                             self._str_format_flag(raw_parameter.keyflag()))
class SequenceParameter(Parameter):
    """Parameter type for a Configuration class that holds a sequence (i.e. list) of python
    primitive values.
    """
    _type = tuple
    def __init__(self, element_type, default=(), aliases=(), validation=None,
                 string_delimiter=','):
        """
        Args:
            element_type (type or Iterable[type]): The generic type of each element in
                the sequence.
            default (Iterable[str]): The parameter's default value.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value.
        """
        self._element_type = element_type
        self.string_delimiter = string_delimiter
        super(SequenceParameter, self).__init__(default, aliases, validation)
    def collect_errors(self, instance, value, source="<<merged>>"):
        # Base-class checks plus a per-element type check against element_type.
        errors = super(SequenceParameter, self).collect_errors(instance, value)
        element_type = self._element_type
        for idx, element in enumerate(value):
            if not isinstance(element, element_type):
                errors.append(InvalidElementTypeError(self.name, element, source,
                                                      type(element), element_type, idx))
        return errors
    def _merge(self, matches):
        """Merge sequence values across sources, honoring `#!top`, `#!bottom`,
        and `#!final` line flags; returns a de-duplicated tuple."""
        # get matches up to and including first important_match
        # but if no important_match, then all matches are important_matches
        relevant_matches = self._first_important_matches(matches)
        # get individual lines from important_matches that were marked important
        # these will be prepended to the final result
        def get_marked_lines(match, marker, parameter_obj):
            return tuple(line
                         for line, flag in zip(match.value(parameter_obj),
                                               match.valueflags(parameter_obj))
                         if flag is marker) if match else ()
        top_lines = concat(get_marked_lines(m, ParameterFlag.top, self) for m in relevant_matches)
        # also get lines that were marked as bottom, but reverse the match order so that lines
        # coming earlier will ultimately be last
        bottom_lines = concat(get_marked_lines(m, ParameterFlag.bottom, self) for m in
                              reversed(relevant_matches))
        # now, concat all lines, while reversing the matches
        # reverse because elements closer to the end of search path take precedence
        all_lines = concat(m.value(self) for m in reversed(relevant_matches))
        # stack top_lines + all_lines, then de-dupe
        top_deduped = tuple(unique(concatv(top_lines, all_lines)))
        # take the top-deduped lines, reverse them, and concat with reversed bottom_lines
        # this gives us the reverse of the order we want, but almost there
        # NOTE: for a line value marked both top and bottom, the bottom marker will win out
        #       for the top marker to win out, we'd need one additional de-dupe step
        bottom_deduped = unique(concatv(reversed(tuple(bottom_lines)), reversed(top_deduped)))
        # just reverse, and we're good to go
        return tuple(reversed(tuple(bottom_deduped)))
    def repr_raw(self, raw_parameter):
        # Render as a YAML-style block list, preserving per-line flags.
        lines = list()
        lines.append("%s:%s" % (raw_parameter.key,
                                self._str_format_flag(raw_parameter.keyflag())))
        for q, value in enumerate(raw_parameter.value(self)):
            valueflag = raw_parameter.valueflags(self)[q]
            lines.append(" - %s%s" % (self._str_format_value(value),
                                      self._str_format_flag(valueflag)))
        return '\n'.join(lines)
    def _get_all_matches(self, instance):
        # this is necessary to handle argparse `action="append"`, which can't be set to a
        # default value of NULL
        matches, multikey_exceptions = super(SequenceParameter, self)._get_all_matches(instance)
        matches = tuple(m for m in matches if m._raw_value is not None)
        return matches, multikey_exceptions
class MapParameter(Parameter):
    """Parameter type for a Configuration class that holds a map (i.e. dict) of python
    primitive values.
    """
    _type = dict
    def __init__(self, element_type, default=None, aliases=(), validation=None):
        """
        Args:
            element_type (type or Iterable[type]): The generic type of each element.
            default (Mapping): The parameter's default value. If None, will be an empty dict.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value.
        """
        self._element_type = element_type
        super(MapParameter, self).__init__(default or dict(), aliases, validation)
    def collect_errors(self, instance, value, source="<<merged>>"):
        # Base-class checks plus a per-value type check against element_type.
        errors = super(MapParameter, self).collect_errors(instance, value)
        if isinstance(value, Mapping):
            element_type = self._element_type
            errors.extend(InvalidElementTypeError(self.name, val, source, type(val),
                                                  element_type, key)
                          for key, val in iteritems(value) if not isinstance(val, element_type))
        return errors
    def _merge(self, matches):
        """Merge map values across sources; keys flagged `#!final` in any
        relevant match override the plain last-writer-wins merge."""
        # get matches up to and including first important_match
        # but if no important_match, then all matches are important_matches
        relevant_matches = self._first_important_matches(matches)
        # mapkeys with important matches
        def key_is_important(match, key):
            return match.valueflags(self).get(key) is ParameterFlag.final
        important_maps = tuple(dict((k, v)
                                    for k, v in iteritems(match.value(self))
                                    if key_is_important(match, k))
                               for match in relevant_matches)
        # dump all matches in a dict
        # then overwrite with important matches
        return merge(concatv((m.value(self) for m in relevant_matches),
                             reversed(important_maps)))
    def repr_raw(self, raw_parameter):
        # Render as a YAML-style nested mapping, preserving per-key flags.
        lines = list()
        lines.append("%s:%s" % (raw_parameter.key,
                                self._str_format_flag(raw_parameter.keyflag())))
        for valuekey, value in iteritems(raw_parameter.value(self)):
            valueflag = raw_parameter.valueflags(self).get(valuekey)
            lines.append(" %s: %s%s" % (valuekey, self._str_format_value(value),
                                        self._str_format_flag(valueflag)))
        return '\n'.join(lines)
class ConfigurationType(type):
    """metaclass for Configuration"""
    def __init__(cls, name, bases, attr):
        super(ConfigurationType, cls).__init__(name, bases, attr)
        # call _set_name for each parameter
        # (also records the class's Parameter attribute names in parameter_names)
        cls.parameter_names = tuple(p._set_name(name) for name, p in iteritems(cls.__dict__)
                                    if isinstance(p, Parameter))
@with_metaclass(ConfigurationType)
class Configuration(object):
    """Layered configuration container merging, in priority order, config
    files found on a search path, environment variables (prefixed by
    ``app_name``), and argparse arguments.  Parameter descriptors declared on
    subclasses define the typed, validated fields."""
    def __init__(self, search_path=(), app_name=None, argparse_args=None):
        # raw_data maps source name -> raw parameters; _cache_ holds merged,
        # typed values and is invalidated whenever a source changes.
        self.raw_data = odict()
        self._cache_ = dict()
        self._reset_callbacks = set()  # TODO: make this a boltons ordered set
        self._validation_errors = defaultdict(list)
        if not hasattr(self, '_search_path') and search_path is not None:
            # we only set search_path once; we never change it
            self._search_path = search_path
        if not hasattr(self, '_app_name') and app_name is not None:
            # we only set app_name once; we never change it
            self._app_name = app_name
        # load sources in ascending priority: files, then env, then argparse
        self._set_search_path(search_path)
        self._set_env_vars(app_name)
        self._set_argparse_args(argparse_args)
    def _set_search_path(self, search_path):
        """(Re)load config-file sources from `search_path`; returns self."""
        if not hasattr(self, '_search_path') and search_path is not None:
            # we only set search_path once; we never change it
            self._search_path = search_path
        if getattr(self, '_search_path', None):
            # we need to make sure old data doesn't stick around if we are resetting
            # easiest solution is to completely clear raw_data and re-load other sources
            # if raw_data holds contents
            raw_data_held_contents = bool(self.raw_data)
            if raw_data_held_contents:
                self.raw_data = odict()
            self._set_raw_data(load_file_configs(search_path))
            if raw_data_held_contents:
                # this should only be triggered on re-initialization / reset
                self._set_env_vars(getattr(self, '_app_name', None))
                self._set_argparse_args(self._argparse_args)
        self._reset_cache()
        return self
    def _set_env_vars(self, app_name=None):
        """Load ``<APP_NAME>_*`` environment variables as a source; returns self."""
        if not hasattr(self, '_app_name') and app_name is not None:
            # we only set app_name once; we never change it
            self._app_name = app_name
        if getattr(self, '_app_name', None):
            erp = EnvRawParameter
            self.raw_data[erp.source] = erp.make_raw_parameters(self._app_name)
        self._reset_cache()
        return self
    def _set_argparse_args(self, argparse_args):
        # the argparse_args we store internally in this class as self._argparse_args
        # will be a mapping type, not a non-`dict` object like argparse_args is natively
        if hasattr(argparse_args, '__dict__'):
            # the argparse_args from argparse will be an object with a __dict__ attribute
            # and not a mapping type like this method will turn it into
            self._argparse_args = AttrDict((k, v) for k, v, in iteritems(vars(argparse_args))
                                           if v is not NULL)
        elif not argparse_args:
            # argparse_args can be initialized as `None`
            self._argparse_args = AttrDict()
        else:
            # we're calling this method with argparse_args that are a mapping type, likely
            # already having been processed by this method before
            self._argparse_args = AttrDict((k, v) for k, v, in iteritems(argparse_args)
                                           if v is not NULL)
        source = ArgParseRawParameter.source
        self.raw_data[source] = ArgParseRawParameter.make_raw_parameters(self._argparse_args)
        self._reset_cache()
        return self
    def _set_raw_data(self, raw_data):
        # Merge externally-built raw sources into raw_data and drop the cache.
        self.raw_data.update(raw_data)
        self._reset_cache()
        return self
    def _reset_cache(self):
        # Invalidate all merged values and notify registered listeners.
        self._cache_ = dict()
        for callback in self._reset_callbacks:
            callback()
        return self
    def register_reset_callaback(self, callback):
        # NOTE(review): method name is misspelled ('callaback'); kept as-is
        # because renaming would break existing callers.
        self._reset_callbacks.add(callback)
    def check_source(self, source):
        """Typify and validate every parameter present in one raw source.
        Returns (typed_values_dict, validation_errors_list)."""
        # this method ends up duplicating much of the logic of Parameter.__get__
        # I haven't yet found a way to make it more DRY though
        typed_values = {}
        validation_errors = []
        raw_parameters = self.raw_data[source]
        for key in self.parameter_names:
            parameter = self.__class__.__dict__[key]
            match, multikey_error = parameter._raw_parameters_from_single_source(raw_parameters)
            if multikey_error:
                validation_errors.append(multikey_error)
            if match is not None:
                try:
                    typed_value = typify_data_structure(match.value(parameter),
                                                       parameter._element_type)
                except TypeCoercionError as e:
                    validation_errors.append(CustomValidationError(match.key, e.value,
                                                                   match.source, text_type(e)))
                else:
                    collected_errors = parameter.collect_errors(self, typed_value, match.source)
                    if collected_errors:
                        validation_errors.extend(collected_errors)
                    else:
                        typed_values[match.key] = typed_value  # parameter.repr_raw(match)
            else:
                # this situation will happen if there is a multikey_error and none of the
                # matched keys is the primary key
                pass
        return typed_values, validation_errors
    def validate_all(self):
        """Validate every source individually, then the merged configuration;
        raises on any accumulated error."""
        validation_errors = list(chain.from_iterable(self.check_source(source)[1]
                                                     for source in self.raw_data))
        raise_errors(validation_errors)
        self.validate_configuration()
    @staticmethod
    def _collect_validation_error(func, *args, **kwargs):
        # Run func, converting a raised ConfigurationError into a tuple of
        # errors instead of propagating it.  Note the trailing comma: a single
        # exception is wrapped in a 1-tuple.
        try:
            func(*args, **kwargs)
        except ConfigurationError as e:
            return e.errors if hasattr(e, 'errors') else e,
        return ()
    def validate_configuration(self):
        # Touch every parameter (forcing merge + validation), then run any
        # subclass-defined cross-parameter checks.
        errors = chain.from_iterable(Configuration._collect_validation_error(getattr, self, name)
                                     for name in self.parameter_names)
        post_errors = self.post_build_validation()
        raise_errors(tuple(chain.from_iterable((errors, post_errors))))
    def post_build_validation(self):
        # Hook for subclasses: return an iterable of cross-parameter errors.
        return ()
    def collect_all(self):
        """Return {source: typed_values} for all sources with any values,
        raising first if any source has validation errors."""
        typed_values = odict()
        validation_errors = odict()
        for source in self.raw_data:
            typed_values[source], validation_errors[source] = self.check_source(source)
        raise_errors(tuple(chain.from_iterable(itervalues(validation_errors))))
        return odict((k, v) for k, v in iteritems(typed_values) if v)
| [
"659338505@qq.com"
] | 659338505@qq.com |
7e78b651b01d138f05ac37cd892e2b95cbb61542 | 23c337e16799b8eeddca04a81286e41868198554 | /script.py | 0ec2e474c940caa2749d18bc443f4afe812fbac3 | [] | no_license | Rohini-git/Postgresql_python | 2a73a454fc070c2c5e6efe2c1ba89e62e1545ed3 | ec5a8fa8edfbba04ffbd765aaada53bacfc479b1 | refs/heads/main | 2023-02-03T13:33:57.215746 | 2020-12-07T22:25:24 | 2020-12-07T22:25:24 | 319,456,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | #!/usr/bin/python
import psycopg2
from config import config
def connect():
    """Connect to the PostgreSQL server, print its version, and disconnect.

    Connection parameters come from config(); all failures (bad config or
    database errors) are printed rather than raised, and the connection is
    always closed in the finally block.
    """
    conn = None
    try:
        # read connection parameters
        params = config()
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
        # create a cursor
        cur = conn.cursor()
        # execute a statement
        print('PostgreSQL database version:')
        cur.execute('SELECT version()')
        # display the PostgreSQL database server version
        db_version = cur.fetchone()
        print(db_version)
        # close the communication with the PostgreSQL
        cur.close()
    except Exception as error:
        # psycopg2.DatabaseError is a subclass of Exception, so a single
        # handler covers both config() and database failures (the original
        # tuple `(Exception, psycopg2.DatabaseError)` was redundant).
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
if __name__ == '__main__':
    # run the connectivity check when executed as a script
    connect()
| [
"noreply@github.com"
] | noreply@github.com |
8b515c1c969fb94a6217cb97703e541fd5ba293e | 0b6db00d2e0514b71eaeb394e9e5398e99dfd723 | /good-shows/test_load_db.py | 7db33197bda3bfcb03cd1faacfbb5113a8719d55 | [] | no_license | jeremyber/GoodShows | 54d2979009dca76404bdc0cdc07e280fbbc5a1dc | f7ed7f26e8562f25592239f7abae8959743d22af | refs/heads/master | 2021-01-14T03:46:38.848181 | 2020-03-02T02:35:45 | 2020-03-02T02:35:45 | 242,589,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import unittest
import unittest.mock as mock
from unittest.mock import patch
from load_db import DBLoader
class TestLoadDB(unittest.TestCase):
    """Unit tests for DBLoader: constructor validation and CSV-to-INSERT
    statement generation (file access is mocked, no real I/O)."""
    def test_constructor_expected(self):
        # Function should only take a file path (string)
        # test expected functionality
        assert "abc" == DBLoader("abc").path_to_file
    def test_constructor_not_string(self):
        # non-string paths must be rejected with TypeError
        with self.assertRaises(TypeError):
            DBLoader(123)
    # #TODO: Finish this function
    # def test_load_the_database(self, mocker):
    #     mocker.patch('DBLoader.get_insert_statements()')
    #     assert()
    def test_get_insert_statements_correct(self):
        # a well-formed 3-column CSV row becomes one INSERT statement
        expected_list = ["INSERT INTO shows (band, date, venue) VALUES ('The Strokes', '8/1/2019', 'Lollapalooza 2019 D1')"]
        with mock.patch('builtins.open', mock.mock_open(read_data= "a,b,c\nThe Strokes,8/1/2019,Lollapalooza 2019 D1")):
            assert expected_list == DBLoader("abc").get_insert_statements()
    def test_get_insert_statements_incorrect_not_enough_columns(self):
        # rows with fewer than 3 columns must raise TypeError
        with self.assertRaises(TypeError):
            with mock.patch('builtins.open',
                            mock.mock_open(read_data="a,b\nThe Strokes,8/1/2019")):
                print(DBLoader("abc").get_insert_statements())
"beramiah@gmail.com"
] | beramiah@gmail.com |
4cb83fe1c456cd5a3d4b5c44db170ac4c643b734 | bab1615ee6a758a3284c4385bca22dd1aba8ac9e | /爬取王者荣耀英雄图片/2.py | 58eae657eb3549fd9c48baba729117ab9729e371 | [] | no_license | a1169804597/home | 436b3434057c679d245ffe6e3d61004632841d1e | 9b1e63e3eee6c22a281d0eb54dfa7400f2952a87 | refs/heads/master | 2022-12-01T17:08:56.014555 | 2020-09-17T23:34:12 | 2020-09-17T23:34:12 | 228,879,970 | 2 | 1 | null | 2022-11-22T02:26:05 | 2019-12-18T16:29:29 | HTML | UTF-8 | Python | false | false | 1,617 | py | #!E:\python\env\python37
#-*- coding:utf-8 -*-
# @time :2019/5/25
#@Auther :zbwu103
#@name:爬去王者荣耀英雄皮肤方法二
import os,re
import requests
from requests import codes
from hashlib import md5
# Fetch the official hero list JSON for Honor of Kings.
# NOTE(review): this HTTP request runs at import time of the module.
url = 'https://pvp.qq.com/web201605/js/herolist.json'
headers={
    'referer': 'https://pvp.qq.com/web201605/herolist.shtml',
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'x-requested-with': 'XMLHttpRequest'
}
html = requests.get(url,headers=headers)
html=html.json()
# print(html)
# parallel lists: hero display names (cname) and numeric ids (ename)
heroname=list(map(lambda x:x['cname'],html))
heronumber=list(map(lambda x:x['ename'],html))
# print(heroname)
# print(heronumber)
def main():
    """Download up to 11 skin images for every hero into a per-hero folder.

    Uses the module-level parallel lists `heronumber` (ids) and `heroname`
    (display names). Existing images are skipped; non-200 responses (skins
    that don't exist) are silently ignored, matching the original behavior.
    """
    base_dir = "D:\\python\\项目\\venv\\项目操作\\img\\"
    # zip replaces the original manual `li` index counter
    for ename, cname in zip(heronumber, heroname):
        img_path = base_dir + cname
        # makedirs(exist_ok=True) also creates missing parent directories,
        # where the original os.mkdir would fail
        os.makedirs(img_path, exist_ok=True)
        for skin_no in range(1, 12):
            hero_link = ('http://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/'
                         + str(ename) + '/' + str(ename) + '-bigskin-' + str(skin_no) + '.jpg')
            im = requests.get(hero_link)
            if im.status_code == 200:
                file_path = img_path + os.path.sep + str(skin_no) + '.jpg'
                if not os.path.exists(file_path):
                    # write via the absolute path instead of os.chdir(), so the
                    # process working directory is no longer mutated as a side effect
                    with open(file_path, 'wb') as f:
                        f.write(im.content)
                    print('下载成功')
                else:
                    print('图片已经存在')
| [
"1169804597@qq.com"
] | 1169804597@qq.com |
d2d2e8b51cc29ab51ee2998ddd3a367c920ed677 | 86726c037604396899876c1559d761eef5e86fca | /users/views.py | 51fc463c44c42732a1799ab4c6e61dfe586057f2 | [] | no_license | GoldenPalladin/PersonaHR | 2ef67d666041a30df3afefaabf73bef3b6285b88 | 67a2e07497116c6a26dfec8b6c521237ec79ac39 | refs/heads/master | 2023-01-19T06:07:15.317873 | 2020-11-19T22:46:45 | 2020-11-19T22:46:45 | 303,156,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from rest_framework import viewsets
from django_filters.rest_framework import DjangoFilterBackend
from .serializers import UserProfile, UserProfileSerializer
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for UserProfile records."""
    queryset = UserProfile.objects.all()
    serializer_class = UserProfileSerializer
    # enable query-string filtering, e.g. ?userType=<value>, via django-filter
    filter_backends = [DjangoFilterBackend, ]
    filterset_fields = ['userType']
| [
"p.yakovlev@live.ru"
] | p.yakovlev@live.ru |
390fc6499398ef604d76ba10579f4554f2ea57c1 | 5c13636a1504bde99e3c0ea6b3e0a276b0b53934 | /apop/ASR/divers/transformer_pretraining.py | 4ee033d17554c9f9da27b272add9dcecb8ddf312 | [] | no_license | thbeucher/ML_pytorch | 3555d7320b250ae6c1d44efcf51af78368727863 | 0eebc122396583eccb05fc2fd9a595cbb554b0de | refs/heads/master | 2023-07-24T21:51:15.611206 | 2023-06-15T16:25:56 | 2023-06-15T16:25:56 | 217,279,944 | 0 | 0 | null | 2023-07-22T19:40:34 | 2019-10-24T11:12:10 | Python | UTF-8 | Python | false | false | 8,321 | py | import os
import ast
import sys
import torch
import logging
import argparse
from tqdm import tqdm
import data as d
import utils as u
import optimizer as opt
from models.transformer.transformer import Transformer
from models.transformer.embedder import PositionalEmbedder
def train_model(model, metadata, max_epochs=50, train_score_step=2, eval_step=10, save_path=''):
    """Train `model` on the parrot task (reproduce its own decoder input) and
    checkpoint whenever validation word accuracy improves.

    Relies on the module-level `plotter` (set in __main__) for visdom plots.
    metadata.SM is a score tracker fed batch-by-batch; its scores are read and
    reset once per epoch.
    """
    optimizer = opt.RAdam(model.parameters(), lr=settings['lr'])
    val_word_acc, val_word_acc_memory, val_sentence_acc = 0, 0, 0
    for epoch in tqdm(range(max_epochs)):
        for _, dec_in, _ in metadata.train_data_loader:
            dec_in = dec_in.to(metadata.device)
            # parrot task: the decoder input is also the encoder input and target
            preds = model(dec_in, dec_in)
            optimizer.zero_grad()
            current_loss = metadata.loss(preds.view(-1, preds.shape[-1]), dec_in.view(-1))
            current_loss.backward()
            optimizer.step()
            metadata.SM.partial_feed(dec_in.tolist(), preds.argmax(dim=-1).tolist())
        if epoch % train_score_step == 0:
            _, word_acc, sentence_acc, _ = metadata.SM.get_scores(None, None, stop_idx=metadata.eos_idx, from_feed=True)
            logging.info(f'Epoch {epoch} - Training word accuracy = {word_acc:.3f} | sentence accuracy = {sentence_acc:.3f}')
            plotter.line_plot('train word accuracy', 'train', 'Pretrainer Word Accuracy', epoch, word_acc)
        metadata.SM.reset_feed()
        if epoch % eval_step == 0:
            val_word_acc, val_sentence_acc = eval_model(model, metadata)
            # keep only the best checkpoint by validation word accuracy
            if val_word_acc > val_word_acc_memory:
                u.save_checkpoint(model, optimizer, save_path + 'parrot_transformer.pt')
                logging.info(f'Save model with validation word accuracy = {val_word_acc} | sentence accuracy = {val_sentence_acc}')
                val_word_acc_memory = val_word_acc
def eval_model(model, metadata):
    """Greedy-decode the test set and return (word_accuracy, sentence_accuracy).

    Temporarily switches the model to eval mode and restores train mode before
    returning; the shared score tracker metadata.SM is reset afterwards.
    """
    model.eval()
    with torch.no_grad():
        for _, dec_in, _ in metadata.test_data_loader:
            dec_in = dec_in.to(metadata.device)
            preds = model.greedy_decoding(dec_in, metadata.eos_idx, metadata.pad_idx, max_seq_len=dec_in.shape[1])
            metadata.SM.partial_feed(dec_in.tolist(), preds.tolist())
    _, word_acc, sentence_acc, _ = metadata.SM.get_scores(None, None, stop_idx=metadata.eos_idx, from_feed=True)
    model.train()
    metadata.SM.reset_feed()
    return word_acc, sentence_acc
def pretrain_transformer_parrot(settings):
    """Build data, embedders and Transformer from `settings`, then run the
    parrot-task pretraining loop (train_model)."""
    metadata = d.Metadata(train_folder=settings['train_folder'], test_folder=settings['test_folder'],
                          train_metadata=settings['train_metadata'], test_metadata=settings['test_metadata'],
                          ngram_metadata=settings['ngram_metadata'],
                          vocab=settings['vocab'], decay_step=settings['decay_step'],
                          subset=settings['subset'], percent=settings['percent'],
                          batch_size=settings['batch_size'], size_limits=settings['size_limits'],
                          create_mask=settings['create_mask'], loss=settings['loss'])
    # NOTE(review): the encoder embedder deliberately reuses the decoder's
    # seq-len/embedding-dim settings — for the parrot task the encoder input
    # IS the decoder input; confirm if this file is repurposed.
    encoder_embedder = PositionalEmbedder(settings['max_dec_in_seq_len'], settings['decoder_embedding_dim'], settings['d_model'],
                                          scaling=settings['scaling'], reduce_dim=settings['decoder_reduce_dim'],
                                          dropout=settings['dropout'], device=metadata.device, output_size=metadata.output_size)
    decoder_embedder = PositionalEmbedder(settings['max_dec_in_seq_len'], settings['decoder_embedding_dim'], settings['d_model'],
                                          scaling=settings['scaling'], reduce_dim=settings['decoder_reduce_dim'],
                                          dropout=settings['dropout'], device=metadata.device, output_size=metadata.output_size)
    model = Transformer(settings['n_encoder_blocks'],
                        settings['n_decoder_blocks'],
                        settings['d_model'],
                        settings['d_keys'],
                        settings['d_values'],
                        settings['n_heads'],
                        settings['d_ff'],
                        metadata.output_size,
                        encoder_embedder=encoder_embedder,
                        decoder_embedder=decoder_embedder,
                        encoder_embedding_dim=settings['encoder_embedding_dim'],
                        decoder_embedding_dim=settings['decoder_embedding_dim'],
                        max_enc_in_seq_len=settings['max_enc_in_seq_len'],
                        max_dec_in_seq_len=settings['max_dec_in_seq_len'],
                        encoder_reduce_dim=True,
                        decoder_reduce_dim=True,
                        apply_softmax=False,
                        scaling=settings['scaling'],
                        pad_idx=metadata.pad_idx,
                        dropout=settings['dropout'],
                        device=metadata.device).to(metadata.device)
    train_model(model, metadata, max_epochs=settings['max_epochs'], train_score_step=settings['train_score_step'],
                eval_step=settings['eval_step'], save_path=settings['save_path'])
if __name__ == "__main__":
settings = u.load_json('settings.json') if os.path.isfile('settings.json') else {}
argparser = argparse.ArgumentParser(prog='transformer_pretraining.py', description='Pretrain Transformer')
argparser.add_argument('--train_folder', default='../../datasets/openslr/LibriSpeech/train-clean-100/', type=str)
argparser.add_argument('--test_folder', default='../../datasets/openslr/LibriSpeech/test-clean/', type=str)
argparser.add_argument('--train_metadata', default='metadata_train-clean-100.pk', type=str)
argparser.add_argument('--test_metadata', default='metadata_test-clean.pk', type=str)
argparser.add_argument('--ngram_metadata', default='metadata_custom_precoding.pk', type=str)
argparser.add_argument('--vocab', default='unigram', type=str)
argparser.add_argument('--decay_step', default=0.01, type=float)
argparser.add_argument('--batch_size', default=32, type=int)
argparser.add_argument('--subset', default=False, type=ast.literal_eval)
argparser.add_argument('--percent', default=0.2, type=float)
argparser.add_argument('--size_limits', default=False, type=ast.literal_eval)
argparser.add_argument('--create_mask', default=True, type=ast.literal_eval)
argparser.add_argument('--loss', default='cross_entropy', type=str)
argparser.add_argument('--n_encoder_blocks', default=2, type=int)
argparser.add_argument('--n_decoder_blocks', default=2, type=int)
argparser.add_argument('--d_model', default=512, type=int)
argparser.add_argument('--d_keys', default=64, type=int)
argparser.add_argument('--d_values', default=64, type=int)
argparser.add_argument('--n_heads', default=8, type=int)
argparser.add_argument('--d_ff', default=1024, type=int)
argparser.add_argument('--dropout', default=0.2, type=float)
argparser.add_argument('--encoder_embedding_dim', default=80, type=int)
argparser.add_argument('--decoder_embedding_dim', default=100, type=int)
argparser.add_argument('--encoder_reduce_dim', default=False, type=ast.literal_eval)
argparser.add_argument('--decoder_reduce_dim', default=False, type=ast.literal_eval)
argparser.add_argument('--max_enc_in_seq_len', default=900, type=int)
argparser.add_argument('--max_dec_in_seq_len', default=600, type=int)
argparser.add_argument('--scaling', default=True, type=ast.literal_eval)
argparser.add_argument('--lr', default=1e-4, type=float)
argparser.add_argument('--save_path', default='pretraining/', type=str)
argparser.add_argument('--eval_step', default=10, type=int)
argparser.add_argument('--train_score_step', default=2, type=int)
argparser.add_argument('--max_epochs', default=50, type=int)
argparser.add_argument('--logfile', default='_transformer_pretraining_logs.txt', type=str)
args = argparser.parse_args()
# logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.basicConfig(filename=args.logfile, filemode='a', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
settings = u.populate_configuration(settings, vars(args))
global plotter
plotter = u.VisdomPlotter(env_name='Pretrainer Plots')
if not os.path.isdir(settings['save_path']):
os.mkdir(settings['save_path'])
rep = input('Start Transformer Parrot Pretraining? (y or n): ')
if rep == 'y':
pretrain_transformer_parrot(settings) | [
"thomas.beucher@sap.com"
] | thomas.beucher@sap.com |
737e464d7f1070b52b86d17274f4c1b348a1741d | e9257661b8a0be9ded34ef3d60c8018c4f2829d3 | /Epidemiology/wsgi.py | ddd0264700953def72d3778881ff51feff5bb229 | [] | no_license | Ambarishpk/Epidemiology_with_Highcharts | 86e7be6313f623232af16bad63dc3f6e236e725e | 0b823eb17be1b60b3ba4c3801e49f6382c1569df | refs/heads/master | 2022-12-21T00:33:11.559750 | 2020-09-29T04:05:08 | 2020-09-29T04:05:08 | 254,281,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for Epidemiology project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Epidemiology.settings')
application = get_wsgi_application()
| [
"aambu1998@gmail.com"
] | aambu1998@gmail.com |
738920008605c7761bdfcb5b73d23b28c75bf972 | 51c9dd5cfe4532fb1775fec65eb8fcec73a324de | /klue-level2-nlp-11-main/code/model/testmodel.py | 0c96965ce816e598500f345cec232013442e9129 | [] | no_license | Hong-Hyun-Seung/KLUE_word_relation | fe78587abed5e83fef28d5f40cee5fa54d8f1ad0 | b8ffa2ad6365e43b4b00960bf40291d8f09b8961 | refs/heads/main | 2023-08-23T09:15:06.737140 | 2021-10-14T04:57:04 | 2021-10-14T04:57:04 | 416,992,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | class testmodel():
def __init__(self,params):
self.layer , self.classNum = params.values()
print(self.layer , self.classNum)
| [
"noreply@github.com"
] | noreply@github.com |
e9a512b76683460e70e8f31c4ae4f2d4f5144fb0 | 62c613e1f2bf062f807294ec6da4ae35bda6ac86 | /abc146-d.py | 195a5dc74cc33e8cfd7bc84c234a493644e42d2a | [] | no_license | teru01/python_algorithms | 6b463c78c801b68f93dda2be2f67c9688dc3cc07 | 8feb194f53b619ab7b9c964a32df7b4df32b6f2e | refs/heads/master | 2020-06-11T02:33:27.939830 | 2020-04-27T01:32:37 | 2020-04-27T01:32:37 | 193,827,088 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | import sys
# NOTE(review): shadows the builtin input() with a faster raw stdin reader —
# a common competitive-programming idiom, but the trailing '\n' is kept.
input = sys.stdin.readline
from operator import itemgetter
sys.setrecursionlimit(10000000)
INF = 10**30  # sentinel "infinity" for distance comparisons
from collections import deque
def main():
    """AtCoder ABC146-D style tree edge-coloring via BFS.

    Reads n and n-1 edges from stdin, roots the tree at a maximum-degree
    vertex and walks it breadth-first assigning edge colors.
    NOTE(review): this looks like work-in-progress — `color` is reset inside
    the inner loop and the final `print(G[i])` lines are debug output, so the
    result is not in contest-submission form.
    """
    n = int(input().strip())
    G = [[] for _ in range(n)]  # adjacency: G[v] = [[neighbor, color], ...]
    A = [0] * n
    B = [0] * n
    for i in range(n-1):
        a, b = list(map(int, input().strip().split()))
        A[i] = a
        B[i] = b
        G[a-1].append([b-1, 0])
        G[b-1].append([a-1, 0])
    # pick a vertex of maximum degree as the BFS root
    root = 0
    mlen = len(G[0])
    for i in range(n):
        if mlen < len(G[i]):
            mlen = len(G[i])
            root = i
    nodes = [0] * n
    q = deque()
    q.append((root, -1))  # (vertex, parent) pairs; -1 marks the root
    while len(q) > 0:
        v, fr = q.popleft()
        for y, (w, _) in enumerate(G[v]):
            color = 1
            if w != fr:
                if fr == -1:
                    print(v, w)
                    G[v][y][1] = color
                elif G[v][fr][1] != color:
                    G[v][y][1] = color
                else:
                    color += 1
                    G[v][w][1] = color
                q.append((w, v))
            color += 1
    # debug dump of the colored adjacency lists
    for i in range(n):
        print(G[i])
if __name__ == '__main__':
    main()
| [
"teru0x01.sheep@gmail.com"
] | teru0x01.sheep@gmail.com |
d9f422cf19d6d09ad347224d4ef2fb341fc3defe | 5b582d507ae8fe911dac543f29c4b9907a4845bb | /Python_2/1_5_8.py | 9124d6d51179721cfbd7764bc20c3d305d90e7b8 | [] | no_license | CerberusMinsk/Python_Learn | 318cc4fedaba5ce41a6a2b3a5304c68d239b73a6 | 65b33f6f2486cc10084fbe8173143181e573553e | refs/heads/master | 2021-01-12T11:06:32.953797 | 2017-09-15T15:04:22 | 2017-09-15T15:04:22 | 72,825,486 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
class MoneyBox:
    """A piggy bank that holds at most `capacity` coins."""

    def __init__(self, capacity):
        # capacity: maximum number of coins the box can ever hold
        self.capacity = capacity
        # volume: coins currently inside (starts empty)
        self.volume = 0

    def can_add(self, v):
        """Return True if `v` more coins still fit into the box."""
        return self.volume + v <= self.capacity

    def add(self, v):
        """Deposit `v` coins if they fit; otherwise leave the box unchanged."""
        if self.can_add(v):
            self.volume += v
| [
"cerberus.minsk@gmail.com"
] | cerberus.minsk@gmail.com |
0ddd486216234df750fe39cc5c58fe651f07a5ce | a1f081bf91cb3e37d2946dcf16e4dfeec366890e | /HW7/test/test_examples.py | b66db0c08b3103b7c16a6ef7c51e86437d762f41 | [
"MIT"
] | permissive | vishakayadav/CSC591_ASE_Assignments | 12badbaa1e347f60358078cc19e5460c5d60b609 | ffb5161934f6914b4c66194adbadf631a6bbe40c | refs/heads/main | 2023-04-09T20:23:23.406918 | 2023-03-20T23:19:43 | 2023-03-20T23:19:43 | 590,268,577 | 0 | 1 | MIT | 2023-03-20T23:10:48 | 2023-01-18T02:28:12 | HTML | UTF-8 | Python | false | false | 4,616 | py | from HW7.src.utils import *
import numpy as np
def test_ok(n=1):
    # re-seed the module-level RNG so the demos below are reproducible
    random.seed(n)
def test_sample():
    # print 10 bootstrap resamples of a 5-letter list
    # (samples() presumably returns a dict — TODO confirm against utils)
    for i in range(10):
        print('', ''.join(samples(['a', 'b', 'c', 'd', 'e']).values()))
def test_num():
    # incrementally add 1..10 to a NUM and print its count / mean / stdev
    n = NUM()
    for i in range(1, 11):
        n.add(i)
    print('', n.n, n.mu, n.sd)
def test_gauss():
    # feed 10^4 draws from N(10, 2) into a NUM; printed mu/sd should be ~10/~2
    t = []
    for i in range(10 ** 4):
        t.append(gaussian(10, 2))
    n = NUM()
    for i in t:
        n.add(i)
    print('', n.n, n.mu, n.sd)
def test_bootmu():
    # compare cliffs-delta and bootstrap verdicts as one sample's mean drifts
    # from 10.0 to 11.0; both should flip from "same" to "different" somewhere
    a = [gaussian(10, 1) for i in range(100)]
    print('', '\t', 'mu', '\t', 'sd', '\t', 'cliffs', 'boot', 'both')
    print('', '\t', '--', '\t', '--', '\t', '------', '----', '----')
    for mu in np.arange(10, 11.1, 0.1):
        b = [gaussian(mu, 1) for i in range(100)]
        cl = cliffs_delta(a, b)
        bs = bootstrap(a, b)
        print('', '\t', round(mu, 2), '\t', 1, '\t', cl, bs, cl and bs)
def test_basic():
    # sanity checks: identical samples should compare "True" (same),
    # clearly shifted samples should compare "False" (different)
    print("\t\tTrue", bootstrap([8, 7, 6, 2, 5, 8, 7, 3],
                                [8, 7, 6, 2, 5, 8, 7, 3]),
          cliffs_delta([8, 7, 6, 2, 5, 8, 7, 3],
                       [8, 7, 6, 2, 5, 8, 7, 3]))
    print("\t\tFalse", bootstrap([8, 7, 6, 2, 5, 8, 7, 3],
                                 [9, 9, 7, 8, 10, 9, 6]),
          cliffs_delta([8, 7, 6, 2, 5, 8, 7, 3],
                       [9, 9, 7, 8, 10, 9, 6]))
    print("\t\tFalse",
          bootstrap([0.34, 0.49, 0.51, 0.6, .34, .49, .51, .6],
                    [0.6, 0.7, 0.8, 0.9, .6, .7, .8, .9]),
          cliffs_delta([0.34, 0.49, 0.51, 0.6, .34, .49, .51, .6],
                       [0.6, 0.7, 0.8, 0.9, .6, .7, .8, .9]))
def test_pre():
    # bootstrap should report "same" for d near 1 and "different" as the
    # second sample's mean scales away; the last column is a self-comparison
    # control that should always be True
    print('\neg3')
    d = 1
    for i in range(10):
        t1, t2 = [], []
        for j in range(32):
            t1.append(gaussian(10, 1))
            t2.append(gaussian(d * 10, 1))
        print('\t', d, '\t', d < 1.1, '\t', bootstrap(t1, t2), '\t', bootstrap(t1, t1))
        d = round(d + 0.05, 2)
def test_five():
    # Scott-Knott ranking of five small treatment samples; prints name,
    # assigned rank, and the quartile "tile" visual for each
    for rx in tiles(scott_knot(
            [RX([0.34, 0.49, 0.51, 0.6, .34, .49, .51, .6], 'rx1'),
             RX([0.6, 0.7, 0.8, 0.9, .6, .7, .8, .9], 'rx2'),
             RX([0.15, 0.25, 0.4, 0.35, 0.15, 0.25, 0.4, 0.35], 'rx3'),
             RX([0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9], 'rx4'),
             RX([0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4], 'rx5')])):
        print(rx['name'], rx['rank'], rx['show'])
def test_six():
    # Scott-Knott on four near-identical samples: most should share one rank
    for rx in tiles(scott_knot(
            [RX([101, 100, 99, 101, 99.5, 101, 100, 99, 101, 99.5], 'rx1'),
             RX([101, 100, 99, 101, 100, 101, 100, 99, 101, 100], 'rx2'),
             RX([101, 100, 99.5, 101, 99, 101, 100, 99.5, 101, 99], 'rx3'),
             RX([101, 100, 99, 101, 100, 101, 100, 99, 101, 100], 'rx4')])):
        print(rx['name'], rx['rank'], rx['show'])
def test_tiles():
    """Print quartile 'tiles' for ten 1000-draw Gaussian samples, sorted by median.

    Rewritten data-driven: the original spelled out ten near-identical fill
    loops and reused the name `k` both for the last sample list and as the
    enumerate loop variable (shadowing). The (mu, sd) specs below reproduce
    the original samples in the same RNG draw order, so output is unchanged
    for a given seed.
    """
    specs = [(10, 1), (10.1, 1), (20, 1), (30, 1), (30.1, 1),
             (10, 1), (10, 1), (40, 1), (40, 3), (10, 1)]
    rxs = [RX([gaussian(mu, sd) for _ in range(1000)], 'rx' + str(i + 1))
           for i, (mu, sd) in enumerate(specs)]
    rxs.sort(key=lambda x: mid(x))
    for rx in tiles(rxs):
        print('', rx['name'], rx['show'])
def test_sk():
rxs, a, b, c, d, e, f, g, h, j, k = [], [], [], [], [], [], [], [], [], [], []
for _ in range(1000):
a.append(gaussian(10, 1))
for _ in range(1000):
b.append(gaussian(10.1, 1))
for _ in range(1000):
c.append(gaussian(20, 1))
for _ in range(1000):
d.append(gaussian(30, 1))
for _ in range(1000):
e.append(gaussian(30.1, 1))
for _ in range(1000):
f.append(gaussian(10, 1))
for _ in range(1000):
g.append(gaussian(10, 1))
for _ in range(1000):
h.append(gaussian(40, 1))
for _ in range(1000):
j.append(gaussian(40, 3))
for _ in range(1000):
k.append(gaussian(10, 1))
for k, v in enumerate([a, b, c, d, e, f, g, h, j, k]):
rxs.append(RX(v, 'rx' + str(k + 1)))
for rx in tiles(scott_knot(rxs)):
print('', rx['rank'], rx['name'], rx['show'])
| [
"vyadav@ncsu.edu"
] | vyadav@ncsu.edu |
2bbb84a7139f0d3c5634d43a3ab1e5067fdf0278 | d78a04f28c7fed7c473218f0ada506ba9bc07f2b | /tensorflow-variables.py | 4190137fa124b6de8b728215d130876419e4e34d | [] | no_license | zhongfc/tf | 8c9a9b74822a1c1888032a607a7c49427daac6ef | 1bcc57277c6641e19c4955569416b0ff862df45a | refs/heads/master | 2021-05-07T05:49:57.681347 | 2017-11-21T13:53:16 | 2017-11-21T13:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 00:03:01 2016
@author: hy
"""
# 创建一个变量, 初始化为标量 0
state=tf.Variable(0,name="counter")
# 创建一个 op, 其作用是使 state 增加 1
one=tf.constant(1)
new_value=tf.add(state,one)
update=tf.assign(state,new_value)
# 启动图后, 变量必须先经过`初始化` (init) op 初始化
# 首先必须增加一个`初始化` op 到图中.
init_op=tf.initialize_all_variables()
# 启动图, 运行 op
with tf.Session() as sess:
sess.run(init_op)
print sess.run(state)
for _ in range(3):
sess.run(update)
print sess.run(state)
#代码中assign()操作是图所描绘的表达式的一部分, 正如add()操作一样. 所以在调用
#run()执行表达式之前, 它并不会真正执行赋值操作 | [
"hanqiangfei@163.com"
] | hanqiangfei@163.com |
b7d4e2c3208369a33f66dab709d12abfb24906ab | 599ce6e2cac0e806f9286b7f0fdc12685950f03d | /CloudMeasurement/experiments/__init__.py | 0a369bf0dd95e116e661ab81b42a932bcf464fec | [
"MIT"
] | permissive | Giuseppe1992/CloudTrace | dfec79c70dd33e799ad0eadee77ef24fbc3ae47e | 6de62ddf301c7b733f86471705903feb1ef038c2 | refs/heads/master | 2023-01-20T22:33:44.194190 | 2020-12-01T08:46:13 | 2020-12-01T08:46:13 | 285,240,897 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from CloudMeasurement.experiments.ansibleConfiguration import InventoryConfiguration
| [
"giuseppedilena92@gmail.com"
] | giuseppedilena92@gmail.com |
c65dcc65317d2919617e792993720fc2b8a00c80 | 6112755e88f4a8d98903cf41d428c417db175506 | /workflow/python/covid19_scrapers/states/florida_county.py | a0f40890f3a62252eaefdeb4889e9a1b8d653c35 | [] | no_license | mariedata360/COVID19_tracker_data_extraction | e674f700832b3759a65429c8a8f45ab825577896 | 339feb3c27411aa1887946a8bc725300665d235a | refs/heads/master | 2023-01-23T06:25:16.028536 | 2020-11-28T00:24:53 | 2020-11-28T00:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,064 | py | import logging
from numpy import nan
from covid19_scrapers.census import get_aa_pop_stats
from covid19_scrapers.scraper import ScraperBase
from covid19_scrapers.utils.arcgis import query_geoservice
from covid19_scrapers.utils.misc import to_percentage
def _make_florida_county_scraper(
db_name,
census_name,
camel_case_name,
snake_case_name):
"""Return a ScraperBase subclass that retrieves county level
information for a Florida county. This retrieves data from state
dashboard, which does not include demographic breakdowns for
deaths.
It also creates a descriptive logger name.
Arguments:
db_name: the county name to use in the `where` clause of the
ArcGIS query. E.g., 'DADE' for Miami-Dade county.
census_name: the county name to use in the scraper output in
"Florida -- {} County". This MUST match the Census name for
the county without the "County" suffix. You can check these
names at
https://api.census.gov/data/2018/acs/acs5?get=NAME&for=county:*&in=state:12
E.g., 'Miami-Dade' for Miami-Dade county.
camel_case_name: the camel-case suffix to use in the class name,
"Florida{camel_case_name}". E.g., 'MiamiDade' for Miami-Dade
county.
snake_case_name: the snake-case suffix to use in the logger
name, "florida_{snake_case_name}". E.g., 'miami_dade' for
Miami-Dade county.
"""
_logger = logging.getLogger(
__name__.replace('_county', f'_{snake_case_name}'))
class FloridaCounty(ScraperBase):
"""Florida has an ArcGIS dashboard at
https://experience.arcgis.com/experience/c2ef4a4fcbe5458fbf2e48a21e4fece9
which includes county-level data, though with no demographic
breakdown for deaths.
We call the underlying FeatureServer to populate our data.
"""
DEMOG = dict(
flc_url='https://services1.arcgis.com/CY1LXxl9zlJeBuRZ/arcgis/rest/services/Florida_COVID19_Cases/FeatureServer',
layer_name='Florida_COVID_Cases',
where=f"COUNTYNAME='{db_name}'",
)
def __init__(self, *, home_dir, census_api, **kwargs):
super().__init__(home_dir=home_dir, census_api=census_api,
**kwargs)
def name(self):
return f'Florida -- {census_name} County'
def _get_aa_pop_stats(self):
return get_aa_pop_stats(self.census_api, 'Florida',
county=census_name)
def _scrape(self, **kwargs):
date, data = query_geoservice(**self.DEMOG)
_logger.info(f'Processing data for {date}')
total_cases = data.loc[0, 'CasesAll']
known_cases = total_cases - data.loc[0, 'C_RaceUnknown']
aa_cases = data.loc[0, 'C_RaceBlack']
pct_aa_cases = to_percentage(aa_cases, known_cases)
total_deaths = data.loc[0, 'Deaths']
# Does not include demographic breakdown of deaths
known_deaths = nan
aa_deaths = nan
pct_aa_deaths = nan
return [self._make_series(
date=date,
cases=total_cases,
deaths=total_deaths,
aa_cases=aa_cases,
aa_deaths=aa_deaths,
pct_aa_cases=pct_aa_cases,
pct_aa_deaths=pct_aa_deaths,
pct_includes_unknown_race=False,
pct_includes_hispanic_black=True,
known_race_cases=known_cases,
known_race_deaths=known_deaths,
)]
FloridaCounty.__name__ = f'Florida{camel_case_name}'
return FloridaCounty
# Create classes for the desired counties
FloridaMiamiDade = _make_florida_county_scraper('DADE', 'Miami-Dade',
'MiamiDade', 'miami_dade')
FloridaOrange = _make_florida_county_scraper('ORANGE', 'Orange',
'Orange', 'orange')
| [
"natarajan@krishnaswami.org"
] | natarajan@krishnaswami.org |
123955e5a128449b12ae0ef7cf22e798a08fc826 | 24034b8bfcf8d28c157ac2af0fb5d3426b8d7e91 | /Scripts/COVID_analysis_scripts/analyze.py | b799288cf1464800f0f953141db42bd4c399d86e | [
"MIT"
] | permissive | vvoelz/covid-FAH-CPU | 4485acccbbf29b45dca796d5a621bba8e6c1d928 | 1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5 | refs/heads/master | 2021-03-10T17:26:07.990424 | 2020-09-28T19:00:24 | 2020-09-28T19:00:24 | 246,470,324 | 1 | 0 | MIT | 2020-05-16T06:29:29 | 2020-03-11T03:59:48 | Jupyter Notebook | UTF-8 | Python | false | false | 7,143 | py | #!/usr/bin/env python
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import tqdm, sys, glob, datetime, os
import re
# in columns = [description project run clone gen wl_increment lam_0 ... lam38 free energy]
# out columns [run ns_RL ns_L wl_increment_RL wl_increment_L free_energy L39..L0 RL0..R39]
# Sample input: Ligset_start#_end# < this is the description variable
max_gen = True
data = []
description = sys.argv[1]
desc = re.split(r'[_-]', f'{description}')
data_RL = pd.read_pickle(max(glob.glob(f'{desc[0]}_RL_{desc[1]}*{desc[2]}*.pkl'), key=os.path.getctime ))
data_L = pd.read_pickle(max(glob.glob(f'{desc[0]}_L_{desc[1]}*{desc[2]}*.pkl'), key=os.path.getctime))
lambdas = 40
RL_gen_cutoff = 10 # only process clones that have more than N ns of sampling
L_gen_cutoff = 5
wl_increment_cutoff = 0.15 # only plot ligands with avg_RL_WL_increment < N
skip_positive_G = True
columns = ['run', 'ns_RL', 'ns_L', 'wl_increment_RL', 'wl_increment_L', 'free_energy'] + [f'L{lam}' for lam in reversed(range(lambdas))] + [f'RL{lam}' for lam in range(lambdas)]
runs = min([data_RL['run'].max(), data_L['run'].max()]) + 1 # only process runs that have data for both RL and L (this can be done better)
print(data_RL)
print(data_L)
data, errors = [],[]
for run in tqdm.tqdm(range(runs)):
run_RL = data_RL.loc[data_RL['run'] == run]
run_L = data_L.loc[data_L['run'] == run]
# process L systems
energies_L, wl_increment_L, ns_L = [],[],[]
for clone_L in set(run_L['clone'].values):
try:
clone_L_data = run_L.loc[run_L['clone'] == clone_L]
gen_L = clone_L_data.loc[clone_L_data['gen'] == clone_L_data['gen'].max()]
if clone_L_data['gen'].max() < L_gen_cutoff: # we skip clones with fewer than N gens
continue
wl_increment_L.append(gen_L['wl_increment'].values[0])
ns_L.append(int(gen_L['gen'].values[0]))
raw_energies = gen_L[gen_L.columns[-lambdas:]].values[0]
energies_L.append([float(x) - raw_energies[0] for x in raw_energies])
except Exception as e:
print(f'L Exception: {run}, {clone_L}, {e}')
continue
try:
avg_WL_increment_L = np.average(wl_increment_L)
std_WL_increment_L = np.std(wl_increment_L)
avg_ns_L = np.average(ns_L)
std_ns_L = np.std(ns_L)
avg_L_energies = [np.average(np.asarray(energies_L)[:,lam]) for lam in range(lambdas)]
std_L_energies = [np.std(np.asarray(energies_L)[:,lam]) for lam in range(lambdas)]
except Exception as e:
continue
# process RL systems
energies_RL, wl_increment_RL, ns_RL = [],[],[]
for clone_RL in set(run_RL['clone'].values):
try:
clone_RL_data = run_RL.loc[run_RL['clone'] == clone_RL]
gen_RL = clone_RL_data.loc[clone_RL_data['gen'] == clone_RL_data['gen'].max()]
if clone_RL_data['gen'].max() < RL_gen_cutoff: # we skip clones with fewer than N gens
continue
wl_increment_RL.append(gen_RL['wl_increment'].values[0])
ns_RL.append(int(gen_RL['gen'].values[0]))
raw_energies = gen_RL[gen_RL.columns[-lambdas:]].values[0]
energies_RL.append([float(x) - raw_energies[0] for x in raw_energies])
except Exception as e:
print(f'RL Exception: {run}, {clone_RL}, {e}')
continue
try:
avg_WL_increment_RL = np.average(wl_increment_RL)
std_WL_increment_RL = np.std(wl_increment_RL)
avg_ns_RL = np.average(ns_RL)
std_ns_RL = np.std(ns_RL)
avg_RL_energies = [np.average(np.asarray(energies_RL)[:,lam]) for lam in range(lambdas)]
std_RL_energies = [np.std(np.asarray(energies_RL)[:,lam]) for lam in range(lambdas)]
avg_free_energy = avg_RL_energies[-1] - avg_L_energies[-1]
std_free_energy = np.sum(std_RL_energies + std_L_energies)
data.append([run, avg_ns_RL, avg_ns_L, avg_WL_increment_RL, avg_WL_increment_L, avg_free_energy] + list(reversed(avg_L_energies)) + list(avg_RL_energies))
errors.append([run, std_ns_RL, std_ns_L, std_WL_increment_RL, std_WL_increment_L, std_free_energy] + list(reversed(std_L_energies)) + list(std_RL_energies))
except Exception as e:
continue
results = pd.DataFrame(data, columns=columns)
errors = pd.DataFrame(errors, columns=columns)
results.to_pickle(f'results_{description}.pkl')
errors.to_pickle(f'errors_{description}.pkl')
good_results = results.loc[results['wl_increment_RL'] < wl_increment_cutoff]
good_results_errors = errors.loc[results['wl_increment_RL'] < wl_increment_cutoff]
if skip_positive_G:
good_results_errors = good_results_errors.loc[good_results['free_energy'] < 0]
good_results = good_results.loc[good_results['free_energy'] < 0]
print(f'*** Results based on RL sampling > {RL_gen_cutoff} ns, L sampling > {L_gen_cutoff} ns, and a RL WL-increment < {wl_increment_cutoff}:')
print(good_results)
### example plots
for run in good_results['run'].values:
try:
result = good_results.loc[good_results['run'] == run]
result_error = good_results_errors.loc[good_results['run'] == run]
summary = list(result[result.columns[:6]].values[0]) # run ns_RL, ns_L, wl_increment_RL, wl_increment_L
summary_error = list(result_error[result_error.columns[:6]].values[0])
energies = result[result.columns[6:]].values[0]
energies = [x - energies[0] for x in energies] # set first point to 0
energy_errors = result_error[result_error.columns[6:]].values[0]
energy_errors = [sum(energy_errors[0:x + 1]) for x in range(len(energy_errors))]
print(f"Plotting RUN{run}\nRL_WL_increment: {result['wl_increment_RL'].values[0]:.3f}\nΔG_unb = {result['free_energy'].values[0]:.3f}±{summary_error[-1]:.3f}kT\n")
plt.scatter(range(len(energies)), energies)
plt.plot(range(len(energies)), energies)
plt.errorbar(range(len(energies)), energies, yerr=energy_errors)
label = f'RL/L: RUN{run} {summary[1]}/{summary[2]:.3f}ns {summary[3]}/{summary[4]:.3f}WL, ΔG = {summary[-1]:.3f}±{summary_error[-1]:.2f}kT'
plt.annotate(label, xy = (range(len(energies))[-1], energies[-1]), xytext = (-15, 15),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
plt.title(f'desc[0]_{run}: RL > {RL_gen_cutoff} ns, L > {L_gen_cutoff} ns, and RL-WLI < {wl_increment_cutoff:.3f}') #_RUN{summary[0]}: ΔG = {summary[-1]:.3f} ± {summary_error[-1]:.3f}kT')
plt.xlabel('Ligand Decoupling --> Receptor-Ligand Coupling')
plt.ylabel('Free Energy (kT)')
date = datetime.datetime.now()
ts = date.strftime('%d%b%Y_%H-%M')
plt.savefig(f'FEP_{run}_{ts}.png')
plt.close()
except Exception as e:
print(f'Exception in computing energies/errors and plotting: {e}')
continue
| [
"vvoelz@gmail.com"
] | vvoelz@gmail.com |
ee0e4651b921eb5a380de36e12dbab85f7e75499 | 26dafce4bcedcc8ed5e1f2373ecf0ff9a2d696b5 | /11.07.2020 Snippets/2 functions.py | 252115868e1eaf525bb6d88421ed11c5870f18a7 | [] | no_license | skilldisk/Django-Webinar-July-2020 | 65d179c299772c5f5b9998f3a6a81ed63be89093 | 6064443dec9da601be627d676cc889aab46591d8 | refs/heads/master | 2022-11-13T11:46:59.285487 | 2020-07-12T10:21:39 | 2020-07-12T10:21:39 | 278,814,242 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | ############# FUNCTIONS or MODULES #############
# keyword def is used to define a function
# def function_name( input ):
# Arithematic and logic operations
# return output
# without input arguments and return statement
def printing():
print("printing function is called...")
# with input arguments and without return statement
def adding(x, y):
print(x+y)
def welcome(name):
print('Welcome to the ', name)
# with input arguments and return statement
def add(x, y):
print('add function is called with two inputs')
return x+y | [
"noreply@github.com"
] | noreply@github.com |
47e07a891b85048ad8ac4715d1d82df331a38286 | d87eb736f3c4b1adce44ad4c87e26e771863db1b | /src/prodctrlcore/io/jobfile.py | a69f8bc8bf2fffa3a42b8579e628659fba89216b | [
"BSD-3-Clause"
] | permissive | paddymills/prodctrlcore | 4bc0e98219fd973736829351400c49abc2d2f278 | 9a03d3e562ca0e60c5f7e87f7789f025207fa22d | refs/heads/master | 2022-11-08T17:40:09.435972 | 2020-06-19T15:36:24 | 2020-06-19T15:36:24 | 266,133,055 | 0 | 0 | BSD-3-Clause | 2020-06-04T18:57:30 | 2020-05-22T14:39:24 | Python | UTF-8 | Python | false | false | 5,150 | py |
from os import makedirs
from os.path import join, exists
from xlwings import Book, Sheet
from re import compile as regex
from . import HeaderParser, ParsedRow
JOBSHIP_RE = regex(
r"1?(?P<year>\d{2})(?P<id>\d{4})(?P<structure>[a-zA-Z]?)-?(?P<shipment>\d{0,2})")
class JobParser:
def __init__(self, job, shipment=1, assign_to=None):
match = JOBSHIP_RE.match(job)
if match is None:
raise ValueError(
"[{}] does not match expected pattern".format(job))
groups = match.groupdict()
self.job = '1{year}{id}{structure}'.format(**groups)
self.job_year = '20{year}'.format(**groups)
self.shipment = int(groups['shipment'] or shipment)
# add to other objects attributes
if assign_to:
assign_to.__dict__.update(self.__dict__)
class JobBookReader(Book):
"""
Excel Book reader for jobs that are stored by year
i.e. directory > 2020 > Job-Shipment.xls
if file does not exist,
template file will be created and saved in place
"""
def __init__(self, job, shipment=None, **kwargs):
JobParser(job, shipment, assign_to=self)
self.job_shipment = '{}-{}'.format(self.job, self.shipment)
self.proper_job_shipment = '{}-{:0>2}'.format(self.job, self.shipment)
self.folder_suffix = kwargs.get('folder_suffix', '')
self.file_suffix = kwargs.get('file_suffix', '')
self.root_dir = kwargs.get('directory')
self.template = join(self.root_dir, kwargs.get('template'))
if exists(self.file):
self.__init_file__(self.file, **kwargs)
else:
self.__init_file__(self.template, **kwargs)
if not exists(self.year_folder):
makedirs(self.year_folder)
self.save(self.file)
def __init_file__(self, file, **kwargs):
super().__init__(file, **kwargs)
@property
def year_folder(self):
return join(self.root_dir, self.job_year + self.folder_suffix)
@property
def file(self):
xl_file = '{}{}.xls'.format(self.job_shipment, self.file_suffix)
return join(self.root_dir, self.year_folder, xl_file)
def sheet(self, sheet_name, **kwargs):
sheet = self.sheets[sheet_name].impl
return JobSheetReader(impl=sheet, **kwargs)
class JobSheetReader(Sheet):
def __init__(self, sheet=None, **kwargs):
if 'impl' in kwargs:
super().__init__(impl=kwargs['impl'])
else:
super().__init__(sheet)
if 'header_range' in kwargs:
self.set_header(**kwargs)
else:
self.header = HeaderParser(sheet=self, **kwargs)
self.first_data_row = 2
def set_header(self, header_range, first_data_row=0):
self.first_data_row = first_data_row
if type(header_range) is not list:
header_rng = [header_rng]
_header = list()
for range in header_range:
_rng = self.range(range)
_header.extend(_rng.value)
if self.first_data_row <= _rng.last_cell.row:
self.first_data_row = _rng.last_cell.row + 1
self.header = HeaderParser(header=_header)
def _data_range(self):
start = (self.first_data_row, self.min_col)
end = (self.first_data_row, self.max_col)
return self.range(start, end).expand('down')
@property
def min_col(self):
return min(self.header.indexes.values())
@property
def max_col(self):
return max(self.header.indexes.values())
def get_rows(self):
return self._data_range().value
def iter_rows(self):
for row in self.get_rows():
yield self.header.parse_row(row)
def add_row(self, row=None, compare_cols=list(), **kwargs):
if type(row) is ParsedRow:
new_row = row
else:
new_row = self.construct_row(row, **kwargs)
# in case using compare_cols (only need to do this one once)
new_cols = map(new_row.get_item, compare_cols)
# check if row already exists
for self_row in self.iter_rows():
# compares selected columns to compare
if compare_cols:
self_cols = map(self_row.get_item, compare_cols)
if new_cols == self_cols:
break
# compares all indexed columns
elif self_row == new_row:
break
# row not in sheet
else:
row_index = self._data_range().last_cell.row + 1
col_index = self._data_range().column
self.range(row_index, col_index).value = new_row._data
def construct_row(self, row=None, **kwargs):
if row:
return ParsedRow(row, self.header)
# construct row from scratch
max_index = max(self.header.indexes.values())
blanks = [None] * (max_index + 1)
row = ParsedRow(blanks, self.header)
for key, value in kwargs.items():
index = self.header.get_index(key)
row[index] = value
return row
| [
"pmiller1389@gmail.com"
] | pmiller1389@gmail.com |
706c94c764280353805efea95a0a8c385c2881ba | bbea12c36c776054a6984e8f51b10fc5161b9e50 | /portfolio/urls.py | 394ff586c4e000ab790941cef73185ad78fb2eb1 | [] | no_license | mgrego03/My-portfolio-project | 19ee4e29cc7878079217e1640acd974a6303a71a | 98dffa365689c5e5635e63c195104824b36dea9f | refs/heads/master | 2023-02-23T11:47:42.702091 | 2021-01-29T01:09:00 | 2021-01-29T01:09:00 | 280,747,405 | 0 | 0 | null | 2020-11-01T20:14:15 | 2020-07-18T22:06:17 | Python | UTF-8 | Python | false | false | 524 | py |
from django.contrib import admin
from django.urls import path
from . import views
app_name = 'portfolio'
urlpatterns = [
path('', views.home , name = 'home') ,
# to add a new skill from front end
path('add/', views.create_item , name='create_item' ) ,
# to update the skills from the front end: update/id of the skill
path('update/<int:id>/' , views.update_skill , name ='update_skill') ,
# to delete a skill
path('delete/<int:id>/' , views.delete_skill , name = 'delete_skill') ,
]
| [
"mathewsgregoryny@gmail.com"
] | mathewsgregoryny@gmail.com |
32c7383ff65f299a4aebd1a4b97591f65d0a7f78 | 3761f91d9325f48dcb1b55ce94faea6426098d5c | /mysite/settings.py | 79f40cab4383dbe9bbf75231d68fabc0e0c8018b | [] | no_license | rachel5/django-blog | 338e0e9d4cd51cf9e35a44d600d661fedbb2d023 | bf740fcf80056f41d8d49e12c53cf40971e4602b | refs/heads/master | 2020-03-28T02:18:07.546607 | 2019-02-06T00:03:42 | 2019-02-06T00:03:42 | 147,561,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2owrr7cyjfbbz^ig@(gd*8z0nngpx&&^6k2cu+i9psh+$8sm3p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Authentication
LOGIN_REDIRECT_URL = '/'
| [
"hello@sewlastweek.com"
] | hello@sewlastweek.com |
7949c740eaa9b57dd83de1b6cea5fa7fc4c7407e | 6b6e43393863eaa792d04e01fec6358f35ffd797 | /renamepath.py | e7681ff5d6b2dc1d299a81d70f7aa312ec17371d | [] | no_license | VolkerH/deconvolution | 02c5883b084d5645ae119a36a4f6a43e30dac129 | 0ef0bd3975887506e05b1eb9b6a02538fe30f8c1 | refs/heads/master | 2020-04-21T22:27:19.031007 | 2018-10-25T15:56:53 | 2018-10-25T15:56:53 | 169,911,423 | 1 | 1 | null | 2019-02-09T20:40:54 | 2019-02-09T20:40:54 | null | UTF-8 | Python | false | false | 992 | py | import os
def filelist(thefolder):
count = 0
file_list = []
for filename in os.listdir(thefolder):
path = os.path.join(thefolder, filename)
if path[-4:] == '.tif':
count += 1
file_list.append(path)
#file_list.append(count)
return count, file_list
def renamefile(folder):
file_list_to_rename = filelist(folder)[1]
for file in file_list_to_rename:
if 'testtile1' in file:
folderpath = file.split('/')
items = folderpath[-1].split('_')
stagenumber = int(items[3][5:]) - 1
stagenumberformat = '0' + str(stagenumber)
newname = items[1] + ' ' + items[2] + '--' + 'Stage' + stagenumberformat[-2:] + '--' + items[5][-7:-4] + '--' + 'C' + items[5][2:4] + '.tif'
newpath = '/'.join(folderpath[:-1]) + '/' + newname
os.rename(file, newpath)
if __name__ == '__main__':
folder = '/home/hao/Desktop/try/try1try'
renamefile(folder)
| [
"noreply@github.com"
] | noreply@github.com |
8cb7b5950ca553d80eaa6fa4938e1ce2c27a0b9b | 5e8403fdcc45fc746f5a245d43c94347bc4a460f | /Normalize in length/normalize.py | 9f8c1a5532ff642476625dd6e65df01425246021 | [] | no_license | bandiandras/disszertacio | 79ea8da4c3849b253bf984d8918490b8ee49fa34 | e6988f99cea4bf04ed9a52b4668373c27bcdbb13 | refs/heads/master | 2021-05-22T14:17:57.529509 | 2020-06-24T19:40:49 | 2020-06-24T19:40:49 | 252,959,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,089 | py | import sys
import os
import random
sys.path.append(os.path.abspath("Model/"))
from settings import *
from utils import *
from Model.point import Point
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
#returns a list of files from the directory given in the dirName parameter (files in nested directories as well)
def getListOfFiles(dirName):
# create a list of file and sub directories
# names in the given directory
listOfFile = os.listdir(dirName)
allFiles = list()
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
if fullPath.endswith('.csv'):
allFiles.append(fullPath)
return allFiles
#read CSV file to dataframe and return the values
def readCSVToDataframe(filepath):
dataframe = read_csv(filepath)
return dataframe.values
def returnArraysOfXandY(dataframe):
xCoords = list()
yCoords = list()
for entry in dataframe:
xCoords.append(entry.x)
yCoords.append(entry.y)
return np.asarray(xCoords), np.asarray(yCoords)
def returnStructureOfXYP(dataframe):
sig = list()
for entry in dataframe:
point = Point()
point.x = entry[0]
point.y = entry[1]
# for MCYT
# point.p = entry[2]
# for MOBISIG
point.p = entry[3]
sig.append(point)
return sig
def resampleSignature(sig, targetLength):
if (len(sig) > targetLength):
sig = downSampleSignature(sig, targetLength)
else:
sig = upSampleSignature(sig, targetLength)
return sig
#upsample: insert zeros at the end
#downsample: truncate data
def resampleSignature2(sig, targetLength):
if (len(sig) > targetLength):
sig = truncateSignature(sig, targetLength)
else:
sig = insertZeros(sig, targetLength)
return sig
# insert coordinates based on previous and next element, until the desired length is reached
def insertElements (sig, targetLength):
while (len(sig) < targetLength):
for i in (1, len(sig) - 1):
newX = (sig[i-1].x + sig[i].x)/2
newY = (sig[i-1].y + sig[i].y)/2
newP = (sig[i-1].p + sig[i].p)/2
newPoint = Point()
newPoint.x = newX
newPoint.y = newY
newPoint.p = newP
sig = np.insert(sig, i, newPoint)
if(len(sig)== targetLength):
break
return sig
def upSampleSignature(sig, targetLength):
sig = insertElements(sig, targetLength)
return sig
def insertZeros(sig, targetLength):
while (len(sig) < targetLength):
p = Point()
p.x = 0
p.y = 0
p.p = 0
sig.append(p)
return sig
def truncateSignature(sig, targetLength):
return sig[0 : targetLength]
#check actual and target length, accordgin to that, remove every n-th element of the array, until targetLength is reached
#determine n
#possible improvement: multiple smaller downsampleings
def downSampleSignatureOld(sig, targetLength):
while len(sig) > targetLength:
i = 1
while i < len(sig):
if (len(sig) > targetLength - 2):
del sig[i]
i = i + 2
else:
break
return sig
def downSampleSignature(sig, targetLength):
while len(sig) > targetLength:
del sig[random.randint(0, len(sig)-1)]
return sig
def main():
# Get the list of all files in directory tree at given path
listOfFiles = getListOfFiles(DATASET_PATH)
loaded = list()
for name in listOfFiles:
splittedFilename = name.split('\\')
newfoldername = NEW_DATASET_PATH + splittedFilename[len(splittedFilename) - 2]
newFilename = NEW_DATASET_PATH + splittedFilename[len(splittedFilename) - 2] + '\\' + splittedFilename[len(splittedFilename) - 1]
if not os.path.isdir(newfoldername):
os.makedirs(newfoldername)
with open( newFilename, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['x', 'y', 'p'])
data = readCSVToDataframe(name)
xyp = returnStructureOfXYP(data)
# xyp = resampleSignature(xyp, 512)
xyp = resampleSignature2(xyp, N)
for point in xyp:
writer.writerow([point.x, point.y, point.p])
if __name__ == "__main__":
main() | [
"andras_bandi1994@yahoo.com"
] | andras_bandi1994@yahoo.com |
5cd04361f26f5da04f4dd9f697d57ab51f7e0f1d | 66cab93c26cc252f412860778131b208c6f120be | /parts/newproject/webob/acceptparse.py | df3db6b411f45e2366c6215b630c553c1de21ec3 | [] | no_license | marcogarzini/Zodiac | 3332733f6ae8d64924557ff022f44c835aeac0a9 | 06e8ad0c709189dc65a26fb7d6c17a9ee2bc9112 | refs/heads/master | 2016-09-11T03:18:12.805299 | 2014-01-17T12:50:03 | 2014-01-17T12:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | /home/user1/newproject/eggs/WebOb-1.2.3-py2.7.egg/webob/acceptparse.py | [
"user1@user1-VirtualBox.(none)"
] | user1@user1-VirtualBox.(none) |
efa66284d47dc7d43341155eaa34d87506ce3814 | 982344011a248b0f514ffb8d0c87c13f2f2e113e | /day1/conv.py | a03b4d03b507ada57975e15de49aabf5b16b5d56 | [] | no_license | patrickjwolf/CS32_Architecture_GP | 7f6c68de92155b7b9b295f7d35413637cebce45a | 8704add1c6ed1f917451544e1573bed005eaa3ac | refs/heads/master | 2022-12-06T15:26:54.715887 | 2020-08-19T22:45:21 | 2020-08-19T22:45:21 | 288,225,480 | 0 | 0 | null | 2020-08-17T16:05:34 | 2020-08-17T16:05:33 | null | UTF-8 | Python | false | false | 2,567 | py | # In general, the `.format` method is considered more modern than the printf `%`
# operator.
# num = 123
# # Printing a value as decimal
# print(num) # 123
# print("%d" % num) # 123
# print("{:d}".format(num)) # 123
# print(f"{num:d}") # 123
# # Printing a value as hex
# print(hex(num)) # 0x7b
# print("%x" % num) # 7b
# print("%X" % num) # 7B
# print("%04X" % num) # 007B
# print(f"{num:x}") # 7b
# print(f"{num:X}") # 7B
# print(f"{num:04x}") # 007b
# # Printing a value as binary
# print("{:b}".format(num)) # 1111011, format method
"""
take input as a string
1111011
take input string
7b
turn in to a list
[1, 1, 1, 1, 0, 1, 1]
[7, b]
reverse the list
[1, 1, 0, 1, 1, 1, 1]
[b, 7]
multiply each element by its power of 2 respectively
1 * 1
1 * 2
0 * 4
1 * 8
1 * 16
1 * 32
1 * 64
b * 1 => 11
7 * 16 => 112
# taken the numbers and addedthem together
1 + 2 + 0 + 8 + 16 + 32 + 64
3 + 8 + 16 + 32 + 64
11 + 16 + 32 + 64
27 + 32 + 64
59 + 64
11 + 112
# returning a result in decimal
123
123
"""
# # Converting a decimal number in a string to a value
# s = "1234"; # 1234 is 0x4d2
# x = int(s); # Convert base-10 string to value
# # Printing a value as decimal and hex
# print(num) # 1234
# print(f"{num:x}") # 4d2
# # Converting a binary number in a string to a value
# s = "100101" # 0b100101 is 37 is 0x25
# x = int(s, 2) # Convert base-2 string to value
# # Printing a value as decimal and hex
# print(num) # 37
# print(f"{num:x}") # 25
# Conversion Python code:
# string1 = "10101010"
# 1 * 128
# 0 * 64
# 1 * 32
# 0 * 16
# 1 * 8
# 0 * 4
# 1 * 2
# 0 * 1
# reverse_string1 = "01010101"
# loop from 0 -> size of list - 1
# index = 0 -> 7
# base = 2
# index ** base
# 0 ** 2 => 1
# 1 ** 2 => 2
# 2 ** 2 => 4
# 3 ** 2 => 8
# 4 ** 2 => 16
# 5 ** 2 => 32
# 6 ** 2 => 64
# 7 ** 2 => 128
# multiplyer = 1 -> 128
# 0 * multiplyer
# 0 * 1 = 0
# 1 * 2 = 2
# 0 * 4 = 0
# 1 * 8 = 8
# 0 * 16 = 0
# 1 * 32 = 32
# 0 * 64 = 0
# 1 * 128 = 128
# value = 0
# value += 0
# value += 2
# value += 0
# value += 8
# value += 0
# value += 32
# value += 0
# value += 128
# ret value => 170
# [1, 0, 1, 0, 1, 0, 1, 0]
# [0, 1, 0, 1, 0, 1, 0, 1]
# digit_list[i] == 0
# + 0 * 1
# 0
# + 1 * 2
# 2
# 128 + 32 + 8 + 2
# Lets convert diferent bases to decimal
def to_decimal(num_string, base):
    """Convert num_string, a number written in the given base (2-36), to its
    decimal (base-10) integer value.

    Digits above 9 may be letters in either case (e.g. "7b" in base 16).
    Implements the positional expansion walked through in the comments above:
    value = sum(digit_value * base**position).

    Returns 0 for an empty string.
    Raises ValueError if a character is not a valid digit in `base`.

    (The original body was an unimplemented `pass` stub, so the demo prints
    below produced None.)
    """
    value = 0
    for digit in num_string:
        # Horner's rule: shifting the accumulator left by one position
        # multiplies it by the base; int(digit, base) maps '0'-'9', 'a'-'z'.
        value = value * base + int(digit, base)
    return value
print(to_decimal("7b", 16)) # => 123
print(to_decimal("010111010110101", 2)) # => 123 | [
"tomtarpeydev@gmail.com"
] | tomtarpeydev@gmail.com |
f2c5f5fd9c4eec50119315da9332ef6b7fdb4979 | d3872dc2f360511c624bdd9c7a1449db0d676ea7 | /base/migrations/0011_auto_20180110_1107.py | b6124f6136a0d1a6cc8b996aab31d64fc77b9c0b | [] | no_license | orzubalsky/cwcom | 159d4957f7708d8ca7062f160571c46597d34ea8 | 26dab43a14de90b16bb14b83f790087bbef136b5 | refs/heads/master | 2021-01-11T07:01:08.039528 | 2018-02-28T04:44:36 | 2018-02-28T04:44:36 | 72,321,539 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the two backing fields of a generic relation to PortfolioImage.

    `content_type` + `object_id` together form the storage for a Django
    GenericForeignKey, letting a PortfolioImage attach to any model instance.
    Both are nullable/blank so existing rows migrate without a default.
    """
    # Must run after the contenttypes framework migration and the previous
    # migration of this app.
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('base', '0010_auto_20180107_1705'),
    ]
    operations = [
        # FK to ContentType: identifies *which model* the image is attached to.
        migrations.AddField(
            model_name='portfolioimage',
            name='content_type',
            field=models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True),
        ),
        # PK of the target row: identifies *which instance* of that model.
        migrations.AddField(
            model_name='portfolioimage',
            name='object_id',
            field=models.PositiveIntegerField(null=True, blank=True),
        ),
    ]
| [
"orzubalsky@gmail.com"
] | orzubalsky@gmail.com |
fa2fe65858f598fdcd7e6b50df50dbb7a95f264b | 0e3e552bcfc8dbc28fb54538f727445ac8e6c882 | /model/transfer_model.py | 936c931ee5865755cbb28e8de8af3422614d4ca7 | [] | no_license | duanxy66/adversarial_multitask_learning | 54ddce777a6ae7ba94d44d4079a984fdc5adc7bf | 54a7bf4a4c4e034a7912615bca0f12eaf1f62228 | refs/heads/master | 2020-03-23T14:27:29.927886 | 2018-06-10T14:14:05 | 2018-06-10T14:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | import tensorflow as tf
import sys
sys.path.append("..")
import yaml
from model.rnn_model import RNN
from model.mlp_model import MLP
# Load model hyper-parameters at import time from the repo-relative config.
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# Python via YAML tags if the config file is ever untrusted — prefer
# yaml.safe_load (behavior kept unchanged here).
with open("../config/config.yaml", "r") as f:
    params = yaml.load(f)
class Transfer(object):
    """
    Transfer-learning classifier built around the shared RNN of the
    adversarial multi-task network (TensorFlow 1.x graph-mode model).

    Building an instance constructs the whole graph: placeholders,
    embedding lookup, shared RNN encoding, a task-specific fully-connected
    layer, softmax cross-entropy loss and an accuracy op.
    """
    def __init__(self,
                 sequence_length,
                 num_classes,
                 embedding_size,
                 vocab_size,
                 static,
                 rnn_hidden_size,
                 num_layers,
                 dynamic,
                 use_attention,
                 attention_size):
        """
        Build the graph: embedding layer, shared RNN layer and a
        fully-connected output layer.  The RNN parameters are intended to be
        initialized from the shared RNN of the pre-trained adversarial
        network (restored under the "transfer-shared" variable scope).

        NOTE(review): `static` and `dynamic` are accepted but not used here;
        `dynamic=True` / `use_attention=True` are hard-coded in the RNN call.
        """
        # Token-id inputs, one-hot labels, and dropout keep-probabilities.
        self.input_x = tf.placeholder(
            tf.int32, [None, sequence_length], name="x")
        self.input_y = tf.placeholder(
            tf.float32, [None, num_classes], name="y")
        self.input_keep_prob = tf.placeholder(tf.float32, name="keep_prob_in")
        self.output_keep_prob = tf.placeholder(
            tf.float32, name="keep_prob_out")
        self.rnn_model = RNN(sequence_length,
                             rnn_hidden_size,
                             num_layers,
                             dynamic=True,
                             use_attention=True,
                             attention_size=attention_size)
        # Embedding matrix, uniformly initialized in [-1, 1).
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
            name="transfer-W")
        with tf.name_scope("embedding-layer"):
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
        with tf.name_scope("sequence-length"):
            # Padding ids are 0, so sign() marks real tokens; multiplying by
            # 1..sequence_length and taking the row max recovers each
            # sequence's true (unpadded) length.
            mask = tf.sign(self.input_x)
            range_ = tf.range(
                start=1, limit=sequence_length + 1, dtype=tf.int32)
            mask = tf.multiply(mask, range_, name="mask")  # element wise
            seq_len = tf.reduce_max(mask, axis=1)
        with tf.name_scope("rnn-processing"):
            """
            initialize the rnn model using pre-trained adversarial model
            """
            # `s` is the sentence representation produced by the shared RNN.
            s = self.rnn_model.process(
                self.embedded_chars, seq_len, self.input_keep_prob, self.output_keep_prob, scope="transfer-shared", )
        with tf.name_scope("transfer-fully-connected-layer"):
            # Input width is 2*hidden because the shared RNN output is
            # bidirectional-sized — TODO confirm against RNN.process.
            w = tf.Variable(tf.truncated_normal(
                [rnn_hidden_size*2, num_classes], stddev=0.1), name="w")
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            scores = tf.nn.xw_plus_b(s, w, b)
        with tf.name_scope("loss"):
            task_losses = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=self.input_y, logits=scores)  # logits and labels must be same size
            self.task_loss = tf.reduce_mean(task_losses)
        with tf.name_scope("task-accuracy"):
            self.predictions = tf.argmax(scores, 1, name="predictions")
            correct_predictions = tf.equal(
                self.predictions, tf.argmax(self.input_y, 1))
            self.task_accuracy = tf.reduce_mean(
                tf.cast(correct_predictions, "float"), name="accuracy")
| [
"jingchunzhen@126.com"
] | jingchunzhen@126.com |
ec8d79446d63bff1bbe330925affa4fa8e252b4c | 2cc432f126955aff626e6849ff8d91979984fa1b | /src/web/socialnetwork/utils/get_image.py | d12effdfc7f234aba7842638986e885cbd6a36da | [
"MIT"
] | permissive | iamamarpal/fsnd-flask-social-network | 023c0f3073dd278a1c9783bf23c98acb95cf4740 | 3bcfdbd9f7ba17bc6e50eec1d52e5576aa956c33 | refs/heads/master | 2022-03-14T06:01:27.557352 | 2018-11-06T16:04:54 | 2018-11-06T16:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from bs4 import BeautifulSoup
import urllib.request
import shutil
import requests
from urllib.parse import urljoin
import sys
import time
def make_soup(url):
    """Fetch *url* with a browser-like User-Agent and return the parsed
    BeautifulSoup tree (html.parser backend)."""
    request = urllib.request.Request(url, headers={'User-Agent': "Magic Browser"})
    response = urllib.request.urlopen(request)
    return BeautifulSoup(response, 'html.parser')
def get_images(url):
    """Download every <img> found on the page at *url* into the current
    working directory, naming each file after the last path segment of its
    src attribute.

    Fixes over the original:
    - the bare `except:` (which also swallowed KeyboardInterrupt/SystemExit)
      is narrowed to `except Exception` and the actual error is reported;
    - HTTP error responses are no longer silently written to disk
      (raise_for_status makes them take the failure path);
    - <img> tags without a src attribute are skipped instead of crashing
      into the handler.
    One bad image never aborts the whole batch.
    """
    soup = make_soup(url)
    images = [img for img in soup.findAll('img')]
    print (str(len(images)) + " images found.")
    print('Downloading images to current working directory.')
    image_links = [each.get('src') for each in images]
    for each in image_links:
        if not each:
            # <img> with no src attribute — nothing to fetch.
            continue
        try:
            filename = each.strip().split('/')[-1].strip()
            # Resolve relative srcs against the page URL.
            src = urljoin(url, each)
            print('Getting: ' + filename)
            response = requests.get(src, stream=True)
            response.raise_for_status()  # treat 4xx/5xx as failures, not images
            # delay to avoid corrupted previews
            time.sleep(1)
            with open(filename, 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)
        except Exception as err:
            print('  An error occured. Continuing. (%s)' % err)
    print('Done.')
"noreply@github.com"
] | noreply@github.com |
c32bf7266de063e1e276f4b6ab28ed930165b860 | 9f7d4d76c7e66aa424a5f8723575dc489f1fd2ab | /2022/15/15.py | 7d6fdb456fc09a05011e86519edbfcdeac7af504 | [
"MIT"
] | permissive | kristianwiklund/AOC | df5a873287304816f25d91259c6e6c99c7a5f4bf | d9a668c406d2fd1b805d9b6a34cffa237a33c119 | refs/heads/master | 2023-01-12T09:01:11.012081 | 2023-01-02T19:12:29 | 2023-01-02T19:12:29 | 227,458,380 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,007 | py | import sys
sys.path.append("../..")
from utilities import *
import networkx as nx
from copy import deepcopy
from pprint import pprint
# Read the puzzle input; readarray comes from the shared utilities module
# and presumably yields one whitespace-split token list per line — TODO
# confirm against utilities.py.
arr = readarray("input.txt")
# Row examined for part 1 (2000000 for the real input, 10 for the sample).
row = 2000000
#row=10
# Sensor and beacon positions stored as complex numbers: real = x, imag = y.
beac=list()
sens=list()
for l in arr:
    # Input lines look like:
    # "Sensor at x=.., y=..: closest beacon is at x=.., y=.."
    # so tokens 2/3 carry the sensor coords and tokens 8/9 the beacon coords.
    x=int(l[2].split("=")[1].strip(","))
    y=int(l[3].split("=")[1].strip(":"))
    s=complex(x,y)
    # print(s)
    sens.append(s)
    x=int(l[8].split("=")[1].strip(","))
    y=int(l[9].split("=")[1].strip(":"))
    b=complex(x,y)
    # print(b)
    beac.append(b)
print(sens)
# for each beacon
# identify the lines defining the area of the relevant beacon coverage
# project those lines on the line we want to look at
# the length of the line grows with +1 on each side from the center for each row above the furthest row
# the result is a set of lines showing what is covered
def cover(s, b, l):
    """Return the (left, right) x-extent of sensor s's coverage on row l,
    or None when the sensor cannot reach that row.

    s and b are complex points (real = x, imag = y).  The Manhattan distance
    from the sensor to its closest beacon b bounds the exclusion diamond:
    no unknown beacon can lie strictly inside it.
    """
    radius = int(abs(s.imag - b.imag) + abs(s.real - b.real))
    # Row l lies entirely below the lowest point the sensor can reach.
    if int(s.imag + radius) < l:
        return None
    # The diamond's half-width on row l shrinks by one per row of vertical
    # distance from the sensor's own row.
    half_width = radius - abs(s.imag - l)
    if half_width < 0:
        return None
    return (s.real - abs(half_width), s.real + abs(half_width))
def lineme(row):
    """Collect the coverage interval of every sensor on `row`, dropping
    sensors that do not reach it, sorted by left edge."""
    spans = [cover(s, b, row) for s, b in zip(sens, beac)]
    return sorted((sp for sp in spans if sp), key=lambda sp: sp[0])
#print(mi,ma)
def yes(x, line):
    """True when x falls inside any closed [lo, hi] interval in `line`."""
    return any(lo <= x <= hi for lo, hi in line)
def scoreme(line, maxx=None):
    """Count covered cells between the intervals' extremes on one row.

    Scans positions from the minimum left edge up to (excluding) the maximum
    right edge; `maxx`, when truthy, can extend that right edge.  For narrow
    rows (< 80 cells) an ASCII picture is printed as a debugging aid.
    """
    right = max(int(hi) for lo, hi in line)
    if maxx:
        right = max(maxx, right)
    left = min(int(lo) for lo, hi in line)
    draw = (right - left) < 80
    covered = 0
    for pos in range(left, right):
        if yes(pos, line):
            covered += 1
            if draw:
                print("#", end="")
        elif draw:
            print(".", end="")
    return covered
print("")
line = lineme(row)
print("part 1:",scoreme(line))
def overlap(a, b):
    """True when interval a equals b or is fully contained within b."""
    return a == b or (b[0] <= a[0] and a[1] <= b[1])
def cm(a, b):
    """Comparator for interval pairs: order by left edge, then right edge.

    Returns -1 / 0 / 1 in the classic cmp convention, for use with
    functools.cmp_to_key.
    """
    if a[0] != b[0]:
        return -1 if a[0] < b[0] else 1
    if a[1] != b[1]:
        return -1 if a[1] < b[1] else 1
    return 0
from functools import cmp_to_key
# Part 2: scan every row of the 0..4000000 search square looking for the one
# x-position not covered by any sensor.  For each row, sort the coverage
# intervals and sweep left to right tracking the furthest right edge seen
# (`x`); a gap of exactly one free cell shows up as j[0]-x == 2.
for i in range(0,4000000):
    line = lineme(i)
    line= sorted(list(line),key=cmp_to_key(cm))
    x=0
    for j in line:
        # Interval entirely behind the sweep front — ignore it.
        if j[0]<x:
            if j[1]<x:
                continue
        if j[0]-x == 2:
            # Free column is x+1; tuning frequency = x_coord*4000000 + row.
            print ("part 2:", i+(x+1)*4000000,line)
            import sys
            sys.exit()
        x=j[1]
        # Past the right edge of the search square — next row.
        if x>4000000:
            break
| [
"githubkristian@snabela.nl"
] | githubkristian@snabela.nl |
f0ca8de4a63d7c7c934cb1df113328a1725e58b2 | 7d133e2398a4f7592428996fe94c713002ece2d5 | /program85.py | 20fb4126ff351ad89ea6713a0a3d402a740bfefb | [] | no_license | SANJAY-NT/DSA-Problems | cfec8844ef07f63962199dc8dda79771ecf5852c | 3d7cef6bcc9d61071ce79be7300b66d4bff7c0b4 | refs/heads/main | 2023-07-06T19:56:05.874019 | 2021-08-18T16:23:18 | 2021-08-18T16:23:18 | 378,221,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | #User function Template for python3
class Solution:
    def secFrequent(self, arr, n):
        """Return the string whose frequency is the second-largest value in
        the sorted multiset of frequencies of `arr`.

        Ties resolve to the earliest first occurrence, matching the original
        dict-insertion-order behaviour (Counter preserves first-encounter
        order).  `n` is len(arr), kept for interface compatibility with the
        judge's driver.

        Replaces the original's special-cased counting of arr[0], redundant
        instance-state assignments and commented-out debug prints with a
        Counter; results are identical.
        """
        from collections import Counter
        counts = Counter(arr)
        # Second-largest element of the sorted frequency list (may equal the
        # largest when the top frequency is tied, as in the original code).
        second = sorted(counts.values())[-2]
        for word, freq in counts.items():
            if freq == second:
                return word
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == '__main__':
    # Judge driver: first line is the number of test cases; each case is a
    # line with the element count followed by a line of space-separated words.
    t = int(input())
    for _ in range(t):
        n = int(input().strip())
        arr = input().strip().split(" ")
        ob = Solution()
        ans = ob.secFrequent(arr,n)
        print(ans)
# } Driver Code Ends | [
"noreply@github.com"
] | noreply@github.com |
2b206bd77e2c71810ed891c15081bb40dd02a4af | c05357142b9f112d401a77f9610079be3500675d | /danceschool/core/urls.py | acc3ba46762b7690b07c61b675bf598fcdb0daac | [
"BSD-3-Clause"
] | permissive | NorthIsUp/django-danceschool | b3df9a9373c08e51fcaa88751e325b6423f36bac | 71661830e87e45a3df949b026f446c481c8e8415 | refs/heads/master | 2021-01-02T22:42:17.608615 | 2017-08-04T17:27:37 | 2017-08-04T17:27:37 | 99,373,397 | 1 | 0 | null | 2017-08-04T19:21:50 | 2017-08-04T19:21:50 | null | UTF-8 | Python | false | false | 2,997 | py | from django.conf.urls import url
from django.contrib import admin
from .feeds import EventFeed, json_event_feed
from .views import SubmissionRedirectView, InstructorStatsView, OtherInstructorStatsView, IndividualClassView, IndividualEventView, StaffDirectoryView, EmailConfirmationView, SendEmailView, SubstituteReportingView, InstructorBioChangeView, AccountProfileView, OtherAccountProfileView
from .ajax import UserAccountInfo, updateSeriesAttributes, getEmailTemplate
from .autocomplete_light_registry import CustomerAutoComplete, UserAutoComplete
# Populate the admin site from all installed apps before URL resolution.
admin.autodiscover()
# URL routing for the core danceschool app: AJAX/autocomplete endpoints,
# staff tools, calendar feeds, and public class/event/account pages.
urlpatterns = [
    # These URLs are for Ajax and autocomplete functionality
    url(r'^staff/substitute/filter/$', updateSeriesAttributes, name='ajaxhandler_submitsubstitutefilter'),
    url(r'^staff/sendemail/template/$', getEmailTemplate, name='ajaxhandler_getemailtemplate'),
    url(r'^staff/autocomplete/user', UserAutoComplete.as_view(), name='autocompleteUser'),
    url(r'^staff/autocomplete/customer', CustomerAutoComplete.as_view(), name='autocompleteCustomer'),
    url(r'^accounts/info/$', UserAccountInfo.as_view(), name='getUserAccountInfo'),
    # For general admin form submission redirects
    url(r'^form/submitted/$', SubmissionRedirectView.as_view(), name='submissionRedirect'),
    # Staff-facing tools: directory, bulk email (with confirmation step),
    # and substitute-teacher reporting.
    url(r'^staff/directory/$',StaffDirectoryView.as_view(),name='staffDirectory'),
    url(r'^staff/sendemail/$', SendEmailView.as_view(),name='emailStudents'),
    url(r'^staff/sendemail/confirm/$', EmailConfirmationView.as_view(),name='emailConfirmation'),
    url(r'^staff/substitute/$', SubstituteReportingView.as_view(),name='substituteTeacherForm'),
    # These provide the ability to view one's own stats or another instructor's stats
    url(r'^staff/instructor-stats/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/$', OtherInstructorStatsView.as_view(), name='instructorStats'),
    url(r'^staff/instructor-stats/$', InstructorStatsView.as_view(), name='instructorStats'),
    # This provides the ability to edit one's own bio
    url(r'^staff/bio/$', InstructorBioChangeView.as_view(), name='instructorBioChange'),
    # These are for the calendar feeds (iCal + JSON, optionally filtered
    # to one instructor via a private feed key).
    url(r'^events/feed/$', EventFeed(), name='calendarFeed'),
    url(r'^events/feed/json/$', json_event_feed, name='jsonCalendarFeed'),
    url(r'^events/feed/(?P<instructorFeedKey>[\w\-_]+)$', EventFeed(), name='calendarFeed'),
    url(r'^events/feed/json/(?P<instructorFeedKey>[\w\-_]+)$', json_event_feed, name='jsonCalendarFeed'),
    # These are for individual class views and event views
    url(r'^classes/(?P<year>[0-9]+)/(?P<month>[\w]+)/(?P<slug>[\w\-_]+)/$', IndividualClassView.as_view(), name='classView'),
    url(r'^events/(?P<year>[0-9]+)/(?P<month>[\w]+)/(?P<slug>[\w\-_]+)/$', IndividualEventView.as_view(), name='eventView'),
    # Account profiles: another user's (by id) or one's own.
    url(r'^accounts/profile/(?P<user_id>[0-9]+)/$', OtherAccountProfileView.as_view(), name='accountProfile'),
    url(r'^accounts/profile/$', AccountProfileView.as_view(), name='accountProfile'),
]
| [
"lee.c.tucker@gmail.com"
] | lee.c.tucker@gmail.com |
ed3c1d09f51b58821175cd66a4e37d6bd2aa4695 | 250b7c0c980a48d1ae3966fc2930871d957d3aca | /tests/test_db.py | 91c92a148bad9468dbc5f0805ec51adbcfa7dab5 | [] | no_license | xFelipe/video_owl | 648e76e87abc5d8fa2778ee75f5cc3d397c8bbba | f5cd4cd2e5bb2d14596769afbb119c568836e9c6 | refs/heads/master | 2023-02-19T08:03:02.337212 | 2021-01-23T20:08:40 | 2021-01-23T20:08:40 | 321,575,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from os import path
def test_db_connection(database):
    """The database file must appear only after init_db() is called."""
    db_file = database.DB_PATH
    exists_before = path.exists(db_file)
    assert not exists_before, \
        "Connection to the database incorrectly recognized."
    database.init_db()
    exists_after = path.exists(db_file)
    assert exists_after, \
        "Connection to the database has not been established."
def test_video_table(database, models):
    """A saved Video row can be read back from a freshly initialized DB."""
    database.init_db()
    existing = models.Video.query.all()
    assert len(existing) == 0, \
        "Test saving video already started with a video in the DB."
    new_video = models.Video('codigo', 'nome')
    new_video.save()
    stored = models.Video.query.all()
    assert new_video in stored, \
        "Error saving or retrieving video."
| [
"xfelipegd@gmail.com"
] | xfelipegd@gmail.com |
116fe7e79106c9067d5538031f9d357c7527af50 | 0751645042e3ac1a0aa020f8242ad93944028940 | /replaceBlackBG.py | 444716693671305c80050e147d92144f814c974e | [
"BSD-3-Clause"
] | permissive | kskmar/ReSort-IT | 811b14827adf5f7478f614de919ea206dcd1183e | b8ae37f1eea65c5c0c58ead97ffe15e1c4619004 | refs/heads/master | 2023-03-31T14:44:06.700297 | 2021-04-09T08:58:42 | 2021-04-09T08:58:42 | 285,340,166 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 16:39:49 2020
@author: anasa
"""
#importing modules
import matplotlib.pyplot as plt
import numpy as np
import cv2
# Composite a foreground object onto a new background:
# 1. build an HSV brightness mask separating object pixels from the black BG,
# 2. black out everything outside the mask in the foreground image,
# 3. black out the mask area in the background crop,
# 4. add the two images so the object sits on the new background.
img = cv2.imread(r'D:\gp_dataset\bottle1CL\3_bottles\3_0.png')
# image_copy = np.copy(img)
# plt.imshow(img)
# image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
# plt.imshow(image_copy)
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
# Defining the HSV threshold range. NOTE(review): despite the "red_" names,
# this range only requires V >= 100, i.e. it selects all sufficiently bright
# pixels regardless of hue — effectively "not the black background".
red_lower=np.array([0,0,100],np.uint8)
red_upper=np.array([250,255,255],np.uint8)
# Binary mask: 255 where the pixel is inside the range, 0 elsewhere.
mask = cv2.inRange(hsv, red_lower, red_upper)
print (mask)
plt.imshow(mask, cmap='gray')
# Foreground with everything outside the mask forced to black.
masked_image = np.copy(img)
masked_image[mask == 0] = [0, 0, 0]
plt.imshow(masked_image)
imgBack = cv2.imread(r'D:\gp_dataset\bottle1CL\bgs\rnd1.png')
#imgBack = cv2.cvtColor(imgBack, cv2.COLOR_BGR2RGB)
crop_background = imgBack[0:800, 0:800] #the image is: <class 'numpy.ndarray'> with dimensions: (514, 816, 3)
# Punch a black hole in the background where the object will go.
crop_background[mask != 0] = [0,0,0]
plt.imshow(crop_background)
# Pixel-wise sum: each pixel is non-black in exactly one of the two images.
# NOTE(review): this assumes the background crop matches the foreground's
# dimensions — confirm, given the (514, 816, 3) comment above.
complete_image = masked_image + crop_background
cv2.imwrite(r'D:\gp_dataset\bottle1CL\Ztrash\xxx.jpeg', complete_image)
plt.imshow(complete_image)
| [
"noreply@github.com"
] | noreply@github.com |
b8780a74cdfeb4022df65e92c883437e00ae0e4d | 1525a94e2437015df79ee6a8dfd49cc0fb8e2843 | /sizing.py | 66ad74bddd1eb5fc16998eae0a3c2b519019b086 | [] | no_license | alpersanli/Image_Processing | b16ee295a4feb48e67382f435f2daf9355a68248 | 1e76538eb658d73ebcb29ff77872d179ffc4a97a | refs/heads/master | 2020-04-17T20:04:59.754090 | 2019-01-21T22:56:33 | 2019-01-21T22:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | import cv2
kamera=cv2.VideoCapture(0) # 0 = built-in PC camera, 1 = USB camera, or a filename (e.g. 'smile') for an uploaded video
kamera.set(cv2.CAP_PROP_FRAME_WIDTH,250) # camera frame dimensions
kamera.set(cv2.CAP_PROP_FRAME_HEIGHT,250)
while True: # loop for as long as the condition holds
    ret, goruntu=kamera.read() # ret is the success flag, goruntu the captured frame
    # Alternative way to set the camera size (property ids 3/4):
    #ret=kamera.set(3,320) # property 3 = width
    #ret=kamera.set(4,240) # property 4 = height
    griton = cv2.cvtColor(goruntu, cv2.COLOR_BGR2GRAY) # grayscale version
    cv2.imshow('griton', griton)
    # LUV color-space version of the same frame, shown in its own window.
    farklirenk = cv2.cvtColor(goruntu, cv2.COLOR_BGR2LUV)
    cv2.imshow('farklirenk', farklirenk)
    cv2.imshow('goruntu',goruntu)
    if cv2.waitKey(25) & 0xFF==ord('q'): # quit when 'q' is pressed; 25 ms delay sets the refresh speed
        break
kamera.release() # release the camera and close the windows
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
3c62b147291c14be369f9d73ced805f7f8773c2f | 5044413a31d50b8220c87ae02acc7b059c7bf5ec | /T2/KademliaLibrary/example/download.py | 4be810183d08324cbd67d47e8459bd7b3f0cb044 | [] | no_license | rjherrera/IIC2523 | 500540350d06a1d11866093ec8d5df984728875c | 756c4a3d9a59d72f66280333c8b48536c03ab592 | refs/heads/master | 2020-03-28T00:25:53.312660 | 2017-12-13T19:06:41 | 2017-12-13T19:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | from kademlia_iic2523.ipc import ipcSend
import requests
import socket
import sys
FILES_FOLDER = '/user/rjherrera/T2/files'
def download(url):
    """Stream *url* into FILES_FOLDER under the filename given on the
    command line (sys.argv[1]).

    Returns True on success.  Any failure — network error or a non-200
    status — is printed in a friendly way and yields False instead of
    propagating.
    """
    try:
        response = requests.get(url, stream=True)
        if response.status_code != 200:
            raise Exception(response.status_code)
        target = '%s/%s' % (FILES_FOLDER, sys.argv[1])
        with open(target, 'wb') as out:
            # 1 KiB chunks keep memory flat for large files.
            for chunk in response.iter_content(1024):
                out.write(chunk)
        return True
    except Exception as error:
        print(error)
        return False
def onResponse(message):
    """Try each candidate URL in `message` until one download succeeds,
    then register this node as a server for the file.

    After the first successful download, the file's new location on this
    host is announced over IPC so other peers can fetch it from us; if every
    URL fails, a notice is printed.
    """
    host_ip = socket.gethostbyname(socket.gethostname())
    for candidate in message:
        if not download(candidate):
            continue
        node_url = 'http://%s:11009/%s' % (host_ip, sys.argv[1])
        # 'set' the file in our location (node_url) -> we are now registered as a server for it
        ipcSend('set %s %s' % (sys.argv[1], node_url))
        return
    print('File not downloaded.')
ipcSend('get %s' % sys.argv[1], onResponse)
| [
"rjherrera@uc.cl"
] | rjherrera@uc.cl |
24ea380d441116f8c9bbb23346ae6b5d9d6155ce | 6656d7e426c85ff977a2619f6a52981456ada9fe | /7_Greedy/5.py | 3766b18d16e7facd5f260668f96765338fbdd051 | [
"MIT"
] | permissive | abphilip-codes/Hackerrank_Interview | 4f37e12f1ab47042db88947bb3a31aed6486b54d | b40981ef55d04fb14d81a6e1c9ade1878f59394d | refs/heads/master | 2023-08-02T22:00:37.298224 | 2021-10-02T11:59:08 | 2021-10-02T11:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | # https://www.hackerrank.com/challenges/reverse-shuffle-merge/problem
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
#
# Complete the 'reverseShuffleMerge' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def reverseShuffleMerge(s):
    """HackerRank "Reverse Shuffle Merge": recover the lexicographically
    smallest string A such that s is a merge of reverse(A) and some
    shuffle(A).

    Greedy stack scan over reversed(s): each character appears f[c] times in
    s and A needs exactly f[c]//2 of them.  A character already picked may be
    popped in favour of a smaller one as long as enough copies remain later
    in the scan to still meet its quota.
    """
    freq = defaultdict(int)
    for ch in s:
        freq[ch] += 1
    remaining = dict(freq)     # copies of each char not yet scanned past
    used = defaultdict(int)    # copies currently committed to the answer
    picked = []                # stack holding the answer under construction
    for ch in reversed(s):
        if used[ch] < freq[ch] // 2:
            # Pop larger picks while their quota can still be refilled later.
            while picked:
                top = picked[-1]
                if top > ch and used[top] + remaining[top] - 1 >= freq[top] // 2:
                    used[picked.pop()] -= 1
                else:
                    break
            used[ch] += 1
            picked.append(ch)
        remaining[ch] -= 1
    return "".join(picked)
if __name__ == '__main__':
    # HackerRank harness: read s from stdin, write the answer to the file
    # named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = reverseShuffleMerge(s)
    fptr.write(result + '\n')
fptr.close() | [
"allenalvin333@gmail.com"
] | allenalvin333@gmail.com |
5e0c069364d5f7e087aaaa0e272776dfe7d7871b | 7ce762441a4a1dac889c023cabb185d2b34b5f52 | /Assignment5/OCR.py | 079ffd30fdbb017103f7cfa31315133a443c1f2e | [] | no_license | morteano/TDT4173 | 053f5a7d2cf7d3257edc18a9c88b653f85cd834a | d6f8b8bae42b95057124a98df1893b60b9f8ea0f | refs/heads/master | 2021-01-01T05:13:35.674492 | 2016-04-21T13:06:29 | 2016-04-21T13:06:29 | 56,311,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,026 | py | import scipy as sp
import numpy as np
import os
import matplotlib.pyplot as plt
import random
from sklearn import datasets, svm, metrics
import pickle
from skimage.restoration import denoise_tv_chambolle, denoise_bilateral
from skimage.filters import sobel
from skimage import feature, color
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from enum import Enum
# Root of the Chars74K-lite dataset: one subdirectory per letter class.
path = './chars74k-lite/'
# Available preprocessing pipelines and classifier back-ends.
preprocessing = Enum('Preprocessing', 'SOBEL, HOG, BOTH')
classifiers = Enum('Classifier', 'SVM, KNN, RF')
# Active configuration used by the functions below.
preprocess = preprocessing.HOG
classifier = classifiers.SVM
# When True, main() also runs sliding-window letter detection on an image.
detect = True
class ImagePairs:
    """Container of three index-aligned lists: preprocessed feature images,
    their letter labels, and the untouched original pixel arrays."""

    def __init__(self):
        # Callers keep the three lists aligned: entry i of each list
        # describes the same sample.
        self.images, self.letters, self.originals = [], [], []
def loadImages():
    """Load the whole Chars74K-lite dataset into an ImagePairs container,
    applying the globally configured preprocessing to each image.

    Each single-character directory name under `path` is both the class
    label and the folder of its samples.
    """
    # Creates an initial imagePair
    imagePairs = ImagePairs()
    # Append the images and the correct labels to imagesPairs
    for directory in os.listdir(path):
        # Class folders are single letters; longer names are skipped.
        if len(directory) < 2:
            for filename in os.listdir(path+directory+'/'):
                img = sp.misc.imread(path+directory+'/'+filename)
                # Keep the raw pixels for later visualization.
                imagePairs.originals.append(img)
                if preprocess == preprocessing.SOBEL:
                    img = useSOBEL(img)
                elif preprocess == preprocessing.HOG:
                    img = useHoG(img)
                else:
                    # BOTH: edge-filter first, then extract HOG features.
                    img = useSOBEL(img)
                    img = useHoG(img)
                imagePairs.images.append(img)
                imagePairs.letters.append(directory)
    return imagePairs
def useSOBEL(img):
    """Normalize to [0, 1], denoise, and return the Sobel edge map.

    NOTE(review): `sigma_range` is the old skimage parameter name (renamed
    `sigma_color` in later releases) — this code targets an older skimage.
    """
    img = img.astype(np.float)/255
    img = denoise_bilateral(img, sigma_range=0.1, sigma_spatial=15)
    img = sobel(img)
    return img
def useHoG(img):
    """Return the flattened HOG feature vector of a (20x20) glyph image."""
    img = feature.hog(img, orientations=9, pixels_per_cell=(3, 3), cells_per_block=(6, 6))
    return img
def loadTestImages(filename, letter):
    """Load a single image file as a one-sample ImagePairs test set,
    labeled with `letter` and preprocessed like the training data."""
    # Creates an initial imagePair
    imagePairs = ImagePairs()
    # Append the images and the correct labels to imagesPairs
    img = sp.misc.imread(filename)
    imagePairs.originals.append(img)
    if preprocess == preprocessing.SOBEL:
        img = useSOBEL(img)
    elif preprocess == preprocessing.HOG:
        img = useHoG(img)
    else:
        img = useSOBEL(img)
        img = useHoG(img)
    imagePairs.images.append(img)
    imagePairs.letters.append(letter)
    return imagePairs
def printImage(img):
    """Display a single image in a matplotlib window (blocks until closed)."""
    plt.figure()
    plt.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
    plt.show()
def splitDataset(percentage, dataset):
    """Randomly move `percentage` of the samples out of `dataset` into a new
    test set.

    Mutates `dataset` in place (samples are popped from it) and returns the
    (training, test) pair; the three parallel lists stay index-aligned.
    """
    testSet = ImagePairs()
    n_test = int(len(dataset.images) * percentage)
    for _ in range(n_test):
        pick = random.randint(0, len(dataset.images) - 1)
        testSet.images.append(dataset.images.pop(pick))
        testSet.letters.append(dataset.letters.pop(pick))
        testSet.originals.append(dataset.originals.pop(pick))
    return dataset, testSet
def reshape(images):
    """Flatten each 2-D image in `images` into a 1-D row-major list,
    returning the list of flattened images."""
    return [[pixel for row in image for pixel in row] for image in images]
def reshapeImage(image):
    """Flatten a single 2-D image into a 1-D row-major list of pixels."""
    return [pixel for row in image for pixel in row]
def trainClassifierSVM(dataSet):
    """Return an SVM classifier for dataSet, cached on disk.

    If a pickled model file "classifierSVM" exists it is loaded instead of
    retraining.  NOTE(review): pickle.load executes arbitrary code from the
    file — only safe because the file is produced locally by this script.
    """
    if os.path.isfile("classifierSVM"):
        file = open("classifierSVM", 'rb')
        classifier = pickle.load(file)
        file.close()
    else:
        classifier = svm.SVC(gamma=10, C=30, probability=True)
        # HOG features are flat float64 vectors; raw/Sobel images are 2-D
        # and must be flattened before fitting.
        if type(dataSet.images[0][0]) is not np.float64:
            classifier.fit(reshape(dataSet.images), dataSet.letters)
        else:
            classifier.fit(dataSet.images, dataSet.letters)
        # Cache the fitted model for subsequent runs.
        file = open("classifierSVM","wb")
        pickle.dump(classifier, file)
        file.close()
    return classifier
def trainClassifierRF(dataSet):
    """Train and return a random-forest classifier (no disk caching)."""
    classifier = RandomForestClassifier()
    if type(dataSet.images[0][0]) is not np.float64:
        classifier.fit(reshape(dataSet.images), dataSet.letters)
    else:
        classifier.fit(dataSet.images, dataSet.letters)
    return classifier
def trainClassifierkNN(dataSet):
    """Return a 5-NN classifier for dataSet, cached on disk as "classifier".

    Same load-or-train-and-pickle pattern as trainClassifierSVM; the same
    pickle caveat applies.
    """
    if os.path.isfile("classifier"):
        file = open("classifier", 'rb')
        classifier = pickle.load(file)
        file.close()
    else:
        classifier = KNeighborsClassifier(n_neighbors=5)
        if type(dataSet.images[0][0]) is not np.float64:
            classifier.fit(reshape(dataSet.images), dataSet.letters)
        else:
            classifier.fit(dataSet.images, dataSet.letters)
        file = open("classifier","wb")
        pickle.dump(classifier, file)
        file.close()
    return classifier
def predictClassifier(classifier, testSet):
    """Predict labels for testSet.images, flattening 2-D images first.

    HOG feature vectors (flat float64 values) are passed through unchanged;
    anything else is assumed to be a 2-D image and reshaped to rows.
    """
    samples = testSet.images
    if type(samples[0][0]) is not np.float64:
        samples = reshape(samples)
    return classifier.predict(samples)
def showImages(dataSet, testSet, predicted):
    """Plot the first four training images with their labels (top row) and
    the first four test images with their predictions (bottom row)."""
    images_and_labels = list(zip(dataSet.originals, dataSet.letters))
    for index, (image, label) in enumerate(images_and_labels[:4]):
        plt.subplot(2, 4, index + 1)
        plt.axis('off')
        plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
        plt.title('Training: ' + label)
    images_and_predictions = list(zip(testSet.originals, predicted))
    for index, (image, prediction) in enumerate(images_and_predictions[:4]):
        # Bottom row occupies subplot positions 5-8.
        plt.subplot(2, 4, index + 5)
        plt.axis('off')
        plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
        plt.title('Prediction: ' + prediction)
    plt.show()
def myTest(classified, filename, letter):
    """Classify a single image file labeled `letter` and print whether each
    prediction matched (counts are tallied but not returned)."""
    testSet = loadTestImages(filename, letter)
    predicted = predictClassifier(classified, testSet)
    correct = 0
    false = 0
    images_and_predictions = list(zip(testSet.images, predicted))
    for index, (image, prediction) in enumerate(images_and_predictions):
        if prediction == testSet.letters[index]:
            correct += 1
            print("Correct:", prediction, testSet.letters[index])
        else:
            false += 1
            print("False:", prediction, testSet.letters[index])
def detection(filename, classified):
    """Slide fixed-size windows over the image in `filename` and mark every
    window the classifier confidently labels as the searched letter.

    Windows of side 20/40/60 px are taken every 8 px, downsampled to 20x20,
    preprocessed via getSubImages, and scored with predict_proba.  Hits are
    framed on a copy of the original image, which is returned.
    """
    pixels = 8          # stride between window origins
    threshold = 0.6     # minimum class probability to count as a hit
    searchIndex = 13    # class index of the letter searched for ('n' if
                        # classes are a-z in order — TODO confirm)
    origImg = sp.misc.imread(filename)
    # NOTE(review): dividing by 256**4 then casting to uint8 zeroes typical
    # 8-bit images — confirm the expected input bit depth.
    origImg /= 256**4
    origImg = np.array(origImg, dtype=np.uint8) # This line only change the type, not values
    img = color.rgb2gray(origImg)
    potential = 0
    # Parallel lists describing each detection: origin, window size, probs.
    detX = []
    detY = []
    sizes = []
    probss = []
    for size in [20, 40, 60]:
        subImages, originals = getSubImages(img, pixels, size)
        print("Number of images", len(subImages)*len(subImages[0]))
        print("Start detecting")
        progress = 0
        quarter = 1
        for i in range(len(subImages)):
            # Coarse progress indicator at every quarter of the rows.
            if i >= quarter*len(subImages)/4:
                quarter += 1
                progress += 25
                print("Progess", progress, "%")
            for j in range(len(subImages[i])):
                probs = classified.predict_proba(subImages[i][j])
                maxV = max(probs[0])
                if maxV > threshold:
                    # print(probs[0])
                    # Find the index of the winning class.
                    for k in range(len(probs[0])):
                        if probs[0][k] == maxV:
                            index = k
                    # printImage(originals[i][j])
                    # printImage(origImg)
                    if index == searchIndex:
                        potential += 1
                        detX.append(i*pixels)
                        detY.append(j*pixels)
                        sizes.append(size)
                        probss.append(probs)
    # Frame every detection on the color image.
    for i in range(len(detX)):
        drawSquare(origImg, detX[i], detY[i], sizes[i], probss[i], searchIndex)
    print("I think I found", potential, "cases of you letter")
    # NOTE(review): the grayscale `img` is displayed although the squares
    # were drawn on `origImg` (which is returned) — confirm intent.
    printImage(img)
    return origImg
def drawSquare(img, pixelX, pixelY, size, probs, index):
    """Paint a 3-pixel-wide square frame of side `size` onto `img`, with its
    top-left corner at (pixelX, pixelY).

    The frame color encodes confidence: green channel scales with
    probs[0][index], red with 1 - 2*probs[0][index]; blue stays 0.
    """
    p = probs[0][index]
    for row in range(size):
        for col in range(size):
            on_border = row < 3 or row > size - 4 or col < 3 or col > size - 4
            if on_border:
                img[pixelX + row][pixelY + col] = [255 * (1 - 2 * p), 255 * p, 0]
def getSubImages(img, pixels, size):
    """Cut `img` into `size` x `size` windows at a stride of `pixels`,
    downsample each to roughly 20x20, and preprocess it like the training
    data.

    Returns (subImages, originals): row-major nested lists where
    subImages[i][j] is the feature representation of the window whose
    origin is (i*pixels, j*pixels) and originals[i][j] the downsampled
    raw window.
    """
    subImages = []
    originals = []
    for i in range(len(img)):
        subImageRow = []
        originalRow = []
        for j in range(len(img[i])):
            # Take a window only on the stride grid and only if it fits
            # entirely inside the image.
            if i % pixels == 0 and j % pixels == 0 and i+size-1 < len(img) and j+size-1 < len(img[i]):
                # Subsample every size/20-th pixel so each window matches
                # the 20x20 resolution the classifier was trained on.
                subImage = []
                for k in range(i, i+size, int(size/20)):
                    line = []
                    for l in range(j, j+size, int(size/20)):
                        line.append(img[k][l])
                    subImage.append(line)
                originalRow.append(subImage)
                # Apply the same pipeline as the training preprocessing.
                if preprocess == preprocessing.SOBEL:
                    subImage = denoise_bilateral(subImage, sigma_range=0.1, sigma_spatial=15)
                    subImage = sobel(subImage)
                elif preprocess == preprocessing.HOG:
                    subImage = useHoG(subImage)
                else:
                    subImage = denoise_bilateral(subImage, sigma_range=0.1, sigma_spatial=15)
                    subImage = sobel(subImage)
                    subImage = useHoG(subImage)
                subImageRow.append(subImage)
        if len(subImageRow) > 0:
            subImages.append(subImageRow)
            originals.append(originalRow)
    return subImages, originals
def findParameters():
    """Grid-search gamma and C for the SVM on a 90/10 split, printing the
    best accuracy and the parameters that achieved it."""
    dataSet = loadImages()
    dataSet, testSet = splitDataset(0.1, dataSet)
    best = 0
    bestC = 0
    bestGamma = 0
    for i in [0.1, 1, 2, 5, 10, 20, 30]:
        for j in [0.1, 1, 2, 5, 10, 20, 50, 100]:
            # Shadows the module-level `classifier` enum inside this scope.
            classifier = svm.SVC(gamma=i, C=j)
            # Flatten 2-D images; HOG vectors (float64) pass through as-is.
            if type(dataSet.images[0][0]) is not np.float64:
                classifier.fit(reshape(dataSet.images), dataSet.letters)
            else:
                classifier.fit(dataSet.images, dataSet.letters)
            if type(testSet.images[0][0]) is not np.float64:
                predicted = classifier.predict(reshape(testSet.images))
            else:
                predicted = classifier.predict(testSet.images)
            correct = 0
            false = 0
            images_and_predictions = list(zip(testSet.images, predicted))
            for index, (image, prediction) in enumerate(images_and_predictions):
                if prediction == testSet.letters[index]:
                    correct += 1
                else:
                    false += 1
            if correct/(correct+false) > best:
                best = correct/(correct+false)
                print("Best so far:", best)
                bestC = j
                bestGamma = i
    print("Best c:", bestC)
    print("Best gamma:", bestGamma)
    print("Best prediction:", best)
def testMain():
    """Run five train/test rounds with fresh random splits and print the
    average accuracy (SVM or kNN depending on the module-level config)."""
    avg = 0
    for i in range(5):
        dataSet = loadImages()
        dataSet, testSet = splitDataset(0.1, dataSet)
        if classifier == classifiers.SVM:
            classified = trainClassifierSVM(dataSet)
        elif classifier == classifiers.KNN:
            classified = trainClassifierkNN(dataSet)
        predicted = predictClassifier(classified, testSet)
        correct = 0
        false = 0
        images_and_predictions = list(zip(testSet.images, predicted))
        for index, (image, prediction) in enumerate(images_and_predictions):
            if prediction == testSet.letters[index]:
                correct += 1
                print("Correct:", prediction, testSet.letters[index])
            else:
                false += 1
                print("False:", prediction, testSet.letters[index])
        print(correct/(correct+false))
        avg += correct/(correct+false)
    print("Avg:", avg/5)
def main():
    """Train the configured classifier on a 90/10 split, print per-sample
    results and overall accuracy, then optionally run sliding-window
    detection on 'handwritten.jpg' when `detect` is set."""
    dataSet = loadImages()
    dataSet, testSet = splitDataset(0.1, dataSet)
    if classifier == classifiers.SVM:
        classified = trainClassifierSVM(dataSet)
    elif classifier == classifiers.KNN:
        classified = trainClassifierkNN(dataSet)
    elif classifier == classifiers.RF:
        classified = trainClassifierRF(dataSet)
    predicted = predictClassifier(classified, testSet)
    correct = 0
    false = 0
    images_and_predictions = list(zip(testSet.images, predicted))
    for index, (image, prediction) in enumerate(images_and_predictions):
        if prediction == testSet.letters[index]:
            correct += 1
            print("Correct:", prediction, testSet.letters[index])
        else:
            false += 1
            print("False:", prediction, testSet.letters[index])
    print(correct/(correct+false))
    # showImages(dataSet, testSet, predicted)
    # myTest(classified, 'testD.jpg', 'd')
    if detect:
        sp.misc.imsave('detectedTest.jpg', detection('handwritten.jpg', classified))
        # sp.misc.imsave('detectedTestImg.jpg', detection('testImg.jpg', classified))
# Script entry point: runs the full train/evaluate(/detect) pipeline.
main()
# testMain()
# findParameters() | [
"morteano@stud.ntnu.no"
] | morteano@stud.ntnu.no |
5035210acca6b4c698cd6ccf81a2761a411a9eab | fa6b62db4ec199a012b66ab3093602953da6f04b | /auctions/migrations/0019_alter_listing_category.py | 4e7199dc8d88000535c9f4d3efdba4025f297336 | [] | no_license | alqor/commerce | 013a32ea311c98fb80501ab6dfb3b5aa4d7b94fd | ee0ea97449f38a7d0b49403e63a77b90da8e1165 | refs/heads/master | 2023-07-16T16:27:14.588062 | 2021-09-06T14:15:03 | 2021-09-06T14:15:03 | 400,778,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # Generated by Django 3.2.3 on 2021-09-03 12:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0018_listing_win_by'),
]
operations = [
migrations.AlterField(
model_name='listing',
name='category',
field=models.ForeignKey(blank=True, default='Other', on_delete=django.db.models.deletion.CASCADE, related_name='listing_items', to='auctions.itemcategory'),
),
]
| [
"a.kylivnyk@gmail.com"
] | a.kylivnyk@gmail.com |
f71cf555586300ad24dcec2f51df97d3fd303c1f | b51fee6de4d9efe35ab624cdfceffc07e3c27f18 | /battlecode-manager/player_sandboxed.py | a594fef824d0313d779e929327e2472dc50b90f3 | [
"MIT"
] | permissive | darthdeus/battlecode-multi-agents | 44c6938dc8dd1bcbf90908db83f78d034b49fb2a | ded639acf813eed6b1f656e7f0ae3cb9c92e095a | refs/heads/master | 2021-03-22T03:26:57.276818 | 2018-03-08T10:53:55 | 2018-03-08T10:53:55 | 123,737,670 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,907 | py | import threading
from threading import Timer
from player_abstract import AbstractPlayer
import random
import socket
import server
def _stream_logs(container, stdout, stderr, line_action):
for line in container.logs(stdout=stdout, stderr=stderr, stream=True):
line_action(line)
class SandboxedPlayer(AbstractPlayer):
def __init__(self, socket_file, working_dir, docker_client, local_dir=None, s3_bucket=None, s3_key=None,
player_key="", player_mem_limit=256, player_cpu=20):
super().__init__(socket_file, working_dir, local_dir, s3_bucket, s3_key, player_key, player_mem_limit, player_cpu)
self.docker = docker_client
def stream_logs(self, stdout=True, stderr=True, line_action=lambda line: print(line.decode())):
threading.Thread(target=_stream_logs, args=(self.container, stdout, stderr, line_action)).start()
def start(self):
# won't collide ;)
self.socket_name = '/tmp/battlecode-suspender-{}'.format(random.randint(0, 10**50))
self.suspender_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.suspender_socket.bind(self.socket_name)
self.suspender_socket.settimeout(server.BUILD_TIMEOUT) # seconds
self.suspender_socket.listen(1)
volumes = {
self.working_dir: {'bind': '/code', 'mode': 'rw'},
self.socket_file: {'bind': '/tmp/battlecode-socket', 'mode': 'rw'},
self.socket_name: {'bind': '/tmp/battlecode-suspender', 'mode': 'rw'}
}
working_dir = '/'
command = 'sh /player_startup.sh'
env = {
'PLAYER_KEY': self.player_key,
'SOCKET_FILE': '/tmp/battlecode-socket',
'RUST_BACKTRACE': 1,
'BC_PLATFORM': self._detect_platform()
}
self.container = self.docker.containers.run(
'battlebaby',
command,
privileged=False,
detach=True,
stdout=True,
stderr=True,
volumes=volumes,
working_dir=working_dir,
environment=env,
mem_limit=self.player_mem_limit,
memswap_limit=self.player_mem_limit,
auto_remove = True,
network_disabled=True
)
self.suspender_connection = None
def guess_language(self):
procs = self.container.top()['Processes']
for p in procs:
name = p[3]
if "java" in name:
return "jvm"
elif "python" in name:
return "python"
elif "pypy" in name:
return "pypy"
elif "mono" in name:
return "mono"
return "c"
def suspinit(self):
if self.suspender_connection == None:
try:
# wait for suspender script to connect from player host
connection, _ = self.suspender_socket.accept()
self.suspender_connection = connection
self.suspender_file = self.suspender_connection.makefile('rw', 64)
login = next(self.suspender_file)
assert int(login.strip()) == self.player_key, 'mismatched suspension login: {} != {}'.format(repr(login.strip()), repr(self.player_key))
except Exception as e:
print('suspender timed out', e)
def pause(self):
self.suspinit()
# see suspender.py
# we don't go through docker.suspend or docker.exec because they're too slow (100ms)
try:
self.suspender_file.write('suspend\n')
self.suspender_file.flush()
response = next(self.suspender_file)
assert response.strip() == 'ack', response.strip() + ' != ack'
except Exception as e:
print("SUSPENSION FAILED!!! SUSPICIOUS:", e)
def unpause(self, timeout=None):
self.suspinit()
# see suspender.py
# we don't go through docker.suspend or docker.exec because they're too slow (100ms)
try:
self.suspender_file.write('resume\n')
self.suspender_file.flush()
response = next(self.suspender_file)
assert response.strip() == 'ack', response.strip() + ' != ack'
except Exception as e:
print("resumption failed:", e)
def destroy(self):
try:
self.container.remove(force=True)
except Exception as e:
pass
try:
self.suspender_socket.close()
except Exception as e:
print('suspender close err:', e)
super().destroy()
def docker_stats(self, stream=False):
return self.container.stats(decode=True, stream=stream)
def __del__(self):
self.destroy()
| [
"darthdeus@gmail.com"
] | darthdeus@gmail.com |
60882d075746d75a8249155969a51033e5889b29 | 9af0546da92b19fc677e3470106be4e2f148f217 | /flaskiwsapp/snippets/exceptions/roleExceptions.py | 32cb44e820b965c5cb43054e6ee51805d2e5242e | [
"MIT"
] | permissive | rtorresve/EngineeringMidLevel | 2f0907270e461499c9741b9ae10085427093fcbe | a8f04991deca85cf615df72db12e082aaa543cfa | refs/heads/master | 2021-06-03T10:25:54.905980 | 2016-10-25T21:35:10 | 2016-10-25T21:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | '''
Created on Sep 22, 2016
@author: rtorres
'''
from flaskiwsapp.snippets.exceptions.baseExceptions import LogicalException
class RoleExistsException(LogicalException):
def __init__(self, argument=None):
super(RoleExistsException, self).__init__()
self.message = 'The role %s already exists' % argument
class RoleDoesNotExistsException(LogicalException):
def __init__(self, argument=None):
super(RoleDoesNotExistsException, self).__init__()
self.message = 'The role %s does not exists' % argument
| [
"rdtr.sis@gmail.com"
] | rdtr.sis@gmail.com |
d2c84ea4ee599a3dca31d420a60f2e17b98158a9 | fa2526ce1d65a2e58958a61c34cee1ba7cf73b94 | /setup.py | 958cccc8103c3d1f18213fd6d55c4f3cb9978257 | [
"ZPL-2.1"
] | permissive | Zojax/zojax.portlets.livesearch | c480a19bd57b8b348032e40203696e4c53c68347 | 95f117ce89e0dc1fbfefdbec7969170caa3a1caf | refs/heads/master | 2020-12-30T10:36:43.760852 | 2011-08-09T22:33:26 | 2011-08-09T22:33:26 | 2,035,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | ##############################################################################
#
# Copyright (c) 2008 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Setup for zojax.portlets.livesearch package
$Id$
"""
import sys, os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
version='1.0.0dev'
setup(name = 'zojax.portlets.livesearch',
version = version,
author = 'Nikolay Kim',
author_email = 'fafhrd91@gmail.com',
description = "Google Ads portlet",
long_description = (
'Detailed Documentation\n' +
'======================\n'
+ '\n\n' +
read('CHANGES.txt')
),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Zope Public License',
'Programming Language :: Python',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Framework :: Zope3'],
url='http://zojax.net/',
license='ZPL 2.1',
packages=find_packages('src'),
package_dir = {'':'src'},
namespace_packages=['zojax', 'zojax.portlets'],
install_requires = ['setuptools', 'simplejson',
'zope.component',
'zope.interface',
'zope.schema',
'zope.i18nmessageid',
'zojax.catalog',
'zojax.portlet',
'zojax.ownership',
'zojax.formatter',
'zojax.ui.searching',
'zojax.js.extjs',
'zojax.resource',
'zojax.resourcepackage',
],
extras_require = dict(test=['zojax.portlet [test]']),
include_package_data = True,
zip_safe = False
)
| [
"andrey.fedoseev@gmail.com"
] | andrey.fedoseev@gmail.com |
0110feb308ca1fb9314aaec5f10fd5fd44f12b18 | baf8ccd12b27d0882c75a9c3845a0679e831f618 | /25_cmc/manage.py | e871032c2b1bd42a3b774a017b749afb30f9ec0c | [
"MIT"
] | permissive | Tjorriemorrie/trading | c55d545a0a09e3fb92673696e95dd66b02858ab6 | aafa15a6c564bfa86948ab30e33d554172b38a3e | refs/heads/master | 2022-12-13T20:57:23.591343 | 2021-07-07T20:28:34 | 2021-07-07T20:28:34 | 28,739,306 | 2 | 2 | MIT | 2022-07-06T20:01:28 | 2015-01-03T08:55:17 | q | UTF-8 | Python | false | false | 659 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cmc.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"jaco@Tests-MacBook-Pro-4.local"
] | jaco@Tests-MacBook-Pro-4.local |
416c1899c1f3f2a4d1cd517f88fd3304a53508d2 | 54b6615c32cf3eabf89a4bf56c477a3b16fc828e | /programming_languages/python/Generators/utube_tutorial/second.py | 3708b40c89bb8b7cb36fd8aa42a43f1adc777956 | [] | no_license | sky2107/Toolbox | 4758ba2350576512009de03c37f370f5e4be9857 | 74f9cbde72afa5ca5d9513b27f05c1cd8e222425 | refs/heads/master | 2020-04-21T01:27:43.783203 | 2019-03-28T16:55:18 | 2019-03-28T16:55:18 | 169,224,411 | 0 | 0 | null | 2019-02-06T08:35:25 | 2019-02-05T10:38:36 | null | UTF-8 | Python | false | false | 261 | py | from time import sleep
from random import randrange
def compute():
sleep(.1)
return randrange(10)
# print(compute())
def f():
rv = []
for _ in range(10):
rv.append(compute())
return rv
print(f())
print(f'f:{f()}') | [
"felixmorillasnavas@gmail.com"
] | felixmorillasnavas@gmail.com |
b97b94047419f4971b7307b9f85b2080e191cab0 | 33b50d002d20a8feac53b2b29771a573addc9f39 | /main.py | 352ef3c8818c3bffde7c9e5e380eb75bd8787c48 | [] | no_license | Divyansh-Kamboj/galaxy-shooter- | ee760da3d2bcf5da7285e1d78c54ea1687a109c5 | 2b96a418513d74f4b0e41f2df8e0dc3315c72dbf | refs/heads/main | 2023-04-04T04:47:02.260253 | 2021-04-20T10:33:13 | 2021-04-20T10:33:13 | 359,775,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,658 | py | import pygame
import os
pygame.font.init()
pygame.mixer.init()
WIDTH, HEIGHT = 900, 500
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Galaxy shooter")
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BORDER = pygame.Rect(WIDTH//2 - 5, 0, 10, HEIGHT)
HEALTH_FONT = pygame.font.SysFont('comicsans', 40)
WINNER_FONT = pygame.font.SysFont('comicsans', 100)
FPS = 60
VEL = 5
BULLET_VEL = 7
MAX_BULLETS = 3
SPACESHIP_WIDTH, SPACESHIP_HEIGHT = 55, 40
YELLOW_HIT = pygame.USEREVENT + 1
RED_HIT = pygame.USEREVENT + 2
YELLOW_SPACESHIP_IMAGE = pygame.image.load(
os.path.join('Assets', 'spaceship_yellow.png'))
YELLOW_SPACESHIP = pygame.transform.rotate(pygame.transform.scale(
YELLOW_SPACESHIP_IMAGE, (SPACESHIP_WIDTH, SPACESHIP_HEIGHT)), 90)
RED_SPACESHIP_IMAGE = pygame.image.load(
os.path.join('Assets', 'spaceship_red.png'))
RED_SPACESHIP = pygame.transform.rotate(pygame.transform.scale(
RED_SPACESHIP_IMAGE, (SPACESHIP_WIDTH, SPACESHIP_HEIGHT)), 270)
SPACE = pygame.transform.scale(pygame.image.load(
os.path.join('Assets', 'spacee.png')), (WIDTH, HEIGHT))
def draw_window(red, yellow, red_bullets, yellow_bullets, red_health, yellow_health):
WIN.blit(SPACE, (0, 0))
pygame.draw.rect(WIN, BLACK, BORDER)
red_health_text = HEALTH_FONT.render(
"Health: " + str(red_health), 1, WHITE)
yellow_health_text = HEALTH_FONT.render(
"Health: " + str(yellow_health), 1, WHITE)
WIN.blit(red_health_text, (WIDTH - red_health_text.get_width() - 10, 10))
WIN.blit(yellow_health_text, (10, 10))
WIN.blit(YELLOW_SPACESHIP, (yellow.x, yellow.y))
WIN.blit(RED_SPACESHIP, (red.x, red.y))
for bullet in red_bullets:
pygame.draw.rect(WIN, RED, bullet)
for bullet in yellow_bullets:
pygame.draw.rect(WIN, YELLOW, bullet)
pygame.display.update()
def yellow_handle_movement(keys_pressed, yellow):
if keys_pressed[pygame.K_a] and yellow.x - VEL > 0:
yellow.x -= VEL
if keys_pressed[pygame.K_d] and yellow.x + VEL + yellow.width < BORDER.x:
yellow.x += VEL
if keys_pressed[pygame.K_w] and yellow.y - VEL > 0:
yellow.y -= VEL
if keys_pressed[pygame.K_s] and yellow.y + VEL + yellow.height < HEIGHT - 15:
yellow.y += VEL
def red_handle_movement(keys_pressed, red):
if keys_pressed[pygame.K_LEFT] and red.x - VEL > BORDER.x + BORDER.width:
red.x -= VEL
if keys_pressed[pygame.K_RIGHT] and red.x + VEL + red.width < WIDTH:
red.x += VEL
if keys_pressed[pygame.K_UP] and red.y - VEL > 0:
red.y -= VEL
if keys_pressed[pygame.K_DOWN] and red.y + VEL + red.height < HEIGHT - 15:
red.y += VEL
def handle_bullets(yellow_bullets, red_bullets, yellow, red):
for bullet in yellow_bullets:
bullet.x += BULLET_VEL
if red.colliderect(bullet):
pygame.event.post(pygame.event.Event(RED_HIT))
yellow_bullets.remove(bullet)
elif bullet.x > WIDTH:
yellow_bullets.remove(bullet)
for bullet in red_bullets:
bullet.x -= BULLET_VEL
if yellow.colliderect(bullet):
pygame.event.post(pygame.event.Event(YELLOW_HIT))
red_bullets.remove(bullet)
elif bullet.x < 0:
red_bullets.remove(bullet)
def draw_winner(text):
draw_text = WINNER_FONT.render(text, 1, WHITE)
WIN.blit(draw_text, (WIDTH/2 - draw_text.get_width() /
2, HEIGHT/2 - draw_text.get_height()/2))
pygame.display.update()
pygame.time.delay(5000)
def main():
red = pygame.Rect(700, 300, SPACESHIP_WIDTH, SPACESHIP_HEIGHT)
yellow = pygame.Rect(100, 300, SPACESHIP_WIDTH, SPACESHIP_HEIGHT)
red_bullets = []
yellow_bullets = []
red_health = 10
yellow_health = 10
clock = pygame.time.Clock()
run = True
while run:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LCTRL and len(yellow_bullets) < MAX_BULLETS:
bullet = pygame.Rect(
yellow.x + yellow.width, yellow.y + yellow.height//2 - 2, 10, 5)
yellow_bullets.append(bullet)
if event.key == pygame.K_RCTRL and len(red_bullets) < MAX_BULLETS:
bullet = pygame.Rect(
red.x, red.y + red.height//2 - 2, 10, 5)
red_bullets.append(bullet)
if event.type == RED_HIT:
red_health -= 1
if event.type == YELLOW_HIT:
yellow_health -= 1
winner_text = ""
if red_health <= 0:
winner_text = "Yellow Wins!"
if yellow_health <= 0:
winner_text = "Red Wins!"
if winner_text != "":
draw_winner(winner_text)
break
keys_pressed = pygame.key.get_pressed()
yellow_handle_movement(keys_pressed, yellow)
red_handle_movement(keys_pressed, red)
handle_bullets(yellow_bullets, red_bullets, yellow, red)
draw_window(red, yellow, red_bullets, yellow_bullets,
red_health, yellow_health)
main()
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | noreply@github.com |
4ab641c1f8b558f6129631f36ef58c23b8db787e | 8fc70b90b9c9a7e28f251e291ecc09e7d827b933 | /Parasitic Fold Single Limb.py | 36565947b462b4f47d06a8ace506b9f69a10d7d0 | [] | no_license | lachlangrose/inverting_knowledge_JSG | f7be696b760cbcc370bfcc5e4e3de0747fef705c | 58a7e113ccb10b108920aefcce20870c63e0c898 | refs/heads/master | 2020-04-02T05:36:11.244744 | 2018-11-28T05:39:31 | 2018-11-28T05:39:31 | 154,091,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,137 | py |
# coding: utf-8
# In[1]:
import matplotlib.pylab as plt
import matplotlib as mpl
import numpy as np
from scipy.stats import norm
#fold = imp.load_source('fold', '/home/lgrose/bitbucket/ipython_notebooks/Bayesian/fold.py')
#variogram = imp.load_source('fold', '/home/lgrose/bitbucket/ipython_notebooks/Bayesian/variogram.py')
#from variogram import *
from fold import *
import math
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
get_ipython().magic('matplotlib inline')
# In[2]:
import pymc as pymc
# In[3]:
from bayesian_fourier_series import *
mpl.rcParams.update({'font.size': 18})
# In[4]:
# --- Synthetic data: parasitic fold built from two superimposed Fourier folds ---
# wl1 is the short (parasitic) wavelength, wl2 the long (host) wavelength.
# N is overwritten below (only 25 observations are actually kept).
wl1= 15.#15.
wl2 = 150.
N = 100
foldfourier = fourierFold(30,30,wl1,0,40,20,38)
foldfourier2 = fourierFold(30,80,wl2,0,40,20,38)
foldr = foldRotation(foldfourier)
foldr2 = foldRotation(foldfourier2)
# x_ is a dense profile coordinate (0..300); x is a shuffled pool of
# integer sample locations restricted to 15..64 — i.e. a single limb of
# the long-wavelength host fold (hence "single limb" in the title).
x_ = np.linspace(0,300,300)
x = np.arange(15, 65,1)#np.linspace(0,150,100)
#x = np.hstack([x,np.arange(100,200)])
np.random.shuffle(x)
# In[5]:
# Keep N=25 sample sites, sum the two fold trains into one profile, and
# convert the profile gradient into a limb rotation angle in degrees.
mixture = True
N = 25
x = x[:N]
fold= (foldfourier2.pos(x_))+foldfourier.pos(x_)
foldpts = (foldfourier2.pos(x))+foldfourier.pos(x)
x__ = x_
foldrot = np.arctan(np.gradient(fold))
foldrot*=180.0/np.pi
#foldrot+=30
#if mixture:
# a is drawn from all ones here, so no observation polarity is flipped;
# the commented lines below were used for polarity-flip experiments.
a = np.random.choice([1,1,1,1,1,1,1],N)
#    foldrot*=a
#x_ = []
#y_ = []
#add some noise to the x and y observations - simulate some uncertainty
# shake_xy perturbs locations/angles; foldrot[x] uses the integer sample
# positions as indices into the dense profile.
for i in range(1):
    xx1, yy1 = shake_xy(x,foldrot[x],1)
#    if mixture:
#        yy1*=a
#    x_.append(xx1)
#    y_.append(yy1)
#plt.plot(x,foldrot[x],'ro')
#plt.figure()
#print fourierseries.wavelengths
# In[6]:
# Quick visual check: sampled points over the full synthetic profile.
plt.plot(x,foldpts,'bo')
plt.plot(x_,fold)
# In[7]:
# --- Fit two Bayesian Fourier-series models to the noisy limb data ---
# Model 1 ("uncorrected"): wavelength prior taken from the S-variogram
# guess only; just the first (short) wavelength is kept.
fourierseries = bayesian_fourier_series_model(xx1,yy1,2)
fourierseries.add_reference_foldshape(x_,fold)
fourierseries.add_reference_foldlocations(x,foldpts)
fourierseries.add_reference_foldprofile(x_,foldrot)
fourierseries.find_wavelength()
# keep only the first wavelength estimate for the uncorrected model
fourierseries.wavelengths= [fourierseries.wavelengths[0]]#,180.]
#if len()
#fourierseries.wavelengths.append(200.)
#sv = s_variogram(x,foldrot[x])
#sv.setup()
#sv.find_wavelengths()
#plt.plot(sv.lags,sv.variance)
#plt.figure()
#print fourierseries.semivariogram.lags, fourierseries.semivariogram.variance
#plt.plot(fourierseries.semivariogram.lags,fourierseries.semivariogram.variance,'bo')
#print fourierseries.wavelengths
#fourierseries.wavelengths[1] = 180.
#fourierseries.wavelengths = [13.066,180]
fourierseries.setup_inference()#_mixture()
#fourierseries.find_map()
# MCMC: 10000 samples with 5000 burn-in
fourierseries.run_sampler(10000,5000)
#scores = pymc.geweke(fourierseries.S, intervals=7)
#pymc.Matplot.geweke_plot(scores)
#pymc.Matplot.geweke_plot(pymc.geweke(fourierseries.S.trace('c_0')[:,]))
# Model 2 ("corrected"): same data, but the known host wavelength (150)
# is added as a second wavelength prior.
fourierseries_corrected = bayesian_fourier_series_model(xx1,yy1,2)
fourierseries_corrected.add_reference_foldshape(x_,fold)
fourierseries_corrected.add_reference_foldlocations(x,foldpts)
fourierseries_corrected.add_reference_foldprofile(x_,foldrot)
fourierseries_corrected.find_wavelength()
fourierseries_corrected.wavelengths= [fourierseries.wavelengths[0],150.]
#if len()
#fourierseries.wavelengths.append(200.)
#sv = s_variogram(x,foldrot[x])
#sv.setup()
#sv.find_wavelengths()
#plt.plot(sv.lags,sv.variance)
#plt.figure()
#print fourierseries.semivariogram.lags, fourierseries.semivariogram.variance
#plt.plot(fourierseries.semivariogram.lags,fourierseries.semivariogram.variance,'bo')
#print fourierseries.wavelengths
#fourierseries.wavelengths[1] = 180.
#fourierseries.wavelengths = [13.066,180]
fourierseries_corrected.setup_inference()#_mixture()
#fourierseries.find_map()
fourierseries_corrected.run_sampler(10000,5000)
#scores = pymc.geweke(fourierseries.S, intervals=7)
#pymc.Matplot.geweke_plot(scores)
#pymc.Matplot.geweke_plot(pymc.geweke(fourierseries.S.trace('c_0')[:,]))
# In[12]:
# --- Figure 1: 3x3 summary comparing the uncorrected (rows 0-1) and
# corrected (row 2) models: fold profile, S-plot, S-variogram, density
# maps and wavelength prior/posterior violins ---
fig = bayesian_fourier_series_figure(fourierseries)
fig.fig, fig.ax = plt.subplots(3,3,figsize=(20,15))
#fourierseries.semivariogram.
fig.plot_variogram()
fig.plot_reference_fold_points()
fig.plot_reference_fold_points('ko',0,1)
#fig.plot_reference_fold_shape('b-',0,1)
fig.plot_reference_fold_points('ko',0,2)
# correction is all zeros here because a contains only +1 values
correction = a < 0#
correction = correction.astype(int)*180
#fig.plot_reference_fold_shape('b-',0,1)
#plot_normal(0,0,xb,foldpts,foldrot[xb]+correction,10)
#plot_normal(1,0,xb,foldpts,foldrot[xb]+correction,10)
#plot_normal(2,0,xb,foldpts,foldrot[xb],10)
fig.plot_reference_fold_profile()
fig.plot_reference_fold_profile_points()
fig.plot_reference_fold_profile_points('ko',1,1)
fig.plot_reference_fold_profile_points('ko',1,2)
#fig.ax[2][1].plot(xb[a<0],foldrot[xb[a<0]],'ko')
#fig.ax[2][1].plot([xb[a<0],xb[a<0]],[foldrot[xb[a<0]],-foldrot[xb[a<0]]],color='black',linestyle='dashed',alpha=0.5)
#fig.plot_reference_fold_profile('b-',1,1)
fig.plot_reference_fold_shape()
# panel titles and axis labels
fig.ax[0][0].set_title(r'A. Fold profile')
fig.ax[0][1].set_title(r'B. S-Plot')
fig.ax[0][2].set_title(r'C. S-Variogram')
fig.ax[1][1].set_title(r'E. Density map showing infered profile')
fig.ax[1][0].set_title(r'D. Density map for fold shape')
fig.ax[1][2].set_title(r'F. PDF for wavelength(s)')
fig.ax[2][1].set_title(r'H. Density map showing infered profile ($\lambda_2$ prior)')
fig.ax[2][0].set_title(r'G. Density map for fold shape ($\lambda_2$ prior)')
fig.ax[2][2].set_title(r'I. PDF for wavelength(s)')
fig.ax[0][0].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[0][1].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[2][0].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[2][1].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[0][2].set_xlabel(r'Step distance ($z$ coordinate)')
fig.ax[1][2].set_xlabel(r'Wavelength $m$')
fig.ax[2][2].set_xlabel(r'Wavelength $m$')
#g.ax[1][2].set_ylabel(r'Probability')
fig.ax[2][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
fig.ax[0][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
fig.ax[0][2].set_ylabel(r'Variance $\alpha_L$')
fig.ax[1][0].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[1][0].set_ylabel(r'Fold shape ($m$)')
fig.ax[0][0].set_ylabel(r'Fold shape ($m$)')
fig.ax[2][0].set_ylabel(r'Fold shape ($m$)')
fig.ax[1][1].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[1][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
# posterior curve density and MAP overlays
H2 = fig.plot_curve_density(0,300)
fig.plot_fold_heat_map(fig.fourier_series_model.reference_fold_y[0])
intercept = fig.fourier_series_model.reference_fold_y[0]
#for i in range(10):
#    c, w = fig.plot_random_curve(i)
#print c,w
fig.plot_map(0,300,'k--')
#print ,fig.fourier_series_model.map.qw[1].value
colours = ['b-','r-']
#for i in range(len(fig.fourier_series_model.wavelengths)):
temp = fig.ax[0][0]
i = 0
#temp = fig.plot_kde('c_%i'%(i),1,2,'c_%i'%i,colours[i])
#except:
#    print "fail"
#if i == 0:
#    temp = fig.plot_normal_pdf(1,2,fig.fourier_series_model.wavelengths[i],\
#                        fig.fourier_series_model.wavelengths[i]/3.,\
#                        'prior_wavelength_%i'%i, colours[i]+'-')
#else:
#    fig.plot_normal_pdf2(temp,fig.f-ourier_series_model.wavelengths[i],\
#                        fig.fourier_series_model.wavelengths[i]/3.,\
#                        'prior_wavelength_%i'%i, colours[i]+'-')
#    print fig.fourier_series_model.map.qw[i].value
#temp.legend(loc=i)
#fig.plot
# violin plots: Gaussian wavelength prior (mean w, sd w/3) vs posterior
# trace samples, for the uncorrected model...
arrays = []
pos = []
labels = []
c = len(fourierseries.wavelengths)/2
for i in range(len(fourierseries.wavelengths)):
    arrays.append(np.random.normal(fourierseries.wavelengths[i],fourierseries.wavelengths[i]/3.,1000))
    labels.append(r"$\lambda$ prior")
    pos.append(c)
    c-=1
    arrays.append(fourierseries.S.trace('qw_%i'%i)[:][:])
    labels.append(r"$\lambda$ posterior")
    pos.append(c)
    c-=1
fig.plot_violin(1,2,labels,pos,arrays)
# ...and the same arrays built for the corrected (two-wavelength) model
arrays = []
pos = []
labels = []
c = len(fourierseries_corrected.wavelengths)/2
for i in range(len(fourierseries_corrected.wavelengths)):
    arrays.append(np.random.normal(fourierseries_corrected.wavelengths[i],fourierseries_corrected.wavelengths[i]/3.,1000))
    labels.append(r"$\lambda_%i$ prior"%(i+1))
    pos.append(c)
    c-=1
    arrays.append(fourierseries_corrected.S.trace('qw_%i'%i)[:][:])
    labels.append(r"$\lambda_%i$ posterior"%(i+1))
    pos.append(c)
    c-=1
# In[150]:
fig.fig.savefig("parasitic_fold_one_limb_prior.svg")
# In[126]:
# --- Figure 2: 2x3 summary for the uncorrected (single-wavelength)
# model only, using the figure class's own layout ---
fig = bayesian_fourier_series_figure(fourierseries)
#fourierseries.semivariogram.
fig.plot_variogram()
fig.plot_reference_fold_points()
fig.plot_reference_fold_points('bo',0,1)
#fig.plot_reference_fold_shape('b-',0,1)
fig.plot_reference_fold_profile()
fig.plot_reference_fold_profile_points()
fig.plot_reference_fold_profile_points('bo',1,1)
#fig.plot_reference_fold_profile('b-',1,1)
fig.plot_reference_fold_shape()
# panel titles and axis labels
fig.ax[0][0].set_title(r'A. Fold profile')
fig.ax[0][1].set_title(r'B. S-Plot')
fig.ax[0][2].set_title(r'C. S-Variogram')
fig.ax[1][1].set_title(r'E. Density map showing infered profile')
fig.ax[1][0].set_title(r'D. Density map for fold shape')
fig.ax[1][2].set_title(r'F. PDF for wavelength(s)')
fig.ax[0][0].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[0][1].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[0][2].set_xlabel(r'Step distance ($z$ coordinate)')
fig.ax[1][2].set_xlabel(r'Wavelength $m$')
#g.ax[1][2].set_ylabel(r'Probability')
fig.ax[0][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
fig.ax[0][2].set_ylabel(r'Variance $\alpha_L$')
fig.ax[1][0].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[1][0].set_ylabel(r'Fold shape ($m$)')
fig.ax[0][0].set_ylabel(r'Fold shape ($m$)')
fig.ax[1][1].set_xlabel(r'Fold frame $z$ coordinate')
fig.ax[1][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
# posterior curve density and MAP overlay
H2 = fig.plot_curve_density(0,300)
fig.plot_fold_heat_map(fig.fourier_series_model.reference_fold_y[0])
#for i in range(10):
#    c, w = fig.plot_random_curve(i)
#print c,w
fig.plot_map(0,300,'k--')
#print ,fig.fourier_series_model.map.qw[1].value
colours = ['b-','r-']
#for i in range(len(fig.fourier_series_model.wavelengths)):
temp = fig.ax[0][0]
i = 0
# NOTE(review): the next comment line was corrupted by unrelated pasted
# text in the original file; restored to the plain commented-out call.
#temp = fig.plot_kde('c_%i'%(i),1,2,'c_%i'%i,colours[i])
#except:
#    print "fail"
#if i == 0:
#    temp = fig.plot_normal_pdf(1,2,fig.fourier_series_model.wavelengths[i],\
#                        fig.fourier_series_model.wavelengths[i]/3.,\
#                        'prior_wavelength_%i'%i, colours[i]+'-')
#else:
#    fig.plot_normal_pdf2(temp,fig.fourier_series_model.wavelengths[i],\
#                        fig.fourier_series_model.wavelengths[i]/3.,\
#                        'prior_wavelength_%i'%i, colours[i]+'-')
#    print fig.fourier_series_model.map.qw[i].value
#temp.legend(loc=i)
#fig.plot
# violin plot comparing the wavelength prior and posterior samples
arrays = []
pos = []
labels = []
c = len(fourierseries.wavelengths)
for i in range(len(fourierseries.wavelengths)):
    arrays.append(np.random.normal(fourierseries.wavelengths[i],fourierseries.wavelengths[i]/3.,1000))
    labels.append(r"$\lambda$ prior")
    pos.append(c)
    c-=1
    arrays.append(fourierseries.S.trace('qw_%i'%i)[:][:])
    labels.append(r"$\lambda$ posterior")
    pos.append(c)
    c-=1
fig.plot_violin(1,2,labels,pos,arrays)
plt.tight_layout()
for i in range(2):
    for j in range(2):
        fig.ax[i][j].set_xlim(0,300)
fig.ax[0][0].set_ylim(-500,500)
fig.ax[1][0].set_ylim(-500,500)
# hide the box around the violin-plot axis
fig.ax[1][2].spines['top'].set_visible(False)
fig.ax[1][2].spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
fig.ax[1][2].spines['left'].set_visible(False)
fig.ax[1][2].tick_params(top='off', bottom='on', left='off', right='off', labelleft='on', labelbottom='on')
# In[31]:
# NOTE(review): leftover notebook fragment. `axes` and `data` are not
# defined anywhere above (`pos` holds violin positions from the previous
# cell), so this call raises a NameError if executed as-is — it appears
# to be pasted from a matplotlib violin-plot demo. TODO: remove or fix.
axes[0, 0].violinplot(data, pos, points=20, widths=0.3,
                      showmeans=True, showextrema=True, showmedians=True)
fourierseries.sample_using_emcee()
print fourierseries.model_e
# In[15]:
# posterior mean/std of the first Fourier coefficient trace
print np.mean(fourierseries.S.trace('c_0')[:,]), np.std(fourierseries.S.trace('c_0')[:,])
# In[17]:
# scratch loop (index check)
for i in range(2*3):
    print i
# In[61]:
# --- Ad-hoc inspection of the H2 curve-density matrix, plus exports ---
from scipy import stats
print np.min(H2.flatten())
print np.max(H2.flatten())
#H = stats.boxcox(H2.flatten())
# mask empty density bins so they render as blank instead of zero colour
test = H2.copy()
test[test==0.0] = np.nan
plt.imshow((np.rot90(test)), extent=[0, 300, -90, 90],aspect='auto')
#plt.imshow(masked_array.nonzero(), interpolation='nearest', cmap=cmap)
#print np.mean(H2), np.std(H2), np.max(H2)
#plt.imshow(np.rot90(H),cmap="Greys")
# In[35]:
# scratch: log curve (overwrites the global x sample locations!)
x = np.linspace(1,100,100)
plt.plot(x,np.log(x))
# In[49]:
#a = plt.hist(H.flatten(),100)
#plt.figure()
a2 = plt.hist(np.nonzero(H2.flatten()),100)
# In[32]:
print fig.C_[:][0,:]
#pymc.Matplot.plot(fourierseries.S)
# In[31]:
# save the current figure under ./export/ (directory must already exist)
import pickle
path = './export/'
name = 'parasitic_fold_15_150_flipped_data_prior_proof_of_concept'
fig.fig.savefig(path+name+'.png')
fig.fig.savefig(path+name+'.svg')
#pickle.dump(fourierseries,open(path+name+'.pkl',"wb"))
# In[20]:
# scratch: print the coefficient index layout for wl=3 wavelengths with
# N=5 Fourier terms each (overwrites the global N!)
wl = 3
N = 5
for i in range(wl):
    for j in range(1,N+1):
        #print i, j
        #print (2*j-1)*2-i
        print (2*j-1)+i*2*N
        print (2*j)+i*2*N
# In[128]:
# build a 3x3 grid of axes via subplot2grid (alternative to plt.subplots)
#ax = plt.subplot2grid((2,2),(0, 0))
ax = []
ni = 3
nj = 3
for i in range(ni):
    ax1 = []
    for j in range(nj):
        axt = plt.subplot2grid((ni,nj), (i,j))
        ax1.append(axt)
    ax.append(ax1)
# In[23]:
# Histogram the sampled curves fig.v into a 2D density H over
# (x position, rotation angle binned into 180 bins on [-90, 90]).
v = fig.v
print len(v)
#ymin = -90
#ymax = 90
x = fig.x
vv = np.linspace(-90,90,180)
H = np.zeros((len(x),len(vv)))
for i in range(len(x)):
    for j in range(len(v)):
        # index of the bin in vv nearest to the sample value v[j][i]
        vind = np.nonzero(np.abs(vv-v[j][i]) == np.min(np.abs(vv-v[j][i])))[0]
        H[i,vind[0]]+=1
plt.matshow(np.rot90(H),extent=[0,600,-90,90],aspect='auto')
# In[ ]:
def run_model_test(wl1,wl2,mixture,samples,x,N,use_wl):
    """Build a synthetic two-wavelength fold, fit a composite Bayesian
    Fourier-series model to noisy limb-rotation observations, and plot
    and save the resulting summary figure under ./figures/.

    Parameters:
        wl1, wl2 -- wavelengths of the two superimposed fold trains
        mixture  -- if True, sign-flip ~1/7 of the observations and use
                    the mixture variant of the model
        samples  -- number of MCMC samples (burn-in is samples*0.5)
        x        -- pool of integer sample locations along the profile
        N        -- number of observations kept from x
        use_wl   -- if True, override the S-variogram wavelength guesses
                    with the true (wl1, wl2)

    NOTE(review): relies on the module-level figure object `fig` for
    axis titles/labels, so a figure must already exist when called.
    """
    # synthetic fold = sum of two Fourier fold trains on a 0..600 profile
    foldfourier = fourierFold(30,40,wl1,0,40,20,0)
    foldfourier2 = fourierFold(30,80,wl2,0,40,20,20)
    foldr = foldRotation(foldfourier)
    foldr2 = foldRotation(foldfourier2)
    x_ = np.linspace(0,600,600)
    #x = np.arange(0, 600,1)#np.linspace(0,150,100)
    #np.random.shuffle(x)
    x = x[:N]
    fold= (foldfourier.pos(x_))+foldfourier2.pos(x_)
    foldpts = (foldfourier.pos(x))+foldfourier2.pos(x)
    x__ = x_
    # limb rotation angle (degrees) from the profile gradient
    foldrot = np.arctan(np.gradient(fold))
    foldrot*=180.0/np.pi
    if mixture:
        # polarity flips: roughly 1 in 7 observations is sign-inverted
        a = np.random.choice([-1,1,1,1,1,1,1],N)
        #foldrot*=a
    x_ = []
    y_ = []
    #add some noise to the x and y observations - simulate some uncertainty
    for i in range(1):
        xx1, yy1 = shake_xy(x,foldrot[x],10)
        if mixture:
            yy1*=a
        x_.append(xx1)
        y_.append(yy1)
    # the mixture variant also receives the polarity vector a
    fourierseries = compositeBayesianFourierSeries(x_,y_,1)
    if mixture:
        fourierseries = compositeBayesianFourierSeries(x_,y_,1,a)
    fourierseries.plot_fold_profile(x__,fold)
    fourierseries.find_wavelength()
    fig.ax[0][2].set_title(r'C. S-Variogram ')
    fourierseries.plot_variogram()
    #fourierseries.set_wavelength_sd = [1]
    print(fourierseries.wavelengths)
    if use_wl:
        # replace the S-variogram guesses with the known true wavelengths
        fourierseries.wavelengths = [wl1,wl2]
    print(fourierseries.wavelengths)
    if mixture:
        s = fourierseries.setup_inference_mixture()
    if not mixture:
        s = fourierseries.setup_inference()
    fourierseries.run_sampler(samples, samples*.5)
    minx = 0
    maxx = 600
    fourierseries.plot_curve_density(minx,maxx)
    if not mixture:
        fourierseries.plot_fold_heat_map(fold[0])
        fourierseries.plot_map(minx,maxx,fold[0])
    #for i in range(len(fourierseries.wavelengths)):
    #    for c in range(3):
    #        fourierseries.plot_kde("c_%i_%i" %(c,i),0,3)
    #plt.figure()
    for i in range(len(fourierseries.wavelengths)):
        w = fourierseries.wavelengths[i]
        try:
            fourierseries.plot_kde("qw_%i" %i,1,2)
            #kde = stats.gaussian_kde(np.normal(wfourierseries.wavelengths[i],fourierseries.wavelengths[i]/1.125,size=1000))
        except Exception:
            # plot_kde can fail (e.g. missing trace); continue plotting
            print("Exception")
        # overlay the Gaussian wavelength prior (mean w, sd 0.125*w);
        # x is deliberately reused here as the pdf evaluation grid
        x = np.linspace(0,w+w,1000)
        fig.ax[1][2].plot(x,plt.mlab.normpdf(x,w,w*.125),'k--')
        fig.ax[1][2].axvline(w,color='k',linestyle='--')
    # panel titles and axis labels
    fig.ax[0][0].set_title(r'A. Fold profile')
    fig.ax[0][1].set_title(r'B. S-Plot')
    fig.ax[1][1].set_title(r'E. Density map showing infered profile')
    fig.ax[1][0].set_title(r'D. Density map for fold shape')
    fig.ax[1][2].set_title(r'F. PDF for wavelengths')
    fig.ax[0][0].set_xlabel(r'Fold frame $z$ coordinate')
    fig.ax[0][1].set_xlabel(r'Fold frame $z$ coordinate')
    fig.ax[0][2].set_xlabel(r'Step distance ($z$ coordinate)')
    fig.ax[1][2].set_xlabel(r'Wavelength $m$')
    fig.ax[1][2].set_ylabel(r'Probability')
    fig.ax[0][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
    fig.ax[0][2].set_ylabel(r'Variance $\alpha_L$)')
    fig.ax[1][0].set_xlabel(r'Fold frame $z$ coordinate')
    fig.ax[1][0].set_ylabel(r'Fold shape ($m$)')
    fig.ax[0][0].set_ylabel(r'Fold shape ($m$)')
    fig.ax[1][1].set_xlabel(r'Fold frame $z$ coordinate')
    fig.ax[1][1].set_ylabel(r'Fold limb rotation angle $\alpha_L$')
    #fig.ax[1][1].legend()
    #fig.ax[1][2].legend()
    fig.ax[1][0].set_ylim(-200,200)
    fig.ax[0][0].set_ylim(-200,200)
    # FIX: removed a stray backtick that made this line (and hence the
    # whole module) a SyntaxError
    fourierseries.set_xlims(minx,maxx)
    plt.savefig('figures/fourier_series_wl1_%i_wl2_%i_points_%i.svg'%(wl1,wl2,N))
    plt.savefig('figures/fourier_series_wl1_%i_wl2_%i_points_%i.png'%(wl1,wl2,N))
    #pymc.Matplot.plot(fourierseries.S)
# In[2]:
import numpy as np
import pymc
import matplotlib.pyplot as plt
from scipy import stats
class bayesian_fourier_series_model:
    """Bayesian Fourier-series model of fold limb rotation (Python 2 / pymc 2).

    Stores limb-rotation observations, guesses dominant wavelengths from a
    semivariogram, builds a pymc model whose unknowns are Fourier-series
    coefficients and wavelengths, samples it with MCMC, and plots results.

    NOTE(review): plotting methods draw on ``self.ax`` (a 2-D array of
    matplotlib axes) and read ``self.black_white``; both appear to be set by
    external driver code -- confirm before reuse.  Indentation of this file
    was reconstructed from a whitespace-stripped dump.
    """
    def __init__(self,xdata,ydata,N):
        self.xdata = xdata
        # Transformed observable used for fitting.
        # NOTE(review): ``np.tan(ydata)*np.pi/180.`` applies tan to a value in
        # degrees *before* converting; the inverse used elsewhere is
        # ``np.arctan(.)*180/np.pi`` -- verify the intended transform.
        self.ydata = np.tan(ydata)*np.pi/180.
        self.wavelength = 0.0
        self.setup = False           # True once a pymc model dict was built
        self.N = N                   # number of Fourier harmonics
        self.use_mixture = False
        self.xmin = min(self.xdata)
        self.xmax = max(self.xdata)
        self.wavelength_sd_defined = False
    def set_wavelength_sd(wavelength_sd):
        # NOTE(review): missing ``self`` parameter -- calling this as an
        # instance method would raise a TypeError; appears unused in this file.
        self.wavelength_sd = wavelength_sd
        self.wavelength_sd_defined = True
    def find_wavelength(self,step=0,nlags=0):
        """Estimate one or two dominant wavelengths from a semivariogram."""
        self.semivariogram = semiVariogram(self.xdata,np.arctan(self.ydata)*180./np.pi)
        cont = True
        self.wavelengths = []
        wl1, wl2 = self.semivariogram.find_wavelengths(step,nlags)
        #print vario, wl
        print "Wavelength guesses: ", wl1, wl2
        # If the two guesses agree to within 20%, treat them as a single
        # wavelength (their mean); otherwise keep both.
        if np.abs(wl1 - wl2) < wl1*.2:
            self.wavelengths.append((wl1+wl2)/2.)
            return
        self.wavelengths.append(wl1)
        self.wavelengths.append(wl2)
    def setup_inference(self):
        """Build the pymc model (no mixture / orientation-flip variables)."""
        #depending on the number of wavelengths
        #self.wavelengths = [self.wavelengths[len(self.wavelengths)-1]]
        wavelength_number = len(self.wavelengths)
        t = 1. / 5.**2   # precision of the Normal prior on the coefficients
        #C_sigs = pymc.Container([pymc.HalfCauchy("c_sigs_%i_%i" % (i, x), beta = 10, alpha=1) \
        #    for i in range(1+2*self.N) for x in range(wavelength_number)])
        # One coefficient per (term, wavelength): constant + N * (cos, sin).
        C = pymc.Container([pymc.Normal("c_%i_%i" % (i, x), mu=0, tau = t) for i in range(1+2*self.N) for x in range(wavelength_number)])
        #i_ = pymc.Container([pymc.DiscreteUniform('i_%i' %i,lower=0,upper=1) for i in range(len(self.xdata))])
        # Improper 1/|value| (Jeffreys-style) prior on the noise scale.
        @pymc.stochastic(observed=False)
        def sigma(value=1):
            return -np.log(abs(value))
        @pymc.stochastic(observed=False)
        def sigma3(value=1):
            return -np.log(abs(value))
        qw_sigs = pymc.Container([pymc.HalfCauchy("qw_sigs_%i" % x, beta = 10, alpha=1) for x in range(wavelength_number)])
        # Wavelength priors centred on the semivariogram guesses.
        if self.wavelength_sd_defined:
            qw = pymc.Container([pymc.distributions.Lognormal('qw_%i' %x,mu=self.wavelengths[x], tau = 1. / self.wavelength_sd[x] ** 2) for x in range(wavelength_number)])
        else:
            qw = pymc.Container([pymc.distributions.Normal('qw_%i' %x,mu=self.wavelengths[x], tau = 1. / self.wavelengths[x]*.125) for x in range(wavelength_number)])
        def fourier_series(C,N,QW,x,wavelength_number):
            # Evaluate the series at every sample point x[ii].
            v = np.array(x)
            v.fill(0.0)
            v = v.astype('float')
            for ii in range(len(x)):
                for w in range(wavelength_number):
                    # NOTE(review): adds the constant term to the *whole*
                    # array once per sample point; indentation was
                    # reconstructed, so this may belong one loop level up.
                    v += C[w]
                    for i in range(1,N+1):
                        v[ii] = v[ii] + C[(2*i-1)*wavelength_number+w]*np.cos(2*np.pi/QW[w] * i * (x[ii])) + C[(2*i)*wavelength_number+w]*np.sin(2*np.pi/QW[w] * i * (x[ii]))
                    #if i_[ii] == 0:
                    #    v[ii] = -v[ii]
            return v#np.sum(v)
        self.vector_fourier_series = np.vectorize(fourier_series)
        # Define the form of the model and likelihood
        @pymc.deterministic
        def y_model(C=C,x=self.xdata,qw=qw,nn=self.N,wavelength_number=wavelength_number):
            return fourier_series(C,nn,qw,x,wavelength_number)
        y = pymc.Normal('y', mu=y_model, tau=1. / sigma ** 2, observed=True, value=self.ydata)
        # package the full model in a dictionary
        self.model1 = dict(C=C, qw=qw, sigma=sigma,qw_sigs=qw_sigs,
                           y_model=y_model, y=y,x_values=self.xdata,y_values=self.ydata)
        self.setup = True
        self.mcmc_uptodate = False
        return self.model1
    def setup_inference_mixture(self):
        """Build the pymc model with per-point orientation indicators ``i_``.

        Same structure as setup_inference() plus HalfCauchy hyper-priors on
        the coefficient scales and a 0/1 indicator per data point (the flip
        logic itself is commented out inside fourier_series).
        """
        #depending on the number of wavelengths
        #self.wavelengths = [self.wavelengths[len(self.wavelengths)-1]]
        wavelength_number = len(self.wavelengths)
        t = 1. / 2.5**2
        C_sigs = pymc.Container([pymc.HalfCauchy("c_sigs_%i_%i" % (i, x), beta = 10, alpha=1) for i in range(1+2*self.N) for x in range(wavelength_number)])
        C = pymc.Container([pymc.Normal("c_%i_%i" % (i, x), mu=0, tau = 1. / C_sigs[i*wavelength_number+x]**2) for i in range(1+2*self.N) for x in range(wavelength_number)])
        i_ = pymc.Container([pymc.DiscreteUniform('i_%i' %i,lower=0,upper=1) for i in range(len(self.xdata))])
        @pymc.stochastic(observed=False)
        def sigma(value=1):
            return -np.log(abs(value))
        @pymc.stochastic(observed=False)
        def sigma3(value=1):
            return -np.log(abs(value))
        qw_sigs = pymc.Container([pymc.HalfCauchy("qw_sigs_%i" % x, beta = 10, alpha=1) for x in range(wavelength_number)])
        if self.wavelength_sd_defined:
            qw = pymc.Container([pymc.distributions.Lognormal('qw_%i' %x,mu=self.wavelengths[x], tau = 1. / self.wavelength_sd[x] ** 2) for x in range(wavelength_number)])
        else:
            # No SD supplied: flat prior up to twice the guessed wavelength.
            qw = pymc.Container([pymc.distributions.Uniform('qw_%i' %x,lower=0., upper=self.wavelengths[x]*2) for x in range(wavelength_number)])
        def fourier_series(C,N,QW,x,wavelength_number,i_):
            v = np.array(x)
            v.fill(0.0)
            v = v.astype('float')
            for ii in range(len(x)):
                for w in range(wavelength_number):
                    # NOTE(review): see setup_inference -- same constant-term
                    # accumulation concern applies here.
                    v += C[w]
                    for i in range(1,N+1):
                        v[ii] = v[ii] + C[(2*i-1)*wavelength_number+w]*np.cos(2*np.pi/QW[w] * i * (x[ii])) + C[(2*i)*wavelength_number+w]*np.sin(2*np.pi/QW[w] * i * (x[ii]))
                    #if i_[ii] == 0:
                    #    v[ii] = -v[ii]
            return v#np.sum(v)
        self.vector_fourier_series = np.vectorize(fourier_series)
        # Define the form of the model and likelihood
        @pymc.deterministic
        def y_model(C=C,x=self.xdata,qw=qw,nn=self.N,wavelength_number=wavelength_number,i_=i_):
            return fourier_series(C,nn,qw,x,wavelength_number,i_)
        y = pymc.Normal('y', mu=y_model, tau=1. / sigma ** 2, observed=True, value=self.ydata)
        # package the full model in a dictionary
        self.model1 = dict(C=C, qw=qw, sigma=sigma,qw_sigs=qw_sigs,
                           y_model=y_model, y=y,x_values=self.xdata,y_values=self.ydata,i_=i_)
        self.setup = True
        self.mcmc_uptodate = False
        return self.model1
    def run_sampler(self, samples = 10000, burnin = 5000):
        """Run MCMC on the prepared model and cache the sampler in self.S."""
        self.S = pymc.MCMC(self.model1)
        self.S.sample(iter=samples, burn=burnin)
        self.mcmc_uptodate = True
        return self.S
    def find_map(self):
        """Fit the maximum a-posteriori estimate; stored in self.map."""
        self.map = pymc.MAP(self.model1)
        self.map.fit()
    def plot_variogram(self):
        # Draw the experimental semivariogram onto the top-right axes.
        plt.sca(self.ax[0][2])
        self.semivariogram.plot()
    def plot_curve_density(self,xmin,xmax):
        """Plot a 2-D histogram of posterior limb-rotation curves on ax[1][1].

        Side effects: caches the coefficient traces in self.C_, wavelength
        traces in self.qw, the evaluation grid in self.x and the per-sample
        curves (degrees) in self.v for use by the other plotting methods.
        """
        #a = np.zeros(self.xdata[]
        #for i in range(len(self.xdata)):
        #    sign = stats.mode(self.S.trace("i_%i"%i)[:])[0][0]
        #    if sign == 0:
        #        sign = -1
        #    a.append(sign)
        #a = np.array(a)
        if self.mcmc_uptodate == False:
            self.run_sampler()
        wavelength_number = len(self.wavelengths)
        self.C_ = []
        for i in range (1+2*self.N):
            for x in range(wavelength_number):
                self.C_.append(self.S.trace('c_%i_%i' %(i,x))[:])
        self.qw = []
        for x in range(wavelength_number):
            self.qw.append(self.S.trace('qw_%i' %x)[:])
        ends_ = (self.xmax-self.xmin)*2   # NOTE(review): unused
        x = np.linspace(xmin,xmax,600)
        # v: one curve per posterior sample (rows) evaluated on x (columns).
        v = np.array((self.C_[0][:,None]))
        v.fill(0.0)
        for w in range(wavelength_number):
            v += self.C_[w][:,None]
            for i in range(1,self.N+1):
                v = v + self.C_[(2*i-1)*wavelength_number+w][:,None]* np.cos(2*np.pi/self.qw[w][:,None] * i * x) + self.C_[(2*i)*wavelength_number+w][:,None] *np.sin(2*np.pi/self.qw[w][:,None] * i * x)
        self.v = np.arctan(v)*180.0/np.pi
        self.x = x
        #ymin = -90
        #ymax = 90
        # Flatten all sampled curves into (x, y) pairs for a 2-D histogram.
        x_f = np.tile(x,len(self.qw[0]))
        y_f = self.v.flatten()
        #values = np.vstack([x_f,y_f])
        #kernel = stats.gaussian_kde(values)
        #x, y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
        #positions = np.vstack([x.ravel(), y.ravel()])
        #f = np.reshape(kernel(positions).T, x.shape)
        #
        #
        #self.ax[1][1].imshow(np.log(np.rot90(f))+1, cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, -90, 90],aspect='auto')
        heatmap, xedges, yedges = np.histogram2d(x_f,y_f,bins=(600,360))
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        logheat = np.log(heatmap+1)   # log scale so sparse regions stay visible
        self.ax[1][1].imshow(logheat.T, extent=extent, origin='lower',cmap='Greys',aspect='auto')
        #ax.colorbar()
        if self.black_white:
            colour = 'k*'
        else:
            colour = 'b*'
        # Overlay the observed data (converted back to degrees).
        self.ax[1][1].plot(self.xdata,np.arctan(self.ydata)*180.0/np.pi,colour,markersize=4,alpha=0.7)
        return
    def plot_map(self,xmin,xmax,intercept):
        """Plot the MAP rotation curve (ax[1][1]) and the fold profile it
        implies by trapezoidal integration from ``intercept`` (ax[1][0])."""
        self.find_map()
        wavelength_number = len(self.wavelengths)
        x = np.linspace(xmin,xmax,600)
        map_v = np.array(x)
        map_v.fill(0.0)
        for w in range(wavelength_number):
            map_v += self.map.C[w].value
            for i in range(1,self.N+1):
                map_v = map_v + self.map.C[(2*i-1)*wavelength_number+w].value * np.cos(2*np.pi/ self.map.qw[w].value* i * x) + self.map.C[(2*i)*wavelength_number+w].value *np.sin(2*np.pi/self.map.qw[w].value*i*x)
        map_v = np.arctan(map_v)*180. / np.pi
        if self.black_white:
            colour = 'k--'
        else:
            colour = 'r-'
        self.ax[1][1].plot(x,map_v,colour)
        gradient = np.tan(map_v*np.pi/180.)
        #start all points at xmin = 0
        step = x[1] - x[0]
        # Integrate the gradient to recover the fold shape.
        p = []
        for i in range(len(self.x)):
            if not p:
                p.append(intercept)
                continue
            else:
                if i == (len(self.x) - 1):
                    p_ = p[len(p)-1] + ((gradient[i-1]+gradient[i]) / 2.) * step
                else:
                    p_ = p[len(p)-1] + ((gradient[i-1]+gradient[i+1]) / 2.) * step
                p.append(p_)
        self.ax[1][0].plot(x,p,colour)
    def plot_random_fold_shape(self,i):
        """Integrate posterior curve ``i`` into a fold profile and plot it.

        NOTE(review): references an undefined name ``intercept`` -- calling
        this method raises NameError; it likely should be a parameter as in
        plot_fold_heat_map().
        """
        y = self.v[i]
        gradient = np.tan(y*np.pi/180.)
        #start all points at xmin = 0
        step = self.x[1] - self.x[0]
        p = []
        for i in range(len(self.x)):
            if not p:
                p.append(intercept)
                continue
            else:
                if i == (len(self.x) - 1):
                    p_ = p[len(p)-1] + ((gradient[i-1]+gradient[i]) / 2.) * step
                else:
                    p_ = p[len(p)-1] + ((gradient[i-1]+gradient[i+1]) / 2.) * step
                p.append(p_)
        plt.plot(self.x,p)
    def plot_fold_heat_map(self, intercept):
        """2-D histogram of fold profiles integrated from every posterior
        curve, drawn on ax[1][0]; all profiles start at ``intercept``."""
        pr_ = []
        for i in range(len(self.qw[0])):
            # NOTE(review): the inner loop below reuses index name ``i``,
            # clobbering this outer loop variable each pass.
            y = self.v[i]
            gradient = np.tan(y*np.pi/180.)
            #start all points at xmin = 0
            step = self.x[1] - self.x[0]
            p = []
            for i in range(len(self.x)):
                if not p:
                    p.append(intercept)
                    continue
                else:
                    if i == (len(self.x) - 1):
                        p_ = p[len(p)-1] + ((gradient[i-1]+gradient[i]) / 2.) * step
                    else:
                        p_ = p[len(p)-1] + ((gradient[i-1]+gradient[i+1]) / 2.) * step
                    p.append(p_)
            pr_.append(p)
            #plt.plot(self.x,p)
        x_f = np.tile(self.x,len(self.qw[0]))
        y_f = np.array(pr_).flatten()
        heatmap, xedges, yedges = np.histogram2d(x_f,y_f,bins=(600,360))
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        logheat = np.log(heatmap+1)
        #plt.clf()
        self.ax[1][0].imshow(logheat.T, extent=extent, origin='lower',cmap='Greys',aspect='auto')
        #ax.colorbar()
        #plt.figure()
        #plt.plot(self.xdata,np.arctan(self.ydata)*180./np.pi,'bo',markersize=5,alpha=0.7)
    def plot_traces(self):
        # Built-in pymc trace plots for the whole sampler.
        pymc.Matplot.plot(self.S)
    def plot_trace(self,name):
        # Trace plot for a single variable (pass the pymc node itself).
        pymc.Matplot.plot(name)
    def plot_kde(self,name,x,y):
        """Plot a Gaussian KDE of the trace of variable ``name`` on ax[x][y];
        if the axes already hold data, draw on a twin y-axis instead."""
        d = self.S.trace(name)[:][:]
        minn = min(d)
        maxx = max(d)
        # Pad the evaluation range by a quarter of the data span on each side.
        diff = (maxx-minn) / 4.
        minn = minn - diff
        maxx = maxx + diff
        ind = np.linspace(minn,maxx,100)
        kde = stats.gaussian_kde(d)
        kdepdf = kde.evaluate(ind)
        #kdepdf = np.log(kdepdf)
        if self.ax[x][y].has_data():
            temp = self.ax[x][y].twinx()
            temp.plot(ind,kdepdf,'k-')
            return
        self.ax[x][y].plot(ind,kdepdf,label=name)
    def plot_random_curve(self,ii):
        """Plot one randomly chosen posterior curve plus the data, save it to
        random_fold_<ii>i.png, and return that sample's (C, qw) values.

        NOTE(review): the parameter ``ii`` only labels the output file; the
        plotted sample index ``i`` is drawn at random.
        """
        i = np.random.randint(0,len(self.v))
        y = self.v[i]
        plt.figure()
        plt.plot(self.x,y)
        if self.black_white:
            colour = 'ko'
        else:
            colour = 'bo'
        plt.plot(self.xdata,np.arctan(self.ydata)*180./np.pi,colour,markersize=4,alpha=1)
        plt.savefig("random_fold_%ii.png" %ii)
        C = []
        for j in range (self.N+1):
            C.append(self.C_[j][i])
        qw = []
        for w in range(len(self.wavelengths)):
            qw.append(self.qw[w][i,None])
        return C,qw
    def fourier_series(self,ii,x):
        """Evaluate posterior sample ``ii``'s series at ``x`` (degrees).

        NOTE(review): uses ``self.N_max`` (never set in this class) and
        indexes ``self.qw[ii,None]`` as if it were an array rather than the
        per-wavelength list built in plot_curve_density -- confirm intended
        usage before calling.
        """
        v = 0
        for i in range(1,self.N_max+1):
            v = v + self.C_[2*i-1][ii,None]*np.cos(2*np.pi/self.qw[ii,None] * i * x) + self.C_[2*i][ii,None] *np.sin(2*np.pi/self.qw[ii,None] * i * x)
        return np.arctan(v)*180.0/np.pi
    def calculate_shortenning(self,ii,xmin,xmax):
        """Approximate the arc length of sample ``ii``'s fold profile over
        [xmin, xmax] (used to estimate shortening)."""
        x = np.linspace(xmin,xmax,200)
        length =0
        c = 0   # NOTE(review): unused
        for i in range(1,len(x)):
            x1 = x[i-1]
            x2 = x[i]
            a = self.fourier_series(ii,x1)
            b = self.fourier_series(ii,x2)
            # Mean dip angle over the segment; segment length = dx / cos(dip).
            m = a + b
            #print m
            m /= 2.0
            length += np.abs((x2-x1) / np.cos((m)*np.pi/180.))
        return length
    def set_xlims(self,minx,maxx):
        # Apply a common x range to the three axes that share the z coordinate.
        self.ax[0][0].set_xlim(minx,maxx)
        self.ax[0][1].set_xlim(minx,maxx)
        self.ax[1][0].set_xlim(minx,maxx)
# In[5]:
x = np.arange(0,300,1)
x = np.hstack([x,np.arange(400,500,1)])
np.random.shuffle(x)
run_model_test(15,150.,False,2000,x,100,False)
# In[6]:
foldfourier = fourierFold(30,20,50,0,40,20,0)
foldfourier2 = fourierFold(30,80,500,0,40,20,20)
foldr = foldRotation(foldfourier)
foldr2 = foldRotation(foldfourier2)
x_ = np.linspace(0,600,600)
#x = np.arange(0, 600,1)#np.linspace(0,150,100)
#np.random.shuffle(x)
#x = x[:N]
fold= (foldfourier.pos(x_))+foldfourier2.pos(x_)
plt.plot(x_,fold)
# In[36]:
for p in range(200,20,-20):
#run_model_test(50.,500.,False,6000,x,p,True)
run_model_test(50.,500.,False,6000,x,p,False)
# In[34]:
for p in range(200,20,-20):
run_model_test(15,150.,False,2000,x,p,True)
run_model_test(15,150.,False,2000,x,p,False)
# In[67]:
run_model_test(30.,150.,False,6000,x,150,False)
# In[17]:
run_model_test(30.,150.,False,6000,x,150,False)
# In[68]:
run_model_test(30.,150.,False,6000,x,120,False)
# In[69]:
run_model_test(30.,150.,False,6000,x,90,False)
# In[70]:
run_model_test(30.,150.,False,6000,x,70,False)
# In[71]:
run_model_test(30.,150.,False,6000,x,50,False)
# In[72]:
run_model_test(30.,150.,False,6000,x,30,False)
# In[10]:
i = 10.2
# In[11]:
round(i)
# In[9]:
a = np.linspace(-1000,1000,100000)
plt.plot(a,-np.log(a))
# In[ ]:
H = np.zeros((len(rho), len(theta)))
for thIdx in range(len(theta)):
rhoVal = x*np.cos(theta[thIdx]*np.pi/180.0) + y*np.sin(theta[thIdx]*np.pi/180)
rhoIdx = np.nonzero(np.abs(rho-rhoVal) == np.min(np.abs(rho-rhoVal)))[0]
H[rhoIdx[0], thIdx] += 1
# In[8]:
list1 = [1.01, 2.01, 3.01]
str1 = ' '.join(str(e) for e in list1)
# In[9]:
print str1
| [
"lachlan.grose@monash.edu"
] | lachlan.grose@monash.edu |
c84a63fc1262c26bef2b889abb5f306b3510a48b | 30b4e4ecb33320112fed5634f6ec4aa9dac8140e | /README.py | bf12b0fc09dc09d7b7d55c0874e7b86d23a22603 | [] | no_license | shantanuo/easystemmer | 17f741dc1a7d08986fd37b42916fd6bde97aad37 | b43758decd57abc4628347e01dbbff3f53376de5 | refs/heads/master | 2020-03-26T23:49:28.817504 | 2018-08-27T11:27:31 | 2018-08-27T11:27:31 | 145,570,004 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # easystemmer
# Stemmer for Indian Names.
# Reads an Excel sheet of (wrong, correct) name pairs, stems both columns
# with IndianNameStemmer, and exports the pairs whose stemmed forms still
# disagree to to_study.xlsx for manual review.
from easystemmer import IndianNameStemmer
import pandas as pd
s = IndianNameStemmer()
#s.stem(['savithabai'])
df=pd.read_excel('my_file.xlsx')
df.columns=['wrong', 'correct']
# Tokenise each cell into words, then stem the word list.
df['updated']=df['wrong'].astype(str).apply(lambda x: x.split()).apply(s.stem)
df['updated1']=df['correct'].astype(str).apply(lambda x: x.split()).apply(s.stem)
# Keep only the rows where the two stemmed forms differ.
ndf=df[df['updated'] != df['updated1']]
ndf[['wrong', 'correct']].to_excel('to_study.xlsx')
| [
"noreply@github.com"
] | noreply@github.com |
5956c3718f21cd031cfca9d7885f4688f8381b3f | ae4b60b642042bfce925be7d282db5d21f50aa76 | /mundo2/exercicio041.py | d6bc53add82811435642f9998f7fb1b07c1ad256 | [
"MIT"
] | permissive | beatriznaimaite/Exercicios-Python-Curso-Em-Video | d72379e6ac9bf0665f5555357e56fb712ae87470 | e4213c2054a67d7948aa9023f2f0f33ab7e8eb96 | refs/heads/master | 2022-11-11T03:59:22.922251 | 2020-07-01T21:03:18 | 2020-07-01T21:03:18 | 267,382,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | """
A Confederação Nacional de Natação precisa de um programa que leia o ano de nascimento de um atleta e mostre sua categoria, de acordo com a idade:
- Até 9 anos: MIRIM
- Até 14 anos: INFANTIL
- Até 19 anos: JUNIOR
- Até 25 anos: SÊNIOR
- Acima: MASTER
"""
from datetime import date
ano_nasc = int(input('Digite o ano de nascimento do atleta: '))
ano_atual = date.today().year
idade = ano_atual - ano_nasc
if 0 < idade <= 9:
print(f'A idade é: {idade}. \nCategoria: MIRIM.')
elif 9 < idade <= 14:
print(f'A idade é: {idade}. \nCategoria: INFANTIL.')
elif 14 < idade <= 19:
print(f'A idade é: {idade}. \nCategoria: JÚNIOR.')
elif 19 < idade <= 25:
print(f'A idade é: {idade}. \nCategoria: SÊNIOR.')
elif idade > 25:
print(f'A idade é: {idade}. \nCategoria: MASTER.')
else:
print(f'O ano informado está negativo. Por favor, digite novamente.')
| [
"beatriznaimaite@gmail.com"
] | beatriznaimaite@gmail.com |
5be8330c7a2af0ba0d2b7752a2f74e9b0b078107 | f16e6cff9270ffece7f28473a46a49f76044eae1 | /data_and_proc/sp2genus.py | 4fcb8182b68b5087bea63b7412024f334e0a339a | [] | no_license | Klim314/pubcrawl | fe9e0a4ad0df35367a685856edb7983453fda345 | cd873d0741c6ed1a09867ce86077927afd7be450 | refs/heads/master | 2021-01-18T19:17:49.786829 | 2015-06-19T03:48:03 | 2015-06-19T03:48:03 | 35,801,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python3
import sys
target = sys.argv[1]
output = sys.argv[2]
holder =set()
with open(target) as f:
for i in f:
splat = i.strip().split("\t")
holder.add((splat[0].split(' ')[0], splat[1].split(' ')[0]))
with open(output, 'w') as f:
for i in holder:
f.write(" ".join(i) + '\n')
| [
"klim314@gmail.com"
] | klim314@gmail.com |
5f7a5a96499aeb76150b8e99bea87f6aa67d7afb | 30f02414a6f797f55925ffb35c9c620bd7c726d5 | /laserchicken/feature_extractor/range_feature_extractor.py | 7d7a7d6ca66f0f8c8d26bf3ff3d25d7543e7cc93 | [
"Apache-2.0"
] | permissive | eEcoLiDAR/laserchicken | 20127c9eee27bf98c0e07e10ee8d9ab9dcc087d8 | f6c22841dcbd375639c7f7aecec70f2602b91ee4 | refs/heads/master | 2023-07-07T13:50:43.909704 | 2023-07-05T12:30:16 | 2023-07-05T12:30:16 | 95,649,056 | 28 | 10 | Apache-2.0 | 2023-07-05T12:14:14 | 2017-06-28T08:51:42 | Python | UTF-8 | Python | false | false | 1,277 | py | import numpy as np
from laserchicken.feature_extractor.base_feature_extractor import FeatureExtractor
from laserchicken.keys import point
class RangeFeatureExtractor(FeatureExtractor):
    """Calculates the max, min and range on the z axis."""
    # Values reported when a neighborhood exists but holds no data for the
    # requested attribute.
    DEFAULT_MAX = float('NaN')
    DEFAULT_MIN = float('NaN')
    def __init__(self, data_key='z'):
        # Point-cloud attribute to aggregate (default: height 'z').
        self.data_key = data_key
    @classmethod
    def requires(cls):
        # No other features need to be computed first.
        return []
    def provides(self):
        # Names of the features produced, e.g. 'max_z', 'min_z', 'range_z'.
        base_names = ['max_', 'min_', 'range_']
        return [base + str(self.data_key) for base in base_names]
    def extract(self, point_cloud, neighborhoods, target_point_cloud, target_indices, volume_description):
        # One (max, min, range) triple per neighborhood, transposed so the
        # result is shaped (3, n_neighborhoods) matching provides() order.
        return np.array([self._extract_one(point_cloud, neighborhood) for neighborhood in neighborhoods]).T
    def _extract_one(self, source_point_cloud, neighborhood):
        if neighborhood:
            source_data = source_point_cloud[point][self.data_key]['data'][neighborhood]
            max_z = np.max(source_data) if len(source_data) > 0 else self.DEFAULT_MAX
            min_z = np.min(source_data) if len(source_data) > 0 else self.DEFAULT_MIN
            range_z = max_z - min_z
        else:
            # Empty neighborhood: all three features are undefined.
            max_z = min_z = range_z = np.NaN
        return max_z, min_z, range_z
| [
"c.meijer@esciencecenter.nl"
] | c.meijer@esciencecenter.nl |
626ef1a1961641711c3f61312880cea3994ab7ea | 8d55d3a52ed6dc8111801cea9c7c9d0a84be736b | /src/662.maximum-width-of-binary-tree.py | 236e7d52ce3fdc75ebc2866c5c0b37f29a9c687d | [] | no_license | mic0ud/Leetcode-py3 | 2a23270034ec470571e57c498830b93af813645f | 61fabda324338e907ce3514ae8931c013b8fe401 | refs/heads/master | 2022-12-26T11:52:31.666395 | 2020-09-27T19:27:10 | 2020-09-27T19:27:10 | 297,135,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,889 | py | #
# @lc app=leetcode id=662 lang=python3
#
# [662] Maximum Width of Binary Tree
#
# https://leetcode.com/problems/maximum-width-of-binary-tree/description/
#
# algorithms
# Medium (39.55%)
# Likes: 860
# Dislikes: 187
# Total Accepted: 45.6K
# Total Submissions: 115.4K
# Testcase Example: '[1,3,2,5,3,null,9]'
#
# Given a binary tree, write a function to get the maximum width of the given
# tree. The width of a tree is the maximum width among all levels. The binary
# tree has the same structure as a full binary tree, but some nodes are null.
#
# The width of one level is defined as the length between the end-nodes (the
# leftmost and right most non-null nodes in the level, where the null nodes
# between the end-nodes are also counted into the length calculation.
#
# Example 1:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# Output: 4
# Explanation: The maximum width existing in the third level with the length 4
# (5,3,null,9).
#
#
# Example 2:
#
#
# Input:
#
# 1
# /
# 3
# / \
# 5 3
#
# Output: 2
# Explanation: The maximum width existing in the third level with the length 2
# (5,3).
#
#
# Example 3:
#
#
# Input:
#
# 1
# / \
# 3 2
# /
# 5
#
# Output: 2
# Explanation: The maximum width existing in the second level with the length 2
# (3,2).
#
#
# Example 4:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \
# 5 9
# / \
# 6 7
# Output: 8
# Explanation:The maximum width existing in the fourth level with the length 8
# (6,null,null,null,null,null,null,7).
#
#
#
#
# Note: Answer will in the range of 32-bit signed integer.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from queue import Queue
class Solution:
    def widthOfBinaryTree(self, root: "TreeNode") -> int:
        """Return the maximum width of the binary tree rooted at ``root``.

        The width of a level is the distance between its leftmost and
        rightmost non-null nodes, counting the nulls in between.

        Instead of padding each level with explicit ``None`` placeholders
        (which can grow as 2**depth and exhaust memory on sparse deep
        trees), label each node with its heap-style index: a node at index
        ``i`` has children at ``2*i`` and ``2*i + 1``.  The width of a level
        is then simply ``last_index - first_index + 1``.
        """
        if not root:
            return 0
        widest = 1
        level = [(root, 0)]  # (node, index within a conceptually complete tree)
        while level:
            # Level order guarantees the first/last entries are the extremes.
            widest = max(widest, level[-1][1] - level[0][1] + 1)
            nxt = []
            for node, idx in level:
                if node.left:
                    nxt.append((node.left, 2 * idx))
                if node.right:
                    nxt.append((node.right, 2 * idx + 1))
            level = nxt
        return widest
| [
"ebizui@gmail.com"
] | ebizui@gmail.com |
81deeba10399a56a87e01fcfd869b6d4dd70f0d5 | 6b07e1c67d150a09c436ea480d46cc0f360f50a8 | /run.py | 8a8f3a61d9097348bf65fdbe56af86ca8de7bb57 | [] | no_license | masete/sendit-persistent | 3bc38efe1926aebc376675e4c8a3d7bdb3b82b86 | 3e8c72d85027e9b632252536867998586db67ec0 | refs/heads/develop | 2022-12-11T22:11:02.158363 | 2019-11-04T12:05:40 | 2019-11-04T12:05:40 | 157,970,377 | 0 | 1 | null | 2022-12-08T01:18:22 | 2018-11-17T09:37:52 | Python | UTF-8 | Python | false | false | 210 | py | from flasgger import Swagger
from api.__init__ import create_app
# Application entry point: build the Flask app from the factory.
app = create_app()
if __name__ == '__main__':
    # Attach Swagger API docs, then serve; debug server is for development only.
    Swagger(app)
    app.run(debug=True)
"""
export FLASK_ENV=DEVELOPMENT
echo $FLASK_ENV
"""
| [
"nicholasmasete72@gmail.com"
] | nicholasmasete72@gmail.com |
74da8fcf18ff9a8150ba39ee90f91be05dea8255 | 0b70b9f582d2b010305ad1e5e4885f30435a5a74 | /GUEST/forms.py | ae9847e5b58e1badfbd1bde00f04d04ed4a557f7 | [] | no_license | SruthiSasidharan/DjangoProjects | 51fa60282b398f4ebf03383220ce046ae1e1beed | 6fccc3e1d571638949953ed9fc390068417ce713 | refs/heads/master | 2023-06-28T02:24:32.355568 | 2021-07-29T07:45:26 | 2021-07-29T07:45:26 | 370,647,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from Guest import forms
from .models import Review
from django.forms import ModelForm
class ReviewCreateForm(forms.ModelForm):
    # ModelForm exposing only the free-text ``review`` field of a Review.
    # NOTE(review): ``forms`` here comes from ``from Guest import forms`` --
    # confirm that module re-exports Django's form classes; normally this
    # would be ``from django import forms``.
    class Meta:
        model=Review
        fields=["review"]
| [
"you@example.com"
] | you@example.com |
59a36d90be34893265567174228a0d09d2ef132f | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/ml/azure-ai-ml/azure/ai/ml/_schema/compute/usage.py | 39e0151c3a84f06270af1fa3cdc4323d144b0afe | [
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"Z... | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 1,289 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument,no-self-use
from marshmallow import fields
from marshmallow.decorators import post_load
from azure.ai.ml._restclient.v2021_10_01.models import UsageUnit
from azure.ai.ml._schema.core.fields import NestedField, StringTransformedEnum, UnionField
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
from azure.ai.ml._utils.utils import camel_to_snake
class UsageNameSchema(metaclass=PatchedSchemaMeta):
    """Marshmallow schema for a usage name: raw value plus localized form."""
    value = fields.Str()
    localized_value = fields.Str()
    @post_load
    def make(self, data, **kwargs):
        # Deserialize straight into the SDK's UsageName entity.  Imported
        # locally to avoid a circular import with azure.ai.ml.entities.
        from azure.ai.ml.entities import UsageName
        return UsageName(**data)
class UsageSchema(metaclass=PatchedSchemaMeta):
    """Marshmallow schema for a compute usage/quota record."""
    id = fields.Str()
    aml_workspace_location = fields.Str()
    type = fields.Str()
    # Unit accepts either a free-form string or the REST enum value
    # ("Count", normalised to snake_case on load).
    unit = UnionField(
        [
            fields.Str(),
            StringTransformedEnum(
                allowed_values=UsageUnit.COUNT,
                casing_transform=camel_to_snake,
            ),
        ]
    )
    current_value = fields.Int()   # amount currently consumed
    limit = fields.Int()           # quota ceiling
    name = NestedField(UsageNameSchema)
| [
"noreply@github.com"
] | noreply@github.com |
b1f6b3e0833073a28446c79f85f5d00ac7f2d6ad | 5e96b215ec21d5eb37582a32dfa829e7bae6964a | /objects/gameUI/NameDisplay.py | 0ab6e50770cc7b0725aea46410faf900a4593d8c | [
"MIT"
] | permissive | jpyankel/Teapot-Wars-2 | 0f2caefa13a6ab43b07909976287b58f9ba8422b | ca1ace05850d35a328aaf2ad3c39ca9c744aaa5b | refs/heads/master | 2022-03-21T11:32:51.833658 | 2019-08-27T23:19:16 | 2019-08-27T23:19:16 | 104,529,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | from panda3d.core import NodePath, TextNode
from objects.defaultConfig.Consts import *
class NameDisplay ():
    """
    Floating name display above a creature.
    """
    def __init__(self, originObj, offset, name):
        # Root node parented to the creature so the label follows it.
        self._root = NodePath('nameDisplay')
        # Offset this text node (most likely to above the origin)
        self._root.reparentTo(originObj)
        self._root.setPos(originObj, *offset)
        # Configure text and assign color:
        font = loader.loadFont(PIERCEROMAN_FONT)
        self._textNode = TextNode('NameDisplayText')
        self._textNode.setText(name)
        self._textNode.setAlign(TextNode.ACenter)
        self._textNode.setFont(font)
        self._textNodePath = self._root.attachNewNode(self._textNode)
        self._textNodePath.setScale(NAME_DISPLAY_SCALE)
        self._textNodePath.setColor(NAME_DISPLAY_COLOR)
        # Make this bar face the camera at all times (bill-boarding):
        self._textNodePath.setBillboardPointEye()
    def destroy (self):
        """ Destroys this object """
        self._root.removeNode()
        # NOTE(review): ``del self`` only unbinds the local name; callers
        # must still drop their own references for the object to be freed.
        del self
| [
"joedevelopermiester@gmail.com"
] | joedevelopermiester@gmail.com |
55d862c8cb7df31e12b57c616564e8df3ef6b751 | 5745e4b82941ba04ea4bf0f8aa934c51dcb1d03f | /prediction.py | 7fa71b28b96eb592c2c982d2f5ee28f0fe51890d | [] | no_license | luochuwei/sequence-to-sequence-baseline-lstm-gru | d6181bdd5df8bb73361bcc599f4e41281e0bf903 | 3f43cbefc87abac7947ba07d2c5ba717569bd07c | refs/heads/master | 2021-01-10T07:41:30.383415 | 2016-04-05T05:07:49 | 2016-04-05T05:07:49 | 55,468,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | #-*- coding:utf-8 -*-
####################################################
#
# Author: Chuwei Luo
# Email: luochuwei@gmail.com
# Date: 21/02/2016
# Usage: new Main (in case of the out of memory)
#
####################################################
import time
import gc
import sys
import numpy as np
import theano
import theano.tensor as T
from utils_pg import *
from focus_of_attention_NN import *
import get_data
# Hyper-parameters and data paths for the sequence-to-sequence predictor
# (Python 2 script; loads a trained model and prints test responses).
e = 0.001 #error
lr = 2.0
drop_rate = 0.
batch_size = 200
read_data_batch = 4000
# full_data_len = 190363
full_data_len =8000
hidden_size = [1000,1000,1000]
# try: gru, lstm
cell = "gru"
# try: sgd, momentum, rmsprop, adagrad, adadelta, adam
optimizer = "adadelta"
x_path = "data/SMT-train-8000.post"
y_path = "data/SMT-train-8000.response"
test_path = "data/SMT-test-100.post"
print "loading dic..."
# Index<->word dictionaries shared by the encoder/decoder vocabularies.
i2w, w2i = load_data_dic(r'data/i2w8000.pkl', r'data/w2i8000.pkl')
print "done"
print "#dic = " + str(len(w2i))
dim_x = len(w2i)
dim_y = len(w2i)
num_sents = batch_size
print "#features = ", dim_x, "#labels = ", dim_y
print "compiling..."
# Build the network, then restore trained weights from disk.
model = FANN(dim_x, dim_y, hidden_size, cell, optimizer, drop_rate, num_sents)
print 'loading...'
load_model("0327-GRU-8000-3hidden1000_best.model", model)
print 'model done'
print "predicting..."
# Batch-predict responses for the 100 test posts and print the first one.
test_data_x_y = get_data.test_processing_long(r'data/SMT-test-100.post', i2w, w2i, 100, 100)
t_sents = model.predict(test_data_x_y[0][0], test_data_x_y[0][1],test_data_x_y[0][3], 100)
get_data.print_sentence(t_sents[0], dim_y, i2w)
def response(sentence_seg, model, i2w, w2i):
    # Generate and print one response for a single pre-segmented sentence.
    # NOTE(review): reads module-level ``dim_y`` rather than deriving it from
    # the ``w2i`` argument.
    test_data_x_y = get_data.test_sentence_input_processing_long(sentence_seg, i2w, w2i, 100, 1)
    t_sents = model.predict(test_data_x_y[0][0], test_data_x_y[0][1],test_data_x_y[0][3], 1)
    get_data.print_sentence(t_sents[0], dim_y, i2w) | [
"luochuwei@gmail.com"
] | luochuwei@gmail.com |
cde97b07253a5e111f5f1541be21e19cdf088a32 | 803dc5ebe4f698f8ec5e92b7f393f76ed1739497 | /archive/views.py | ebf06bbd04922eac68070b8df035136306716de7 | [] | no_license | fr4ncad/website1 | 9c7c9776619ad4b4bafe0559a654a6e005400161 | 802285a86c653bd3c75cc6836e7af4cce8c86945 | refs/heads/master | 2023-04-18T11:51:42.916531 | 2021-04-26T19:29:10 | 2021-04-26T19:29:10 | 361,871,128 | 0 | 0 | null | 2021-04-26T19:32:38 | 2021-04-26T19:32:37 | null | UTF-8 | Python | false | false | 481 | py | from django.shortcuts import render
from .models import user
cu = ''
def signup(request):
    # Render the user archive on a valid POST; otherwise show the login page.
    context = {'felhasznalok':user.objects.all()}
    if request.method == 'POST':
        if user.form(request.POST):
            # NOTE(review): this assigns a *local* ``cu`` -- the module-level
            # ``cu`` read by bejegyzes() is never updated (missing ``global cu``).
            cu = request.POST['name']
            print(cu)
            return render(request, 'archives.html', context)
    return render(request, 'login_page.html')
def bejegyzes(request):
    # NOTE(review): ``content`` is built but never passed to render(), so the
    # template never receives 'felhasznalonev' -- presumably it should be the
    # third argument to render().
    content = {'felhasznalonev':cu}
    return render(request, 'content.html')
"pribek.barnabas.17f@szlgbp.hu"
] | pribek.barnabas.17f@szlgbp.hu |
0f03d21e8cae36971692766eb7b78a6313b7cb02 | e2a6860753e7762c1be9b0417f473e8fdf135539 | /books.py | 7725136ad8bbc7b357bbe8276707460c12a5f3ce | [] | no_license | razamob/books_catalogue | 868a65d210aeca3596cd68dc8a8cb20832eb7fba | dba7b42529510fcfc041cca34fda2528bff700a7 | refs/heads/master | 2020-04-22T02:49:58.700112 | 2019-02-11T03:58:08 | 2019-02-11T03:58:08 | 170,064,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,043 | py | """
Program that stores the following book information:
Title
Author
Year
ISBN
User can:
View all records
Search an entry
Add entry
Update entry
Delete
Close
"""
import backend
from tkinter import *
import sys
sys.path.append("C:/Users/Mobeen/Desktop/PythonBootcamp/book_catalogue/book_catalogue")
def get_selected_row(event):
    # Listbox selection callback: copy the selected record's fields into the
    # four entry widgets so it can be updated or deleted.
    try:
        global selected_tuple
        index = list1.curselection()[0]
        selected_tuple = list1.get(index)
        e1.delete(0,END)
        e1.insert(END, selected_tuple[1])
        e2.delete(0, END)
        e2.insert(END, selected_tuple[2])
        e3.delete(0, END)
        e3.insert(END, selected_tuple[3])
        e4.delete(0, END)
        e4.insert(END, selected_tuple[4])
    except IndexError:
        # Click landed outside any row: curselection() was empty -- ignore.
        pass
def view_command():
    # Replace the listbox contents with every record from the database.
    list1.delete(0, END)
    for row in backend.view():
        list1.insert(END, row)
def search_command():
    # Show only the records matching the currently filled-in fields.
    list1.delete(0,END)
    for row in backend.search(title_text.get(), author_text.get(), year_text.get(), ISBN_text.get()):
        list1.insert(END, row)
def add_command():
    # Insert the current field values as a new record, then display just it.
    list1.delete(0, END)
    backend.insert(title_text.get(), author_text.get(), year_text.get(), ISBN_text.get())
    list1.insert(END, (title_text.get(), author_text.get(), year_text.get(), ISBN_text.get()))
def delete_command():
    # Delete the record last selected in the listbox (element 0 is its id).
    backend.delete(selected_tuple[0])
def update_command():
    # Overwrite the selected record with the current entry-field values.
    backend.update(selected_tuple[0], title_text.get(), author_text.get(), year_text.get(), ISBN_text.get())
    print(selected_tuple[0], title_text.get(), author_text.get(), year_text.get(), ISBN_text.get())
# --- Main window and field labels ----------------------------------------
window = Tk()
l1 = Label(window, text="Title")
l1.grid(row=0, column=0)
l2 = Label(window, text="Year")
l2.grid(row=1, column=0)
l3 = Label(window, text="Author")
l3.grid(row=0, column=2)
l4 = Label(window, text="ISBN")
l4.grid(row=1, column=2)
# --- Entry widgets bound to the StringVars read by the command callbacks --
title_text = StringVar()
e1 = Entry(window, textvariable=title_text)
e1.grid(row=0, column=1)
author_text = StringVar()
e2 = Entry(window, textvariable=author_text)
e2.grid(row=0, column=3)
year_text = StringVar()
e3 = Entry(window, textvariable=year_text)
e3.grid(row=1, column=1)
ISBN_text = StringVar()
e4 = Entry(window, textvariable=ISBN_text)
e4.grid(row=1, column=3)
# --- Record list with vertical scrollbar ----------------------------------
list1 = Listbox(window, height=6, width=35)
list1.grid(row=2, column =0, rowspan=6, columnspan=2)
sb1= Scrollbar(window)
sb1.grid(row=2, column=2, rowspan=6)
list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=list1.yview)
list1.bind('<<ListboxSelect>>', get_selected_row)
# --- Buttons wired to the backend commands --------------------------------
b1=Button(window, text="View all", width=12, command=view_command)
b1.grid(row=2, column=3)
b2=Button(window, text="Search entry", width=12, command=search_command)
b2.grid(row=3, column=3)
b3=Button(window, text="Add entry", width=12, command=add_command)
b3.grid(row=4, column=3)
b4=Button(window, text="Update", width=12, command=update_command)
b4.grid(row=5, column=3)
b5=Button(window, text="Delete", width=12, command=delete_command)
b5.grid(row=6, column=3)
# NOTE(review): the Close button has no ``command`` -- clicking it does nothing.
b6=Button(window, text="Close", width=12)
b6.grid(row=7, column=3)
window.mainloop() | [
"razamob@sheridancollege.ca"
] | razamob@sheridancollege.ca |
6700f165faf6659309c518dc3c87dec1653b0d0e | 1047999f13e2f1cbc51c605a5cbeead8cc3ef901 | /db_web/sp/tests.py | 01b4b5e910edcec512d4fc5fd3b3704578151861 | [] | no_license | ichoukou/db_web | ff25243309fc38b31eebf6b9565a7b5e73d24bab | b5a935366c8b6e8cc2539e583e41f0bce2e83c10 | refs/heads/master | 2020-05-20T23:41:38.333132 | 2018-09-11T01:01:59 | 2018-09-11T01:01:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.test import TestCase
# Create your tests here.
sr='[1,2,3,4]'
str_lst=list(sr)
print(str_lst,type(str_lst)) | [
"2361253285@qq.com"
] | 2361253285@qq.com |
bed1f026290d30ab652af5f006794bd8fbc9f765 | c71af56951d1c661a5819db72da1caccd9130df2 | /python/utils/ad_tests_mover.py | 589995b4d3a1d5c721e5c9a0815c477fcb302fc7 | [] | no_license | adrianpoplesanu/personal-work | 2940a0dc4e4e27e0cc467875bae3fdea27dd0d31 | adc289ecb72c1c6f98582f3ea9ad4bf2e8e08d29 | refs/heads/master | 2023-08-23T06:56:49.363519 | 2023-08-21T17:20:51 | 2023-08-21T17:20:51 | 109,451,981 | 0 | 1 | null | 2022-10-07T04:53:24 | 2017-11-03T23:36:21 | Python | UTF-8 | Python | false | false | 457 | py | limit = 95
def format_2digits(i):
if i < 10:
return "test0" + str(i)
else:
return "test" + str(i)
def format_3digits(i):
if i < 10:
return "test00" + str(i)
elif i < 100:
return "test0" + str(i)
else:
return "test" + str(i)
if __name__ == '__main__':
for i in range(1, limit + 1):
print ("mv " + format_2digits(i) + ".ad " + format_3digits(i) + ".ad ;")
print ("echo 'done'")
| [
"adrian.poplesanu@yahoo.com"
] | adrian.poplesanu@yahoo.com |
6f0530d8d6322e467caa307fc3b34d1f649afaad | c877ed13d82e86739d8eaeb0ce98a552e1349e18 | /video_reading_benchmarks/ffmpeg_python/test.py | 625863116bba0b1e0d728d7b4a26032b9c3e21f9 | [
"MIT"
] | permissive | qoo/benchmarking_video_reading_python-1 | cd9b98842c9f2749945bb8bd335f27860f5e528d | 6447f0a434fbde1e21781e1150b6f41062737759 | refs/heads/main | 2023-08-05T00:38:11.722645 | 2021-09-23T15:13:50 | 2021-09-23T15:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import ffmpeg
import numpy as np
class FFMPEGStream():
def __init__(self, videopath):
self.fn = videopath
self.start = 0
probe = ffmpeg.probe(videopath)
video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
self.width = int(video_info['width'])
self.height = int(video_info['height'])
def get_np_array(self, n_frames_to_read):
out, _ = (
ffmpeg
.input(self.fn)
.trim(start_frame=self.start, end_frame=n_frames_to_read)
.output('pipe:', format='rawvideo', pix_fmt='bgr24')
.run(capture_stdout=True)
)
video = (
np.frombuffer(out, np.uint8)
.reshape([-1, self.height, self.width, 3])
)
return video
| [
"benjamin@sc0ville.com"
] | benjamin@sc0ville.com |
406c542a05066e4dc566634fcbc5153708007e1d | 192e4427f5b504e46abf99cd2439ef2d90227d10 | /Dynamic_datas/05_11_1depot_vary_robots_arena/data1/boxplot_overall_foraging_rate.py | 243a79b948ca8f52dd7865f9a9c7465d2e0248fb | [] | no_license | lukey11/Scale-invariant-MPFA-1 | 7371c9f906c986067e0e317f9956803500ddb1db | 79657fdf64f1ebf8be8727b136f8efacb7e4d516 | refs/heads/master | 2022-11-30T04:47:06.788351 | 2020-08-12T19:09:48 | 2020-08-12T19:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,709 | py | import pdb
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from scipy import stats
def get_data_from_file(filename):
''' read data from file into a list'''
f = open(filename)
filecontents = f.readlines()
table = [line.strip('\n') for line in filecontents]
f.close()
return table
def get_multiple_data(files):
datas=[]
for f in files:
data = get_data_from_file(f)
forage = compute_overall_forage_data(data)
datas.append(forage)
return datas
def compute_overall_forage_data(datas):
words=datas[0].replace(",","").split()
if words[0]!='tags_collected':
print "the data may not correct!"
return
forage=[]
for line in datas[1:]:
words =line.replace(",","").split()
forage.append(float(words[0]))
return forage
#mean = np.mean(forage)
#std = np.std(forage)
#return mean, std
def compute_mean_std(fileNames):
means, stds=[], []
for fileName in fileNames:
datas = get_data_from_file(fileName)
forage = compute_overall_forage_data(datas)
mean, std = np.mean(forage), np.std(forage)
means.append(mean)
stds.append(std)
return means, stds
def plot_bars(handle, means, stds, Color, counter, width, ind):
rects= handle.bar(ind+counter*width,np.array(means),width=width, color=Color, yerr=np.array(stds), ecolor='k')
return rects
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 0.6*height, '%0.1f'%float(height),
ha='center', va='bottom', color='white')
def linearReg(handle, X, Y, color, outputFile):
fit = np.polyfit(X, Y,1)
slope, intercept = np.polyfit(X, Y,1)
fit_fn = np.poly1d(fit)
#axarr[0].plot(ind1, random_data[0], 'ro', ind1, fit_fn(ind1), 'k')
handle.plot(X, fit_fn(X), color)
slope, intercept, r_value, p_value, stderr = stats.linregress(X, Y)
outputFile.write(str(slope)+'\t\t'+str(intercept)+'\t\t\t'+str(r_value**2)+'\t\t\t'+str(p_value)+'\t\t'+str(stderr)+'\r')
return slope, intercept
fileNames = ["random_dynamic_MPFA_n1_r6_tag512_5by5_iAntTagData.txt", "random_dynamic_MPFA_n1_r24_tag512_10by10_iAntTagData.txt", "random_dynamic_MPFA_n1_r96_tag512_20by20_iAntTagData.txt"]
without_comm_datas = get_multiple_data(fileNames)
#fileNames = ["with_random_dynamic_MPFA_n4_r24_tag512_10by10_iAntTagData.txt", "with_random_dynamic_MPFA_n4_r48_tag512_20by20_iAntTagData.txt", "with_random_dynamic_MPFA_n4_r72_tag512_30by30_iAntTagData.txt"]
#with_comm_datas = get_multiple_data(fileNames)
width =0.1
N=3
ind = np.arange(N) # the x locations for the groups
#colors =['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00']
colors =['#e41a1c', '#377eb8']
outputFile = open('linearReg.txt', 'w+')
fig, axarr = plt.subplots()
Y= without_comm_datas
rect= axarr.boxplot(Y, notch=True, positions =ind, widths= 0.1)
plt.setp(rect['boxes'], color=colors[0])
slopes=[]
intercepts =[]
#pdb.set_trace()
#for x, y, color in zip(ind, np.array(Y).mean(axis=1), colors):
#pdb.set_trace()
slope, intercept = linearReg(axarr, ind, np.array(Y).mean(axis=1), colors[0], outputFile)
#axarr.plot(ind, ind*slope+intercept, colors[0])
outputFile.close()
# add some text for labels, title and axes ticks
axarr.set_ylabel('Number of collected resources', fontsize=20)
#ax.set_title('Foraging rate in each model', fontsize=20)
axarr.set_xlim(-0.5, 3)
#axarr.set_xticks(ind+width)
#axarr.set_xticklabels( ('No communication', 'Communication'), fontsize=18)
axarr.set_yticks(np.arange(0, 400, 100))
savefig('overall_forage_rate')
plt.show()
| [
"lqcobra@hotmail.com"
] | lqcobra@hotmail.com |
d4c64b21bc94ae2e3d505aec4b92d73ec7965714 | 33260cfbe29a0538ffbf7fef505c8e8634ee35c4 | /iterations/covid_sim_pt0_example.py | 48563125f0f386cd6e43e4ed2a4c4547a68facca | [] | no_license | darren-huang/covidturtlesim | e4788ce51d39da55b9310367bd84c4687253d929 | c256f2bd9c1522842a45a87ba46069cfb2b40b79 | refs/heads/master | 2022-11-21T09:06:46.076614 | 2020-07-24T20:35:01 | 2020-07-24T20:35:01 | 278,490,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import turtle
import random
import math
#screen setup
dim = 1000
border = 50
xmax, ymax = dim, dim
s = turtle.Screen()
s.screensize((xmax + border) * 2 , (ymax + border) * 2, "white")
s.setworldcoordinates(-xmax-border, -ymax-border, xmax+border, ymax+border)
s.tracer(0)
def initTurt(x=None, y=None, angle=None, color="grey"):
t = turtle.Turtle()
#misc settings
t.speed(0)
t.penup()
#appearance
t.shape("circle")
t.color(color)
#positition
if x == None and y == None:
t.goto(random.randint(-xmax, xmax), random.randint(-ymax, ymax))
else:
t.goto(x,y)
#orientation
if angle == None:
t.seth(random.randint(0,360))
else:
t.seth(angle)
return t
def turtsForward(turts, distance):
for turt in turts:
turt.forward(distance)
turts = [initTurt(color="red"), initTurt(color="blue")]
while True:
turtsForward(turts,.5)
s.update()
| [
"darren.y.huang@gmail.com"
] | darren.y.huang@gmail.com |
42a99e3896ebb16802d1326d1cc8f4018af946da | fd11f0c2860de7be249e7b0e65a06869f7e709d7 | /capsul/pipeline/test/fix_test_temporary.py | 2903ee6aedf68465a0df7a3d6ecf9331cadc579b | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | sapetnioc/capsul | 7f85fd486abcf966e7d2f6c6602a26a8ef67ab95 | 2eacf67b5565bdd4973c810c252145f0fe8f8198 | refs/heads/master | 2021-01-18T08:29:53.638033 | 2019-11-04T15:34:51 | 2020-01-09T12:14:02 | 30,696,178 | 0 | 1 | null | 2018-08-03T07:41:16 | 2015-02-12T09:51:22 | Python | UTF-8 | Python | false | false | 7,910 | py | from __future__ import print_function
import unittest
import os
import sys
import tempfile
from traits.api import File, List, Int, Undefined
from capsul.api import Process
from capsul.api import Pipeline, PipelineNode
from capsul.pipeline import pipeline_workflow
from capsul.study_config.study_config import StudyConfig
from soma_workflow import configuration as swconfig
import socket
import shutil
if sys.version_info[0] >= 3:
import io as StringIO
else:
import StringIO
class DummyProcess1(Process):
""" Dummy Test Process
"""
def __init__(self):
super(DummyProcess1, self).__init__()
# inputs
self.add_trait("input", File(optional=False))
self.add_trait("nb_outputs", Int())
# outputs
self.add_trait("output", List(File(output=True), output=True))
self.on_trait_change(self.nb_outputs_changed, "nb_outputs")
def nb_outputs_changed(self):
if len(self.output) != self.nb_outputs:
if len(self.output) > self.nb_outputs:
self.output = self.output[:self.nb_outputs]
else:
self.output \
= self.output + [""] * (self.nb_outputs - len(self.output))
def _run_process(self):
pass
class DummyProcess2(Process):
""" Dummy Test Process
"""
def __init__(self):
super(DummyProcess2, self).__init__()
# inputs
self.add_trait("input", List(File(optional=False)))
# outputs
self.add_trait("output", List(File(output=True), output=True))
self.on_trait_change(self.inputs_changed, "input")
def inputs_changed(self):
nout = len(self.output)
nin = len(self.input)
if nout != nin:
if nout > nin:
self.output = self.output[:nin]
else:
self.output = self.output + [""] * (nin - nout)
def _run_process(self):
for in_filename, out_filename in zip(self.input, self.output):
open(out_filename, 'w').write(in_filename + '\n')
class DummyProcess3(Process):
""" Dummy Test Process
"""
def __init__(self):
super(DummyProcess3, self).__init__()
# inputs
self.add_trait("input", List(File(optional=False)))
# outputs
self.add_trait("output", File(output=True))
def _run_process(self):
with open(self.output, 'w') as f:
for in_filename in self.input:
f.write(open(in_filename).read())
class DummyPipeline(Pipeline):
def pipeline_definition(self):
# Create processes
self.add_process(
"node1",
'capsul.pipeline.test.test_temporary.DummyProcess1')
self.add_process(
"node2",
'capsul.pipeline.test.test_temporary.DummyProcess2')
self.add_process(
"node3",
'capsul.pipeline.test.test_temporary.DummyProcess3')
# Links
self.add_link("node1.output->node2.input")
self.add_link("node2.output->node3.input")
# Outputs
#self.export_parameter("node1", "output",
#pipeline_parameter="output1",
#is_optional=True)
#self.export_parameter("node2", "output",
#pipeline_parameter="output2",
#is_optional=True)
#self.export_parameter("node2", "input",
#pipeline_parameter="input2",
#is_optional=True)
#self.export_parameter("node3", "input",
#pipeline_parameter="input3",
#is_optional=True)
self.node_position = {'inputs': (54.0, 298.0),
'node1': (173.0, 168.0),
'node2': (259.0, 320.0),
'node3': (405.0, 142.0),
'outputs': (518.0, 278.0)}
class TestTemporary(unittest.TestCase):
def setUp(self):
self.pipeline = DummyPipeline()
tmpout = tempfile.mkstemp('.txt', prefix='capsul_test_')
os.close(tmpout[0])
os.unlink(tmpout[1])
# use a custom temporary soma-workflow dir to avoid concurrent
# access problems
tmpdb = tempfile.mkstemp('', prefix='soma_workflow')
os.close(tmpdb[0])
os.unlink(tmpdb[1])
self.soma_workflow_temp_dir = tmpdb[1]
os.mkdir(self.soma_workflow_temp_dir)
swf_conf = '[%s]\nSOMA_WORKFLOW_DIR = %s\n' \
% (socket.gethostname(), tmpdb[1])
swconfig.Configuration.search_config_path \
= staticmethod(lambda : StringIO.StringIO(swf_conf))
self.output = tmpout[1]
self.pipeline.input = '/tmp/file_in.nii'
self.pipeline.output = self.output
study_config = StudyConfig(modules=['SomaWorkflowConfig'])
study_config.input_directory = '/tmp'
study_config.somaworkflow_computing_resource = 'localhost'
study_config.somaworkflow_computing_resources_config.localhost = {
'transfer_paths': [],
}
self.study_config = study_config
def tearDown(self):
swm = self.study_config.modules['SomaWorkflowConfig']
swc = swm.get_workflow_controller()
if swc is not None:
# stop workflow controler and wait for thread termination
swc.stop_engine()
if '--keep-tmp' not in sys.argv[1:]:
if os.path.exists(self.output):
os.unlink(self.output)
shutil.rmtree(self.soma_workflow_temp_dir)
def test_structure(self):
self.pipeline.nb_outputs = 3
self.assertEqual(self.pipeline.nodes["node2"].process.input,
["", "", ""])
self.assertEqual(self.pipeline.nodes["node2"].process.output,
["", "", ""])
def test_direct_run(self):
self.study_config.use_soma_workflow = False
self.pipeline.nb_outputs = 3
self.pipeline()
self.assertEqual(self.pipeline.nodes["node2"].process.input,
["", "", ""])
self.assertEqual(self.pipeline.nodes["node2"].process.output,
["", "", ""])
res_out = open(self.pipeline.output).readlines()
self.assertEqual(len(res_out), 3)
def test_full_wf(self):
self.study_config.use_soma_workflow = True
self.pipeline.nb_outputs = 3
result = self.study_config.run(self.pipeline, verbose=True)
self.assertEqual(result, None)
self.assertEqual(self.pipeline.nodes["node2"].process.input,
["", "", ""])
self.assertEqual(self.pipeline.nodes["node2"].process.output,
["", "", ""])
res_out = open(self.pipeline.output).readlines()
self.assertEqual(len(res_out), 3)
def test():
""" Function to execute unitest
"""
suite = unittest.TestLoader().loadTestsFromTestCase(TestTemporary)
runtime = unittest.TextTestRunner(verbosity=2).run(suite)
return runtime.wasSuccessful()
if __name__ == "__main__":
verbose = False
if len(sys.argv) >= 2 and sys.argv[1] in ('-v', '--verbose'):
verbose = True
print("RETURNCODE: ", test())
if verbose:
import sys
from soma.qt_gui import qt_backend
qt_backend.set_qt_backend(compatible_qt5=True)
from soma.qt_gui.qt_backend import QtGui
from capsul.qt_gui.widgets import PipelineDevelopperView
app = QtGui.QApplication(sys.argv)
pipeline = DummyPipeline()
pipeline.input = '/tmp/file_in.nii'
pipeline.output = '/tmp/file_out3.nii'
pipeline.nb_outputs = 3
view1 = PipelineDevelopperView(pipeline, show_sub_pipelines=True,
allow_open_controller=True)
view1.show()
app.exec_()
del view1
| [
"denis.riviere.gm@gmail.com"
] | denis.riviere.gm@gmail.com |
f33d9c255ed85d3e669af672402e83aef91fabc7 | 1ddcc923242f28f0ff86546e37928f99221ccc35 | /Linear_lesion_Code/UNet/dataset/Linear_lesion.py | 4400d61cfcf2bdcdb291e719e62dc6cd7ba51132 | [
"MIT"
] | permissive | FENGShuanglang/Pytorch_Medical_Segmention_Template | 20c9c74c9bd25c531beca72fc11765f06f9a3925 | f24d1875a1d832b9e8f5b16546cce1300cbf76b1 | refs/heads/master | 2020-05-27T13:44:03.783030 | 2019-11-04T10:46:38 | 2019-11-04T10:46:38 | 188,644,290 | 119 | 32 | null | null | null | null | UTF-8 | Python | false | false | 4,184 | py | import torch
import glob
import os
from torchvision import transforms
from torchvision.transforms import functional as F
#import cv2
from PIL import Image
# import pandas as pd
import numpy as np
from imgaug import augmenters as iaa
import imgaug as ia
#from utils import get_label_info, one_hot_it
import random
def augmentation():
# augment images with spatial transformation: Flip, Affine, Rotation, etc...
# see https://github.com/aleju/imgaug for more details
pass
def augmentation_pixel():
# augment images with pixel intensity transformation: GaussianBlur, Multiply, etc...
pass
class LinearLesion(torch.utils.data.Dataset):
def __init__(self, dataset_path,scale,k_fold_test=1, mode='train'):
super().__init__()
self.mode = mode
self.img_path=dataset_path+'/img'
self.mask_path=dataset_path+'/mask'
self.image_lists,self.label_lists=self.read_list(self.img_path,k_fold_test=k_fold_test)
self.flip =iaa.SomeOf((2,4),[
iaa.Fliplr(0.5),
iaa.Flipud(0.5),
iaa.Affine(rotate=(-30, 30)),
iaa.AdditiveGaussianNoise(scale=(0.0,0.08*255))], random_order=True)
# resize
self.resize_label = transforms.Resize(scale, Image.NEAREST)
self.resize_img = transforms.Resize(scale, Image.BILINEAR)
# normalization
self.to_tensor = transforms.ToTensor()
def __getitem__(self, index):
# load image and crop
img = Image.open(self.image_lists[index])
img = np.array(img)
labels=self.label_lists[index]
#load label
if self.mode !='test':
label = Image.open(self.label_lists[index])
label = np.array(label)
label[label!=255]=0
label[label==255]=1
# label=np.argmax(label,axis=-1)
# label[label!=1]=0
# augment image and label
if self.mode == 'train':
seq_det = self.flip.to_deterministic()#
segmap = ia.SegmentationMapOnImage(label, shape=label.shape, nb_classes=2)
img = seq_det.augment_image(img)
label = seq_det.augment_segmentation_maps([segmap])[0].get_arr_int().astype(np.uint8)
label=np.reshape(label,(1,)+label.shape)
label=torch.from_numpy(label.copy()).float()
labels=label
# img=np.reshape(img,img.shape+(1,)) # 如果输入是1通道需打开此注释 ******
img = self.to_tensor(img.copy()).float()
return img, labels
def __len__(self):
return len(self.image_lists)
def read_list(self,image_path,k_fold_test=1):
fold=sorted(os.listdir(image_path))
# print(fold)
os.listdir()
img_list=[]
if self.mode=='train':
fold_r=fold
fold_r.remove('f'+str(k_fold_test))# remove testdata
for item in fold_r:
img_list+=glob.glob(os.path.join(image_path,item)+'/*.png')
# print(len(img_list))
label_list=[x.replace('img','mask').split('.')[0]+'.png' for x in img_list]
elif self.mode=='val' or self.mode=='test':
fold_s=fold[k_fold_test-1]
img_list=glob.glob(os.path.join(image_path,fold_s)+'/*.png')
label_list=[x.replace('img','mask').split('.')[0]+'.png' for x in img_list]
return img_list,label_list
#if __name__ == '__main__':
# data = LinearLesion(r'K:\Linear Lesion\Linear_lesion', (512, 512),mode='train')
# # from model.build_BiSeNet import BiSeNet
# # from utils import reverse_one_hot, get_label_info, colour_code_segmentation, compute_global_accuracy
# from torch.utils.data import DataLoader
# dataloader_test = DataLoader(
# data,
# # this has to be 1
# batch_size=4,
# shuffle=True,
# num_workers=0,
# pin_memory=True,
# drop_last=True
# )
# for i, (img, label) in enumerate(dataloader_test):
#
# print(label)
# print(img)
# if i>3:
# break
| [
"2470697802.com"
] | 2470697802.com |
659b757cd193aa4340f4f55acca9e23fda71c032 | 4c3be788c244ebcee4aeee6472ca35f67930da9d | /atm.py | d6c26295f9ee9fff842e342b77655e6d02f5b43a | [] | no_license | kartava/CheckIO-Solutions | c562d3a4052558ba1285a8a33dc2d46c4a9238c5 | 217c96ade890fc55e282ca50c4442bc986fd512b | refs/heads/master | 2021-01-19T20:30:29.228555 | 2013-10-16T19:13:59 | 2013-10-16T19:13:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | #Your optional code here
#You can import some modules or create additional functions
def checkio(balance, withdrawal):
#Your code here
#It's main function. Don't remove this function
#It's using for auto-testing and must return a result for check.
for i in withdrawal:
if i%5==0 and balance>i>0:
balance-=i
balance-=1
return balance
#Some hints:
#Make sure you loop through the withdrawal amounts
#make sure you have enough money to withdraw,
#otherwise don't (return the same balance)
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio(120, [10, 20, 30]) == 57, 'First example'
# With one Insufficient Funds, and then withdraw 10 $
assert checkio(120, [200, 10]) == 109, 'Second example'
#with one incorrect amount
assert checkio(120, [3, 10]) == 109, 'Third example'
assert checkio(120, [200, 119]) == 120, 'Fourth example'
assert checkio(120, [120, 10, 122, 2, 10, 10, 30, 1]) == 56, "It's mixed all base tests"
assert checkio(120, [-10]) == 120, 'Negative value'
| [
"Pritam.rakshi@gmail.com"
] | Pritam.rakshi@gmail.com |
549932aa539333a091f31dc81f22a937b394e990 | 62bf350adedcb1d04a97ca5e1f576e1f73493c53 | /nerodia/tag_map.py | 33d536cc6e637a42fc23f84dd48bd2271b1f0be7 | [
"MIT"
] | permissive | mark0203/nerodia | 57ca4bdf36ee6d84b46fb177b6e385f6c82c164d | 2631c210fbaa0a7b5d598016e11ad7c7af083237 | refs/heads/master | 2020-05-24T00:44:25.189039 | 2019-03-10T00:41:51 | 2019-03-10T00:43:50 | 187,021,782 | 0 | 0 | MIT | 2019-05-16T19:46:54 | 2019-05-16T12:17:40 | Python | UTF-8 | Python | false | false | 14,862 | py | from nerodia import tag_to_class
from .elements.area import Area
from .elements.button import Button
from .elements.d_list import DList
from .elements.form import Form
from .elements.html_elements import HTMLElement, Audio, Base, Quote, Body, BR, Canvas, \
TableCaption, TableCol, Data, DataList, Mod, Details, Dialog, Div, Embed, FieldSet, FrameSet, \
Heading, Head, HR, Html, Label, Legend, LI, Map, Meta, Meter, Object, \
OptGroup, Output, Paragraph, Param, Pre, Progress, Script, Source, Span, Style, \
Template, TableHeaderCell, Time, Title, Track, Video, AnchorCollection, \
HTMLElementCollection, AreaCollection, AudioCollection, BaseCollection, QuoteCollection, \
BodyCollection, BRCollection, ButtonCollection, CanvasCollection, TableCaptionCollection, \
TableColCollection, DataCollection, DataListCollection, ModCollection, DetailsCollection, \
DialogCollection, DivCollection, DListCollection, EmbedCollection, FieldSetCollection, \
FormCollection, FrameSetCollection, HeadingCollection, HeadCollection, HRCollection, \
HtmlCollection, IFrameCollection, ImageCollection, InputCollection, \
LabelCollection, LegendCollection, LICollection, MapCollection, \
MetaCollection, MeterCollection, ObjectCollection, OListCollection, \
OptGroupCollection, OptionCollection, OutputCollection, ParagraphCollection, ParamCollection, \
PreCollection, ProgressCollection, ScriptCollection, SelectCollection, SourceCollection, \
SpanCollection, StyleCollection, TableDataCellCollection, TemplateCollection, TextAreaCollection, \
TableCollection, TableHeaderCellCollection, TableSectionCollection, TimeCollection, TitleCollection, \
TableRowCollection, TrackCollection, UListCollection, VideoCollection
from .elements.i_frame import IFrame
from .elements.image import Image
from .elements.input import Input
from .elements.link import Anchor
from .elements.list import OList, UList
from .elements.option import Option
from .elements.radio import Radio, RadioCollection
from .elements.radio_set import RadioSet
from .elements.select import Select
from .elements.svg_elements import Circle, CircleCollection, Defs, DefsCollection, Desc, \
DescCollection, Ellipse, EllipseCollection, ForeignObject, ForeignObjectCollection, G, \
GCollection, Line, LineCollection, LinearGradient, LinearGradientCollection, Marker, \
MarkerCollection, Metadata, MetadataCollection, Path, PathCollection, Pattern, \
PatternCollection, Polygon, PolygonCollection, Polyline, PolylineCollection, RadialGradient, \
RadialGradientCollection, Rect, RectCollection, Stop, StopCollection, SVG, SVGCollection, \
Switch, SwitchCollection, Symbol, SymbolCollection, TextPath, TextPathCollection, TSpan, \
TSpanCollection, Use, UseCollection, View, ViewCollection
from .elements.table import Table
from .elements.table_data_cell import TableDataCell
from .elements.table_row import TableRow
from .elements.table_section import TableSection
from .elements.text_area import TextArea
tag_to_class['a'] = Anchor
tag_to_class['a_collection'] = AnchorCollection
tag_to_class['link'] = Anchor
tag_to_class['link_collection'] = AnchorCollection
tag_to_class['abbr'] = HTMLElement
tag_to_class['abbr_collection'] = HTMLElementCollection
tag_to_class['address'] = HTMLElement
tag_to_class['address_collection'] = HTMLElementCollection
tag_to_class['area'] = Area
tag_to_class['area_collection'] = AreaCollection
tag_to_class['article'] = HTMLElement
tag_to_class['article_collection'] = HTMLElementCollection
tag_to_class['aside'] = HTMLElement
tag_to_class['aside_collection'] = HTMLElementCollection
tag_to_class['audio'] = Audio
tag_to_class['audio_collection'] = AudioCollection
tag_to_class['b'] = HTMLElement
tag_to_class['b_collection'] = HTMLElementCollection
tag_to_class['base'] = Base
tag_to_class['base_collection'] = BaseCollection
tag_to_class['bdi'] = HTMLElement
tag_to_class['bdi_collection'] = HTMLElementCollection
tag_to_class['bdo'] = HTMLElement
tag_to_class['bdo_collection'] = HTMLElementCollection
tag_to_class['blockquote'] = Quote
tag_to_class['blockquote_collection'] = QuoteCollection
tag_to_class['body'] = Body
tag_to_class['body_collection'] = BodyCollection
tag_to_class['br'] = BR
tag_to_class['br_collection'] = BRCollection
tag_to_class['button'] = Button
tag_to_class['button_collection'] = ButtonCollection
tag_to_class['canvas'] = Canvas
tag_to_class['canvas_collection'] = CanvasCollection
tag_to_class['caption'] = TableCaption
tag_to_class['caption_collection'] = TableCaptionCollection
tag_to_class['circle'] = Circle
tag_to_class['circle_collection'] = CircleCollection
tag_to_class['cite'] = HTMLElement
tag_to_class['cite_collection'] = HTMLElementCollection
tag_to_class['code'] = HTMLElement
tag_to_class['code_collection'] = HTMLElementCollection
tag_to_class['col'] = TableCol
tag_to_class['col_collection'] = TableColCollection
tag_to_class['colgroup'] = TableCol
tag_to_class['colgroup_collection'] = TableColCollection
tag_to_class['data'] = Data
tag_to_class['data_collection'] = DataCollection
tag_to_class['datalist'] = DataList
tag_to_class['datalist_collection'] = DataListCollection
tag_to_class['dd'] = HTMLElement
tag_to_class['dd_collection'] = HTMLElementCollection
tag_to_class['defs'] = Defs
tag_to_class['defs_collection'] = DefsCollection
tag_to_class['del'] = Mod
tag_to_class['del_collection'] = ModCollection
tag_to_class['delete'] = Mod
tag_to_class['delete_collection'] = ModCollection
tag_to_class['desc'] = Desc
tag_to_class['desc_collection'] = DescCollection
tag_to_class['details'] = Details
tag_to_class['details_collection'] = DetailsCollection
tag_to_class['dfn'] = HTMLElement
tag_to_class['dfn_collection'] = HTMLElementCollection
tag_to_class['dialog'] = Dialog
tag_to_class['dialog_collection'] = DialogCollection
tag_to_class['div'] = Div
tag_to_class['div_collection'] = DivCollection
tag_to_class['dl'] = DList
tag_to_class['dl_collection'] = DListCollection
tag_to_class['dt'] = HTMLElement
tag_to_class['dt_collection'] = HTMLElementCollection
tag_to_class['ellipse'] = Ellipse
tag_to_class['ellipse_collection'] = EllipseCollection
tag_to_class['em'] = HTMLElement
tag_to_class['em_collection'] = HTMLElementCollection
tag_to_class['embed'] = Embed
tag_to_class['embed_collection'] = EmbedCollection
tag_to_class['fieldset'] = FieldSet
tag_to_class['fieldset_collection'] = FieldSetCollection
tag_to_class['figcaption'] = HTMLElement
tag_to_class['figcaption_collection'] = HTMLElementCollection
tag_to_class['figure'] = HTMLElement
tag_to_class['figure_collection'] = HTMLElementCollection
tag_to_class['footer'] = HTMLElement
tag_to_class['footer_collection'] = HTMLElementCollection
tag_to_class['foreignObject'] = ForeignObject
tag_to_class['foreignObject_collection'] = ForeignObjectCollection
tag_to_class['form'] = Form
tag_to_class['form_collection'] = FormCollection
tag_to_class['frameset'] = FrameSet
tag_to_class['frameset_collection'] = FrameSetCollection
tag_to_class['g'] = G
tag_to_class['g_collection'] = GCollection
tag_to_class['h1'] = Heading
tag_to_class['h1_collection'] = HeadingCollection
tag_to_class['h2'] = Heading
tag_to_class['h2_collection'] = HeadingCollection
tag_to_class['h3'] = Heading
tag_to_class['h3_collection'] = HeadingCollection
tag_to_class['h4'] = Heading
tag_to_class['h4_collection'] = HeadingCollection
tag_to_class['h5'] = Heading
tag_to_class['h5_collection'] = HeadingCollection
tag_to_class['h6'] = Heading
tag_to_class['h6_collection'] = HeadingCollection
tag_to_class['head'] = Head
tag_to_class['head_collection'] = HeadCollection
tag_to_class['header'] = HTMLElement
tag_to_class['header_collection'] = HTMLElementCollection
tag_to_class['hgroup'] = HTMLElement
tag_to_class['hgroup_collection'] = HTMLElementCollection
tag_to_class['hr'] = HR
tag_to_class['hr_collection'] = HRCollection
tag_to_class['html'] = Html
tag_to_class['html_collection'] = HtmlCollection
tag_to_class['i'] = HTMLElement
tag_to_class['i_collection'] = HTMLElementCollection
tag_to_class['ital'] = HTMLElement
tag_to_class['ital_collection'] = HTMLElementCollection
tag_to_class['iframe'] = IFrame
tag_to_class['iframe_collection'] = IFrameCollection
tag_to_class['img'] = Image
tag_to_class['img_collection'] = ImageCollection
tag_to_class['input'] = Input
tag_to_class['input_collection'] = InputCollection
tag_to_class['ins'] = Mod
tag_to_class['ins_collection'] = ModCollection
tag_to_class['kbd'] = HTMLElement
tag_to_class['kbd_collection'] = HTMLElementCollection
tag_to_class['label'] = Label
tag_to_class['label_collection'] = LabelCollection
tag_to_class['legend'] = Legend
tag_to_class['legend_collection'] = LegendCollection
tag_to_class['li'] = LI
tag_to_class['li_collection'] = LICollection
tag_to_class['line'] = Line
tag_to_class['line_collection'] = LineCollection
tag_to_class['linearGradient'] = LinearGradient
tag_to_class['linearGradient_collection'] = LinearGradientCollection
tag_to_class['main'] = HTMLElement
tag_to_class['main_collection'] = HTMLElementCollection
tag_to_class['map'] = Map
tag_to_class['map_collection'] = MapCollection
tag_to_class['mark'] = HTMLElement
tag_to_class['mark_collection'] = HTMLElementCollection
tag_to_class['marker'] = Marker
tag_to_class['marker_collection'] = MarkerCollection
tag_to_class['meta'] = Meta
tag_to_class['meta_collection'] = MetaCollection
tag_to_class['metadata'] = Metadata
tag_to_class['metadata_collection'] = MetadataCollection
tag_to_class['meter'] = Meter
tag_to_class['meter_collection'] = MeterCollection
tag_to_class['nav'] = HTMLElement
tag_to_class['nav_collection'] = HTMLElementCollection
tag_to_class['noscript'] = HTMLElement
tag_to_class['noscript_collection'] = HTMLElementCollection
tag_to_class['object'] = Object
tag_to_class['object_collection'] = ObjectCollection
tag_to_class['ol'] = OList
tag_to_class['ol_collection'] = OListCollection
tag_to_class['optgroup'] = OptGroup
tag_to_class['optgroup_collection'] = OptGroupCollection
tag_to_class['option'] = Option
tag_to_class['option_collection'] = OptionCollection
tag_to_class['output'] = Output
tag_to_class['output_collection'] = OutputCollection
tag_to_class['p'] = Paragraph
tag_to_class['p_collection'] = ParagraphCollection
tag_to_class['path'] = Path
tag_to_class['path_collection'] = PathCollection
tag_to_class['param'] = Param
tag_to_class['param_collection'] = ParamCollection
tag_to_class['pattern'] = Pattern
tag_to_class['pattern_collection'] = PatternCollection
tag_to_class['polygon'] = Polygon
tag_to_class['polygon_collection'] = PolygonCollection
tag_to_class['polyline'] = Polyline
tag_to_class['polyline_collection'] = PolylineCollection
tag_to_class['pre'] = Pre
tag_to_class['pre_collection'] = PreCollection
tag_to_class['progress'] = Progress
tag_to_class['progress_collection'] = ProgressCollection
tag_to_class['q'] = Quote
tag_to_class['q_collection'] = QuoteCollection
tag_to_class['radialGradient'] = RadialGradient
tag_to_class['radialGradient_collection'] = RadialGradientCollection
tag_to_class['radio'] = Radio
tag_to_class['radio_collection'] = RadioCollection
tag_to_class['radio_set'] = RadioSet
tag_to_class['rect'] = Rect
tag_to_class['rect_collection'] = RectCollection
tag_to_class['rp'] = HTMLElement
tag_to_class['rp_collection'] = HTMLElementCollection
tag_to_class['rt'] = HTMLElement
tag_to_class['rt_collection'] = HTMLElementCollection
tag_to_class['ruby'] = HTMLElement
tag_to_class['ruby_collection'] = HTMLElementCollection
tag_to_class['s'] = HTMLElement
tag_to_class['s_collection'] = HTMLElementCollection
tag_to_class['samp'] = HTMLElement
tag_to_class['samp_collection'] = HTMLElementCollection
tag_to_class['script'] = Script
tag_to_class['script_collection'] = ScriptCollection
tag_to_class['section'] = HTMLElement
tag_to_class['section_collection'] = HTMLElementCollection
tag_to_class['select'] = Select
tag_to_class['select_collection'] = SelectCollection
tag_to_class['select_list'] = Select
tag_to_class['select_list_collection'] = SelectCollection
tag_to_class['small'] = HTMLElement
tag_to_class['small_collection'] = HTMLElementCollection
tag_to_class['source'] = Source
tag_to_class['source_collection'] = SourceCollection
tag_to_class['span'] = Span
tag_to_class['span_collection'] = SpanCollection
tag_to_class['stop'] = Stop
tag_to_class['stop_collection'] = StopCollection
tag_to_class['strong'] = HTMLElement
tag_to_class['strong_collection'] = HTMLElementCollection
tag_to_class['style'] = Style
tag_to_class['style_collection'] = StyleCollection
tag_to_class['sub'] = HTMLElement
tag_to_class['sub_collection'] = HTMLElementCollection
tag_to_class['svg'] = SVG
tag_to_class['svg_collection'] = SVGCollection
tag_to_class['summary'] = HTMLElement
tag_to_class['summary_collection'] = HTMLElementCollection
tag_to_class['sup'] = HTMLElement
tag_to_class['sup_collection'] = HTMLElementCollection
tag_to_class['switch'] = Switch
tag_to_class['switch_collection'] = SwitchCollection
tag_to_class['symbol'] = Symbol
tag_to_class['symbol_collection'] = SymbolCollection
tag_to_class['table'] = Table
tag_to_class['table_collection'] = TableCollection
tag_to_class['tbody'] = TableSection
tag_to_class['tbody_collection'] = TableSectionCollection
tag_to_class['td'] = TableDataCell
tag_to_class['td_collection'] = TableDataCellCollection
tag_to_class['template'] = Template
tag_to_class['template_collection'] = TemplateCollection
tag_to_class['textarea'] = TextArea
tag_to_class['textarea_collection'] = TextAreaCollection
tag_to_class['tfoot'] = TableSection
tag_to_class['tfoot_collection'] = TableSectionCollection
tag_to_class['th'] = TableHeaderCell
tag_to_class['th_collection'] = TableHeaderCellCollection
tag_to_class['thead'] = TableSection
tag_to_class['thead_collection'] = TableSectionCollection
tag_to_class['textPath'] = TextPath
tag_to_class['textPath_collection'] = TextPathCollection
tag_to_class['time'] = Time
tag_to_class['time_collection'] = TimeCollection
tag_to_class['title'] = Title
tag_to_class['title_collection'] = TitleCollection
tag_to_class['tr'] = TableRow
tag_to_class['tr_collection'] = TableRowCollection
tag_to_class['track'] = Track
tag_to_class['track_collection'] = TrackCollection
tag_to_class['tspan'] = TSpan
tag_to_class['tspan_collection'] = TSpanCollection
tag_to_class['u'] = HTMLElement
tag_to_class['u_collection'] = HTMLElementCollection
tag_to_class['ul'] = UList
tag_to_class['ul_collection'] = UListCollection
tag_to_class['use'] = Use
tag_to_class['use_collection'] = UseCollection
tag_to_class['var'] = HTMLElement
tag_to_class['var_collection'] = HTMLElementCollection
tag_to_class['video'] = Video
tag_to_class['video_collection'] = VideoCollection
tag_to_class['view'] = View
tag_to_class['view_collection'] = ViewCollection
tag_to_class['wbr'] = HTMLElement
tag_to_class['wbr_collection'] = HTMLElementCollection
| [
"lucast1533@gmail.com"
] | lucast1533@gmail.com |
094a5139fe0be8d0243fab9e3f6e4eec80de9499 | 49681d14cf510d4c2e02b9f6482d412c30d21d65 | /ex080.py | eb86477cf3f193b432bf90a2dade01e87e939bce | [] | no_license | danilocecci/CEV-Python3 | 9ed1658c34e6d8750e74f97e7c15a4eae095487f | 8640e5bea038c791c8bc4d33f955043b2c5af2f3 | refs/heads/main | 2023-04-01T13:22:32.637721 | 2021-04-09T18:22:47 | 2021-04-09T18:22:47 | 332,568,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | num = list()
for c in range(0,5):
handle_num = int(input('Digite um número inteiro: '))
if c == 0 or handle_num > num[-1]:
num.append(handle_num)
else:
count = 0
while count < len(num):
if handle_num <= num[count]:
num.insert(count, handle_num)
break
count += 1
print('')
print(f'Os valores informados em ordem crescente foram: {num}') | [
"danilocecci@gmail.com"
] | danilocecci@gmail.com |
59d7415ace232c9d22befd4334d049d42f6834d4 | 225b525e9c47965da3bf3bf109c058aae7e2222d | /frames.py | 9117c18028045af352a2dee44702583451e520db | [] | no_license | vincseize/Agisoft-1 | 7a0ea31ce35bee0f04c80a804f432ff8113898a5 | 21abcb0f46c5a38261bbc7c7ccf048237d8a877d | refs/heads/main | 2023-01-13T12:24:09.451391 | 2020-11-16T03:57:06 | 2020-11-16T03:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import argparse
import FFMPEGframes

# Parse the required command-line arguments: the input video and the
# frames-per-second extraction rate.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=True)
parser.add_argument("-f", "--fps", required=True)
args = vars(parser.parse_args())

# Renamed from `input`/`f`: the old names shadowed the builtin input()
# and reused one variable for both the parser and the extractor.
video_path = args["input"]
fps = args["fps"]

extractor = FFMPEGframes.FFMPEGframes("data/images/")
extractor.extract_frames(video_path, fps)
"mokakoba@Momokas-MacBook-Pro.local"
] | mokakoba@Momokas-MacBook-Pro.local |
eaef333c1ff76d246809b75faa07b48481e69749 | 5b3f9cac85d27c91160cfc1fb10bbea85afa4074 | /speech_recognition/train_model_run.py | ebfae3111e49b1ecb22ad69090a8773878a78c21 | [
"MIT"
] | permissive | a-n-rose/workshops | 4d57d747820912914f593ae3ee0e97281df02584 | e9c265e0d3bc3ca5f7faebdc0cc01c0d7c76a216 | refs/heads/master | 2023-01-13T15:55:37.831275 | 2021-08-27T12:50:48 | 2021-08-27T12:50:48 | 165,374,546 | 0 | 0 | MIT | 2022-12-26T20:50:49 | 2019-01-12T10:22:13 | Jupyter Notebook | UTF-8 | Python | false | false | 9,601 | py |
'''
Script outline
1) load data
expects title of table to contain:
- 'mfcc' or fbank
- the number of features
- optionaly: 'pitch' or 'delta' if the table has those features
2) prep data --> zeropad, encode categorical data, dimensionality
3) train model
4) save model
'''
import time
import os
from sqlite3 import Error
#for training
from sklearn.model_selection import train_test_split
#for the models
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten, LSTM, MaxPooling2D, Dropout, TimeDistributed
import user_input
from errors import ExitApp, FeatureExtractionError
import feature_extraction_functions as featfun
import logging
from my_logger import start_logging, get_date
logger = logging.getLogger(__name__)
from exercise_functions import feature_column_prep
def main(script_purpose,database=None,tablename=None):
    """Load speech features from a sqlite table, prep them, and train a CNN+LSTM.

    Args:
        script_purpose: tag recorded in the session log for this run.
        database: sqlite database filename; defaults to "speech_features.db".
        tablename: feature table; its name encodes the features it holds
            ('mfcc'/'fbank', optionally 'pitch'/'delta') and drives column layout.
    """
    # These names are used below but were never imported at module level,
    # which raised NameError at runtime — import them locally here.
    from keras.callbacks import EarlyStopping, ReduceLROnPlateau, CSVLogger, ModelCheckpoint
    import matplotlib.pyplot as plt
    current_filename = os.path.basename(__file__)
    session_name = get_date() #make sure this session has a unique identifier - link to model name and logging information
    #set default values
    if database is None:
        database = "speech_features.db"
    start = time.time()
    # Pre-initialize the timing checkpoints so the duration report in
    # `finally` cannot itself raise NameError when an exception occurs
    # before a checkpoint is reached.
    end_loaded_data = start_data_prep = start_train = start
    try:
        start_logging(script_purpose)
        separator = "*"*80
        logging.info(separator)
        logging.info("RUNNING SCRIPT: \n\n{}".format(current_filename))
        logging.info("SESSION: \n\n{}".format(session_name))
        ######################################################################
        #load data
        logging.info("Loading data from \nDatabase: {}\nTable: {}".format(database,tablename))
        data = user_input.load_data(database,tablename)
        logging.info("Data successfully loaded")
        end_loaded_data = time.time()
        #!!!!necessary variables for user to set!!!!!
        #~these set most of the subsequent variables
        id_col_index = 2 #index 0 --> sample ID, index 1 --> speaker ID
        context_window_size = 9
        frame_width = context_window_size*2+1
        #if the data contains column w frequency info, assume it is the second to last column
        #also assumes features start after the relevant id column
        if 'pitch' in tablename:
            features_start_stop_index = [id_col_index+1,-2]
        else:
            features_start_stop_index = [id_col_index+1,-1]
        #assumes last column is the label column
        label_col_index = [-1]
        #add feature columns based on which features are to be expected
        num_features, num_feature_columns = feature_column_prep(tablename)
        print("The original number of features: {}".format(num_features))
        print("Total feature columns: {}".format(num_feature_columns))
        logging.info("Column index for each recording/speaker ID set at: {}".format(id_col_index))
        logging.info("Number of original features (e.g. MFCCs or FBANK energy features): {}".format(num_features))
        logging.info("Number of total features (e.g. derivatives, pitch): {}".format(num_feature_columns))
        logging.info("Set context window size: {}".format(context_window_size))
        logging.info("Frame width: {}".format(frame_width))
        ######################################################################
        start_data_prep = time.time()
        logging.info("Now prepping data for model training")
        #prep data
        #1) make sure each utterance has same number of samples;
        #if not, zeropad them so each has same number of samples
        data_zeropadded, samples_per_utterance, num_utterances, labels_present = featfun.prep_data(data,id_col_index,features_start_stop_index,label_col_index,num_feature_columns,frame_width,session_name)
        logging.info("Data has been zero-padded")
        logging.info("Shape of zero-padded data: {}".format(data_zeropadded.shape))
        logging.info("Fixed number of samples per utterance: {}".format(samples_per_utterance))
        logging.info("Number of utterances in data: {}".format(num_utterances))
        logging.info("Reshaping data to fit ConvNet + LSTM models")
        X, y = featfun.shape_data_dimensions_CNN_LSTM(data_zeropadded,samples_per_utterance,frame_width)
        logging.info("Done reshaping")
        logging.info("Shape of feature data (i.e. 'X'): {}".format(X.shape))
        logging.info("Shape of label data (i.e. 'y'): {}".format(y.shape))
        #separate X and y --> training and test datasets
        logging.info("Separating data into train and test datasets")
        test_size = 0.1
        X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = test_size)
        logging.info("Separated data. Test size = {}".format(test_size))
        logging.info("Shape of train data: \nX = {}\ny = {}".format(X_train.shape,y_train.shape))
        logging.info("Shape of test data: \nX = {}\ny = {}".format(X_test.shape,y_test.shape))
        ######################################################################
        #train the models!
        logging.info("Now initializing the model and beginning training")
        start_train = time.time()
        #TIME-FREQUENCY CONVNET
        tfcnn = Sequential()
        # feature maps = 40
        # 8x4 time-frequency filter (goes along both time and frequency axes)
        color_scale = 1
        # NOTE(review): input_size is never passed to the model — the
        # TimeDistributed wrapper below supplies input_shape instead.
        input_size = (frame_width,num_features,color_scale)
        tfcnn.add(Conv2D(num_feature_columns, kernel_size=(8,4), activation='relu'))
        #non-overlapping pool_size 3x3
        tfcnn.add(MaxPooling2D(pool_size=(3,3)))
        tfcnn.add(Dropout(0.25))
        tfcnn.add(Flatten())
        #prepare LSTM
        tfcnn_lstm = Sequential()
        timestep = samples_per_utterance//frame_width
        tfcnn_lstm.add(TimeDistributed(tfcnn,input_shape=(timestep,frame_width,num_feature_columns,color_scale)))
        tfcnn_lstm.add(LSTM(timestep)) #num timesteps
        tfcnn_lstm.add(Dense(len(labels_present),activation="softmax")) # binary = "sigmoid"; multiple classification = "softmax"
        print(tfcnn_lstm.summary())
        #set loss:
        #binary = "binary_crossentropy", multiple (one-hot-encoded) = "categorical_crossentropy"; multiple (integer encoded) = "sparse_categorical_crossentropy"
        loss = "sparse_categorical_crossentropy"
        logging.info("Loss set at: '{}'".format(loss))
        #compile model
        tfcnn_lstm.compile(optimizer='adam',loss=loss,metrics=['accuracy'])
        #train model
        epochs = 300
        logging.info("Number of epochs set at: {}".format(epochs))
        model_train_name = "CNN_LSTM_training_{}".format(session_name)
        callback = [EarlyStopping(monitor='val_loss', patience=15, verbose=1),
                    ReduceLROnPlateau(patience=5, verbose=1),
                    CSVLogger(filename='model_log/{}_log.csv'.format(model_train_name)),
                    ModelCheckpoint(filepath='bestmodel/bestmodel_{}.h5'.format(model_train_name), verbose=1, save_best_only=True)]
        history = tfcnn_lstm.fit(X_train, y_train, epochs=epochs, validation_split = 0.15, callbacks = callback)
        score = tfcnn_lstm.evaluate(X_test,y_test,verbose=1)
        acc = round(score[1]*100,2)
        print("Model Accuracy on test data:")
        print(acc)
        logging.info("Model Accuracy on TEST data: {}".format(acc))
        modelname = "CNN_LSTM_{}_{}_{}_{}recordings_{}epochs_{}acc".format(session_name,database,tablename,num_utterances,epochs,acc)
        print('Saving Model')
        tfcnn_lstm.save(modelname+'.h5')
        print('Done!')
        print("\n\nModel saved as:\n{}".format(modelname))
        print("Now saving history and plots")
        # Loss curve (train vs validation).
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title("train vs validation loss")
        plt.ylabel("loss")
        plt.xlabel("epoch")
        plt.legend(["train","validation"], loc="upper right")
        plt.savefig("{}_LOSS.png".format(modelname))
        plt.clf()
        # Accuracy curve (train vs validation).
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title("train vs validation accuracy")
        plt.legend(["train","validation"], loc="upper right")
        plt.savefig("{}_ACCURACY.png".format(modelname))
    except ExitApp:
        print("Have a good day!")
        logging.info("User exited app.")
    except Error as e:
        logging.exception("Database error: {}".format(e))
    except Exception as e:
        logging.exception("Error occurred: {}".format(e))
    finally:
        # Always report timing, even on failure.
        end = time.time()
        duration = round((end-start)/3600,2)
        msg1 = "Total Duration: {} hours".format(duration)
        logging.info(msg1)
        duration_load_data = round((end_loaded_data - start)/60,2)
        msg2 = "Duration to load data: {} min".format(duration_load_data)
        logging.info(msg2)
        duration_prep = round((start_train - start_data_prep)/60,2)
        msg3 = "Duration to prep data: {} min".format(duration_prep)
        logging.info(msg3)
        duration_train = round((end-start_train)/60,2)
        msg4 = "Duration to train models: {} min".format(duration_train)
        logging.info(msg4)


if __name__=="__main__":
    main(script_purpose="speech_feature_prep_train_model_speech_recognition",database="speech_features.db",tablename="fbank_pitch_20_no_noise_word")
| [
"rose.aislyn.noelle@gmail.com"
] | rose.aislyn.noelle@gmail.com |
6bc671f51e41bb7b90ffcb308d5de95fa72c26f5 | 6bd5ed987ba617b9298ed8756fba541f16aaf85a | /control.py | 4f33db7231d12427cc1159cdc1697cdd3d766286 | [] | no_license | vuvandinh2000/Makerthon19_Mori | 3e28e1617f5383521393bce447e6bdf17fb5b78d | ecb6b6b0aa3ff0d6dc1f95e56362db5dc47858ca | refs/heads/master | 2023-03-02T16:43:23.979924 | 2021-02-14T15:26:02 | 2021-02-14T15:26:02 | 283,564,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,482 | py | import cv2
#import pyttsx3
import time
import playsound
import serial
#robot_mouth = pyttsx3.init()
# Drowsiness monitor: watches the webcam for a face with closed eyes and
# escalates alerts (audio + Arduino commands) the longer the eyes stay shut.
eye_cascPath = r'..\Makerthon_Mori\haarcascade_eye_tree_eyeglasses.xml' #eye detect model
face_cascPath = r'..\Makerthon_Mori\haarcascade_frontalface_alt.xml' #face detect model
faceCascade = cv2.CascadeClassifier(face_cascPath)
eyeCascade = cv2.CascadeClassifier(eye_cascPath)
cap = cv2.VideoCapture(0)  # default webcam
countClosedEyes = 0  # consecutive closed-eye detections (drives alert level)
start = time.time()
end = 0
# Arduino on COM3; writes of b'1'/b'2'/b'3' select the alert level on the device.
arduinoData = serial.Serial('COM3', 9600)
arduinoData.write(b'1')
playsound.playsound('welcome.mp3')
arduinoData.write(b'1')
while 1:
    ret, img = cap.read()
    if ret:
        frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = faceCascade.detectMultiScale(
            frame,
            scaleFactor = 1.2,
            minNeighbors = 5,
            minSize=(30, 30),
            flags = cv2.CASCADE_SCALE_IMAGE
        )
        if len(faces) > 0:
            # Detect eyes in the image
            eyes = eyeCascade.detectMultiScale(
                frame,
                scaleFactor = 1.2,
                minNeighbors = 5,
                minSize = (30, 30),
                flags = cv2.CASCADE_SCALE_IMAGE
            )
            if len(eyes) == 0: #Can't detect the eyes
                start = time.time()
                # Draw a red rectangle around the faces
                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
                # Escalation ladder: levels fire at counts 4, 8 and 12+.
                if countClosedEyes < 4: #Normal
                    arduinoData.write(b'1')
                    countClosedEyes += 1
                    start = time.time()
                    time.sleep(1)
                elif countClosedEyes == 4: #Level 1
                    playsound.playsound('level1.mp3')
                    time.sleep(1)
                    playsound.playsound('music.mp3')
                    start = time.time()
                    countClosedEyes += 1
                    time.sleep(1)
                elif countClosedEyes == 8: #Level 2
                    playsound.playsound('level2.mp3')
                    arduinoData.write(b'2')
                    time.sleep(8)
                    start = time.time()
                    countClosedEyes += 1
                    time.sleep(1)
                elif countClosedEyes >= 12: #Level 3
                    # NOTE(review): once the count reaches 12 this branch
                    # replays level3 on every loop iteration — confirm that
                    # repeating the alert is intentional.
                    playsound.playsound('level3.mp3')
                    arduinoData.write(b'3')
                    time.sleep(8)
                    start = time.time()
                    countClosedEyes += 1
                    time.sleep(1)
                else:
                    # Counts between the level thresholds (5-7, 9-11).
                    countClosedEyes += 1
                    time.sleep(1)
            else:
                # Draw a green rectangle around the faces
                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                end = time.time()
                # Reset the counter after 30s of open eyes.
                if (end - start) > 30:
                    countClosedEyes = 0
        #frame_tmp = cv2.resize(frame_tmp, (400, 400), interpolation=cv2.INTER_LINEAR)
        cv2.imshow('Face Recognition', img)
        print("You close eyes %d times" % countClosedEyes)
        waitkey = cv2.waitKey(1)
        if waitkey == ord('q') or waitkey == ord('Q'):
            cv2.destroyAllWindows()
            break
| [
"noreply@github.com"
] | noreply@github.com |
6a5dc3ab92b3e8cc6285871b4f67e45b5e484fb2 | c209d46a08347183931651dddef72197755749dd | /pyobjc-framework-Intents/PyObjCTest/test_instartaudiocallintentresponse.py | d5d4cd4556f3591ba4dc66bdcecb289a3932af80 | [] | no_license | jackrobison/pyobjc | ecd33914a664a381571c8860a54db5eaa7bf726b | f801e249678f267bd490e8cde4f039e3151d794d | refs/heads/master | 2021-01-11T05:35:48.444594 | 2016-10-20T20:36:25 | 2016-10-20T20:36:25 | 71,502,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import sys
from PyObjCTools.TestSupport import *
if sys.maxsize > 2 ** 32:
import Intents
class TestINStartAudioCallIntentResponse (TestCase):
    """Verifies the INStartAudioCallIntentResponseCode constant values."""

    @min_os_level('10.12')
    def testConstants(self):
        # The response codes form a contiguous 0..4 range.
        expected = (
            ('INStartAudioCallIntentResponseCodeUnspecified', 0),
            ('INStartAudioCallIntentResponseCodeReady', 1),
            ('INStartAudioCallIntentResponseCodeContinueInApp', 2),
            ('INStartAudioCallIntentResponseCodeFailure', 3),
            ('INStartAudioCallIntentResponseCodeFailureRequiringAppLaunch', 4),
        )
        for name, value in expected:
            self.assertEqual(getattr(Intents, name), value)


if __name__ == "__main__":
    main()
| [
"jack@lbry.io"
] | jack@lbry.io |
df7a3b20cb9bf0b3468a2d59ab29d0d10bec732f | 4b5550644379b94cb808c696be45eeed27c55444 | /blog/urls.py | 965ffbb0e1d44b6b37ced79eaaafa53b03352a98 | [] | no_license | jovanfoster/django_blog | 44009536fe49e91ee5e37a82c9ab7ee27d673907 | cadf42304c38b02b0fd06c83ef39db35ce78a8ed | refs/heads/master | 2022-12-09T17:32:03.856249 | 2020-09-17T22:22:02 | 2020-09-17T22:22:02 | 290,457,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static

# Project-level URL routing.
urlpatterns = [
    path('admin/', admin.site.urls),       # Django admin site
    path('', include('app.urls')),         # main app mounted at the site root
    path('user/', include('users.urls')),  # user/account routes
]
# Serve uploaded media via Django only while DEBUG is on; production should
# let the web server handle MEDIA_ROOT.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"jovanfoster9@gmail.com"
] | jovanfoster9@gmail.com |
04dc23f98eeb652b65e913bb594e023fbe573c31 | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/14 Testing/TDD/point.py | 5f5392393d0de4173dee22fb7258d9404262882e | [] | no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | class Point:
def __init__(self, x0, y0, name):
self.x = x0
self.y = y0
self.name = name
def moveBy(self, dx, dy):
self.x += dx
self.y += dy
def display(self):
print(f"Point {self.name} is at [{self.x},{self.y}]")
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
8b4b99aeaef113c1a559e09a87f58f4b7b14e5fe | a6e09296f295550c91188d4bd5ce8cd4a6818996 | /Week_03/347.top-k-frequent-elements.py | 7fa839c8bade2e9d01ba5e6050201eb785bbb16b | [] | no_license | LuciusK/algorithm011-class02 | f95a6dc60cb7f3c15ee080972543f24a98785c6c | 85e49a3da24a090359fff76874a5ff0a496b9fc6 | refs/heads/master | 2022-12-04T11:46:03.041515 | 2020-08-23T11:43:35 | 2020-08-23T11:43:35 | 273,902,864 | 0 | 0 | null | 2020-06-21T12:47:59 | 2020-06-21T12:47:59 | null | UTF-8 | Python | false | false | 1,308 | py | #
# @lc app=leetcode id=347 lang=python3
#
# [347] Top K Frequent Elements
#
# @lc code=start
import collections
import heapq
class Solution:
    """Four equivalent strategies for LeetCode 347: the k most frequent elements."""

    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        # Strategy 1: heapq.nlargest over the counter keys, keyed by frequency.
        freq = collections.Counter(nums)
        return heapq.nlargest(k, freq.keys(), key=freq.get)

    def topKFrequent1(self, nums: List[int], k: int) -> List[int]:
        # Strategy 2: Counter.most_common already yields (value, count)
        # pairs in descending-count order.
        top_pairs = collections.Counter(nums).most_common(k)
        return [value for value, _count in top_pairs]

    def topKFrequent2(self, nums: List[int], k: int) -> List[int]:
        # Strategy 3: hand-rolled min-heap on negated counts.
        from collections import Counter
        import heapq
        frequency = Counter(nums)
        heap = []
        for value, occurrences in frequency.items():
            heapq.heappush(heap, [-occurrences, value])
        return [heapq.heappop(heap)[1] for _ in range(k)]

    def topKFrequent3(self, nums: List[int], k: int) -> List[int]:
        # Strategy 4: bucket sort by frequency, O(n).
        from collections import Counter
        frequency = Counter(nums)
        # a single value can occur at most len(nums) times, so one bucket per count
        buckets = [[] for _ in range(len(nums) + 1)]
        for value, occurrences in frequency.items():
            buckets[occurrences].append(value)
        res = []
        for occurrences in range(len(nums), -1, -1):
            if len(res) > k:
                break
            res.extend(buckets[occurrences])
        return res[:k]
# @lc code=end
| [
"Luciusprivate@163.com"
] | Luciusprivate@163.com |
648e00634072dc981b850784e6e6d4204da6a57a | 9a2339cb41218abd4f59abc935df74947b71b451 | /fahrenheit_calculator/migrations/0003_auto_20200118_2221.py | f159266f984d79b7b80d8b8c06b9a3bced024a81 | [] | no_license | abhijithofficial/calculator | e68d36966cb49327b2730c8e57d1c5ec6eb50735 | 3a525edc01307b692bde1a1c62092126c9f33126 | refs/heads/master | 2021-06-30T14:09:47.508511 | 2020-01-18T17:05:36 | 2020-01-18T17:05:36 | 234,768,369 | 0 | 0 | null | 2021-06-10T22:33:11 | 2020-01-18T17:08:50 | Python | UTF-8 | Python | false | false | 699 | py | # Generated by Django 2.1 on 2020-01-18 16:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Renames UserDetails.userName to name and links CalculatorDetails rows
    to an owning UserDetails via a new foreign key."""

    dependencies = [
        ('fahrenheit_calculator', '0002_auto_20200118_2143'),
    ]

    operations = [
        migrations.RenameField(
            model_name='userdetails',
            old_name='userName',
            new_name='name',
        ),
        migrations.AddField(
            model_name='calculatordetails',
            name='user',
            # default=1 backfills existing rows with the UserDetails having pk 1;
            # preserve_default=False removes that default after the migration runs.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='fahrenheit_calculator.UserDetails'),
            preserve_default=False,
        ),
    ]
| [
"alwayzabhijith@gmail.com"
] | alwayzabhijith@gmail.com |
690d75c1ec333d9d9aa5fbd91d93e689e0bf4638 | 72547a6e9e5741926a77b96ea845ef58a09228a5 | /funnel/templates/dev/base/base__/cron/topvehicle.py | 31d0475b14f16a19c2bf24c33b02ad624af42881 | [] | no_license | hamik112/dealerfunnel | 00bf1611a8c0938221ca1a29ef5e4ecb747554b4 | 563ae0517da427d82c46eb6cddd096c139b248f9 | refs/heads/master | 2021-06-08T03:29:50.843306 | 2016-08-09T06:31:08 | 2016-08-09T06:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | import MySQLdb
import math
from geocode import geocode
from datetime import datetime
import hashlib
import time
from common import *
class topvehicle:
    """Maintains per-dealer counts of trade-in (make, model) pairs in the
    funnel_topvehicle table."""

    def __init__(self,db,cursor,data):
        self.db = db          # DB connection (used by common() helpers for commits)
        self.cursor = cursor  # shared DB cursor
        self.data = data      # lead payload; expects 'tradeins', 'make', 'model', 'dealer'

    def process(self):
        """Insert a new (make, model, dealer) row with count 1, or increment
        the existing row's count. Only runs for trade-in leads."""
        if self.data['tradeins'] == 1:
            make = self.data['make']
            model = self.data['model']
            dealer = self.data['dealer']
            # Parameterized query: the previous %-interpolated SQL string was
            # vulnerable to SQL injection via the make/model/dealer values.
            sql = ("select * from funnel_topvehicle "
                   "where make=%s and model=%s and fdealer_id=%s limit 1")
            self.cursor.execute(sql, (make, model, dealer))
            if self.cursor.rowcount == 0:
                row = {
                    "make" : make,
                    "model" : model,
                    "count" : 1,
                    "fdealer_id" : dealer,
                }
                common().insert('funnel_topvehicle',row,self.cursor,self.db)
            else:
                # The cursor still holds the result set from the execute()
                # above, so the redundant second execute() was removed.
                result = self.cursor.fetchone()
                id = result[0]
                updated = {
                    "count" : result[common().getindex('funnel_topvehicle','count', self.cursor)] + 1,
                }
                common().update('funnel_topvehicle',updated,id,'id',self.cursor,self.db)
| [
"bd@gmail.com"
] | bd@gmail.com |
895a00118a48a46da43842ed361ef90b4bf75bc7 | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderRegularContest/109/B.py | e8f63dd4b86156a7a7784479c9988ed5e778177a | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 694 | py | # import sys
# sys.setrecursionlimit(10 ** 6)
# import bisect
# from collections import deque
def binary_search(ok, ng, solve):
"""2分探索"""
while abs(ok - ng) > 1:
mid = (ok + ng) // 2
if solve(mid):
ok = mid
else:
ng = mid
return ok
# from decorator import stop_watch
#
#
# @stop_watch
def solve(n):
def solve(x):
if (x * (x + 1)) // 2 <= n + 1:
return True
return False
print(n - binary_search(1, n + 1, solve) + 1)
if __name__ == '__main__':
n = int(input())
solve(n)
# # test
# from random import randint
# from func import random_str, random_ints
# solve()
| [
"39874652+corutopi@users.noreply.github.com"
] | 39874652+corutopi@users.noreply.github.com |
98070862463bd7d5867b2668312f3ed75b309d31 | c07f8e05feedda740b5f1727f735ea9913d37e6a | /Lista.py | f538ad4486c18170248daeddddacfd12b6c1bd56 | [] | no_license | tpwrochna/alx | 8c5fdb1259a911973d73f108203598b94adc6b8b | f5ec59913ea32935df4fe9576718251c92ff8116 | refs/heads/master | 2020-03-22T15:42:09.836027 | 2018-07-09T10:47:40 | 2018-07-09T10:47:40 | 140,270,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | lista = [10, 20, 50, 100, 200] # deklaracja listy
list() # utworzenie pustej listy za pomocą konstruktora
lista1 = [] # utworzenie pusatej listy za pomocą operatora Dostępu
print(len(lista)) # wypisz liczbę elementów Listy
if 10 in lista: # Sprawdzenie czy "10" znajduje się w lista.
print("True")
else:
print("False")
lista[0] = 5 # Operatorem przypisania zmieniamy pojedyńczy element poprzez wskazanie indeksu
print(lista)
lista.append(300) # Dodanie elementu na koniec listy
print(lista)
lista1.insert(0, "abc") # dodanie elementu pod wskazany adres indeksu i podanie wartości
print(lista1)
print(lista)
lista[1:3] = ["abc", "def"]
print(lista)
del lista[1:3] # usunięce elementów z listy
print(lista) | [
"tpwrochna@gmail.com"
] | tpwrochna@gmail.com |
5496b965a23684e98132011882e06fb5332ec2ea | 56c5f2fde5c929ac4304c0c318b13f9fd1ddacec | /goperation/manager/filters/config.py | 71519708ecea6990013f0f4fc785a566a16b3908 | [] | no_license | soulhez/Goperation | 5e7b9b67910deeabe12b46a05fcfc82dc1d3d723 | 64b2651229504f24e9c854b9e30da58cc7741176 | refs/heads/master | 2022-03-08T06:35:19.979125 | 2019-11-11T08:49:20 | 2019-11-11T08:49:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | from simpleutil.config import cfg
from simpleutil.config import types
CONF = cfg.CONF
# Options controlling token-free (trusted) access to the manager API.
authfilter_opts = [
    cfg.ListOpt('allowed_trusted_ip',
                item_type=types.IPAddress(version=4),
                default=[],
                help='Allowed ipaddress without token, 127.0.0.1 and local ip is allowed'),
    cfg.BoolOpt('allowed_same_subnet',
                default=True,
                help='Allow ipaddress without token in same subnet'),
    cfg.ListOpt('allowed_hostname',
                default=["*"],
                help='Allow hostname'),
]

# CORS configuration exposed to the WSGI middleware.
cors_opts = [
    cfg.ListOpt('allowed_origin',
                default=["*"],
                help='Indicate whether this resource may be shared with the '
                     'domain received in the requests "origin" header.'),
    cfg.BoolOpt('allow_credentials',
                default=True,
                help='Indicate that the actual request can include user '
                     'credentials'),
    cfg.ListOpt('expose_headers',
                default=['Content-Type', 'Cache-Control', 'Content-Language',
                         'Expires', 'Last-Modified', 'Pragma'],
                help='Indicate which headers are safe to expose to the API. '
                     'Defaults to HTTP Simple Headers.'),
    cfg.IntOpt('max_age',
               default=3600,
               help='Maximum cache age of CORS preflight requests.'),
    cfg.ListOpt('allow_methods',
                default=['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'UPDATE', 'HEAD'],
                help='Indicate which methods can be used during the actual '
                     'request.'),
    cfg.ListOpt('allow_headers',
                default=['Content-Type', 'Cache-Control', 'Content-Language',
                         'Expires', 'Last-Modified', 'Pragma', 'Auth-Token', 'X-Real-IP',
                         'gopfernet'],
                help='Indicate which header field names may be used during '
                     'the actual request.')
]
def list_opts():
    """Return every option this module registers (auth filter + CORS)."""
    combined = []
    combined.extend(authfilter_opts)
    combined.extend(cors_opts)
    return combined
| [
"lolizeppelin@gmail.com"
] | lolizeppelin@gmail.com |
ae269fbeb63c445ff3f0b9c7c9b142899a832f1f | f506dc8837e55dc1d8c023360d3395c1d24833e8 | /prepare-dataset.py | 0b3821e4fe3f99018e9f87a64387bf438986a1dc | [
"MIT"
] | permissive | hommmm/ParallelTTS | 0f82ed29cdad0441ce491987b72ef17027b48359 | d0e967d6d471bc901c85181a3b734d4df445dd08 | refs/heads/main | 2023-04-24T05:34:10.327568 | 2021-04-15T06:37:29 | 2021-04-15T06:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | #!/usr/bin/env python
"""Download and preprocess datasets. Supported datasets are:
* English female: LJSpeech
* Mandarin female: BBSpeech (BIAOBEI)
* Tibetan female: TBSpeech (Non-public)
* Mongolian male: MBSpeech (Mongolian Bible)
* Korean female: KSSpeech (Kaggle Korean Single Speech)
* Cantonese male: HKSpeech (Common Voice, Hong Kong)
* Japanese female: JPSpeech (JSUT Speech Corpus)
"""
__author__ = 'Atomicoo'
import sys
import os
import os.path as osp
import argparse
import pandas as pd
from utils.hparams import HParam
from utils.utils import download_file
from helpers.processor import Processor
from datasets.dataset import SpeechDataset

# CLI: optional config path plus a flag selecting statistics pre-computation.
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config', default=None, type=str, help='Config file path')
parser.add_argument('--compute', action='store_true', help='Pre-compute dataset statistics')
args = parser.parse_args()

# Fall back to config/default.yaml in the working directory when no config is given.
hparams = HParam(args.config) \
    if args.config else HParam(osp.join(osp.abspath(os.getcwd()), 'config', 'default.yaml'))

datasets_path = hparams.data.datasets_path
dataset_file_url = \
    f'https://open-speech-data.oss-cn-hangzhou.aliyuncs.com/{hparams.data.dataset_dir}.tar.bz2'
dataset_file_name = osp.basename(dataset_file_url)
dataset_dir = dataset_file_name[:-8]  # strip the ".tar.bz2" suffix
dataset_path = osp.join(datasets_path, dataset_dir)
wavfile_path = osp.join(dataset_path, "wavs")
melspec_path = osp.join(dataset_path, "mels")
# NOTE(review): the "and False" disables this early-exit check, so the script
# always downloads/extracts — looks like a debug leftover; confirm intent.
if osp.isdir(melspec_path) and False:
    print("%s dataset folder already exists" % dataset_dir)
    sys.exit(0)
else:
    dataset_file_path = osp.join(datasets_path, dataset_file_name)
    # Download the archive only if it is not already on disk.
    if not osp.isfile(dataset_file_path):
        download_file(dataset_file_url, dataset_file_path)
    else:
        print("'%s' already exists" % dataset_file_name)
    # Extract only if the wavs directory is missing.
    if not osp.isdir(wavfile_path):
        print("extracting '%s'..." % dataset_file_name)
        os.system('cd %s; tar xvjf %s' % (datasets_path, dataset_file_name))
    else:
        print("'%s' already exists" % wavfile_path)

dataset_root = osp.join(hparams.data.datasets_path, hparams.data.dataset_dir)
dataset = SpeechDataset([], dataset_root, hparams.text)
processor = Processor(hparams=hparams.audio)
# pre process/compute
if args.compute:
    processor.precompute(dataset_path, dataset)
else:
    processor.preprocess(dataset_path, dataset)
| [
"atomicoo95@gmail.com"
] | atomicoo95@gmail.com |
34b99d14bd59ef09e54ece755a077810ae85cd6e | 30dd17ec43e4ce2cae87b1186d3b35736e2bab20 | /cntrlscripts/full_list.py | 9866dadb6270bac5f375cf9f7e9fb20bc1e9a463 | [] | no_license | LukeMurphy/RPI | de2329fa82a36fac953d832fbb9a77ea9eae75d5 | 78a6b0d34d2b3b65f9dd7a502f60d500c0c11b80 | refs/heads/py3.6 | 2023-08-30T22:57:33.303969 | 2021-10-15T19:01:38 | 2021-10-15T19:01:38 | 45,502,537 | 3 | 0 | null | 2020-03-31T13:11:08 | 2015-11-03T23:27:06 | Python | UTF-8 | Python | false | false | 6,458 | py | """Summary
Attributes:
actionDict1 (TYPE): Description
actionDict2 (TYPE): Description
commadStringMultiPyth (str): Description
commadStringProc (str): Description
commadStringPyth (str): Description
JavaAppRunning (str): Description
Lb1 (TYPE): Description
leftBtnPlace (int): Description
quitbutton (TYPE): Description
root (TYPE): Description
scrollbar (TYPE): Description
slogan (TYPE): Description
sortbutton (TYPE): Description
sortDefault (int): Description
topBtnPlace (int): Description
"""
import os
import datetime
import subprocess
import sys
import tkinter as tk
from tkinter import *
import tkmacosx
from tkmacosx import Button
#from tk import Button
# Shell command prefixes for the three player variants; the selected config
# path is appended to one of these before launching.
commadStringPyth = "python3 /Users/lamshell/Documents/Dev/RPI/player.py -path /Users/lamshell/Documents/Dev/RPI/ -mname studio -cfg "
commadStringMultiPyth = "python3 /Users/lamshell/Documents/Dev/RPI/multiplayer.py -path /Users/lamshell/Documents/Dev/RPI/ -mname studio -cfg "
commadStringSeqPyth = "python3 /Users/lamshell/Documents/Dev/RPI/sequence-player.py -path /Users/lamshell/Documents/Dev/RPI/ -mname studio -cfg "
commadStringProc = ""
JavaAppRunning = ""  # name of the last launched .app, so it can be killed later
# Listbox entries: each item maps a display label to a config path.
actionDict1 = [
    #{"--- Police Line 2 --------": "p10-line/flow-1.cfg"},
    {"SCREEN TEST ": "screens/test-448x320.cfg"},
]
actionDict2 = [
    {"SCREEN TEST ": "screens/test-448x320.cfg"},
]
def verify():
    """Return (has_selection, selected_entry) from the config listboxes.

    Checks Lb1 first, then Lb2; the selected entry is the
    {display_label: config_path} dict at the selected index.

    NOTE(review): Lb2/actionDict2 are referenced here but Lb2 is never
    created in this script, so the elif branch would raise NameError if
    reached — confirm whether a second listbox was removed intentionally.
    """
    # print("==>",Lb.curselection())
    global actionDict1
    process = False
    configSelected = None
    if len(list(Lb1.curselection())) > 0:
        selection = Lb1.curselection()[0]
        configSelected = actionDict1[selection]
        process = True
    elif len(list(Lb2.curselection())) > 0:
        selection = Lb2.curselection()[0]
        configSelected = actionDict2[selection]
        process = True
    return (process, configSelected)
def execute(configToRun):
    """Launch the player that matches *configToRun*.

    .cfg files are handed to exactly one python player variant
    (multi / sequence / single); .app bundles are opened directly and
    remembered in JavaAppRunning so they can be killed later.
    """
    global JavaAppRunning
    if ".cfg" in configToRun:
        # Use an if/elif chain so only ONE player starts. Previously a
        # "multi" config without "-manifest" fell through to the else and
        # launched BOTH the multiplayer and the regular player.
        if "multi" in configToRun:
            os.system(commadStringMultiPyth + configToRun + "&")
        elif "-manifest" in configToRun:
            os.system(commadStringSeqPyth + configToRun + "&")
        else:
            os.system(commadStringPyth + configToRun + "&")
    elif ".app" in configToRun:
        os.system("open " + commadStringProc + configToRun)
        JavaAppRunning = configToRun
def action():
    """Run the currently selected config without stopping running players."""
    a = verify()
    if a[0] == True:
        # os.system('ps -ef | pgrep -f player | xargs sudo kill -9;')
        configSelected = a[1]
        configToRun = configSelected[list(configSelected.keys())[0]]
        execute(configToRun)
def action2():
    """Kill any running players (plus the tracked .app, if any), then run
    the currently selected config."""
    global JavaAppRunning
    a = verify()
    if a[0] == True:
        os.system("ps -ef | pgrep -f player | xargs sudo kill -9;")
        os.system("ps -ef | pgrep -f Player | xargs sudo kill -9;")
        if JavaAppRunning != '' :
            os.system("ps -ef | pgrep -f " + JavaAppRunning + " | xargs sudo kill -9;")
        configSelected = a[1]
        configToRun = configSelected[list(configSelected.keys())[0]]
        execute(configToRun)
def stopAll():
    """Kill every running player process."""
    # print("Tkinter is easy to use!")
    os.system("ps -ef | pgrep -f player | xargs sudo kill -9;")
def reSort():
    """Toggle the config list between name order and most-recent-first order."""
    global sortDefault
    if sortDefault == 0 :
        sortDefault = 1
        getAllConfigFiles(False)   # name-sorted
    else :
        sortDefault = 0
        getAllConfigFiles(True)    # date-sorted, newest first
# Generate list of configs:
from os import listdir
from os.path import isfile, join
from os import walk
def returnSecondElement(l):
    """Sort key helper: return the second entry of *l*
    (the mtime of a (path, mtime) pair)."""
    second_entry = l[1]
    return second_entry
def getAllConfigFiles(dateSort=False) :
    """Rebuild actionDict1 from the configs directory and refresh Lb1.

    Args:
        dateSort (bool, optional): when True, list all configs newest-first
            by mtime; when False, group by directory (name-sorted) with an
            empty separator entry between groups.
    """
    global actionDict1, Lb1
    configPath = "/Users/lamshell/Documents/Dev/RPI/configs/"
    arr = os.listdir(configPath)
    # Sort the directories by name
    arr.sort(reverse=True)
    fullList = []
    actionDict1 = []
    for d in arr :
        # Skip python files/dirs, macOS metadata and LED configs.
        if d.find(".py") == -1 and d.find(".DS_Store") == -1 and d.find("_py") == -1 and d.find("LED") == -1:
            subArr = os.listdir(configPath + d)
            if dateSort == False:
                subArr.sort(reverse=True)
            for f in subArr:
                if f.find(".DS_Store") == -1:
                    shortPath = d + "/" + f
                    res = os.stat(configPath + shortPath)
                    # Keep (relative path, modification time) pairs.
                    fullList.append((shortPath,res.st_mtime))
            if dateSort == False:
                # Empty dict acts as a blank separator row between directories.
                fullList.append({})
    # Sort the configs by date descending
    if dateSort == True :
        fullList.sort(key=returnSecondElement, reverse=True)
    for f in fullList :
        if len(f) > 0 :
            # Label each entry with its [YYYY-MM-DD HH:MM] timestamp.
            tsTxt = datetime.datetime.fromtimestamp(f[1]).strftime('[%Y-%m-%d %H:%M]')
            actionDict1.append({ "" + tsTxt + " " + f[0] : f[0]})
        else :
            actionDict1.append({ "" : ""})
    # Repopulate the listbox widget from the rebuilt entries.
    Lb1.delete(0,END)
    for i, item in enumerate(actionDict1):
        Lb1.insert(END, " " + list(item.keys())[0])
# Build the main window in the lower-right area of the screen.
root = tk.Tk()
#frame = tk.Frame(root, bg="darkgray")
#frame.pack(padx=1, pady=1)
# width x height x X x Y
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root.geometry("%dx%d+%d+%d" % (600, round(screen_height * .4), round(2*screen_width/3), round(2*screen_height/3)))
# Config listbox (populated properly by getAllConfigFiles below).
Lb1 = Listbox(root, width=60, height=32)
for i, item in enumerate(actionDict1):
    Lb1.insert(END, " " + list(item.keys())[0])
#Lb1.pack(side=tk.LEFT, padx=0, ipadx=10)
#Lb2.pack(side=tk.LEFT, ipadx=10, expand=0)
Lb1.place(bordermode=OUTSIDE, x=2, y=2)
# Vertical scrollbar wired to the listbox.
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = BOTH)
Lb1.config(yscrollcommand = scrollbar.set)
scrollbar.config(command = Lb1.yview)
# Button column layout and initial sort mode.
topBtnPlace = 8
leftBtnPlace = 440
sortDefault = 0
slogan = Button(
    root, text="Stop & Run", width = 120, bg='blue', fg='white', borderless=1, command=action2
)
slogan.place(bordermode=OUTSIDE, x=leftBtnPlace, y=topBtnPlace)
slogan = Button(
    root, text="Run", width = 120, bg='blue', fg='white', borderless=1, command=action
)
slogan.place(bordermode=OUTSIDE, x=leftBtnPlace, y=topBtnPlace+25)
slogan = Button(
    root, text="Stop All", width = 120, bg='blue', fg='white', borderless=1, command=stopAll
)
slogan.place(bordermode=OUTSIDE, x=leftBtnPlace, y=topBtnPlace+50)
quitbutton = Button(
    root, text="QUIT", width = 120, bg='blue', fg='white', borderless=1, command=quit
)
quitbutton.place(bordermode=OUTSIDE, x=leftBtnPlace, y=topBtnPlace+75)
sortbutton = Button(
    root, text="Re-Sort", width = 120, bg='blue', fg='white', borderless=1, command=reSort
)
sortbutton.place(bordermode=OUTSIDE, x=leftBtnPlace, y=topBtnPlace+100)
# Populate the list (date-sorted) and start the Tk event loop.
getAllConfigFiles(True)
root.mainloop()
| [
"web@lukelab.com"
] | web@lukelab.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.