seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
239386283 | from bs4 import BeautifulSoup
import argparse as ap
import requests, os, pickle
# Pull PostMates HTML
def pull_postmates_html(postmates_code):
    """Fetch a Postmates merchant page and return it parsed as BeautifulSoup.

    Args:
        postmates_code: merchant slug, e.g. 'ej-sushi-chicago'.

    Returns:
        BeautifulSoup document for the merchant page.

    Raises:
        requests.HTTPError: if the server returns a 4xx/5xx status.
        requests.RequestException: on connection failure or timeout.
    """
    url = "https://postmates.com/merchant/" + postmates_code
    # Timeout so a dead connection cannot hang the scraper forever.
    r = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors instead of silently parsing an error page.
    r.raise_for_status()
    # Trust the declared charset only when the server actually sent one.
    encoding = r.encoding if 'charset' in r.headers.get('content-type', '').lower() else None
    # Name the parser explicitly: omitting it is deprecated in bs4 and makes
    # the parse result depend on which parsers happen to be installed.
    soup = BeautifulSoup(r.content, 'html.parser', from_encoding=encoding)
    return soup
def pull_items(soup):
    """Extract menu rows from a parsed Postmates merchant page.

    Returns a list of [name, description, price, category, ""] lists.
    The trailing "" is a placeholder column (its consumer is not visible here).

    NOTE(review): relies on auto-generated CSS class names ('css-...') and a
    fixed parent-chain depth; these break whenever Postmates redeploys their
    frontend — confirm selectors are current before use.
    """
    output = []
    items = soup.find_all('h3', attrs={'class' : 'product-name css-1vuygjh e1tw3vxs3'})
    for item in items:
        # Walk five levels up to the menu-section container, then read its heading.
        category = item.parent.parent.parent.parent.parent.find('h2', attrs={'class' : 'css-sqkt8s e1u06svg0'}).get_text()
        # remove popular items because they're repeats
        if "Popular Items" in category:
            continue
        name = item.get_text()
        description = item.parent.find('div', attrs={'class' : 'product-description css-1cwo7kl e1tw3vxs5'}).get_text()
        price = item.parent.parent.find('div', attrs={'class' : 'css-1ju2yr7 e1tw3vxs4'}).find('span').get_text()
        output.append([name, description, price, category, ""])
    return output
# Save menu_items as a file using pickle library (not necessarily human readable)
def write_menu(menu_items, postmates_code):
    """Pickle menu_items to <project root>/output_menu_items/postmates/<code>.txt.

    Args:
        menu_items: list of menu rows (as produced by pull_items).
        postmates_code: merchant slug used as the output file stem.
    """
    # Project root: two directories above this script.
    script_dir = os.path.abspath(os.path.join(__file__, "../.."))
    # Build the path with os.path.join instead of string concatenation so it
    # is correct on every platform.
    out_path = os.path.join(script_dir, "output_menu_items", "postmates", postmates_code + ".txt")
    print("Saving menu items at " + out_path)
    with open(out_path, 'wb') as f:
        pickle.dump(menu_items, f)
if __name__ == '__main__':
    # Command-line entry point: scrape one merchant and pickle its menu.
    arg_parser = ap.ArgumentParser()
    arg_parser.add_argument('-p', '--postmates_code', help='Postmates Restaurant Code', default='ej-sushi-chicago')
    cli_args = vars(arg_parser.parse_args())
    code = cli_args['postmates_code']
    page = pull_postmates_html(code)
    menu_rows = pull_items(page)
    write_menu(menu_rows, code)
490311790 | # -*- coding: utf-8 -*-
"""
SmartWin回策框架
MacdMaWin策略
作者:Smart
新建时间:2018-09-02
"""
from StrategyTemplate import StrategyTemplate
import pandas as pd
from Indexer import MA, MACD, dfCross
class MacdMaWin(StrategyTemplate):
    """MACD + moving-average filter strategy for the SmartWin backtest framework.

    Opens longs on a MACD golden cross with close above the MA filter, and
    shorts on a death cross with close below it; each position is closed at
    the opposite cross.
    """

    strategy_name = 'MacdMaWin'
    # Parameter names: MACD short/medium/long periods and the MA filter period.
    strategy_para_name_list = ['MS', 'MM', 'ML', 'MA']

    def __init__(self,):
        super(MacdMaWin, self).__init__()

    def run_trade_logic(self, symbol_info, raw_data, para_dic):
        """Compute signals on raw_data and return a DataFrame of round-trip trades.

        Args:
            symbol_info: instrument metadata; only getSlip() is used here.
            raw_data: OHLC DataFrame with 'close', 'open', 'strtime', 'utc_time'.
            para_dic: dict with keys 'MS', 'MM', 'ML', 'MA'.

        Returns:
            DataFrame with open/close time, utc, index, price columns plus
            'tradetype' (1 long / -1 short), 'ret' and 'ret_r'.
        """
        MACD_S = para_dic['MS']
        MACD_L = para_dic['ML']
        MACD_M = para_dic['MM']
        MA_N = para_dic['MA']
        # Sequential row number, used later to build the cross index columns.
        raw_data['Unnamed: 0'] = range(raw_data.shape[0])
        # Compute MACD (plain version; the hull_macd variant is kept for reference).
        macd = MA.calMACD(raw_data['close'], MACD_S, MACD_L, MACD_M)
        # macd = MACD.hull_macd(raw_data['close'], MACD_S, MACD_L, MACD_M)  # hull_macd
        raw_data['DIF'] = macd[0]
        raw_data['DEA'] = macd[1]
        raw_data['MA'] = MA.calEMA(raw_data['close'], MA_N)
        # raw_data['MA'] = MA.hull_ma(raw_data['close'], MA_N)
        # Next-bar values: trades triggered on a cross are filled at the NEXT open.
        # .ffill() replaces the deprecated fillna(method='ffill') spelling.
        raw_data['next_strtime'] = raw_data['strtime'].shift(-1).ffill()
        raw_data['next_open'] = raw_data['open'].shift(-1).ffill()
        raw_data['next_utc'] = raw_data['utc_time'].shift(-1).ffill()
        # Golden (1) and death (-1) crosses of DIF over DEA.
        raw_data['MACD_True'], raw_data['MACD_Cross'] = dfCross(raw_data, 'DIF', 'DEA')
        # ============================ collect entry/exit points ============================
        goldcrosslist = pd.DataFrame({'goldcrosstime': raw_data.loc[raw_data['MACD_Cross'] == 1, 'next_strtime']})
        goldcrosslist['goldcrossutc'] = raw_data.loc[raw_data['MACD_Cross'] == 1, 'next_utc']
        goldcrosslist['goldcrossindex'] = raw_data.loc[raw_data['MACD_Cross'] == 1, 'Unnamed: 0']
        goldcrosslist['goldcrossprice'] = raw_data.loc[raw_data['MACD_Cross'] == 1, 'next_open']
        # Death-cross points.
        deathcrosslist = pd.DataFrame({'deathcrosstime': raw_data.loc[raw_data['MACD_Cross'] == -1, 'next_strtime']})
        deathcrosslist['deathcrossutc'] = raw_data.loc[raw_data['MACD_Cross'] == -1, 'next_utc']
        deathcrosslist['deathcrossindex'] = raw_data.loc[raw_data['MACD_Cross'] == -1, 'Unnamed: 0']
        deathcrosslist['deathcrossprice'] = raw_data.loc[raw_data['MACD_Cross'] == -1, 'next_open']
        goldcrosslist = goldcrosslist.reset_index(drop=True)
        deathcrosslist = deathcrosslist.reset_index(drop=True)
        # Long sequence: golden cross first, paired with the following death cross.
        # .loc replaces the .ix indexer removed in pandas 1.0; reset_index above
        # guarantees a RangeIndex so position 0 and label 0 coincide.
        if goldcrosslist.loc[0, 'goldcrossindex'] < deathcrosslist.loc[0, 'deathcrossindex']:
            longcrosslist = pd.concat([goldcrosslist, deathcrosslist], axis=1)
        else:
            # First death cross precedes the first golden cross: shift deaths up one row.
            longcrosslist = pd.concat([goldcrosslist, deathcrosslist.shift(-1)], axis=1)
        longcrosslist = longcrosslist.set_index(pd.Index(longcrosslist['goldcrossindex']), drop=True)
        # Short sequence: death cross first, paired with the following golden cross.
        if deathcrosslist.loc[0, 'deathcrossindex'] < goldcrosslist.loc[0, 'goldcrossindex']:
            shortcrosslist = pd.concat([deathcrosslist, goldcrosslist], axis=1)
        else:
            # First golden cross precedes the first death cross: shift golds up one row.
            shortcrosslist = pd.concat([deathcrosslist, goldcrosslist.shift(-1)], axis=1)
        shortcrosslist = shortcrosslist.set_index(pd.Index(shortcrosslist['deathcrossindex']), drop=True)
        # Entries that pass the MA filter.
        openlongindex = raw_data.loc[
            (raw_data['MACD_Cross'] == 1) & (raw_data['close'] > raw_data['MA'])].index
        openshortindex = raw_data.loc[
            (raw_data['MACD_Cross'] == -1) & (raw_data['close'] < raw_data['MA'])].index
        # Long trades: rows of the long sequence at the filtered entry indexes.
        longopr = longcrosslist.loc[openlongindex]
        longopr['tradetype'] = 1
        longopr.rename(columns={'goldcrosstime': 'opentime',
                                'goldcrossutc': 'openutc',
                                'goldcrossindex': 'openindex',
                                'goldcrossprice': 'openprice',
                                'deathcrosstime': 'closetime',
                                'deathcrossutc': 'closeutc',
                                'deathcrossindex': 'closeindex',
                                'deathcrossprice': 'closeprice'}, inplace=True)
        # Short trades: rows of the short sequence at the filtered entry indexes.
        shortopr = shortcrosslist.loc[openshortindex]
        shortopr['tradetype'] = -1
        shortopr.rename(columns={'deathcrosstime': 'opentime',
                                 'deathcrossutc': 'openutc',
                                 'deathcrossindex': 'openindex',
                                 'deathcrossprice': 'openprice',
                                 'goldcrosstime': 'closetime',
                                 'goldcrossutc': 'closeutc',
                                 'goldcrossindex': 'closeindex',
                                 'goldcrossprice': 'closeprice'}, inplace=True)
        # Combine, order chronologically, drop unpaired (NaN) trades, compute returns.
        result = pd.concat([longopr, shortopr])
        result = result.sort_index()
        result = result.reset_index(drop=True)
        result = result.dropna()
        slip = symbol_info.getSlip()
        result['ret'] = ((result['closeprice'] - result['openprice']) * result['tradetype']) - slip
        result['ret_r'] = result['ret'] / result['openprice']
        return result

    def get_para_list(self, para_list_dic):
        """Enumerate every (MS, ML, MM, MA) combination as a parameter-set DataFrame."""
        macd_s_list = para_list_dic['MS']
        macd_l_list = para_list_dic['ML']
        macd_m_list = para_list_dic['MM']
        ma_n_list = para_list_dic['MA']
        setlist = []
        i = 0
        for s1 in macd_s_list:
            for m1 in macd_m_list:
                for l1 in macd_l_list:
                    for ma_n in ma_n_list:
                        setname = "Set%d MS%d ML%d MM%d MA%d" % (i, s1, l1, m1, ma_n)
                        setlist.append([setname, s1, l1, m1, ma_n])
                        i += 1
        return pd.DataFrame(setlist, columns=['Setname', 'MS', 'ML', 'MM', 'MA'])

    def get_para_name_list(self):
        """Return the ordered list of parameter names this strategy expects."""
        return self.strategy_para_name_list
| null | Strategy/MacdMaWin.py | MacdMaWin.py | py | 6,443 | python | en | code | null | code-starcoder2 | 50 |
57362755 | import json#cPickle as pickle
import cv2
import numpy as np
from sys import stdout
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from scipy.misc import imsave
import time
from keras import backend as K
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path):
    """Load image file paths and 133-way one-hot breed targets from a directory tree."""
    bundle = load_files(path)
    file_paths = np.array(bundle['filenames'])
    one_hot_targets = np_utils.to_categorical(np.array(bundle['target']), 133)
    return file_paths, one_hot_targets
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load one image file as a (1, 224, 224, 3) float tensor for Keras."""
    # PIL image resized to the network's fixed input size.
    pil_img = image.load_img(img_path, target_size=(224, 224))
    # 3-D (224, 224, 3) array, then prepend the batch axis.
    arr = image.img_to_array(pil_img)
    return arr[np.newaxis, ...]
def paths_to_tensor(img_paths):
    """Stack per-image tensors into one (N, 224, 224, 3) array, with a progress bar."""
    return np.vstack([path_to_tensor(p) for p in tqdm(img_paths)])
def run(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors, test_targets):
    """Grid-search a list of hand-written CNN hyper-parameter settings.

    For each setting: builds a small Conv/MaxPool stack, trains it for 5
    epochs (best weights checkpointed to saved_models/), renders gradient-
    ascent visualizations of every conv filter to weightImages/, evaluates
    test accuracy, and appends accuracy/time into the setting dict, dumping
    the whole history to the 'results' file as JSON after every setting.

    Args:
        train_tensors/valid_tensors/test_tensors: 4-D image arrays
            (assumed NHWC with 3 channels — see the np.random.random shape
            below; TODO confirm).
        *_targets: one-hot label arrays matching the tensors.
    """
    # Keep record of test accuracy. ########################################
    #accHistory = {}
    # Hyper parameter history. #############################################
    # Each dict is one architecture: per-layer filter count, kernel size,
    # stride, padding, and pooling size/stride; presence of 'lN_kSize'
    # toggles whether layer N is built at all.
    hpHistory = []
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 2, 'l1_strides': 1,
                      'l1_padding': 'valid', 'l1_poolSize': 2, 'l1_poolStrides': 2,
                      'l2_filters': 32, 'l2_kSize': 4, 'l2_strides': 1,
                      'l2_padding': 'valid', 'l2_poolSize': 2, 'l2_poolStrides': 2,
                      'l3_filters': 64, 'l3_kSize': 8, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 2, 'l3_poolStrides': 2})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 4, 'l1_strides': 1,
                      'l1_padding': 'valid', 'l1_poolSize': 2, 'l1_poolStrides': 2,
                      'l2_filters': 32, 'l2_kSize': 4, 'l2_strides': 1,
                      'l2_padding': 'valid', 'l2_poolSize': 2, 'l2_poolStrides': 2,
                      'l3_filters': 64, 'l3_kSize': 4, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 2, 'l3_poolStrides': 2})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 4, 'l1_strides': 2,
                      'l1_padding': 'valid', 'l1_poolSize': 2, 'l1_poolStrides': 2,
                      'l2_filters': 32, 'l2_kSize': 4, 'l2_strides': 2,
                      'l2_padding': 'valid', 'l2_poolSize': 2, 'l2_poolStrides': 2,
                      'l3_filters': 64, 'l3_kSize': 4, 'l3_strides': 2,
                      'l3_padding': 'valid', 'l3_poolSize': 2, 'l3_poolStrides': 2})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 4, 'l1_strides': 4,
                      'l1_padding': 'valid', 'l1_poolSize': 2, 'l1_poolStrides': 2,
                      'l2_filters': 32, 'l2_kSize': 4, 'l2_strides': 2,
                      'l2_padding': 'valid', 'l2_poolSize': 2, 'l2_poolStrides': 2,
                      'l3_filters': 64, 'l3_kSize': 4, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 2, 'l3_poolStrides': 2})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 8, 'l1_strides': 1,
                      'l1_padding': 'valid', 'l1_poolSize': 4, 'l1_poolStrides': 4,
                      'l2_filters': 32, 'l2_kSize': 4, 'l2_strides': 1,
                      'l2_padding': 'valid', 'l2_poolSize': 4, 'l2_poolStrides': 4,
                      'l3_filters': 64, 'l3_kSize': 2, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 4, 'l3_poolStrides': 4})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 8, 'l1_strides': 1,
                      'l1_padding': 'valid', 'l1_poolSize': 4, 'l1_poolStrides': 4,
                      'l2_filters': 32, 'l2_kSize': 8, 'l2_strides': 1,
                      'l2_padding': 'valid', 'l2_poolSize': 4, 'l2_poolStrides': 4,
                      'l3_filters': 64, 'l3_kSize': 8, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 4, 'l3_poolStrides': 4})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 8, 'l1_strides': 1,
                      'l1_padding': 'valid', 'l1_poolSize': 4, 'l1_poolStrides': 4,
                      'l2_filters': 32, 'l2_kSize': 8, 'l2_strides': 1,
                      'l2_padding': 'valid', 'l2_poolSize': 2, 'l2_poolStrides': 2,
                      'l3_filters': 64, 'l3_kSize': 8, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 2, 'l3_poolStrides': 2})
    hpHistory.append({'l1_filters': 16, 'l1_kSize': 4, 'l1_strides': 1,
                      'l1_padding': 'valid', 'l1_poolSize': 2, 'l1_poolStrides': 2,
                      'l2_filters': 32, 'l2_kSize': 4, 'l2_strides': 1,
                      'l2_padding': 'valid', 'l2_poolSize': 2, 'l2_poolStrides': 2,
                      'l3_filters': 64, 'l3_kSize': 4, 'l3_strides': 1,
                      'l3_padding': 'valid', 'l3_poolSize': 2, 'l3_poolStrides': 2,
                      'l4_filters': 64, 'l4_kSize': 4, 'l4_strides': 1,
                      'l4_padding': 'valid', 'l4_poolSize': 2, 'l4_poolStrides': 2})
    # Loop through the different param settings. ###########################
    for iSetting in range(len(hpHistory)):
        current_setting = hpHistory[iSetting]  # NOTE(review): assigned but unused below
        print('Testing setting {n:g} ***************************************************************************'.format(n=iSetting))
        startTime = time.time()
        print('Setting up model.')
        # Build the CNN. #######################################################
        model = Sequential()
        # First convolutional layer (always present).
        model.add(Conv2D(filters=hpHistory[iSetting]['l1_filters'],
                         kernel_size=hpHistory[iSetting]['l1_kSize'],
                         strides=hpHistory[iSetting]['l1_strides'],
                         padding=hpHistory[iSetting]['l1_padding'],
                         activation='relu',
                         input_shape=train_tensors[0].shape,
                         name='conv_1'))
        model.add(MaxPooling2D(pool_size=hpHistory[iSetting]['l1_poolSize'],
                               strides=hpHistory[iSetting]['l1_poolStrides'],
                               padding=hpHistory[iSetting]['l1_padding'],
                               name='pool_1'))
        # Second convolutional layer (optional).
        if 'l2_kSize' in hpHistory[iSetting].keys():
            model.add(Conv2D(filters=hpHistory[iSetting]['l2_filters'],
                             kernel_size=hpHistory[iSetting]['l2_kSize'],
                             strides=hpHistory[iSetting]['l2_strides'],
                             padding=hpHistory[iSetting]['l2_padding'],
                             activation='relu',
                             name='conv_2'))
            model.add(MaxPooling2D(pool_size=hpHistory[iSetting]['l2_poolSize'],
                                   strides=hpHistory[iSetting]['l2_poolStrides'],
                                   padding=hpHistory[iSetting]['l2_padding'],
                                   name='pool_2'))
        # Third convolutional layer (optional).
        if 'l3_kSize' in hpHistory[iSetting].keys():
            model.add(Conv2D(filters=hpHistory[iSetting]['l3_filters'],
                             kernel_size=hpHistory[iSetting]['l3_kSize'],
                             strides=hpHistory[iSetting]['l3_strides'],
                             padding=hpHistory[iSetting]['l3_padding'],
                             activation='relu',
                             name='conv_3'))
            model.add(MaxPooling2D(pool_size=hpHistory[iSetting]['l3_poolSize'],
                                   strides=hpHistory[iSetting]['l3_poolStrides'],
                                   padding=hpHistory[iSetting]['l3_padding'],
                                   name='pool_3'))
        # Fourth convolutional layer (optional).
        if 'l4_kSize' in hpHistory[iSetting].keys():
            model.add(Conv2D(filters=hpHistory[iSetting]['l4_filters'],
                             kernel_size=hpHistory[iSetting]['l4_kSize'],
                             strides=hpHistory[iSetting]['l4_strides'],
                             padding=hpHistory[iSetting]['l4_padding'],
                             activation='relu',
                             name='conv_4'))
            model.add(MaxPooling2D(pool_size=hpHistory[iSetting]['l4_poolSize'],
                                   strides=hpHistory[iSetting]['l4_poolStrides'],
                                   padding=hpHistory[iSetting]['l4_padding'],
                                   name='pool_4'))
        # Add global pooling layer.
        model.add(GlobalAveragePooling2D())
        # Add classification layer (133 dog breeds).
        model.add(Dense(133, activation='softmax'))
        model.summary()
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        # Train the model. #####################################################
        print('')
        print('Training model.')
        epochs = 5
        # Checkpoint keeps only the best validation weights; note every setting
        # reuses the same file path, overwriting the previous setting's weights.
        checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5', verbose=1, save_best_only=True)
        model.fit(train_tensors, train_targets, validation_data=(valid_tensors, valid_targets), epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
        train_time = time.time() - startTime
        # Load the best weights.
        model.load_weights('saved_models/weights.best.from_scratch.hdf5')
        # Visualize the weights. ###############################################
        print('')
        print('Creating weight images.')
        # dimensions of the generated pictures for each filter.
        img_width = train_tensors[0].shape[0]
        img_height = train_tensors[0].shape[1]

        # util function to convert a tensor into a valid image
        def deprocess_image(x):
            # normalize tensor: center on 0., ensure std is 0.1
            x -= x.mean()
            x /= (x.std() + K.epsilon())
            x *= 0.1
            # clip to [0, 1]
            x += 0.5
            x = np.clip(x, 0, 1)
            # convert to RGB array
            x *= 255
            if K.image_data_format() == 'channels_first':
                x = x.transpose((1, 2, 0))
            x = np.clip(x, 0, 255).astype('uint8')
            return x

        # this is the placeholder for the input images
        input_img = model.input
        # get the symbolic outputs of each "key" layer (we gave them unique names).
        layer_dict = dict([(layer.name, layer) for layer in model.layers])

        def normalize(x):
            # utility function to normalize a tensor by its L2 norm
            return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())

        # The names of the layers we want to visualize (every conv layer).
        layer_names = []
        for name in layer_dict:
            if 'conv' in name:
                layer_names.append(name)
        #layer_name = 'conv_1'
        # Create weight images for each convolutional layer.
        for layer_name in layer_names:
            print(' Creating weight image for layer {n:s}'.format(n=layer_name))
            n_filters = layer_dict[layer_name].filters
            kept_filters = []
            for filter_index in range(n_filters):
                print(' Processing filter %d' % filter_index)
                start_time = time.time()
                # we build a loss function that maximizes the activation
                # of the nth filter of the layer considered
                layer_output = layer_dict[layer_name].output
                if K.image_data_format() == 'channels_first':
                    loss = K.mean(layer_output[:, filter_index, :, :])
                else:
                    loss = K.mean(layer_output[:, :, :, filter_index])
                # we compute the gradient of the input picture wrt this loss
                grads = K.gradients(loss, input_img)[0]
                # normalization trick: we normalize the gradient
                grads = normalize(grads)
                # this function returns the loss and grads given the input picture
                iterate = K.function([input_img], [loss, grads])
                # step size for gradient ascent
                step = 1.
                # we start from a gray image with some random noise
                if K.image_data_format() == 'channels_first':
                    input_img_data = np.random.random((1, 3, img_width, img_height))
                else:
                    input_img_data = np.random.random((1, img_width, img_height, 3))
                input_img_data = (input_img_data - 0.5) * 20 + 128
                # we run gradient ascent (comment said 20; loop actually does 30)
                for i in range(30):
                    loss_value, grads_value = iterate([input_img_data])
                    input_img_data += grads_value * step
                    #print('Current loss value:', loss_value)
                    stdout.write('{r:s} Current loss value: {n:2.2f}'.format(r='\r', n=loss_value))
                    stdout.flush()
                    if loss_value <= 0.:
                        # some filters get stuck to 0, we can skip them
                        break
                print('')
                # Decode the resulting input image.
                img = deprocess_image(input_img_data[0])
                kept_filters.append((img, loss_value))
                end_time = time.time()
                print(' Filter %d processed in %ds' % (filter_index, end_time - start_time))
            # Create the image and save it: n x n grid sized to the filter count.
            n = 8
            if n_filters <= 36:
                n = 6
            if n_filters <= 25:
                n = 5
            if n_filters <= 16:
                n = 4
            if n_filters <= 9:
                n = 3
            if n_filters <= 4:
                n = 2
            # The filters that have the highest loss are assumed to be better-looking. Sort by loss.
            kept_filters.sort(key=lambda x: x[1], reverse=True)
            # Build a black picture with enough space for all filter images.
            # Keep 5px margin between pictures.
            margin = 5
            width = n * img_width + (n - 1) * margin
            height = n * img_height + (n - 1) * margin
            stitched_filters = np.zeros((width, height, 3))
            # fill the picture with our saved filters
            for i in range(n):
                for j in range(n):
                    try:
                        img, loss = kept_filters[i * n + j]
                        stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                                         (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
                    except IndexError:
                        # Fewer filters than grid cells: leave the cell black.
                        pass
            # Save the result to disk (weightImages/ must already exist).
            print(' Saving image.')
            cv2.imwrite('weightImages/hp{n:g}_{l:s}.png'.format(n=iSetting, l=layer_name), stitched_filters)
        # Test the CNN. ######################################################
        # get index of predicted dog breed for each image in test set
        dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
        # Report test accuracy
        test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)
        print('')
        print('Test accuracy: %.4f%%' % test_accuracy)
        hpHistory[iSetting]['accuracy'] = test_accuracy
        hpHistory[iSetting]['time'] = train_time
        hpHistory[iSetting]['i'] = iSetting
        # Save the results after every setting so a crash loses at most one run.
        with open('results', 'w') as file:
            file.write(json.dumps(hpHistory))
        print('Done in {n:g} seconds.'.format(n=time.time() - startTime))
        print('')
        print('')
if __name__ == "__main__":
    # Script entry point: load the dog-breed dataset, convert images to
    # normalized tensors, then run the hyper-parameter sweep.
    print('Loading data.')
    # load train, test, and validation datasets
    train_files, train_targets = load_dataset('dogImages/train')
    valid_files, valid_targets = load_dataset('dogImages/valid')
    test_files, test_targets = load_dataset('dogImages/test')
    # load list of dog names (slice strips the fixed 'dogImages/train/' prefix
    # and trailing '/')
    dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
    print('Preparing tensors.')
    from PIL import ImageFile
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    # pre-process the data for Keras: scale uint8 pixels to [0, 1]
    train_tensors = paths_to_tensor(train_files).astype('float32')/255
    valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
    test_tensors = paths_to_tensor(test_files).astype('float32')/255
    print('Running.')
    run(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors, test_targets)
103667864 | import numpy as np
from .detections import associate_detections_to_trackers
from .person_box_tracker import PersonBoxTracker
class SORT(object):
    """Simple Online and Realtime Tracking over per-frame detections.

    Keeps a list of PersonBoxTracker instances, matching them to incoming
    detections each frame and reporting only tracks that are confidently
    established.
    """

    def __init__(self, max_age=3, min_hits=3):
        # max_age: frames a tracker may go unmatched before it is discarded.
        # min_hits: consecutive matches required before a track is reported
        #           (waived during the first min_hits frames).
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0

    def update(self, detections):
        """
        Update the representation of detections
        :param detections: np.array in the format [x1, y1, x2, y2, score]
        :return: The list of indexes that we are certain that are the targets
        """
        self.frame_count += 1
        # Predict the new location of every existing tracker.
        trks = np.zeros((len(self.trackers), 5))
        to_del = []
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if np.any(np.isnan(pos)):
                to_del.append(t)
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        # Drop trackers whose prediction diverged (NaN), back to front so the
        # remaining indexes stay valid.
        for t in reversed(to_del):
            self.trackers.pop(t)
        matched, unmatched_detections, unmatched_trackers = associate_detections_to_trackers(detections, trks)
        # Feed each matched tracker its assigned detection.
        for t, trk in enumerate(self.trackers):
            if t not in unmatched_trackers:
                d = matched[np.where(matched[:, 1] == t)[0], 0]
                trk.update(detections[d, :][0])
        # Spawn a new tracker for every unmatched detection.
        for i in unmatched_detections:
            trk = PersonBoxTracker(detections[i])
            self.trackers.append(trk)
        i = len(self.trackers)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent
        # concrete integer dtype.
        ret = np.zeros(len(self.trackers), dtype=np.int64) - 1
        for trk in reversed(self.trackers):
            i -= 1
            # Report a track only if it matched this frame and is established.
            if trk.time_since_update < 1 and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret[i] = i
            if trk.time_since_update > self.max_age:
                ret = np.delete(ret, i)
                # NOTE(review): shifts trailing indexes after a removal, but
                # when i == 0 the slice [-0:] decrements EVERY entry — confirm
                # this is the intended behavior.
                ret[-i:] -= 1
                self.trackers.pop(i)
        return ret[ret >= 0]
| null | sort/sort.py | sort.py | py | 1,957 | python | en | code | null | code-starcoder2 | 50 |
def baseN(num, base, numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
    """
    Convert any int to base/radix 2-36 string. Special numerals can be used
    to convert to any base or radix you need. This function is essentially
    an inverse int(s, base).
    For example:
    >>> baseN(-13, 4)
    '-31'
    >>> baseN(91321, 2)
    '10110010010111001'
    >>> baseN(791321, 36)
    'gyl5'
    >>> baseN(91321, 2, 'ab')
    'babbaabaababbbaab'
    """
    if num == 0:
        return "0"
    if num < 0:
        return '-' + baseN(-num, base, numerals)
    if not 2 <= base <= len(numerals):
        raise ValueError('Base must be between 2-%d' % len(numerals))
    # Collect digits least-significant first, then reverse.
    digits = []
    while num:
        num, remainder = divmod(num, base)
        digits.append(numerals[remainder])
    return ''.join(reversed(digits))
def hexToB24(s):
    """Re-encode a hex string as uppercase base-24, two output digits per byte.

    Args:
        s: hex string with an even number of characters.

    Raises:
        ValueError: if len(s) is odd. (Was BaseException — ValueError is more
        precise and is still caught by any existing broad handler.)
    """
    if len(s) % 2 != 0:
        raise ValueError("Invalid Input")
    pieces = []
    for x in range(0, len(s), 2):
        # Each byte (0-255) encodes to at most two base-24 digits; left-pad to
        # exactly two so the output length is deterministic.
        b24 = baseN(int(s[x:x + 2], 16), 24)
        pieces.append(b24.rjust(2, "0"))
    return "".join(pieces).upper()
def decrypt(s):
    """Map each base-24 digit character (0-9, A-N) to the product-key alphabet.

    Raises KeyError if s contains any character outside the 24-digit set.
    """
    # Positional mapping: digit i of the base-24 alphabet -> key letter i.
    key_alphabet = dict(zip('0123456789ABCDEFGHIJKLMN',
                            'BCDFGHJKMPQRTVWXY2346789'))
    return ''.join(key_alphabet[ch] for ch in s)
def form(s):
    """Insert a dash between every five-character group of s.

    Raises:
        ValueError: if len(s) is not a multiple of 5. (Was BaseException —
        ValueError is more precise and still caught by broad handlers.)
    """
    if len(s) % 5 != 0:
        raise ValueError("FormatError")
    return "-".join(s[x:x + 5] for x in range(0, len(s), 5))
def get_key(s):
    """Convert a hex string to a dash-formatted product key:
    hex -> base-24 -> key alphabet -> 5-character dashed groups."""
    return form(decrypt(hexToB24(s)))
| null | Python/calculator-base.py | calculator-base.py | py | 2,059 | python | en | code | null | code-starcoder2 | 50 |
135985996 | # -*- coding: utf-8 -*
# ----------------------------------------here we-import files---------------------------------------------------------------
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from datetime import timedelta
from demoapp.forms import SignUpForms,LoginForm,PostForm,CommentForm,LikeForm
from demoapp.models import UserModel, SessionToken, PostModel,CommentModel,LikeModel
from django.contrib.auth.hashers import make_password,check_password
from upload_to_win.settings import BASE_DIR
from django.utils import timezone
from imgurpython import ImgurClient
import yagmail
import ctypes
import tkMessageBox
from django.contrib import messages
# Create your views here.
def signup_view(request):
    """Render the signup form; on valid POST create the user, send a welcome
    email, show a Windows popup, and render a success/failure page."""
    if request.method == 'POST':
        form = SignUpForms(request.POST)
        if form.is_valid():
            Username = form.cleaned_data['Username']
            Email = form.cleaned_data['Email']
            Name = form.cleaned_data['Name']
            Password = form.cleaned_data['Password']
            # insert data to db (password is stored hashed via make_password)
            new_user = UserModel(Name=Name, Password=make_password(Password), Username=Username, Email=Email)
            new_user.save()
            # sending welcome Email To User That Have Signup Successfully
            # SECURITY NOTE(review): Gmail address and password are hard-coded
            # in source — move them to settings/environment variables.
            message = "Welcome to UPLOAD TO WIN. Your account is sucessfuly created on UPLOAD TO WIN"
            yag = yagmail.SMTP('kumarrajenderkullu@gmail.com', 'luvmomdad11')
            yag.send(to=Email, subject='Upload to win', contents=message)
            # NOTE(review): Windows-only popup; this crashes the request on any
            # other OS (ctypes.windll does not exist there).
            ctypes.windll.user32.MessageBoxW(0, u"You are Successfully Registered.",
                                             u"Done", 0)
            # SUCCESSFULLY SEND EMAIL TO THE USER WHO HAS SIGNUP.
            template_name = 'success.html'
        else:
            template_name = 'failed.html'
    else:
        form = SignUpForms()
        template_name = 'signup.html'
    return render(request, template_name, {'form': form})
#-------------------------------------create a new function for login user---------------------------------------------------------
def login_view(request):
    """Render the login form; on valid credentials create a SessionToken,
    set it as a cookie, and redirect to the feed.

    Returns a redirect on success, otherwise renders login.html or
    login_fail.html.
    """
    if request.method == 'GET':
        # Display login page.
        login_form = LoginForm()
        template_name = 'login.html'
    elif request.method == 'POST':
        # Process the submitted credentials.
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            Username = login_form.cleaned_data['Username']
            Password = login_form.cleaned_data['Password']
            # Read user from db.
            user = UserModel.objects.filter(Username=Username).first()
            if user:
                # Compare the hashed password.
                if check_password(Password, user.Password):
                    # Credentials OK: issue a session token cookie and go to the feed.
                    token = SessionToken(user=user)
                    token.create_token()
                    token.save()
                    response = redirect('feed/')
                    response.set_cookie(key='session_token', value=token.session_token)
                    return response
                    # (removed unreachable template/tkMessageBox code that
                    # followed this return in the original)
                else:
                    # Wrong password.
                    template_name = 'login_fail.html'
            else:
                # User doesn't exist.
                template_name = 'login_fail.html'
        else:
            # Validation failed.
            template_name = 'login_fail.html'
    else:
        # Other HTTP methods (e.g. HEAD) previously crashed with a NameError
        # because login_form was never bound; show the form instead.
        login_form = LoginForm()
        template_name = 'login.html'
    return render(request, template_name, {'login_form': login_form})
#-------------------------------------------Create a new function for post --------------------------------------------------------------
def post_view(request):
    """Create a new post from an uploaded image (mirrored to Imgur) or show
    the post form; requires a valid session."""
    user = check_validation(request)
    if user:
        if request.method == 'POST':
            form = PostForm(request.POST, request.FILES)
            if form.is_valid():
                image = form.cleaned_data.get('image')
                caption = form.cleaned_data.get('caption')
                post = PostModel(user=user, image=image, caption=caption)
                post.save()
                path = str(BASE_DIR+"//"+post.image.url)
                # SECURITY NOTE(review): Imgur client id/secret are hard-coded
                # in source — move them to settings/environment variables.
                client = ImgurClient('918e8552c6faccc', '38babe210df5ed9cde17605ac646b24a27f2b58a')
                post.image_url = client.upload_from_path(path, anon=True)['link']
                post.save()
                return redirect('/feed/')
            # NOTE(review): an invalid form falls through and returns None
            # (HTTP 500 in Django) — confirm whether re-rendering the form
            # was intended.
        elif request.method == 'GET':
            # NOTE(review): GET redirects to logout — looks unintentional;
            # the render branch below is only reachable for other methods.
            return redirect('/logout/')
        else:
            form = PostForm()
            return render(request, 'post.html', {'form': form})
    else:
        return redirect('/login/')
#--------------------------------------------Create a new functions to show the all post of user--------------------------------------
def feed_view(request):
    """Show all posts, newest first, marking the ones the current user liked."""
    user = check_validation(request)
    if user:
        posts = PostModel.objects.all().order_by('-created_on',)
        # NOTE(review): one LikeModel query per post (N+1); a single
        # filter over all post ids would scale better.
        for post in posts:
            existing_like = LikeModel.objects.filter(post_id=post.id, user=user).first()
            if existing_like:
                post.has_liked = True
        return render(request, 'feed.html', {'posts': posts})
    else:
        return redirect('/login/')
#----------------------------------------------Create a new functions to like the user post-------------------------------------------
def like_view(request):
    """Toggle the signed-in user's like on a post; a new like emails the
    post's owner."""
    user = check_validation(request)
    if user and request.method == 'POST':
        form = LikeForm(request.POST)
        if form.is_valid():
            post_id = form.cleaned_data.get('post').id
            existing_like = LikeModel.objects.filter(post_id=post_id, user=user).first()
            if not existing_like:
                like = LikeModel.objects.create(post_id=post_id, user=user)
                email = like.post.user.Email
                # sending notification Email to the post owner
                # SECURITY NOTE(review): Gmail credentials hard-coded in
                # source — move to settings/environment variables.
                message = "Hii!.. Someone Liked your Post on Upload To Win."
                yag = yagmail.SMTP('kumarrajenderkullu@gmail.com', 'luvmomdad11')
                yag.send(to=email, subject='Liked Your Post', contents=message)
            else:
                # Second click removes the like (toggle).
                existing_like.delete()
            return redirect('/feed/')
    else:
        return redirect('/login/')
#------------------------------------------------Create a new functions to comment on a user post---------------------------------------
def comment_view(request):
    """Create a comment on a post and email the post's owner."""
    user = check_validation(request)
    if user and request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            post_id = form.cleaned_data.get('post').id
            comment_text = form.cleaned_data.get('comment_text')
            comment = CommentModel.objects.create(user=user, post_id=post_id, comment_text=comment_text)
            comment.save()
            email = comment.post.user.Email
            # sending notification Email to the post owner
            # SECURITY NOTE(review): Gmail credentials hard-coded in source —
            # move to settings/environment variables. Also the subject says
            # 'Liked Your Post' for a comment — confirm intended.
            message = "Hii! Someone Comented on your Post on Upload To Win."
            yag = yagmail.SMTP('kumarrajenderkullu@gmail.com', 'luvmomdad11')
            yag.send(to=email, subject='Liked Your Post', contents=message)
            # TODO: ADD MESSAGE TO INDICATE SUCCESS
            return redirect('/feed/')
        else:
            # TODO: ADD MESSAGE FOR FAILING TO POST COMMENT
            return redirect('/feed/')
    else:
        return redirect('/login')
# -----------------------------------------------Create a functions for validating the session---------------------------------------------
def check_validation(request):
    """Return the user owning a valid, unexpired session-token cookie, else None.

    Tokens are considered live for 24 hours after creation.
    """
    token_value = request.COOKIES.get('session_token')
    if not token_value:
        return None
    session = SessionToken.objects.filter(session_token=token_value).first()
    if session is None:
        return None
    expires_at = session.created_on + timedelta(days=1)
    if expires_at > timezone.now():
        return session.user
    return None
def logout_view(request):
    """Render the logout page.

    NOTE(review): this does not delete the SessionToken row or clear the
    session cookie, so the session stays usable — confirm intended.
    """
    return render(request, 'logout.html')
import os
import argparse

# Command-line interface: which directory to scan, which file-name fragment
# to match, and which substring to grep for inside matching files.
parser = argparse.ArgumentParser()
parser.add_argument('--file-ext', help='file extension')
parser.add_argument('--dir', help='directory path')
parser.add_argument('--substr', help='Sub string for searching')
args = parser.parse_args()

extens = args.file_ext
dir = args.dir
substr = args.substr
def find(extens):
    """Yield (file_name, line, line_number) for every line of every file in
    the global `dir` whose name contains `extens`.

    Fixes two defects of the original: files were opened without ever being
    closed (handle leak), and the path was built with `dir + file`, which
    breaks unless `dir` ends with a separator.
    """
    for file_name in os.listdir(dir):
        if extens in file_name:
            with open(os.path.join(dir, file_name)) as fh:
                # Iterate lazily instead of readlines(): same (line, number)
                # pairs without loading the whole file.
                for num, line in enumerate(fh):
                    yield file_name, line, num
def grep(gen, substr):
    """Filter a stream of (file, line, number) records, keeping only the
    records whose line contains `substr`."""
    yield from (
        (fname, text, num) for fname, text, num in gen if substr in text
    )
# Drive the pipeline: list matching files, then grep them for the substring.
for data in grep(find(extens), substr):
    print(data)
| null | dz-6/find.py | find.py | py | 661 | python | en | code | null | code-starcoder2 | 50 |
55885691 | import numpy as np
from scipy.interpolate import interp1d
from scipy import ndimage
import scipy.constants as sc
import astropy.constants as const
import astropy.units as u
default_cmap = "inferno"
sigma_to_FWHM = 2.0 * np.sqrt(2.0 * np.log(2))  # Gaussian sigma -> FWHM factor (~2.355)
FWHM_to_sigma = 1.0 / sigma_to_FWHM
arcsec = np.pi / 648000  # radians per arcsecond
def spectral_convolution(model, Delta_v, n_window=101):
    """Spectrally convolve one channel of a cube over a +/- Delta_v window.

    NOTE(review): this function appears to have been lifted from a method and
    is broken as written: `iv`, `self`, `cube` and `interpolate` are all
    undefined here (the module imports `interp1d`, not `interpolate`), and the
    Hanning window `w` is computed but never used.  Calling it raises
    NameError -- it needs to be re-attached to its class or given those
    values as parameters.
    """
    # Creating a Hanning function with n_window points
    w = np.hanning(n_window)
    # For each pixel, resampling the spectrum between -FWHM to FWHM
    # then integrating over convolution window
    v_new = model.velocity[iv] + np.linspace(-1, 1, n_window) * Delta_v
    iv_min = int(iv - Delta_v / self.dv - 1)
    iv_max = int(iv + Delta_v / self.dv + 2)
    im = np.zeros([self.nx, self.ny])
    for j in range(self.ny):
        for i in range(self.nx):
            f = interpolate.interp1d(self.velocity[iv_min:iv_max], cube[iv_min:iv_max, i, j])
            im[i, j] = np.average(f(v_new))
    return im
def bin_image(im, n, func=np.sum):
    """Bin a 2D image in blocks of n x n pixels.

    Returns an array of shape (im.shape[0] // n, im.shape[1] // n) where each
    element is func() applied to the corresponding n x n block.  When a
    dimension is not an exact multiple of n, the leftover border is split
    evenly and discarded.

    Bug fix: the block slices previously omitted the x0/y0 offset in their
    stop index (im[x0 + k1*n : (k1+1)*n, ...]), producing truncated blocks
    whenever the image size was not an exact multiple of n.
    """
    nx = im.shape[0]
    nx_new = nx // n
    x0 = (nx - nx_new * n) // 2

    ny = im.shape[1]
    ny_new = ny // n
    y0 = (ny - ny_new * n) // 2

    return np.reshape(
        np.array(
            [
                func(im[x0 + k1 * n : x0 + (k1 + 1) * n, y0 + k2 * n : y0 + (k2 + 1) * n])
                for k1 in range(nx_new)
                for k2 in range(ny_new)
            ]
        ),
        (nx_new, ny_new),
    )
def Wm2_to_Jy(nuFnu, nu):
    """Convert a nu*F_nu flux in W.m-2 to a flux density in Jy.

    nu [Hz]
    """
    # 1 Jy = 1e-26 W m-2 Hz-1
    return nuFnu / nu * 1e26
def Jy_to_Wm2(Fnu, nu):
    """Convert a flux density in Jy to nu*F_nu in W.m-2.

    nu [Hz]
    """
    # 1 Jy = 1e-26 W m-2 Hz-1
    return Fnu * nu * 1e-26
def Jybeam_to_Tb(Fnu, nu, bmaj, bmin):
    '''
    Convert Flux density in Jy/beam to brightness temperature [K]
    Flux [Jy]
    nu [Hz]
    bmaj, bmin in [arcsec]
    T [K]
    '''
    # Gaussian beam solid angle in steradian: pi / (4 ln 2) * bmaj * bmin.
    beam_area = bmin * bmaj * arcsec ** 2 * np.pi / (4.0 * np.log(2.0))
    # exp(h nu / k T) - 1 obtained by inverting the Planck law
    # (the 1e26 factor converts Jy to SI units).
    exp_m1 = 1e26 * beam_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu
    # Clamp to avoid log1p of a non-positive argument for very bright pixels.
    hnu_kT = np.log1p(np.maximum(exp_m1, 1e-10))
    Tb = sc.h * nu / (hnu_kT * sc.k)
    return Tb
def Jy_to_Tb(Fnu, nu, pixelscale):
    '''
    Convert Flux density in Jy/pixel to brightness temperature [K]
    Flux [Jy]
    nu [Hz]
    pixelscale in [arcsec]
    T [K]
    '''
    # Local copy of the module-level arcsec constant (pi / 648000 rad per
    # arcsec) so the function also works standalone.
    arcsec_rad = np.pi / 648000
    # Pixel solid angle in steradian.
    pixel_area = (pixelscale * arcsec_rad) ** 2
    # exp(h nu / k T) - 1 from inverting the Planck law.
    # Bug fix: the Jy -> SI factor was 1e16; it must be 1e26
    # (1 Jy = 1e-26 W m-2 Hz-1), as in Jybeam_to_Tb.
    exp_m1 = 1e26 * pixel_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu
    # Clamp (rather than adding 1e-10) so the guard matches Jybeam_to_Tb.
    hnu_kT = np.log1p(np.maximum(exp_m1, 1e-10))
    Tb = sc.h * nu / (hnu_kT * sc.k)
    return Tb
def Wm2_to_Tb(nuFnu, nu, pixelscale):
    """Convert a nu*F_nu flux in W.m-2/pixel to brightness temperature [K]
    using the full Planck law.

    nu [Hz], pixelscale [arcsec]
    """
    # Pixel solid angle in steradian (arcsec is the module-level rad/arcsec).
    solid_angle = (pixelscale * arcsec) ** 2
    # exp(h nu / k T) - 1 obtained by inverting the Planck law.
    planck_exp_m1 = 2.0 * sc.h * nu ** 4 * solid_angle / (sc.c ** 2 * nuFnu)
    return sc.h * nu / (sc.k * np.log1p(planck_exp_m1))
# -- Functions to deal the synthesized beam.
def _beam_area(self):
"""Beam area in arcsec^2"""
return np.pi * self.bmaj * self.bmin / (4.0 * np.log(2.0))
def _beam_area_str(self):
    """Beam area in steradian."""
    # arcsec^2 -> steradian via the module-level arcsec (rad per arcsec).
    return self._beam_area() * arcsec ** 2
def _pixel_area(self):
return self.pixelscale ** 2
def _beam_area_pix(self):
    """Beam area in pix^2."""
    # Ratio of beam area to pixel area (both in arcsec^2).
    return self._beam_area() / self._pixel_area()
def telescope_beam(wl, D):
    """ wl and D in m, returns FWHM in arcsec"""
    # ~lambda/D diffraction beam with a 0.989 prefactor, converted from
    # radians to arcsec (1 arcsec = 4.84814e-6 rad).
    beam_rad = 0.989 * (wl / D)
    return beam_rad / 4.84814e-6
def make_cut(im, x0, y0, x1, y1, num=None, plot=False):
    """
    Make a cut in image 'im' along a line between (x0,y0) and (x1,y1)
    x0, y0, x1, y1 are pixel coordinates

    If num is given, the profile is sampled at num points with spline
    interpolation; otherwise it is sampled at roughly pixel spacing using
    nearest-pixel lookup.
    """
    if plot:
        # Imported locally: matplotlib is only needed for the optional plot,
        # and neither `plt` nor `colors` was imported at module level
        # (calling with plot=True used to raise NameError).
        import matplotlib.pyplot as plt
        from matplotlib import colors

        vmax = np.max(im)
        vmin = vmax * 1e-6
        norm = colors.LogNorm(vmin=vmin, vmax=vmax, clip=True)
        plt.imshow(im, origin="lower", norm=norm)
        plt.plot([x0, x1], [y0, y1])

    if num is not None:
        # Extract the values along the line, using spline interpolation
        x, y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
        zi = ndimage.map_coordinates(im, np.vstack((y, x)))
    else:
        # Extract the values along the line at the pixel spacing.
        # Bug fix: np.int was removed in NumPy 1.24; use the builtin int.
        length = int(np.hypot(x1 - x0, y1 - y0))
        x, y = np.linspace(x0, x1, length), np.linspace(y0, y1, length)
        zi = im[y.astype(int), x.astype(int)]
    return zi
class DustExtinction:
    """Dust extinction law read from the bundled kext_albedo_WD_MW_<Rv>_D03
    opacity tables, normalized to 1 at the V band.

    Bug fix: `wl` and `kext` used to be *class* attributes, so every new
    instance appended its table onto the same shared lists (and re-reading
    corrupted previously normalized data).  They are now per-instance
    attributes created in __init__.
    """
    import os

    __dirname__ = os.path.dirname(__file__)

    _extinction_dir = __dirname__ + "/extinction_laws"
    _filename_start = "kext_albedo_WD_MW_"
    _filename_end = "_D03.all"
    V = 5.47e-1  # V band wavelength in micron

    def __init__(self, Rv=3.1, **kwargs):
        # Per-instance tables (previously shared, mutable class attributes).
        self.wl = []
        self.kext = []
        self.filename = (
            self._extinction_dir
            + "/"
            + self._filename_start
            + str(Rv)
            + self._filename_end
        )
        self._read(**kwargs)

    def _read(self):
        """Parse the opacity table and normalize kext to 1 at the V band."""
        with open(self.filename, 'r') as file:
            for line in file:
                if (not line.startswith("#")) and (
                    len(line) > 1
                ):  # Skipping comments and empty lines
                    line = line.split()
                    self.wl.append(float(line[0]))
                    kpa = float(line[4])
                    albedo = float(line[1])
                    # Total extinction = absorption opacity / (1 - albedo).
                    self.kext.append(kpa / (1.0 - albedo))

        # Normalize extinction in V band (result becomes an ndarray).
        kext_interp = interp1d(self.wl, self.kext)
        kextV = kext_interp(self.V)
        self.kext /= kextV

    def redenning(self, wl, Av):
        """
        Computes extinction factor to apply for a given Av
        Flux_red = Flux * redenning
        wl in micron
        """
        kext_interp = interp1d(self.wl, self.kext)
        kext = kext_interp(wl)
        # tau_V = Av in magnitudes converted to optical depth: 0.4 ln(10) Av.
        tau_V = 0.4 * np.log(10.0) * Av
        return np.exp(-tau_V * kext)
def Hill_radius():
    """Hill radius -- not implemented yet.

    NOTE(review): the commented formula below divides by 3 and then
    multiplies by Mstar; the Hill radius needs Mplanet / (3 * Mstar).
    """
    pass
    #d * (Mplanet/3*Mstar)**(1./3)
def splash2mcfost(anglex, angley, anglez):
    """Convert splash rotation angles [deg] to MCFOST viewing angles.

    Rotates the basis vectors with _rotate_splash_axes (the inverse of the
    data rotation) and reads off the orientation of the rotated frame.
    Returns [inclination, azimuth, PA] in degrees.
    """
    #Convert the splash angles to mcfost angles
    # Base unit vector
    x0 = [1,0,0]
    y0 = [0,1,0]
    z0 = [0,0,1]
    # Splash rotated vectors
    x = _rotate_splash_axes(x0,-anglex,-angley,-anglez)
    y = _rotate_splash_axes(y0,-anglex,-angley,-anglez)
    z = _rotate_splash_axes(z0,-anglex,-angley,-anglez)
    # MCFOST angles
    mcfost_i = np.arccos(np.dot(z,z0)) * 180./np.pi
    if abs(mcfost_i) > 1e-30:
        print("test1")  # NOTE(review): leftover debug print
        # angle of the z vector in the (-y0, x0) plane
        mcfost_az = (np.arctan2(np.dot(z,x0), -np.dot(z,y0)) ) * 180./np.pi
        # angle of the z0 vector in the (x_image, y_image) plane (astro orientation + 90 deg)
        mcfost_PA = -( np.arctan2(np.dot(x,z0), np.dot(y,z0)) ) * 180./np.pi
    else:
        print("test2")  # NOTE(review): leftover debug print
        mcfost_az = 0.
        # angle of the y vector in the (x0, y0) plane
        mcfost_PA = (np.arctan2(np.dot(y,x0),np.dot(y,y0)) ) * 180./np.pi
    print("anglex =",anglex, "angley=", angley, "anglez=", anglez,"\n")
    print("Direction to oberver=",z)
    print("x-image=",x)
    print("y_image = ", y,"\n")
    print("MCFOST parameters :")
    print("inclination =", mcfost_i)
    print("azimuth =", mcfost_az)
    print("PA =", mcfost_PA)
    return [mcfost_i, mcfost_az, mcfost_PA]
def _rotate_splash(xyz, anglex, angley, anglez):
# Defines rotations as in splash
# This function is to rotate the data
x = xyz[0]
y = xyz[1]
z = xyz[2]
# rotate about z
if np.abs(anglez) > 1e-30:
r = np.sqrt(x**2+y**2)
phi = np.arctan2(y,x)
phi -= anglez/180*np.pi
x = r*np.cos(phi)
y = r*np.sin(phi)
# rotate about y
if np.abs(angley) > 1e-30:
r = np.sqrt(z**2+x**2)
phi = np.arctan2(z,x)
phi -= angley/180*np.pi
x = r*np.cos(phi)
z = r*np.sin(phi)
# rotate about x
if np.abs(anglex) > 1e-30:
r = np.sqrt(y**2+z**2)
phi = np.arctan2(z,y)
phi -= anglex/180*np.pi
y = r*np.cos(phi)
z = r*np.sin(phi)
return np.array([x,y,z])
def _rotate_splash_axes(xyz, anglex, angley, anglez):
# Defines rotations as in splash, but in reserve order
# as we rotate the axes instead of the data
x = xyz[0]
y = xyz[1]
z = xyz[2]
# rotate about x
if np.abs(anglex) > 1e-30:
r = np.sqrt(y**2+z**2)
phi = np.arctan2(z,y)
phi -= anglex/180*np.pi
y = r*np.cos(phi)
z = r*np.sin(phi)
# rotate about y
if np.abs(angley) > 1e-30:
r = np.sqrt(z**2+x**2)
phi = np.arctan2(z,x)
phi -= angley/180*np.pi
x = r*np.cos(phi)
z = r*np.sin(phi)
# rotate about z
if np.abs(anglez) > 1e-30:
r = np.sqrt(x**2+y**2)
phi = np.arctan2(y,x)
phi -= anglez/180*np.pi
x = r*np.cos(phi)
y = r*np.sin(phi)
return np.array([x,y,z])
def rotate_vec(u, v, angle):
    '''
    Rotate vector u about the axis directed along v by `angle` radians,
    using the Rodrigues rotation formula.  u is modified in place and
    also returned.
    '''
    axis = v / np.sqrt(np.inner(v, v))  # unit vector along v
    cross_term = np.cross(axis, u)
    axial_component = np.inner(axis, u)
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    for idx in range(len(u)):
        u[idx] = u[idx] * cos_a + cross_term[idx] * sin_a + axis[idx] * axial_component * (1.0 - cos_a)
    return u
def rotate_coords(x, y, z, inc, PA):
    '''
    Rotate x, y, z coordinates into the observational plane.

    The rotation axis lies in the sky plane, perpendicular to the PA
    direction; the rotation angle is the inclination (radians).
    '''
    axis = [-np.sin(PA), np.cos(PA), 0.]
    rotated = rotate_vec([x, y, z], axis, inc)
    return rotated[0], rotated[1], rotated[2]
def rotate_to_obs_plane(x,y,inc,PA):
    '''
    same as rotate_coords but takes 2D x,y as arrays
    '''
    # Rotates each (x_i, y_i, 0) point independently; the z output is discarded.
    for i,xx in enumerate(x): # this can probably be done more efficiently
        x[i],y[i],dum = rotate_coords(x[i],y[i],0.,inc,PA)
    return x,y
def planet_position(model, i_planet, i_star):
    '''
    Returns planet position [arcsec] and PA [deg] in the map
    '''
    planet_xy = model.star_positions[:, 0, 0, i_planet]
    star_xy = model.star_positions[:, 0, 0, i_star]
    offset = planet_xy - star_xy
    separation = np.hypot(offset[0], offset[1])
    # Position angle measured from the arctan2 convention, shifted by 270 deg.
    position_angle = np.rad2deg(np.arctan2(offset[1], -offset[0])) + 360 - 90
    return [separation, position_angle]
| null | pymcfost/utils.py | utils.py | py | 10,299 | python | en | code | null | code-starcoder2 | 50 |
204739894 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ManagedHostingEnvironment(Resource):
    """Description of a managed hosting environment.

    :param id: Resource Id
    :type id: str
    :param name: Resource Name
    :type name: str
    :param kind: Kind of resource
    :type kind: str
    :param location: Resource Location
    :type location: str
    :param type: Resource type
    :type type: str
    :param tags: Resource tags
    :type tags: dict
    :param managed_hosting_environment_name: Name of the managed hosting
     environment
    :type managed_hosting_environment_name: str
    :param managed_hosting_environment_location: Location of the managed
     hosting environment e.g. "West US"
    :type managed_hosting_environment_location: str
    :param status: Current status of the managed hosting environment.
     Possible values include: 'Preparing', 'Ready', 'Deleting'
    :type status: str or :class:`ManagedHostingEnvironmentStatus
     <azure.mgmt.web.models.ManagedHostingEnvironmentStatus>`
    :param virtual_network: Description of the managed hosting environment's
     virtual network
    :type virtual_network: :class:`VirtualNetworkProfile
     <azure.mgmt.web.models.VirtualNetworkProfile>`
    :param ipssl_address_count: Number of ip ssl addresses reserved for the
     managed hosting environment
    :type ipssl_address_count: int
    :param dns_suffix: DNS suffix of the managed hosting environment
    :type dns_suffix: str
    :param subscription_id: Subscription of the managed hosting environment
     (read only)
    :type subscription_id: str
    :param resource_group: Resource group of the managed hosting environment
     (read only)
    :type resource_group: str
    :param environment_is_healthy: True/false indicating whether the managed
     hosting environment is healthy
    :type environment_is_healthy: bool
    :param environment_status: Detailed message about with results of the
     last check of the managed hosting environment
    :type environment_status: str
    :param suspended: True/false indicating whether the managed hosting
     environment is suspended. The environment can be suspended e.g. when the
     management endpoint is no longer available
     (most likely because NSG blocked the incoming traffic)
    :type suspended: bool
    :param api_management_account: Resource id of the api management account
     associated with this managed hosting environment (read only)
    :type api_management_account: str
    """
    # `location` is the only required field for this generated model.
    _validation = {
        'location': {'required': True},
    }
    # Maps Python attribute names to their (possibly nested) JSON keys and
    # serialization types, used by the msrest (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'managed_hosting_environment_name': {'key': 'properties.name', 'type': 'str'},
        'managed_hosting_environment_location': {'key': 'properties.location', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'ManagedHostingEnvironmentStatus'},
        'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
        'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
        'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
        'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
        'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
        'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
        'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
        'suspended': {'key': 'properties.suspended', 'type': 'bool'},
        'api_management_account': {'key': 'properties.apiManagementAccount', 'type': 'str'},
    }

    def __init__(self, location, id=None, name=None, kind=None, type=None, tags=None, managed_hosting_environment_name=None, managed_hosting_environment_location=None, status=None, virtual_network=None, ipssl_address_count=None, dns_suffix=None, subscription_id=None, resource_group=None, environment_is_healthy=None, environment_status=None, suspended=None, api_management_account=None):
        # Common Resource fields are handled by the base class; the rest are
        # plain attribute assignments of the environment-specific properties.
        super(ManagedHostingEnvironment, self).__init__(id=id, name=name, kind=kind, location=location, type=type, tags=tags)
        self.managed_hosting_environment_name = managed_hosting_environment_name
        self.managed_hosting_environment_location = managed_hosting_environment_location
        self.status = status
        self.virtual_network = virtual_network
        self.ipssl_address_count = ipssl_address_count
        self.dns_suffix = dns_suffix
        self.subscription_id = subscription_id
        self.resource_group = resource_group
        self.environment_is_healthy = environment_is_healthy
        self.environment_status = environment_status
        self.suspended = suspended
        self.api_management_account = api_management_account
| null | azure-mgmt-web/azure/mgmt/web/models/managed_hosting_environment.py | managed_hosting_environment.py | py | 5,538 | python | en | code | null | code-starcoder2 | 50 |
78999154 | import numpy as np
def create_all_subject_connectivity_matrices(subjects):
    """Load each subject's saved connectivity matrix (.npy path) and stack
    them with the subject axis moved last (n x n x n_subjects for square
    matrices)."""
    stacked = np.array([np.load(path) for path in subjects])
    return np.swapaxes(stacked, 0, -1)
def norm_matrices(matrices, norm_type='scaling'):
    """Normalize each subject slice matrices[:, :, s] independently.

    norm_type: 'scaling' (min-max), 'fisher' (arctanh), 'z-score', or
    'rating' (dense ranks).  For every type except 'rating', zero entries
    are treated as missing and replaced by NaN before normalizing (the
    nan-aware helpers then ignore them).

    Bug fix: the NaN mask used to be applied to the output copy and then
    each slice was immediately overwritten with values normalized from the
    *unmasked* input, so zeros were never actually excluded.  The mask is
    now applied to each input slice before normalization.  Also renamed the
    local that shadowed this function's own name.
    """
    normed = matrices.copy()
    for s in range(matrices.shape[-1]):
        slice_s = matrices[:, :, s].copy()
        if norm_type != 'rating':
            # Zeros mean "no value": exclude them via NaN.
            slice_s[slice_s == 0] = np.nan
        if norm_type == 'scaling':
            normed[:, :, s] = norm_scaling(slice_s)
        elif norm_type == 'fisher':
            normed[:, :, s] = fisher_transformation(slice_s)
        elif norm_type == 'z-score':
            normed[:, :, s] = z_score(slice_s)
        elif norm_type == 'rating':
            normed[:, :, s] = rating(slice_s)
    return normed
def norm_scaling(matrix):
    """Min-max scale `matrix` to [0, 1], ignoring NaNs."""
    lo = np.nanmin(matrix)
    hi = np.nanmax(matrix)
    return (matrix - lo) / (hi - lo)
def fisher_transformation(matrix):
    """Fisher z-transform (arctanh), mapping values in (-1, 1) onto the reals."""
    return np.arctanh(matrix)
def z_score(matrix):
    """Standardize to zero mean and unit (population) std, ignoring NaNs."""
    centered = matrix - np.nanmean(matrix)
    return centered / np.nanstd(matrix)
def rating(matrix):
    """Replace each value with its 0-based dense rank (ties share a rank)."""
    from scipy.stats import rankdata
    ranks = rankdata(matrix, method='dense') - 1
    return ranks.reshape(matrix.shape)
132052169 | from lib.scrapy_table import Scrapy_Table
# Cross-reference the São Paulo city council members (Wikipedia table)
# against the list of people involved in Operação Lava Jato, printing
# any councillor that appears on the investigation list.
url="https://pt.wikipedia.org/wiki/C%C3%A2mara_Municipal_de_S%C3%A3o_Paulo"
url_jato="https://pt.wikipedia.org/wiki/Lista_de_pessoas_envolvidas_na_Opera%C3%A7%C3%A3o_Lava_Jato"
site_connect = Scrapy_Table(url)
site_jato = Scrapy_Table(url_jato)
# Table 5 on the council page, table 1 on the Lava Jato page.
tables = tuple(site_connect.get_tables(5))
lista_lava_jato = tuple(site_jato.get_tables(1))
# Collect the first column (names) of every row, skipping the header row.
lista_investigados = ()
for investigados in lista_lava_jato[1:]:
    lista_investigados = lista_investigados + (investigados[0],)
for vereador in tables[1:]:
    if vereador[0] in lista_investigados:
        print(vereador)
# vereador = "Aécio Neves"
# if vereador in lista_investigados:
#     print(vereador)
4017268 | print("**********Calule o tempo de viagem**********") #informa a finalidade do programa.
distância = float(input("Qual a distância a ser percorrida: ")) # distance to travel (user input)
velocidade = int(input("Qual a velocidade média esperada: ")) # expected average speed (user input)
hora_decimal = (distância / velocidade) # travel time in decimal hours
hora = int(hora_decimal) # whole hours of travel
dias = (hora // 24) # whole days
horas_restantes = hora % 24 # leftover hours after removing the whole days
minuto_decimal = (hora - hora_decimal) # fractional-hour remainder (zero or negative)
minuto = abs(int(minuto_decimal * 60)) # remainder converted to whole minutes (absolute value)
print("O tempo de viagem é",dias,"dias",horas_restantes,"horas e",minuto,"minutos.") # output: days, hours, minutes
| null | tempodeviagem.py | tempodeviagem.py | py | 1,239 | python | en | code | null | code-starcoder2 | 50 |
37623113 | import inline as inline
import numpy
import matplotlib
import matplotlib.pyplot as pyplot
import scipy.special
class NeuralNetwork:
    """Simple 3-layer (input/hidden/output) feed-forward network trained with
    plain backpropagation and a sigmoid activation."""

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # Layer sizes.
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # learningrate
        self.lr = learningrate
        # Weight matrices, sampled from N(0, fan_in^-0.5).
        # Bug fix: wih must have shape (hidden, input) -- it was created as
        # (output, hidden), which breaks query/train whenever the layer
        # sizes differ.
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # sigmoid activation function
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """One gradient-descent step on a single (inputs, targets) example."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass: input -> hidden -> output.
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # error is the (target - actual)
        output_errors = targets - final_outputs
        # hidden layer error is the output_errors, split by weights, recombined at hidden nodes
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Weight updates; sigmoid derivative is out * (1 - out).
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

    def query(self, input_list):
        """Forward-propagate input_list and return the output activations
        as a column vector of shape (onodes, 1)."""
        # Convert the input list into a 2D column vector.
        inputs = numpy.array(input_list, ndmin=2).T
        # Signals into and out of the hidden layer.
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        # Signals into and out of the output layer.
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
# --- Script: build the network, preview one MNIST digit, then train on the
# --- 100-sample MNIST subset (one pass, one example at a time).
input_nodes = 784
hidden_nodes = 100
output_nodes = 10
learning_rate = 0.3
# instatnce of neural network
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# array = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# print(array.query([1.0, 0.5, -1.5]))
# NOTE(review): backslash in the path makes this Windows-only (and "DATSET"
# looks like a typo for "DATASET") -- verify against the actual folder name.
training_data_file = open("MNIST_DATSET\mnist_train_100.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# Show the third record as a 28x28 grayscale image.
all_values = training_data_list[2].split(',')
image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
pyplot.show()
#
# scaled_imput = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
#
# # output nodes is 10
# onodes = 10
# targets = numpy.zeros(onodes) + 0.01
# targets[int(all_values[0])] = 0.99
# print(targets)
# train neural network
# go through all record in the training data set for record in trianing_data_list:
for record in training_data_list:
    # split records by commma
    all_values = record.split(',')
    # scale and shift the inputs
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # create the target output values (all 0.01, except the desired label which is 0.99)
    targets = numpy.zeros(output_nodes) + 0.01
    # all_values[0] is the target labe for this record
    targets[int(all_values[0])] = 0.99
    n.train(inputs, targets)
    pass
| null | first.py | first.py | py | 4,018 | python | en | code | null | code-starcoder2 | 51 |
def main():
    """Prompt for a username and report whether it is on the access list.

    Fixes: removed dataset metadata fused onto the def line, replaced the
    fragile index loop (whose `break` only worked because it skipped the
    i == len-1 check) with a plain membership test, and corrected the
    "Acess" typo in the user-facing messages.
    """
    usernames = ['jimbo', 'giltson98', 'derekf', 'WhatSup', 'NicolEye', 'swei45',
                 'BaseInterpreterInterface', 'BaseStdIn', 'Command', 'ExecState', 'InteractiveConsole',
                 'InterpreterInterface', 'StartServer', 'bob']
    username_input = str(input("Can i have your username: "))
    if username_input in usernames:
        print("Access granted")
    else:
        print("Access denied")
main() | null | Practical 4/security_checker.py | security_checker.py | py | 532 | python | en | code | null | code-starcoder2 | 51 |
396457645 | from PIL import Image
from PIL import ImageDraw
import queue
import math
width = 100  # canvas is width x width pixels
background = (255, 255, 255, 255)  # RGBA white
fill_color = (0, 0, 0, 255)  # RGBA black
im = Image.new("RGBA", (width, width), background)
pen = ImageDraw.Draw(im)
# 3x3 binomial kernel used as sub-pixel coverage weights in draw_point.
weight = [[1/16, 1/8, 1/16], [1/8, 1/4, 1/8], [1/16, 1/8, 1/16]]
# Implicit line equation coefficients (a*x + b*y + c = 0) and its norm s;
# assigned in the __main__ block before any drawing happens.
a = 0
b = 0
c = 0
s = 0
def draw_point(x, y):
    """Paint one pixel of the current line with 3x3 supersampled antialiasing.

    Relies on the module globals a, b, c (implicit line equation
    a*x + b*y + c = 0) and s (its norm, sqrt(a^2 + b^2)), which are set in
    the __main__ block, plus `pen`, `weight`, `fill_color` and `background`.
    """
    w = 0
    for i in range(3):
        for j in range(3):
            # Sub-pixel sample at offsets {0, 0.5, 1.0} from (x, y).
            _x = x + i * 0.5
            _y = y + j * 0.5
            # Perpendicular distance of the sample from the line.
            dist = abs((a * _x + b * _y + c) / s);
            if dist <= 0.5:
                w += weight[i][j]
    # Blend foreground over background by the accumulated coverage weight w.
    pen.point((x, y), (round(w * fill_color[0] + (1 - w) * background[0]), round(w * fill_color[1] + (1 - w) * background[1]), round(w * fill_color[2] + (1 - w) * background[2]), 255))
def draw_line(s, t):
    """Draw an antialiased line from point s=(x,y) to t=(x,y) using an
    incremental integer error walk, painting each step and its two
    neighbours through draw_point().

    NOTE(review): the parameter `s` shadows the module-level normalization
    constant `s`; draw_point still reads the global, but the name reuse is
    confusing.
    """
    sx, sy = s[0], s[1]
    tx, ty = t[0], t[1]
    # Horizontal line: paint pixels directly, no antialiasing needed.
    if (ty == sy):
        if (sx > tx):
            sx, tx = tx, sx
        for i in range(sx, tx + 1):
            pen.point((i, sy), fill_color)
        return
    # Vertical line: same direct treatment.
    if (tx == sx):
        if (sy > ty):
            sy, ty = ty, sy
        for i in range(sy, ty + 1):
            pen.point((sx, i), fill_color)
        return
    slope = (ty - sy) / (tx - sx)
    # Shallow lines step in x; steep lines step in y (branch below).
    if (abs(slope) < 1):
        if (sx > tx):
            sx, tx, sy, ty = tx, sx, ty, sy
        dx = tx - sx
        dy = ty - sy
        k = dy * 2
        e = 0  # accumulated error term, in units of dx
        x, y = sx, sy
        while x < tx:
            draw_point(x, y)
            draw_point(x, y - 1)
            draw_point(x, y + 1)
            x += 1
            e += k
            if e > dx:
                y += 1
                e -= dx * 2
            if e < -dx:
                y -= 1
                e += dx * 2
    else:
        if (sy > ty):
            sx, tx, sy, ty = tx, sx, ty, sy
        dx = tx - sx
        dy = ty - sy
        k = dx * 2
        e = 0  # accumulated error term, in units of dy
        x, y = sx, sy
        while y < ty:
            draw_point(x, y)
            draw_point(x - 1, y)
            draw_point(x + 1, y)
            y += 1
            e += k
            if e > dy:
                x += 1
                e -= dy * 2
            if e < -dy:
                x -= 1
                e += dy * 2
if __name__ == "__main__":
    # Read the endpoints, precompute the implicit line equation
    # a*x + b*y + c = 0 (used by draw_point for coverage), then draw and show.
    x0, y0, x1, y1 = input("please enter coordinates of line segments' start and end point:").split()
    x0 = int(x0)
    y0 = int(y0)
    x1 = int(x1)
    y1 = int(y1)
    a = y0 - y1
    b = x1 - x0
    c = -(a * x0 + b * y0)
    s = math.sqrt(a * a + b * b);  # line-equation norm; zero if start == end
    draw_line((x0, y0), (x1, y1))
    im.show()
122958924 | import logging
from gym.envs.registration import register
logger = logging.getLogger(__name__)
# Register the transmon qubit environment with gym under the id 'Qubit-v0'.
register(
    id='Qubit-v0',
    entry_point='gym_qubit.envs:TransmonEnv',
    timestep_limit=1000,  # NOTE(review): newer gym versions renamed this to max_episode_steps -- confirm the pinned gym still accepts it
    reward_threshold=1.0,
    nondeterministic=False,
)
273748052 | # importing all the required libraries
import os
import sys
import torch
import pandas as pd
from os.path import abspath
from argparse import ArgumentParser
# importing local modules
script_path = os.path.abspath('')
sys.path.insert(0, abspath(script_path))
# print(abspath(script_path))
from utils.utilities import load_check_point, \
tokenizer_nltk, load_dict_from_disk
# importing model
from model.model import DCNN_TREC
# importing the model parameters
from utils.model_parameters import TREC_DATASET_PARAMETERS
parser = ArgumentParser()
parser.add_argument(
"--embedding_dim", help="Mention the dimension of embedding.",
type=int,
default=50
)
parser.add_argument(
"--sentence_length", help="Fix the sentence length for each sentence.",
type=int,
default=10
)
parser.add_argument(
"--saved_model_path", help="Mention the path where model is saved.",
type=str,
default=None
)
parser.add_argument(
"--saved_vocab_path", help="Mention the path where vocab is saved.",
type=str,
default=None
)
parser.add_argument(
"--device", help="Mention the device to be used cuda or cpu,",
type=str,
default=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
)
parser.add_argument(
"--glove_file_path", help="Mention the path where glove embeddings are saved.",
type=str,
default="/home/neo/glove.6B.50d.txt"
)
parser.add_argument(
"--file_to_predict_on", help="Mention the path of the csv file to predict on.",
type=str,
default=None
)
parser.add_argument(
"--file_to_save_predictions", help="Mention the path of the csv file to save predictions.",
type=str,
default=None
)
arguments = parser.parse_args()
EMBEDDING_DIM = arguments.embedding_dim
TREC_DATASET_PARAMETERS["embedding_dim"] = EMBEDDING_DIM
SENT_LENGTH = arguments.sentence_length
TREC_DATASET_PARAMETERS["cell_one_parameter_dict"]["sent_length"] = SENT_LENGTH
MODEL_PATH = arguments.saved_model_path
VOCAB_PATH = arguments.saved_vocab_path
SAVE_PATH = arguments.file_to_save_predictions
DEVICE = arguments.device
FILE_TO_PREDICT_ON = arguments.file_to_predict_on
GLOVE_FILE_PATH = arguments.glove_file_path
def return_indexed(vocab_obj, tokenized):
    """Map each token list to a fixed-length list of vocabulary ids.

    Sentences shorter than the module-level SENT_LENGTH are padded with
    "<pad>"; longer ones are truncated.  vocab_obj.stoi maps token -> id.
    """
    indexed = []
    for tokens in tokenized:
        padded = (tokens + ["<pad>"] * SENT_LENGTH)[:SENT_LENGTH]
        indexed.append([vocab_obj.stoi[tok] for tok in padded])
    return indexed
def predict_using_model(model, vocab):
    """Run the model over FILE_TO_PREDICT_ON (a csv with a 'sentence' column)
    and write sentence/prediction/probability rows to SAVE_PATH.

    Uses the module-level DEVICE, FILE_TO_PREDICT_ON and SAVE_PATH globals
    parsed from the command line.
    """
    df = pd.read_csv(FILE_TO_PREDICT_ON)
    df["sentence_tokenized"] = df["sentence"].apply(lambda x: tokenizer_nltk(x))
    df["indexed"] = return_indexed(vocab, df["sentence_tokenized"])
    input_tensor = torch.LongTensor(list(df["indexed"])).to(DEVICE)
    model_outputs = model(input_tensor).squeeze(1)
    # Softmax over the class logits; keep the winning class and its probability.
    preds, ind = torch.max(torch.nn.functional.softmax(model_outputs, dim=-1), 1)
    preds = preds.cpu().detach().numpy()
    ind = ind.cpu().detach().numpy()
    df["predictions"] = ind
    df["probabilities"] = preds
    df = df[["sentence", "predictions", "probabilities"]]
    df.to_csv(SAVE_PATH, index=False, encoding="utf-8")
    return
if __name__ == "__main__":
    # Restore the vocabulary first: the model's embedding table size depends
    # on it via TREC_DATASET_PARAMETERS["vocab_length"].
    vocab = load_dict_from_disk(VOCAB_PATH)
    TREC_DATASET_PARAMETERS["vocab_length"] = len(vocab.stoi)
    model = DCNN_TREC(parameter_dict=TREC_DATASET_PARAMETERS)
    model.to(DEVICE)
    model = load_check_point(model, MODEL_PATH)
    predict_using_model(model, vocab)
    print("\n\n")
    print("FINISH")
    print("############################################################################")
319801722 | """
jim weisman
spring 2019
PGR105
"""
# Running on a treadmill you burn 4.2 calories per minute.
# Write a program that uses a loop to display the number of calories burned after 10, 15, 20, 25 and 30 minutes.
cal_minute = 4.2
for minutes in range(10, 31, 5):
cal_burned = (minutes / 1) * cal_minute
print("You burned " + str(cal_burned) + " in " + str(minutes))
| null | 4.1 Calories Burned.py | 4.1 Calories Burned.py | py | 376 | python | en | code | null | code-starcoder2 | 51 |
214930681 | from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route("/write", methods=["GET", "POST"])
def board_write():
    """Render the write form on GET; read the submitted fields on POST.

    Bug fix: the POST branch used to fall through without returning, which
    makes Flask raise "view function did not return a valid response".  It
    now re-renders the form after reading the fields.
    TODO(review): persist name/title/contents somewhere instead of
    discarding them, and decide what the POST response should really be.
    """
    if request.method == "POST":
        name = request.form.get("name")
        title = request.form.get("title")
        contents = request.form.get("contents")
        return render_template("write.html")
    else:
        return render_template("write.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, port=9000) | null | run.py | run.py | py | 472 | python | en | code | null | code-starcoder2 | 51 |
245658143 | import numpy as np
import glob
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import fnmatch
import os
simulationInfo = np.genfromtxt('simulationInfo.txt')
numberOfParticles = int(simulationInfo[0])
startTime = int(simulationInfo[1])
numberOfParticleFiles = len(fnmatch.filter(os.listdir('Particles/Particle0/'), '*.txt'))
gridParameters = np.genfromtxt('gridParameters.txt')
lengthOfSimulationBoxInX = gridParameters[6]
lengthOfSimulationBoxInY = gridParameters[7]
lengthOfOneBoxInX = gridParameters[0] * gridParameters[3]
lengthOfOneBoxInY = gridParameters[1] * gridParameters[4]
numberOfBoxesInX = lengthOfSimulationBoxInX / lengthOfOneBoxInX
numberOfBoxesInY = lengthOfSimulationBoxInY / lengthOfOneBoxInY
X = np.zeros((numberOfParticles,1))
Y = np.zeros((numberOfParticles,1))
x = []
y = []
xnew = []
ynew = []
for i in range(startTime, startTime + numberOfParticleFiles):
# open figure
fig = plt.figure()
for p in range(numberOfParticles):
# read data from text and save it into array data
data = np.genfromtxt('Particles/Particle'+ str(p) +'/Particle' + str(p) + '_' + str(i) + '.txt')
# define variables
x.append(data[0][1])
y.append(data[0][2])
X = np.c_[X,x]
Y = np.c_[Y,y]
x=[]
y=[]
if i == startTime: #or len(X[0]) > 40:
X = np.delete(X,0,1)
Y = np.delete(Y,0,1)
for p in range(numberOfParticles):
# plot x and y value of particle as red dot
plt.plot(X[p], Y[p], color = 'r')
# set labels
plt.xlabel("X")
plt.ylabel("Y")
# set axis
plt.xlim([0, lengthOfSimulationBoxInX])
plt.ylim([0, lengthOfSimulationBoxInY])
plt.xticks(np.arange(0, lengthOfSimulationBoxInX + 1, lengthOfOneBoxInX))
plt.yticks(np.arange(0, lengthOfSimulationBoxInY + 1, lengthOfOneBoxInY))
plt.grid(linestyle = "-", color='red')
# define filename for saving
filename = 'img' + str(i - startTime)
fig.savefig("png/" + "{}.png".format(filename), bbox_inches='tight', dpi=300)
# close fig
plt.close(fig)
| null | Analysis/plot_borisPusher.py | plot_borisPusher.py | py | 1,952 | python | en | code | null | code-starcoder2 | 51 |
291694571 | # File: GaudiMP/Parallel.py
# Author: Pere Mato (pere.mato@cern.ch)
""" GaudiMP.Parallel module.
This module provides 'parallel' processing support for GaudiPyhton.
It is adding some sugar on top of public domain packages such as
the 'multiprocessing' or the 'pp' packages. The interface can be made
independent of the underlying implementation package.
Two main class are defined: Task and WorkManager
"""
from __future__ import print_function
__all__ = [ 'Task','WorkManager' ]
excluded_varnames = ['HOSTNAME', 'SSH_CLIENT', 'SSH_CONNECTION', 'DISPLAY']
import sys, os, time, copy
import multiprocessing
from ostap.utils.progress_bar import ProgressBar
from ostap.logger.logger import getLogger
from ostap.parallel.task import Task, Statistics , StatMerger
logger = getLogger('ostap.parallel.mp_gaudi')
def _prefunction( f, task , jobid , item) :
return f( ( task , jobid , item ) )
def _ppfunction ( args ) :
    """Worker-side entry point: run one task item and collect timing stats.

    args is a (task, jobid, item) tuple; returns (result, Statistics).
    """
    #--- Unpack arguments
    task, jobid , item = args
    # Statistics is a context manager timing the remote execution.
    with Statistics() as stat :
        task.initialize_remote ( jobid )
        result = task.process ( jobid , item )
        stat.stop()
    return result , stat
class WorkManager(object) :
    """ Class to in charge of managing the tasks and distributing them to
    the workers. They can be local (using other cores) or remote
    using other nodes in the local cluster """
    # NOTE(review): `ppservers` is accepted but never used, and __del__ guards
    # on a `self.server` attribute that is never assigned in this class --
    # both look like leftovers from an earlier parallel-python (pp) backend.
    def __init__( self, ncpus='autodetect', ppservers=None , silent = False , **kwargs ) :
        if ncpus == 'autodetect' : self.ncpus = multiprocessing.cpu_count()
        else : self.ncpus = ncpus
        # Local process pool plus a merger for the per-job statistics.
        self.pool = multiprocessing.Pool(self.ncpus)
        self.stats = StatMerger()
        self.silent = True if silent else False
    def __del__(self):
        if hasattr(self,'server') : self.server.destroy()
    def process(self, task, items, timeout=90000):
        """Run task.process(jobid, item) in the pool for every item, merge
        results and statistics into the task, and return task.results().
        `timeout` (seconds) bounds the whole map_async .get()."""
        if not isinstance(task,Task) :
            raise TypeError("task argument needs to be an 'Task' instance")
        # --- Call the Local initialialization
        task.initialize_local ()
        # --- Schedule all the jobs ....
        start = time.time()
        from itertools import repeat , count
        jobs = self.pool.map_async ( _ppfunction, zip( repeat ( task ) , count () , items ))
        with ProgressBar ( max_value = len ( items ) , description = "# Job execution:" , silent = self.silent ) as bar :
            for result, stat in jobs.get(timeout) :
                task.merge_results ( result )
                self.stats += stat
                bar += 1
        end = time.time()
        if not self.silent :
            self.print_statistics()
            logger.info ( 'Time elapsed since server creation %f' % ( end - start ) )
        # --- Call the Local Finalize
        task.finalize()
        return task.results()
    def print_statistics(self):
        """Print the merged per-job execution statistics."""
        self.stats.print_stats ()
# == EOF ====================================================================================
| null | ostap/parallel/mp_gaudi.py | mp_gaudi.py | py | 3,123 | python | en | code | null | code-starcoder2 | 51 |
535649408 | import dash
import dash_bootstrap_components as dbc
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output
import pandas as pd
## variables
coffee_flavours_1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/sunburst-coffee-flavors-complete.csv')
coffee_flavours_2= pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/coffee-flavors.csv')
coffee_exports = pd.read_csv('data_processing/coffee_exports.csv')
def get_coffee_flavours(model):
    """Build a sunburst figure of coffee flavours.

    Parameters
    ----------
    model : int
        1 -> full category-hierarchy dataset, 2 -> flavour-only dataset.

    Returns
    -------
    plotly.graph_objects.Figure

    Raises
    ------
    ValueError
        If *model* is neither 1 nor 2 (previously this fell through with
        ``dataset = None`` and crashed later with an opaque AttributeError).
    """
    if model == 1:
        dataset = coffee_flavours_1
    elif model == 2:
        dataset = coffee_flavours_2
    else:
        raise ValueError("model must be 1 or 2, got %r" % (model,))
    fig = go.Figure()
    fig.add_trace(go.Sunburst(
        ids=dataset.ids,
        labels=dataset.labels,
        parents=dataset.parents,
        domain=dict(column=model)
    ))
    fig.update_layout(
        margin=dict(t=20, l=2, r=2, b=2)
    )
    # Removed dead 'fig.layout' expression statement (had no effect).
    return fig
def get_coffee_exports(selected_year, selected_variable):
    """Bar chart of coffee exports per destination country for one year."""
    year_rows = coffee_exports[coffee_exports.Anio == selected_year]
    bars = go.Bar(
        x=year_rows['PaisDestino'],
        y=year_rows[selected_variable],
        name="toneladas",
    )
    layout = go.Layout(
        colorway=["#EF963B"],
        hovermode="closest",
        xaxis={'title': "Países",
               'titlefont': {'color': 'black', 'size': 14},
               'tickfont': {'size': 9, 'color': 'black'}},
        yaxis={'title': selected_variable,
               'titlefont': {'color': 'black', 'size': 14, },
               'tickfont': {'color': 'black'}},
    )
    return {'data': [bars], 'layout': layout}
# Top navigation bar with one dropdown of (placeholder) app links.
navbar = dbc.NavbarSimple(
    children=[
        dbc.DropdownMenu(
            nav=True,
            in_navbar=True,
            label="Menú de aplicaciones",
            children=[
                dbc.DropdownMenuItem("App 1"),
                dbc.DropdownMenuItem("App 2"),
                dbc.DropdownMenuItem(divider=True),
                dbc.DropdownMenuItem("Entry 3"),
            ],
        ),
    ],
    brand="Arjé Coffee - Entendiendo el café",
    brand_href="https://arjecoffee.co",
    sticky="top",
)
body = dbc.Container(
[
dbc.Row([
dbc.Col([
html.H2("Ser un catador"),
html.P(
"""Asímismo, cuando pruebes cosas, piensa de verdad en lo que estás
percibiendo. Intenta comprender qué fue lo que causó aquella diferencia
en el sabor. [Un catador con experiencia] suele usar un lenguaje más
complejo y descriptivo y está más acostumbrado a separar las partes
más allá de las sensaciones de sabor básicas. Esto te ayudará a tener
mayor experiencia en percibir los alimentos y las bebidas,
ser más consciente del sabor y desarrollar la forma
en la que te comunicas acerca del sabor"""
),
html.A([
dbc.Button([ "Aprender más"], color="primary",)
], href="https://www.perfectdailygrind.com/2018/10/notas-de-sabor-como-ayudar-a-los-consumidores-a-entenderlas/"),
],md=4,
),
dbc.Col([
html.H2("Las notas del café"),
html.P("""El café tiene notas maravillosas. Descúbrelas!"""),
dcc.Dropdown(
id='coffee-flavours-dropdown',
options=[
{'label': 'Por categoría', 'value': '1'},
{'label': 'Por sabor', 'value': '2'},
],
value='1'
),
dcc.Graph(
id='coffee-flavours',
),
]
),]
),
dbc.Row([
dbc.Col([
html.H2("Exportaciones anuales de café"),
dcc.Dropdown(
id='coffee-exports-dropdown',
options=[
{'label': 'USD en miles', 'value': 'ValorMilesFOBDol'},
{'label': 'Pesos Colombianos en miles', 'value': 'ValorMilesPesos'},
{'label': 'Toneladas', 'value': 'VolumenToneladas'},
],
value='ValorMilesFOBDol',
),
])
]),
dbc.Row([
dbc.Col([
dcc.Graph(id='exportaciones-por-anho'),
html.Div([
dcc.Slider(
id='exportaciones-year-slider',
min=coffee_exports['Anio'].min(),
max=coffee_exports['Anio'].max(),
value=coffee_exports['Anio'].min(),
marks={str(year): str(year) for year in coffee_exports['Anio'].unique()},
step=None,
),
],style={'paddingBottom': 40, 'paddingTop': 40}),
], md=12),
]),
dbc.Row([
html.H2(["Tabla dinámica para otros insights"])
]),
dbc.Row([
dbc.Col([
dash_table.DataTable(
id='datatable-interactivity',
columns=[
{"name": i, "id": i, "deletable": True, "selectable": True} for i in coffee_exports.columns
],
data=coffee_exports.to_dict('records'),
editable=True,
filter_action="native",
sort_action="native",
sort_mode="multi",
column_selectable="single",
row_selectable="multi",
row_deletable=True,
selected_columns=[],
selected_rows=[],
page_action="native",
page_current= 0,
page_size= 10,
),
html.Div(id='datatable-interactivity-container')
])
]),
],
className="mt-4",
)
# Dash application styled with the Bootstrap theme; layout = navbar + content.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div([navbar, body])
@app.callback(
    dash.dependencies.Output('coffee-flavours', 'figure'),
    [dash.dependencies.Input('coffee-flavours-dropdown', 'value')])
def update_output(value):
    """Re-render the flavour sunburst when the dropdown selection changes."""
    model = int(value)
    return get_coffee_flavours(model)
@app.callback(
    Output('exportaciones-por-anho', 'figure'),
    [Input('exportaciones-year-slider', 'value'),
     Input('coffee-exports-dropdown', 'value')])
def update_figure(selected_year, selected_variable):
    """Redraw the exports bar chart for the chosen year and metric."""
    figure = get_coffee_exports(selected_year, selected_variable)
    return figure
@app.callback(
    Output('datatable-interactivity', 'style_data_conditional'),
    [Input('datatable-interactivity', 'selected_columns')]
)
def update_styles(selected_columns):
    """Highlight every currently selected column in the data table."""
    highlighted = []
    for col in selected_columns:
        highlighted.append({
            'if': {'column_id': col},
            'background_color': '#D2F3FF'
        })
    return highlighted
@app.callback(
    Output('datatable-interactivity-container', "children"),
    [Input('datatable-interactivity', "derived_virtual_data"),
     Input('datatable-interactivity', "derived_virtual_selected_rows")])
def update_graphs(rows, derived_virtual_selected_rows):
    """Rebuild one bar chart per numeric column, reflecting the table's
    current filter/sort state and highlighting selected rows."""
    # When the table is first rendered, `derived_virtual_data` and
    # `derived_virtual_selected_rows` will be `None`. This is due to an
    # idiosyncracy in Dash (unsupplied properties are always None and Dash
    # calls the dependent callbacks when the component is first rendered).
    # So, if `rows` is `None`, then the component was just rendered
    # and its value will be the same as the component's dataframe.
    # Instead of setting `None` in here, you could also set
    # `derived_virtual_data=df.to_rows('dict')` when you initialize
    # the component.
    if derived_virtual_selected_rows is None:
        derived_virtual_selected_rows = []
    dff = coffee_exports if rows is None else pd.DataFrame(rows)
    # Selected rows get a lighter blue than the rest.
    colors = ['#7FDBFF' if i in derived_virtual_selected_rows else '#0074D9'
              for i in range(len(dff))]
    return [
        dcc.Graph(
            id=column,
            figure={
                "data": [
                    {
                        "x": dff["PaisDestino"],
                        "y": dff[column],
                        "type": "bar",
                        "marker": {"color": colors},
                    }
                ],
                "layout": {
                    "xaxis": {"automargin": True},
                    "yaxis": {
                        "automargin": True,
                        "title": {"text": column}
                    },
                    "height": 250,
                    "margin": {"t": 10, "l": 10, "r": 10},
                },
            },
        )
        # check if column exists - user may have deleted it
        # If `column.deletable=False`, then you don't
        # need to do this check.
        for column in ["ValorMilesFOBDol", "ValorMilesPesos", "VolumenToneladas"] if column in dff
    ]
# Deployment hooks must run at import time, BEFORE the (blocking) dev server
# starts. In the original code these two lines sat after app.run_server(),
# so when executed as a script they were never reached and
# suppress_callback_exceptions was never applied.
server = app.server
app.config.suppress_callback_exceptions = True

if __name__ == '__main__':
    app.run_server(debug=True)
41861621 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" RPS-LS
Classic Rock Paper Scissors mini game and LS extension
1.0.0
Initial release
Upcoming features
latin to ascii comparisons (Lézard == Lezard) should be True
Add multiple way to write a choice (ex. French: Pierre, Roche)
"""
# --------------------------------------
# Script Import Libraries
# --------------------------------------
import clr
import os
import json
import codecs
import re
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
# --------------------------------------
# Script Information
# --------------------------------------
ScriptName = "RPS-LS"
Website = "https://github.com/CVex2150J"
Description = "Rock Paper Scissors LS"
Creator = "CVex2150J"
Version = "1.0.0"
# --------------------------------------
# Script Variables
# --------------------------------------
# Per-user settings persisted by the Chatbot UI.
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
# Cooldown bucket name shared by both game modes.
cooldown_command = "!rps"
# Localisation strings; replaced with a list by LoadLocalisation():
#   [0..4]  choice names (rock, paper, scissors, lizard, Spock)
#   [5..14] verbs used in result phrases ("cuts", "covers", ...)
#   [15..17] win / lose / tie message templates
local = {}
# Each row is [winner_index, loser_index, verb_index_into_local]:
# winner beats loser, described by local[verb_index].
winningTable = [
    # 0: rock, 1: paper, 2: scissors, 3: lizard, 4: Spock
    [2, 1, 5],   # cuts
    [1, 0, 6],   # covers
    [0, 2, 7],   # crushes
    [0, 3, 8],   # crushes
    [3, 4, 9],   # poisons
    [4, 2, 10],  # smashes
    [2, 3, 11],  # decapitates
    [3, 1, 12],  # eats
    [1, 4, 13],  # disproves
    [4, 0, 14]   # vaporizes
]
# --------------------------------------
# Script Classes
# --------------------------------------
class Settings(object):
    """ Load in saved settings file if available else set default values. """
    classic_command = "!rps"
    lizardspock_command = "!rpsls"
    localisation_file = "local_en.txt"
    reward = 100
    user_cooldown = 60

    def __init__(self, settingsfile=None):
        # Best effort: keep the class-level defaults when the file is
        # missing or unreadable.
        try:
            with codecs.open(settingsfile, encoding="utf-8-sig", mode="r") as f:
                self.__dict__ = json.load(f, encoding="utf-8")
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return

    def Reload(self, jsondata):
        """ Reload settings from interface by given json data. """
        self.__dict__ = json.loads(jsondata, encoding="utf-8")
# --------------------------------------
# Script Functions
# --------------------------------------
# Utilities
def Log(message):
    """Write *message* to the Chatbot script log."""
    text = str(message)
    Parent.Log(ScriptName, text)
def Message(message):
    """Send *message* to the stream chat."""
    text = str(message)
    Parent.SendStreamMessage(text)
# def Whisper(target, message):
# Parent.SendStreamWhisper(str(target), str(message))
# Functions
def LoadLocalisation():
    """Parse the localisation file into the global ``local`` list.

    '#' comments and line endings are stripped from every line; empty
    lines are discarded. On failure the previous localisation is kept
    and the error is logged.
    """
    global local
    try:
        # Parse localisation file
        file_name = os.path.join(os.path.dirname(__file__), ScriptSettings.localisation_file)
        # 'with' guarantees the handle is closed (the original leaked it).
        with codecs.open(file_name, encoding="utf-8-sig", mode="r") as _file:
            # get all lines, strip \n and remove any comments commencing with #
            lines = [re.sub('#.*', '', line.rstrip('\r\n')) for line in _file]
        # discard all empty and comment line
        local = list(filter(lambda x: x, lines))
    except Exception as e:
        Log("ERROR : Unable to parse localisation file." + str(e))
def add_user_cooldown(data):
    """Put the invoking user on cooldown, if a cooldown is configured."""
    if ScriptSettings.user_cooldown <= 0:
        return
    Parent.AddUserCooldown(ScriptName, cooldown_command, data.User, ScriptSettings.user_cooldown)
def giveReward(data):
    """Award the configured point reward to the winning user (if > 0)."""
    if ScriptSettings.reward <= 0:
        return
    Parent.AddPoints(data.User, data.UserName, ScriptSettings.reward)
def show_result(data, u, c, win):
    """Format and send the outcome message for one round.

    *u* / *c* are the user's and bot's choice indexes; *win* is the matching
    [winner, loser, verb] row from winningTable (None on a tie).
    """
    if u == c:
        # tie template
        result = local[17]
    else:
        if u == win[0]:
            # user won: pay out and use the win template
            giveReward(data)
            result = local[15]
        else:
            # user lost
            result = local[16]
        phrase = local[win[0]] + ' ' + local[win[2]] + ' ' + local[win[1]]
        result = result.replace('{phrase}', phrase)
    result = result.replace('{user}', data.UserName)
    # result = result.replace('{bot}', ... Bot name ? )
    result = result.replace('{user_pick}', local[u])
    result = result.replace('{bot_pick}', local[c])
    Message(result)
# limit 3 : classic rock, paper, scissors
# limit 5 : rock, paper, scissors, lizard, Spock
def play(data, limit=3):
    """Run one round for the invoking user.

    *limit* is the number of playable choices: the first *limit* entries of
    the localisation list are valid picks (3 = classic RPS, 5 = RPS-LS).
    """
    if ScriptSettings.user_cooldown > 0:
        duration = Parent.GetUserCooldownDuration(ScriptName, cooldown_command, data.User)
        if duration > 0:
            # still on cooldown -> silently ignore
            # Message(data.UserName + ' can\'t use this command for another ' + str(duration) + ' seconds.')
            return
    # parse parameter 1 and try to find its index 0..limit-1 in the localisation list
    user_choice_str = data.GetParam(1).lower()
    user_choice = -1
    for c in local:
        user_choice += 1
        if user_choice >= limit:
            # BUGFIX: was 'user_choice > limit', which accepted index == limit
            # (e.g. "lizard" in classic 3-choice mode) even though the bot can
            # never pick it, and local[5] - a verb string - in LS mode.
            user_choice = -1
            break
        elif c.lower() == user_choice_str:
            break
    # user_choice -1 : the user gave an invalid option
    if user_choice != -1:
        add_user_cooldown(data)
        # random computer choice
        computer_choice = Parent.GetRandom(0, limit)  # Limit is excluded
        if user_choice == computer_choice:
            # Equality
            show_result(data, user_choice, computer_choice, None)
        else:
            # Find the choice combination (verb indexes 5..14 cannot collide
            # with choice indexes 0..4, so 'in' is safe here)
            for win in winningTable:
                if user_choice in win and computer_choice in win:
                    show_result(data, user_choice, computer_choice, win)
                    break
# --------------------------------------
# Chatbot Initialize Function
# --------------------------------------
def Init():
    """Chatbot entry point: load persisted settings, then localisation."""
    global ScriptSettings
    # Load settings from settings file
    ScriptSettings = Settings(SettingsFile)
    LoadLocalisation()
# --------------------------------------
# Chatbot Save Settings Function
# --------------------------------------
def ReloadSettings(jsondata):
    """Called by the Chatbot UI when settings are saved; re-reads localisation."""
    ScriptSettings.Reload(jsondata)
    LoadLocalisation()
# --------------------------------------
# Chatbot Execute Function
# --------------------------------------
def Execute(data):
    """Dispatch incoming Twitch chat commands to the right game mode."""
    # Twitch chat messages only for now (no whispers, no other platforms).
    is_twitch_chat = data.IsFromTwitch() and data.IsChatMessage() and not data.IsWhisper()
    if not is_twitch_chat:
        return
    command = data.GetParam(0).lower()
    ls_cmd = ScriptSettings.lizardspock_command
    classic_cmd = ScriptSettings.classic_command
    if len(ls_cmd) > 0 and command == ls_cmd.lower():
        play(data, 5)
    elif len(classic_cmd) > 0 and command == classic_cmd.lower():
        play(data, 3)
# --------------------------------------
# Chatbot Script Unload Function
# --------------------------------------
def Unload():
    """Called when the script is unloaded; nothing to clean up."""
    pass
# --------------------------------------
# Chatbot Tick Function
# --------------------------------------
def Tick():
    """Periodic Chatbot callback; this script does no background work."""
    pass
603303822 | from flask import Flask, request, jsonify, abort, Response, render_template
from flask_assets import Environment, Bundle
from webassets_browserify import Browserify
from latex import build_pdf, LatexBuildError
from latex.jinja2 import make_env
from songs import get_songs
from string import digits,letters
# Flask app; bundled front-end assets are served from /static.
app = Flask(__name__, static_url_path='/static')
assets = Environment(app)
# Browserify-compiled JS bundle, rebuilt whenever any source .js/.jsx changes.
js = Bundle('js/main.jsx',
            depends=('*/*.js*'),
            filters=Browserify,
            output='app.js')
assets.register('js_all', js)
@app.route('/')
def index():
    """Serve the single-page front end."""
    return render_template('index.html')
# In-memory song database loaded once at startup: songid -> song record.
song_dict = get_songs()
song_list = song_dict.values()
@app.route('/songs/')
@app.route('/songs/<int:songid>')
def songs(songid=None):
    """JSON API: list all songs, or return one song by id (404 if unknown)."""
    def filter_keys(item):
        # Expose only the public fields of a song record.
        return {
            key: item[key]
            for key in [
                'songid',
                'songtitle',
                'firstline',
                'songmeta',
                'songtext',
                'songnotes'
            ]
        }
    if songid is None:
        # 'is None' instead of '== None'; a list comprehension (rather than
        # map()) keeps jsonify working under Python 3 as well.
        return jsonify(songs=[filter_keys(song) for song in song_list])
    if songid in song_dict:
        return jsonify(filter_keys(song_dict[songid]))
    else:
        return abort(404)
def whitelist(string, alphabet):
    """Return *string* with every character not in *alphabet* removed."""
    kept = [ch for ch in string if ch in alphabet]
    return ''.join(kept)
# Jinja2 environment with LaTeX-safe delimiters, reusing Flask's template loader.
texenv = make_env(loader=app.jinja_loader)

@app.route('/songs.pdf')
def pdf():
    """Render the selected songs through the LaTeX template to a PDF.

    Query parameters:
      songids     - comma-separated integer song ids (required)
      texonly     - return the generated .tex source instead of a PDF
      landscape   - page orientation flag (default portrait)
      cols        - column count (digits only, default '2')
      font / fontoptions - LaTeX font name/options (alphanumerics only)
    """
    texonly = 'texonly' in request.args
    orientation = 'landscape' if 'landscape' in request.args else 'portrait'
    # whitelist() sanitises user input before it is embedded in LaTeX source.
    cols = whitelist(request.args.get('cols', ''), digits) or '2'
    font = whitelist(request.args.get('font', ''), digits+letters)
    fontoptions = whitelist(request.args.get('fontoptions', ''), digits+letters)
    songids = request.args.get('songids')
    if songids:
        try:
            # NOTE(review): map() is lazy on Python 3, so the ValueError would
            # surface outside this try; this module targets Python 2
            # (string.letters import) where map() is eager.
            songids = map(int, songids.split(','))
        except ValueError:
            return 'Invalid songid'
    else:
        return 'No songs'
    template = texenv.get_template('songs.tex')
    # Unknown ids are silently skipped.
    tex = template.render(
        songs=[song_dict[x] for x in songids if x in song_dict],
        cols=cols,
        orientation=orientation,
        font=font,
        fontoptions=fontoptions)
    if texonly:
        return Response(tex, mimetype='text/plain')
    else:
        try:
            pdffile = build_pdf(tex)
        except LatexBuildError as e:
            # On LaTeX failure, return the source so the error can be debugged.
            return Response(tex, mimetype='text/plain')
        return Response(bytes(pdffile), mimetype='application/pdf')

if __name__ == '__main__':
    app.run(debug=True)
| null | audio.py | audio.py | py | 2,562 | python | en | code | null | code-starcoder2 | 51 |
352500380 | from threading import Thread, Lock
import cv2
from peekingduck.pipeline.nodes.input.utils.preprocess import set_res, mirror
class VideoThread:
    '''
    Videos will be threaded to prevent I/O blocking from affecting FPS.
    '''
    def __init__(self, res, input_source, mirror_image):
        self.stream = cv2.VideoCapture(input_source)
        self.mirror = mirror_image
        if not self.stream.isOpened():
            raise ValueError("Camera or video input not detected: %s" % input_source)
        width, height = res['width'], res['height']
        set_res(self.stream, width, height)
        # BUGFIX: initialise the frame slot before the reader thread starts.
        # Previously read_frame() raised AttributeError when called before
        # the camera delivered its first frame.
        self.frame = None
        self._lock = Lock()
        thread = Thread(target=self._reading_thread, args=(), daemon=True)
        thread.start()

    def _reading_thread(self):
        '''
        A thread that continuously polls the camera for frames.
        '''
        while True:
            _, self.frame = self.stream.read()

    def read_frame(self):
        '''
        Reads the latest frame. Returns (True, frame copy) once a frame is
        available, otherwise (False, None).
        '''
        self._lock.acquire()
        if self.frame is not None:
            frame = self.frame.copy()
            self._lock.release()
            if self.mirror:
                frame = mirror(frame)
            return True, frame
        self._lock.release()
        return False, None
class VideoNoThread:
    '''
    No threading to deal with recorded videos and images.
    '''
    def __init__(self, res, input_source, mirror_image):
        self.stream = cv2.VideoCapture(input_source)
        self.mirror = mirror_image
        if not self.stream.isOpened():
            raise ValueError("Video or image path incorrect: %s" % input_source)
        set_res(self.stream, res['width'], res['height'])

    def read_frame(self):
        '''
        Reads the next frame straight from the capture device.
        '''
        return self.stream.read()
| null | peekingduck/pipeline/nodes/input/utils/read.py | read.py | py | 1,851 | python | en | code | null | code-starcoder2 | 51 |
35049406 | # -*- coding: utf-8 -*-
# AlgoPlus量化投资开源框架范例
# 微信公众号:AlgoPlus
# 项目地址:http://gitee.com/AlgoPlus/AlgoPlus
# 项目网址:http://www.algo.plus
# 项目网址:http://www.ctp.plus
# 项目网址:http://www.7jia.com
from AlgoPlus.CTP.TraderApi import TraderApi
from AlgoPlus.CTP.ApiStruct import *
import time
class TraderEngine(TraderApi):
    """CTP trader wrapper that runs a scripted see-through-supervision
    (penetration-test) certification sequence on SimNow: settlement
    confirmation, repeated open/close orders at limit prices, order
    cancellations and account/position queries.
    """

    def __init__(self, td_server, broker_id, investor_id, password, app_id, auth_code, md_queue=None
                 , page_dir='', private_resume_type=2, public_resume_type=2):
        # NOTE(review): the connection parameters are not stored here and
        # super().__init__ is not called; presumably AlgoPlus's TraderApi
        # consumes them before this body runs (e.g. via __new__) - confirm.
        self.order_ref = 0
        # Join() blocks and drives the whole test scenario.
        self.Join()

    # Cancel an order by (front_id, session_id, order_ref) or OrderSysID.
    def req_order_action(self, exchange_ID, instrument_id, order_ref, order_sysid=''):
        input_order_action_field = InputOrderActionField(
            BrokerID=self.broker_id,
            InvestorID=self.investor_id,
            UserID=self.investor_id,
            ExchangeID=exchange_ID,
            ActionFlag="0",  # delete (cancel)
            InstrumentID=instrument_id,
            FrontID=self.front_id,
            SessionID=self.session_id,
            OrderSysID=order_sysid,
            OrderRef=str(order_ref),
        )
        l_retVal = self.ReqOrderAction(input_order_action_field)

    # Insert a limit order.
    # direction: '0' buy / '1' sell; offset_flag: '0' open, '1' close, '3' close today.
    def req_order_insert(self, exchange_id, instrument_id, order_price, order_vol, order_ref, direction, offset_flag):
        input_order_field = InputOrderField(
            BrokerID=self.broker_id,
            InvestorID=self.investor_id,
            ExchangeID=exchange_id,
            InstrumentID=instrument_id,
            UserID=self.investor_id,
            OrderPriceType="2",       # limit price
            Direction=direction,
            CombOffsetFlag=offset_flag,
            CombHedgeFlag="1",        # speculation
            LimitPrice=order_price,
            VolumeTotalOriginal=order_vol,
            TimeCondition="3",        # good for day
            VolumeCondition="1",      # any volume
            MinVolume=1,
            ContingentCondition="1",  # immediate
            StopPrice=0,
            ForceCloseReason="0",
            IsAutoSuspend=0,
            OrderRef=str(order_ref),
        )
        l_retVal = self.ReqOrderInsert(input_order_field)

    # Buy open
    def buy_open(self, exchange_ID, instrument_id, order_price, order_vol, order_ref):
        self.req_order_insert(exchange_ID, instrument_id, order_price, order_vol, order_ref, '0', '0')

    # Sell open
    def sell_open(self, exchange_ID, instrument_id, order_price, order_vol, order_ref):
        self.req_order_insert(exchange_ID, instrument_id, order_price, order_vol, order_ref, '1', '0')

    # Buy close (SHFE/INE require 'close today' = '3'; other exchanges use '1')
    def buy_close(self, exchange_ID, instrument_id, order_price, order_vol, order_ref):
        if exchange_ID == "SHFE" or exchange_ID == "INE":
            self.req_order_insert(exchange_ID, instrument_id, order_price, order_vol, order_ref, '0', '3')
        else:
            self.req_order_insert(exchange_ID, instrument_id, order_price, order_vol, order_ref, '0', '1')

    # Sell close (SHFE/INE require 'close today' = '3'; other exchanges use '1')
    def sell_close(self, exchange_ID, instrument_id, order_price, order_vol, order_ref):
        if exchange_ID == "SHFE" or exchange_ID == "INE":
            self.req_order_insert(exchange_ID, instrument_id, order_price, order_vol, order_ref, '1', '3')
        else:
            self.req_order_insert(exchange_ID, instrument_id, order_price, order_vol, order_ref, '1', '1')

    def Join(self):
        """Wait for login (self.status >= 0), then run the whole scripted
        certification sequence once and exit the loop."""
        while True:
            if self.status >= 0:
                # ############################################################################# #
                # Confirm the settlement statement.
                req_settlement_infoConfirm = SettlementInfoConfirmField(BrokerID=self.broker_id,
                                                                        InvestorID=self.investor_id)
                self.ReqSettlementInfoConfirm(req_settlement_infoConfirm)
                self._write_log(f"=>发出确认结算单请求!")
                time.sleep(3)
                # ############################################################################# #
                # Five consecutive buy-open (at limit-up) / sell-close (at limit-down) rounds.
                ikk = 0
                while ikk < 5:
                    ikk += 1
                    self.order_ref += 1
                    self.buy_open(test_exchange_id, test_instrument_id, test_raise_limited, test_vol, self.order_ref)
                    self._write_log(f"=>{ikk}=>发出涨停买开仓请求!")
                    time.sleep(3)
                    # Sell close at limit-down.
                    self.order_ref += 1
                    self.sell_close(test_exchange_id, test_instrument_id, test_fall_limited, test_vol, self.order_ref)
                    self._write_log(f"=>发出跌停卖平仓请求!")
                # ############################################################################# #
                # Five consecutive sell-open (at limit-down) / buy-close (at limit-up) rounds.
                ikk = 0
                while ikk < 5:
                    # Sell open at limit-down.
                    self.order_ref += 1
                    self.sell_open(test_exchange_id, test_instrument_id, test_fall_limited, test_vol, self.order_ref)
                    self._write_log(f"=>{ikk}=>发出跌停卖平仓请求!")
                    time.sleep(3)
                    # Buy close at limit-up.
                    self.order_ref += 1
                    self.buy_close(test_exchange_id, test_instrument_id, test_raise_limited, test_vol, self.order_ref)
                    self._write_log(f"=>发出涨停买平仓请求!")
                    ikk += 1
                # ############################################################################# #
                # Buy open (at limit-down, so it rests) - then cancel it.
                self.order_ref += 1
                self.buy_open(test_exchange_id, test_instrument_id, test_fall_limited, test_vol, self.order_ref)
                self._write_log(f"=>发出涨停买开仓请求!")
                time.sleep(3)
                # Cancel.
                self.req_order_action(test_exchange_id, test_instrument_id, self.order_ref)
                self._write_log(f"=>发出撤单请求!")
                # ############################################################################# #
                # Sell open (at limit-up, so it rests) - then cancel it.
                self.order_ref += 1
                self.sell_open(test_exchange_id, test_instrument_id, test_raise_limited, test_vol, self.order_ref)
                self._write_log(f"=>发出跌停卖平仓请求!")
                time.sleep(3)
                # Cancel.
                self.req_order_action(test_exchange_id, test_instrument_id, self.order_ref)
                self._write_log(f"=>发出撤单请求!")
                # ############################################################################# #
                # Query orders.
                qry_order_field = QryOrderField(BrokerID=self.broker_id,
                                                InvestorID=self.investor_id)
                self.ReqQryOrder(qry_order_field)
                self._write_log(f"=>发出查询订单请求!")
                time.sleep(3)
                # ############################################################################# #
                # Query account funds.
                qry_trading_account_field = QryTradingAccountField(BrokerID=self.broker_id,
                                                                   AccountID=self.investor_id,
                                                                   CurrencyID="CNY",
                                                                   BizType="1")
                self.ReqQryTradingAccount(qry_trading_account_field)
                self._write_log(f"=>发出查询资金请求!")
                time.sleep(3)
                # ############################################################################# #
                # Query trades.
                qry_trade_field = QryTradeField(BrokerID=self.broker_id,
                                                InvestorID=self.investor_id)
                self.ReqQryTrade(qry_trade_field)
                self._write_log(f"=>发出查询成交请求!")
                time.sleep(3)
                # ############################################################################# #
                # Query positions.
                qry_investor_position_field = QryInvestorPositionField(BrokerID=self.broker_id,
                                                                       InvestorID=self.investor_id)
                self.ReqQryInvestorPosition(qry_investor_position_field)
                self._write_log(f"=>发出查询持仓请求!")
                # ############################################################################# #
                # Query account funds again.
                qry_trading_account_field = QryTradingAccountField(BrokerID=self.broker_id,
                                                                   AccountID=self.investor_id,
                                                                   CurrencyID="CNY",
                                                                   BizType="1")
                self.ReqQryTradingAccount(qry_trading_account_field)
                self._write_log(f"=>发出查询资金请求!")
                time.sleep(3)
                # ############################################################################# #
                print("老爷,看穿式监管认证仿真交易已经完成!请截图联系期货公司!")
                break
            time.sleep(1)
time.sleep(1)
# ############################################################################# #
# Fill in the contract data to test here.
# Warning: this example only supports close-today testing on SHFE products.
test_exchange_id = 'SHFE'      # exchange id
test_instrument_id = 'rb2001'  # instrument (contract) code
test_raise_limited = 3763      # limit-up price
test_fall_limited = 3206       # limit-down price
test_vol = 1                   # order volume (lots)
if __name__ == "__main__":
    import sys
    sys.path.append("..")
    from account_info import my_future_account_info_dict

    # SimNow demo account credentials.
    future_account = my_future_account_info_dict['SimNow']
    # Constructing the engine runs the whole certification scenario
    # (TraderEngine.__init__ calls Join(), which blocks until done).
    ctp_trader = TraderEngine(future_account.server_dict['TDServer']
                              , future_account.broker_id
                              , future_account.investor_id
                              , future_account.password
                              , future_account.app_id
                              , future_account.auth_code
                              , None
                              , future_account.td_page_dir)
| null | AlgoPlus入门手册/8客户端认证/trader_engine.py | trader_engine.py | py | 10,481 | python | en | code | null | code-starcoder2 | 51 |
582091359 | import torch
from torch.autograd import Variable
import numpy as np
import random
import sys
from collections import defaultdict
import urllib, csv
def data_to_dict(data):
    """Group (user, item) pairs by user.

    Returns (user -> list-of-items mapping, set of users, set of items).
    """
    by_user = defaultdict(list)
    all_items = set()
    for user, item in data:
        by_user[user].append(item)
        all_items.add(item)
    return by_user, set(by_user.keys()), all_items
def load_data_from_csv(csv_file, users_to_i=None, items_to_i=None, min_purchase_value=15):
    """
    Loads data from a CSV file located at `csv_file`
    where each line is of the form:
        user_id_1, item_id_1
        ...
        user_id_n, item_id_n
    Initial mappings from user and item identifiers
    to integers can be passed using `users_to_i`
    and `items_to_i` respectively.
    This function will return a data array consisting
    of (user, item) tuples, a mapping from user ids to integers
    and a mapping from item ids to integers.
    """
    # Mutable-default fix: fresh dicts per call unless the caller provides them.
    users_to_i = {} if users_to_i is None else users_to_i
    items_to_i = {} if items_to_i is None else items_to_i
    raw_data = []
    with open(csv_file) as f:
        csvreader = csv.reader(f)
        for i, (user, item) in enumerate(csvreader):
            # BUGFIX: progress condition was 'i + 1 % 1000000 == 0', which by
            # operator precedence equals 'i + 1 == 0' and never fired.
            if (i + 1) % 1000000 == 0:
                print(user, item)
                print("{} iter".format(i))
            raw_data.append((user, item))
    return load_data_from_array(raw_data, users_to_i, items_to_i,
                                min_purchase_value=min_purchase_value)
def load_data_from_movielens(url, threshold, users_to_i={}, items_to_i={}):
    """
    Loads movielens data from a URL, e.g.
    http://files.grouplens.org/datasets/movielens/ml-100k/
    Only ratings strictly above *threshold* are kept (first 100k lines,
    header skipped).
    This function will return a data array consisting
    of (user, item) tuples, a mapping from user ids to integers
    and a mapping from item ids to integers.
    """
    raw_data = []
    # 'with' closes the file handle (previously leaked).
    with open(url, 'r') as f:
        for index, line in enumerate(f):
            if index > 100000:
                break
            if index == 0:
                # skip the header row
                continue
            user, item, rating, timestamp = line.split(',')
            # BUGFIX: progress condition was 'if index + 1 % 10000:', which is
            # always truthy (precedence), so it printed for every single line.
            if (index + 1) % 10000 == 0:
                print(index + 1, user, item)
            if float(rating) > threshold:
                raw_data.append((user, item))
    return load_data_from_array(raw_data)
def load_data_from_array(array, users_to_i=None, items_to_i=None, min_purchase_value=3):
    """
    Loads data from an array of tuples of the form:
        (user_id, item_id)
    Users with fewer than *min_purchase_value* purchases are dropped.
    Initial mappings from user and item identifiers
    to integers can be passed using `users_to_i`
    and `items_to_i` respectively.
    This function will return a data array consisting
    of (user, item) integer-index tuples, a mapping from user ids to
    integers and a mapping from item ids to integers.
    """
    # Mutable-default fix: the original '={}' defaults were shared across
    # calls, silently accumulating mappings between unrelated datasets.
    if users_to_i is None:
        users_to_i = {}
    if items_to_i is None:
        items_to_i = {}
    data = []
    count_u = {}
    # Continue numbering after any pre-existing mappings.
    u = max(users_to_i.values()) + 1 if users_to_i else 0
    i = max(items_to_i.values()) + 1 if items_to_i else 0
    # First pass: count purchases per user.
    # ('dict.has_key' replaced with 'in': same semantics, Python 3 compatible.)
    for user, item in array:
        count_u[user] = count_u.get(user, 0) + 1
    # Second pass: keep qualifying users and build the integer indexes.
    for k, (user, item) in enumerate(array):
        if count_u[user] < min_purchase_value:
            continue
        # BUGFIX: progress condition was 'k + 1 % 1000000 == 0' (precedence
        # made it 'k + 1 == 0', i.e. never true).
        if (k + 1) % 1000000 == 0:
            print("{} order".format(k))
        if user not in users_to_i:
            users_to_i[user] = u
            u += 1
        if item not in items_to_i:
            items_to_i[item] = i
            i += 1
        data.append((users_to_i[user], items_to_i[item]))
    return data, users_to_i, items_to_i
class FactorizationMachine(object):
    """Higher-order Factorization Machine trained with BPR-style pairwise
    sampling (positive item vs. random negative item per user).

    NOTE(review): written against a pre-0.4 PyTorch API (Variable,
    'volatile', manual '.grad' SGD updates) - confirm the target torch
    version before modernising.
    """

    def __init__(self, train_dict, _train_users, _train_items,
                 order=3, ranks=[10, 5], data_size=10, learning_rate=1e-3):
        # NOTE(review): 'ranks=[10, 5]' is a mutable default argument, shared
        # across instances that rely on the default - consider fixing.
        # Model parameters
        self.order = order          # highest interaction order modelled
        self.data_size = data_size  # length of the one-hot input vector
        self.ranks = ranks          # one latent rank per order >= 2
        # Training parameter
        self._train_dict = train_dict      # user -> list of positive items
        self._train_users = _train_users   # set of user indexes
        self._n_items = _train_items       # number of items (int)
        self.lr = learning_rate
        # Class param : output
        self.y = Variable(torch.randn(1), requires_grad=True, volatile=False)
        # Each factor order has its own rank and own matrix size
        self.V_dict = {i: Variable(torch.randn(data_size, order_rank),
                                   requires_grad=True, volatile=False)
                       for i, order_rank in enumerate(ranks)}
        # First-order (linear) weights.
        self.W = Variable(torch.randn(data_size, 1), requires_grad=True, volatile=False)

    def forward(self, sample):
        """FM score of one one-hot *sample* vector: linear term plus the
        order-o interaction terms for o = 2..self.order."""
        # For each order, we get the appropriate element
        # For order 1
        sum_ = self.W.dot(sample)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = (self.V_dict[idx][f, :].dot(sample)) ** o
                # sum_i( vif ** order xi ** order)
                elem += ((self.V_dict[idx][f, :] ** o).dot(sample ** o))
                sum_ += (1. / 2**(o - 1)) * elem
        return sum_

    def forward_couple(self, psample, nsample):
        """Store the pairwise score difference (positive - negative) in self.y."""
        self.y = self.forward(psample) - self.forward(nsample)

    def forward_backward(self, psample, nsample):
        """One pairwise forward + SGD step on pre-built sample vectors.

        NOTE(review): the score computation duplicates forward() inline,
        presumably to keep one autograd graph - keep the two in sync.
        """
        print(self.W.shape, psample.shape)
        p_y = self.W.dot(psample)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = (self.V_dict[idx][f, :].dot(psample)) ** o
                # sum_i( vif ** order xi ** order)
                elem += ((self.V_dict[idx][f, :] ** o).dot(psample ** o))
                p_y += (1. / 2**(o - 1)) * elem
        n_y = self.W.dot(nsample)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = (self.V_dict[idx][f, :].dot(nsample)) ** o
                # sum_i( vif ** order xi ** order)
                elem += ((self.V_dict[idx][f, :] ** o).dot(nsample ** o))
                n_y += (1. / 2**(o - 1)) * elem
        y = p_y - n_y
        y.backward()
        # Manual SGD step on every parameter.
        self.W -= self.lr * self.W.grad
        for k in self.V_dict.keys():
            self.V_dict[k] -= self.lr * self.V_dict[k].grad

    def _uniform_user_sampling(self, n_samples):
        """
        Creates `n_samples` random samples from training data for performing Stochastic
        Gradient Descent. We start by uniformly sampling users,
        and then sample a positive and a negative item for each
        user sample.
        """
        sys.stderr.write("Generating %s random training samples\n" % str(n_samples))
        sgd_users = np.array(list(self._train_users))\
            [np.random.randint(len(list(self._train_users)), size=n_samples)]
        sgd_pos_items, sgd_neg_items = [], []
        for sgd_user in sgd_users:
            # Positive item: one the user actually interacted with.
            pos_item = self._train_dict[sgd_user]\
                [np.random.randint(len(self._train_dict[sgd_user]))]
            sgd_pos_items.append(pos_item)
            # Negative item: rejection-sample until it is NOT in the user's history.
            neg_item = np.random.randint(self._n_items)
            while neg_item in self._train_dict[sgd_user]:
                neg_item = np.random.randint(self._n_items)
            sgd_neg_items.append(neg_item)
        return sgd_users, sgd_pos_items, sgd_neg_items

    def full_forward_backward(self, user, pitem, nitem, max_user, sample_size):
        """Build the one-hot (user + item) vectors for a single pairwise
        sample and take one SGD step. Item slots start at index max_user."""
        #print(max_user, sample_size, pitem, nitem)
        p = torch.zeros(sample_size)
        p[user] = 1
        p[max_user + pitem] = 1
        psample = Variable(p, requires_grad=True)
        s = torch.zeros(sample_size)
        s[user] = 1
        s[max_user + nitem] = 1
        nsample = Variable(s, requires_grad=True)
        p_y = self.W.dot(psample)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = (self.V_dict[idx][f, :].dot(psample)) ** o
                # sum_i( vif ** order xi ** order)
                elem += ((self.V_dict[idx][f, :] ** o).dot(psample ** o))
                p_y += (1. / 2**(o - 1)) * elem
        n_y = self.W.dot(nsample)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = (self.V_dict[idx][f, :].dot(nsample)) ** o
                # sum_i( vif ** order xi ** order)
                elem += ((self.V_dict[idx][f, :] ** o).dot(nsample ** o))
                n_y += (1. / 2**(o - 1)) * elem
        y = p_y - n_y
        y.backward()
        # In-place SGD update on the raw tensors (.data) of each parameter.
        self.W.data -= self.lr * self.W.grad.data
        for k in self.V_dict.keys():
            self.V_dict[k].data -= self.lr * self.V_dict[k].grad.data

    def batch_forward_backward(self, users, pitems, nitems, max_user, sample_size):
        """Batched version of full_forward_backward: one SGD step over a
        whole batch of (user, positive item, negative item) triples."""
        batch_size = len(pitems)
        # One-hot matrices: row per sample, user slot + item slot set to 1.
        p = torch.zeros(batch_size, sample_size)
        s = torch.zeros(batch_size, sample_size)
        for u in range(batch_size):
            p[u, users[u]] = 1
            p[u, max_user + pitems[u]] = 1
            s[u, users[u]] = 1
            s[u, max_user + nitems[u]] = 1
        psample = Variable(p, requires_grad=True)
        nsample = Variable(s, requires_grad=True)
        print('() : ', psample.data.numpy().shape, self.W.data.numpy().shape)
        p_y = torch.mm(psample, self.W)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = torch.mm(psample, self.V_dict[idx]) ** o
                # sum_i( vif ** order xi ** order)
                elem += torch.mm((psample ** o), (self.V_dict[idx] ** o))
                p_y += (1. / 2**(o - 1)) * elem.sum(1)
        n_y = torch.mm(nsample, self.W)
        for idx, o in enumerate(range(2, self.order + 1)):
            for f in range(self.ranks[idx]):
                # sum_i( vif xi) ** order
                elem = torch.mm(nsample, self.V_dict[idx]) ** o
                # sum_i( vif ** order xi ** order)
                elem += torch.mm((nsample ** o), (self.V_dict[idx] ** o))
                n_y += (1. / 2**(o - 1)) * elem.sum(1)
        y = (p_y - n_y)
        # Backprop a vector of ones: equivalent to summing the batch losses.
        solution = torch.ones((batch_size, 1))
        y.backward(solution)
        self.W.data -= self.lr * self.W.grad.data
        for k in self.V_dict.keys():
            self.V_dict[k].data -= self.lr * self.V_dict[k].grad.data
| null | fm_core/train_fm.py | train_fm.py | py | 10,702 | python | en | code | null | code-starcoder2 | 51 |
477615728 | def soma(num):
if num > 0:
num = str(num)
lista = list(num)
soma = 0
for valor in lista:
soma = soma + int(valor)
return f'A soma dos digitos é de {soma}'
else:
print('Numero invalido')
print(soma(123))
| null | section 8 Funções/ex12.py | ex12.py | py | 274 | python | en | code | null | code-starcoder2 | 51 |
197318180 | import logging
import re
from secrets import token_urlsafe
import time
from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.core.exceptions import ObjectDoesNotExist
from django.forms.models import model_to_dict
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
import jwt
from aidants_connect_web.decorators import activity_required
from aidants_connect_web.models import (
Connection,
Journal,
Usager,
)
from aidants_connect_web.utilities import generate_sha256_hash
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
def check_request_parameters(
parameters: dict, expected_static_parameters: dict, view_name: str
) -> tuple:
"""
When a request arrives, this function checks that all requested parameters are
present (if not, returns (1, "missing parameter") and if the static parameters are
correct (if not, returns (1, "forbidden parameter value")). If all is good, returns
(0, "all is good")
:param parameters: dict of all parameters expected in the request
(None if the parameter was not present)
:param expected_static_parameters: subset of parameters that are not dynamic
:param view_name: str with the name of the view for logging purposes
:return: tuple (error, message) where error is a bool and message an str
"""
for parameter, value in parameters.items():
if not value:
error_message = f"400 Bad request: There is no {parameter} @ {view_name}"
log.info(error_message)
return 1, "missing parameter"
elif (
parameter not in expected_static_parameters
and parameter in ["state", "nonce"]
and not value.isalnum()
):
error_message = (
f"403 forbidden request: malformed {parameter} @ {view_name}"
)
log.info(error_message)
return 1, "malformed parameter value"
elif (
parameter in expected_static_parameters
and value != expected_static_parameters[parameter]
):
error_message = (
f"403 forbidden request: unexpected {parameter} @ {view_name}"
)
log.info(error_message)
return 1, "forbidden parameter value"
return 0, "all good"
@login_required
@activity_required
def authorize(request):
if request.method == "GET":
parameters = {
"state": request.GET.get("state"),
"nonce": request.GET.get("nonce"),
"response_type": request.GET.get("response_type"),
"client_id": request.GET.get("client_id"),
"redirect_uri": request.GET.get("redirect_uri"),
"scope": request.GET.get("scope"),
"acr_values": request.GET.get("acr_values"),
}
EXPECTED_STATIC_PARAMETERS = {
"response_type": "code",
"client_id": settings.FC_AS_FI_ID,
"redirect_uri": settings.FC_AS_FI_CALLBACK_URL,
"scope": "openid profile email address phone birth",
"acr_values": "eidas1",
}
error, message = check_request_parameters(
parameters, EXPECTED_STATIC_PARAMETERS, "authorize"
)
if error:
return (
HttpResponseBadRequest()
if message == "missing parameter"
else HttpResponseForbidden()
)
connection = Connection.objects.create(
state=parameters["state"],
nonce=parameters["nonce"],
)
aidant = request.user
return render(
request,
"aidants_connect_web/id_provider/authorize.html",
{
"connection_id": connection.id,
"usagers": aidant.get_usagers_with_active_autorisation(),
"aidant": aidant,
},
)
else:
parameters = {
"connection_id": request.POST.get("connection_id"),
"chosen_usager": request.POST.get("chosen_usager"),
}
try:
connection = Connection.objects.get(pk=parameters["connection_id"])
if connection.is_expired:
log.info("connection has expired at authorize")
return render(request, "408.html", status=408)
except ObjectDoesNotExist:
log.info("No connection corresponds to the connection_id:")
log.info(parameters["connection_id"])
logout(request)
return HttpResponseForbidden()
aidant = request.user
chosen_usager = Usager.objects.get(pk=parameters["chosen_usager"])
if chosen_usager not in aidant.get_usagers_with_active_autorisation():
log.info(
"This usager does not have a valid autorisation "
"with the aidant's organisation"
)
log.info(aidant.id)
logout(chosen_usager.id)
logout(request)
return HttpResponseForbidden()
connection.usager = chosen_usager
connection.save()
select_demarches_url = (
f"{reverse('fi_select_demarche')}?connection_id={connection.id}"
)
return redirect(select_demarches_url)
@login_required
@activity_required()
def fi_select_demarche(request):
if request.method == "GET":
parameters = {
"connection_id": request.GET.get("connection_id"),
}
try:
connection = Connection.objects.get(pk=parameters["connection_id"])
if connection.is_expired:
log.info("Connection has expired at select_demarche")
return render(request, "408.html", status=408)
except ObjectDoesNotExist:
log.info("No connection matches the connection_id:")
log.info(parameters["connection_id"])
logout(request)
return HttpResponseForbidden()
aidant = request.user
usager_demarches = aidant.get_active_demarches_for_usager(connection.usager)
demarches = {
nom_demarche: settings.DEMARCHES[nom_demarche]
for nom_demarche in usager_demarches
}
return render(
request,
"aidants_connect_web/id_provider/fi_select_demarche.html",
{
"connection_id": connection.id,
"aidant": request.user.get_full_name(),
"usager": connection.usager,
"demarches": demarches,
},
)
else:
parameters = {
"connection_id": request.POST.get("connection_id"),
"chosen_demarche": request.POST.get("chosen_demarche"),
}
try:
connection = Connection.objects.get(pk=parameters["connection_id"])
if connection.is_expired:
log.info("connection has expired at select_demarche")
return render(request, "408.html", status=408)
except ObjectDoesNotExist:
log.info("No connection corresponds to the connection_id:")
log.info(parameters["connection_id"])
logout(request)
return HttpResponseForbidden()
aidant = request.user
autorisation = aidant.get_valid_autorisation(
parameters["chosen_demarche"], connection.usager
)
if not autorisation:
log.info("The autorisation asked does not exist")
return HttpResponseForbidden()
code = token_urlsafe(64)
connection.code = make_password(code, settings.FC_AS_FI_HASH_SALT)
connection.demarche = parameters["chosen_demarche"]
connection.autorisation = autorisation
connection.complete = True
connection.aidant = aidant
connection.save()
return redirect(
f"{settings.FC_AS_FI_CALLBACK_URL}?code={code}&state={connection.state}"
)
# Due to `no_referer` error
# https://docs.djangoproject.com/en/dev/ref/csrf/#django.views.decorators.csrf.csrf_exempt
@csrf_exempt
def token(request):
if request.method == "GET":
return HttpResponse("You did a GET on a POST only route")
client_secret = request.POST.get("client_secret")
try:
hash_client_secret = generate_sha256_hash(client_secret.encode())
except AttributeError:
return HttpResponseBadRequest()
parameters = {
"code": request.POST.get("code"),
"grant_type": request.POST.get("grant_type"),
"redirect_uri": request.POST.get("redirect_uri"),
"client_id": request.POST.get("client_id"),
"hash_client_secret": hash_client_secret,
}
EXPECTED_STATIC_PARAMETERS = {
"grant_type": "authorization_code",
"redirect_uri": settings.FC_AS_FI_CALLBACK_URL,
"client_id": settings.FC_AS_FI_ID,
"hash_client_secret": settings.HASH_FC_AS_FI_SECRET,
}
error, message = check_request_parameters(
parameters, EXPECTED_STATIC_PARAMETERS, "token"
)
if error:
return (
HttpResponseBadRequest()
if message == "missing parameter"
else HttpResponseForbidden()
)
code_hash = make_password(parameters["code"], settings.FC_AS_FI_HASH_SALT)
try:
connection = Connection.objects.get(code=code_hash)
if connection.is_expired:
log.info("connection has expired at token")
return render(request, "408.html", status=408)
except ObjectDoesNotExist:
log.info("403: /token No connection corresponds to the code")
log.info(parameters["code"])
return HttpResponseForbidden()
id_token = {
# The audience, the Client ID of your Auth0 Application
"aud": settings.FC_AS_FI_ID,
# The expiration time. in the format "seconds since epoch"
# TODO Check if 10 minutes is not too much
"exp": int(time.time()) + settings.FC_CONNECTION_AGE,
# The issued at time
"iat": int(time.time()),
# The issuer, the URL of your Auth0 tenant
"iss": settings.HOST,
# The unique identifier of the user
"sub": connection.usager.sub,
"nonce": connection.nonce,
}
encoded_id_token = jwt.encode(id_token, client_secret, algorithm="HS256")
access_token = token_urlsafe(64)
connection.access_token = make_password(access_token, settings.FC_AS_FI_HASH_SALT)
connection.save()
response = {
"access_token": access_token,
"expires_in": 3600,
"id_token": encoded_id_token.decode("utf-8"),
"refresh_token": "5ieq7Bg173y99tT6MA",
"token_type": "Bearer",
}
definite_response = JsonResponse(response)
return definite_response
def user_info(request):
auth_header = request.META.get("HTTP_AUTHORIZATION")
if not auth_header:
log.info("403: Missing auth header")
return HttpResponseForbidden()
pattern = re.compile(r"^Bearer\s([A-Z-a-z-0-9-_/-]+)$")
if not pattern.match(auth_header):
log.info("Auth header has wrong format")
return HttpResponseForbidden()
auth_token = auth_header[7:]
auth_token_hash = make_password(auth_token, settings.FC_AS_FI_HASH_SALT)
try:
connection = Connection.objects.get(access_token=auth_token_hash)
if connection.is_expired:
log.info("connection has expired at user_info")
return render(request, "408.html", status=408)
except ObjectDoesNotExist:
log.info("403: /user_info No connection corresponds to the access_token")
log.info(auth_token)
return HttpResponseForbidden()
usager = model_to_dict(connection.usager)
del usager["id"]
birthdate = usager["birthdate"]
birthplace = usager["birthplace"]
birthcountry = usager["birthcountry"]
usager["birthplace"] = str(birthplace)
usager["birthcountry"] = str(birthcountry)
usager["birthdate"] = str(birthdate)
Journal.log_autorisation_use(
aidant=connection.aidant,
usager=connection.usager,
demarche=connection.demarche,
access_token=connection.access_token,
autorisation=connection.autorisation,
)
return JsonResponse(usager, safe=False)
def end_session_endpoint(request):
if request.method != "GET":
log.info("Request should be a GET @ end_session_endpoint")
return HttpResponseBadRequest()
redirect_uri = settings.FC_AS_FI_LOGOUT_REDIRECT_URI
if request.GET.get("post_logout_redirect_uri") != redirect_uri:
message = (
f"post_logout_redirect_uri is "
f"{request.GET.get('post_logout_redirect_uri')} instead of "
f"{redirect_uri} @ end_session_endpoint"
)
log.info(message)
return HttpResponseBadRequest()
return HttpResponseRedirect(redirect_uri)
| null | aidants_connect_web/views/id_provider.py | id_provider.py | py | 13,240 | python | en | code | null | code-starcoder2 | 50 |
570520072 | """
SOILICE to mrfso converter
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import cmor
import cdms2
import logging
import numpy as np
from e3sm_to_cmip.lib import handle_variables
# list of raw variable names needed
RAW_VARIABLES = [str('SOILICE')]
VAR_NAME = str('mrfso')
VAR_UNITS = str('kg m-2')
TABLE = str('CMIP6_Lmon.json')
def write_data(varid, data, timeval, timebnds, index, **kwargs):
"""
mrfso = verticalSum(SOILICE, capped_at=5000)
"""
# we only care about data with a value greater then 0
mask = np.greater(data['SOILICE'][index, :], 0.0)
# sum the data over the levgrnd axis
outdata = np.sum(
data['SOILICE'][index, :],
axis=0)
# replace all values greater then 5k with 5k
capped = np.where(
np.greater(outdata, 5000.0),
5000.0,
outdata)
outdata = np.where(
mask,
capped,
outdata)
cmor.write(
varid,
outdata,
time_vals=timeval,
time_bnds=timebnds)
def handle(infiles, tables, user_input_path, **kwargs):
return handle_variables(
metadata_path=user_input_path,
tables=tables,
table=TABLE,
infiles=infiles,
raw_variables=RAW_VARIABLES,
write_data=write_data,
outvar_name=VAR_NAME,
outvar_units=VAR_UNITS,
serial=kwargs.get('serial'),
logdir=kwargs.get('logdir'))
# ------------------------------------------------------------------
| null | e3sm_to_cmip/cmor_handlers/mrfso.py | mrfso.py | py | 1,524 | python | en | code | null | code-starcoder2 | 50 |
563922917 | """add unique together constraint for shop item category and code name
Revision ID: b412a7dfa61f
Revises: 8ba89ef9391d
Create Date: 2018-07-07 14:48:10.809261
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "b412a7dfa61f"
down_revision = "8ba89ef9391d"
branch_labels = None
depends_on = None
def upgrade():
# Requires that the table already fulfills the constraint
with op.batch_alter_table("shop_item", schema=None) as batch_op:
batch_op.create_unique_constraint(
"uq_category_id_code_name", ["category_id", "code_name"])
def downgrade():
with op.batch_alter_table("shop_item", schema=None) as batch_op:
batch_op.drop_constraint("uq_category_id_code_name")
| null | meme_machine/alembic/versions/b412a7dfa61f_add_unique_together_constraint_for_shop_.py | b412a7dfa61f_add_unique_together_constraint_for_shop_.py | py | 737 | python | en | code | null | code-starcoder2 | 51 |
376525253 | # CPU functionality
LDI = 0b10000010
HLT = 0b00000001
PRN = 0b01000111
MUL = 0b10100010
NOP = 0b00000000
POP = 0b01000110
RET = 0b00010001
CALL = 0b01010000
PUSH = 0b01000101
SP = 0b00000111
ADD = 0b10100000
SUB = 0b10100001
CMP = 0b10100111
EQ = 0b00000111
JMP = 0b01010100
JEQ = 0b01010101
JNE = 0b01010110
AND = 0b10101000
MOD = 0b10100100
SHL = 0b10101100
SHR = 0b10101101
XOR = 0b10101011
OR = 0b10101010
NOT = 0b01101001
# imports
import sys
# start of CPU class
class CPU:
"""
Main CPU class
"""
def __init__(self):
"""Construct a new CPU."""
self.ram = [0] * 256
self.reg = [0] * 8
self.pc = 0
self.running = True
self.flag = [0] * 8
self.table = {
HLT : self.HLT,
PRN : self.PRN,
LDI : self.LDI,
MUL : self.MUL,
ADD : self.ADD,
SUB : self.SUB,
PUSH : self.PUSH,
POP : self.POP,
CALL : self.CALL,
RET : self.RET,
CMP : self.CMP,
JMP : self.JMP,
JEQ : self.JEQ,
JNE : self.JNE
}
def ram_read(self, address):
return self.ram[address]
def ram_write(self, address, value):
self.ram[address] = value
def load(self):
"""Load a program into memory."""
filename = sys.argv[1]
address = 0
with open(filename) as f:
for line in f:
line = line.split('#')[0].strip()
if line == '':
continue
try:
v = int(line, 2)
except ValueError:
continue
self.ram_write(address, v)
address += 1
def trace(self):
"""
Handy function to print out the CPU state. You might want to call this
from run() if you need help debugging.
"""
print(f"TRACE: %02X | %02X %02X %02X |" % (
self.pc,
self.ram_read(self.pc),
self.ram_read(self.pc + 1),
self.ram_read(self.pc + 2)
), end='')
for i in range(8):
print(" %02X" % self.reg[i], end='')
print()
def alu(self, op, reg_a, reg_b):
"""ALU operations."""
if op == "ADD":
self.reg[reg_a] += self.reg[reg_b]
elif op == "MUL":
self.reg[reg_a] *= self.reg[reg_b]
elif op == "SUB":
self.reg[reg_a] -= self.reg[reg_b]
# equal flag requirements of sprint
elif op == "CMP":
if reg_a == reg_b:
self.flag[EQ] = 0b00000001
else:
self.flag[EQ] = 0b00000000
# part of stretch
elif op == "AND":
self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]
# part of stretch
elif op == "MOD":
if self.reg[reg_b] == 0:
print("Cannot mod by value of 0")
self.HLT(reg_a, reg_b)
else:
self.reg[reg_a] %= self.reg[reg_b]
# part of stretch
elif op == "SHL":
self.reg[reg_a] << self.reg[reg_b]
# part of stretch
elif op == "SHR":
self.reg[reg_a] >> self.reg[reg_b]
# part of stretch
elif op == "OR":
self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]
# part of stretch
elif op == "NOT":
self.reg[reg_a] -= 0b11111111
# part of stretch
elif op == "XOR":
self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]
else:
raise Exception("Unsupported ALU operation")
def LDI(self, reg_a, reg_b):
self.reg[reg_a] = reg_b
def HLT(self, reg_a, reg_b):
self.running = False
def PRN(self, reg_a, reg_b):
print(self.reg[reg_a])
def MUL(self, reg_a, reg_b):
self.alu("MUL", reg_a, reg_b)
def SUB(self, reg_a, reg_b):
self.alu("SUB", reg_a, reg_b)
def ADD(self, reg_a, reg_b):
self.alu("ADD", reg_a, reg_b)
def PUSH(self, reg_a, reg_b):
reg_num = self.ram[reg_a]
value = self.reg[reg_num]
self.reg[SP] -= 1
top_of_stack_add = self.reg[SP]
self.ram[top_of_stack_add] = value
def POP(self, reg_a, reg_b):
top_of_stack_add = self.reg[SP]
value = self.ram[top_of_stack_add]
reg_num = self.ram[reg_a]
self.reg[reg_num] = value
self.reg[SP] += 1
def CALL(self, reg_a, reg_b):
return_addr = reg_b
self.reg[SP] -= 1
self.ram[self.reg[SP]] = return_addr
reg_num = self.ram[reg_a]
addr = self.reg[reg_num]
self.pc = addr
def RET(self, reg_a, reg_b):
addr = self.ram[self.reg[SP]]
self.reg[SP] += 1
self.pc = addr
# CMP requirement of sprint, equal flag above in alu
def CMP(self, reg_a, reg_b):
reg_num1 = self.reg[reg_a]
reg_num2 = self.reg[reg_b]
self.alu("CMP", reg_num1, reg_num2)
# start of JMP requirement of sprint
def JMP(self, reg_a, reg_b):
self.pc = self.reg[reg_a]
# JEQ requirement of sprint
def JEQ(self, reg_a, reg_b):
if self.flag[EQ] == 0b00000001:
self.pc = self.reg[reg_a]
else:
self.pc += 2
# JNE requirement of sprint
def JNE(self, reg_a, reg_b):
if self.flag[EQ] == 0b00000000:
self.pc = self.reg[reg_a]
else:
self.pc += 2
def run(self):
while self.running:
ir = self.ram_read(self.pc)
pc_flag = (ir & 0b00010000) >> 4
reg_num1 = self.ram[self.pc +1]
reg_num2 = self.ram[self.pc + 2]
self.table[ir](reg_num1, reg_num2)
if pc_flag == 0:
move = int((ir & 0b11000000) >>6)
self.pc += move + 1 | null | cpu.py | cpu.py | py | 5,962 | python | en | code | null | code-starcoder2 | 51 |
595002045 | #!/usr/bin/env python
import time, sys, logging
from daemon2x import Daemon
from lib_oled96 import ssd1306
from PIL import ImageFont, ImageDraw, Image
from smbus import SMBus
# Logging
logging.basicConfig(filename='/home/pi/oled/lib_oled96/clock.log',
filemode='a',
format='[%(asctime)s] %(message)s',
datefmt='%Y/%d/%m %H:%M:%S',
level=logging.INFO)
# Setup display
i2cbus = SMBus(1) # 1 = Raspberry Pi but NOT early REV1 board
oled = ssd1306(i2cbus) # create oled object, nominating the correct I2C bus, default address
draw = oled.canvas # "draw" onto this canvas, then call display() to send the canvas contents to the hardware.
# Hello World
#oled.canvas.text((40,40), 'Hello World!', fill=1)
#Setup fonts
#font = ImageFont.load_default()
font1 = ImageFont.truetype('/home/pi/oled/DSEG/fonts/DSEG7-Classic/DSEG7Classic-Light.ttf', 38)
#font2 = ImageFont.truetype('/home/pi/oled/DSEG/fonts/DSEG14-Classic/DSEG14Classic-Light.ttf', 12)
class MyDaemon(Daemon):
def run(self):
logging.info('--------------')
logging.info('Daemon Started')
# oled.cls()
while True:
draw.rectangle((0, 0, 128, 64), outline=0, fill=0)
# oled.canvas.rectangle((0, 0, oled.width-1, oled.height-1), outline=1, fill=0) # Border
draw.text((0 , 3), time.strftime("%H:%M"), font=font1, fill=1)
if time.strftime("%H")[:1] == '0': #remove leading 0 for hour
draw.text((0, 3), '0', font=font1, fill=0)
# draw.text((58, 0), time.strftime("%p")[:1], font=font3, fill=1)
# draw.text((15 , 48), time.strftime("%d-%m-%Y"), font=font2, fill=1)
oled.display()
time.sleep(-time.time() % 60)
logging.info('Daemon Ended')
if __name__ == "__main__":
daemonx = MyDaemon('/tmp/daemon-OLEDclock.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemonx.start()
elif 'stop' == sys.argv[1]:
logging.info('Daemon Stopped')
daemonx.stop()
elif 'restart' == sys.argv[1]:
logging.info('Daemon restarting')
daemonx.restart()
else:
print("Unknown command")
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
| null | python/clock-dseg2-d.py | clock-dseg2-d.py | py | 2,446 | python | en | code | null | code-starcoder2 | 51 |
246563288 | class Solver:
def __init__(self):
with open("../resources/d3/input") as f:
self.lines = f.readlines()
def solveP1(self):
print(self.closest_intersection(self.lines))
def solveP2(self):
print(self.minimum_steps(self.lines))
def closest_intersection(self, lines):
meshes = []
for line in lines:
mesh = set()
current_point = 0, 0
for x in line.split(','):
direction, length = x[0], int(x[1:])
offset = self.get_offset(direction)
for _ in range(length):
current_point = current_point[0] + offset[0], current_point[1] + offset[1]
mesh.add(current_point)
meshes.append(mesh)
return min(self.manhattan(pt) for pt in meshes[0] & meshes[1])
def manhattan(self, point):
return abs(point[0]) + abs(point[1])
def minimum_steps(self, lines):
meshes = []
for line in lines:
mesh = {}
current_point = 0, 0
steps_taken = 0
for x in line.split(','):
direction, length = x[0], int(x[1:])
offset = self.get_offset(direction)
for _ in range(length):
current_point = current_point[0] + offset[0], current_point[1] + offset[1]
steps_taken += 1
if current_point not in mesh:
mesh[current_point] = steps_taken
meshes.append(mesh)
intersections = set(meshes[0].keys()) & set(meshes[1].keys())
return min(meshes[0][pt] + meshes[1][pt] for pt in intersections)
def get_offset(self, direction):
if direction == 'R':
offset = 1, 0
if direction == 'L':
offset = -1, 0
if direction == 'U':
offset = 0, 1
if direction == 'D':
offset = 0, -1
return offset
if __name__ == '__main__':
s = Solver()
assert s.closest_intersection(["R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"]) == 159
assert s.closest_intersection(["R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"]) == 135
assert s.minimum_steps(["R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"]) == 610
assert s.minimum_steps(["R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"]) == 410
s.solveP1()
s.solveP2()
| null | src/main/python/d3.py | d3.py | py | 2,600 | python | en | code | null | code-starcoder2 | 51 |
105123489 | from tkinter import *
import os
root = Tk()
root.wm_title("Terra Farmer")
#Initialising images in Array
mapImages= ["agriculturalLand.gif", "artesianBasinMap.gif", "populationMap.gif","rainfallMap.gif", "riverMap.gif", "sunshineHoursMap.gif", "temperatureMap.gif"]
mapArray = []
for name in mapImages:
mapArray.append(PhotoImage(file=name))
#Initialising Map text files
mapText = ["agriculturalLand.txt", "artesianBasinMap.txt", "populationMap.txt", "rainfallMap.txt","riverMap.txt","sunshineHoursMap.txt","temperatureMap.txt"]
mtArray = []
for name in mapText:
with open(name) as f:
mtArray.append(f.read())
#Initialising Projection images in an Array
projImages = ["australia1.gif", "australia2.gif", "australia3.gif", "australia4.gif"]
projArray = []
for name in projImages:
projArray.append(PhotoImage(file=name))
#Intialising Projection Text Files
projText = ["a1.txt", "a2.txt", "a3.txt", "a4.txt"]
ptArray = []
for name in projText:
with open(name) as p:
ptArray.append(p.read())
#Changing Projection Image
def set_year(new_value):
new_value = int(new_value)
global pic, myLabel, text
pic = None
pic = projArray[new_value - 1]
myLabel.configure(image = pic)
myLabel.image = pic
words = ptArray[new_value - 1]
text.delete(1.0, END)
text.insert(INSERT, words)
#Change map and relevant analysis
def radio_btns():
rVal = int(var.get())
pic = None
pic = mapArray[rVal-1]
myLabel.configure(image=pic)
myLabel.image = pic
words = mtArray[rVal - 1]
text.delete(1.0, END)
text.insert(INSERT, words)
var = IntVar()
#Make Radio buttons to change selection of what information is being looked at
R1 = Radiobutton(root, text = "Agricultural Land Map", variable = var, value = 1, command = radio_btns)
R1.pack(anchor = W)
R2 = Radiobutton(root, text = "Artesian Basin Map", variable = var, value = 2, command = radio_btns)
R2.pack(anchor = W)
R3 = Radiobutton(root, text = "Population Map", variable = var, value = 3, command = radio_btns)
R3.pack(anchor = W)
R4 = Radiobutton(root, text = "Rainfall Map", variable = var, value = 4, command = radio_btns)
R4.pack(anchor = W)
R5 = Radiobutton(root, text = "River Map", variable = var, value = 5, command = radio_btns)
R5.pack(anchor = W)
R6 = Radiobutton(root, text = "Sunshine Hours Map", variable = var, value = 6, command = radio_btns)
R6.pack(anchor = W)
R7 = Radiobutton(root, text = "Temperature Map", variable = var, value = 7, command = radio_btns)
R7.pack(anchor = W)
#image label
pic = projArray[0]
myLabel = Label(root, compound = CENTER, image=pic)
myLabel.pack(side="right")
#slider
slider = Scale(root, from_=1, to=4,tickinterval=1, length = 500, orient=HORIZONTAL, command=set_year)
slider.set(1)
slider.pack()
#text
text = Text(root, font=('Arial', 32), fg = 'dark blue', padx = 6, pady = 6, wrap = 'word', width = 20, height = 12)
text.insert(INSERT, ptArray[0])
text.pack()
root.mainloop()
| null | GUIDemo.py | GUIDemo.py | py | 3,071 | python | en | code | null | code-starcoder2 | 51 |
1481758 | # -*- coding: utf-8 -*-
import json
import os
import sys
import subprocess
import shutil
import time
import glob
import shutil
# Global flags and accumulators shared across the interactive pipeline builder.
DEBUG = False  # when True, quitSystem() dumps the component map before exit
WS = '\t'  # single indentation unit used when rendering descriptor XML
TYPE_PRE = "de.julielab.jcore.types."  # common prefix of JCoRe type names
PIPENAME = ""  # short name of the last chosen AE; set by buildCASProcs()
DEP_LIST = []  # Maven artifact names of all chosen components
DIR_LIST = []  # directories referenced by component parameter defaults
CAP_PROVIDED = []  # type-system capabilities provided by components chosen so far
JSON_FILE = "coordinates.json"  # component catalogue read at import time
### HEADER ###
# Fixed XML prologue of the generated CPE descriptor.
HEAD = (
    """<?xml version="1.0" encoding="UTF-8"?>\n""" +
    """<cpeDescription xmlns="http://uima.apache.org/resourceSpecifier">\n"""
)
### END ###
# Fixed XML epilogue (CPE runtime configuration) of the generated descriptor.
END = (
    """\t<cpeConfig>\n""" +
    """\t\t<numToProcess>-1</numToProcess>\n""" +
    """\t\t<deployAs>immediate</deployAs>\n""" +
    """\t\t<checkpoint batch="0" time="300000ms"/>\n""" +
    """\t\t<timerImpl/>\n""" +
    """\t</cpeConfig>\n""" +
    """</cpeDescription>\n"""
)
### PROJECTS COORDINATES ###
# Component catalogue loaded at import time: category -> short name -> metadata
# (keys used below include "desc", "name", "model", "capabilities").
JCOORDS = None
with open(JSON_FILE) as jfile:
    JCOORDS = json.load(jfile)
# add short names (derived from key names) to components
for component in list(JCOORDS.keys()):
    if component != "jcore version":
        for short in list(JCOORDS[component]):
            JCOORDS[component][short]["short"] = short
# Maps menu index (as string) -> catalogue short name, per component category.
C_MAP = {
    "cr": {"None": "None"},
    "ae": {"None": "None"},
    "cc": {"None": "None"}
}
# Current selection: one reader, an ordered list of AEs, one consumer.
A_MAP = {
    "cr": "None",
    "ae": ["None"],
    "cc": "None"
}
# Human-readable category names keyed by the short category code.
c_dict = {
    "cr": "Collection Reader",
    "ae": "Analysis Engine",
    "cc": "CAS Consumer"
}
### BUILDING FUNCTIONS ###
def buildValue(vType, vValue):
    """Wrap *vValue* in an XML element named *vType*.

    Example: buildValue("string", "data/inFiles") -> "<string>data/inFiles</string>"
    """
    return "<{0}>{1}</{0}>".format(vType, vValue)
def buildArrayValue(vType, vValues, tab=1):
    """Render *vValues* as an XML <array> of *vType* elements.

    Each item line and the closing tag are indented *tab* + 1 units of WS
    (items carry one extra leading tab).
    """
    indent = (tab + 1) * WS
    items = "\n".join("\t" + indent + buildValue(vType, v) for v in vValues)
    return "<array>\n" + items + "\n" + indent + "</array>"
def buildNameValue(nvName, nvValue, tab=1):
    """Render one <nameValuePair> element for parameter *nvName*.

    *nvValue* is an already-rendered value/array fragment; *tab* gives the
    base indentation level in WS units.
    """
    template = (
        "{i}<nameValuePair>\n"
        "{i}\t<name>{name}</name>\n"
        "{i}\t<value>\n"
        "{i}\t\t{value}\n"
        "{i}\t</value>\n"
        "{i}</nameValuePair>\n"
    )
    return template.format(i=tab * WS, name=nvName, value=nvValue)
def buildConfigParams(cp_dict, tab=1):
    """Render the <configurationParameterSettings> block for one component.

    Walks the component's "mandatory" and "optional" parameter lists and
    emits a <nameValuePair> for every parameter that has a non-empty
    default.  As a side effect, parameters flagged with "dir" register
    their directory in the global DIR_LIST (for 'file' defaults, the
    containing directory; for 'folder' defaults, the path itself).

    cp_dict: one component entry from coordinates.json.
    tab: base indentation level in WS units.
    """
    global DIR_LIST
    cp_string = ""
    cp_param_list = []
    # mandatory parameters are rendered before optional ones
    for i in ["mandatory", "optional"]:
        cp_param_list.extend(cp_dict[i])
    for param in cp_param_list:
        # skip parameters without a default value
        # NOTE(review): len() assumes "default" is always a str or list
        if len(param["default"]) != 0:
            if not isinstance(param["default"], list):
                nv_pair = buildNameValue(param["name"],
                    buildValue(param["type"], param["default"]), tab + 1)
            else:
                # value is an <array> ... </array>
                nv_pair = buildNameValue(
                    param["name"],
                    buildArrayValue(param["type"], param["default"], tab + 2),
                    tab + 1)
            # remember directories referenced by this parameter
            if param.get("dir", False):
                if param["dir"] == 'file':
                    DIR_LIST.append(
                        os.path.dirname(param["default"]))
                elif param["dir"] == 'folder':
                    DIR_LIST.append(param["default"])
            cp_string += nv_pair
    # drop the trailing newline of the last pair before wrapping
    cp_string = cp_string.rstrip('\n')
    CONFIG_PARAMS = (
        """{}<configurationParameterSettings>\n""" +
        """{}\n""" +
        """{}</configurationParameterSettings>"""
    ).format(tab * WS, cp_string, tab * WS)
    return CONFIG_PARAMS
def buildCollectionReader(cr_dict):
    """Render the <collectionReader> section of the CPE descriptor.

    cr_dict is one reader entry from coordinates.json: its descriptor is
    referenced via <import name=...>, its configured parameters are
    rendered inline, and its Maven artifact is registered in the global
    DEP_LIST via add2DepList().
    """
    # e.g. cDescName=de.julielab.jcore.reader.file.desc.jcore-file-reader
    crDescName = cr_dict["desc"]
    crConfigParams = buildConfigParams(cr_dict, 3)
    add2DepList(cr_dict)
    CR = (
        """\t<collectionReader>\n""" +
        """\t\t<collectionIterator>\n""" +
        """\t\t\t<descriptor>\n""" +
        """\t\t\t\t<import name="{}"/>\n""" +
        """\t\t\t</descriptor>\n""" +
        """{}\n""" +
        """\t\t</collectionIterator>\n""" +
        """\t</collectionReader>\n""").format(crDescName, crConfigParams)
    return CR
def buildCASProcs(casProcs, is_ae=True):
    """Render the <casProcessors> section for AEs (list) or a single CC (dict).

    When *casProcs* is a list (analysis engines), the opening
    <casProcessors> tag is emitted and the global PIPENAME is set to the
    short name of the last engine.  When it is a single dict (CAS
    consumer, called with is_ae=False), the closing tag is emitted
    instead, completing the section.  Every rendered component is also
    registered in DEP_LIST via add2DepList().
    """
    global PIPENAME
    procs = ""
    if isinstance(casProcs, list):
        # remember the last AE's short name as the pipeline name
        PIPENAME = casProcs[-1]["short"]
        for proc in casProcs:
            cpDescName = proc["desc"]
            name = ", ".join([proc["name"], proc["model"]])
            cp = buildConfigParams(proc, 3)
            procs += buildCASProc(name, cpDescName, cp)
            add2DepList(proc)
        procs = procs.rstrip("\n")
    else:
        cp = buildConfigParams(casProcs, 3)
        cpDescName = casProcs["desc"]
        procs = buildCASProc(casProcs["name"], cpDescName, cp)
        add2DepList(casProcs)
        procs = procs.rstrip("\n")
    CAS_PROCS = ""
    # the AE call opens the section, the CC call closes it
    if is_ae:
        CAS_PROCS =\
            """\t<casProcessors casPoolSize="3" processingUnitThreadCount="1">\n"""
    CAS_PROCS += ("""{}\n""").format(procs)
    if not is_ae:
        CAS_PROCS += """\t</casProcessors>\n"""
    return CAS_PROCS
def buildCASProc(casName, casDescName, casCP):
    """Render one integrated <casProcessor> element.

    casName: display name of the processor.
    casDescName: descriptor import name.
    casCP: pre-rendered <configurationParameterSettings> fragment.
    """
    template = (
        '\t\t<casProcessor deployment="integrated" name="{name}">\n'
        '\t\t\t<descriptor>\n'
        '\t\t\t\t<import name="{desc}"/>\n'
        '\t\t\t</descriptor>\n'
        '{params}\n'
        '\t\t\t<deploymentParameters/>\n'
        '\t\t\t<errorHandling>\n'
        '\t\t\t\t<errorRateThreshold action="terminate" value="0/1000"/>\n'
        '\t\t\t\t<maxConsecutiveRestarts action="terminate" value="30"/>\n'
        '\t\t\t\t<timeout max="100000" default="-1"/>\n'
        '\t\t\t</errorHandling>\n'
        '\t\t\t<checkpoint batch="10000" time="1000ms"/>\n'
        '\t\t</casProcessor>\n'
    )
    return template.format(name=casName, desc=casDescName, params=casCP)
def add2DepList(cDict):
    """Register the Maven artifact name of component *cDict* in DEP_LIST.

    Descriptor names follow the convention <package>.<artifact>.  For
    components flagged "mult_desc": "true", all descriptors share the
    artifact-name prefix plus a "-"-delimited suffix, which is stripped
    before recording.
    """
    global DEP_LIST
    # last dot-separated piece of the descriptor name is the artifact
    artifact = cDict["desc"].split('.')[-1]
    if cDict.get("mult_desc", "false").lower() == "true":
        # drop the "-"-delimited descriptor suffix
        artifact = "-".join(artifact.split("-")[:-1])
    DEP_LIST.append(artifact)
def quitSystem():
    """Terminate the program; in DEBUG mode, dump the component map first."""
    if DEBUG:
        print("\n[DEBUG] Map of Components:")
        print(A_MAP)
    # exit with default (success) status
    sys.exit()
def clearScreen():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def removeLastComponent(component):
    """Undo the most recent selection for the given component slot.

    The "ae" slot holds a stack of selections; the other slots hold a
    single index.  The freed slot falls back to the sentinel string
    "None", and the removed component's provided capabilities are
    retracted via checkForCapabilities(..., remove=True).
    """
    if component == "ae":
        tmp = A_MAP[component].pop()
        prevComp = tmp
        # BUG FIX: was `tmp is "None"` -- identity comparison with a str
        # literal only works by accident of interning (and raises a
        # SyntaxWarning on modern CPython); use equality.
        if (tmp == "None") or (len(A_MAP[component]) == 0):
            A_MAP[component].append("None")
    else:
        prevComp = A_MAP[component]
        A_MAP[component] = "None"
    # NOTE(review): when prevComp is the "None" sentinel this still calls
    # checkForCapabilities with it -- verify C_MAP handles that key.
    checkForCapabilities(component, prevComp, remove=True)
def getCompName(component, index):
    """Return a display name for the component at C_MAP[component][index].

    Analysis engines ("ae") are shown as "<name>, <model>"; every other
    component kind as "<name>".  The "None" sentinel is passed through.
    """
    jShort = C_MAP[component][index]
    if jShort == "None":
        return "None"
    entry = JCOORDS[(c_dict[component]).lower()][jShort]
    if component == "ae":
        return "{}, {}".format(entry["name"], entry["model"])
    return "{}".format(entry["name"])
def checkForCapabilities(comp, coKey, remove=False):
    """Check (or retract) a component's capability requirements.

    comp is the component kind ("cr"/"ae"/"cc"); coKey is the menu index
    into C_MAP[comp].  With remove=False, verifies that every "in"
    capability of the component is already in the global CAP_PROVIDED
    and, on success, extends CAP_PROVIDED with the component's "out"
    capabilities.  With remove=True, retracts the "out" capabilities
    instead.  Returns (matchCap, unmetCap); on the remove path this is
    always (False, []).
    """
    global CAP_PROVIDED
    fullCat = (c_dict[comp]).lower()
    cKey = C_MAP[comp][coKey]
    needCap = JCOORDS[fullCat][cKey]["capabilities"]["in"]
    matchCap = False
    missingCap = False
    unmetCap = []
    if not remove:
        if DEBUG:
            print("Provided capabilities: {}\n".format(CAP_PROVIDED))
            print("Component needs cap: {} - {}:\n\t{}".format(
                fullCat, cKey, needCap))
        # A component with no input capabilities is always satisfied.
        if len(needCap) <= 0:
            matchCap = True
        else:
            for inCap in needCap:
                if inCap not in CAP_PROVIDED:
                    missingCap = True
                    matchCap = False
                    unmetCap.append(inCap)
                elif not missingCap:
                    # Only flips to True while nothing has been missing yet.
                    matchCap = True
        if matchCap:
            CAP_PROVIDED.extend(JCOORDS[fullCat][cKey]["capabilities"]["out"])
    else:
        remCap = JCOORDS[fullCat][cKey]["capabilities"]["out"]
        for oCap in remCap:
            # NOTE(review): list.remove raises ValueError if oCap is not
            # present (e.g. retracted twice) -- confirm callers prevent that.
            CAP_PROVIDED.remove(oCap)
    return matchCap, unmetCap
def getComponent(component="ae"):
    """Interactive menu for choosing a component of the given kind.

    Builds an indexed menu from JCOORDS, records the index->short-name
    mapping in C_MAP[component], and loops on user input until 'q'
    (quit) or 'p' (back to the previous menu).  Numeric choices are
    validated against the component's required input capabilities
    before being stored in A_MAP.
    """
    comp_string = ""
    comps = JCOORDS[(c_dict[component]).lower()]
    count = 0
    for i in sorted(list(comps.keys())):
        C_MAP[component][str(count)] = i
        if component == "ae":
            comp_string += "\t[{:>2}] {}, {}\n".format(count, comps[i]["name"],
                                                       comps[i]["model"])
        else:
            comp_string += "\t[{:>2}] {}\n".format(count, comps[i]["name"])
        count += 1
    cr = None
    choice = """Choose a {} from the following list:"""
    if component == "ae":
        choice = """Add an {} from the following list:"""
    displ = ""
    while cr is None or cr not in ["q", "p"]:
        displayPipeline()
        cr = input(
            (choice +
             """\n{}\nChoice (p for 'back to previous'; q for 'quit'; """ +
             """r for 'remove last'){}: """)
            .format(c_dict[component], comp_string, displ)
        )
        cr = cr.lower()
        # NOTE(review): range(len(C_MAP[component]) - 1) excludes the highest
        # menu index -- verify whether C_MAP is pre-seeded with an extra
        # entry elsewhere or whether this is an off-by-one.
        if cr in [str(x) for x in range(len(C_MAP[component]) - 1)]:
            matchCap, needCap = checkForCapabilities(component, cr)
            if matchCap:
                displ = ""
                if component == "ae":
                    # add ae to stack
                    if "None" in A_MAP[component]:
                        A_MAP[component].remove("None")
                    A_MAP[component].append(cr)
                else:
                    # replace previous cr/cc
                    prevComp = A_MAP[component]
                    A_MAP[component] = cr
                    if prevComp != "None":
                        checkForCapabilities(component, prevComp, remove=True)
            else:
                # report unmatched capabilities
                displ = ("\n[Input Capabilities aren't provided for {}: {} ]"
                         ).format(getCompName(component, cr), needCap)
        if cr == "r":
            displ = ""
            removeLastComponent(component)
        if cr == "q":
            quitSystem()
        elif cr == "p":
            modifyPipeline()
def displayPipeline():
    """Clear the screen and print the pipeline's current composition."""
    clearScreen()
    reader = getCompName("cr", A_MAP["cr"]) + "\n"
    engines = "; ".join([getCompName("ae", x) for x in A_MAP["ae"]]) + "\n"
    consumer = getCompName("cc", A_MAP["cc"]) + "\n"
    capabilities = "; ".join(sorted(set(CAP_PROVIDED)))
    summary = ("The current pipeline consists of\n"
               "Collection Reader:\n\t{}"
               "Analysis Engine(s):\n\t{}"
               "Collection Consumer:\n\t{}"
               "Capabilities:\n\t{}\n").format(reader, engines,
                                               consumer, capabilities)
    print(summary)
def modifyPipeline():
    """Top-level interactive menu: edit reader/engines/consumer, build, or quit."""
    answer = None
    accepted = ["r", "a", "c", "q", "n"]
    while answer is None or answer not in accepted:
        displayPipeline()
        prompt = ("modify (r)eader, (a)nalysis engines or (c)onsumer\n"
                  "(n for 'build current pipeline'; q for 'quit'): ")
        answer = input(prompt).lower()
        if answer == "q":
            quitSystem()
        elif answer == "r":
            getComponent("cr")
        elif answer == "c":
            getComponent("cc")
        elif answer == "n":
            if DEBUG:
                print("\n[DEBUG] Map of Components:")
                print(A_MAP)
        else:
            # Any other answer -- including invalid input -- opens the
            # analysis-engine menu, exactly as before.
            getComponent()
def writePom():
    """Write the pipeline's pom.xml into the current working directory.

    Generates one <dependency> entry per artifact recorded in DEP_LIST and
    fills the project coordinates from JCOORDS and the global PIPENAME.
    """
    print("write POM...")
    sys.stdout.flush()
    time.sleep(0.5)
    dependencies = ""
    for dep in DEP_LIST:
        dependencies += (
            """\t\t<dependency>\n""" +
            """\t\t\t<groupId>de.julielab</groupId>\n""" +
            """\t\t\t<artifactId>{}</artifactId>\n""" +
            """\t\t\t<version>[${{jcore-version}},]</version>\n""" +
            """\t\t</dependency>\n"""
        ).format(dep)
    dependencies = dependencies.rstrip("\n")
    out_string = (
        """<?xml version='1.0' encoding='UTF-8'?>\n""" +
        """<project xmlns="http://maven.apache.org/POM/4.0.0" """ +
        """xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" """ +
        """xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 """ +
        """http://maven.apache.org/xsd/maven-4.0.0.xsd">\n""" +
        """\t<modelVersion>4.0.0</modelVersion>\n""" +
        """\t<parent>\n""" +
        """\t\t<groupId>de.julielab</groupId>\n""" +
        """\t\t<artifactId>jcore-pipelines</artifactId>\n""" +
        """\t\t<version>{}</version>\n""" +
        """\t</parent>\n""" +
        """\t<artifactId>{}</artifactId>\n""" +
        """\t<name>{}</name>\n""" +
        """\t<dependencies>\n""" +
        """{}\n""" +
        """\t</dependencies>\n""" +
        """</project>"""
    ).format(JCOORDS["jcore version"], PIPENAME + "-pipeline",
             JCOORDS["analysis engine"][PIPENAME]["name"] + " Pipeline",
             dependencies)
    with open("pom.xml", 'w') as out_file:
        out_file.write(out_string)
def copyInstallScript():
    """Copy the install-script template into the build dir and mark it executable."""
    template = os.path.abspath("../installComponents_template")
    shutil.copy(template, "installComponents.sh")
    subprocess.call(["chmod", "+x", "installComponents.sh"])
def writeExecutionScript(cpeName):
    """Write runPipeline.sh, which runs the CPE descriptor via UIMA's runCPE.sh.

    The script extends CLASSPATH with every jar under target/dependency
    before delegating to $UIMA_HOME/bin/runCPE.sh with cpeName.
    """
    print("create Scripts...")
    sys.stdout.flush()
    time.sleep(0.5)
    script = (
        "#!/bin/bash\n\n"
        "java_libs=target/dependency\n\n"
        "export CLASSPATH=`for i in $java_libs/*.jar; "
        'do echo -n "$i:";done;echo -n ""`\n\n'
        "$UIMA_HOME/bin/runCPE.sh {}"
    ).format(cpeName)
    with open("runPipeline.sh", 'w') as out_file:
        out_file.write(script)
    subprocess.call(["chmod", "+x", "runPipeline.sh"])
def createDirs():
    """Create every directory listed in DIR_LIST (no-op when it already exists)."""
    print("create Directories...")
    sys.stdout.flush()
    time.sleep(0.5)
    for iDir in DIR_LIST:
        # exist_ok avoids the check-then-create race of the former
        # os.path.exists() guard (Python 3 only; the script already
        # enforces Python 3 in __main__).
        os.makedirs(iDir, exist_ok=True)
def installTrove():
    """Extract the bundled jules-trove jar from the mst-parser jar and
    install it into the local Maven repository.

    BUG FIX: the previous version left the process chdir'ed into
    target/dependency, so a second call (mst selected more than once) or
    the later os.chdir("..") in buildCurrentPipeline operated on the
    wrong directory.  The working directory is now restored on every
    exit path.
    """
    dep_dir = "target" + os.sep + "dependency"
    trove_jar = "repo/de/julielab/jules-trove/1.3/jules-trove-1.3.jar"
    cwd = os.getcwd()
    os.chdir(dep_dir)
    try:
        # NOTE(review): IndexError here if no mst-parser jar is present.
        mst = glob.glob("jcore-mstparser-ae-2.*.jar")[0]
        # extract trove jar from mst repo
        subprocess.call(["jar", "xf", mst, trove_jar])
        # move trove jar to current dir
        shutil.copy2(trove_jar, "./")
        # delete old folder
        shutil.rmtree("repo/")
        # install jules-trove into the local Maven repository
        subprocess.call(
            ["mvn", "install:install-file", "-Dfile=jules-trove-1.3.jar",
             "-DgroupId=de.julielab", "-DartifactId=jules-trove",
             "-Dversion=1.3", "-Dpackaging=jar"]
        )
    finally:
        os.chdir(cwd)
def installDependencies():
    """Run the component install script, then handle special-case components.

    When any selected analysis engine is the MST parser, the bundled
    jules-trove jar must additionally be installed via installTrove().
    """
    print("install Dependencies...")
    sys.stdout.flush()
    time.sleep(0.5)
    # run "installComponents.sh" --> if all goes smoothly, fine;
    # else the user corrects errors and runs the script again
    subprocess.call(["./installComponents.sh"])
    for ae_key in A_MAP["ae"]:
        if C_MAP["ae"][ae_key].startswith("mst"):
            installTrove()
def buildCurrentPipeline():
    """Materialize the interactively configured pipeline.

    Renders the CPE descriptor XML from the selected collection reader,
    analysis engines and CAS consumer, creates the output folder
    "jcore-<PIPENAME>-pipeline", writes descriptor, pom.xml, helper
    scripts and directories into it, installs the Maven dependencies,
    and finally steps back up one directory.
    """
    # COLLECTION READER
    cr = None
    cr_key = C_MAP["cr"][A_MAP["cr"]]
    cr_string = ""
    if cr_key.lower() != "none":
        cr = JCOORDS["collection reader"][cr_key]
        cr_string = buildCollectionReader(cr)
    # ANALYSIS ENGINES
    ae_string = ""
    ae_list = []
    for ae_key in A_MAP["ae"]:
        ae_key = C_MAP["ae"][ae_key]
        ae = None
        if ae_key.lower() != "none":
            ae = JCOORDS["analysis engine"][ae_key]
            ae_list.append(ae)
    if len(ae_list) != 0:
        ae_string = buildCASProcs(ae_list)
    # CAS CONSUMERS (is_ae=False closes the <casProcessors> element)
    cc = None
    cc_key = C_MAP["cc"][A_MAP["cc"]]
    cc_string = ""
    if cc_key.lower() != "none":
        cc = JCOORDS["cas consumer"][cc_key]
        cc_string = buildCASProcs(cc, False)
    if DEBUG:
        print("[DEBUG] List of Dependencies:\n{}".format(DEP_LIST))
    # write out: descriptor first, then supporting build files
    foo = "jcore-{}-pipeline".format(PIPENAME)
    if not os.path.exists(foo):
        os.mkdir(foo)
    os.chdir(foo)
    fiName = "{}-cpe.xml".format(PIPENAME)
    out_string = HEAD + cr_string + ae_string + cc_string + END
    with open(fiName, 'w') as out_file:
        out_file.write(out_string)
    createDirs()
    writePom()
    copyInstallScript()
    writeExecutionScript(fiName)
    installDependencies()
    os.chdir("..")
def checkSystemDependencies():
    """Stub for verifying that UIMA and Maven are installed.

    Always returns False; the caller currently ignores the result.
    """
    return False
if __name__ == "__main__":
    # Enforce Python 3.  NOTE(review): a string-prefix check on
    # sys.version is fragile (sys.version_info[0] == 3 would be exact).
    if sys.version.startswith("3"):
        # Optional first CLI argument "true" enables debug output.
        if len(sys.argv) > 1:
            if sys.argv[1].lower() == "true":
                DEBUG = True
        # check for UIMA and Maven
        checkSystemDependencies()
        # Interactive menu loop; returns once the user chooses 'n' (build).
        modifyPipeline()
        print("\nbuild pipeline ...")
        sys.stdout.flush()
        time.sleep(0.5)
        buildCurrentPipeline()
    else:
        print("Your Python Version is {}".format(sys.version))
        print("Please use Python Version 3.x")
| null | jcore-cpe-builder/cpe-builder.py | cpe-builder.py | py | 17,223 | python | en | code | null | code-starcoder2 | 51 |
265588831 | import traceback as _tb
def execPy(varname=None,traceback=False):
    """Execute python commands handed over from TDI.

    Reads the commands from the tdi public variable ___TDI___cmds (scalar
    string or string array) and executes them, either in this module's
    global namespace (when the tdi variable ___TDI___global_ns is 1) or
    in a fresh namespace.  If varname is given, the tdi public variable
    ___TDI___answer is set to that variable's post-execution value
    (None when absent); otherwise ___TDI___answer is set to 1 on
    success.  On any exception, ___TDI___exception is set to the
    exception string, and a traceback is printed when traceback=True.
    """
    from MDSplus import Data as ___TDI___Data,makeData as ___TDI___makeData,String as ___TDI___String
    try:
        cmds=list()
        for cmd in ___TDI___Data.getTdiVar('___TDI___cmds'):
            cmds.append(str(cmd))
        cmds="\n".join(cmds)
        isglobal=False
        try:
            if int(___TDI___Data.getTdiVar('___TDI___global_ns'))==1:
                isglobal=True
        except:
            # Best-effort: missing/non-numeric flag means "not global".
            pass
        ans=1
        if isglobal:
            # BUG FIX: "exec( cmds) in globals()" was Python-2
            # exec-statement syntax; under Python 3 it executed cmds in
            # *this function's* scope and then evaluated the useless
            # expression "None in globals()".  Passing the namespace as
            # an argument is valid in both Python 2 and Python 3.
            exec(cmds, globals())
            if varname is not None:
                if varname in globals():
                    ans=globals()[varname]
                else:
                    ans=None
        else:
            ns={}
            exec(cmds, ns)
            if varname is not None:
                if varname in ns:
                    ans=ns[varname]
                else:
                    ans=None
        ___TDI___makeData(ans).setTdiVar("___TDI___answer")
    except Exception:
        if traceback:
            _tb.print_exc()
        import sys
        e=sys.exc_info()[1]
        ___TDI___String("Error: "+str(e)).setTdiVar("___TDI___exception")
| null | mdsobjects/python/tdipy.py | tdipy.py | py | 1,692 | python | en | code | null | code-starcoder2 | 51 |
7621461 | #!/usr/bin/env python3
import argparse
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
import pandas as pd
import numpy as np
import math
import os
import sys
import json
import glob
def main():
    """Read train/test CSVs from the given work directory, plot the loss
    and error curves (plus dictionary matrices when D.init exists), then
    stitch loss.png and err.png side by side with ImageMagick's convert."""
    parser = argparse.ArgumentParser()
    parser.add_argument('workDir', type=str)
    args = parser.parse_args()

    train_df = pd.read_csv(os.path.join(args.workDir, 'train.csv'), sep=',')
    test_df = pd.read_csv(os.path.join(args.workDir, 'test.csv'), sep=',')
    plotLoss(train_df, test_df, args.workDir)
    plotErr(train_df, test_df, args.workDir)

    init_path = os.path.join(args.workDir, 'D.init')
    if os.path.exists(init_path):
        initD = np.loadtxt(init_path)
        latestD = np.loadtxt(os.path.join(args.workDir, 'D.latest'))
        plotD(initD, latestD, args.workDir)

    loss_fname = os.path.join(args.workDir, 'loss.png')
    err_fname = os.path.join(args.workDir, 'err.png')
    loss_err_fname = os.path.join(args.workDir, 'loss-error.png')
    os.system('convert +append "{}" "{}" "{}"'.format(
        loss_fname, err_fname, loss_err_fname))
    print('Created {}'.format(loss_err_fname))
def plotLoss(trainDf, testDf, workDir):
    """Plot train (smoothed) and test MSE-vs-epoch curves to loss.pdf/png.

    N is the row index where `epoch` first equals 1.0, i.e. the number of
    logged iterations per epoch; the train curve is smoothed with a
    trailing moving average of window N.
    NOTE(review): if no row has epoch == 1.0, np.argmax returns 0 and the
    window size becomes 0 (ZeroDivisionError) -- confirm the inputs.
    """
    # fig, ax = plt.subplots(1, 1, figsize=(5,2))
    fig, ax = plt.subplots(1, 1)
    # fig.tight_layout()
    trainEpoch = trainDf['epoch'].values
    trainLoss = trainDf['loss'].values
    N = np.argmax(trainEpoch==1.0)
    trainEpoch = trainEpoch[N:]
    # Trailing moving average; O(len*N) -- fine for typical log sizes.
    trainLoss = [sum(trainLoss[i-N:i])/N for i in range(N, len(trainLoss))]
    plt.plot(trainEpoch, trainLoss, label='Train')
    if not testDf.empty:
        plt.plot(testDf['epoch'].values, testDf['loss'].values, label='Test')
    plt.xlabel("Epoch")
    plt.ylabel("MSE")
    # NOTE(review): `xmin=` (now `left=`) and grid's `b=` (now `visible=`)
    # are deprecated matplotlib kwargs -- verify before upgrading mpl.
    plt.xlim(xmin=0)
    plt.grid(b=True, which='major', color='k', linestyle='-')
    plt.grid(b=True, which='minor', color='k', linestyle='--', alpha=0.2)
    plt.legend()
    # ax.set_yscale('log')
    ax.set_ylim(0, None)
    for ext in ['pdf', 'png']:
        f = os.path.join(workDir, "loss."+ext)
        fig.savefig(f)
        print("Created {}".format(f))
def plotErr(trainDf, testDf, workDir):
    """Plot train (smoothed) and test error-vs-epoch curves to err.pdf/png.

    Same structure as plotLoss, but for the 'err' column; the train curve
    is smoothed with a trailing moving average over one epoch's worth of
    iterations (N).  NOTE(review): same N == 0 hazard as in plotLoss.
    """
    # fig, ax = plt.subplots(1, 1, figsize=(5,2))
    fig, ax = plt.subplots(1, 1)
    # fig.tight_layout()
    trainEpoch = trainDf['epoch'].values
    trainLoss = trainDf['err'].values
    N = np.argmax(trainEpoch==1.0)
    trainEpoch = trainEpoch[N:]
    trainLoss = [sum(trainLoss[i-N:i])/N for i in range(N, len(trainLoss))]
    plt.plot(trainEpoch, trainLoss, label='Train')
    if not testDf.empty:
        plt.plot(testDf['epoch'].values, testDf['err'].values, label='Test')
    plt.xlabel("Epoch")
    plt.ylabel("Error")
    # NOTE(review): deprecated mpl kwargs, see plotLoss.
    plt.xlim(xmin=0)
    plt.grid(b=True, which='major', color='k', linestyle='-')
    plt.grid(b=True, which='minor', color='k', linestyle='--', alpha=0.2)
    plt.legend()
    # ax.set_yscale('log')
    ax.set_ylim(0, None)
    for ext in ['pdf', 'png']:
        f = os.path.join(workDir, "err."+ext)
        fig.savefig(f)
        print("Created {}".format(f))
def plotD(initD, latestD, workDir):
    """Save heatmaps of the initial and latest dictionary matrices.

    Each matrix is drawn with a symmetric blue-white-red color scale.
    Both matrices are also saved row-reordered (as *_sorted.png) by the
    center of mass of each row of the 6th-power-normalized latest
    matrix, so corresponding rows line up visually.
    """
    def p(D, fname):
        # Render one matrix with a color range symmetric around zero.
        plt.clf()
        lim = max(np.abs(np.min(D)), np.abs(np.max(D)))
        clim = (-lim, lim)
        plt.imshow(D, cmap='bwr', interpolation='nearest', clim=clim)
        plt.colorbar()
        plt.savefig(os.path.join(workDir, fname))
    p(initD, 'initD.png')
    p(latestD, 'latestD.png')
    # Sharpen rows (even power -> nonnegative), normalize each row to sum 1,
    # then sort rows by their center of mass along the columns.
    latestDs = latestD**6
    latestDs = latestDs/np.sum(latestDs, axis=1)[:,None]
    I = np.argsort(latestDs.dot(np.arange(latestDs.shape[1])))
    latestDs = latestD[I]
    initDs = initD[I]
    p(initDs, 'initD_sorted.png')
    p(latestDs, 'latestD_sorted.png')
    # Dcombined = np.concatenate((initDs, np.zeros((initD.shape[0], 10)), latestDs), axis=1)
    # p(Dcombined, 'Dcombined.png')
# Script entry point.
if __name__ == '__main__':
    main()
| null | sudoku/plot.py | plot.py | py | 4,017 | python | en | code | null | code-starcoder2 | 51 |
29452654 | # Imports
import time
import datetime
import win32api
import socket
import _thread
# Resolve a fully-qualified hostname: keep gethostname() when it already
# contains a domain part, otherwise fall back to a reverse DNS lookup.
# NOTE(review): socket.getfqdn() would express the same intent -- confirm
# the reverse lookup never blocks startup on this network.
if socket.gethostname().find('.')>=0:
    hostname=socket.gethostname()
else:
    hostname=socket.gethostbyaddr(socket.gethostname())[0]
# Functions
def main():
    """Poll win32 input state once per second and log idle start/stop events.

    Transitions are appended to a per-day, per-host CSV file named
    "<MMDD>_<hostname>_idle.csv", together with the duration of the
    preceding work or idle interval.
    """
    idleflag = False
    work_start, work_stop = 0, 0
    idle_start, idle_stop = 0, 0
    last_input = win32api.GetLastInputInfo()
    while True:
        time.sleep(1)
        current_input = win32api.GetLastInputInfo()
        if last_input == current_input:
            # No new input events since the last poll.
            if not idleflag:
                # BUG FIX: time.clock() was removed in Python 3.8;
                # perf_counter() is the documented monotonic replacement.
                work_stop = time.perf_counter()
                idle_start = work_stop
                _log_event("Idle Start", "Worked Duration",
                           work_stop - work_start)
                idleflag = True
        else:
            if idleflag:
                idle_stop = time.perf_counter()
                work_start = idle_stop
                _log_event("Idle Stop", "Idle Duration",
                           idle_stop - idle_start)
                idleflag = False
        last_input = current_input


def _log_event(event, duration_label, duration):
    """Append one idle-transition row to today's per-host log file.

    The `with` block closes the file; the old explicit w.close() inside
    the `with` was redundant.
    """
    fname = datetime.datetime.now().strftime("%m%d") + "_" + hostname + "_idle.csv"
    msg = event + "," + str(datetime.datetime.now()) + "," + \
        duration_label + "," + str(duration) + "\n"
    with open(fname, "a") as w:
        w.write(msg)
# __main__()
if __name__ == "__main__":
    try:
        _thread.start_new_thread( main, () )
    except Exception:
        print("Error: unable to start thread")
    # Keep the main thread alive for the logger thread.  BUG FIX: the old
    # "while 1: pass" busy-wait pinned a CPU core at 100%; sleeping is
    # equivalent for keeping the process alive.
    while True:
        time.sleep(1)
384093639 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import tkMessageBox
import MySQLdb
import mysql.connector
from fpdf import FPDF
import decimal
from datetime import *
import os, sys
from cargo import Cargo
# Defines the Recibo (pay slip) class
class Recibo(object):
    """Teacher pay slip ("recibo de sueldo"): computes each salary
    component, stores receipts in MySQL, and renders them to PDF.

    NOTE(review): SQL strings are built with %-interpolation and the DB
    credentials are hard coded; consider parameterized queries and
    external configuration.
    """
    # Receipt number backing field (name-mangled to _Recibo__numero_Recibo).
    __numero_Recibo = None

    def __init__(self, numero_Recibo=""):
        """Create a receipt wrapper for the given receipt number."""
        self.__numero_Recibo = numero_Recibo

    def getNumero_Recibo(self):
        """Return the receipt number."""
        return self.__numero_Recibo

    def setNumero_Recibo(self, numero_Recibo):
        """Set the receipt number."""
        self.__numero_Recibo = numero_Recibo

    numero_Recibo = property(fget=getNumero_Recibo, fset=setNumero_Recibo)

    def sueldo_Basico(self, puntos):
        """Return the basic salary: 3.437393 per point, rounded to cents."""
        sueldoBasico = round((3.437393 * puntos), 2)
        return sueldoBasico

    def monto_Anti(self, anios, sueldoB):
        """Return the seniority bonus for `anios` years of service.

        The percentage table covers years 0-24; from 25 years on the
        bonus is capped at 120% of the basic salary.
        """
        if anios >= 25:
            # BUG FIX: the old test was `anios > 25`, so exactly 25 years
            # fell through to porcentaje[25] and raised IndexError (the
            # table has indices 0..24, whose last entry is the same 120%).
            monto_Anti = round((1.20 * sueldoB), 2)
        else:
            porcentaje = [0, 0.10, 0.15, 0.15, 0.15, 0.30, 0.30, 0.40, 0.40,
                          0.40, 0.50, 0.50, 0.60, 0.60, 0.60, 0.70, 0.70,
                          0.80, 0.80, 0.80, 1, 1, 1.10, 1.10, 1.20]
            monto_Anti = round((porcentaje[anios] * sueldoB), 2)
        return monto_Anti

    def suma_Zona(self, sueldoB, porcentajeZona):
        """Return the zone bonus: basic salary times the zone percentage."""
        sumaZona = round((sueldoB * porcentajeZona), 2)
        return sumaZona

    def presentismo(self, sueldoB, montoAnti):
        """Return the attendance bonus: 8% of 75% of basic + seniority."""
        presentissmo = round(((sueldoB + montoAnti) * 0.75) * 0.08, 2)
        return presentissmo

    def subTotal1(self, sueldoB, montoAnti, sumaZona, presentismo):
        """Return total earnings: basic + seniority + zone + attendance."""
        subtotal1 = round((sueldoB + montoAnti + sumaZona + presentismo), 2)
        return subtotal1

    def jubilacion(self, subtotal1):
        """Return the pension deduction: 20% of the earnings subtotal."""
        montoJubilacion = round((subtotal1 * 0.20), 2)
        return montoJubilacion

    def obraSocial(self, subtotal1, descuento_Obra):
        """Return the health-insurance deduction for the given rate."""
        descuentoObra = round((subtotal1 * descuento_Obra), 2)
        return descuentoObra

    def seguro(self):
        """Return the flat life-insurance deduction."""
        return 300

    def subTotal2(self, jubilacion, descuento_Obra, seguro):
        """Return total deductions: pension + health insurance + insurance."""
        subtotal2 = round((jubilacion + descuento_Obra + seguro), 2)
        return subtotal2

    def total(self, subtotal1, subtotal2):
        """Return the final amount payable: earnings minus deductions."""
        montoTotal = round((subtotal1 - subtotal2), 2)
        return montoTotal

    def calcularPeriodo(self):
        """Return the current period as month+year, e.g. "52023".

        NOTE(review): the month is not zero-padded, so "12023" is
        ambiguous (Jan 2023 vs Dec 202x prefix) -- kept for compatibility
        with already-stored rows; confirm before changing the format.
        """
        mes = str(datetime.today().month)
        anio = str(datetime.today().year)
        return (mes + anio)

    def buscarPeriodo(self):
        """Return the list of stored receipt periods (as strings)."""
        periodo = []
        try:
            bd = MySQLdb.connect("localhost","root","gogole","Recibo_Sueldo" )
            cursor = bd.cursor()
            sql = "SELECT fechaPeriodo FROM Recibo;"
            cursor.execute(sql)
            resultados = cursor.fetchall()
            for registro in resultados:
                periodo.append(str(registro[0]))
            return periodo
        except mysql.connector.Error as err:
            # NOTE(review): the connection uses MySQLdb, whose errors are
            # not mysql.connector.Error -- these handlers may never fire,
            # and `bd` is unbound below if connect() failed.  Verify.
            print("Something went wrong: {}".format(err))
        bd.close()

    def calcularRecibo(self):
        """Compute and insert one receipt per active teacher/cargo.

        Joins teacher, cargo, school, zone and health-insurance data,
        derives every salary component, and inserts the rows into the
        Recibo table for the current period.
        """
        try:
            bd = MySQLdb.connect("localhost","root","gogole","Recibo_Sueldo")
            cursor = bd.cursor()
            sql="SELECT c.cod_Cargo, tp.puntos_Cargos, zon.porcentaje_Zona, obra.descuento_Obra, c.fechaIngreso FROM Docente d INNER JOIN Cargo c on d.dni_Docente = c.dni_Docente INNER JOIN ObraSocial obra on obra.cod_ObraSocial = d.cod_ObraSocial INNER JOIN Tipo_Cargo tp on tp.cod_tipoCargo = c.cod_Cargo INNER JOIN Escuela esc on esc.numero_Escuela = c.numero_Escuela INNER JOIN Zona zon on zon.cod_Zona = esc.cod_Zona where d.activo='Y';"
            cursor.execute(sql)
            resultados = cursor.fetchall()
            for registro in resultados:
                sueldoBasico = self.sueldo_Basico(registro[1])
                cargo = Cargo(registro[0])
                montoAntiguedad = self.monto_Anti(cargo.antiguedad(registro[4]), sueldoBasico)
                sumaZona = self.suma_Zona(sueldoBasico, registro[2])
                present = self.presentismo(sueldoBasico, montoAntiguedad)
                suBTotal1 = self.subTotal1(sueldoBasico, montoAntiguedad, sumaZona, present)
                jubi = self.jubilacion(suBTotal1)
                obraS = self.obraSocial(suBTotal1, registro[3])
                suBTotal2 = self.subTotal2(jubi, obraS, self.seguro())
                tot = self.total(suBTotal1, suBTotal2)
                fechaPeriodo= self.calcularPeriodo()
                # NOTE(review): values interpolated into SQL; prefer
                # cursor.execute(sql, params) with placeholders.
                sql="INSERT INTO Recibo(cod_Cargo, sueldoBasico, montoAnti, sumaZona,presentismo, subTotal1, jubilacion, desObraSoial, seguro, subTotal2, total,fechaPeriodo) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s' , '%s', '%s', '%s', '%s', '%s')" % (registro[0], sueldoBasico, montoAntiguedad, sumaZona, present, suBTotal1, jubi, obraS, self.seguro(), suBTotal2, tot, self.calcularPeriodo())
                cursor.execute(sql)
                bd.commit()
            tkMessageBox.showinfo("AVISO", " Los Recibos fueron insertados con exito")
        except mysql.connector.Error as err:
            print("Something went wrong: {}".format(err))

    def crearPdf(self):
        """Render this receipt to recibos/<numero>.pdf and open it.

        Loads the receipt row joined with teacher/school/cargo data,
        draws the values over the factura.jpg template with FPDF, and
        opens the result with evince.
        NOTE(review): if the query returns no rows, the variables below
        are unbound and the pdf.text calls raise NameError -- verify the
        receipt number always exists.
        """
        periodo = "recibos"
        try:
            bd = MySQLdb.connect("localhost","root","gogole","Recibo_Sueldo")
            cursor = bd.cursor()
            sql = "SELECT DISTINCT e.nombre_Escuela,d.nomApe_Docente, d.dni_Docente, tp.descripcion_Cargo, r.numero_Recibo, r.sueldoBasico, r.montoAnti, r.sumaZona, r.presentismo, r.subTotal1, r.jubilacion,r.desObraSoial, r.seguro, r.subTotal2, r.Total, r.fechaPeriodo, c.fechaIngreso FROM Docente d INNER JOIN Cargo c on d.dni_Docente = c.dni_Docente INNER JOIN Tipo_Cargo tp on tp.cod_tipoCargo = c.cod_tipoCargo INNER JOIN Escuela e on e.numero_Escuela = c.numero_Escuela INNER JOIN Recibo r on r.cod_Cargo = c.cod_Cargo WHERE r.numero_Recibo ='%s'" % self.getNumero_Recibo()
            cursor.execute(sql)
            resultados = cursor.fetchall()
            for registro in resultados:
                escuela = str(registro[0])
                nomApe = str(registro[1])
                dni = str(registro[2])
                cargo = str(registro[3])
                numero_recibo = str(registro[4])
                sueldoBasico = str(registro[5])
                monto_Anti = str(registro[6])
                suma_Zona = str(registro[7])
                presentismo = str(registro[8])
                subTotal1 = str(registro[9])
                jubilacion = str(registro[10])
                desObraSocial = str(registro[11])
                seguro = str(registro[12])
                subTotal2 = str(registro[13])
                total = str(registro[14])
                fechaPeriodo = str(registro[15])
                ingreso = str(registro[16])
        except mysql.connector.Error as err:
            print("Something went wrong: {}".format(err))
        bd.close()
        pdf = FPDF()
        pdf.add_page()
        pdf.image('factura.jpg',5,2,200,290)
        pdf.set_font('Arial', 'B', 10)
        # Fixed coordinates match the factura.jpg template layout.
        pdf.text(96, 72 , escuela)
        pdf.text(161, 72 , fechaPeriodo)
        pdf.text(28, 89 , nomApe)
        pdf.text(69, 89 , dni)
        pdf.text(96, 89 , cargo)
        pdf.text(161, 89 , numero_recibo)
        pdf.text(74, 115 , "$ "+sueldoBasico)
        pdf.text(74, 120 , "$ "+monto_Anti)
        pdf.text(74, 125 , "$ "+suma_Zona)
        pdf.text(74, 130 , "$ "+presentismo)
        pdf.text(74, 140, "$ "+subTotal1)
        pdf.text(155, 116, "$ "+jubilacion)
        pdf.text(155, 121, "$ "+desObraSocial)
        pdf.text(155, 126, "$ "+seguro)
        pdf.text(150, 141, "$ "+subTotal2)
        pdf.text(145, 216, "$ "+total)
        pdf.text(29, 231 , ingreso)
        nombre = str(self.getNumero_Recibo())
        ext = '.pdf'
        salida = nombre+ext
        pdf.output(periodo+"/"+salida, 'F')
        abrir='recibos/'+str(self.getNumero_Recibo())+'.pdf'
        os.system('evince '+abrir)
| null | clases/recibo.py | recibo.py | py | 9,951 | python | en | code | null | code-starcoder2 | 51 |
290954816 | import os
import sys
import hashlib
import time
import sqlite3
import zlib
import datetime
from sqlite3 import Error
# Walk a directory tree, Adler-32-checksum every file, and record
# path/size/checksum/mtime rows in a SQLite database.
BLOCKSIZE = 524288
start_time = time.time()
#walk_dir = sys.argv[1]
# NOTE(review): on Windows "C:" (no backslash) means "current directory on
# drive C", not the drive root -- confirm this is intended.
walk_dir = "C:"
# NOTE(review): "\D" and "\l" happen not to be escape sequences, so this
# literal works, but r'C:\DataSets\listing2.db' would be safer.
conn = sqlite3.connect('C:\DataSets\listing2.db')
c = conn.cursor()
# Create table
try:
    c.execute('''CREATE TABLE listing (file text, size number, hash text, last_modified real, listing_date real)''')
except Error as e:
    # Table probably exists from a previous run; report and continue.
    print(e)
print('walk_dir = ' + walk_dir)
# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))
for root, subdirs, files in os.walk(walk_dir):
    print('--\nroot = ' + root)
    #list_file_path = os.path.join(root, 'my-directory-list.txt')
    #print('list_file_path = ' + list_file_path)
    for subdir in subdirs:
        print('--\nsubdir = ' + subdir )
    for file in files:
        print('--\nfile = ' + file )
        file_path = os.path.join(root, file)
        #hasher=hashlib.blake2s()
        try:
            # Adler-32 rolling checksum, seeded with 1 per the zlib docs;
            # stored in the (misnamed) "hash" column.
            f_asum = 1
            with open(file_path, 'rb') as f:
                f_content = f.read(BLOCKSIZE)
                while len(f_content) > 0:
                    #hasher.update(f_content)
                    f_asum = zlib.adler32(f_content, f_asum)
                    f_content = f.read(BLOCKSIZE)
            #f_hash=hasher.hexdigest()
            print('HASH: ' + str(f_asum))
            f_size=os.path.getsize(file_path)
            last_modified = os.path.getmtime(file_path)
            print('Size: ' + str(f_size))
            print('Last_modified: ' + str(last_modified))
            # One parameterized insert + commit per file keeps progress
            # durable if the walk is interrupted.
            chestia = file_path, f_size, f_asum, last_modified, str(time.time())
            c.execute("INSERT INTO LISTING VALUES (?,?,?,?,?)",chestia)
            conn.commit()
        except Exception as e:
            # Unreadable/locked files are skipped; note `e` is not printed.
            print('Error: ' + file_path)
conn.close()
print("--- %s seconds ---" % (time.time() - start_time))
| null | loopy2.py | loopy2.py | py | 2,214 | python | en | code | null | code-starcoder2 | 51 |
568134976 | import os
from gitpandas import Repository
__author__ = 'willmcginnis'
if __name__ == '__main__':
repo = Repository(working_dir=os.path.abspath('../../git-pandas'))
fc = repo.file_change_rates(include_globs=['*.py'], coverage=True)
print(fc) | null | examples/file_change_rates.py | file_change_rates.py | py | 256 | python | en | code | null | code-starcoder2 | 51 |
143495959 | import collections
import re
from buildbot.plugins import steps, util
from buildbot.process import buildstep
from buildbot.status.results import SUCCESS
from twisted.internet import defer
import yaml
import environments as envs
SERVO_REPO = "https://github.com/servo/servo"
class CheckRevisionStep(buildstep.BuildStep):
    """\
    Step which checks to ensure the revision that triggered the build
    is the same revision that we actually checked out,
    and fails the build if this is not the case.
    """
    # Any raised exception stops the build and marks it failed.
    haltOnFailure = True
    flunkOnFailure = True
    def __init__(self, **kwargs):
        buildstep.BuildStep.__init__(self, **kwargs)
    @defer.inlineCallbacks
    def run(self):
        # Requested revision (from the change source) vs. the revision the
        # Git step reports it actually checked out.
        rev = self.getProperty('revision')
        got_rev = self.getProperty('got_revision')
        # `revision` can be None if the build is not tied to a single commit,
        # e.g. if "force build" is requested on the status page
        if rev is not None and rev != got_rev:
            raise Exception(
                "Actual commit ({}) differs from requested commit ({})".format(
                    got_rev, rev
                )
            )
        yield defer.returnValue(SUCCESS)
class ServoFactory(util.BuildFactory):
    """Build factory whose first steps check out the servo repository and
    then verify that the checked-out revision matches the requested one."""
    def __init__(self, build_steps):
        """Prepend the checkout and revision-check steps to build_steps.

        Prefer DynamicServoFactory over instantiating this class directly.
        """
        checkout = steps.Git(
            repourl=SERVO_REPO,
            mode="full", method="fresh", retryFetch=True
        )
        # util.BuildFactory is an old-style class so we cannot use super()
        # but must hardcode the superclass here
        util.BuildFactory.__init__(
            self, [checkout, CheckRevisionStep()] + build_steps
        )
class StepsYAMLParsingStep(buildstep.ShellMixin, buildstep.BuildStep):
"""\
Step which reads the YAML steps configuration in the main servo repo
and dynamically adds test steps.
"""
haltOnFailure = True
flunkOnFailure = True
workdir = None
def __init__(self, builder_name, environment, yaml_path, **kwargs):
kwargs = self.setupShellMixin(kwargs)
buildstep.BuildStep.__init__(self, **kwargs)
self.builder_name = builder_name
self.environment = environment
self.yaml_path = yaml_path
def setDefaultWorkdir(self, workdir):
buildstep.BuildStep.setDefaultWorkdir(self, workdir)
self.workdir = workdir
@defer.inlineCallbacks
def run(self):
self.is_windows = re.match('windows', self.builder_name)
try:
show_cmd = "cat" if not self.is_windows else "type"
native_yaml_path = self.yaml_path
if self.is_windows:
native_yaml_path = native_yaml_path.replace('/', '\\')
cmd = yield self.makeRemoteShellCommand(
command=[show_cmd, native_yaml_path],
collectStdout=True
)
yield self.runCommand(cmd)
result = cmd.results()
if result != util.SUCCESS:
raise Exception("Command failed with return code: {}" .format(
str(cmd.rc)
))
else:
config = yaml.safe_load(cmd.stdout)
builder_config = config[self.builder_name]
commands = None
env = self.environment
env += envs.Environment(config.get('env', {}))
if isinstance(builder_config, collections.Mapping):
commands = builder_config['commands']
env += envs.Environment(builder_config.get('env', {}))
else:
commands = builder_config
dynamic_steps = [
self.make_step(command, env) for command in commands
]
except Exception as e: # Bad step configuration, fail build
# Capture the exception and re-raise with a friendly message
raise Exception("Bad step configuration for {}: {}".format(
self.builder_name,
str(e)
))
pkill_step = [self.make_pkill_step("servo")]
self.add_steps(pkill_step + dynamic_steps)
defer.returnValue(result)
def add_steps(self, steps):
"""\
Adds new steps to this build, making sure to avoid name collisions
by adding counts to disambiguate multiple steps of the same type,
and respecting internal Buildbot invariants.
Semi-polyfill for addStepsAfterLastStep from Buildbot 9.
"""
def step_type(step):
return step.name.split('__')[0]
name_counts = collections.Counter()
# Check for existing max step counts for each type of step
# in the existing steps on the build.
# Adding multiple steps at the same time makes it more efficient
# to check for collisions since this is amortized over all
# steps added together.
for step in self.build.steps:
name_counts[step_type(step)] += 1
# Add new steps, updating `name_counts` along the way
for step in steps:
existing_count = name_counts[step_type(step)]
if existing_count > 0:
# First step has count = 0 but no suffix,
# so second step will have `__1` as suffix, etc.
step.name += '__{}'.format(existing_count)
name_counts[step_type(step)] += 1
self._add_step(step)
def _add_step(self, step):
"""\
Adds a new step to this build, making sure to maintain internal
Buildbot invariants.
Do not call this method directly, but go through add_steps
to prevent `name` collisions.
"""
step.setBuild(self.build)
step.setBuildSlave(self.build.slavebuilder.slave)
step.setDefaultWorkdir(self.workdir)
self.build.steps.append(step)
step_status = self.build.build_status.addStepWithName(step.name)
step.setStepStatus(step_status)
def make_step(self, command, env):
step_kwargs = {}
step_env = env
command = command.split(' ')
step_kwargs['command'] = command
if self.is_windows:
step_env += envs.Environment({
# Set home directory, to avoid adding `cd` command every time
'HOME': r'C:\buildbot\slave\{}\build'.format(
self.builder_name
),
})
step_desc = []
step_class = steps.ShellCommand
args = iter(command)
for arg in args:
if arg == './mach' or arg == 'mach.bat':
mach_arg = next(args)
step_desc = [mach_arg]
# Change Step class to capture warnings as needed
# (steps.Compile and steps.Test catch warnings)
if re.match('build(-.*)?', mach_arg):
step_class = steps.Compile
elif re.match('package', mach_arg):
step_class = steps.Compile
elif re.match('test-.*', mach_arg):
step_class = steps.Test
# Provide credentials where necessary
if re.match('upload-nightly', mach_arg):
step_kwargs['logEnviron'] = False
step_env += envs.upload_nightly
# Capture any logfiles
elif re.match('--log-.*', arg):
logfile = next(args)
if 'logfiles' not in step_kwargs:
step_kwargs['logfiles'] = {}
step_kwargs['logfiles'][logfile] = logfile
else:
step_desc += [arg]
if step_class != steps.ShellCommand:
step_kwargs['description'] = "running"
step_kwargs['descriptionDone'] = "ran"
step_kwargs['descriptionSuffix'] = " ".join(step_desc)
step_kwargs['env'] = step_env
return step_class(**step_kwargs)
def make_pkill_step(self, target):
    """Return a step that force-kills *target* by exact process name,
    treating exit code 1 ("no such process") as success."""
    if self.is_windows:
        kill_command = ["powershell", "kill", "-n", target]
    else:
        kill_command = ["pkill", "-x", target]
    return steps.ShellCommand(
        command=kill_command,
        decodeRC={0: SUCCESS, 1: SUCCESS}
    )
class DynamicServoFactory(ServoFactory):
    """\
    Smart factory which takes a list of shell commands
    from a YAML file located in the main servo/servo repository
    and creates the appropriate Buildbot Steps.
    Uses heuristics to infer Step type, if there are any logfiles, etc.
    """
    def __init__(self, builder_name, environment):
        # util.BuildFactory is an old-style class so we cannot use super()
        # but must hardcode the superclass here
        # The only fixed step parses etc/ci/buildbot_steps.yml and appends
        # the dynamically generated steps at runtime.
        ServoFactory.__init__(self, [
            StepsYAMLParsingStep(builder_name, environment,
                                 "etc/ci/buildbot_steps.yml")
        ])
# Factory for the documentation builder: a single static upload step.
doc = ServoFactory([
    # This is not dynamic because a) we need to pass the logEnviron kwarg
    # and b) changes to the documentation build are already encapsulated
    # in the upload_docs.sh script; any further changes should go through
    # the saltfs repo to avoid leaking the token.
    steps.ShellCommand(command=["etc/ci/upload_docs.sh"],
                       env=envs.doc,
                       # important not to leak token
                       logEnviron=False),
])
| null | buildbot/master/files/config/factories.py | factories.py | py | 9,654 | python | en | code | null | code-starcoder2 | 51 |
158780335 | import json
from datetime import datetime, timedelta

from django.views.generic import FormView
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.template import Context, loader
from googlefinance import getQuotes

from hl_bigdata.forms import PostForm
from hl_bigdata import models as m
def list(request):
    """Render the posts created within the last two days.

    NOTE: the view name shadows the builtin ``list``; it is kept because
    URLconfs reference the view by this name.
    """
    two_days_ago = datetime.utcnow() - timedelta(days=2)
    recent_posts = m.Post.objects.filter(created_at__gt=two_days_ago).all()
    # render() expects a plain dict; wrapping it in template.Context was
    # never required and raises on Django >= 1.8.
    context = {'post_list': recent_posts}
    # Render accepts three arguments: the request object, the
    # path of the template file and the context
    return render(request, 'list.html', context)
def post_form_upload(request):
    """Show the post form on GET; on POST validate it and create a Post."""
    if request.method != 'GET':
        # Bind the submitted data; create and redirect when it validates.
        form = PostForm(request.POST)
        if form.is_valid():
            post = m.Post.objects.create(
                content=form.cleaned_data['content'],
                created_at=form.cleaned_data['created_at'])
            return HttpResponseRedirect(
                reverse('post', kwargs={'post_id': post.id}))
    else:
        form = PostForm()
    # Fall through: empty form on GET, or re-display with errors on POST.
    return render(request, 'form_upload.html', {
        'form': form,
    })
def post_upload(request):
    """Raw (non-form) upload: show the page on GET, create a Post on POST."""
    if request.method == 'POST':
        # objects.create() saves immediately; no explicit save() needed.
        new_post = m.Post.objects.create(content=request.POST['content'],
                                         created_at=datetime.utcnow())
        return HttpResponseRedirect(
            reverse('post_detail', kwargs={'post_id': new_post.id}))
    elif request.method == 'GET':
        return render(request, 'upload.html', {})
def post_detail(request, post_id):
    """Render a single post, or return HTTP 404 when the id is unknown."""
    try:
        post = m.Post.objects.get(pk=post_id)
    except m.Post.DoesNotExist:
        # Bug fix: Http404 was never imported, so this branch raised
        # NameError instead of producing a 404 page.  It is now imported
        # from django.http at the top of the module.
        raise Http404
    return render(request, 'detail.html', {'post': post})
def epic_form_upload(request):
    """Show the EPIC form on GET; on POST validate it and create an EPIC."""
    if request.method == 'GET':
        form = PostForm()
        return render(request, 'epic.html', {'form': form})
    # POST: bind and validate the submitted data.
    form = PostForm(request.POST)
    if form.is_valid():
        epic = m.EPIC.objects.create(
            EPIC_code=form.cleaned_data['EPIC_code'],
            created_at=form.cleaned_data['created_at'])
        return HttpResponseRedirect(
            reverse('epic', kwargs={'epic_id': epic.id}))
    # Re-display the form with validation errors.
    return render(request, 'epic.html', {'form': form})
def epic_upload(request):
    """Create an EPIC record from a raw POST, or show the page on GET."""
    if request.method == 'POST':
        epic = m.EPIC.objects.create(EPIC_code=request.POST['EPIC_code'],
                                     created_at=datetime.utcnow())
        target = reverse('epic_detail',
                         kwargs={'epic_id': epic.EPIC_code.encode('utf8')})
        return HttpResponseRedirect(target)
    elif request.method == 'GET':
        return render(request, 'epic.html', {})
def epic_detail(request, epic_id):
    """Fetch live quote data for *epic_id* and render it as pretty JSON."""
    quotes = getQuotes(epic_id)
    dump = json.dumps(quotes, indent=2)
    return render(request, 'epic_detail.html', {'dump': dump})
| null | hl_bigdata/views.py | views.py | py | 3,506 | python | en | code | null | code-starcoder2 | 51 |
334046056 | import asyncio
import aiohttp
semaphore = asyncio.Semaphore(10)
def get_url(pages=100, page_size=18):
    """Build the paginated book-listing API URLs.

    Generalized: *pages* and *page_size* were hard-coded to 100 and 18;
    the defaults reproduce the original URL list exactly.
    """
    base = 'https://spa5.scrape.center/api/book/?limit={}&offset={}'
    return [base.format(page_size, i * page_size) for i in range(pages)]
url_lis = get_url()
async def request(url):
    """GET *url* under the module-level semaphore and return parsed JSON."""
    async with semaphore:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                # Throttle: pause one second per request.
                await asyncio.sleep(1)
                return await resp.json()
async def main():
    """Fetch every page concurrently and wait for all of them."""
    tasks = [request(url) for url in url_lis]
    await asyncio.gather(*tasks)
if __name__ == '__main__':
    import time
    start = time.time()
    # Drive the event loop until all requests complete, then report the
    # total wall-clock time taken.
    asyncio.get_event_loop().run_until_complete(main())
    print(time.time() - start)
| null | util/spa4.py | spa4.py | py | 749 | python | en | code | null | code-starcoder2 | 51 |
554575420 | """
@note:detection
"""
from PIL import Image
import numpy as np
from NN import CNN
import matplotlib.pyplot as plt
"""
@note: 由于二值化无法将由于色调相近但有差别的图像
显示出,于是尝试采用四值化处理
"""
# Multi-level quantization function.
def convert8(img):
    """Quantize *img* in place to 8 gray levels (0-7) by dividing by 32.

    Replaces the original O(rows*cols) Python pixel loop with a single
    vectorized operation; np.trunc matches int()'s truncation semantics.
    """
    img[:] = np.trunc(img / 32)
# Binarization function.
def convert2(img):
    """Binarize *img* in place: values below 200 become 0, others 1.

    Vectorized replacement for the original per-pixel Python loop.
    """
    img[:] = np.where(img < 200, 0, 1)
# Pre-processing: load an image, resize, grayscale, return as a matrix.
def pre_handle(file_name, size=(480, 600)):
    """Load *file_name*, resize to *size*, convert to grayscale and return
    the pixel data as a float64 numpy array."""
    # Load the image from disk.
    image = Image.open(file_name)
    # Resize with antialiasing.  NOTE(review): Image.ANTIALIAS is
    # deprecated in recent Pillow in favour of Image.LANCZOS — confirm the
    # installed Pillow version before upgrading.
    image = image.resize(size, Image.ANTIALIAS)
    # Convert to a single-channel (grayscale) image.
    image = image.convert('L')
    # Convert to a numeric matrix.
    x = np.array(image, 'float64')
    return x
# Convolution kernels: the individual edge/line-detection filters.
filter1 = np.array([[0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1],
                    [0, 0, 0, 0, 0],
                    [-1, -1, -1, -1, -1],
                    [0, 0, 0, 0, 0]], 'float64')
filter2 = np.array([[0, 1, 0, -1, 0],
                    [0, 1, 0, -1, 0],
                    [0, 1, 0, -1, 0],
                    [0, 1, 0, -1, 0],
                    [0, 1, 0, -1, 0]], 'float64')
filter3 = np.array([[0, 0, 1, 0, 0],
                    [0, 1, 0, 0, 0],
                    [1, 0, 0, 0, -1],
                    [0, 0, 0, -1, 0],
                    [0, 0, -1, 0, 0]], 'float64')
filter4 = np.array([[0, 0, -1, 0, 0],
                    [0, -1, 0, 0, 0],
                    [-1, 0, 0, 0, 1],
                    [0, 0, 0, 1, 0],
                    [0, 0, 1, 0, 0]], 'float64')
filter5 = np.array([[0, 0, 1, 0, 0],
                    [0, 1, 0, 1, 0],
                    [1, 0, 0, 0, 1],
                    [0, -1, 0, -1, 0],
                    [0, 0, -1, 0, 0]], 'float64')
filter6 = np.array([[0, 0, -1, 0, 0],
                    [0, -1, 0, -1, 0],
                    [1, 0, 0, 0, 1],
                    [0, 1, 0, 1, 0],
                    [0, 0, 1, 0, 0]], 'float64')
# Sobel-style vertical edge filter.  Bug fix: the dtype string 'float64'
# was previously placed *inside* the nested list, producing a ragged array
# (an error on modern numpy) instead of a 5x5 float kernel.
filter_sobel = np.array([[0., 1., 0., -1., 0.],
                         [0., 1., 0., -1., 0.],
                         [0., 2., 0., -2., 0.],
                         [0., 1., 0., -1., 0.],
                         [0., 1., 0., -1., 0.]], 'float64')
# ReLU activation function.
def relu(value):
    """Rectified linear unit: return *value* if non-negative, else 0.

    The original copied the input via ``value.copy()`` before branching,
    which only works for numpy scalars; this version also accepts plain
    Python numbers and returns the same values.
    """
    return value if value >= 0 else 0
# Convolution: stride 1x1, zero-padding of 2 on every side, fixed bias 1.
def conv2d(conv, ft):
    """Correlate *conv* with kernel *ft* (no kernel flip), zero-padded by 2.

    The output value at (i, j) is max(sum(window * ft) + 1, 0), i.e.
    relu(correlation + bias).  The original element-wise inner loops are
    replaced with one vectorized window product per output position.
    """
    padded = np.zeros((conv.shape[0] + 4, conv.shape[1] + 4))
    padded[2:2 + conv.shape[0], 2:2 + conv.shape[1]] = conv
    out_rows = padded.shape[0] - ft.shape[0] + 1
    out_cols = padded.shape[1] - ft.shape[1] + 1
    rs = np.ones((out_rows, out_cols))
    for i in range(out_rows):
        for j in range(out_cols):
            window = padded[i:i + ft.shape[0], j:j + ft.shape[1]]
            # relu(sum + 1), inlined for speed and self-containment.
            rs[i][j] = max(np.sum(window * ft) + 1, 0)
    return rs
# Max pooling function, 2x2 by default.
def max_pool(pool, size=2):
    """Non-overlapping max pooling with a *size* x *size* window.

    Trailing rows/columns that do not fill a whole window are dropped, and
    outputs are floored at 0 — faithful to the original loop, whose running
    maximum started from 0.  Vectorized via a reshape into windows.
    """
    rows = pool.shape[0] // size
    cols = pool.shape[1] // size
    windows = pool[:rows * size, :cols * size].reshape(rows, size, cols, size)
    # astype keeps the original float64 output dtype (np.zeros default).
    return np.maximum(windows.max(axis=(1, 3)), 0).astype(np.float64)
# Flatten a matrix into a single row.
def to_one_dim(array):
    """Return *array* reshaped into a 1 x N row vector."""
    return np.resize(array, (1, array.size))
# Print a matrix's size.
def get_size(array):
    """Print the row and column counts of *array*, space separated."""
    rows, columns = array.shape[0], array.shape[1]
    print(rows, columns)
"""
@note:开始进行卷积神经网络计算,
我的思路为先让机器有能力
识别出哪里是广告部分,剔除
背景信息(即与广告内容无关部分),
第二部采用sliding window的
方框检测法对比两张广告内容差异
"""
# Convolve the image, extracting features layer by layer.
def convolution(x_in):
    """Run the fixed 8-layer conv/pool stack over *x_in* and return the
    final feature map."""
    # Layer 1: convolution.  The imshow/show calls display the first
    # feature map for inspection; note plt.show() blocks until closed.
    layer1 = conv2d(x_in, filter1)
    plt.imshow(layer1)
    plt.show()
    # Layer 2: max pooling
    layer2 = max_pool(layer1)
    # Layer 3: convolution
    layer3 = conv2d(layer2, filter2)
    # Layer 4: max pooling
    layer4 = max_pool(layer3)
    # Layer 5: convolution
    layer5 = conv2d(layer4, filter3)
    # Layer 6: max pooling
    layer6 = max_pool(layer5)
    # Layer 7: convolution
    layer7 = conv2d(layer6, filter4)
    # Layer 8: max pooling
    layer8 = max_pool(layer7)
    return layer8
# Fully-connected layer hyper-parameters and weights.
# Learning rate
lr = 0.011
# Training labels: first 10 samples positive, last 10 negative.
lb = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
# Initial weights drawn uniformly from [-1, 1).
w1 = np.random.random((37*30, 256))*2-1
w2 = np.random.random((256, 16))*2-1
w3 = np.random.random((16, 1))*2-1
# Sigmoid activation function.
def sigmoid(value):
    """Logistic sigmoid: 1 / (1 + e^(-value))."""
    exp_neg = np.exp(-value)
    return 1 / (1 + exp_neg)
def dsigmoid(value):
    """Sigmoid derivative, expressed in terms of the sigmoid output."""
    complement = 1 - value
    return value * complement
# Fully-connected layer: one forward + backward pass.
def fc():
    """One training step: forward pass, backprop, in-place weight update.

    Reads the module globals ``x_train`` (inputs), ``lb`` (labels) and
    ``lr`` (learning rate); mutates ``w1``/``w2``/``w3`` and publishes the
    network output as global ``l3`` for the caller.
    """
    global lb, l3, w1, w2, w3, x_train
    # Forward propagation
    l1 = sigmoid(np.dot(x_train, w1))
    l2 = sigmoid(np.dot(l1, w2))
    l3 = sigmoid(np.dot(l2, w3))
    # Back propagation: output error scaled by the sigmoid derivative,
    # then propagated backwards through the transposed weights.
    l3_delta = (l3 - lb.T)*dsigmoid(l3)
    l2_delta = np.dot(l3_delta, w3.T)*dsigmoid(l2)
    l1_delta = np.dot(l2_delta, w2.T)*dsigmoid(l1)
    w3_c = lr*np.dot(l2.T, l3_delta)
    w2_c = lr*np.dot(l1.T, l2_delta)
    w1_c = lr*np.dot(x_train.T, l1_delta)
    # Update each weight layer by gradient descent.
    w3 = w3 - w3_c
    w2 = w2 - w2_c
    w1 = w1 - w1_c
# Inference function: forward pass with weights saved by CNN.memory().
def test(x_test):
    """Forward-propagate *x_test* through the saved network weights and
    return the scalar output."""
    w1_test = CNN.loadparam("w1.txt")
    w2_test = CNN.loadparam("w2.txt")
    w3_test = CNN.loadparam("w3.txt")
    activation = x_test
    for weights in (w1_test, w2_test, w3_test):
        activation = sigmoid(np.dot(activation, weights))
    return activation[0][0]
# Gradient-descent training loop.
def gradient_descent(epoch_num):
    """Run *epoch_num* training steps, printing the mean absolute error
    each epoch and the final network output afterwards."""
    global lr, lb
    for _ in range(epoch_num):
        fc()
        # Mean absolute error of the current predictions against labels.
        e = np.mean(np.abs(l3 - lb.T))
        print(e)
    print(l3)
# Processing function: turn one image file into a flattened feature row.
def operate(file_name):
    """Pre-process, convolve and flatten ``./datasets/<file_name>``."""
    # Pre-processing (load / resize / grayscale).
    x = pre_handle("./datasets/" + file_name)
    # Convolution feature extraction.
    x = convolution(x)
    # Flatten the final feature map into one row.
    x = to_one_dim(x)
    print(file_name + " is done!")
    return x
if __name__ == "__main__":
    # Build the training set from images 1.jpg .. 20.jpg.  The twenty
    # hand-written x1..x20 assignments are replaced by one loop; the
    # processing order (1..20, then t1, t2) is unchanged.
    features = [operate(str(i) + ".jpg") for i in range(1, 21)]
    xt1 = operate("t1.jpg")
    xt2 = operate("t2.jpg")
    # fc() reads this module-level global during training.
    x_train = np.concatenate(features)
    # Train, evaluate both test images, then persist the weights.
    gradient_descent(6000)
    print(test(xt1))
    print(test(xt2))
    CNN.memory(w1, w2, w3)
| null | pre.py | pre.py | py | 7,763 | python | en | code | null | code-starcoder2 | 51 |
380042036 | import os
import subprocess
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.conf import settings
from FilmsApp.models import Film, Mark
BASE_DIR = settings.BASE_DIR
class Command(BaseCommand):
    """Export every user/film mark to ``all_marks`` and run ``setup``."""

    def handle(self, *args, **options):
        self.create_all_marks()
        self.setup()

    def create_all_marks(self):
        """Write a header line (max user id, max film id, mark count)
        followed by one ``user film mark mark_type`` line per mark."""
        with open('all_marks', 'w') as f:
            f.write(str(User.objects.all().order_by("-id")[0].id) + ' ')
            f.write(str(Film.objects.all().order_by("-id")[0].id) + ' ')
            f.write(str(Mark.objects.all().count()) + '\n')
            for mark in Mark.objects.all():
                f.write(
                    str(mark.user_id) + ' ' + str(mark.film_id) + ' ' + str(mark.mark) + ' ' + str(mark.mark_type_id) + '\n'
                )

    def setup(self):
        # Run the setup script from BASE_DIR without os.chdir(), which
        # mutated the working directory for the whole process.
        subprocess.call('./setup', cwd=BASE_DIR)
| null | WTWApp/management/commands/make_learning.py | make_learning.py | py | 928 | python | en | code | null | code-starcoder2 | 51 |
579393952 | # -*- coding: utf-8 -*-
import os
import re
# Raw strings: "\W" / "\(" inside a plain string literal is an invalid
# escape sequence (a SyntaxWarning on recent Pythons); behavior unchanged.
ALPHANUMERIC_REGEX = re.compile(r"[\W_]+")
INSIDE_PARENTHESES_REGEX = re.compile(r"\([^)]*\)")
# NOTE(review): the class below excludes ')' rather than ']', which looks
# like a copy-paste from the parentheses pattern — confirm the intended
# matching before changing it.
INSIDE_BRACKETS_REGEX = re.compile(r"\[[^)]*\]")
def find_files(root_folder, extension=None):
    """
    :param root_folder: str
        Folder to scan
    :param extension: str or None
        Finds files ending with just this extension. None means any extension
    :return: [] of str
        List of files found in folder
    """
    lst = []
    for fil in os.listdir(root_folder):
        full_path = os.path.join(root_folder, fil)
        if os.path.isdir(full_path):
            # Recurse into sub-directories.
            lst += find_files(full_path, extension)
        elif extension is None or fil.endswith(extension):
            # Bug fix: with extension=None the original collected no files
            # at all, contradicting its own docstring ("None means any
            # extension").
            lst.append(full_path)
    return lst
def remove_files(files):
    """
    :param files: [] of str
        Paths of files to remove
    :return: void
        Removes files
    """
    for path in files:
        os.remove(path)
def print_folder_songs_status(missing, extra):
    """
    :param missing: [] of SpotifySong
        List of songs missing
    :param extra: [] of LocalSong
        List of songs found extra (should not be there)
    :return: void
        Prints info about status of folder-playlist sync
    """
    if not missing and not extra:
        print("\n")
        print("Everything all right!")
    if missing:
        print("\n")
        print(len(missing),
              "missing local songs (are in Spotify, but not in local)")
        for song in sorted(missing):
            # str(str(song)) in the original was a redundant double call.
            print("\t-", str(song), "(available?", song.available, ")")
    if extra:
        print("\n")
        print(len(extra), "extra local songs (are local, but not in Spotify)")
        for song in sorted(extra):
            print("\t-", str(song), "(", song.file_path, ")")
| null | spotify/utils.py | utils.py | py | 1,964 | python | en | code | null | code-starcoder2 | 51 |
621885187 | from dataclasses import dataclass
from .color import Color
from .point import Point
@dataclass
class Triangle:
    """Triangle described by a colour and three vertex points."""
    color: Color
    a: Point
    b: Point
    c: Point

    @staticmethod
    def read(buffer) -> 'Triangle':
        """Parse ``color a b c`` (whitespace separated) from *buffer*."""
        color = Color.read(buffer)
        buffer.skip_whitespaces()
        a = Point.read(buffer)
        buffer.skip_whitespaces()
        b = Point.read(buffer)
        buffer.skip_whitespaces()
        c = Point.read(buffer)
        return Triangle(color, a, b, c)

    def perimeter(self):
        """Sum of the lengths of the three sides."""
        return (self.a.distance(self.b)
                + self.b.distance(self.c)
                + self.c.distance(self.a))

    # Backward-compatible alias: the original public name was misspelled
    # and existing callers may still use it.
    perimiter = perimeter

    def __str__(self) -> str:
        return (f'Triangle: color={self.color}, '
                f'points={self.a} - {self.b} - {self.c}')
| null | AVS3/triangle.py | triangle.py | py | 869 | python | en | code | null | code-starcoder2 | 51 |
146214300 | # encoding:utf-8
# memorizer main file
from kivy.app import App
# from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
# from kivy.uix.label import Label
from kivy.clock import Clock
# from kivy.properties import ObjectProperty
import time
import random
# count = 10
count = 3
class Memorizer(BoxLayout):
    def time_left(self, *args):
        """Clock callback: tick the countdown and update the label.

        Bug fix: the original unconditionally overwrote the label on the
        last line, so 'Time is over!' was immediately replaced by the
        countdown text and never shown.
        """
        print('Args: {}'.format(args))
        global count
        print('count: {}'.format(count))
        # Local renamed from `time`, which shadowed the time module.
        remaining = str(count) + ' secs left'
        if count:
            count -= 1
            print('count decreased!')
            self.ids.time_left.text = remaining
        else:
            print("i'm here!")
            self.ids.time_left.text = 'Time is over!'
        return None

    def number(self):
        """Return a random six-digit number."""
        return random.randint(100000, 999999)
class MemorizerApp(App):
    def build(self):
        # Entry point called by Kivy: create the root widget and start a
        # once-per-second countdown clock bound to it.
        self.title = 'Memorizer'
        memorizer = Memorizer()
        Clock.schedule_interval(memorizer.time_left, 1.0)
        return memorizer
# Start the Kivy event loop when executed as a script.
if __name__ == '__main__':
    MemorizerApp().run()
| null | memorizer.py | memorizer.py | py | 1,076 | python | en | code | null | code-starcoder2 | 51 |
152628260 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 18 21:30:29 2017
@author: rimikamajumdar
"""
# Exercise 44: Inheritance versus Composition
# Inheritance: Override Explicitly
class Parent(object):
    """Base class whose override() may be replaced by subclasses."""
    def override(self):
        message = "PARENT override()"
        print(message)
class Child(Parent):
    """Subclass demonstrating an explicit override: defining a method with
    the same name replaces the parent's behaviour."""
    def override(self):
        message = "CHILD override()"
        print(message)
# Demonstration: the same call dispatches to each object's own override().
dad = Parent()
son = Child()
dad.override()
son.override()
'''
Output is the following:
PARENT override()
CHILD override()
''' | null | ex44/ex44b.py | ex44b.py | py | 625 | python | en | code | null | code-starcoder2 | 51 |
409642494 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 03 09:40:19 2014
@author: Kyle Ellefsen
"""
from __future__ import (absolute_import, division,print_function, unicode_literals)
from future.builtins import (bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
import numpy as np
import scipy
import global_vars as g
import scipy.ndimage
from skimage import feature
from skimage.filters import threshold_adaptive
from process.BaseProcess import BaseProcess, SliderLabel, WindowSelector, MissingWindowError
from PyQt4.QtGui import *
from PyQt4.QtCore import *
__all__ = ['threshold','remove_small_blobs','adaptive_threshold','logically_combine','binary_dilation','binary_erosion']
def convert2uint8(tif):
    """Linearly rescale *tif* to the full uint8 range [0, 255].

    Robustness fix: a constant (flat) image previously divided by zero;
    it now maps to all zeros.
    """
    oldmin = np.min(tif)
    oldmax = np.max(tif)
    newmax = 2**8 - 1
    if oldmax == oldmin:
        # Rescaling a flat image is undefined; return zeros of same shape.
        return np.zeros_like(tif, dtype=np.uint8)
    tif = ((tif - oldmin) * newmax) / (oldmax - oldmin)
    tif = tif.astype(np.uint8)
    return tif
class Threshold(BaseProcess):
    """threshold(value, darkBackground=False, keepSourceWindow=False)
    Creates a boolean matrix by applying a threshold
    Parameters:
    | value (float) -- The threshold to be applied
    | darkBackground (bool) -- If this is True, pixels below the threshold will be True
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        # Build the dialog: a value slider (ranged to the current image's
        # min/max when a window is open) plus the two checkboxes.
        self.gui_reset()
        valueSlider=SliderLabel(2)
        if g.m.currentWindow is not None:
            image=g.m.currentWindow.image
            valueSlider.setRange(np.min(image),np.max(image))
            valueSlider.setValue(np.mean(image))
        preview=QCheckBox()
        preview.setChecked(True)
        self.items.append({'name':'value','string':'Value','object':valueSlider})
        self.items.append({'name':'darkBackground','string':'Dark Background','object':QCheckBox()})
        self.items.append({'name':'preview','string':'Preview','object':preview})
        super().gui()
    def __call__(self,value,darkBackground=False, keepSourceWindow=False):
        # Threshold the whole stack and emit the result as a uint8 window.
        self.start(keepSourceWindow)
        if darkBackground:
            newtif=self.tif<value
        else:
            newtif=self.tif>value
        self.newtif=newtif.astype(np.uint8)
        self.newname=self.oldname+' - Thresholded '+str(value)
        return self.end()
    def preview(self):
        # Live preview: threshold only the currently displayed frame.
        value=self.getValue('value')
        preview=self.getValue('preview')
        darkBackground=self.getValue('darkBackground')
        nDim=len(g.m.currentWindow.image.shape)
        if preview:
            if nDim==3: # if the image is 3d
                testimage=np.copy(g.m.currentWindow.image[g.m.currentWindow.currentIndex])
            elif nDim==2:
                testimage=np.copy(g.m.currentWindow.image)
            if darkBackground:
                testimage=testimage<value
            else:
                testimage=testimage>value
            g.m.currentWindow.imageview.setImage(testimage,autoLevels=False)
            g.m.currentWindow.imageview.setLevels(-.1,1.1)
        else:
            # Preview off: restore the original image and display levels.
            g.m.currentWindow.reset()
            if nDim==3:
                image=g.m.currentWindow.image[g.m.currentWindow.currentIndex]
            else:
                image=g.m.currentWindow.image
            g.m.currentWindow.imageview.setLevels(np.min(image),np.max(image))
threshold=Threshold()
class BlocksizeSlider(SliderLabel):
    """Slider restricted to odd values (adaptive thresholding requires an
    odd block size).

    NOTE(review): the nesting of the setValue calls below was reconstructed
    from flattened source — confirm against the upstream file.
    """
    def __init__(self,demicals=0):
        SliderLabel.__init__(self,demicals)
    def updateSlider(self,value):
        # Even values are nudged to the nearest odd number in the direction
        # of travel, then the label is re-synchronized.
        if value%2==0:
            if value<self.slider.value():
                value-=1
            else:
                value+=1
            self.label.setValue(value)
        self.slider.setValue(int(value*10**self.decimals))
    def updateLabel(self,value):
        # Even values are rounded down to odd before updating the label.
        if value%2==0:
            value-=1
        self.label.setValue(value)
class Adaptive_threshold(BaseProcess):
    """adaptive_threshold(value, block_size, darkBackground=False, keepSourceWindow=False)
    Creates a boolean matrix by applying an adaptive threshold using the scikit-image threshold_adaptive function
    Parameters:
    | value (int) -- The threshold to be applied
    | block_size (int) -- size of a pixel neighborhood that is used to calculate a threshold value for the pixel. Must be an odd number greater than 3.
    | darkBackground (bool) -- If this is True, pixels below the threshold will be True
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        # Offset slider plus an odd-only block-size slider capped at half
        # the current image's largest dimension.
        self.gui_reset()
        valueSlider=SliderLabel(2)
        valueSlider.setRange(-20,20)
        valueSlider.setValue(0)
        block_size=BlocksizeSlider(0)
        if g.m.currentWindow is not None:
            max_block=int(max([g.m.currentWindow.image.shape[-1],g.m.currentWindow.image.shape[-2]])/2)
            block_size.setRange(3,max_block)
        preview=QCheckBox(); preview.setChecked(True)
        self.items.append({'name':'value','string':'Value','object':valueSlider})
        self.items.append({'name':'block_size','string':'Block Size','object':block_size})
        self.items.append({'name':'darkBackground','string':'Dark Background','object':QCheckBox()})
        self.items.append({'name':'preview','string':'Preview','object':preview})
        super().gui()
        self.preview()
    def __call__(self,value,block_size,darkBackground=False, keepSourceWindow=False):
        # 2-D images are thresholded directly; 3-D stacks frame by frame.
        self.start(keepSourceWindow)
        nDim=len(self.tif.shape)
        newtif=np.copy(self.tif)
        if nDim==2:
            newtif = threshold_adaptive(newtif,block_size,offset=value)
        else:
            for i in np.arange(len(newtif)):
                newtif[i] = threshold_adaptive(newtif[i],block_size,offset=value)
        if darkBackground:
            newtif=np.logical_not(newtif)
        self.newtif=newtif.astype(g.m.settings['internal_data_type'])
        self.newname=self.oldname+' - Thresholded '+str(value)
        return self.end()
    def preview(self):
        # Live preview on the currently displayed frame only.
        value=self.getValue('value')
        block_size=self.getValue('block_size')
        preview=self.getValue('preview')
        darkBackground=self.getValue('darkBackground')
        nDim=len(g.m.currentWindow.image.shape)
        if preview:
            if nDim==3: # if the image is 3d
                testimage=np.copy(g.m.currentWindow.image[g.m.currentWindow.currentIndex])
            elif nDim==2:
                testimage=np.copy(g.m.currentWindow.image)
            testimage = threshold_adaptive(testimage,block_size,offset=value)
            if darkBackground:
                testimage=np.logical_not(testimage)
            testimage=testimage.astype(g.m.settings['internal_data_type'])
            g.m.currentWindow.imageview.setImage(testimage,autoLevels=False)
            g.m.currentWindow.imageview.setLevels(-.1,1.1)
        else:
            # Preview off: restore the original image and display levels.
            g.m.currentWindow.reset()
            if nDim==3:
                image=g.m.currentWindow.image[g.m.currentWindow.currentIndex]
            else:
                image=g.m.currentWindow.image
            g.m.currentWindow.imageview.setLevels(np.min(image),np.max(image))
adaptive_threshold=Adaptive_threshold()
class Canny_edge_detector(BaseProcess):
    """canny_edge_detector(sigma, keepSourceWindow=False)
    Parameters:
    | sigma (float) -- Standard deviation of the Gaussian smoothing applied
      before edge detection (passed to skimage.feature.canny).
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        # A single sigma slider (only ranged when a window is open).
        self.gui_reset()
        sigma=SliderLabel(2)
        if g.m.currentWindow is not None:
            sigma.setRange(0,1000)
            sigma.setValue(1)
        preview=QCheckBox(); preview.setChecked(True)
        self.items.append({'name':'sigma','string':'Sigma','object':sigma})
        self.items.append({'name':'preview','string':'Preview','object':preview})
        super().gui()
        self.preview()
    def __call__(self,sigma, keepSourceWindow=False):
        # 2-D images are processed directly; 3-D stacks frame by frame.
        self.start(keepSourceWindow)
        nDim=len(self.tif.shape)
        newtif=np.copy(self.tif)
        if nDim==2:
            newtif=feature.canny(self.tif,sigma)
        else:
            for i in np.arange(len(newtif)):
                newtif[i] = feature.canny(self.tif[i],sigma)
        self.newtif=newtif.astype(g.m.settings['internal_data_type'])
        self.newname=self.oldname+' - Canny '
        return self.end()
    def preview(self):
        # Live preview on the currently displayed frame only.
        sigma=self.getValue('sigma')
        preview=self.getValue('preview')
        nDim=len(g.m.currentWindow.image.shape)
        if preview:
            if nDim==3: # if the image is 3d
                testimage=np.copy(g.m.currentWindow.image[g.m.currentWindow.currentIndex])
            elif nDim==2:
                testimage=np.copy(g.m.currentWindow.image)
            testimage=feature.canny(testimage,sigma)
            g.m.currentWindow.imageview.setImage(testimage,autoLevels=False)
            g.m.currentWindow.imageview.setLevels(-.1,1.1)
        else:
            # Preview off: restore the original image and display levels.
            g.m.currentWindow.reset()
            if nDim==3:
                image=g.m.currentWindow.image[g.m.currentWindow.currentIndex]
            else:
                image=g.m.currentWindow.image
            g.m.currentWindow.imageview.setLevels(np.min(image),np.max(image))
canny_edge_detector=Canny_edge_detector()
class Logically_combine(BaseProcess):
    """ logically_combine(window1, window2,operator, keepSourceWindow=False)
    Combines two windows according to the operator
    Parameters:
    | window1 (Window)
    | window2 (Window)
    | operator (str) -- one of 'AND', 'OR', 'XOR'
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        # Two window pickers plus an operator combo box.
        self.gui_reset()
        window1=WindowSelector()
        window2=WindowSelector()
        operator=QComboBox()
        operator.addItem('AND')
        operator.addItem('OR')
        operator.addItem('XOR')
        self.items.append({'name':'window1','string':'Window 1','object':window1})
        self.items.append({'name':'window2','string':'Window 2','object':window2})
        self.items.append({'name':'operator','string':'Operator','object':operator})
        super().gui()
    def __call__(self,window1, window2,operator,keepSourceWindow=False):
        # This process manages its own lifecycle instead of calling
        # self.start(), because it consumes two source windows.
        self.keepSourceWindow=keepSourceWindow
        g.m.statusBar().showMessage('Performing {}...'.format(self.__name__))
        if window1 is None or window2 is None:
            raise(MissingWindowError("You cannot execute '{}' without selecting a window first.".format(self.__name__)))
        if window1.image.shape!=window2.image.shape:
            # Shapes must match element-wise; bail out with a status message.
            g.m.statusBar().showMessage('The two windows have images of different shapes. They could not be combined')
            return None
        if operator=='AND':
            self.newtif=np.logical_and(window1.image,window2.image)
        elif operator=='OR':
            self.newtif=np.logical_or(window1.image,window2.image)
        elif operator=='XOR':
            self.newtif=np.logical_xor(window1.image,window2.image)
        self.oldwindow=window1
        self.oldname=window1.name
        self.newname=self.oldname+' - Logical {}'.format(operator)
        if keepSourceWindow is False:
            # window1 is handled by end(); the second source is closed here.
            window2.close()
        g.m.statusBar().showMessage('Finished with {}.'.format(self.__name__))
        return self.end()
logically_combine=Logically_combine()
class Remove_small_blobs(BaseProcess):
    """remove_small_blobs(rank, value, keepSourceWindow=False)
    Finds all contiguous 'True' pixels in rank dimensions. Removes regions which have fewer than the specified pixels.
    Parameters:
    | rank (int) -- The number of dimensions. If rank==2, each frame is treated independently
    | value (int) -- The size (in pixels) below which each contiguous region must be in order to be discarded.
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        self.gui_reset()
        rank=QSpinBox()
        rank.setRange(2,3)
        value=QSpinBox()
        value.setRange(1,100000)
        self.items.append({'name':'rank','string':'Number of Dimensions','object':rank})
        self.items.append({'name':'value','string':'Value','object':value})
        super().gui()
    def __call__(self,rank,value,keepSourceWindow=False):
        self.start(keepSourceWindow)
        oldshape=self.tif.shape
        # Label connected regions using nearest-neighbour connectivity.
        s=scipy.ndimage.generate_binary_structure(rank,1)
        labeled_array, num_features = scipy.ndimage.measurements.label(self.tif, structure=s)
        # Work on a flattened copy: fn() receives flat positions per label.
        B=np.copy(self.tif.reshape(np.size(self.tif)))
        def fn(val, pos):
            # Zero out any labelled region at or below the size threshold.
            if len(pos)<=value:
                B[pos]=0
        lbls = np.arange(1, num_features+1)
        scipy.ndimage.labeled_comprehension(self.tif, labeled_array, lbls, fn, float, 0, True)
        self.newtif=np.reshape(B,oldshape).astype(g.m.settings['internal_data_type'])
        self.newname=self.oldname+' - Removed Blobs '+str(value)
        return self.end()
remove_small_blobs=Remove_small_blobs()
class Binary_Dilation(BaseProcess):
    """binary_dilation(rank,connectivity,iterations, keepSourceWindow=False)
    Performs a binary dilation on a binary image. The 'False' pixels neighboring 'True' pixels become converted to 'True' pixels.
    Parameters:
    | rank (int) -- The number of dimensions to dilate. Can be either 2 or 3.
    | connectivity (int) -- `connectivity` determines the distance to dilate.
    `connectivity` may range from 1 (no diagonal elements are neighbors)
    to `rank` (all elements are neighbors).
    | iterations (int) -- How many times to repeat the dilation
    | darkBackground (bool) -- If this is True, pixels below the threshold will be True
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        self.gui_reset()
        rank=QSpinBox()
        rank.setRange(2,3)
        connectivity=QSpinBox()
        connectivity.setRange(1,3)
        iterations=QSpinBox()
        iterations.setRange(1,100)
        self.items.append({'name':'rank','string':'Number of Dimensions','object':rank})
        self.items.append({'name':'connectivity','string':'Connectivity','object':connectivity})
        self.items.append({'name':'iterations','string':'Iterations','object':iterations})
        super().gui()
    def __call__(self,rank,connectivity,iterations, keepSourceWindow=False):
        self.start(keepSourceWindow)
        if len(self.tif.shape)==3 and rank==2:
            # 3-D stack dilated frame-by-frame: zero the time axis of the
            # structuring element so frames stay independent.
            s=scipy.ndimage.generate_binary_structure(3,connectivity)
            s[0]=False
            s[2]=False
        else:
            s=scipy.ndimage.generate_binary_structure(rank,connectivity)
        self.newtif=scipy.ndimage.morphology.binary_dilation(self.tif,s,iterations)
        self.newtif=self.newtif.astype(g.m.settings['internal_data_type'])
        self.newname=self.oldname+' - Dilated '
        return self.end()
binary_dilation=Binary_Dilation()
class Binary_Erosion(BaseProcess):
    """binary_erosion(rank, connectivity, iterations, keepSourceWindow=False)
    Performs a binary erosion on a binary image: 'True' pixels neighboring
    'False' pixels are converted to 'False' pixels.
    Parameters:
    | rank (int) -- The number of dimensions to erode. Can be either 2 or 3.
    | connectivity (int) -- determines the distance to erode; may range from
    1 (no diagonal elements are neighbors) to `rank` (all elements are
    neighbors).
    | iterations (int) -- How many times to repeat the erosion
    Returns:
    newWindow
    """
    def __init__(self):
        super().__init__()
    def gui(self):
        self.gui_reset()
        rank=QSpinBox()
        rank.setRange(2,3)
        connectivity=QSpinBox()
        connectivity.setRange(1,3)
        iterations=QSpinBox()
        iterations.setRange(1,100)
        self.items.append({'name':'rank','string':'Number of Dimensions','object':rank})
        self.items.append({'name':'connectivity','string':'Connectivity','object':connectivity})
        self.items.append({'name':'iterations','string':'Iterations','object':iterations})
        super().gui()
    def __call__(self,rank,connectivity,iterations, keepSourceWindow=False):
        self.start(keepSourceWindow)
        if len(self.tif.shape)==3 and rank==2:
            # 3-D stack eroded frame-by-frame: zero the time axis of the
            # structuring element so frames stay independent.
            s=scipy.ndimage.generate_binary_structure(3,connectivity)
            s[0]=False
            s[2]=False
        else:
            s=scipy.ndimage.generate_binary_structure(rank,connectivity)
        self.newtif=scipy.ndimage.morphology.binary_erosion(self.tif,s,iterations)
        self.newtif=self.newtif.astype(g.m.settings['internal_data_type'])
        # Bug fix: the window-name suffix previously said ' - Dilated '
        # (copy-pasted from Binary_Dilation).
        self.newname=self.oldname+' - Eroded '
        return self.end()
binary_erosion=Binary_Erosion()
| null | process/binary.py | binary.py | py | 16,297 | python | en | code | null | code-starcoder2 | 51 |
335833383 | from urllib.request import urlopen
def newstr(link):
    """Fetch *link* and return the response body as a normal string.

    (Translated from the original Chinese docstring, which credited the
    assignment hand-out.)  Robustness fix: the context manager guarantees
    the connection is closed even when read() or decode() raises.
    """
    with urlopen(link) as doc:
        docstr = doc.read()
    return docstr.decode()
def answer(newstr):
    """Extract the converted-currency amount from the server reply as a
    float.  (Author: Peng Jinshan; translated docstring.)

    Note: the parameter name shadows the module-level newstr() helper; it
    is kept for backward compatibility with keyword callers.
    """
    tokens = newstr.split()
    # Token 9 looks like '"8.38095'; drop the leading quote and parse.
    return float(tokens[9][1:])
def exchange(currency_from, currency_to, amount_from):
    """Convert *amount_from* of *currency_from* into *currency_to* using
    the CS1110 currency server; return the converted amount as a float.
    (Author: Peng Jinshan; translated docstring.)

    Removed the unused ``ori`` variable from the original.
    """
    query = ('http://cs1110.cs.cornell.edu/2016fa/a1server.php?from='
             + str(currency_from) + '&to=' + str(currency_to)
             + '&amt=' + str(amount_from))
    # Fetch the JSON-like reply and pull the converted amount out of it.
    return answer(newstr(' ' + query))
# Example input/output: read "FROM TO AMOUNT" from stdin, print the result.
currency_from,currency_to,amount_from=input().split()
print(exchange(currency_from,currency_to,amount_from))
def a1():
    """Test 1 (Peng Jinshan): verify the raw URL-fetch helper."""
    expected = '{ "from" : "", "to" : "", "success" : false, "error" : "Source currency code is invalid." }'
    assert expected == newstr(' http://cs1110.cs.cornell.edu/2016fa/a1server.php? ')
def a2():
    """Test 2 (Peng Jinshan): verify the amount-extraction helper."""
    reply = '{ "from" : "10 United States Dollars", "to" : "8.38095 Euros", "success" : true, "error" : "" }'
    assert 8.38095 == answer(reply)
def a3():
    """Test 3 (Peng Jinshan): verify the end-to-end conversion result."""
    assert 8.38095 == exchange('USD', 'EUR', 10)
def testall():
    """Run every test function; print confirmation when all pass.
    (Author: Peng Jinshan; translated docstring.)"""
    for check in (a1, a2, a3):
        check()
    print("All tests passed")
| null | pyassign2/currency.py | currency.py | py | 1,765 | python | en | code | null | code-starcoder2 | 51 |
52831665 | import requests
from operator import itemgetter
# Fetch the ids of the current Hacker News top stories.
url='https://hacker-news.firebaseio.com/v0/topstories.json'
r= requests.get(url)
# Parse the JSON body: a list of story ids.
submission_ids = r.json()
# Collected {title, link, comments} dicts, one per story.
submission_dicts = []
# Fetch details for each of the top 30 stories.
for submission_id in submission_ids[:30]:
    # Per-item endpoint describing a single story.
    url = ("https://hacker-news.firebaseio.com/v0/item/"+str(submission_id)+".json")
    submission_r = requests.get(url)
    # Show the HTTP status of each API call (200 = OK).
    print(submission_r.status_code)
    response_dict = submission_r.json()
    # Keep the title, a discussion link, and the comment count
    # ('descendants' is missing for some items, so default to 0).
    submission_dict = {
        'title': response_dict['title'],
        'link': "http://news.ycombinator.com/item?id="+str(submission_id),
        'comments': response_dict.get('descendants', 0)
    }
    submission_dicts.append(submission_dict)
# Sort stories by comment count, most-commented first.
submission_dicts= sorted(submission_dicts, key=itemgetter('comments'), reverse=True)
# Print a three-line summary for every story.
for submission_dict in submission_dicts:
    print("\nTitle:", submission_dict['title'])
    print("Discussion link:", submission_dict['link'])
    print("Comments:", submission_dict['comments'])
| null | API call in Python/hn_submissions.py | hn_submissions.py | py | 1,638 | python | en | code | null | code-starcoder2 | 51 |
410899419 | # -*- coding: utf-8 -*-
"""
"""
#%% download latest translations from crowdin
#pip install crowdin-cli-py --upgrade
import subprocess
# crowdin-cli-py exits 1 on failure; stop instead of building stale catalogs
if subprocess.call(['crowdin-cli-py', 'download'])==1:
    raise ValueError("Download failes, maybe you need to adjust paths in corwdin.yaml!")
#%% Build translations
#pip install polib
import polib
from glob import glob
# locale codes (e.g. 'de_DE') sliced out of 'lang/<locale>/LC_MESSAGES/'
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
# compile each .po catalog into the binary .mo form gettext loads at runtime
for p in paths:
    print("build %s"%p)
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/customize.po'%p)
        po.save_as_mofile('lang/%s/LC_MESSAGES/customize.mo'%p)
    except OSError:
        print("no customize.po found")
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)
        po.save_as_mofile('lang/%s/LC_MESSAGES/update.mo'%p)
    except OSError:
        print("no update.po found")
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/site.po'%p)
        po.save_as_mofile('lang/%s/LC_MESSAGES/site.mo'%p)
    except OSError:
        print("no site.po found")
#%% Build minified version
#pip install ply==3.4
#pip install slimit
from slimit import minify
def read_file(name):
    """Read the whole text file *name* as UTF-8 and return its contents.

    Fix: the previous body returned ``"".join(str(f.read()))`` — a no-op
    wrapping of the string returned by ``f.read()``.
    """
    with open(name, "r", encoding="utf-8") as f:
        return f.read()
def write_file(name, string):
    """Overwrite the file *name* with *string*, encoded as UTF-8."""
    with open(name, mode="w", encoding="utf-8") as out:
        out.write(string)
# license header prepended to the published minified bundle
add="""//(c)2017, MIT Style License <browser-update.org/LICENSE.txt>
//it is recommended to directly link to this file because we update the detection code
"""
text=read_file("update.js")
minned=minify(text, mangle=False, mangle_toplevel=False)
write_file("update.min.js",add+minned)
text=read_file("update.show.js")
minned=minify(text, mangle=False, mangle_toplevel=False)
write_file("update.show.min.js",minned)
# build npm versions of the script
import re
t_upjs=read_file("update.js")
# npm consumers call the export themselves instead of the script auto-running
t_upjs=t_upjs.replace("""$buo(window.$buoop);""","""module.exports = $buo;\n""")
write_file("update.npm.js",t_upjs)
#combine both files into a single one (inline the dynamic loader)
t_upjs=t_upjs.replace("""var e=document.createElement("script");
e.src = op.jsshowurl||(/file:/.test(location.href) && "http://browser-update.org/update.show.min.js") || "//browser-update.org/update.show.min.js";
document.body.appendChild(e);
""","$buo_show();")
# tag the embedded version string so served variants are distinguishable
t_upjs_npm=re.sub(r'jsv="([^"]*)";','jsv="\\1npm";',t_upjs)
t_showjs=read_file("update.show.js")
t_showjs=t_showjs.replace("""$buo_show();""","")
write_file("update.npm.full.js",t_upjs_npm+t_showjs)
#build cloudflare versions
t_upjs_cf=re.sub(r'jsv="([^"]*)";','jsv="\\1cf";',t_upjs)
write_file("update.cloudflare.js",t_upjs_cf+t_showjs)
#%%
# NOTE(review): upload() and clear_cache() are not defined anywhere in this
# file — presumably supplied by the interactive (#%% cell) session. Running
# the script top-to-bottom raises NameError here.
upload()
#
clear_cache()
#%% publish to npm
import subprocess
subprocess.call(['npm', 'publish'])
#%% Convert strings to javascript format
st='<b>Your web browser ({brow_name}) is out-of-date</b>. Update your browser for more security, comfort and the best experience on this site. <a{up_but}>Update browser</a> <a{ignore_but}>Ignore</a>'
import polib
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
# emit a javascript translation table entry for each locale's msgstr of `st`
for p in paths:
    #print("build %s"%p)
    #if p[:2] not in ["vi","hi","sk"]:
    #    continue
    try:
        po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)
    except OSError:
        print("no update.po found")
    if p in ["rm_CH","en_SE"]:
        continue
    if p in ["zh_TW","sr_CS"]:
        for i in po:
            if i.msgid==st:
                print("t[\"%s\"]='%s';"%(p[:5].lower().replace("_","-"),i.msgstr.replace("\n","").replace("'","\\'")))
                break
    else:
        for i in po:
            if i.msgid==st:
                if i.msgstr!="":
                    print("t.%s='%s';"%(p[:2],i.msgstr.replace("\n","").replace("'","\\'")))
                else:
                    print("//t.%s='%s';"%(p[:2],""))
                break
#%% download maxmind geoip database
#wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz
#gunzip GeoLite2-Country.mmdb.gz
| null | manage/build.py | build.py | py | 4,124 | python | en | code | null | code-starcoder2 | 51 |
'''
Name: Muhammad Khan
Date: 03/20/2019
Assignment06
'''
import mailroom as m
import pytest as p
import os
import sys
def test_quit():
    """mailroom.quit() must terminate via sys.exit (raises SystemExit)."""
    with p.raises(SystemExit):
        assert m.quit()
def test_email_message():
    """email_message() should thank each donor for their latest donation."""
    # expected template; \r at line starts matches the module's dedent style
    msg = """
    \rDear {:},
    \rThank you so much for your generous donation of $ {:.2f}.
    \rBest Regards,
    \r           -Team"""
    for name, donation in m.donors_data.items():
        assert m.email_message(name,donation[-1]) == msg.format(name,
                                                                donation[-1])
def test_calculate_total_gift():
    """Each (donor, total) pair must equal the sum of that donor's donations."""
    for item in [items for items in m.calculate_total_gift()]:
        assert item[1] == sum(m.donors_data[item[0]][:])
def test_letter_format():
    """letter_format() must render the thank-you letter template verbatim."""
    msg = """Dear {:},
    Thank you so much for your kind donation of ${:.2f}. With that you have
    generously donated a total amount of ${:.2f} in your last {} donation(s).
    We must ensure you that your donations will be put to a very good use.
    Sincerely,
            -Team """
    for donor in m.calculate_total_gift():
        assert m.letter_format(*donor) == msg.format(*donor)
def test_sorted_list_desc():
    """First entry of the descending list must carry the largest total.

    NOTE(review): this reads m.donations, an attribute no other test uses
    (the rest use m.donors_data) — verify mailroom actually exposes it.
    """
    data = m.calculate_total_gift()
    assert m.sorted_list_desc()[0][1] == max([sum(data) for
                                              data in m.donations])
def test_send_letter_everyone():
    """send_letter_everyone() writes one letter file per known donor."""
    m.send_letter_everyone()
    assert os.path.exists("Letters")
    for file in os.listdir(os.path.join("Letters")):
        # file name layout: "<donor><15-char suffix>"; strip the suffix
        donor = file[0:-15]
        assert donor in m.donors_data.keys()
| null | students/MzKhan/lesson10/mailroom/test_mailroom.py | test_mailroom.py | py | 1,670 | python | en | code | null | code-starcoder2 | 51 |
from torch import nn
import torch
from torch.nn import Sequential
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from hw_asr.base import BaseModel
class RnnModel(BaseModel):
    """Vanilla RNN acoustic model: packed multi-layer RNN + linear output head."""
    def __init__(self, n_feats, n_class, fc_hidden=512, *args, **kwargs):
        super().__init__(n_feats, n_class, fc_hidden, *args, **kwargs)
        # number of stacked RNN layers; required key (KeyError if missing)
        self.n_layers = kwargs['n_layers']
        self.fc_hidden = fc_hidden
        # batch_first=True: inputs are (batch, time, n_feats)
        self.rnn = nn.RNN(n_feats, fc_hidden, self.n_layers, batch_first=True)
        self.fc = nn.Linear(in_features=fc_hidden, out_features=n_class)
    def forward(self, spectrogram, *args, **kwargs):
        # pack so the RNN skips padded frames; lengths come from the batch dict
        packed_inputs = pack_padded_sequence(spectrogram, kwargs["spectrogram_length"],
                                             enforce_sorted=False, batch_first=True)
        out, _ = self.rnn(packed_inputs)
        out, _ = pad_packed_sequence(out, batch_first=True)
        out = self.fc(out)
        return {"logits": out}
    def transform_input_lengths(self, input_lengths):
        return input_lengths  # we don't reduce time dimension here
| null | hw_asr/model/rnn_model.py | rnn_model.py | py | 1,088 | python | en | code | null | code-starcoder2 | 51 |
68796074 |
import random
def build_random_list(size, max_value):
    """Return a list of *size* random ints drawn from [0, max_value).

    Parameters:
    size : the number of elements in the list
    max_value : the max random value to put in the list
    """
    return [random.randrange(0, max_value) for _ in range(size)]
# NOTE(review): rebinding the name `list` shadows the builtin; the demo
# statements below pass this global to locate()/count()/reverse().
list = build_random_list(10,10)
def locate(l, value):
    """Return the index of the first occurrence of *value* in *l*, or -1.

    BUG fix: the loop previously indexed the module-level global ``list``
    instead of the parameter ``l``, so the argument was ignored.
    """
    for index, item in enumerate(l):
        if item == value:
            return index
    return -1
def count(l, value):
    """Return how many elements of *l* compare equal to *value*."""
    return sum(1 for item in l if item == value)
def reverse(l):
    """Return a new list holding the elements of *l* in reverse order."""
    return list(reversed(l))
def isIncreasing(l):
    """Return True when *l* is strictly increasing.

    Robustness fix: an empty list is now vacuously True instead of raising
    IndexError on ``l[0]``. Non-empty behavior is unchanged.
    """
    return all(a < b for a, b in zip(l, l[1:]))
def palindrom(l):
    """Return True when *l* reads the same forwards and backwards.

    Simplified to a slice comparison; no longer depends on the sibling
    reverse() helper (same result, one pass, no temporary function call).
    """
    return l == l[::-1]
# Demo: exercise the helpers above on the random list plus two fixed examples.
print(list)
print("Locate 2")
print(locate(list,2))
print("Count 2")
print(count(list,2))
print("Reversed list:")
print(reverse(list))
print("")
isIncreasingList = [1,2,3,4]
print(isIncreasingList)
print("Is increasing?")
print(isIncreasing(isIncreasingList))
palindromList = [1,2,2,1]
print(palindromList)
print("Is palindrom?")
print(palindrom(palindromList))
| null | hw_04/lists.py | lists.py | py | 1,739 | python | en | code | null | code-starcoder2 | 51 |
import time, multiprocessing, queue
def sumPart(q, a1, a2):
    """Put the sum of the integers a1..a2 (inclusive) onto queue *q*."""
    q.put(sum(range(a1, a2 + 1)))
if __name__ == "__main__":
threadsCount = 2
limit = 2000000
da = int(limit / threadsCount) + (1 if (limit % threadsCount) > 0 else 0)
print (da)
threads = []
q = multiprocessing.Queue()
startTime = time.time()
a = 0
for _ in range(threadsCount):
aNew = a + da
t = multiprocessing.Process(target = sumPart, args = (q, a + 1, aNew))
t.start()
threads.append(t)
a = aNew
for i in range(len(threads)):
threads[i].join()
s = 0
for i in range(threadsCount):
s += q.get()
print (time.time() - startTime)
print (s) | null | multiprocessingORthread/stMP.py | stMP.py | py | 659 | python | en | code | null | code-starcoder2 | 51 |
from oscar.apps.shipping import methods
from oscar.core import prices
from decimal import Decimal as D
class Standard(methods.Base):
    """Flat-rate standard shipping: $5.00, tax included in the charge."""
    code = 'standard'
    name = 'Standard shipping'
    # fixed price, so the tax component is known up front
    is_tax_known = True
    excl_tax = D('5.00')
    incl_tax = D('5.00')
    def calculate(self, basket):
        # flat rate: the basket contents do not affect the price
        print('Calculating the shipping costs for Standard...')
        return self
class Express(methods.Base):
    """Flat-rate express shipping: $10.00, tax included in the charge."""
    code = 'express'
    name = 'Express shipping'
    # fixed price, so the tax component is known up front
    is_tax_known = True
    excl_tax = D('10.00')
    incl_tax = D('10.00')
    def calculate(self, basket):
        # flat rate: the basket contents do not affect the price
        print('Calculating the shipping costs for Express...')
        return self
## decoder group activity
import datetime
def main():
    """Prompt for decode/encode mode and dispatch to the matching helper."""
    De_Or_En = input("Do you want to decode or encode a message? (D/E):> ")
    if(De_Or_En == 'd' or De_Or_En == 'D'):
        theMessage = input("What message do you want to decode? \n:> ")
        # startTime = datetime.datetime.now()
        finalMsg, elapsedTime = decode(theMessage)
        # endTime = datetime.datetime.now()
        # elapsedTime = endTime - startTime
        prntTime(elapsedTime)
    else:
        theMessage = input("What message do you want to encode?\n:> ")
        # keep asking until the shift distance is in the valid 1..25 range
        while True:
            shiftDistance = int(input("Shift Distance (Please enter a number between 1 and 25) :> "))
            if(shiftDistance >= 1 and shiftDistance <= 25):
                break
        finalMsg = encode(theMessage, shiftDistance)
        print("Encoded Message =>", finalMsg, "\nShifted by", shiftDistance, "characters.")
def decode(msg):
    """Brute-force Caesar decode: try shift 1, 2, ... and ask the user to
    confirm each candidate.

    Returns (decodedMsg, elapsed) where elapsed is the datetime.timedelta
    spent until the user accepted a candidate.
    """
    startTime = datetime.datetime.now()
    distance = 1
    decodedMsg = ""
    while True:
        for ch in msg:
            ordvalue = ord(ch)
            ciphervalue = ordvalue - distance
            if ciphervalue < ord('a'):
                # wrap below 'a' back to the top of the a-z range
                # (assumes lowercase input — TODO confirm)
                ciphervalue = ord('z') - (distance + (ord('a') - ordvalue - 1))
            decodedMsg += chr(ciphervalue)
        ans = input("Decode Message => " + str(decodedMsg) + "\nIs this correct? (Y/N) :> ")
        if(ans == 'Y' or ans == 'y'):
            endTime = datetime.datetime.now()
            time = endTime - startTime
            break
        else:
            distance += 1
            decodedMsg = ""
    return decodedMsg, time
def prntTime(time):
    # Report an elapsed datetime.timedelta as seconds + microseconds.
    print("Seconds:", time.seconds, ", MicroSeconds:", time.microseconds)
def encode(msg, dist):
    """Caesar-encode *msg* by shifting each character *dist* positions.

    Wrap-around assumes lowercase a-z input; other characters are shifted
    blindly (unchanged behavior). Fix: removed the dead no-op ``dist = dist``.
    """
    encodedMsg = ""
    for ch in msg:
        ordvalue = ord(ch)
        ciphervalue = ordvalue + dist
        if ciphervalue > ord('z'):
            # wrap past 'z' back into the a-z range
            ciphervalue = ord('a') + dist - (ord('z') - ordvalue + 1)
        encodedMsg += chr(ciphervalue)
    return encodedMsg
# run the interactive decoder/encoder immediately when the module executes
main()
import pytest
from selenium import webdriver
from selenium.webdriver import ChromeOptions, FirefoxOptions
@pytest.fixture
def chrome_browser():
    """Yield a headless, fullscreen Chrome WebDriver; quit it on teardown."""
    options = ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--start-fullscreen')
    wd = webdriver.Chrome(options=options)
    yield wd
    wd.quit()
@pytest.fixture
def firefox_browser():
    """Yield a headless, fullscreen Firefox WebDriver; quit it on teardown."""
    options = FirefoxOptions()
    options.add_argument('--headless')
    options.add_argument('--start-fullscreen')
    wd = webdriver.Firefox(options=options)
    yield wd
    wd.quit()
def pytest_collection_modifyitems(items, config):
    """Keep only the tests that request the fixture named by --browser.

    Raises ValueError when the filter leaves no tests at all.
    """
    browser = config.getoption('browser')
    if browser is not None:
        selected = []
        deselected = []
        for item in items:
            # a test "belongs" to a browser iff it requests that fixture
            if browser in getattr(item, 'fixturenames'):
                selected.append(item)
            else:
                deselected.append(item)
        config.hook.pytest_deselected(items=deselected)
        items[:] = selected
        if not items:
            raise ValueError('Invalid browser name.')
def pytest_addoption(parser):
    """Register CLI options: --browser (fixture filter) and --opencart_url."""
    parser.addoption('--browser', help='Run tests only for certain browser.')
    parser.addoption('--opencart_url', default='http://127.0.0.1:8080/opencart/')
| null | hw-selenium-1/conftest.py | conftest.py | py | 1,263 | python | en | code | null | code-starcoder2 | 51 |
import itertools
import math
import sys
def input():
    # NOTE: shadows the builtin input() for faster stdin reads;
    # [:-1] strips the trailing newline.
    return sys.stdin.readline()[:-1]
# read N points from stdin
N = int(input())
XY = []
for _ in range(N):
    x, y = map(int, input().split())
    XY.append([x, y])
# total Euclidean distance over all C(N,2) unordered pairs,
# then averaged by dividing by N/2
sum_distance = 0
for x, y in list(itertools.combinations(range(N), 2)):
    sum_distance += math.sqrt(abs(XY[x][0]-XY[y][0])**2+abs(XY[x][1]-XY[y][1])**2)
print(sum_distance/(N/2))
| null | ABC_C/ABC145_C.py | ABC145_C.py | py | 379 | python | en | code | null | code-starcoder2 | 51 |
from app import app
from flask import Blueprint, jsonify, request, abort
from .infra import check_auth_token
from .models import Comment
from .service import Service
from .utils import response_message
comment_module = Blueprint('comment', __name__, url_prefix="/comments")
def web_filter():
    """Reject requests lacking a valid Authorization token (401).

    NOTE(review): not registered as a before_request hook in this file —
    presumably wired up elsewhere; verify it actually runs.
    """
    token = request.headers.get('Authorization')
    if not token:
        abort(401)
    if not check_auth_token(token):
        abort(401)
@comment_module.route('/', methods=['POST'], strict_slashes=False)
def add():
    """Create a comment from JSON {key, product_key, comments}; 201 on success.

    BUG fix: validation used ``assert ..., abort(400)`` inside the try block.
    abort() raises an HTTPException, which the broad ``except Exception``
    swallowed, so every validation failure came back as a 500 (and the
    asserts vanish entirely under ``python -O``). Validate before the try.
    """
    payload = request.json or {}
    for field in ('key', 'product_key', 'comments'):
        if payload.get(field) is None:
            abort(400)
    try:
        service = Service()
        comment = Comment(payload['key'],
                          payload['product_key'],
                          payload['comments'])
        service.add(comment)
        return response_message(201, 'Comment created successfully')
    except Exception as e:
        # boundary handler: log and map unexpected failures to a 500
        app.logger.error(e)
        return abort(500)
@comment_module.route('/<key>', methods=['PUT'], strict_slashes=False)
def edit(key):
    """Update a comment from JSON {key, product_key, comments}; 200 on success.

    BUG fix: ``assert ..., abort(400)`` ran inside the try block, so the
    HTTPException raised by abort() was caught by ``except Exception`` and
    returned as a 500; the asserts also disappear under ``python -O``.
    Validation now runs before the try.
    NOTE(review): the URL *key* is ignored — the JSON body's 'key' wins;
    confirm that is intended.
    """
    payload = request.json or {}
    for field in ('key', 'product_key', 'comments'):
        if payload.get(field) is None:
            abort(400)
    try:
        service = Service()
        comment = Comment(payload['key'],
                          payload['product_key'],
                          payload['comments'])
        service.edit(comment)
        return response_message(200, 'Comment edited successfully')
    except Exception as e:
        # boundary handler: log and map unexpected failures to a 500
        app.logger.error(e)
        return abort(500)
@comment_module.route('/<key>', methods=['GET'], strict_slashes=False)
def get(key):
    """Return the comment stored under *key* as JSON (200)."""
    try:
        service = Service()
        data = service.get(key)
        return jsonify(data), 200
    except Exception as e:
        # boundary handler: log and map any failure to a 500
        app.logger.error(e)
        return abort(500)
@comment_module.route('/', methods=['GET'], strict_slashes=False)
def find_all():
    """Return every stored comment as a JSON list (200)."""
    try:
        service = Service()
        data = service.find_all()
        return jsonify(data), 200
    except Exception as e:
        # boundary handler: log and map any failure to a 500
        app.logger.error(e)
        return abort(500)
@comment_module.route('/<key>', methods=['DELETE'], strict_slashes=False)
def delete(key):
    """Remove the comment stored under *key*; responds 204."""
    try:
        service = Service()
        service.delete(key)
        return response_message(204, 'Comment removed successfully')
    except Exception as e:
        # boundary handler: log and map any failure to a 500
        app.logger.error(e)
        return abort(500)
@comment_module.route('/product/<key>', methods=['GET'], strict_slashes=False)
def find_all_by_product_key(key):
    """Return all comments attached to the product *key* as JSON (200)."""
    try:
        service = Service()
        data = service.find_all_by_product_key(key)
        return jsonify(data), 200
    except Exception as e:
        # boundary handler: log and map any failure to a 500
        app.logger.error(e)
        return abort(500)
| null | comments-api/app/comment/comment_api.py | comment_api.py | py | 3,075 | python | en | code | null | code-starcoder2 | 51 |
import csv
import sqlite3
def load_csv(data, csv_file):
    """Append every data row of *csv_file* (header skipped) to the list *data*.

    Fix: open with ``newline=''`` as the csv module docs require, so the
    reader handles quoted newlines / platform line endings correctly.
    """
    with open(csv_file, newline='') as f:
        f_reader = csv.reader(f)
        # Skip Headers
        next(f_reader)
        data.extend(f_reader)
def create_table(name, c):
    """Create the player-class table *name* on cursor *c* if it is missing."""
    c.execute(f'''CREATE TABLE IF NOT EXISTS {name} (
        id integer PRIMARY KEY AUTOINCREMENT,
        name text NOT NULL,
        hp integer NOT NULL,
        phys_atk integer NOT NULL,
        mag_atk integer NOT NULL,
        phys_def integer NOT NULL,
        mag_def integer NOT NULL
        )''')
def populate_table(name, data, c):
    """Insert each row of *data* (name + five stats) into table *name*."""
    c.executemany(f'''INSERT INTO {name}(
        name,
        hp,
        phys_atk,
        mag_atk,
        phys_def,
        mag_def)
        VALUES (?,?,?,?,?,?)''', data)
if __name__ == "__main__":
# Settings
csv_file = "player_classes.csv"
database_name = "player_classes.db"
table_name = "Classes"
# Load data from CSV
data = []
load_csv(data, csv_file)
# Connect to database and create cursor
con = sqlite3.connect(database_name)
c = con.cursor()
# Delete previous table
c.execute("DROP TABLE IF EXISTS {}".format(table_name))
# Create table if it doesnt exist, then populate with data
create_table(table_name, c)
populate_table(table_name, data, c)
# Save changes and close connection
con.commit()
con.close()
| null | src/actors/player_class_seed.py | player_class_seed.py | py | 1,529 | python | en | code | null | code-starcoder2 | 51 |
#!/bin/python3
import math
import os
import random
import re
import sys
def calculateDNA(genes, health, first, last, d):
    """Sum the health of every occurrence in *d* of each gene whose index
    lies in [first, last].

    For each suffix of *d*, add health[i] for every in-range gene that is a
    prefix of that suffix (i.e. for every occurrence of the gene in *d*).

    Fix: replaces the try/except around ``st.index(g) == 0`` — which both
    used exceptions for control flow and scanned the whole suffix even when
    the gene only occurred later — with a direct ``startswith`` test.
    Results are identical.
    """
    total = 0
    for start in range(len(d)):
        suffix = d[start:]
        for i, gene in enumerate(genes):
            if first <= i <= last and suffix.startswith(gene):
                total += health[i]
    return total
if __name__ == '__main__':
    # Hard-coded sample input (format looks like HackerRank's
    # "Determining DNA Health" — verify). NOTE: max/min/sum below shadow
    # the builtins of the same names.
    n = 6
    genes = 'a b c aa d b'.rstrip().split()
    health = list(map(int, '1 2 3 4 5 6'.rstrip().split()))
    s = 3
    max = min = 0
    # for s_itr in range(s):
    firstLastd = '0 4 xyz'.split()
    first = int(firstLastd[0])
    last = int(firstLastd[1])
    d = firstLastd[2]
    sum = calculateDNA (genes , health , first , last , d)
    # track the smallest / largest score seen across queries
    if max < sum :
        max = sum
    if min == 0 or min > sum :
        min = sum
    print (str(min) + ' ' + str(max))
| null | Python/CalculateDNA.py | CalculateDNA.py | py | 970 | python | en | code | null | code-starcoder2 | 51 |
#!/usr/bin/env python
from __future__ import print_function
from astropy.io import fits
import numpy as np
import argparse
import sys
import os
def stack(files, out):
    """
    Combine a list of fits files into a single cube and save
    the output to out.

    The first file supplies the reference header and image shape; every
    file's primary HDU data is copied into one float32 cube.

    Fix: the reference file and each per-file HDU list were never closed;
    use context managers so handles are released promptly.

    Parameters
    ----------
    files : list
        List of files
    out : str
        Filename to save
    """
    with fits.open(files[0]) as ref:
        data = np.empty((len(files), ref[0].data.shape[-2], ref[0].data.shape[-1]),
                        dtype=np.float32)
        for i, f in enumerate(files):
            print('add {0}'.format(f))
            with fits.open(f) as hdu:
                data[i, :, :] = hdu[0].data
        ref[0].data = data
        ref.writeto(out, overwrite=True)
    print("wrote {0}".format(out))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group1 = parser.add_argument_group("Combine images into a cube")
group1.add_argument("--infile", dest='infile', type=str, default=None,
help="A list of fits images in a file. [optional]")
group1.add_argument("--in", dest='files', type=str, default=None, nargs='+',
help="Explicit list of files to include.")
group1.add_argument("--out", dest='outfile', type=str, default=None,
help="output filename")
results = parser.parse_args()
if (results.infile is None) and (results.files is None):
parser.print_help()
sys.exit(1)
if results.infile is not None:
files = [l.strip() for l in open(results.infile).readlines()]
else:
files = results.files
if len(files) < 2:
print("not enough files, need at least 2 to make a cube")
print("given {0}".format(files))
sys.exit(1)
stack(files=files, out=results.outfile)
| null | make_cube.py | make_cube.py | py | 1,808 | python | en | code | null | code-starcoder2 | 51 |
import logging
import json
import sys
from collections import namedtuple
from datetime import datetime, timedelta, time
from functools import wraps
from telegram.ext import CommandHandler, Updater
from telegram import ParseMode
from dbmanager import DBManager as dbm
from extras import *
# load config (expects config.json in the working directory)
with open("config.json") as f:
    config = json.load(f)
#logging: file targets and format come from the "log" section of config.json
log_config = config.get("log")
LOGFILE = log_config.get("debug")
BOTLOG = log_config.get("filename")
LOGFORMAT = log_config.get("logformat")
LOGLEVEL = logging.INFO
logging.basicConfig(format=LOGFORMAT, level=LOGLEVEL, filename=LOGFILE)
logger = logging.getLogger(__name__)
#handlers: mirror this module's records into the bot log file as well
filehandler = logging.FileHandler(BOTLOG)
filehandler.setLevel(LOGLEVEL)
formatter = logging.Formatter(LOGFORMAT)
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
PARSEMODE = ParseMode.MARKDOWN
# named tuple for unpacked update (filled by up_data)
Update = namedtuple('Update', 'username, user_id, text, date')
def help(func):
    """Decorator: when the user sends '/cmd help' (or '/cmd h'), reply with
    that command's help text instead of running the handler.

    NOTE: shadows the builtin help(); `helpdata` presumably comes from the
    star-import of extras — confirm.
    """
    @wraps(func)
    def wrapper(*a, **kw):
        update = a[1]  # handlers are invoked as (bot, update)
        text = update.message.text.split()
        if len(text) == 2 and text[1] in ['help', 'h']:
            helptext = helpdata.get(func.__name__)
            update.message.reply_text(helptext, parse_mode=PARSEMODE)
        else:
            return func(*a, **kw)
    return wrapper
def up_data(update):
    """Convenience function to unpack data from `telegram.Update`

    Returns
        `Update` namedtuple: (username, user_id, text, date)
    """
    message = update.message
    username = message.from_user.username
    user_id = message.from_user.id
    date = message.date
    text = message.text
    return Update._make([username, user_id, text, date])
def start(bot, update):
    """Handle /start: greet the user with the list of available commands."""
    upd = up_data(update)
    available_commands = "\n".join(["`/add`", "`/tasks`", "`/del`", "`/edit`", "`/done`"])
    update.message.reply_text(STARTTEXT.format(available_commands), parse_mode=PARSEMODE)
    logger.info(f"/start by '{upd.user_id}:{upd.username}'")
@help
def add_task(bot, update):
    """Handle /add [today|tomorrow|in N <period>] <text>: store a task."""
    upd = up_data(update)
    # parse input: drop the command word, let parse_date consume any
    # leading date expression; the remainder is the task text
    message = upd.text
    message = message.split()[1:]
    parsed = parse_date(message, update)
    if not parsed:
        update.message.reply_text("Specified timeperiod not found!")
        return
    message = parsed[1]
    day = datetime.strftime(parsed[0], DATEFORMAT)
    # add to db (one JSON store per user_id)
    with dbm(upd.user_id) as db:
        db.add(day, message)
    logger.info(f"Adding '{message}' for user '{upd.user_id}:{upd.username}' to '{day}'")
    update.message.reply_text("Updating tasklist ...")
@help
def get_task(bot, update):
    """Handle /tasks [date]: render the task list(s) with done markers.

    With no argument: every stored day. With a date/keyword: only that day.
    """
    upd = up_data(update)
    reply = ""
    message = upd.text.split()[1:]
    with dbm(upd.user_id) as db:
        if not message:
            data = db.get()
            day = datetime.strftime(upd.date, DATEFORMAT) # default get today
        else:
            day, _ = parse_date(message, update)
            day = datetime.strftime(day, DATEFORMAT)
            data = db.get(day)
            if not data:
                reply += f"*{day}* - "
    if not data:
        reply += "*Todo List* is empty!"
    elif len(data.keys()) == 1:
        # single day: the db may return {'tasks': ...} or {day: {'tasks': ...}}
        reply += f"*{day}*\n"
        try:
            data = data['tasks']
        except KeyError:
            try:
                data = data[day]['tasks']
            except KeyError:
                day, data = list(data.items())[0]
                data = data['tasks']
                reply = f"*{day}*\n"
        for num, task in data.items():
            # \u2705 = check mark, \u274c = cross mark
            if task['done']:
                reply += f"`{num})` \u2705 "
            else:
                reply += f"`{num})` \u274c "
            reply += f"{task['text']}\n"
    else:
        # multiple days: render each day's section, oldest first
        data = data.items()
        items = [(day, day_data) for day, day_data in data]
        items.sort(key=lambda x: x[0]) # sort by date ascending
        days = []
        for day, data in items:
            reply_piece = f"*{day}*\n"
            for num, task in data['tasks'].items():
                if task['done']:
                    reply_piece += f"`{num})` \u2705 "
                else:
                    reply_piece += f"`{num})` \u274c "
                reply_piece += f"{task['text']}\n"
            days.append(reply_piece)
        reply += "\n".join(days)
    update.message.reply_text(reply, parse_mode=PARSEMODE)
    logger.info(f"Getting tasks for '{upd.user_id}:{upd.username}'")
@help
def delete_task(bot, update):
    """Handle /del: delete a task, a whole day, or the entire database.

    Forms: ``/del all`` | ``/del <num>`` (today) | ``/del <date>`` |
    ``/del <date> <num>``.

    BUG fix: the "Deleting all tasks" log call was missing its f-prefix and
    logged the literal text ``{upd.user_id}:{upd.username}``.
    """
    upd = up_data(update)
    day = datetime.strftime(upd.date, DATEFORMAT)
    reply = ""
    message = upd.text.split()[1:]
    if not message:
        reply += "Tell me what to delete."
        logger.debug("/delete command empty")
        update.message.reply_text(reply)
        return
    with dbm(upd.user_id) as db:
        date_match = re.match(DATEREGEX, message[0])
        if len(message) == 1:
            if message[0] == 'all':
                db.delete(force=True)
                reply += "Deleting database"
                logger.info(f"Deleting all tasks for '{upd.user_id}:{upd.username}'")
            # Without specifying date default delete task from today
            if message[0].isdigit():
                try:
                    db.delete(day, message[0])
                    reply += f"Deleting task {message[0]} from *today*"
                    logger.info(f"Deleting '{message[0]}' on '{day}' for '{upd.user_id}:{upd.username}'")
                except KeyError:
                    reply += f"Task {message[0]} in list {day} not found!"
            if date_match:
                # keyword dates like 'tomorrow' are normalized to a real date
                if message[0] in tomorrow:
                    message[0] = datetime.strftime(upd.date+timedelta(days=1), DATEFORMAT)
                try:
                    if message[0] == 'today':
                        db.delete(day)
                        reply += "Deleting *today*"
                    else:
                        db.delete(message[0])
                        reply += f"Deleting day *{message[0]}*"
                    logger.info(f"Deleting '{message[0]}' for '{upd.user_id}:{upd.username}'")
                except KeyError:
                    reply += f"{message[0]} not found!"
            if not reply:
                reply += f"\"{message[0]}\" not found!"
        else:
            if not date_match:
                reply += f"{message[0]} not found!"
            else:
                if message[0] in tomorrow:
                    message[0] = datetime.strftime(upd.date+timedelta(days=1), DATEFORMAT)
                if message[1].isdigit():
                    try:
                        db.delete(message[0], message[1])
                        reply += f"Deleting task {message[1]} from {message[0]}"
                        logger.info(f"Deleting '{message[1]}' from '{message[0]}' for '{upd.user_id}:{upd.username}'")
                    except KeyError:
                        reply += f"Task {message[1]} not found in {message[0]}"
    update.message.reply_text(reply, parse_mode=PARSEMODE)
@help
def edit_task(bot, update):
    """Handle /edit: replace a task's text.

    Forms: ``/edit <num> <text>`` (today) | ``/edit <date> <num> <text>``.
    """
    upd = up_data(update)
    day = datetime.strftime(upd.date, DATEFORMAT)
    reply = ""
    message = upd.text.split()[1:]
    if not message:
        reply += "Tell me what task to edit"
    elif len(message) < 2:
        reply += "I didn't get that :(\nType: /edit _help_"
    else:
        with dbm(upd.user_id) as db:
            if message[0].isdigit():
                text = " ".join(message[1:])
                try:
                    db.edit(day, message[0], text)
                    reply += f"Editing task {message[0]} on {day}"
                    logger.info(f"Editing '{message[1]}' from '{day}' for '{upd.user_id}:{upd.username}'")
                except KeyError:
                    reply += f"Task {message[0]} not found!"
            else:
                if not message[1].isdigit():
                    reply += f"Second argument should be _task number_\nType: /edit _help_"
                else:
                    # `time` here is a date string; shadows datetime.time imported above
                    time = message[0]
                    date_match = re.match(DATEREGEX, time)
                    if date_match:
                        if time in tomorrow:
                            time = upd.date + timedelta(days=1)
                            time = str(time.date())
                    else:
                        update.message.reply_text(f"*\"{time}\"* not found!", parse_mode=PARSEMODE)
                        return
                    text = " ".join(message[2:])
                    try:
                        db.edit(time, message[1], text)
                        reply += f"Editing task {message[1]} on {time}"
                        logger.info(f"Editing '{message[1]}' from '{time}' for '{upd.user_id}:{upd.username}'")
                    except KeyError:
                        reply += f"Task _{message[1]}_ on *{time}* not found!"
    update.message.reply_text(reply, parse_mode=PARSEMODE)
@help
def done_task(bot, update):
    """Handle /done: toggle a task's done flag.

    Forms: ``/done <num>`` (today) | ``/done <date> <num>``.

    BUG fixes: the UNDONE branch of the dated form logged 'DONE'; the
    "not found" reply was missing its f-prefix and sent the literal text
    ``{time}`` to the user; a dated command without a task number crashed
    with IndexError; removed the dead trailing ``pass`` and the stray
    logger.debug (now logger.info, matching the other branch).
    """
    upd = up_data(update)
    # `time` here is a date string; shadows datetime.time imported above
    time = datetime.strftime(upd.date, DATEFORMAT)
    reply = ""
    message = upd.text.split()[1:]
    if not message:
        reply += "Which task?"
    else:
        with dbm(upd.user_id) as db:
            if message[0].isdigit():
                number = message[0]
                try:
                    done = db.done(time, number)
                    reply += f"Marking task {number} "
                    if done:
                        reply += "*DONE*"
                        logmessage = 'DONE'
                    else:
                        reply += "*UNDONE*"
                        logmessage = 'UNDONE'
                    logger.info(f"Marking '{number}' {logmessage} on '{time}' for '{upd.user_id}:{upd.username}'")
                except KeyError:
                    reply += f"Task {number} not found!"
            else:
                if len(message) < 2:
                    update.message.reply_text("Which task?")
                    return
                time = message[0]
                number = message[1]
                if not number.isdigit():
                    update.message.reply_text(f"*{number}* is not a digit!")
                    return
                date_match = re.match(DATEREGEX, time)
                if date_match:
                    if time in tomorrow:
                        time = datetime.strftime(upd.date + timedelta(days=1), DATEFORMAT)
                    time = str(time)
                    try:
                        done = db.done(time, number)
                        reply += f"Marking task {number} on {time} "
                        if done:
                            reply += "*DONE*"
                            logmessage = 'DONE'
                        else:
                            reply += "*UNDONE*"
                            logmessage = 'UNDONE'
                        logger.info(f"Marking '{number}' {logmessage} on '{time}' for '{upd.user_id}:{upd.username}'")
                    except KeyError:
                        reply += f"Task {number} on {time} not found!"
                else:
                    reply += f"*\"{time}\"* not found!"
    update.message.reply_text(reply, parse_mode=PARSEMODE)
def daily_maintenance(bot, job):
    """Moves all tasks from today to day after that at the end of the day"""
    dtoday = datetime.today()
    #dtoday = datetime.today() - timedelta(days=1)
    today = datetime.strftime(dtoday, DATEFORMAT)
    tomorrow = datetime.strftime(dtoday + timedelta(days=1), DATEFORMAT)
    # NOTE(review): `upd` is not defined in this scope — this raises
    # NameError the moment the job fires. The intended user id is unclear
    # (per-user dbs exist but no update is available in a job callback);
    # the job is currently not scheduled (jobq lines are commented out).
    with dbm(upd.user_id) as db:
        today_data = db.get(today)['tasks']
        db.add(tomorrow, today_data)
        db.delete(today)
    message = f"Moved {today} data to {tomorrow} at {dtoday.time().strftime('%H:%M:%S')}"
    logger.info(message)
    bot.send_message(chat_id=config['auth']['myid'], text=message)
def parse_date(datestring: list, update):
    """Calculates datetime.timedelta from natural input.

    If no input is found, defaults to today.

    NOTE(review): assumes `datestring` is non-empty, and that "in" is
    followed by two more words — shorter input raises IndexError.

    Returns:
        List[datetime.datetime, str(message)], or None when parsing fails
    """
    today = datetime.today()
    accepted_keywords = {'today': today,
                         'tomorrow': today + timedelta(days=1),
                         'tmr': today + timedelta(days=1)}
    response = []
    wordsused = 0
    if datestring[0] in accepted_keywords.keys():
        response.append(accepted_keywords[datestring[0]])
        wordsused += 1
    elif datestring[0] == "in":
        # Expected pattern is: int(n) str(timeperiod)
        # e.g. 2 days | 5 w | 3 months | 10 mins
        #make sure first arg is a number
        if not datestring[1].isdigit():
            update.message.reply_text("argument is not a digit")
            return None
        test = " ".join(datestring[1:3])
        match = match_re(test)
        # regex test (match_re / timeperiods come from extras)
        if not match:
            return None
        num, period = datestring[1:3]
        period = period.lower()
        num = int(num)
        if period[:2] == 'mo': # handle minute & month collision
            delta = timeperiods[period[:2]](num)
        else:
            delta = timeperiods[period[0]](num)
        response.append(today + delta)
        wordsused += 3
    else:
        response.append(today)
    # remainder of the words is the free-text task message
    response.append(" ".join(datestring[wordsused:]))
    return response
if __name__ == "__main__":
auth = config.get("auth")
con = config.get("con")
args = sys.argv[1:]
updater = Updater(token=auth.get("token"))
dispatcher = updater.dispatcher
jobq = updater.job_queue
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('add', add_task))
dispatcher.add_handler(CommandHandler('tasks', get_task))
dispatcher.add_handler(CommandHandler('del', delete_task))
dispatcher.add_handler(CommandHandler('edit', edit_task))
dispatcher.add_handler(CommandHandler('done', done_task))
#jobs
#jobq.run_daily(daily_maintenance, time=time(0,1))
#jobq.run_repeating(daily_maintenance, first=0, interval=600)
if args:
updater.start_webhook(listen="0.0.0.0",
port=con.get('port'),
url_path=con.get('path'),
key=con.get('key'),
cert=con.get('cert'),
webhook_url=con.get('url'))
else:
updater.start_polling()
| null | bot.py | bot.py | py | 14,398 | python | en | code | null | code-starcoder2 | 51 |
# Parse the RNA codon table: each line of rna_codon.txt holds four
# (codon, amino-acid) pairs laid out as alternating columns.
#
# Fixes: the first line was corrupted ("217803979 | file_obj = ..."),
# the file handle was never closed, and the codon-per-amino-acid count was
# an O(n*m) loop summing booleans; use `with` and collections.Counter.
from collections import Counter

with open("rna_codon.txt", "r") as file_obj:
    rows = [line.split() for line in file_obj]

rna_dict = {}
for row in rows:
    # columns alternate codon / amino acid: (0,1), (2,3), (4,5), (6,7)
    for codon, aa in zip(row[0::2], row[1::2]):
        rna_dict[codon] = aa

all_coded_aas = list(rna_dict.values())
amino_acids = list(set(all_coded_aas))
# how many codons encode each amino acid
_counts = Counter(all_coded_aas)
number_of_codons = {aa: _counts[aa] for aa in amino_acids}
| null | rosalind_py/read_in_codon.py | read_in_codon.py | py | 766 | python | en | code | null | code-starcoder2 | 51 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from HIGHLIGHTdataset import SEVDataset
import argparse
import numpy as np
import os
from tqdm import tqdm
import time
import IPython
from torchvision import datasets, transforms
import warnings
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data.dataloader import default_collate
from efficientnet_pytorch import EfficientNet # EfficientNet的使用需要倒入的库
from label_smooth import LabelSmoothSoftmaxCE
curdir = os.path.dirname(__file__)
def get_args():
    """Parse command-line hyperparameters for highlight-classifier training."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch-size", default=32, type=int)
    parser.add_argument("--lr", default=1e-3, type=float)
    parser.add_argument("--weight-decay", default=1e-3, type=float)
    parser.add_argument("--num-epoch", default=10, type=int)
    parser.add_argument("--save-interval", default=1, type=int)
    parser.add_argument("--step-interval", default=10, type=int)
    parser.add_argument("--step-save", default=1000, type=int)
    parser.add_argument("--evaluate-step", default=100, type=int)
    parser.add_argument("--save-dir", default=os.path.join(curdir, "highlightmodels/"))
    parser.add_argument("--total-updates", default=50000, type=int)
    parser.add_argument('--gradient-accumulation-steps',
                        type=int,
                        default=10,
                        help="Number of updates steps to accumualte before performing a backward/update pass.")
    parser.add_argument("--model-type", default='efficientnet-b0')
    parser.add_argument("--class_num", default=2, type=int)
    # NOTE(review): argparse `type=bool` is a known pitfall — any non-empty
    # string (including "False") parses as True; confirm intended usage.
    parser.add_argument("--feature_extract", default=True, type=bool)
    parser.add_argument("--cuda_num", default=2, type=int)
    args = parser.parse_args()
    return args
def efficientnet_params(model_name):
""" Map EfficientNet model name to parameter coefficients. """
params_dict = {
# Coefficients: width,depth,res,dropout
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
}
return params_dict[model_name]
def train(args):
print(args)
args.save_dir = args.save_dir + args.model_type
args.save_dir += "_highlight_"
args.save_dir = args.save_dir + time.strftime('%Y-%m-%d-%H-%M-%S')
os.makedirs(args.save_dir, exist_ok=True)
print(args.save_dir, 'make!')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.set_device(args.cuda_num)
print(device)
if torch.cuda.is_available():
print('device: ', torch.cuda.current_device())
data_transform = transforms.Compose([
transforms.Resize(efficientnet_params(args.model_type)[2]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
train_set = SEVDataset('/mnt/sda1/songzimeng/highlightdata/train/', transform=data_transform)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)
valid_set = SEVDataset('/mnt/sda1/songzimeng/highlightdata/valid/', transform=data_transform)
valid_loader = DataLoader(valid_set, batch_size=args.batch_size, shuffle=True)
model = EfficientNet.from_pretrained(args.model_type, num_classes=args.class_num).to(device)
# model._fc.out_features = args.class_num
params_to_update = model.parameters()
print("Params to learn:")
if args.feature_extract:
params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model.named_parameters():
if param.requires_grad == True:
print("\t", name)
criterion = LabelSmoothSoftmaxCE()
optimizer = optim.Adam(params_to_update, lr=args.lr, betas=(0.9, 0.999), eps=1e-9)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.7, patience=3, verbose=True)
global_step = 0
# evaluate(model, valid_set)
#step_interval = int(64 / args.batch_size) * 25
#print('step_interval: ', step_interval)
best_acc = 0
for epoch in range(args.num_epoch):
print('epoch: ', epoch+1)
losses = []
total = 0
correct = 0
for step, samples in enumerate(train_loader, 0):
model.train()
imgs, labels = samples['image'].to(device).float(), samples['label'].to(device)
optimizer.zero_grad()
outputs = model(imgs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += predicted.eq(labels).cpu().sum()
if (step + 1) % args.step_interval == 0:
print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '
% (epoch + 1, step + 1, np.mean(losses), 100. * float(correct) / float(total)))
if (step + 1) % args.step_save == 0:
torch.save(model, args.save_dir + "/highlight_step_save.pt")
losses = []
total = 0
correct = 0
with torch.no_grad():
print('Evaluate')
eval_correct = 0
eval_total = 0
evaluate_step = 0
for samples in valid_loader:
model.eval()
imgs, labels = samples['image'].to(device).float(), samples['label'].to(device)
outputs = model(imgs)
# 取得分最高的那个类 (outputs.data的索引号)
_, predicted = torch.max(outputs.data, 1)
eval_total += labels.size(0)
eval_correct += (predicted == labels).cpu().sum()
evaluate_step += 1
if evaluate_step >= args.evaluate_step :
break
print('Evaluated acc:%.3f%%' % (100. * float(eval_correct) / float(eval_total)))
acc = 100. * float(eval_correct) / float(eval_total)
#scheduler.step(acc)
if acc > best_acc:
torch.save(model, args.save_dir + "/highlight_best_save.pt")
best_acc = acc
if (epoch + 1) % args.save_interval == 0 or epoch == 0:
torch.save(model, args.save_dir + "/highlight_{}.pt".format(epoch + 1))
if optimizer.param_groups[0]['lr'] == 0:
break
with torch.no_grad():
print('Evaluate')
eval_correct = 0
eval_total = 0
evaluate_step = 0
for samples in valid_loader:
model.eval()
imgs, labels = samples['image'].to(device).float(), samples['label'].to(device)
outputs = model(imgs)
# 取得分最高的那个类 (outputs.data的索引号)
_, predicted = torch.max(outputs.data, 1)
eval_total += labels.size(0)
eval_correct += (predicted == labels).cpu().sum()
evaluate_step += 1
if evaluate_step >= (args.evaluate_step * 10):
break
print('Evaluated acc:%.3f%%' % (100. * float(eval_correct) / float(eval_total)))
eval_acc = 100. * float(eval_correct) / float(eval_total)
scheduler.step(eval_acc)
if __name__ == "__main__":
args = get_args()
train(args)
| null | pretrain/HIGHLIGHTclassifier/efficientnet_train.py | efficientnet_train.py | py | 8,333 | python | en | code | null | code-starcoder2 | 50 |
653390793 | ##########################################
# #
# Draw a house! #
# #
##########################################
# Use create_line(), create_rectangle() and create_oval() to make a
# drawing of a house using the tKinter Canvas widget.
# 70pt: House outline (roof and the house)
# 80pt: Square windows and a door
# 90pt: A door handle plus a chimney!
# 100pt: Green grass on the ground and a red house!
# Minus 5pts if your code has no comments
# Minus 10pts if you only commit once to github
from Tkinter import *
root = Tk()
drawpad = Canvas(root, width=800,height=800, background='white')
drawpad.grid(row=0, column=1)
#creating the house
square = drawpad.create_rectangle(200,200,600,600, fill='red')
#creating the roof
line1 = drawpad.create_line(200,200,400,0)
line2 = drawpad.create_line(400,0,600,200)
#making door
square2 = drawpad.create_rectangle(375,600,425,500, fill='brown')
#making windows
window1 = drawpad.create_rectangle(250,250,350,350, fill='white')
window2 = drawpad.create_rectangle(450,250,550,350, fill='white')
#creating doorhandle
doorhandle = drawpad.create_oval(405,535,420,550, fill='yellow')
#creating chimney
line3 = drawpad.create_line(500,100,500,25)
line4 = drawpad.create_line(500,25,550,25)
line5 = drawpad.create_line(550,25,550,150)
#grass
grass = drawpad.create_rectangle(0,600,800,650, fill='green')
root.mainloop() | null | 70-100pt.py | 70-100pt.py | py | 1,463 | python | en | code | null | code-starcoder2 | 50 |
239125907 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def assert_parameters(file_, parameters):
for k, v in parameters.items():
assert file_.contains("{} = {}".format(k, v))
def test_hosts_file(host):
assert_parameters(
host.file("/etc/sysctl.conf"),
{
"vm.dirty_ratio": 20,
"vm.dirty_background_ratio": 15
}
)
assert_parameters(
host.file("/etc/sysctl.d/10-networking.conf"),
{
"net.ipv4.tcp_syncookies": 1,
"net.ipv4.ip_forward": 0
}
)
assert_parameters(
host.file("/etc/sysctl.d/20-security.conf"),
{
"kernel.dmesg_restrict": 1
}
)
| null | molecule/default/tests/test_sysctl.py | test_sysctl.py | py | 829 | python | en | code | null | code-starcoder2 | 50 |
572708343 | # coding: utf-8
"""
restccnu::apis::apartment
`````````````````````````
部门信息
:MAINTAINER: neo1218
:OWNER: muxistudio
"""
_apartment_list = [
{
'apartment': "学生事务大厅",
'phone': ["67865591"],
'place': "文华公书林(老图书馆)3楼"
},
{
'apartment': "校医院",
'phone': ["67867176"],
'place': "九号楼侧边下阶梯右转处"
},
{
'apartment': "水电费缴纳处",
'phone': ["67861701"],
'place': "东南门附近的水电修建服务中心内"
},
{
'apartment': "校园卡管理中心",
'phone': ["67868524"],
'place': "田家炳楼侧边阶梯旁"
},
{
'apartment': "教务处",
'phone': ["67868057"],
'place': "行政楼2楼"
},
{
'apartment': "学生资助中心",
'phone': ["67867877"],
'place': "学生事务大厅10号窗口"
},
# {
# 'apartment': "校团委",
# 'phone': ["67867539"],
# 'place': "行政楼副楼301"
# },
{
'apartment': "党校办公室",
'phone': ["67868011"],
'place': "行政楼副楼",
},
{
'apartment': "素质教育办公室",
'phone': ["67868057"],
'place': "暂无",
},
{
'apartment': "档案馆",
'phone': ["67867198"],
'place': "科学会堂一楼",
},
{
'apartment': "国际交流事务办",
'phone': ["67861299"],
'place': "法学院前面的路口左拐(有路标)",
},
{
'apartment': "心理咨询中心",
'phone': ["67868274"],
'place': "文华公书林(老图书馆)3楼",
},
{
'apartment': "保卫处",
'phone': ["67868110"],
'place': "化学逸夫楼斜对面",
},
]
| null | restccnu/spiders/apartment.py | apartment.py | py | 2,150 | python | en | code | null | code-starcoder2 | 50 |
60826114 | # Crie um programa que leia o nome de uma cidade e diga se ela começa ou não com o nome "SANTO"
# le-se cidade[:5].capitalize() == 'Santo' --> se a cidade e igual == a 'Santo'
cidade = str(input('Informe o nome de sua cidade: ')).strip()
dividido = cidade.split()
print('Santo' in dividido[0].capitalize())
#print(cidade[:5].capitalize() == 'Santo')
#dividido = cidade.split()
#print('O nome de sua cidade e {} \nSeu primeiro nome e {} \nE ela {} começa com Santo'.format(cidade, dividido[0],
#'Santo' in dividido[0].capitalize()))
| null | desafio24.py | desafio24.py | py | 539 | python | en | code | null | code-starcoder2 | 50 |
4519184 | # urlretrieve : 요청하는 url의 정보를 로컬 파일로 저장
# csv 파일, api 데이터 등 많은 양의 데이터를 한꺼번에 저장할 때 사용
# 좋아하는 연예인 사진 저장하기
import urllib.request as req
# 요청 url
img_url = "https://search.pstatic.net/common/?src=http%3A%2F%2Fimgnews.naver.net%2Fimage%2F5242%2F2014%2F03%2F08%2F1394221104_thumb_59_20140308045602.jpg&type=sc960_832"
# 로컬 파일
save_img = "d:/dog.jpg"
try:
file1, header1 = req.urlretrieve(img_url, save_img)
except Exception as e:
print(e)
else:
print(header1)
print("성공")
| null | urllib/urlretrieve3.py | urlretrieve3.py | py | 619 | python | en | code | null | code-starcoder2 | 50 |
555225421 | import curses
from windows.window import Window
class StatusWin(Window):
def __init__(self, lines, cols, begin_y, begin_x, parent):
super().__init__(lines, cols, begin_y, begin_x, parent)
self.controller = self.parent.controller
self.status = "Default mode"
self.currentInput = ""
self.mode = 0
def render(self):
if self.mode == 0:
self.renderStatus()
elif self.mode == 1:
self.renderCommand()
def renderStatus(self):
width = self.scr.getmaxyx()[1]
hint = "Text Editor. "
hint += "t - Typing Mode, c - Command Mode, ESC - Normal Mode."
hint = hint.ljust(width)
self.scr.addstr(0, 0, hint, curses.A_REVERSE)
self.scr.addstr(1, 0, self.status.ljust(width - 1),
curses.A_REVERSE)
position = 'LN: ' + str(self.parent.bufferWin.getLine() + 1)
position += 'COL:' + str(self.parent.bufferWin.getCol())
self.scr.addstr(1, width - 1 - len(position), position,
curses.A_REVERSE)
def renderCommand(self):
height = self.scr.getmaxyx()[0]
width = self.scr.getmaxyx()[1]
hint = "Enter command: ".ljust(width)
self.scr.addstr(height - 2, 0, hint, curses.A_REVERSE)
self.scr.addstr(height - 1, 0, ":" + self.currentInput)
height = self.scr.getmaxyx()[0]
self.scr.move(height - 1, len(self.currentInput) + 1)
self.scr.cursyncup()
def handleInputCommand(self, c):
if c == 10 or c == curses.KEY_ENTER:
cmd = self.currentInput.lower()
if cmd.find("connect") == 0:
s = self.currentInput.split(" ")
if len(s) == 3:
self.controller.connectToServer(s[1], int(s[2]))
if len(s) == 2:
if s[1].find(":") == -1:
self.controller.connectToServer(s[0], 9999)
else:
s = s[1].split(":")
self.controller.connectToServer(s[0], int(s[1]))
else:
self.controller.connectToServer("localhost", 9999)
self.parent.bufferWin.offset = 0
self.parent.bufferWin.y = 0
self.parent.bufferWin.x = 0
if cmd.find("open") == 0:
if len(cmd.split(" ")) != 2:
self.mode = 0
newStatus = "open document."
newStatus += " usage: open <file.txt>"
self.setStatus(newStatus)
return
self.controller.openFile(cmd.split(" ")[1])
if cmd.find("save") == 0:
if len(cmd.split(" ")) != 2:
self.mode = 0
newStatus = "save document to file."
newStatus += " usage: save <file.txt>"
self.setStatus(newStatus)
return
self.controller.saveFile(cmd.split(" ")[1])
if cmd.find("setname") == 0:
if len(cmd.split(" ")) != 2:
self.mode = 0
newStatus = "Set name to display in history and access"
newStatus += ". name <NAME>"
self.setStatus(newStatus)
return
self.controller.setName(cmd.split(" ")[1])
self.setStatus("New name: " + self.controller.name)
if cmd == "getname":
self.setStatus("Name: " + self.controller.name)
if cmd == "history":
f = open("history.txt", "w+")
for op in self.controller.history:
f.write("-----\r")
for key in op.keys():
f.write(key + ':' + str(op[key]) + "\r")
f.close()
self.mode = 0
elif c == 8 or c == curses.KEY_BACKSPACE:
self.currentInput = self.currentInput[:len(self.currentInput)-1]
elif c == 27:
self.mode = 0
return
else:
self.currentInput += chr(c)
def handleInput(self, keycode):
if self.mode == 1:
self.handleInputCommand(keycode)
return
key = chr(keycode).lower()
if key == "c":
self.mode = 1
self.currentInput = ""
if key == "t":
self.parent.activeWindow = self.parent.bufferWin
self.setStatus("Typing mode")
def setStatus(self, text):
self.status = text
| null | windows/status_win.py | status_win.py | py | 4,608 | python | en | code | null | code-starcoder2 | 50 |
89721936 | from django.conf.urls import include, url
from rest_framework import routers
from olo.api import views as olo_api_views
router = routers.DefaultRouter()
router.register(r'match', olo_api_views.MatchView, 'match')
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^userlist/$', olo_api_views.UserlistAPIView.as_view(), name='userlist'),
url(r'^roomlist/$', olo_api_views.RoomlistAPIView.as_view(), name='roomlist'),
url(r'^like/$', olo_api_views.LikeAPIView.as_view(), name='like'),
] | null | olo/urls.py | urls.py | py | 508 | python | en | code | null | code-starcoder2 | 50 |
322896154 | #!/usr/bin/env python
# coding: utf-8
# In[4]:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame
import sys
import array
# In[5]:
df_data = pd.read_csv("BITS AIC 2019 - Reflexis Raw Dataset (1).csv")
# In[6]:
df_cs = pd.read_csv("wow.csv")
# In[19]:
# In[13]:
#df_mean = df_data[1:757].mean()
list=[]
store=[]
#print(df_data[0:df_cs['Cummulative count'][0]-1].corr())
for i in range(len(df_cs)-1):
if(i==0):
df_mean = df_data[0:756].mean()
list.append(df_mean['Average Sale Purchase'])
store.append(df_mean['STORE'])
else:
m = df_cs['Cummulative count'][i]
k = df_cs['Cummulative count'][i+1]
a= df_cs['Store'][i]
df_mean = df_data[m:k-1].mean()
list.append(df_mean['Average Sale Purchase'])
store.append(df_mean['STORE'])
#print(df_mean)
#print(list)
#list.sort()
print(list)
print(store)
# In[18]:
matplotlib.rcParams.update({'font.size': 14})
f, axarr = plt.subplots(1,2, figsize=(20, 4))
axarr[0].scatter( store, list,
edgecolor='black', linewidth='1', s=70, alpha=0.7, c="#e84629")
axarr[0].set_xlabel("Store Number")
axarr[0].set_ylabel("av of average sales")
axarr[0].set_ylim(0, 1)
axarr[0].set_yticks(np.arange(30, 80, 10))
axarr[0].set_xticks(np.arange(0, 8000, 1000))
axarr[0].grid(color='red', linestyle='--', linewidth=1, alpha=0.2)
axarr[0].spines["top"].set_visible(False)
axarr[0].spines["right"].set_visible(False)
axarr[0].spines["bottom"].set_visible(False)
axarr[0].spines["left"].set_visible(False)
plt.show()
| null | AIC-Mean.py | AIC-Mean.py | py | 1,647 | python | en | code | null | code-starcoder2 | 50 |
553457420 | import requests
from os import path
import os
import http.cookiejar
agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'
header = {
'HOST' : 'www.zhihu.com',
'referer' : 'https://www.zhihu.com/',
'user-agent' : agent
}
from selenium import webdriver
import time
options = webdriver.ChromeOptions()
options.add_argument('lang=zh_CN.UTF-8')
options.add_argument('user-agent="Mozilla/5.0 (iPod; U; CPU iPhone OS 2_1 like Mac OS X; ja-jp) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5F137 Safari/525.20"')
browser = webdriver.Chrome(chrome_options=options)
# browser = webdriver.Chrome()
browser.get("https://www.zhihu.com/signin")
browser.find_element_by_css_selector(".SignFlow-accountInput.Input-wrapper input").send_keys(
"13760710096")
time.sleep(1)
browser.find_element_by_css_selector(".SignFlow-password input").send_keys(
"XQY1197966810G")
time.sleep(2)
browser.find_element_by_css_selector(
".Button.SignFlow-submitButton").click()
time.sleep(3)
browser.get("https://www.zhihu.com/")
time.sleep(6)
zhihu_cookies = browser.get_cookies()
print("aaa", zhihu_cookies)
cookie_dict = {}
import pickle
for cookie in zhihu_cookies:
base_path = path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'cookies')
print(base_path)
f = open(base_path + "/zhihu/" + cookie['name'] + '.zhihu', 'wb')
pickle.dump(cookie, f)
f.close()
cookie_dict[cookie['name']] = cookie['value']
browser.close()
| null | ArticleSpider/ArticleSpider/utils/zhihu_login_requests.py | zhihu_login_requests.py | py | 1,547 | python | en | code | null | code-starcoder2 | 50 |
440657111 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 16:43:47 2017
@author: stevedeng
"""
import tkinter as tk
from tkinter import ttk
class tdList(tk.Tk):
def __init__(self, tasks=None):
super().__init__()
self.title("TO DO APP")
self.geometry("500x750")
self.backGroundColor_lib = ["palegreen","mistyrose","lightcyan","wheat","azure","paleturquoise","beige","lavender"]
self.fontcolor_lib = ["black"]
if not tasks:
self.tasks = []
task1 = tk.Label(self, text="TASKS FOR TODAY", bg="snow", fg="black", pady=20, font=("Times",23))
task1.pack(side=tk.TOP, fill=tk.X)
self.tasks.append(task1)
else:
self.tasks = []
for i in range(len(tasks)):
if i == 0:
task1 = tk.Label(self, text="TASKS FOR TODAY", bg="snow", fg="black", pady=20, font=("Times",23))
task1.pack(side=tk.TOP, fill=tk.X)
self.tasks.append(task1)
else:
self.add(tasks[i])
self.task_create = tk.Text(self, height=3, bg="white", fg="black")
self.task_create.pack(side=tk.BOTTOM, fill=tk.X)
self.task_create.focus_set()
self.bind('<Return>', self.add_task)
def add(self,txt,event=None):
new_task = tk.Label(self, text = txt, pady=20)
done_button = ttk.Button(new_task, text = "done",command = lambda:self.removeTask(done_button))
backGroundIdx = len(self.tasks)%len(self.backGroundColor_lib)
fontIdx = len(self.tasks)%len(self.fontcolor_lib)
backGroundColor = self.backGroundColor_lib[backGroundIdx]
fontColor = self.fontcolor_lib[fontIdx]
new_task.configure(bg=backGroundColor,fg=fontColor,font=("Times",20))
new_task.pack(side=tk.TOP, fill=tk.X)
done_button.pack(side=tk.RIGHT)
self.tasks.append(new_task)
def add_task(self, event=None):
new_text = self.task_create.get(1.0,tk.END).strip()
if len(new_text) > 0:
new_task = tk.Label(self, text = new_text, pady=20)
done_button = ttk.Button(new_task, text = "done",command = lambda:self.removeTask(done_button))
backGroundIdx = len(self.tasks)%len(self.backGroundColor_lib)
fontIdx = len(self.tasks)%len(self.fontcolor_lib)
backGroundColor = self.backGroundColor_lib[backGroundIdx]
fontColor = self.fontcolor_lib[fontIdx]
new_task.configure(bg=backGroundColor,fg=fontColor,font=("Times",20))
new_task.pack(side=tk.TOP, fill=tk.X)
done_button.pack(side=tk.RIGHT)
self.tasks.append(new_task)
self.task_create.delete(1.0, tk.END)
def removeTask(self, done_button):
done_button.pack_forget()
done_button.master.pack_forget()
self.tasks.remove(done_button.master)
def on_closing(self):
writefile = open("data.txt","w")
for item in self.tasks:
print(item.cget("text"),file=writefile)
writefile.close()
self.destroy()
if __name__ == "__main__":
#reading previously saved tasks
try:
readfile = open("data.txt","r")
except FileNotFoundError:
file = open("data.txt","w")
file.close()
readfile=open("data.txt","r")
tasks_lst=[]
for line in readfile:
line=line.strip()
tasks_lst.append(line)
#create todoApp
todo = tdList(tasks_lst)
todo.protocol("WM_DELETE_WINDOW", todo.on_closing)
todo.mainloop()
| null | src/todoList.py | todoList.py | py | 3,920 | python | en | code | null | code-starcoder2 | 51 |
428871614 | #!/usr/bin/env python
"""Problem Set 1 Code for QCB505 Fall 2020
"""
import numpy as np
import matplotlib.pyplot as plt
__author__ = "Scott Wolf"
__date__ = "20200908"
__credits__ = ["Scott Wolf"]
__version__ = "1"
__status__ = "Prototype"
__url__ = "https://www.dropbox.com/sh/jauik83zfg0rtfe/AACugg-_9g4Mfo2fPIUq_zQea?dl=0"
L = 5
kappa = 1
beta = 0.5
Nsteps = 100000
EE = np.zeros((Nsteps, 1))
S = np.zeros((Nsteps, 1))
for t in range(1, Nsteps):
sold = S[t - 1]
Eold = (kappa / 2) * ((sold / L) ** 2)
snew = sold.copy()
step = np.sign(np.random.normal())
snew = sold + step
Enew = (kappa / 2) * ((snew / L) ** 2)
if np.exp(-beta * (Enew - Eold)) > np.random.uniform():
S[t] = snew
EE[t] = Enew
else:
S[t] = sold
EE[t] = Eold
plt.hist(np.sum(S, axis=1))
plt.show()
plt.plot(EE[0:1000])
plt.show()
# Section 2
N = 100
Nsteps = 10000
s = np.zeros((Nsteps, N))
s[0, ] = np.sign(np.random.randn(N))
h = 1
beta = 2
EE = np.zeros(Nsteps)
for t in range(1,Nsteps):
sold = s[t - 1,]
Eold = -h * np.sum(sold)
snew = sold.copy()
flip = np.random.choice(100)
snew[flip] = -sold[flip]
Enew = -h * np.sum(snew)
if np.exp(-beta * (Enew - Eold)) > np.random.rand(1):
s[t,] = snew
EE[t] = Enew
else:
s[t,] = sold
EE[t] = Eold
plt.hist(np.sum(s, axis=1))
plt.show()
plt.plot(EE/N)
plt.show()
# Section 3
L = 10
N = L**2
nn = np.zeros((L,L))
for i in range(0,L):
for j in range(0,L):
nn[i,j] = (i-1)*L +(j-1) + 1 | null | 20200908_pset/20200908_pset.py | 20200908_pset.py | py | 1,551 | python | en | code | null | code-starcoder2 | 51 |
111810308 | import re
import binascii
from collections import OrderedDict
from Crypto.Cipher import AES
import cloudscraper
import requests
session = cloudscraper.create_scraper(interpreter='native', debug=False)
def makeCookie(response):
if 'slowAES.decrypt' in response.text:
try:
cryptVars = OrderedDict(re.findall(r'(a|b|c)=toNumbers\("(.*?)"\)', response.text))
check = binascii.hexlify(
AES.new(
binascii.unhexlify(cryptVars['a']),
AES.MODE_CBC,
binascii.unhexlify(cryptVars['b'])
).decrypt(
binascii.unhexlify(cryptVars['c'])
)
).decode('ascii')
data = {
'url': response.text.split('location.href="')[1].split('"')[0],
'cookie': [
response.text.split('document.cookie="')[1].split('=')[0],
check
]
}
print(f"Setting Human Check to {data['cookie'][1]}")
return data
except:
return 0
else:
return 0
def monitor(url):
response = session.get(url)
cookie = makeCookie(response)
if cookie != 0:
requests.utils.add_dict_to_cookiejar(
session.cookies,
{
cookie['cookie'][0]: cookie['cookie'][1]
}
)
url = cookie['url']
return monitor(url)
print(response.text)
monitor('https://www.consortium.co.uk/polar-skate-co-stripe-puffer-ivory-navy-pol-f19-stripepuffer-ivonvy.html')
| null | new-bypass.py | new-bypass.py | py | 1,618 | python | en | code | null | code-starcoder2 | 51 |
325556325 | import gc
import os
import numpy as np
import pandas as pd
import torch
from torch.utils import data
class BenchmarkDataset(data.Dataset):
def __init__(self, X, y):
self.X = X
self.y = y
self.divide = False
self.is_test = False
self.debug = False
def __len__(self):
return self.X.shape[0]
def __getitem__(self, index):
if index not in range(0, len(self.X)):
return self.__getitem__(np.random.randint(0, self.__len__()))
image = self.X[index]
if self.divide:
image = image / 255.
image = torch.from_numpy(image).float().permute([2, 0, 1]).contiguous()
if self.debug:
print(image.shape)
if not self.is_test:
target = self.y[index]
return image, target
if self.is_test:
return (image,)
| null | pytorch/torch_dataset.py | torch_dataset.py | py | 883 | python | en | code | null | code-starcoder2 | 51 |
209793644 | import os
import pickle
import torch
from torch import optim
from core.network import LSTM, train, test
# Load DataLoader
DATA_PATH = os.path.join('..', 'data')
DATA_NAME = 'data_loader.pkl'
with open(os.path.join(DATA_PATH, DATA_NAME), 'rb') as f:
data_dict = pickle.load(f)
train_dl = data_dict['train_dl']
val_dl = data_dict['val_dl']
dev_dl = data_dict['dev_dl']
# Configure training parameters
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
learning_rate = 0.0002
hidden_dim = 100
num_layers = 1
num_epoch = 100
model = LSTM(hidden_dim=hidden_dim, num_layers=num_layers)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train and test
train(model, device, train_dl, val_dl, optimizer, num_epoch)
loss, acc = test(model, device, dev_dl)
# Save model
DIR = os.path.join('..', 'model')
NAME = 'model-{}_{}_{}_{:.4f}.pkl'.format(num_layers, hidden_dim, learning_rate, acc)
if not os.path.exists(DIR):
os.makedirs(DIR)
torch.save(model.state_dict(), os.path.join(DIR, NAME))
| null | train.py | train.py | py | 1,057 | python | en | code | null | code-starcoder2 | 51 |
243793853 | import requests
import pymysql
from bs4 import BeautifulSoup
def download_page(url):
headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0"}
resource = requests.get(url, headers=headers)
html = resource.text
return html
def get_article_list_from_html(html):
article_list = []
soup = BeautifulSoup(html, 'html.parser')
content = soup.find(id='content-left')
for i in content.find_all(class_='article'):
article_list.append(i)
return article_list
def get_content(article):
# print(article.find('span', class_='contentForAll'))
if None is article.find('span', class_='contentForAll'):
return article.find(class_='content').span.text
else:
base_url = 'https://www.qiushibaike.com'
url = base_url + article.find('a', class_='contentHerf')['href']
return get_content_from_url(url)
def get_content_from_url(url):
html = download_page(url)
soup = BeautifulSoup(html, 'html.parser')
return soup.find('div', class_='content').text
def save_to_db(id, author, author_age, content, up_num, comment_num):
host = 'localhost'
port = 3306
user = 'root'
password = 'hello9504'
db = 'crawler'
charset = 'utf8mb4'
sql = ("""insert into qiushi(id, author, author_age, content,
up_num, commont_num) values('{}', '{}', {}, '{}', {}, {})""").format(id, author, author_age, content, up_num, comment_num)
conn = pymysql.connect(host=host, port=port, user=user, password=password, db=db, charset=charset)
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
def crawler_qiushi():
html = download_page('https://www.qiushibaike.com/text/')
article_list = get_article_list_from_html(html)
for i in article_list:
id = i.a['href']
author = i.find('h2').text
if author != '匿名用户':
author_age = int(i.find('div', class_='articleGender').text)
else:
author_age = -1
content = get_content(i)
up_num = int(i.find('span', class_='stats-vote').find('i', class_='number').text)
comment_num = int(i.find('span', class_='stats-comments').find('i', class_='number').text)
save_to_db(id, author, author_age, content, up_num, comment_num)
crawler_qiushi()
| null | com/hello/crawler/1_qiushibaike_crawler.py | 1_qiushibaike_crawler.py | py | 2,353 | python | en | code | null | code-starcoder2 | 51 |
46985470 | import requests
from bs4 import BeautifulSoup as bs
import os
def getTitle(soup):
title=str(soup.find('div',attrs={'class':'red title'}))
return title[title.find("title")+7:title.find("</div")]
def comeonUrl():
url=input("input page's url : ")
try:
driver=requests.get(url)
except:
print("invalid URL")
return
html=driver.text
soup=bs(html,'html.parser')
title=getTitle(soup)
print(title)
link=soup.find('div',attrs={'class':'chapter-list'})
f=open("ground.txt",'w')
f.write(str(link))
f.close()
namecheck=0
resource=open(title+"-resource.txt",'w')
f=open("ground.txt",'r')
pr=""
while 1:
a=f.readline()
if not a:
break
if namecheck==1:
print(a)
name=a.strip()
name=name[:name.find("<span")]
resource.write(pr+" name : "+name+"\n")
namecheck=0
continue
if "<div class=\"title\">" in a:
namecheck=1
if 'href' in a:
pr="https://manamoa17.net"
pr+=a[a.find("href=")+6:a.find("\">")]
pr=pr.replace("amp;","")
f.close()
resource.close()
os.remove("ground.txt")
| null | Source/comeonURL.py | comeonURL.py | py | 1,241 | python | en | code | null | code-starcoder2 | 51 |
325147570 | # Autoplay videodirectory
import os, xbmc
# set path to dir you want to play
path="/mnt/movies/"
dirList=os.listdir(path)
videoList = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
videoList.clear()
for fname in dirList:
videoList.add(path + "\\" + fname)
# shuffle playlist
videoList.shuffle()
# play playlist
xbmc.Player().play(videoList)
# put playlist on repeat
xbmc.executebuiltin("xbmc.playercontrol(RepeatAll)") | null | autoexec.py | autoexec.py | py | 420 | python | en | code | null | code-starcoder2 | 51 |
291217303 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
__author__ = 'jfsu'
import sys
import os
BASE_DIR = os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) )
sys.path.append(BASE_DIR)
from conf import config
class users(object):
def __init__(self):
self.users_file = config.USERS_FILE+"user_db.txt"
self.users = self.read_users()
def read_users(self):
print(self.users_file)
with open(self.users_file,'r',encoding="utf-8") as f:
users = eval(f.read())
# print(users)
return users
def get_users(self):
return self.users
def get_user(self,username):
for user in self.users:
print("\033[42;1m>>>:\033[0m",user)
if user['username'] == username:
# print(user["password"])
return user
# aa = users()
#
# aa.get_user('jfsu') | null | 第三部分,模块,面向对象/FTP作业 -实现简单ftp功能/FTP/core/userAuth.py | userAuth.py | py | 881 | python | en | code | null | code-starcoder2 | 51 |
541401871 | from django.urls import path
from . import views
from django.contrib.auth.views import logout
app_name = 'todo'
urlpatterns = [
path('', views.home_view, name='index'),
path('logout/', logout, {'next_page': 'todo:index'}, name='logout'),
path('add', views.TodoCreateView.as_view(), name='add_todo'),
path('<int:pk>/delete', views.TodoDeleteView.as_view(), name='delete_todo'),
path('<int:pk>/edit', views.TodoEditView.as_view(), name='edit_todo'),
path('<int:pk>/done', views.done_view, name='done_todo'),
] | null | todo/urls.py | urls.py | py | 534 | python | en | code | null | code-starcoder2 | 51 |
310803468 | from htmlshow import *
import datetime
import locale
locale.setlocale(locale.LC_TIME, "fr_FR") # swedish
'''
# font-size:17px;
font-family:pixlim;
font-family:maniaccompact;
font-family:smallishnarrow;
font-family:consolas;
font-family:smallishunaligne;
font-size:0.6em;
font-smooth: never;
cellspacing:0;
border-spacing: 0;
vertical-align: top;
font-size: 1.4em;
# table.outer > tr td {
border-collapse: collapse;
'''
css2_old = '''
/* ALL TABLES*/
table {
}
/* ########## OUTER ########## */
table.outer tr td {
border-collapse: collapse;
position: relative;
vertical-align: top;
padding: 5px 15px 5px 15px;
}
/*
padding: 10px 25px 10px 25px;
margin: 5px 15px 5px 15px auto;
*/
/* ########## INNER ########## */
table.inner {
/* border-collapse: collapse; */
border-collapse: collapse;
}
table.inner tr td.empty { border:none; }
table.inner tr td.weekend {background: #d8d8d8;}
table.inner tr td, table.inner tr th {
border: 1px solid #a00;
border-spacing: 0px;
width: 4em;
height: 2em;
vertical-align: top;
line-height: 1;
padding: 0 0 0 3px;
font-family:pixlim;
font-size:16px;
}
/* only border between months*/
.large_cell {
border-bottom: solid 5px #ddd;
}
.large_cell_bottom {
border-bottom: none;
}
.month {
font-family: monospace;
font-weight: bold;
font-size: 1.2em;
text-align: center;
}
.month_top {
position: absolute;
font-family: monospace;
font-weight: bold;
font-size: 1.2em;
top:10;
left:10;
}
.month_bottom {
position: absolute;
font-family: monospace;
font-weight: bold;
font-size: 1.2em;
bottom:10;
right:10;
}
'''
css = '''
/* ALL TABLES*/
/* ########## OUTER ########## */
table.outer tr td {
border-collapse: collapse;
position: relative;
vertical-align: top;
padding: 2px 15px 3px 15px;
padding: 0px 15px 0px 15px;
}
/*
padding: 10px 25px 10px 25px;
margin: 5px 15px 5px 15px auto;
*/
/* ########## INNER ########## */
table.inner {
/* border-collapse: collapse; */
border-collapse: collapse;
}
table.inner tr td.empty { border:none; }
table.inner tr td.weekend {background: #e4e4e4 padding-box;}
table.inner tr td, table.inner tr th {
border: 1px solid #999;
width: 2.5em;
height: 2em;
width: 4em;
height: 1.5em;
height: 1.65em;
height: 1.7em;
vertical-align: top;
line-height: 1;
padding: 0 0 0 3px;
font-family:pixlim;
font-size:16px;
}
/* only border between months*/
.large_cell {
border-right: solid 5px #ddd;
}
.large_cell_bottom {
border: none;
}
.leftish {position:absolute; top:-3px;}
.month {
font-family: monospace;
font-weight: bold;
font-size: 1.2em;
text-align: center;
}
'''
'''
# border-collapse:collapse;
border: 1px dashed #aaa;
'''
nope = '''
td {
margin:0;
padding:0;
padding-left: 10px;
padding-right:10px;
border-right: dotted grey 1px;
border-top: dotted grey 1px;
}
padding:3px 15px 3px 15px;
'''
# Render one standalone HTML file per year ('cal-<year>.html').  Each month is
# an inner table of ISO weeks; the twelve months are arranged column-major in
# a cols x (12 // cols) outer grid.
years = [y for y in range(2020,2025)]
import calendar
for y in years:
    start = datetime.date(y, 1, 1)
    # NOTE(review): both branches are one day too long (366 for a common year,
    # 367 for a leap year), so the list ends with Jan 1 of the following year.
    # That stray day falls under month key (y+1, 1), which the rendering loop
    # below never reads, so the output is unaffected -- confirm before reuse.
    week_days = [start + datetime.timedelta(days=i) for i in range(367 if calendar.isleap(y) else 366)]
    # +0 to +365, mmmmh
    # weeks = []
    # ar = [i for i in range(50)]
    # splits = [ar[7*i:7*i+7] for i in range(1+50//7)]
    # for a in splits: print(a)
    # weeks = [week_days[7*i:7*i+7] for i in range(1+len(week_days)//7)]
    # for a in weeks: print([d.strftime("%a %Y-%b-%d").lower() for d in a])
    html = open('cal-%d.html'%y,'w', encoding='utf-8')
    html_start(html, css)
    text_blob = ''
    # text_blob += '<div><table>'
    # month = weeks[0][0].month
    # current_month = (2020, 1)
    # weekline = ['<td class="empty"></td>' for a in range(7)]
    # Bucket every day as months_indexed[(year, month)][iso_week] -> [days].
    whole_months = {}
    months_indexed = {}
    # for i,day in enumerate(week_days):
    for day in week_days:
        # week = (day.timetuple().tm_yday
        week = day.isocalendar()[1]  # ISO week number
        month = (day.year, day.month)
        if month not in months_indexed: months_indexed[month] = {}
        if week not in months_indexed[month]: months_indexed[month][week] = []
        months_indexed[month][week].append(day)
    # NOTE(review): this pass simply copies months_indexed into whole_months;
    # the two mappings end up with identical contents.
    for month, weeks in months_indexed.items():
        if month not in whole_months: whole_months[month] = {week: [] for week in weeks}
        for week, days in weeks.items():
            for day in days:
                whole_months[month][week].append(day)
    # Build one list of rendered week rows per month; weekday slots that fall
    # outside the month remain "empty" cells.
    month_tables = {}
    for month, weeks in whole_months.items():
        month_tables[month] = []
        for week, days in weeks.items():
            # weekline = ['<td class="empty"></td>' for a in range(7)]
            weekline = [' <td class="empty"></td>\n' for a in range(7)]
            for day in days:
                weekline[day.weekday()] = ' <td class="%s"><div class="leftish">%s</div></td>\n'%(
                    # weekline[day.weekday()] ='<td>%s</td>\n'%(
                    'weekend' if day.weekday() > 4 else '',
                    # day.strftime("%m-%d").lower()
                    day.strftime("%d").lower()
                    # '-'
                )
            # print(weekline)
            # print(len(weekline))
            month_tables[month].append(weekline)
    # Successive overrides: only the final assignment (3 columns) takes effect.
    cols = 6
    cols = 2
    cols = 4
    cols = 3
    rows = 12 // cols
    # rows, cols = cols, rows
    table = [['' for a in range(cols)] for b in range(rows)]
    for col in range(1, 13):
        weeklines = month_tables[(y, col)]
        month_name = datetime.date(y, col, 1).strftime('%B').lower()
        joined = ''.join(['<tr>\n%s</tr>\n' % ''.join(weekline) for weekline in weeklines])
        # row, column = (col-1)//rows, (col-1)%rows
        column, row = (col - 1) // rows, (col - 1) % rows  # column-major fill
        month = '<div class="month">%s</div>\n' % month_name
        with_month = month + '<table class="inner">\n%s</table>\n' % joined
        table[row][column] = with_month
    # Assemble the outer grid; the last column uses the borderless style.
    table_html = ''
    row_length = len(table[0])
    for i, row in enumerate(table):
        # table_html+=tr(row)
        # len(table)-1
        table_html += '<tr>\n%s</tr>' % ''.join(['<td class="%s">%s</td>\n' % (
            'large_cell_bottom' if j == row_length - 1 else 'large_cell',
            # j,
            a) for j, a in enumerate(row)])
    # text_blob +='<table class="outer">%s</table>'%tr(columns)
    # text_blob += '<div class="month">%d</div><br/><table class="outer">\n%s</table>\n'%(y,table_html)
    text_blob += '<div class="month">%d</div><table class="outer">\n%s</table>\n'%(y,table_html)
    html.write(text_blob)
    html_finish(html)
| null | _projlab/calendar-jog/cal2020-2.py | cal2020-2.py | py | 7,239 | python | en | code | null | code-starcoder2 | 51 |
50993001 | # -*- coding: utf-8 -*-
""" This code is open-sourced software licensed under the MIT license"""
""" Copyright 2019 Marta Cortes, UbiComp - University of Oulu"""
""" Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
DISCLAIMER
This code is used to crawl/parse data from several files from Thessaloniki municipality (https://opendata.thessaloniki.gr). By downloading this code, you agree to contact the corresponding data provider and verify you are allowed to use (including, but not limited, crawl/parse/download/store/process) all data obtained from the data source.
"""
""" Download excel files and transform to correct format in csv files. """
""" """
""" Excel files are linked in href attribute of <a> elements in the given URL (Not nested URLs)"""
""" Each station, in stations array, is linked to a numerical code in this file"""
""" Longitude and latitude and location (as descriptive name) are added to each row of each station"""
""" Greek names for date and weekday are translated"""
# Code: thess_env_cityofthess_dailyyearly
# Code with numbering: thess_env_cityofthess_dailyyearly_1, thess_env_cityofthess_dailyyearly_2, thess_env_cityofthess_dailyyearly_3, thess_env_cityofthess_dailyyearly_4, thess_env_cityofthess_dailyyearly_5, thess_env_cityofthess_dailyyearly_6
#Stations (latitude, longitude):
#Egnatia (Στ. ΕΓΝΑΤΙΑΣ): Egnatia and I. Dragoumi (1st Municipal District) (40.63753, 22.94095): thess_env_cityofthess_dailyyearly_1
#Martiou (Στ. 25ης ΜΑΡΤΙΟΥ): 25 March and Karakasi (5th Municipal District) (40.60102, 22.96017): thess_env_cityofthess_dailyyearly_2
#Lagada (Στ. ΛΑΓΚΑΔΑ): Lagada and Koutifari (2nd Municipal District) (40.65233, 22.93514): thess_env_cityofthess_dailyyearly_3
#Eptapyrgio (Στ. ΕΠΤΑΠΥΡΓΙΟΥ): Agia Anastasia and Agrafon (3rd Diamersima) (40.64407, 22.95837): thess_env_cityofthess_dailyyearly_4
#Malakopi (Toumba) (Στ. ΜΑΛΑΚΟΠΗΣ): Harisio Girokomio (Dimitrios Charisis) (4th Diamersima) (40.61637, 22.98233): thess_env_cityofthess_dailyyearly_5
#Dimarxeio (Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.): King's George A (1st Diamersima) (40.62381, 22.95312): thess_env_cityofthess_dailyyearly_6
#NO, NO2, O3, PM10, PM2.5, CO, SO2
#μg/m3,μg/m3,μg/m3,μg/m3,μg/m3,mg/m3,μg/m3
from bs4 import BeautifulSoup
from urllib.request import urlopen, urlretrieve
import time
import os
from collections import deque
import pandas as pd
import shutil
import uuid
from kafka import KafkaProducer
from kafka.errors import KafkaError
import logging
__author__ = "Marta Cortes"
__mail__ = "marta.cortes@oulu.fi"
__origin__ = "UbiComp - University of Oulu"
logging.basicConfig(level=logging.INFO)
code = 'thess_env_cityofthess_dailyyearly'
stations = {'Στ. ΕΓΝΑΤΙΑΣ':[40.63753, 22.94095],'Στ. 25ης ΜΑΡΤΙΟΥ':[40.60102, 22.96017],'Στ. ΛΑΓΚΑΔΑ':[40.65233, 22.93514],'Στ. ΕΠΤΑΠΥΡΓΙΟΥ':[40.64407, 22.95837],'Στ. ΜΑΛΑΚΟΠΗΣ':[40.61637, 22.98233],'Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.':[40.62381, 22.95312]}
names = {'Στ. ΕΓΝΑΤΙΑΣ':'Egnatia','Στ. 25ης ΜΑΡΤΙΟΥ':'Martiou','Στ. ΛΑΓΚΑΔΑ':'Lagada','Στ. ΕΠΤΑΠΥΡΓΙΟΥ':'Eptapyrgio','Στ. ΜΑΛΑΚΟΠΗΣ':'Malakopi','Μτ.Στ. ΔΩΜΑ ΠΑΛ. ΔΗΜΑΡ.':'Dimarxeio'}
origin_url = 'https://opendata.thessaloniki.gr/el/dataset/%CE%BC%CE%B5%CF%84%CF%81%CE%AE%CF%83%CE%B5%CE%B9%CF%82-%CE%B4%CE%B7%CE%BC%CE%BF%CF%84%CE%B9%CE%BA%CE%BF%CF%8D-%CE%B4%CE%B9%CE%BA%CF%84%CF%8D%CE%BF%CF%85-%CF%83%CF%84%CE%B1%CE%B8%CE%BC%CF%8E%CE%BD-%CE%B5%CE%BB%CE%AD%CE%B3%CF%87%CE%BF%CF%85-%CE%B1%CF%84%CE%BC%CE%BF%CF%83%CF%86%CE%B1%CE%B9%CF%81%CE%B9%CE%BA%CE%AE%CF%82-%CF%81%CF%8D%CF%80%CE%B1%CE%BD%CF%83%CE%B7%CF%82-%CF%84%CE%BF%CF%85-%CE%B4%CE%AE%CE%BC%CE%BF%CF%85-%CE%B8%CE%B5%CF%83%CF%83%CE%B1%CE%BB%CE%BF%CE%BD%CE%AF%CE%BA%CE%B7%CF%82'
#
l_temp_path = './temp/'
l_final_path = './data/'
class thess_env_cityofthess_dailyyearly (object):
    # Crawler/parser for the Thessaloniki air-quality open-data page: finds
    # linked Excel files, downloads them, and converts each station sheet to
    # CSV (with station coordinates and translated column names added).

    def __init__(self, url):
        # url: page to crawl; xlfnames: downloaded file names; url_queue:
        # pending file URLs; folder: local download directory.
        self.url = url
        self.xlfnames = []
        self.url_queue = deque([])  # doble-ended queu
        self.folder = l_temp_path

    def get_page(self, url):
        """ Download the page at given URL"""
        """ @param url: Url we want to crawl"""
        """ @type url: String """
        """@return the page"""
        # NOTE(review): if urlopen() itself raises, `u` is unbound and the
        # finally block raises NameError; likewise `html` would be unbound at
        # the return.  Works only on the happy path -- confirm acceptable.
        try:
            u = urlopen(url)
            html = u.read().decode('utf-8')
        except Exception as e:
            logging.exception(e)
        finally:
            print("Closing")
            u.close()
        return html

    def get_soup(self, html):
        """Returns the BeautifulSoup object of the given page"""
        if html is not None:
            soup = BeautifulSoup(html, "html.parser")
            return soup
        else:
            # Implicitly returns None when there was no page to parse.
            return

    def get_links(self, soup):
        """Get the links of interest from the given Beuti"""
        """ @param soup: BeautifulSoup object that cointains the targeted links """
        """ @type soup: BeautifulSoup object """
        for link in soup.select('a[href^="https://"]'):  # All links which have a href element
            href = link.get('href')  # The actually href element of the link
            # Only queue spreadsheet-like files, each at most once.
            if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):
                print("No excel")
                continue
            if not href in self.url_queue:
                self.url_queue.append(href)  # Add the URL to our queue

    def get_files(self):
        """Create a temp folder to download"""
        # self.folder= +str(int(time.time()))
        if not os.path.exists(self.folder):
            os.mkdir(self.folder)
        while len(self.url_queue):  # If we have URLs to crawl - we crawl
            href = self.url_queue.popleft()  # We grab a URL from the left of the list
            filename = href.rsplit('/', 1)[-1]  # file name = last path segment
            print("Downloading %s to %s..." % (href, filename))
            fullname = os.path.join(self.folder, filename)
            urlretrieve(href, fullname)
            self.xlfnames.append(filename)

    def run_downloader(self):
        """downloads the htmlpage and looks for the links with excel files"""
        """calls to the file downloader"""
        # NOTE(review): returns None, so callers must not rely on a truthy
        # result (see __main__ below).
        html = self.get_page(self.url)
        soup = self.get_soup(html)
        if soup is not None:  # If we have soup -
            self.get_links(soup)
            self.get_files()

    def parse_sheet(self, xl, sheet):
        """ @param xl: excel file object """
        """ @type xl: dataframe """
        """ @param sheet: sheet object """
        """ @type sheet: dataframe """
        # Only sheets whose (Greek) name matches a known station are parsed.
        if sheet in stations.keys():
            # Create dataframe. Note, put this out of the loop to write all the sheets in same csv file
            df = pd.DataFrame()
            # print(sheet.encode('utf-8'))
            df_tmp = xl.parse(sheet)
            # Clean the data: replace newlines in headers, strip the unit
            # suffixes.  NOTE(review): str.strip() removes a *set* of
            # characters, not a suffix string -- confirm headers survive.
            df_tmp.columns = df_tmp.columns.str.replace('\n', ' ').str.strip(' μg/m3').str.strip(' mg/m3')
            # select the columns of interest (pollutants + date + weekday)
            df_tmp = df_tmp.filter(regex='(NO|NO2|O3|PM10|PM2,5|CO|SO2|Ημερο - μηνία|Ημέρα)')
            # df_tmp.columns = df_tmp.columns.str.strip(' μg/m3').str.strip(' mg/m3')
            # correct format of information: weekday as English day name
            df_tmp['Ημέρα'] = df_tmp['Ημέρα'].dt.day_name()
            df_tmp['Latitude'] = stations[sheet][0]
            df_tmp['Longitude'] = stations[sheet][1]
            df_tmp['Location'] = names[sheet]
            # renaming fields in greek -> English
            df_tmp.rename(columns={'Ημερο - μηνία': 'Date', 'Ημέρα': 'Weekday'}, inplace=True)
            # Directory name by code/codenumber (station index is 1-based)
            outerdir = l_final_path + code
            if not os.path.exists(outerdir):
                os.mkdir(outerdir)
            outdir = outerdir + '/' + code + '_' + str(list(stations).index(sheet) + 1)
            if not os.path.exists(outdir):
                os.mkdir(outdir)
            # NOTE(review): DataFrame.append was removed in pandas 2.0; use
            # pd.concat when upgrading.
            df = df.append(df_tmp, ignore_index=True)
            # Write to the csv file. Note, put this out of the loop to write all the sheets in same csv file
            # NOTE(review): the doubled assignment below is redundant.
            csvfile = csvfile = str(uuid.uuid4()) + ".csv"  # sheet+'.csv'
            fullname = os.path.join(outdir, csvfile)
            df.to_csv(fullname, mode='a', encoding='utf-8-sig', index=False)  # mode a is append

    def parse_files(self):
        """ calls parse_sheet to each sheet in the given file """
        """ @param name: name of the file """
        """ @type name: string """
        for fileName in self.xlfnames:
            xlfname = self.folder + '/' + fileName
            xl = pd.ExcelFile(xlfname)
            for sheet in xl.sheet_names:
                self.parse_sheet(xl, sheet)

    def producer(self, topic, msg, e=None):
        """ This function sends data to kafka bus"""
        # NOTE(review): 'HOST_IP' is a placeholder bootstrap server -- must be
        # replaced with a real broker address for this to work.
        producer = KafkaProducer(bootstrap_servers=['HOST_IP'], api_version=(2, 2, 1))
        msg_b = str.encode(msg)
        producer.send(topic, msg_b).get(timeout=30)
        if (e):
            logging.exception('exception happened')
if __name__ == '__main__':
    # Run the full pipeline: download the Excel files, convert them to CSV,
    # then announce the ingestion on the Kafka bus.
    #
    # BUG FIX: run_downloader() and parse_files() both return None, so the
    # original `if a.run_downloader(): if a.parse_files(): ...` chain never
    # executed the later stages.  Call the stages unconditionally instead.
    a = thess_env_cityofthess_dailyyearly(origin_url)
    a.run_downloader()
    a.parse_files()
    a.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_INGESTION",
               'City of Thessaloniki environmental data ingested to HDFS')
| null | Environmental/thess_env_cityofthess_dailyyearly.py | thess_env_cityofthess_dailyyearly.py | py | 9,577 | python | en | code | null | code-starcoder2 | 51 |
307690580 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import chromedriver_binary # Adds chromedriver binary to path
import functools #for error handling
import re
from time import sleep #to pause execution
def exception(fn):
    """Decorator that shields *fn* from Selenium lookup failures.

    If the wrapped call raises NoSuchElementException, the failing function's
    name is reported on stdout and None is returned instead of propagating
    the error.  All other exceptions pass through unchanged.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            result = fn(*args, **kwargs)
        except NoSuchElementException:
            print("There was a NoSuchElementException in " + fn.__name__)
            return None
        return result
    return wrapper
class IT_Request_Automator():
    """Automates filling out an IT service-desk request form via Chrome.

    Workflow: collect user details and ticket text from the console, then
    drive a Selenium browser to the request page and type the fields in.
    """

    # Class-level defaults (instances override these in __init__).
    browser = ""
    user_name, email, phone = "", "", ""

    def __init__(self):
        self.user_name = "Jeremy Marino"
        self.user_email = "Jeremy@ccbf.net"
        self.user_phone = "951-764-2881"

    def start_browser(self):
        """Launch Chrome and keep the driver handle; returns the driver."""
        self.browser = webdriver.Chrome()
        return self.browser

    def is_valid_email(self, email_input):
        """Return True if *email_input* contains an e-mail-shaped token.

        BUG FIX: the original pattern used an unescaped '.' (any character),
        so strings like 'a@b#c' were accepted.  The dot is now escaped so a
        literal '.' is required between domain parts.
        """
        email_regex = re.compile(r"\b\w+@\w+\.\w+\b")
        return email_regex.search(email_input) is not None

    def get_email(self):
        """Prompt (repeatedly) until a syntactically valid e-mail is entered."""
        self.user_email = input("What's the email? ")
        if not (self.is_valid_email(self.user_email)):
            print("That's not valid try again\n")
            self.get_email()
        return self.user_email

    def get_user_info(self):
        """Ask whether to use the default address or prompt for a custom one.

        BUG FIX: the original stored the answer in ``self.email`` while every
        other method reads ``self.user_email`` (the custom-address path only
        worked because get_email() also sets user_email as a side effect).
        The result is now stored in ``self.user_email`` consistently.
        """
        default_email = input("Do you want to proceed with e-mail 'Jeremy@ccbf.net'? (y/n): ")
        if default_email == "y":
            self.user_email = "Jeremy@ccbf.net"
        else:
            self.user_email = self.get_email()

    @exception
    def get_request_page(self):
        """Navigate straight to the IT request form; returns the driver."""
        self.browser.get("https://ccbf.atlassian.net/servicedesk/customer/portal/2/group/2/create/10002")  # go straight to the page
        return self.browser

    @exception
    def type_email(self):
        """Type the stored e-mail address into the form's email field."""
        email_field = self.browser.find_element_by_css_selector("#email")
        email_field.click()
        email_field.send_keys(self.user_email)

    @exception
    def type_summary(self):
        """Type the collected summary into the form's summary field."""
        summary_field = self.browser.find_element_by_css_selector("#summary")
        summary_field.click()
        summary_field.send_keys(self.it_summary)

    @exception
    def get_summary(self):
        """Ask the user for the ticket summary text."""
        self.it_summary = input("What's the summary?\n")

    @exception
    def type_description(self):
        """Type the collected description into the form's description field."""
        description_field = self.browser.find_element_by_css_selector("#description")
        description_field.click()
        description_field.send_keys(self.it_description)

    @exception
    def get_description(self):
        """Ask the user for the ticket description text."""
        self.it_description = input("What's the description?\n")
def main():
    # Gather all request details first (console prompts only), then drive the
    # browser: open the form, give it time to render, and fill in the fields.
    it_form = IT_Request_Automator()
    it_form.get_user_info()
    it_form.get_summary()
    it_form.get_description()
    global browser_automate  # stops browser from closing
    browser_automate = it_form.start_browser()
    it_form.get_request_page()
    sleep(2.5);  # sleep for page to load
    it_form.type_summary()
    it_form.type_email()
    it_form.type_description()


# NOTE(review): the form is filled but never submitted; the browser is left
# open for the user to review and send.
main()
| null | Forms/it-form-request.py | it-form-request.py | py | 3,003 | python | en | code | null | code-starcoder2 | 51 |
535691735 | # coding: utf8
from __future__ import unicode_literals
import argparse
import os
import sys
import traceback
from beastling.beastxml import BeastXml
from beastling.report import BeastlingReport
from beastling.report import BeastlingGeoJSON
import beastling.configuration
from beastling.extractor import extract
def errmsg(msg):
    """Write *msg* verbatim to standard error (no trailing newline added)."""
    print(msg, end="", file=sys.stderr)
def main(*args):
    """Command-line entry point.

    Parses the arguments, then either extracts a BEASTling config from an
    existing BEAST XML file (--extract) or generates a new analysis.
    Always exits the process (sys.exit) when done.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "config",
        help="Beastling configuration file(s) (or XML file if --extract is used)",
        default=None,
        nargs="+")
    parser.add_argument(
        "--extract",
        default=False,
        action="store_true",
        help="Extract configuration file (and possibly data files) from a BEASTling-generated XML file.")
    parser.add_argument(
        "--report",
        default=False,
        action="store_true",
        help="Save a high-level report on the analysis as a Markdown file.")
    parser.add_argument(
        "--language-list",
        default=False,
        action="store_true",
        help="Save a list of languages in the analysis as a plain text file.")
    parser.add_argument(
        "-o", "--output",
        help="Output filename, for example `-o analysis.xml`",
        default=None)
    parser.add_argument(
        "--overwrite",
        help="Overwrite an existing configuration file.",
        default=False,
        action="store_true")
    parser.add_argument(
        "--stdin",
        help="Read data from stdin.",
        default=False,
        action="store_true")
    parser.add_argument(
        "--prior", "--sample-from-prior", "-p",
        help="Generate XML file which samples from the prior, not posterior.",
        default=False,
        action="store_true")
    parser.add_argument(
        "-v", "--verbose",
        help="Display details of the generated analysis.",
        default=False,
        action="store_true")
    # `args or None`: with no explicit args, fall back to parsing sys.argv.
    args = parser.parse_args(args or None)
    if args.extract:
        do_extract(args)
    else:
        do_generate(args)
    sys.exit(0)
def do_extract(args):
    """Recover the embedded BEASTling config (and data) from a BEAST XML file.

    Exit codes: 1 = not exactly one input file, 2 = file does not exist,
    3 = extraction raised.  On success the extractor's messages are echoed
    to stdout.
    """
    if len(args.config) != 1:
        errmsg("Can only extract from exactly one BEAST XML file")
        sys.exit(1)
    xml_path = args.config[0]
    if not os.path.exists(xml_path):
        errmsg("No such BEAST XML file: %s\n" % args.config)
        sys.exit(2)
    try:
        messages = extract(xml_path, args.overwrite)
    except Exception:
        errmsg("Error encountered while extracting BEASTling config and/or data files:\n")
        traceback.print_exc()
        sys.exit(3)
    for message in messages:
        sys.stdout.write(message)
def do_generate(args):
    """Build a Configuration from the given file(s) and write the BEAST XML,
    plus optional Markdown report, GeoJSON, and language list.

    Exit codes: 1 = missing config file, 2 = config parse/process error,
    3 = XML build error, 4 = output exists and --overwrite not given.
    """
    # Make sure the requested configuration file exists
    for conf in args.config:
        if not os.path.exists(conf):
            errmsg("No such configuration file: %s\n" % conf)
            sys.exit(1)
    # Build but DON'T PROCESS the Config object
    # This is fast, and gives us enough information to check whether or not
    # we can write the output before doing the expensive processing.
    try:
        config = beastling.configuration.Configuration(
            configfile=args.config, stdin_data=args.stdin, prior=args.prior)
    except Exception as e:  # PRAGMA: NO COVER
        errmsg("Error encountered while parsing configuration file:\n")
        traceback.print_exc()
        sys.exit(2)
    # Make sure we can write to the appropriate output filename
    output_filename = args.output if args.output else config.basename + ".xml"
    if os.path.exists(output_filename) and not args.overwrite:
        errmsg("File %s already exists! Run beastling with the --overwrite option if you wish to overwrite it.\n" % output_filename)
        sys.exit(4)
    # Now that we know we will be able to save the resulting XML, we can take
    # the time to process the config object
    try:
        config.process()
    except Exception as e:
        errmsg("Error encountered while parsing configuration file:\n")
        traceback.print_exc()
        sys.exit(2)
    # Print messages
    ## Urgent messages are printed first, whether verbose mode is on or not
    for msg in config.urgent_messages:
        errmsg(msg + "\n")
    ## Non-urgent messages are next, but only if verbose mode is on
    if args.verbose:
        for msg in config.messages:
            errmsg(msg + "\n")
    # Build XML file
    try:
        xml = BeastXml(config)
    except Exception as e:
        errmsg("Error encountered while building BeastXML object:\n")
        traceback.print_exc()
        sys.exit(3)
    # Write XML file
    xml.write_file(output_filename)
    # Build and write report
    if args.report:
        report = BeastlingReport(config)
        report.write_file(config.basename + ".md")
        geojson = BeastlingGeoJSON(config)
        geojson.write_file(config.basename + ".geojson")
    # Build and write language list
    if args.language_list:
        write_language_list(config)
def write_language_list(config):
    """Dump the analysis' languages, one per line, to <basename>_languages.txt."""
    out_path = config.basename + "_languages.txt"
    lines = list(config.languages)
    lines.append("")  # produces the trailing newline after the final join
    with open(out_path, "w") as fp:
        fp.write("\n".join(lines))
| null | beastling/cli.py | cli.py | py | 5,078 | python | en | code | null | code-starcoder2 | 51 |
from collections import OrderedDict
from random import randint

# Demo: build two parallel mappings of i -> 2*i for i in 1..30 and print both.
# On Python 3.7+ plain dicts also preserve insertion order, so the two loops
# print identical sequences; OrderedDict is kept for illustration.
mydict = OrderedDict()
mydict1 = dict()

for i in range(1, 31):
    # BUG FIX: the original drew an unused random number (n = randint(1, 101))
    # on every iteration; it was never read, so it has been removed.
    mydict[i] = i * 2
    mydict1[i] = i * 2

for k, v in mydict.items():
    print(" for key {} value is {} ".format(k, v))

for k, v in mydict1.items():
    print(" for key {} value is {} ".format(k, v))
| null | com/ishaan/python/CollectionsExamples/OrderedDicts.py | OrderedDicts.py | py | 565 | python | en | code | null | code-starcoder2 | 51 |
105417969 | # graphicsUtils.py
# ----------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import sys
import math
import random
import string
import time
import types
import tkinter
_Windows = sys.platform == 'win32'  # True if on Win95/98/NT

# Module-level drawing state: one shared Tk root window and canvas, plus the
# bookkeeping used by the drawing helpers below.
_root_window = None      # The root window for graphics output
_canvas = None           # The canvas which holds graphics
_canvas_xs = None        # Size of canvas object
_canvas_ys = None
_canvas_x = None         # Current position on canvas
_canvas_y = None
_canvas_col = None       # Current colour (set to black below)
_canvas_tsize = 12
_canvas_tserifs = 0
def formatColor(r, g, b):
    """Convert r, g, b components in [0, 1] into an HTML '#rrggbb' string.

    Each channel is scaled by 255 and truncated to an integer.
    """
    return "#" + "".join("%02x" % int(channel * 255) for channel in (r, g, b))
def colorToVector(color):
    """Parse an '#rrggbb' string into [r, g, b] floats.

    Note: channels are divided by 256 (not 255), so this is not an exact
    inverse of formatColor -- preserved for compatibility with callers.
    """
    hex_pairs = (color[1:3], color[3:5], color[5:7])
    return [int(pair, 16) / 256.0 for pair in hex_pairs]
# Preferred text fonts per platform.
if _Windows:
    _canvas_tfonts = ['times new roman', 'lucida console']
else:
    _canvas_tfonts = ['times', 'lucidasans-24']
    pass  # XXX need defaults here
def sleep(secs):
    """Pause for *secs* seconds.

    Before the window exists this is a plain time.sleep; afterwards the Tk
    event loop is run for the duration so the GUI stays responsive.
    """
    global _root_window
    if _root_window is None:
        time.sleep(secs)
    else:
        _root_window.update_idletasks()
        # Schedule quit() after the delay, then block in mainloop until then.
        _root_window.after(int(1000 * secs), _root_window.quit)
        _root_window.mainloop()
def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None):
    """Create (or recreate) the module-global Tk window and canvas, paint the
    background, and bind all mouse/keyboard handlers.

    Note: the default `color` is evaluated once at import time.
    """
    global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color

    # Check for duplicate call
    if _root_window is not None:
        # Lose the window.
        _root_window.destroy()

    # Save the canvas size parameters
    _canvas_xs, _canvas_ys = width - 1, height - 1
    _canvas_x, _canvas_y = 0, _canvas_ys
    _bg_color = color

    # Create the root window
    _root_window = tkinter.Tk()
    _root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
    _root_window.title(title or 'Graphics Window')
    _root_window.resizable(0, 0)

    # Create the canvas object
    try:
        _canvas = tkinter.Canvas(_root_window, width=width, height=height)
        _canvas.pack()
        draw_background()
        _canvas.update()
    except:
        # Drop the half-built window so a retry starts clean, then re-raise.
        _root_window = None
        raise

    # Bind to key-down and key-up events
    _root_window.bind("<KeyPress>", _keypress)
    _root_window.bind("<KeyRelease>", _keyrelease)
    _root_window.bind("<FocusIn>", _clear_keys)
    _root_window.bind("<FocusOut>", _clear_keys)
    _root_window.bind("<Button-1>", _leftclick)
    _root_window.bind("<Button-2>", _rightclick)
    _root_window.bind("<Button-3>", _rightclick)
    _root_window.bind("<Control-Button-1>", _ctrl_leftclick)
    _clear_keys()
# Most recent click location of each kind, or None if no click has happened
# yet / the click was already consumed by wait_for_click().
_leftclick_loc = None
_rightclick_loc = None
_ctrl_leftclick_loc = None
def _leftclick(event):
    """Button-1 handler: remember where the left click happened."""
    global _leftclick_loc
    _leftclick_loc = (event.x, event.y)
def _rightclick(event):
    """Button-2/3 handler: remember where the right click happened."""
    global _rightclick_loc
    _rightclick_loc = (event.x, event.y)
def _ctrl_leftclick(event):
    """Ctrl+Button-1 handler: remember where the ctrl-left click happened."""
    global _ctrl_leftclick_loc
    _ctrl_leftclick_loc = (event.x, event.y)
def wait_for_click():
    """Poll (every 50 ms) until any mouse click arrives.

    Returns ((x, y), kind) where kind is 'left', 'right' or 'ctrl_left'.
    The recorded click is consumed (reset to None) before returning.
    """
    while True:
        global _leftclick_loc
        global _rightclick_loc
        global _ctrl_leftclick_loc
        if _leftclick_loc is not None:
            val = _leftclick_loc
            _leftclick_loc = None
            return val, 'left'
        if _rightclick_loc is not None:
            val = _rightclick_loc
            _rightclick_loc = None
            return val, 'right'
        if _ctrl_leftclick_loc is not None:
            val = _ctrl_leftclick_loc
            _ctrl_leftclick_loc = None
            return val, 'ctrl_left'
        sleep(0.05)
def draw_background():
    """Fill the entire canvas with the saved background colour."""
    w, h = _canvas_xs, _canvas_ys
    corners = [(0, 0), (0, h), (w, h), (w, 0)]
    polygon(corners, _bg_color, fillColor=_bg_color,
            filled=True, smoothed=False)
def _destroy_window(event=None):
    """Window-manager close handler: exits the whole program."""
    sys.exit(0)
    # global _root_window
    # _root_window.destroy()
    # _root_window = None
    # print "DESTROY"
def end_graphics():
    """Tear the window down after a final one-second pause.

    All module state is reset in the finally block, so begin_graphics() can
    be called again afterwards.
    """
    global _root_window, _canvas, _mouse_enabled
    try:
        try:
            sleep(1)
            if _root_window is not None:
                _root_window.destroy()
        except SystemExit as e:
            print('Ending graphics raised an exception:', e)
    finally:
        _root_window = None
        _canvas = None
        _mouse_enabled = 0
        _clear_keys()
def clear_screen(background=None):
    """Delete every canvas item and repaint the background.

    The *background* parameter is accepted for interface compatibility but
    is not used.
    """
    global _canvas_x, _canvas_y
    _canvas.delete('all')
    draw_background()
    _canvas_x = 0
    _canvas_y = _canvas_ys
def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
    """Draw a polygon from a list of (x, y) points; returns the canvas id.

    fillColor defaults to outlineColor; filled=0 draws only the outline;
    behind > 0 lowers the item in the stacking order.
    """
    flat = []
    for point in coords:
        flat += (point[0], point[1])
    if fillColor is None:
        fillColor = outlineColor
    if filled == 0:
        fillColor = ""
    item = _canvas.create_polygon(
        flat, outline=outlineColor, fill=fillColor, smooth=smoothed, width=width)
    if behind > 0:
        _canvas.tag_lower(item, behind)  # Higher should be more visible
    return item
def square(pos, r, color, filled=1, behind=0):
    """Axis-aligned square of half-width r centred at pos, via polygon()."""
    cx, cy = pos
    corners = [(cx - r, cy - r), (cx + r, cy - r),
               (cx + r, cy + r), (cx - r, cy + r)]
    return polygon(corners, color, color, filled, 0, behind=behind)
def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
    """Draw an arc/circle of radius r at pos; returns the canvas item id.

    endpoints is an optional (start, stop) pair in degrees; stop is bumped by
    full turns until it is >= start, because create_arc needs a non-negative
    extent.
    """
    x, y = pos
    x0, x1 = x - r - 1, x + r
    y0, y1 = y - r - 1, y + r
    if endpoints is None:
        e = [0, 359]
    else:
        e = list(endpoints)
    while e[0] > e[1]:
        e[1] = e[1] + 360

    return _canvas.create_arc(x0, y0, x1, y1, outline=outlineColor, fill=fillColor,
                              extent=e[1] - e[0], start=e[0], style=style, width=width)
def image(pos, file="../../blueghost.gif"):
    """Place a GIF on the canvas anchored at its top-left corner.

    NOTE(review): the PhotoImage is created inline with no reference kept;
    Tkinter images can be garbage-collected and render blank -- confirm this
    behaves as intended in the calling code.
    """
    x, y = pos
    # img = PhotoImage(file=file)
    return _canvas.create_image(x, y, image=tkinter.PhotoImage(file=file), anchor=tkinter.NW)
def refresh():
    """Flush pending canvas redraws without processing user events."""
    _canvas.update_idletasks()
def moveCircle(id, pos, r, endpoints=None):
    """Reposition an existing arc item (see circle()) to be centred on pos,
    updating its start angle and extent from *endpoints*."""
    global _canvas_x, _canvas_y
    x, y = pos
    # x0, x1 = x - r, x + r + 1
    # y0, y1 = y - r, y + r + 1
    x0, x1 = x - r - 1, x + r
    y0, y1 = y - r - 1, y + r
    if endpoints is None:
        e = [0, 359]
    else:
        e = list(endpoints)
    # Same angle normalisation as circle(): extent must be non-negative.
    while e[0] > e[1]:
        e[1] = e[1] + 360

    edit(id, ('start', e[0]), ('extent', e[1] - e[0]))
    move_to(id, x0, y0)
def edit(id, *args):
    """Apply (option, value) pairs (e.g. ('start', 90)) to a canvas item."""
    _canvas.itemconfigure(id, **dict(args))
def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"):
    """Draw *contents* at pos in the given colour/font; returns the item id."""
    global _canvas_x, _canvas_y
    x, y = pos
    font_spec = (font, str(size), style)
    return _canvas.create_text(x, y, fill=color, text=contents,
                               font=font_spec, anchor=anchor)
def changeText(id, newText, font=None, size=12, style='normal'):
    """Replace the text of an existing text item; optionally restyle its font."""
    _canvas.itemconfigure(id, text=newText)
    if font is not None:
        _canvas.itemconfigure(id, font=(font, '-%d' % size, style))
def changeColor(id, newColor):
    """Recolour (fill) an existing canvas item."""
    _canvas.itemconfigure(id, fill=newColor)
def line(here, there, color=formatColor(0, 0, 0), width=2):
    """Draw a straight segment between two points; returns the canvas id.

    Note: the default colour is evaluated once, at import time.
    """
    start_x, start_y = here[0], here[1]
    end_x, end_y = there[0], there[1]
    return _canvas.create_line(start_x, start_y, end_x, end_y,
                               fill=color, width=width)
##############################################################################
### Keypress handling ########################################################
##############################################################################

# We bind to key-down and key-up events.
_keysdown = {}     # keysym -> 1 for every key currently held down
_keyswaiting = {}  # keysym -> 1 for keys pressed since the last keys_waiting()
# This holds an unprocessed key release.  We delay key releases by up to
# one call to keys_pressed() to get round a problem with auto repeat.
_got_release = None
def _keypress(event):
    """KeyPress handler: mark the key as held down and as waiting."""
    global _got_release
    # remap_arrows(event)
    _keysdown[event.keysym] = 1
    _keyswaiting[event.keysym] = 1
    # print event.char, event.keycode
    # A fresh press cancels any pending delayed release.
    _got_release = None
def _keyrelease(event):
    """KeyRelease handler: drop the key from the held-down set."""
    global _got_release
    # remap_arrows(event)
    try:
        del _keysdown[event.keysym]
    except:
        # Key may not be present (e.g. focus changes); ignore.
        pass
    _got_release = 1
def remap_arrows(event):
    """Translate arrow-key events into 'a'/'w'/'d'/'s' characters.

    (SHOULD BE IN KEYBOARD AGENT.)  Events whose char is already one of
    a/s/d/w are left untouched.  Keycode pairs cover Windows and X11.
    """
    if event.char in ['a', 's', 'd', 'w']:
        return
    arrow_to_char = {
        37: 'a', 101: 'a',   # LEFT ARROW (win / x)
        38: 'w', 99: 'w',    # UP ARROW
        39: 'd', 102: 'd',   # RIGHT ARROW
        40: 's', 104: 's',   # DOWN ARROW
    }
    if event.keycode in arrow_to_char:
        event.char = arrow_to_char[event.keycode]
def _clear_keys(event=None):
    """Forget all recorded key state (bound to focus-change events)."""
    global _keysdown, _got_release, _keyswaiting
    _keysdown = {}
    _keyswaiting = {}
    _got_release = None
def keys_pressed(d_o_e=lambda arg: _root_window.dooneevent(arg),
                 d_w=tkinter._tkinter.DONT_WAIT):
    """Return the list of keysyms currently held down.

    d_o_e / d_w default to a non-blocking pump of the Tk event loop; the
    lambda closes over the module global, so it picks up the window created
    later by begin_graphics().
    """
    d_o_e(d_w)
    if _got_release:
        # Pump one more event to flush a delayed key release (works around
        # keyboard auto-repeat generating release/press pairs).
        d_o_e(d_w)
    return list(_keysdown.keys())
def keys_waiting():
    """Return keys pressed since the previous call, clearing the record."""
    global _keyswaiting
    pending, _keyswaiting = list(_keyswaiting.keys()), {}
    return pending
# Block for a list of keys...


def wait_for_keys():
    """Poll until at least one key is down; returns the list of keysyms.

    Note: a 50 ms sleep follows every poll, including the successful one,
    matching the original behaviour.
    """
    keys = []
    while not keys:
        keys = keys_pressed()
        sleep(0.05)
    return keys
def remove_from_screen(x,
                       d_o_e=lambda arg: _root_window.dooneevent(arg),
                       d_w=tkinter._tkinter.DONT_WAIT):
    """Delete canvas item x, then pump one Tk event so the change shows."""
    _canvas.delete(x)
    d_o_e(d_w)
def _adjust_coords(coord_list, x, y):
    """Shift a flat [x0, y0, x1, y1, ...] list in place by (x, y); returns it."""
    for i in range(0, len(coord_list), 2):
        coord_list[i] += x
        coord_list[i + 1] += y
    return coord_list
def move_to(object, x, y=None,
            d_o_e=lambda arg: _root_window.dooneevent(arg),
            d_w=tkinter._tkinter.DONT_WAIT):
    """Translate a canvas item so its first coordinate point lands on (x, y).

    x may also be a single (x, y) pair.  The whole item is shifted rigidly
    by the offset between its current first point and the target.
    """
    if y is None:
        try:
            x, y = x
        except:
            raise RuntimeError('incomprehensible coordinates')

    # Canvas coords alternate x, y, x, y, ...; `horiz` tracks which axis the
    # current value belongs to.
    horiz = True
    newCoords = []
    current_x, current_y = _canvas.coords(object)[0:2]  # first point
    for coord in _canvas.coords(object):
        if horiz:
            inc = x - current_x
        else:
            inc = y - current_y
        horiz = not horiz
        newCoords.append(coord + inc)

    _canvas.coords(object, *newCoords)
    d_o_e(d_w)
def move_by(object, x, y=None,
            d_o_e=lambda arg: _root_window.dooneevent(arg),
            d_w=tkinter._tkinter.DONT_WAIT):
    """Translate a canvas item by the relative offset (x, y).

    x may also be a single (x, y) pair.
    """
    if y is None:
        try:
            x, y = x
        except:
            raise Exception('incomprehensible coordinates')

    # Canvas coords alternate x, y, x, y, ...; add the matching offset.
    horiz = True
    newCoords = []
    for coord in _canvas.coords(object):
        if horiz:
            inc = x
        else:
            inc = y
        horiz = not horiz
        newCoords.append(coord + inc)

    _canvas.coords(object, *newCoords)
    d_o_e(d_w)
def writePostscript(filename):
    """Write the current canvas to a PostScript file.

    BUG FIX: the original used the Python 2 builtin ``file(...)``, which does
    not exist in Python 3 (this module already uses the Python 3 ``tkinter``
    import, so the call always raised NameError).  Use ``open`` inside a
    ``with`` block so the handle is also closed on error.
    """
    with open(filename, 'w') as psfile:
        psfile.write(_canvas.postscript(pageanchor='sw',
                                        y='0.c',
                                        x='0.c'))
# Pacman-style ghost outline in unit coordinates (callers scale/offset it;
# see the demo in __main__ below).
ghost_shape = [
    (0, - 0.5),
    (0.25, - 0.75),
    (0.5, - 0.5),
    (0.75, - 0.75),
    (0.75, 0.5),
    (0.5, 0.75),
    (- 0.5, 0.75),
    (- 0.75, 0.5),
    (- 0.75, - 0.75),
    (- 0.5, - 0.5),
    (- 0.25, - 0.75)
]
if __name__ == '__main__':
    # Smoke test: open a window, draw a scaled ghost and an arc, pause 2 s.
    begin_graphics()
    clear_screen()
    ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
    g = polygon(ghost_shape, formatColor(1, 1, 1))
    move_to(g, (50, 50))
    circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
    sleep(2)
| null | week03_model_free/crawler_and_pacman/seminar_py3/graphicsUtils.py | graphicsUtils.py | py | 11,760 | python | en | code | null | code-starcoder2 | 51 |
# Heuristic Greedy algorithm to deal with the Knapsack problem
import readData

print("Greedy Algorithm")
print("- - - - - - - - - - - - - - - - - ")

# NOTE(review): from the indexing below, data appears to be a list of
# [value, weight, index] triples and capacity the knapsack limit -- confirm
# against readData.main().
data, capacity = readData.main()
print("DATA____:", data, "CAPACIRY____:", capacity)

container = []  # the container to contain the objects in
solution = [0] * len(data)  # binary representation of the solution
ratios = []  # an array to store the ratios of each object
count = 0  # a count to track the weights of objects before adding them to the container

# For loop to get the ratios of value/weight and add the ratios of each object to the container
for i in range(len(data)):
    ratio = data[i][0] / data[i][1]
    ratios.append([ratio, data[i][2]])

print(data)
print(ratios)

# Sort the resulting ratios in descending order
decreasing = sorted(ratios, key=lambda x: x[0], reverse=True)
print(decreasing)

# For loop to add the objects to the container without exceeding the capacity of the container
for i in range(len(decreasing)):
    for j in range(len(data)):
        # match the sorted ratio entry back to its object via the index field
        if (decreasing[i][1] == data[j][2]):
            if (data[j][1] <= capacity):
                # only take the object if it still fits with what's packed
                if (count + data[j][1] <= capacity):
                    count = count + data[j][1]
                    container.append(data[j])
                    index = data[j][2]
                    solution[index] = 1

print("\nWeights", count)
print("Here's the resulting container: ", container)
print("\nBinary string representation of the soultion: ", solution)

# CHROMO layout: [[binary solution], fitness, total weight]
# NOTE(review): [] * 3 is just []; the three slots are filled by the appends.
CHROMO = [] * 3
fitness = 0
totalWeight = 0
for j in range(len(solution)):
    if (solution[j] == 1):
        fitness += data[j][0]
        totalWeight += data[j][1]

CHROMO.append(solution)
CHROMO.append(fitness)
CHROMO.append(totalWeight)
print("!!result!!:", CHROMO)
print("Total value: ", CHROMO[1],
      " Total weight: ", CHROMO[2])
| null | greedy.py | greedy.py | py | 1,870 | python | en | code | null | code-starcoder2 | 51 |
class Solution(object):
    """LeetCode 1346: Check If N and Its Double Exist."""

    def checkIfExist(self, arr):
        """Return True iff there are distinct indices i, j with arr[i] == 2 * arr[j].

        :type arr: List[int]
        :rtype: bool

        Improvement: the original copied the entire list on every iteration
        (O(n^2) time plus n list copies) and had an unreachable ``break``
        after a ``return``.  A single pass with a set of previously seen
        values is O(n) time, O(n) space.
        """
        seen = set()
        for num in arr:
            # A match exists if some earlier value is the double of num, or
            # num is even and its half appeared earlier.
            if num * 2 in seen or (num % 2 == 0 and num // 2 in seen):
                return True
            seen.add(num)
        return False
| null | Python/1346_Check_If_N_and_Its_Double_Exist.py | 1346_Check_If_N_and_Its_Double_Exist.py | py | 359 | python | en | code | null | code-starcoder2 | 51 |
264514867 | """
To draw Ref marker and Stop marker on A4 paper
(A4 size = 210 x 297 mm)
"""
import cv2
import numpy as np
paper_size = 2100, 2100
center = paper_size[0]//2, paper_size[1]//2
cross_size = 20
normal_marker = np.ones(paper_size)*255
cv2.circle(normal_marker, center, 940, (180, 180, 180), thickness=2)
cv2.circle(normal_marker, center, 600, (0, 0, 0), thickness=-1)
cv2.circle(normal_marker, center, 380, (255, 255, 255), thickness=-1)
cv2.circle(normal_marker, center, 190, (0, 0, 0), thickness=-1)
rectangle_points = (center[0], center[1] - cross_size), (center[0], center[1] + cross_size)
cv2.rectangle(normal_marker, rectangle_points[0], rectangle_points[1], (200, 200, 200), cross_size//2)
rectangle_points = (center[0] - cross_size, center[1]), (center[0] + cross_size, center[1])
cv2.rectangle(normal_marker, rectangle_points[0], rectangle_points[1], (200, 200, 200), cross_size//2)
stop_marker = np.ones(paper_size)*255
cv2.circle(stop_marker, center, 940, (0, 0, 0), thickness=-1)
cv2.circle(stop_marker, center, 600, (255, 255, 255), thickness=-1)
cv2.circle(stop_marker, center, 380, (0, 0, 0), thickness=-1)
cv2.circle(stop_marker, center, 190, (255, 255, 255), thickness=-1)
markers = np.bmat([[normal_marker, normal_marker], [stop_marker, stop_marker]])
cv2.imwrite("New_markers_A4.jpg", markers)
| null | draw_marker.py | draw_marker.py | py | 1,319 | python | en | code | null | code-starcoder2 | 51 |
499967764 | """
Author : ParkEunsik
Date : 2019/07/16
url : https://www.acmicpc.net/problem/11650
"""
import sys
from operator import itemgetter
coor = []
for i in range(int(sys.stdin.readline())):
coor.append(list(map(int, sys.stdin.readline().split())))
coor.sort(key=itemgetter(0, 1)) # 우선적으로 y정렬하고 그 다음 x 정렬
for i in range(len(coor)):
print(coor[i][0], coor[i][1])
| null | 11650/11650.py | 11650.py | py | 414 | python | en | code | null | code-starcoder2 | 51 |
519429431 | # created: 2018-12-01
# see 'parser' for expected command-line arguments
# goal: be FAST!!!
''' to test, run something like:
python3 compare_inventories.py inventory-files_do-not-add-to-git/2018-12-15-imac-pro.jsonl inventory-files_do-not-add-to-git/2018-12-29-imac-pro.jsonl > /tmp/out
TODOs:
- display moved files better as a correspondence between first -> second
- add a FAST MODE that doesn't try to detect directory/file moves
- is sorted() really needed at some parts? does it really speed things up?
- wait, if i can detect whether a directory was moved from A to B, but
more stuff was added into B, then i should be able to detect whether
something was moved from A to B, with stuff deleted from B, since it's
SYMMETRIC - you just run the detection algorithm *backwards*, thinking of
it as something moving from B to A, with more stuff added to A
- get this test working:
./run-test.sh tests/simple-dir-move-plus-delete-files/
'''
# include slashes in dirnames to prevent spurious substring matches
# (should_ignore() inside compare_inventories does a naive substring match
# of each entry against the full directory path)
DEFAULT_IGNORE_DIRS = ['directory-tree-inventory/inventory-files_do-not-add-to-git',
                       '/.git',
                       '/node_modules',
                       '/__pycache__',
                       '.dropbox.cache'] # don't have a leading slash '/' so we can ignore dropbox cache at the top level too
# OS / file-manager metadata files that are never interesting to diff
DEFAULT_IGNORE_FILENAMES = ['Thumbs.db', 'thumbs.db', '.DS_Store', 'Icon\r'] # 'Icon\r' doesn't print properly anyhow, #weird
# (directory substring, extension) pairs encoded as 'dir,ext' strings;
# split on ',' at parse time in compare_inventories
DEFAULT_IGNORE_DIREXTS = ['pgbovine,.htm', 'pgbovine,.html']
# default for --summary_threshold: summarize a dir once it (recursively)
# holds more than this many files
DEFAULT_SUMMARY_THRESHOLD = 10
import argparse
import json
import os
import sys
import time
import datetime
from collections import Counter, defaultdict
# requires python >= 3.5 to get os.walk to use the MUCH faster os.scandir function
assert float(sys.version[:3]) >= 3.5
prev_time = time.time()  # start the clock as soon as the module loads
def print_time_elapsed(label):
    """Record a timing checkpoint; the elapsed-time print is currently silenced."""
    global prev_time
    now = time.time()
    # print(f'{label}: {int(now - prev_time)}s')  # re-enable for timing output
    prev_time = now
# parses an inventory file created by create_inventory() in create_inventory.py
# and returns a dict
def parse_inventory_file(filename):
    """Parse a JSONL inventory file and index its records several ways.

    The first line of the file is a metadata record; each subsequent line is
    a file record with keys: d (dirname), f (filename), e (extension),
    mt (modtime), sz (size in bytes), and optionally crc32.

    Returns a dict with keys 'metadata', 'records_by_path',
    'records_by_modtime', 'records_by_filesize', and -- only when at least
    one record carries a crc32 -- 'records_by_crc32'.
    """
    assert os.path.isfile(filename)

    metadata = None
    # index the records a few different ways:
    records_by_path = {}                     # key: (dirname, filename) tuple
    records_by_modtime = defaultdict(list)   # key: modtime
    records_by_filesize = defaultdict(list)  # key: file size
    records_by_crc32 = defaultdict(list)     # key: crc32 hash value
    n_records = 0

    for line in open(filename):
        record = json.loads(line)
        if not metadata:
            metadata = record  # first line of the file is metadata
            continue
        n_records += 1
        dirname, fname, ext = record['d'], record['f'], record['e']
        modtime, filesize = record['mt'], record['sz']
        assert (dirname, fname) not in records_by_path
        records_by_path[(dirname, fname)] = record
        records_by_modtime[modtime].append(record)
        records_by_filesize[filesize].append(record)
        if 'crc32' in record:
            records_by_crc32[record['crc32']].append(record)

    # clean up metadata: replace the raw timestamp with a readable UTC string
    metadata['dt'] = datetime.datetime.utcfromtimestamp(metadata['ts']).strftime('%Y-%m-%d %H:%M:%S UTC')
    del metadata['ts']
    if not metadata['ignore_dirs']:
        del metadata['ignore_dirs']

    ret = dict(metadata=metadata,
               records_by_path=records_by_path,
               records_by_modtime=records_by_modtime,
               records_by_filesize=records_by_filesize)
    if records_by_crc32:
        ret['records_by_crc32'] = records_by_crc32

    # sanity checks: every record landed in every index exactly once
    assert len(records_by_path) == n_records
    assert sum(len(v) for v in records_by_modtime.values()) == n_records
    assert sum(len(v) for v in records_by_filesize.values()) == n_records
    return ret
# create a tree from a list of files, with each dt node entry containing:
#   full_dirpath: tuple of strings (so that it's hashable), each containing
#                 one directory name on the path from root to the current dir
#   files: list of file entries in this directory
#   subdirs: dict of subdirectories - key: name, value: dt node
def create_dirtree(files_lst):
    """Build a nested directory-tree dict from a flat list of file entries.

    Each entry in files_lst must have a 'dirs' tuple of path components;
    a one-element ('',) tuple means the root directory.  The returned root
    node (and every descendant) is augmented with a 'metadata' stats dict
    by augment_dirtree_with_metadata().
    """
    root = dict(files=[],
                subdirs={},           # key: subdirectory name, value: child node
                full_dirpath=tuple()) # root has no path components
    for entry in files_lst:
        parts = entry['dirs']
        if len(parts) == 1 and parts[0] == '':
            # file lives at the top-level root directory
            root['files'].append(entry)
            continue
        # walk down parts, creating intermediate nodes as needed
        node = root
        for depth, name in enumerate(parts):
            child = node['subdirs'].get(name)
            if child is None:
                # full_dirpath is just the prefix of parts up to this level
                child = dict(files=[], subdirs={},
                             full_dirpath=tuple(parts[:depth+1]))
                node['subdirs'][name] = child
            node = child
        node['files'].append(entry)

    augment_dirtree_with_metadata(root)  # attach stats to every node

    # sanity check: every input file landed somewhere in the tree
    assert len(files_lst) == root['metadata']['total_num_files']
    return root


# after a directory tree has been fully constructed with create_dirtree,
# run this to AUGMENT each entry with a metadata dict containing stats
# such as: num_files, num_subdirs, total_num_files, total_num_subdirs
# where the latter 2 *recursively* count the total number of files and
# subdirectories within this one
# (note that this is a different kind of metadata than the one mentioned
# in parse_inventory_file)
def augment_dirtree_with_metadata(dt):
    """Attach a 'metadata' stats dict to every node of a dirtree in place.

    num_files / num_subdirs count this level only; total_num_files /
    total_num_subdirs count recursively through all descendants.
    """
    def recurse(node):
        stats = {}
        node['metadata'] = stats
        # counts for only this level in the directory tree:
        stats['num_files'] = len(node['files'])
        stats['num_subdirs'] = len(node['subdirs'])
        # start the recursive totals at this level's counts ...
        stats['total_num_files'] = stats['num_files']
        stats['total_num_subdirs'] = stats['num_subdirs']
        # ... then fold in each child's totals
        for child in node['subdirs'].values():
            sub_files, sub_dirs = recurse(child)
            stats['total_num_files'] += sub_files
            stats['total_num_subdirs'] += sub_dirs
        return (stats['total_num_files'], stats['total_num_subdirs'])
    recurse(dt)
# metadata is a dict; serialize it deterministically by sorting its items
def serialize_metadata(md):
    """Return a canonical (order-independent) string form of a metadata dict."""
    items = sorted(md.items())
    return str(items)
# returns the entry within dt that's referred to by dirpath
#   dt: a directory tree created with create_dirtree
#   dirpath: sequence of ordered directory names
# (raises KeyError if dirpath isn't in dt)
def get_directory_entry(dt, dirpath):
    """Walk dt along dirpath and return the node it names.

    An empty dirpath returns dt itself.  A missing path component raises
    KeyError.
    """
    node = dt
    for name in dirpath:
        node = node['subdirs'][name]
    return node
# generator that traverses a dirtree object in pre-order (I think?)
# TODO: rewrite other functions using this generator
def gen_dirtree_entries(dt):
    """Yield every node of the dirtree depth-first, each parent before its children."""
    stack = [dt]
    while stack:
        node = stack.pop()
        yield node
        # push children reversed so they pop (and yield) in insertion order
        stack.extend(reversed(list(node['subdirs'].values())))
# iterate over dt and run func on each entry in pre-order.
# if func returns True, then return early and don't recurse inward
# into its subdirectories. otherwise keep recursing inward
def dirtree_foreach(dt, func):
    """Apply func to each dirtree node pre-order; a truthy return prunes that subtree."""
    def visit(node):
        if func(node):
            return  # func asked us not to descend into this node's children
        for child in node['subdirs'].values():
            visit(child)
    visit(dt)
def make_dirtuple(dirpath):
    """Split a '/'-separated path string into a hashable tuple of components."""
    parts = dirpath.split('/')
    return tuple(parts)

def dirtuple_to_path(dt):
    """Inverse of make_dirtuple: join path components back with '/'."""
    return '/'.join(dt)

# extract the identity pair (dirs tuple, filename) from a file entry dict
def get_path_from_file(e):
    """Return the (dirs, fn) pair that uniquely identifies a file entry."""
    return (e['dirs'], e['fn'])
from enum import Enum
class DirtreeCompareState(Enum):
    """Result of dirtree_compare(): how two directory trees relate."""
    UNKNOWN = 1          # trees differ in some non-subset way (comparison inconclusive)
    A_SUPERSET_OF_B = 2  # everything in B also appears in A, and A has extras
    B_SUPERSET_OF_A = 3  # everything in A also appears in B, and B has extras
    EQUAL = 4            # same filenames/sizes and same subdir structure throughout
# compare two directory trees to see if they match in terms of
# constituent files (name, size, modtime matches) and sub-directories,
# or whether dtA is a strict superset of dtB,
# or whether dtB is a strict superset of dtA
#
# we use only filenames and sizes for comparisons, which might result
# in some false positives since file contents might have changed but
# sizes remain the same!
#
# TODO: maybe implement a heuristic of:
# if dtA or dtB have ZERO FILES and zero subdirs in them at the top level,
# then return UNKNOWN since we really don't have enough info to determine
# ... also it's kinda weird to say that something is a superset of an
# EMPTY directory, since it's technically true but doesn't tell us much
def dirtree_compare(dtA, dtB):
    """Return a DirtreeCompareState describing how dtA relates to dtB.

    Traverses both trees in lockstep; any level that contradicts the
    running verdict (e.g. A-superset at one level, B-superset at another)
    makes the whole comparison UNKNOWN.
    """
    # start assuming they're equal and then have our traversal disprove it
    status = DirtreeCompareState.EQUAL
    def helper(dtA_entry, dtB_entry):
        nonlocal status # argh

        # note we don't check full_dirpath because dtA and dtB can have
        # vastly different paths but still contain the same files and
        # sub-directory names

        # each element contains the following identifying metadata about
        # each file that we will compare: fn (filename), size (# bytes)
        # (we don't include 'dirs' since the exact directory names will
        #  be, by definition, different, between two directory trees)
        # [TODO: incorporate crc32 if available]
        # [TODO: incorporate an approximate check for e['mt'] within a
        #  minute if not ignore_modtimes]
        dtA_files = set((e['fn'], e['size']) for e in dtA_entry['files'])
        dtB_files = set((e['fn'], e['size']) for e in dtB_entry['files'])

        if dtA_files == dtB_files:
            pass # keep going
        elif dtA_files.issuperset(dtB_files):
            # contradiction with an earlier B-superset verdict -> UNKNOWN
            if status == DirtreeCompareState.B_SUPERSET_OF_A:
                return False
            status = DirtreeCompareState.A_SUPERSET_OF_B
        elif dtB_files.issuperset(dtA_files):
            # contradiction
            if status == DirtreeCompareState.A_SUPERSET_OF_B:
                return False
            status = DirtreeCompareState.B_SUPERSET_OF_A
        else:
            return False # bad!

        # now compare subdir names (names only; contents are checked on recursion)
        dtA_subdir_names = set(dtA_entry['subdirs'])
        dtB_subdir_names = set(dtB_entry['subdirs'])

        if dtA_subdir_names == dtB_subdir_names:
            pass # carry on
        elif dtA_subdir_names.issuperset(dtB_subdir_names):
            # contradiction
            if status == DirtreeCompareState.B_SUPERSET_OF_A:
                return False
            status = DirtreeCompareState.A_SUPERSET_OF_B
        elif dtB_subdir_names.issuperset(dtA_subdir_names):
            # contradiction
            if status == DirtreeCompareState.A_SUPERSET_OF_B:
                return False
            status = DirtreeCompareState.B_SUPERSET_OF_A
        else:
            return False # bad!

        # recurse into the *intersection* of subdirs so that they exist
        # in both dtA and dtB:
        common_subdirs = dtA_subdir_names & dtB_subdir_names

        # recurse inward ...
        for k in common_subdirs:
            # a single False and we're DONE FOR!
            ret = helper(dtA_entry['subdirs'][k], dtB_entry['subdirs'][k])
            if not ret:
                return False

        return True # we made it all the way!

    ret = helper(dtA, dtB)
    if not ret:
        status = DirtreeCompareState.UNKNOWN
    return status
# dt: created by create_dirtree
# summary_threshold: don't recurse anymore if the current level
#                    (recursively) has more than N files
#
# min_levels_to_recurse: force recursing into at least N levels, even
#                        if there are more than summary_threshold files at some levels
#
# if you pass in None, then assume infinite threshold or levels
#
# TODO: can we re-write this using dirtree_foreach? that would seem cleaner :)
def pretty_print_dirtree(dt, summary_threshold, min_levels_to_recurse,
                         aux_dict_repr, hide_empty_dirs=False):
    """Print an indented listing of the dirtree, summarizing large directories.

    aux_dict_repr: callable mapping a file entry dict to a string printed
    after the filename.  hide_empty_dirs skips directories with zero files
    (recursively).
    """
    if summary_threshold is None: summary_threshold = float('inf')
    if min_levels_to_recurse is None: min_levels_to_recurse = float('inf')

    def print_helper(cur_entry, level):
        n_files_recursive = cur_entry['metadata']['total_num_files']
        if hide_empty_dirs and n_files_recursive == 0:
            return

        # two spaces of indentation per tree level
        prefix = ('  ' * level)
        prefix_plus_one = ('  ' * (level+1))

        if cur_entry['full_dirpath']:
            dirname = cur_entry['full_dirpath'][-1] # get the last entry
        else:
            # no elements in full_dirpath means the root directory
            dirname = ''

        print(prefix + '/' + dirname)
        #print(prefix + '/' + dirname + ' : ' + str(cur_entry['metadata'])) # more verbose with metadata

        # base case - get out and don't recurse anymore
        # use min_levels_to_recurse to force it into at least N levels
        if level >= min_levels_to_recurse and n_files_recursive > summary_threshold:
            print(f'{prefix_plus_one}[{n_files_recursive} files and possible sub-directories]')
        # recursive case
        else:
            n_files_in_cur_level = cur_entry['metadata']['num_files']
            assert n_files_in_cur_level == len(cur_entry['files'])
            # if n_files_in_cur_level is above summary_threshold, then
            # summarize this level but still recurse
            if n_files_in_cur_level > summary_threshold:
                print(f'{prefix_plus_one}[{n_files_in_cur_level} files]')
            else:
                # otherwise list out all the files
                for f in cur_entry['files']:
                    print(f'{prefix_plus_one}{f["fn"]} {aux_dict_repr(f)}')

            # now recurse! sort to print in alphabetical order
            for k in sorted(cur_entry['subdirs']):
                print_helper(cur_entry['subdirs'][k], level+1)

    print_helper(dt, 0)
# compare inventories produced by parse_inventory_file
# you can pass in optional paths to ignore
def compare_inventories(first, second, summary_threshold, min_levels_to_recurse,
                        ignore_modtimes=False,
                        ignore_dirs=[],
                        ignore_filenames=[],
                        ignore_exts=[],
                        ignore_direxts=[],
                        quiet=False):
    """Diff two parsed inventories, printing changed, moved, added, and deleted files.

    first, second: dicts returned by parse_inventory_file()
    summary_threshold / min_levels_to_recurse: forwarded to pretty_print_dirtree
    ignore_modtimes: skip the modification-time heuristics when True
    ignore_dirs / ignore_filenames / ignore_exts / ignore_direxts: extra
        ignore patterns, appended to the module-level DEFAULT_* lists
    quiet: suppress the header/metadata printout

    NOTE(review): the mutable list defaults are rebound to fresh lists below
    when empty, so the defaults themselves never accumulate state; however a
    caller-supplied non-empty list IS mutated by the `+=` appends -- confirm
    no caller relies on its list staying unchanged.
    """
    # make sure they start as empty lists
    if not ignore_dirs: ignore_dirs = []
    if not ignore_filenames: ignore_filenames = []
    if not ignore_exts: ignore_exts = []
    if not ignore_direxts: ignore_direxts = []

    # append defaults:
    ignore_dirs += DEFAULT_IGNORE_DIRS
    ignore_filenames += DEFAULT_IGNORE_FILENAMES
    ignore_direxts += DEFAULT_IGNORE_DIREXTS
    # parse it: each 'dir,ext' string becomes a (dir, ext) tuple
    ignore_direxts = [tuple(e.split(',')) for e in ignore_direxts]
    for e in ignore_direxts: assert len(e) == 2

    # make sure extensions start with '.'!
    for e in ignore_exts:
        assert e[0] == '.'

    # e is a (dirname, filename) pair; True means "skip this file entirely"
    def should_ignore(e):
        for d in ignore_dirs:
            if d in e[0]: # naive substring match
                return True
        if e[1] in ignore_filenames:
            return True
        ext = os.path.splitext(e[1])[1].lower() # extension - LOWERCASE IT for simplicity
        if ext in ignore_exts:
            return True
        # simultaneously match both a directory (e[0]) and an extension
        if (e[0], ext) in ignore_direxts:
            return True
        return False

    if not quiet:
        print(f'ignore_dirs: {ignore_dirs}\nignore_filenames: {ignore_filenames}\nignore_exts: {ignore_exts}\nignore_direxts: {ignore_direxts}\nsummary_threshold: {summary_threshold}\nmin_levels_to_recurse: {min_levels_to_recurse}\n')
        print('---')
        print('First: ', first['metadata'])
        print('Second:', second['metadata'])
        print('---')

    first_rbp = first['records_by_path']
    second_rbp = second['records_by_path']

    # filter out should_ignore() as early as possible for efficiency!
    first_rbp_keys = set(e for e in first_rbp if not should_ignore(e))
    second_rbp_keys = set(e for e in second_rbp if not should_ignore(e))

    in_first_but_not_second = first_rbp_keys.difference(second_rbp_keys)
    in_second_but_not_first = second_rbp_keys.difference(first_rbp_keys)
    in_both = first_rbp_keys.intersection(second_rbp_keys)

    print_time_elapsed('B')

    changed_files = []
    # for files in both first and second, compare their metadata
    for e in in_both:
        first_data = first_rbp[e]
        second_data = second_rbp[e]

        modtimes_differ = False
        modtimes_diff_secs = 0
        sizes_differ = False
        sizes_diff_bytes = 0

        # use a heuristic for 'close enough' in terms of modtimes
        # (within a minute)
        if not ignore_modtimes:
            modtimes_diff_secs = round(second_data['mt'] - first_data['mt'])
            if abs(modtimes_diff_secs) > 60:
                modtimes_differ = True

        if first_data['sz'] != second_data['sz']:
            sizes_differ = True
            sizes_diff_bytes = second_data['sz'] - first_data['sz']

        # only report a change if the SIZE differs
        # (note that there may be false positives if size remains the
        #  same but internal bytes change)
        if sizes_differ:
            assert len(e) == 2
            changed_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],
                                      diff_secs=modtimes_diff_secs,
                                      diff_bytes=sizes_diff_bytes))

    changed_tree = create_dirtree(changed_files)
    print('files changed ...')
    # format a changed-file entry as "(time delta, byte delta)"
    def changed_repr(f):
        delta_bytes = None
        if f["diff_bytes"] > 0:
            delta_bytes = f'+{f["diff_bytes"]} bytes'
        elif f["diff_bytes"] < 0:
            delta_bytes = f'{f["diff_bytes"]} bytes'
        else:
            delta_bytes = 'NO SIZE CHANGE'
        return f'({str(datetime.timedelta(seconds=f["diff_secs"]))}, {delta_bytes})'
    pretty_print_dirtree(changed_tree, summary_threshold, min_levels_to_recurse, changed_repr)

    # all files in first
    first_files = []
    for e in first_rbp_keys:
        assert len(e) == 2
        entry = first_rbp[e]
        first_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],
                                size=entry['sz'], mt=entry['mt'], e=entry['e']))
    first_tree = create_dirtree(first_files)

    second_files = []
    for e in second_rbp_keys:
        assert len(e) == 2
        entry = second_rbp[e]
        second_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],
                                 size=entry['sz'], mt=entry['mt'], e=entry['e']))
    second_tree = create_dirtree(second_files)

    only_first_files = []
    # whoa it's much faster if you SORT first!
    # i suspect that create_dirtree works better if you feed it an
    # ordered list of paths, but i haven't empirically confirmed yet
    for e in sorted(in_first_but_not_second):
        assert len(e) == 2
        entry = first_rbp[e]
        only_first_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],
                                     size=entry['sz'], mt=entry['mt'], e=entry['e']))
    only_first_tree = create_dirtree(only_first_files)

    only_second_files = []
    for e in sorted(in_second_but_not_first): # whoa it's much faster if you SORT first!
        assert len(e) == 2
        entry = second_rbp[e]
        only_second_files.append(dict(dirs=make_dirtuple(e[0]), fn=e[1],
                                      size=entry['sz'], mt=entry['mt'], e=entry['e']))
    only_second_tree = create_dirtree(only_second_files)

    # experimental: look for directories and files that have POSSIBLY
    # been moved between only_first_tree and only_second_tree

    # look up files by filesize first, then check modtimes
    # (TODO: if crc32 exists, then use that!)
    only_second_files_by_filesize = defaultdict(list)
    for e in only_second_files:
        only_second_files_by_filesize[e['size']].append(e)

    # subset of only_first_files containing files that ...
    only_first_files_not_moved = [] # have likely NOT been moved to second
    only_first_files_moved = []     # have likely been moved to second

    # subset of only_second_files containing files that ...
    only_second_files_not_moved = [] # have likely NOT been moved from first
    only_second_files_moved = []     # have likely been moved from first

    print_time_elapsed('C')

    # VERY IMPORTANT so that we can do set membership checks in O(1) time
    # instead of doing O(n^2) list membership checks, which were SLOWWWW:
    only_second_files_moved_paths = set() # each element is (tuple(dirs), fn)

    for f in only_first_files:
        file_moved = False
        sz = f['size']
        modtime = f['mt']
        extension = f['e']
        # are there files with the same size in second?
        if sz in only_second_files_by_filesize:
            same_size_matches = only_second_files_by_filesize[sz]
            # should match file extension and modtime as well,
            # which is a pretty strict check!
            #
            # either we ignore modtimes or check if they're within 1 minute
            match_size_ext_modtime = [e for e in same_size_matches if
                                      e['e'] == extension and
                                      (True if ignore_modtimes else abs(e['mt'] - modtime) < 60)]
            if match_size_ext_modtime:
                file_moved = True
                # TODO: what about files with multiple matches due to redundancies?
                for m in match_size_ext_modtime:
                    m_path = get_path_from_file(m)
                    # don't double-add to list (use a set to do O(1) time
                    # redundancy lookups instead of O(n^2) if we used
                    # the list directly)
                    if m_path not in only_second_files_moved_paths:
                        only_second_files_moved.append(m)
                        only_second_files_moved_paths.add(m_path)

        if file_moved:
            only_first_files_moved.append(f)
        else:
            only_first_files_not_moved.append(f)

    print_time_elapsed('D')

    for f in only_second_files:
        f_path = get_path_from_file(f)
        if f_path not in only_second_files_moved_paths: # O(1) set membership check
            only_second_files_not_moved.append(f)

    print_time_elapsed('E')

    # consistency checks
    assert len(only_first_files_moved) + len(only_first_files_not_moved) == len(only_first_files)
    assert len(only_second_files_moved) + len(only_second_files_not_moved) == len(only_second_files)

    only_first_files_moved_tree = create_dirtree(only_first_files_moved)
    only_second_files_moved_tree = create_dirtree(only_second_files_moved)

    # checks to see if ENTIRE DIRECTORIES were likely moved from only_first_files_moved_tree
    # now do the *exact same symmetric check* to see what was likely
    # moved from only_second_files_moved_tree

    # each element is a pair of (dirpath in first, dirpath in second)
    # first moved verbatim over to second
    moved_directory_dirpaths = set()
    # first moved over to second, but second has MORE additional data inside
    # (so first is a subset of second)
    moved_directory_subset_dirpaths = set()

    # create a closure over my_full_tree and other_tree_to_check
    def make_directory_move_checker(my_full_tree, other_tree_to_check):
        # pass this into dirtree_foreach() ... returns True if you want to
        # break early and not recurse into children
        # - side-effect: adds entries to moved_directory_dirpaths
        def directory_move_checker(cur_entry):
            fdp = cur_entry['full_dirpath']
            # this is the metadata of the current directory within my_full_tree
            full_entry = get_directory_entry(my_full_tree, fdp)
            full_metadata = full_entry['metadata']

            # ok if this condition is true, that means that the ENTIRE contents
            # of the directory from my_full_tree does not appear in cur_entry,
            # which means that the entire directory wasn't moved. so get out!
            if full_metadata != cur_entry['metadata']:
                return False # return False early and keep recursing

            # check to make sure those trees are indeed equal
            # TODO: comment out in production if you want to speed things up
            assert dirtree_compare(cur_entry, full_entry) == DirtreeCompareState.EQUAL

            # now iterate through all nodes in other_tree_to_check and find any
            # that equal to or a *superset* of cur_entry.
            #
            # TODO: can optimize by stopping short whenever you find a node
            # whose metadata is *SMALLER* than cur_entry['metadata'] since
            # that can't possibly be a superset of cur_entry
            for other_entry in gen_dirtree_entries(other_tree_to_check):
                sdp = other_entry['full_dirpath']
                cur_to_other_comp = dirtree_compare(cur_entry, other_entry)
                # the entire contents of fdp has moved over to sdp
                if cur_to_other_comp == DirtreeCompareState.EQUAL:
                    moved_directory_dirpaths.add((fdp, sdp))
                    return True # if there's a real match, don't recurse
                # fdp is a subset of sdp, so that means everything
                # moved over but sdp has additional data inside
                elif cur_to_other_comp == DirtreeCompareState.B_SUPERSET_OF_A:
                    moved_directory_subset_dirpaths.add((fdp, sdp))
                    return True # if there's a real match, don't recurse

            return False # default, return False and keep recursing
        return directory_move_checker

    # symmetrically check both ends ...
    # TODO: optimize by eliminating redundancy later if necessary:
    dirtree_foreach(only_first_files_moved_tree,
                    make_directory_move_checker(first_tree, only_second_files_moved_tree))
    # TODO: get this to work ...
    #dirtree_foreach(only_second_files_moved_tree,
    #                make_directory_move_checker(second_tree, only_first_files_moved_tree))

    # default file formatter: size and (truncated) modtime
    def plain_repr(f):
        return f'({f["size"]} bytes, modtime: {int(f["mt"])})'

    if moved_directory_dirpaths or moved_directory_subset_dirpaths:
        print('======')
    # ok now moved_directory_dirpaths should be populated with dirpaths
    # of ENTIRE DIRECTORIES that have moved ...
    for fdp, sdp in sorted(moved_directory_dirpaths):
        print('DIR_MOVED:\n  ', dirtuple_to_path(fdp))
        print('  ', dirtuple_to_path(sdp))

    for fdp, sdp in sorted(moved_directory_subset_dirpaths):
        from_tree = get_directory_entry(first_tree, fdp)
        from_tree_check = get_directory_entry(only_first_files_moved_tree, fdp)
        # TODO: comment out in production if you want to speed things up
        assert dirtree_compare(from_tree, from_tree_check) == DirtreeCompareState.EQUAL
        to_tree = get_directory_entry(only_second_tree, sdp)
        assert from_tree['full_dirpath'] == fdp
        assert to_tree['full_dirpath'] == sdp
        print('DIR_MOVED_SUBSET_HOLY_GAO:')
        print('  ', dirtuple_to_path(fdp))
        #pretty_print_dirtree(from_tree, 0, 2, plain_repr)
        #print()
        print('  ', dirtuple_to_path(sdp))
        #pretty_print_dirtree(to_tree, 0, 2, plain_repr)
        #print()

    # remove entries from only_first_files_moved_tree / only_second_files_moved_tree
    # if they appear in moved_directory_dirpaths or moved_directory_subset_dirpaths
    # - i *think* this is sufficient, according to my venn diagram sketches
    # - *maybe* it's easier to go off of the only_first_files_moved and
    #   only_second_files_moved lists, filter them, and rebuild the trees?
    paths_to_filter_from_first = set()
    paths_to_filter_from_second = set()
    for fdp, sdp in moved_directory_dirpaths.union(moved_directory_subset_dirpaths):
        # important to filter from the subset of files that have been MOVED
        from_tree = get_directory_entry(only_first_files_moved_tree, fdp)
        to_tree = get_directory_entry(only_second_files_moved_tree, sdp)
        for d in gen_dirtree_entries(from_tree):
            paths_to_filter_from_first.update([get_path_from_file(e) for e in d['files']])
        for d in gen_dirtree_entries(to_tree):
            paths_to_filter_from_second.update([get_path_from_file(e) for e in d['files']])

    # filter the respective lists:
    only_first_files_moved = [e for e in only_first_files_moved
                              if get_path_from_file(e) not in paths_to_filter_from_first]
    only_second_files_moved = [e for e in only_second_files_moved
                               if get_path_from_file(e) not in paths_to_filter_from_second]

    # now rebuild those trees ...
    only_first_files_moved_tree = create_dirtree(only_first_files_moved)
    only_second_files_moved_tree = create_dirtree(only_second_files_moved)

    print('======')
    print('files in first that were moved ...')
    pretty_print_dirtree(only_first_files_moved_tree, summary_threshold, min_levels_to_recurse, plain_repr)
    print('\nfiles in second that were moved ...')
    pretty_print_dirtree(only_second_files_moved_tree, summary_threshold, min_levels_to_recurse, plain_repr)
    print('======')
    print('only in first (but not moved) ...')
    pretty_print_dirtree(create_dirtree(only_first_files_not_moved), summary_threshold, min_levels_to_recurse, plain_repr)
    print('\nonly in second (but not moved) ...')
    pretty_print_dirtree(create_dirtree(only_second_files_not_moved), summary_threshold, min_levels_to_recurse, plain_repr)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # mandatory positional arguments:
    parser.add_argument("first_file", help="first inventory file to compare")
    parser.add_argument("second_file", help="second inventory file to compare")
    # optional flags and ignore lists (merged with the DEFAULT_* constants):
    parser.add_argument("--ignore_modtimes", help="ignore modification times", action="store_true")
    parser.add_argument("--ignore_dirs", nargs='+', help="ignore the following directories: <list>")
    parser.add_argument("--ignore_files", nargs='+', help="ignore the following filenames: <list>")
    parser.add_argument("--ignore_exts", nargs='+', help="ignore the following file extensions (use lowercase!): <list>")
    parser.add_argument("--ignore_direxts", nargs='+', help="ignore the following file extensions within directories: <list> of entries, each being 'dirname,extension'")
    parser.add_argument("--summary_threshold", action='store', default=DEFAULT_SUMMARY_THRESHOLD, help="summarize a directory when it or its subdirectories have more than N files")
    parser.add_argument("--min_levels", action='store', default=3, help="but recurse into at least N levels")
    parser.add_argument("--quiet", help="less verbose output", action="store_true")
    args = parser.parse_args()

    # parse both inventory files, then diff them
    first = parse_inventory_file(args.first_file)
    print_time_elapsed('first parse done')
    second = parse_inventory_file(args.second_file)
    print_time_elapsed('second parse done')
    # summary_threshold/min_levels arrive as strings from argparse, so coerce
    compare_inventories(first, second,
                        int(args.summary_threshold),
                        int(args.min_levels),
                        args.ignore_modtimes,
                        args.ignore_dirs, args.ignore_files,
                        args.ignore_exts, args.ignore_direxts,
                        args.quiet)
| null | compare_inventories.py | compare_inventories.py | py | 32,402 | python | en | code | null | code-starcoder2 | 51 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.