seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
16483457321 | from flask import Blueprint, g
from flask_graphql import GraphQLView
from flask_cors import CORS
from .schema import schema
# Flask blueprint hosting the GraphQL endpoint; registered by the app factory elsewhere.
api = Blueprint('api', __name__)
CORS(api, supports_credentials=True)  # Enables CORS with cross origin cookies
class CustomGraphQlView(GraphQLView):
    """GraphQLView that flushes cookies queued on ``flask.g`` onto the response.

    Resolvers may append objects exposing ``key``, ``value`` and ``settings``
    attributes to ``g.cookies``; after the GraphQL request is dispatched they
    are applied to the outgoing response here.
    """

    def dispatch_request(self):
        resp = super().dispatch_request()
        queued = g.get("cookies", [])
        for item in queued:
            resp.set_cookie(item.key, item.value, **item.settings)
        return resp
# Expose the GraphQL endpoint (with the GraphiQL browser UI enabled) on /graphql.
api.add_url_rule(
    "/graphql",
    view_func=CustomGraphQlView.as_view(
        "graphql",
        schema=schema,
        graphiql=True,
        middleware=[]
    )
)
| AlexEshoo/poll_app_graphql | poll_app_graphql/api/__init__.py | __init__.py | py | 672 | python | en | code | 0 | github-code | 36 |
15000533019 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 15:36:15 2018
@author: chrelli
added unix time stamps to the first camera!
Ways to slim down the data:
no unix time stamps?
no color frame showing? - yes, helps a lot!
no png compression? Totally fine at 30 fps!
Majow to do list:
- use arduino to synchronize? Yes, could send out synchronization time code to another unit: Problem: doesn't account for delay of arriving frames
- use depth roi to slim down writing footprint
- use LED roi to get blinking time stamps
-
## with connected device cam
from pyrealsense import offline
offline.save_depth_intrinsics(dev)
"""
#%% Import the nescessary stuff
# basic OS stuff
import time, os, sys, shutil
import json
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# small utilities
import csv
from colour import Color
from itertools import compress # for list selection with logical
from tqdm import tqdm
# for image manipulation
import cv2
# for recording and connecting to the intel realsense librar
#import pyrealsense as pyrs
# add the realsense library
sys.path.append(r'/usr/local/lib')
# and load it!
import pyrealsense2 as rs
#import multiprocessing
from multiprocessing import Process
# import handy Functions
from utils.common_utils import *
from utils.recording_utils import *
#%% Parse some inputs
import argparse
parser = argparse.ArgumentParser(description='Records cad and d images with no roi cut to disk. Also records timestamps and led traces using the auto LED mask. Currently, with no ROI, the program maxes out disk write speed around 45 fps.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--ncams', type=int, default=4, choices=[1, 2, 3, 4],
                    help='number of cameras to stream')
parser.add_argument('--fps', type=int, default=30, choices=[30, 60],
                    help='select fps to stream')
# parser.add_argument("--singlecore", help="disables mult.proc. for debugging on macbook, overrides ncams to 1",
#                     action="store_true")
parser.add_argument("--plots", help="shows the live video while recording",
                    action="store_true")
args = parser.parse_args()

#%% Constants
# frame_width,frame_height = 848,480
frame_width, frame_height = 640, 480
fps_choice = args.fps
# number of padding digits for the frame numbers (used for .npy file names)
n_padding_digits = 8
print('# cameras: '+str(args.ncams))
print('Frame size is '+str(frame_width)+'x'+str(frame_height)+' pixels.')
print('Grabbing frames at '+str(fps_choice)+' fps')

# get the current timestring, used to name the recording folders
timestr = time.strftime("%Y%m%d-%H%M%S")

# reset the folder
#data_folder = '/media/chrelli/Data0'
#top_folder = data_folder + '/calibration_' + timestr
#reset_folder_if_present(top_folder)
#
#top_folder_0 = top_folder
#top_folder_1 = top_folder

# Output is split across two physical disks: cameras 0/1 write to Data0 and
# cameras 2/3 write to Data1 (see read_device_* below).
# reset the folders
top_folder_0 = '/media/chrelli/Data0' + '/calibration_' + timestr
top_folder_1 = '/media/chrelli/Data1' + '/calibration_' + timestr
reset_folder_if_present(top_folder_0)
reset_folder_if_present(top_folder_1)
# also make the numpy folders (raw .npy frames go here)
npy_folder_0 = top_folder_0+'/npy_raw'
npy_folder_1 = top_folder_1+'/npy_raw'
reset_folder_if_present(npy_folder_0)
reset_folder_if_present(npy_folder_1)

#%% 8 bit color setup for the on-screen overlay text
fps_color = (Color('White').rgb)
ts_color = (Color('Peru').rgb)
# convert the 0-1 rgb tuples to 8 bit color for cv2
fps_color = tuple(255*x for x in fps_color)
ts_color = tuple(255*x for x in ts_color)

#%% Block for running
# open the pyrealsense server
#serv = pyrs.Service()
# set the start time for the unix time stamp column of the CSVs
start_time = time.time()
# open up a realsense context and get a list of the devices!
ctx = rs.context()
devices = [ctx.devices[i] for i in range(args.ncams)]
# sort the devices by their serial numbers so camera order is deterministic
serials = [devices[i].get_info(rs.camera_info.serial_number) for i in range(args.ncams)]
devices = [x for _, x in sorted(zip(serials, devices))]
def sub_function_trick(which_device, top_folder):
    """Stream one RealSense camera to disk; runs as a dedicated worker process.

    Configures the device (advanced-mode JSON preset, hardware sync with the
    master chosen by serial prefix '740'), starts a depth+color+IR pipeline,
    aligns color to depth, then loops: every frame pair is written as .npy
    files in <top_folder>/npy_raw and one CSV row of
    (frame clock, frame number, device timestamp, unix timestamp, LED pixel)
    is appended to timestamps_<dev>.csv.  With --plots a preview window is
    shown and pressing 'q' stops the stream.

    Parameters:
        which_device: index into the module-level ``devices`` list.
        top_folder:   destination folder for the CSVs and the npy_raw subfolder.
    """
    show_frames = args.plots

    # ------------------ DEVICE SETUP ------------------
    device = devices[which_device]
    device_serial = device.get_info(rs.camera_info.serial_number)

    # Load the advanced-mode preset (JSON exported from realsense-viewer).
    advnc_mode = rs.rs400_advanced_mode(device)
    print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
    preset_folder = '/home/chrelli/git/3d_sandbox/mycetrack0p8/presets/'
    # Serial prefix '740' marks the hardware-sync master camera.
    if device_serial[:3] == '740':
        preset_name = 'master60pp'
    else:
        preset_name = 'slave60pp'
    jsonFile = preset_folder+preset_name+'.json'
    jsonObj = json.load(open(jsonFile))
    json_string = str(jsonObj).replace("'", '\"')
    print("Configuration " + jsonFile + " loaded")
    time.sleep(1.)
    advnc_mode.load_json(json_string)
    print("Configuration " + jsonFile + " applied!")

    # Inter-camera hardware sync: 1 = master, 2 = slave.
    if device_serial[:3] == '740':
        targetSyncMode = 1
    else:
        targetSyncMode = 2
    device.first_depth_sensor().set_option(rs.option.inter_cam_sync_mode, targetSyncMode)

    # Configure and start the pipeline (depth + color + one IR stream).
    config = rs.config()
    pipeline = rs.pipeline()
    config.enable_device(device_serial)
    config.enable_stream(rs.stream.depth, frame_width, frame_height, rs.format.z16, fps_choice)
    config.enable_stream(rs.stream.color, frame_width, frame_height, rs.format.rgb8, fps_choice)
    config.enable_stream(rs.stream.infrared, 1, frame_width, frame_height, rs.format.y8, fps_choice)
    print("PING after enabling the sync mode is {}".format(device.first_depth_sensor().get_option(rs.option.inter_cam_sync_mode)))
    # Start streaming; 'cfg' is the returned pipeline profile, as per example
    cfg = pipeline.start(config)

    # Align color to the depth frame.
    # alternative is to align to color, faster but less precise: align_to = rs.stream.color
    align_to = rs.stream.depth
    align = rs.align(align_to)

    print('dev '+str(which_device)+' serial is ' + device_serial)
    # Use the first three digits of the serial as a string to tag the device:
    device_tag = device_serial[0:3]

    if show_frames:
        # open a preview window for cv2
        window_title = "dev"+str(which_device)+"(#" + device_tag + ")"
        cv2.namedWindow(window_title+'cad')

    # low-level fps estimate, refreshed every 10 frames
    cnt = 0
    last = time.time()
    fps = 0

    # Save depth scale + intrinsics next to the data (old naming convention).
    depth_sensor = cfg.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)
    profile = cfg.get_stream(rs.stream.depth)  # fetch stream profile for depth stream
    intr = profile.as_video_stream_profile().get_intrinsics()  # downcast to video_stream_profile
    parameternames = np.array(['cam_params.fx',
                               'cam_params.fy',
                               'cam_params.ppx',
                               'cam_params.ppy',
                               'd_scale',
                               'fps_choice',
                               'frame_width',
                               'frame_height'])
    parameters = np.array([intr.fx,
                           intr.fy,
                           intr.ppx,
                           intr.ppy,
                           depth_scale,
                           fps_choice,
                           intr.width,
                           intr.height])
    with open(top_folder+'/parameters_'+str(which_device)+'.csv', 'w') as intrfile:
        writer = csv.writer(intrfile, delimiter=',')
        writer.writerow(parameternames)
        writer.writerow(parameters)

    # Automatic LED ROI (mask, logic, centroid) from the constants folder;
    # only the centroid pixel of the IR image is sampled per frame.
    led_mask, led_logic, led_centroid = load_auto_roi(which_device)

    # per-frame timestamp CSV (closed exactly once, in the finally block)
    tsfile = open(top_folder+'/timestamps_'+str(which_device)+'.csv', 'w')

    # raw .npy frames go here (png/hdf5 variants were dropped for speed)
    npy_folder = top_folder+'/npy_raw'

    print('starting to stream from device '+str(which_device)+'!')
    # let the auto-exposure settle for a couple of seconds
    warmup_time = 2  # seconds
    warmup = 0
    while warmup < fps_choice*warmup_time:
        frames = pipeline.wait_for_frames()
        warmup += 1
    print('device '+str(which_device)+' is warmed up!')

    # frame counter used for the on-disk file names
    FRAME_CLOCK = 0
    try:
        while True:
            if show_frames:
                # low-level frame-rate estimate
                cnt += 1
                if (cnt % 10) == 0:
                    now = time.time()  # after 10 frames
                    dt = now - last    # how long did it take?
                    fps = 10/dt        # calculate frame rate
                    last = now         # assign a new value to the 'last time'

            # ---------------- READ BLOCK ----------------
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            ts = frames.get_timestamp()
            fn = frames.get_frame_number()
            # unix time stamp relative to module import
            ts_unix = time.time()-start_time

            # run the alignment process (color aligned onto depth)
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            cad_frame = aligned_frames.get_color_frame()
            # IR frame is taken un-aligned; only the LED pixel is read from it
            infrared_frame = frames.get_infrared_frame()

            # Convert images to numpy arrays
            depth = np.asanyarray(depth_frame.get_data())
            cad = np.asanyarray(cad_frame.get_data())
            c = np.asanyarray(infrared_frame.get_data())

            # LED brightness at the ROI centroid (indexed row = y, col = x)
            led_stamp = c[led_centroid[1], led_centroid[0]]

            # one CSV row per frame: clock, frame number, device ts, unix ts, LED
            tsfile.write(str(FRAME_CLOCK)+','+str(fn)+','+str(ts)+','+str(ts_unix)+','+str(led_stamp)+'\n')

            # write the raw frames as .npy (fastest option found so far)
            np.save(npy_folder+'/dev'+str(which_device)+'_d_'+str(FRAME_CLOCK).rjust(n_padding_digits, '0')+'.npy', depth, allow_pickle=False)
            np.save(npy_folder+'/dev'+str(which_device)+'_cad_'+str(FRAME_CLOCK).rjust(n_padding_digits, '0')+'.npy', cad, allow_pickle=False)

            # UPDATE CLOCK
            FRAME_CLOCK += 1

            if show_frames:
                # add text and show the CAD frames
                cv2.putText(cad, window_title+', fps: '+str(fps)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, fps_color)
                cv2.putText(cad, str(round(ts)), (0, frame_height-20), cv2.FONT_HERSHEY_SIMPLEX, 1, ts_color)
                cv2.imshow(window_title+'cad', cad)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    # BUGFIX: only break here.  The original also called
                    # pipeline.stop() in this branch, so the finally block
                    # stopped the pipeline a second time (an error in
                    # pyrealsense2), and it wrote 'tsfile.close' without
                    # parentheses, which never closed the file.
                    break
    finally:
        # single cleanup path for both the 'q' exit and any exception / kill
        tsfile.close()
        pipeline.stop()
        print('pipeline from device '+str(which_device)+' is now closed!')
#%% define helping funtions for the multiprocessing
# these functions have to not be iterable.
def read_device_0():
    """Worker target: camera 1 (device index 0), writing to the Data0 disk."""
    print('starting camera 1!')
    sub_function_trick(0, top_folder_0)
def read_device_1():
    """Worker target: camera 2 (device index 1), writing to the Data0 disk."""
    print('starting camera 2!')
    sub_function_trick(1, top_folder_0)
def read_device_2():
    """Worker target: camera 3 (device index 2), writing to the Data1 disk."""
    print('starting camera 3!')
    sub_function_trick(2, top_folder_1)
def read_device_3():
    """Worker target: camera 4 (device index 3), writing to the Data1 disk."""
    print('starting camera 4!')
    sub_function_trick(3, top_folder_1)
#%% run the processes on independent cores
# NOTE: Process is already imported at the top of the file; this re-import is redundant.
from multiprocessing import Process
if __name__ == '__main__':
    # One worker process per camera, plus one driving the sync LED.
    # NOTE(review): blink_using_firmata / blink_using_firmata_random are not
    # defined in this file — presumably they come from the utils star imports;
    # confirm before changing those imports.
    if args.ncams == 4:
        print('starting 4 cams, with multiprocessing!')
        # start 4 worker processes; the master camera gets a head start
        Process(target=read_device_0).start()
        time.sleep(3.)
        Process(target=read_device_1).start()
        Process(target=read_device_2).start()
        Process(target=read_device_3).start()
        Process(target=blink_using_firmata_random).start()
    elif args.ncams == 3:
        print('starting 3 cams, with multiprocessing!')
        Process(target=read_device_0).start()
        Process(target=read_device_1).start()
        Process(target=read_device_2).start()
        Process(target=blink_using_firmata).start()
    elif args.ncams == 2:
        print('starting 2 cams, with multiprocessing!')
        Process(target=read_device_0).start()
        Process(target=read_device_1).start()
        Process(target=blink_using_firmata).start()
    elif args.ncams == 1:
        print('starting 1 cam, with multiprocessing!')
        Process(target=read_device_0).start()
        Process(target=blink_using_firmata).start()
| chrelli/3DDD_social_mouse_tracker | recording/record_calib_npy.py | record_calib_npy.py | py | 15,418 | python | en | code | 5 | github-code | 36 |
10680492541 | import time
import tensorflow.compat.v1 as tf
# tf.disable_eager_execution()
# NOTE(review): eager execution is enabled here, yet the rest of the script
# builds a TF1-style placeholder graph and drives it with tf.Session — these
# modes conflict; confirm which one the installed TF version actually honors.
tf.config.run_functions_eagerly(True)
tf.enable_eager_execution()
from utils import *
from models import RGCN
import random
import pandas as pd
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from gcn.models import GCN
# Set random seed for numpy, TF and the stdlib RNG so runs are reproducible
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
random.seed(seed)

# Settings (TF1-style command-line flags)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'cora', 'Dataset string.')  # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer.')
flags.DEFINE_float('dropout', 0.6, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_float('para_var', 1, 'Parameter of variance-based attention')
flags.DEFINE_float('para_kl', 5e-4, 'Parameter of kl regularization')
flags.DEFINE_float('para_l2', 5e-4, 'Parameter for l2 loss.')
flags.DEFINE_integer('early_stopping', 20, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')

# Load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, label = load_data(FLAGS.dataset)
# train_mask[641:700] = True

# Perturb the node features and train a Gaussian-process classifier to detect
# which rows were perturbed.  NOTE(review): 0.8 is presumably the perturbation
# rate — perturb_features_gpc is defined in utils; confirm its signature.
perturbed_features, y_train_gpc = perturb_features_gpc(features, 0.8)
gpc_idx, gpc_feature, gpc_y_train_gpc = get_gpc_train_data(perturbed_features, y_train_gpc, train_mask, test_mask, val_mask)
print(len(gpc_idx))
kernel = 1.0 * RBF(1.0)
# Only the first 500 rows are used to fit the GPC — presumably for speed;
# verify this subset is representative of the full training data.
gpc = GaussianProcessClassifier(kernel=kernel,
                                random_state=0).fit(gpc_feature[:500], gpc_y_train_gpc[:500])
# gpc.score(perturbed_features, y_train_gpc)
gpc_res = gpc.predict(perturbed_features)
# print(type(gpc_res))
# gpc_res = np.asarray(y_train_gpc) # delete later, used for testing
# print(type(y_train_gpc))
# indices of rows the GPC flags as perturbed (label == 1)
gpc_predict_pert_idx = [i for i, x in enumerate(gpc_res == 1) if x]
# print(all_idx_to_remove)
features = sp.csr_matrix(perturbed_features)
# features, y_train, train_mask, adj, label, y_val, val_mask = remove_pert(features, y_train, train_mask, adj, label, y_val, val_mask, gpc_predict_pert_idx)
# replace (rather than remove) the flagged rows; for continuous features
features = modify_pert(features, gpc_predict_pert_idx)
# print(train_mask)
features = preprocess_features(features)
support = [preprocess_adj(adj, -0.5), preprocess_adj(adj, -1.0)]

# TF1-style graph inputs fed at every sess.run via construct_feed_dict
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(2)],
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32),
}
model = GCN(placeholders, input_dim=features[2][1], logging=True)
sess = tf.Session()
def evaluate(features, support, labels, mask, placeholders, adj):
    """Run one masked forward pass and return (loss, accuracy, seconds taken)."""
    started = time.time()
    feed = construct_feed_dict(features, support, labels, mask, placeholders, adj)
    loss_val, acc_val = sess.run([model.loss, model.accuracy], feed_dict=feed)
    return loss_val, acc_val, (time.time() - started)
sess.run(tf.global_variables_initializer())
cost_val = []   # validation losses, used for the early-stopping window
var1 = []       # NOTE(review): never used below — confirm before removing
for epoch in range(FLAGS.epochs):
    t = time.time()
    # one optimizer step on the (masked) training nodes
    feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders, adj)
    feed_dict.update({placeholders['dropout']: FLAGS.dropout})
    outs = sess.run([model.opt_op, model.loss, model.accuracy, model.vars], feed_dict=feed_dict)
    # print(outs[3].shape)
    # if epoch == (FLAGS.epochs-1):
    #     # print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    #     df = pd.DataFrame(data = outs[3])
    #     df.to_csv('/home/zihe-leon/Desktop/RobustGCN-master/src/var0.csv', index = False)
    # validation pass (no dropout: placeholder defaults to 0)
    cost, _, duration = evaluate(features, support, y_val, val_mask, placeholders, adj)
    cost_val.append(cost)
    # Print results
    print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
          "train_acc=", "{:.5f}".format(outs[2]), "time=", "{:.5f}".format(time.time() - t))
    # stop when validation loss exceeds the mean of the last early_stopping epochs
    if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
        print("Early stopping...")
        break
print("Optimization Finished!")

# Reload the clean (un-perturbed) data for the final test evaluation.
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, label = load_data(FLAGS.dataset)
features = preprocess_features(features)
support = [preprocess_adj(adj, -0.5), preprocess_adj(adj, -1.0)]
test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, placeholders, adj)
print("Test set results:", "cost=", "{:.5f}".format(test_cost),
      "accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))
| zzheng18/CSPC680RGCNV | src/train_gpc.py | train_gpc.py | py | 4,973 | python | en | code | 0 | github-code | 36 |
def cleaning_digor_eng():
    """Normalize the Digor-English dump: lowercase each line, replace the
    Latin 'æ' with the Cyrillic 'ӕ', and drop blank lines."""
    with open('Дигорско-английский.txt', 'r', encoding='utf-8') as src:
        cleaned = [ln.rstrip("\n").lower().replace('æ', 'ӕ') for ln in src]
    with open('Дигорско-английский очищенный.txt', 'w', encoding='utf-8') as dst:
        for ln in cleaned:
            if ln.strip():
                dst.write(ln + '\n')
def cleaning_digor_rus():
    """Normalize the Digor-Russian dump (lowercase, 'æ' -> 'ӕ') and keep only
    headword lines (no '[' markup), m3-tagged lines, and m1-tagged lines that
    carry a см./тж./мн. cross-reference.  Blank lines are dropped."""
    with open('Дигорско-русский.txt', 'r', encoding='utf-8') as src:
        cleaned = [ln.rstrip("\n").lower().replace('æ', 'ӕ') for ln in src]
    with open('Дигорско-русский очищенный.txt', 'w', encoding='utf-8') as dst:
        for ln in cleaned:
            keep = (
                ln.find('[') == -1
                or ('m1' in ln and ('см.' in ln or 'тж.' in ln or 'мн.' in ln))
                or 'm3' in ln
            )
            if keep and ln.strip():
                dst.write(ln + '\n')
def cleaning_pronouns():
    """Normalize the pronouns dump: lowercase each line, replace the Latin
    'æ' with the Cyrillic 'ӕ', and drop blank lines."""
    with open('Местоимения.txt', 'r', encoding='utf-8') as src:
        cleaned = [ln.rstrip("\n").lower().replace('æ', 'ӕ') for ln in src]
    with open('Местоимения очищенные.txt', 'w', encoding='utf-8') as dst:
        for ln in cleaned:
            if ln.strip():
                dst.write(ln + '\n')
class Lexeme:
    """One dictionary entry: surface form(s), English gloss and grammar tags."""

    def __init__(self, lex, transl_en, gramm):
        self.lex = lex              # surface form(s), usually a list of strings
        self.transl_en = transl_en  # English translation
        self.gramm = gramm          # grammatical markers, usually a list

    def _as_text(self):
        # shared rendering for __str__ and __repr__
        return f'{self.lex}\n{self.transl_en}\n{self.gramm}\n'

    def __str__(self):
        return self._as_text()

    def __repr__(self):
        return self._as_text()
def find_lexem(lexems, word):
    """Return the index of the first entry whose ``lex`` list contains *word*,
    or -1 when no entry matches."""
    for idx, entry in enumerate(lexems):
        if word in entry.lex:
            return idx
    return -1
# Ossetian alphabet with 1-based ordinal positions (digraphs included).
# NOTE(review): this table appears unused below — words_comparator builds its
# own space-separated alphabet string instead; confirm before relying on it.
osetin_alphabet = {'а': 1, 'ӕ': 2, 'б': 3, 'в': 4, 'г': 5, 'гъ': 6, 'д': 7, 'дж': 8, 'дз': 9, 'е': 10,
                   'ё': 11, 'ж': 12, 'з': 13, 'и': 14, 'й': 15, 'к': 16, 'къ': 17, 'л': 18, 'м': 19,
                   'н': 20, 'о': 21, 'п': 22, 'пъ': 23, 'р': 24, 'с': 25, 'т': 26, 'тъ': 27, 'у': 28,
                   'ф': 29, 'х': 30, 'хъ': 31, 'ц': 32, 'цъ': 33, 'ч': 34, 'чъ': 35, 'ш': 36, 'щ': 37,
                   'ъ': 38, 'ы': 39, 'ь': 40, 'э': 41, 'ю': 42, 'я': 43}
def words_comparator(word1, word2):
    """Three-way compare (-1/0/1) of two words in Ossetian alphabetical order.

    Digraphs (гъ, дж, дз, къ, ...) are treated as single letters: when a
    two-character slice occurs in the alphabet string, it is consumed as one
    letter.  NOTE(review): letter rank is taken via str.index into the
    space-separated alphabet string, so a standalone 'ъ' matches inside the
    first digraph containing it ('гъ') and sorts at that position — confirm
    this collation is intended.  On any exception (e.g. a character missing
    from the alphabet) the fallback sorts words containing '-' first.
    """
    try:
        ossetian_alphabet = " а ӕ б в г гъ д дж дз е ё ж з и й к къ л м н о п пъ р с т тъ у ф х хъ ц цъ ч чъ ш щ ъ ы ь э ю я"
        i1 = 0
        i2 = 0
        while i1 < len(word1) and i2 < len(word2):
            letter1 = word1[i1]
            letter2 = word2[i2]
            # consume a digraph if the 2-char slice occurs in the alphabet string
            if i1 + 1 < len(word1) and word1[i1: i1 + 2] in ossetian_alphabet:
                letter1 = word1[i1: i1 + 2]
                i1 += 1
            if i2 + 1 < len(word2) and word2[i2: i2 + 2] in ossetian_alphabet:
                letter2 = word2[i2: i2 + 2]
                i2 += 1
            # compare by position of the letter in the alphabet string
            if ossetian_alphabet.index(letter1) < ossetian_alphabet.index(letter2):
                return -1
            elif ossetian_alphabet.index(letter1) > ossetian_alphabet.index(letter2):
                return 1
            i1 += 1
            i2 += 1
        # one word is a prefix of the other: the shorter sorts first
        if len(word1) < len(word2):
            return -1
        elif len(word1) > len(word2):
            return 1
        return 0
    except Exception:
        # fallback path for characters not present in the alphabet
        if '-' not in word1 and '-' not in word2:
            print(word1, word2)
            # NOTE(review): prints the class attribute, not the caught
            # exception's args — likely meant 'except Exception as e: e.args'
            print(Exception.args)
        if '-' in word1:
            return -1
        return 1
# print('c' == 'c')
# print(words_comparator("cтъона", 'дехгӕнӕг'))
# exit()
def union_dictionaries():
    """Merge the English and pronoun lexeme files into the Digor-Russian
    dictionary.

    Pass 1 parses Lexeme records ('-lexeme' stanzas) from the cleaned English
    and pronoun files.  Pass 2 walks the cleaned Digor-Russian file and, for
    every headword that matches a lexeme, appends its grammar tags and English
    gloss as [m1] markup lines ('unioned dictionary.txt').  Pass 3 inserts the
    lexemes that never matched at their alphabetical position (using
    words_comparator) and writes 'unioned dictionary2.txt'; multi-form
    lexemes that cannot be placed go to 'errorlist.txt'.

    Fixes over the original:
      * ``lex.remove('')`` is no longer assigned back (list.remove returns
        None, which wiped the form list);
      * ``error_list`` is initialized once before the insertion loop (it was
        re-created per iteration, losing entries and raising NameError when
        every lexeme was already used);
      * the errorlist file writes ``str(lexeme)`` (Lexeme + str raised
        TypeError).
    """
    lexems = []
    count_paired_words = 0
    all_ = 0

    # ---- pass 1a: lexemes from the English file ----
    with open('Дигорско-английский очищенный.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.rstrip("\n"), f.readlines()))
    i = 0
    while i < len(lines):
        if '-lexeme' in lines[i]:
            lexems.append(Lexeme(
                lex=[tok.strip('/|').strip() for tok in lines[i + 1].removeprefix(' lex:').strip('.').split('.')],
                transl_en=lines[i + 5].removeprefix(' transl_en:').strip(),
                gramm=[tok.strip() for tok in lines[i + 6].removeprefix(' gramm:').split(',')]
            ))
            # BUGFIX: drop empty forms in place; list.remove returns None, so
            # the original 'lex = lex.remove("")' destroyed the form list.
            if '' in lexems[-1].lex:
                lexems[-1].lex.remove('')
            i += 6
        i += 1

    # ---- pass 1b: lexemes from the pronoun file (forms live on 'stem:') ----
    with open('Местоимения очищенные.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.rstrip("\n"), f.readlines()))
    i = 0
    while i < len(lines):
        if '-lexeme' in lines[i]:
            lexems.append(Lexeme(
                lex=[tok.strip('/|').strip() for tok in lines[i + 2].removeprefix(' stem:').strip('.').split('.')],
                transl_en=lines[i + 5].removeprefix(' transl_en:').strip(),
                gramm=[tok.strip() for tok in lines[i + 6].removeprefix(' gramm:').split(',')]
            ))
            if '' in lexems[-1].lex:
                lexems[-1].lex.remove('')  # BUGFIX: same as above
            i += 6
        i += 1

    # ---- pass 2: annotate matching headwords of the Russian dictionary ----
    with open('Дигорско-русский очищенный.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.rstrip("\n"), f.readlines()))
    with open('unioned dictionary.txt', 'w', encoding='utf-8') as f:
        i = 0
        while i < len(lines):
            if lines[i].find('[') == -1:  # headword line (no markup)
                all_ += 1
                f.write(lines[i] + '\n')
                # copy the existing markup lines of this entry
                r = i + 1
                while r < len(lines) and lines[r].find('[') != -1:
                    if lines[r].strip():
                        f.write(lines[r] + '\n')
                    r += 1
                # append grammar + English gloss when a lexeme matches
                ind = find_lexem(lexems, lines[i])
                if ind != -1:
                    count_paired_words += 1
                    f.write(' [m1]')
                    for gr in lexems[ind].gramm[:-1]:
                        f.write(f'[p][i][c][com]{gr},[/com][/c][/i][/p]')
                    f.write(f'[p][i][c][com]{lexems[ind].gramm[-1]}[/com][/c][/i][/p]')
                    f.write('[/m]\n')
                    f.write(f' [m1][trn]{lexems[ind].transl_en}[/trn][/m]\n')
                i = r
            else:
                i += 1
    print(all_, 'all_')
    print(count_paired_words, 'count_paired_words')
    print(len(lexems), 'lexems')

    # ---- pass 3: insert lexemes that never matched a headword ----
    used_lexems = [False] * len(lexems)
    with open('unioned dictionary.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.rstrip("\n"), f.readlines()))
    for i in range(len(lines)):
        if lines[i].find('[') == -1:
            ind = find_lexem(lexems, lines[i])
            if ind != -1:
                used_lexems[ind] = True
    inserted = 0
    # BUGFIX: initialized once; the original re-created this list inside the
    # loop, losing all but the last entry (and NameError when nothing was unused).
    error_list = []
    for i in range(len(lexems)):
        if not used_lexems[i]:
            inserted += 1
            ind = 0
            temp = lexems[i].lex
            if len(temp) > 1:
                # multi-form lexemes cannot be placed at a single position
                error_list.append(lexems[i])
                continue
            temp = temp[0]
            # linear scan of headwords for the alphabetical insertion point
            r = 0
            while r < len(lines):
                if lines[r].find('[') == -1:
                    if words_comparator(temp, lines[r]) == 1:
                        ind = r
                        r += 1
                    else:
                        break
                else:
                    r += 1
            gram = ' [m1]'
            for gr in lexems[i].gramm[:-1]:
                gram += f'[p][i][c][com]{gr},[/com][/c][/i][/p]'
            gram += f'[p][i][c][com]{lexems[i].gramm[-1]}[/com][/c][/i][/p]' + '[/m]'
            trn = f' [m1][trn]{lexems[i].transl_en}[/trn][/m]'
            lines.insert(ind, lexems[i].lex[0])
            lines.insert(ind + 1, gram)
            lines.insert(ind + 2, trn)
    print(inserted, "inserted")
    print(len(error_list), 'errorlist')
    with open('unioned dictionary2.txt', 'w', encoding='utf-8') as f:
        f.writelines([i + '\n' for i in lines])
    with open('errorlist.txt', 'w', encoding='utf-8') as f:
        # BUGFIX: entries are Lexeme objects; stringify before concatenation
        f.writelines([str(i) + '\n' for i in error_list])
# with open('Дигорско-русский очищенный.txt', 'r', encoding='utf-8') as f:
# a = sum(map(lambda x: x.rstrip("\n").strip() == "", f.readlines()))
# print(a)
# cleaning_digor_rus()
# with open('Дигорско-русский очищенный.txt', 'r', encoding='utf-8') as f:
# a = sum(map(lambda x: x.rstrip("\n").strip() == "", f.readlines()))
# print(a)
# with open('Местоимения.txt', 'r', encoding='utf-8') as f:
# a = sum(map(lambda x: x.rstrip("\n").strip() == "", f.readlines()))
# print(a)
# cleaning_pronouns()
# with open('Местоимения очищенные.txt', 'r', encoding='utf-8') as f:
# c = 0
# for i in f.readlines():
# if i.startswith(' stem:'):
# if len(i.strip(' stem:').strip().split('.')) > 1:
# print(i)
# c += 1
# print(c)
#
# with open('Дигорско-английский очищенный.txt', 'r', encoding='utf-8') as f:
# c = 0
# for i in f.readlines():
# if i.startswith(' lex:'):
# if len(i.strip(' lex:').strip().split('.')) > 1:
# print(i)
# c += 1
# print(c)
# print(ord('æ'), ord('æ'))
# cleaning_digor_eng()
# print(1)
# cleaning_digor_rus()
# print(2)
# cleaning_pronouns()
# print(3)
# union_dictionaries()
# print(4)
# print('ӕ' < 'б')
def final_cleaning():
    """Sort each entry's markup lines and collect headwords that have no
    translation lines at all into a separate report file."""
    with open('Английский.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.rstrip("\n"), f.readlines()))
    # print(len(lines))
    # i = 0
    # while i < len(lines):
    #     while i < len(lines) and not lines[i].strip():
    #         print(1)
    #         del lines[i]
    #     i += 1
    # print(len(lines))
    fixed_lines = []
    words_without_trn = []
    i = 0
    while i < len(lines):
        fixed_lines.append(lines[i])
        # a headword with no following '['-markup line has no translation
        if i == len(lines) - 1 or '[' not in lines[i + 1]:
            words_without_trn.append(lines[i])
            i += 1
        else:
            # gather the entry's markup lines and emit them sorted
            i += 1
            temp = []
            while i < len(lines) and '[' in lines[i]:
                temp.append(lines[i])
                i += 1
            temp.sort()
            fixed_lines.extend(temp)
    with open('слова без перевода.txt', 'w', encoding='utf-8') as f:
        f.writelines([i + '\n' for i in words_without_trn])
    with open('Английский исправленный.txt', 'w', encoding='utf-8') as f:
        f.writelines([i + '\n' for i in fixed_lines])
def extract_word(s, start_ind, char_break):
    """Collect characters of ``s`` from ``start_ind`` up to (but excluding)
    the first occurrence of ``char_break``.

    Returns the collected text with surrounding whitespace stripped, together
    with the index where scanning stopped (the position of ``char_break``).
    Raises IndexError when ``char_break`` does not occur before the end of
    ``s``.
    """
    pieces = []
    pos = start_ind
    while s[pos] != char_break:
        pieces.append(s[pos])
        pos += 1
    return ''.join(pieces).strip(), pos
def clear_string_m1():
    """Rebuild [m1] cross-reference lines of 'Английский исправленный.txt'
    into a canonical form and write the result to 'temp.txt'.

    For each markup line containing a см./тж./мн. keyword, the [ref] targets
    are collected per keyword by scanning the line positionally with
    extract_word, then the line is re-emitted as
    '[m1]<см-part> <тж-part> <мн-part>[/m]'.  Lines without any collected
    reference are dropped (the bare 'continue').  All other lines pass
    through, with markup lines indented and runs of spaces collapsed.
    """
    kw = ['см.', 'тж.', 'мн.'] # keywords
    with open('Английский исправленный.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.strip().rstrip("\n"), f.readlines()))
    with open('temp.txt', 'w', encoding='utf-8') as f:
        for i in range(len(lines)):
            line = lines[i]
            # NOTE(review): 'and' binds tighter than 'or', so this triggers on
            # ANY line containing 'тж.' or 'мн.' even without 'm1' — confirm
            # the precedence is intended.
            if 'm1' in line and kw[0] in line or kw[1] in line or kw[2] in line:
                # refs collected per keyword
                dick = {
                    kw[0]: [],
                    kw[1]: [],
                    kw[2]: []
                }
                cur_kw = 0  # keyword most recently seen; refs attach to it
                r = 0
                lenn = len(line)
                while r < lenn:
                    # keyword marker: [p][i][c][com]<kw>[...
                    if line.startswith('[p][i][c][com]', r):
                        temp, r = extract_word(line, r + 14, '[')
                        r += 14 # minimum 10 forward
                        if temp in kw:
                            cur_kw = kw.index(temp)
                    # reference target: [ref]<word>[...
                    if line.startswith('[ref]', r):
                        temp, r = extract_word(line, r + 5, '[')
                        r += 3
                        dick[kw[cur_kw]].append(temp)
                    # try: заигнорю слова на которые нет ссылок потому что он указывал только тег ref
                    # if line.startswith('[i][com]', r) and not line.startswith('[i][com](', r):
                    #     temp, r = extract_word(line, r + 4, '')
                    #     dick[kw[cur_kw]].append(temp)
                    # except:
                    #     pass
                    # plural form marker: [i][com]([p][c]мн.[...)<form>)
                    if line.startswith('[i][com]([p][c]', r):
                        temp, r = extract_word(line, r + 15, '[') # temp always == 'мн.'
                        r += 8
                        temp, r = extract_word(line, r, ')')
                        dick[kw[2]].append(temp)
                    r += 1
                part1 = ''
                if len(dick[kw[0]]) > 0:
                    part1 = f'[p][i][c][com]{kw[0]}[/com][/c][/i][/p] {", ".join([f"[ref]{ref}[/ref]" for ref in dick[kw[0]]])}'
                part2 = ''
                if len(dick[kw[1]]) > 0:
                    part2 = f'[p][i][c][com]{kw[1]}[/com][/c][/i][/p] {", ".join([f"[ref]{ref}[/ref]" for ref in dick[kw[1]]])}'
                part3 = ''
                if len(dick[kw[2]]) > 0:
                    part3 = f'[i][com]([p][c]{kw[2]}[/c][/p] {dick[kw[2]][0]})[/com][/i]'
                if len(part1) > 0 or len(part2) > 0 or len(part3) > 0:
                    line = f'[m1]{part1} {part2} {part3}[/m]'
                else:
                    continue  # nothing collected: the line is dropped entirely
            if '[' in line:
                line = ' ' + line  # indent markup lines under their headword
            while ' ' in line:
                line = line.replace(' ', ' ')
            f.write(line + '\n')
def testing():
    """Scratch copy of clear_string_m1 that runs on 'test.txt' and writes
    'test_out.txt'.

    NOTE(review): this is a near-verbatim duplicate of clear_string_m1 (the
    only code difference is 'r += 2' vs 'r += 3' after a [ref] match) —
    consider extracting the shared parsing into one helper.
    """
    kw = ['см.', 'тж.', 'мн.'] # keywords
    with open('test.txt', 'r', encoding='utf-8') as f:
        lines = list(map(lambda x: x.strip().rstrip("\n"), f.readlines()))
    for i in range(len(lines)):
        line = lines[i]
        # if line.count('.') > 1:
        #     print(line)
    with open('test_out.txt', 'w', encoding='utf-8') as f:
        for i in range(len(lines)):
            line = lines[i]
            # see precedence note in clear_string_m1: 'and' binds tighter than 'or'
            if 'm1' in line and kw[0] in line or kw[1] in line or kw[2] in line:
                # refs collected per keyword
                dick = {
                    kw[0]: [],
                    kw[1]: [],
                    kw[2]: []
                }
                cur_kw = 0
                r = 0
                lenn = len(line)
                while r < lenn:
                    # keyword marker: [p][i][c][com]<kw>[...
                    if line.startswith('[p][i][c][com]', r):
                        temp, r = extract_word(line, r + 14, '[')
                        r += 14 # minimum 10 forward
                        if temp in kw:
                            cur_kw = kw.index(temp)
                    # reference target: [ref]<word>[...
                    if line.startswith('[ref]', r):
                        temp, r = extract_word(line, r + 5, '[')
                        r += 2
                        dick[kw[cur_kw]].append(temp)
                    # try: заигнорю слова на которые нет ссылок потому что он указывал только тег ref
                    # if line.startswith('[i][com]', r) and not line.startswith('[i][com](', r):
                    #     temp, r = extract_word(line, r + 4, '')
                    #     dick[kw[cur_kw]].append(temp)
                    # except:
                    #     pass
                    # plural form marker: [i][com]([p][c]мн.[...)<form>)
                    if line.startswith('[i][com]([p][c]', r):
                        temp, r = extract_word(line, r + 15, '[') # temp always == 'мн.'
                        r += 8
                        temp, r = extract_word(line, r, ')')
                        dick[kw[2]].append(temp)
                    r += 1
                part1 = ''
                if len(dick[kw[0]]) > 0:
                    part1 = f'[p][i][c][com]{kw[0]}[/com][/c][/i][/p] {", ".join([f"[ref]{ref}[/ref]" for ref in dick[kw[0]]])}'
                part2 = ''
                if len(dick[kw[1]]) > 0:
                    part2 = f'[p][i][c][com]{kw[1]}[/com][/c][/i][/p] {", ".join([f"[ref]{ref}[/ref]" for ref in dick[kw[1]]])}'
                part3 = ''
                if len(dick[kw[2]]) > 0:
                    part3 = f'[i][com]([p][c]{kw[2]}[/c][/p] {dick[kw[2]][0]})[/com][/i]'
                if len(part1) > 0 or len(part2) > 0 or len(part3) > 0:
                    line = f'[m1]{part1} {part2} {part3}[/m]'
                else:
                    continue
            if '[' in line:
                line = ' ' + line
            while ' ' in line:
                line = line.replace(' ', ' ')
            f.write(line + '\n')
def replace_m1():
    """Regroup the tag lines under every head-word in temp.txt.

    Each head-word in temp.txt is followed by bracketed tag lines.
    Those lines are bucketed into: cross-reference lines containing the
    см./тж./мн. keywords, grammar/other lines, [trn] translation lines
    and [m3] lines, then written to temp2.txt in that fixed order
    (references first, then grammar, translations, m3), each body line
    indented with a single leading space.
    """
    kw = ['см.', 'тж.', 'мн.'] # keywords: "see", "see also", "plural"
    with open('temp.txt', 'r', encoding='utf-8') as f:
        # strip() already removes the trailing newline; rstrip("\n") is a no-op here.
        lines = list(map(lambda x: x.strip().rstrip("\n"), f.readlines()))
    with open('temp2.txt', 'w', encoding='utf-8') as f:
        i = 0
        while i < len(lines):
            word = lines[i]  # current head-word line
            r = i + 1
            # NOTE(review): `space` is assigned but unused below (a
            # literal ' ' is written instead).
            space = ' '
            gr = []     # grammar / uncategorised tag lines
            other = []  # lines carrying one of the см./тж./мн. keywords
            trn = []    # [trn] translation lines
            m3 = []     # [m3] lines
            # Consume the bracketed body lines belonging to this head-word.
            while r < len(lines) and '[' in lines[r]:
                if 'm3' in lines[r]:
                    m3.append(lines[r])
                elif 'trn' in lines[r]:
                    trn.append(lines[r])
                elif kw[0] in lines[r] or kw[1] in lines[r] or kw[2] in lines[r]:
                    other.append(lines[r])
                else:
                    gr.append(lines[r])
                r += 1
            i = r  # jump to the next head-word
            # Re-emit the entry with the buckets in canonical order.
            f.write(word + '\n')
            for m in other:
                f.write(' ' + m + '\n')
            for m in gr:
                f.write(' ' + m + '\n')
            for m in trn:
                f.write(' ' + m + '\n')
            for m in m3:
                f.write(' ' + m + '\n')
def transfer_duplicates():
    """Split temp2.txt into first occurrences and repeated entries.

    temp2.txt holds head-words, each followed by '['-tagged body lines.
    The first occurrence of every head-word (with its body) is written
    to 'итоговый вариант.txt'; any later occurrence of the same
    head-word goes to 'duplicated words.txt'.  Body lines are written
    with a single leading space, exactly as before.
    """
    with open('temp2.txt', 'r', encoding='utf-8') as f:
        lines = [line.strip() for line in f]
    space = ' '
    seen = set()  # set membership is O(1); the old list scan was O(n) per word
    unique_lines = []
    duplicated_lines = []
    i = 0
    while i < len(lines):
        word = lines[i]
        # Route the whole entry (head-word plus body) to one bucket,
        # instead of duplicating the inner loop per branch.
        target = duplicated_lines if word in seen else unique_lines
        seen.add(word)
        target.append(word)
        r = i + 1
        while r < len(lines) and '[' in lines[r]:
            target.append(space + lines[r])
            r += 1
        i = r
    with open('итоговый вариант.txt', 'w', encoding='utf-8') as f:
        for line in unique_lines:
            f.write(line + '\n')
    with open('duplicated words.txt', 'w', encoding='utf-8') as f:
        for line in duplicated_lines:
            f.write(line + '\n')
transfer_duplicates()
# if 'см' in line or 'тж' in line:
# sign = ''
# refs = []
# r = 0
# while r < len(line):
# if line[r] == 'p' and line[r:r + 13] == 'p][i][c][com]':
# temp = ''
# t = r + 13
# while line[t] != '[':
# temp += line[t]
# t += 1
# temp = temp.strip()
# if temp in ['см', 'тж']:
# sign = temp
# if line[r] == 'r' and line[r:r + 4] == 'ref]':
# temp = ''
# t = r + 4
# while line[t] != '[':
# temp += line[t]
# t += 1
# refs.append(temp)
# line = f' [m1][p][i][c][com]{sign}[/com][/c][/i][/p] {", ".join([f"[ref]{ref}[/ref]" for ref in refs])}[/m]'
# elif 'мн' in line:
# pass
# line = line.removeprefix('[m1]').removesuffix('[/m]')
# line, ref = line[:line.find('[ref]')], line[line.find('[ref]'):]
# words = [''.strip(' ,.:|/;').removeprefix('[p][i][c][com]') for i in line.strip().split('[/com][/c][/i][/p]')]
# print(words)
# if i == 20:
# return
# ref = ''
| Lana-Dzuceva/translation_script | cleaning and union dictionaries.py | cleaning and union dictionaries.py | py | 21,163 | python | en | code | 0 | github-code | 36 |
3585781734 | from __future__ import absolute_import, division, print_function
# Local Imports
from modeling import ArgStrModel
from arguments import TrainingArguments
from training import ArgStrTrainer
from processors import get_datasets
from utils import Data_Collator, set_seed
# Standard Imports
import os
import random
# Third Party Imports
import json
import torch
import mlflow
from transformers import BertConfig
if __name__ == '__main__':
# load the best trained model and config
config_available = False
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# define the experiment parameters
bert_arch = "bert-base-uncased"
task_name = "STLAS_randomized_LOO_gretz_topic"
exp_name = task_name + "_v1_bb"
mlflow_exp_name = "ASL_randomized_v2"
NUM_OF_SEEDS = 5
# define the path from where the task data is loaded from.
task_data_dir = "/mnt/data2/Sid/arg_quality/pytorch/task4_hpo/data/*.csv"
if config_available:
config_file = open("/mnt/data2/Sid/arg_quality/pytorch/task4_hpo/best_model_details_MTLAS_LOO_swanson_v2_bb"
".json", )
config_data = json.load(config_file)
else:
# define config
config_data = {
"bert_arch": bert_arch,
"task_name": task_name,
"exp_name": exp_name,
"device": torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
"is_distributed": False,
"resources_per_trial": {
'cpu': 4,
'gpu': 1
},
"data_dir": task_data_dir,
"split_by_topic": True if "randomized" in task_name else False,
"eval_batch_size": 128,
"train_batch_size": 64,
"max_seq_length": None,
"max_seq_length_perc": 0.95,
"data_collator": Data_Collator,
"dropout_prob": 0.1,
"bert_hidden_layers": 4,
"nn_config": 1,
"dataset_loss_method": "unweighted",
"learning_rate": 4.8672041684500765e-06,
"weight_decay": 0.1936871758204528,
"num_epochs": 50,
"max_steps": -1, # We use num_epochs instead.
"mlflow": {
"experiment_name": mlflow_exp_name,
"tracking_uri": "http://mlflow.dbs.ifi.lmu.de:5000"
}
}
if mlflow.get_experiment_by_name(mlflow_exp_name) is None:
mlflow.create_experiment(mlflow_exp_name)
mlflow.set_experiment(experiment_name=mlflow_exp_name)
mlflow.set_tracking_uri("http://mlflow.dbs.ifi.lmu.de:5000")
# define 10 seeds to run for training
seed_list = random.sample(range(0, 10000), NUM_OF_SEEDS)
for seed_value in seed_list:
set_seed(seed_value)
with mlflow.start_run(run_name="asl_randomized_v1"):
mlflow.log_param("seed", seed_value)
mlflow.log_param("Task Name", task_name)
print("Seed:", seed_value)
train_dataset, eval_dataset, test_dataset, task_dict = get_datasets(config_data)
# Load model setup.
if not config_available:
bert_config = BertConfig.from_pretrained(
config_data["bert_arch"],
finetuning_task=task_name,
output_hidden_states=True
)
model = ArgStrModel.from_pretrained(
config_data["bert_arch"],
config=bert_config,
dropout_prob=config_data["dropout_prob"],
bert_hidden_layers=config_data["bert_hidden_layers"],
mlp_config=config_data["nn_config"],
task_dict=task_dict,
device=config_data["device"]
)
else:
bert_config = BertConfig.from_pretrained(
config_data["bert_arch"],
finetuning_task=task_name,
output_hidden_states=True
)
model = ArgStrModel(
config=bert_config,
dropout_prob=config_data["dropout_prob"],
bert_hidden_layers=config_data["bert_hidden_layers"],
mlp_config=config_data["nn_config"],
task_dict=task_dict,
device=config_data["device"])
with open(os.path.join(config_data["best_checkpoint_path"], "best_model.pt"), 'rb') as checkpt:
model_state, optimizer_state = torch.load(checkpt)
model.load_state_dict(model_state)
training_args = TrainingArguments(
learning_rate=config_data["learning_rate"],
train_model=True,
evaluate_during_training=True,
save_steps=0,
max_num_train_epochs=25,
train_batch_size=config_data["train_batch_size"],
eval_batch_size=config_data["eval_batch_size"],
weight_decay=config_data["weight_decay"],
weighted_dataset_loss=config_data["dataset_loss_method"]
)
retrain_runner = ArgStrTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
test_dataset=test_dataset,
data_collator=Data_Collator,
task_name=config_data["task_name"]
)
retrain_runner.train_model(device=config_data["device"],
mlflow_logging=True,
retraining=True,
seed_value=seed_value)
if "MTLAS" in task_name:
retrain_runner.infer_model(
infer_dataset=test_dataset,
device=config_data["device"],
exp_name=config_data["exp_name"],
task_dict=task_dict,
exp_seed=seed_value,
)
| The-obsrvr/ArgStrength | Hyper-parameter-optimization/src/retraining.py | retraining.py | py | 6,077 | python | en | code | 0 | github-code | 36 |
26236313252 | import csv
import json
# Save .csv file from dict List [{}]
def save_file(results, filename, format):
    """Write a list of row dicts to '<filename>.<format>'.

    format == 'csv': writes an Excel-friendly CSV (leading 'sep=,' hint
    line) using the keys of the first row as the header; nothing is
    written when `results` is empty.
    format == 'json': dumps the list as-is.
    Any other format writes nothing (only the final message is printed).
    """
    if format == 'csv':
        if len(results) > 0:
            # BUG FIX: the output name now uses the previously-unused
            # `filename` parameter instead of a hard-coded placeholder.
            with open(f'{filename}.csv', 'w', encoding='utf8', newline='') as output_file:
                # Hint line so Excel splits the columns on ','.
                output_file.write('sep=,\n')
                fc = csv.DictWriter(output_file, fieldnames=results[0].keys())
                fc.writeheader()
                fc.writerows(results)
    elif format == 'json':
        with open(f'{filename}.json', 'w') as f:
            json.dump(results, f)
    print(f'file saved to {filename}.{format}')
30332494769 | import streamlit as st
import pandas as pd
import numpy as np
import re
import emoji
import io
from collections import Counter
import datetime
import plotly.express as px
from numpy import random
from multiprocessing.dummy import Pool as ThreadPool
from wordcloud import WordCloud, STOPWORDS
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
from deep_translator import GoogleTranslator
from helper import *
st.set_page_config(
page_title="Chat Analytics Dashboard",
page_icon="🔍",
layout="wide")
#Styling the Structure
#https://share.streamlit.io/rawrapter/chat-analytics-dashboard/main/chat_analyze.py
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.title("Chat Analytics Dashboard")
st.markdown('<small>Made with ♥ in India. © <b>Anant Arun</b></small>',unsafe_allow_html=True)
# translator = GoogleTranslator(source='auto', target='en')
#Calling Vader to extract out all sentiment analysis
sid_obj = SentimentIntensityAnalyzer()
pool = ThreadPool(8)
stopwords = set(STOPWORDS)
with st.expander("How to export your Conversation"):
st.write("""To export a copy of the history of an individual chat or group:
\n1. Open the conversation or group.
\n2. For Android: Click on three vertical dots on top right corner and select More. \nFor iOS: Tap on Contact/Group Name.
\n3. Select Export chat.
\n4. Choose Without Media.
\n5. You will asked how to save your chat history attached as a .txt document. \nSave it wherever you like. Then download the .txt file and upload it below.""")
#File uploader from streamlit to upload file
chat_file = st.file_uploader("Upload chat file (Don't worry your data is safe. Analysis is done in your browser only.)", type=["txt"])
chat_content = []
if chat_file != None:
raw_text = io.TextIOWrapper(chat_file,encoding='utf-8')
chat_content = raw_text.readlines()
def translate_request(text):
    """Translate *text* to English and drop stop words.

    The source language is auto-detected by deep_translator's
    GoogleTranslator.  The translator may return None (e.g. for empty
    input); in that case None is passed through unchanged.  Word
    filtering uses the module-level WordCloud STOPWORDS set.
    """
    translate_text = GoogleTranslator(target='en').translate(text.strip().lower())
    # `is not None` (identity), not `!= None` (equality) -- PEP 8 idiom.
    if translate_text is not None:
        translate_text = " ".join(word for word in translate_text.split(" ") if word not in stopwords)
    return translate_text
def list_to_DF(list, format=0):
    """Parse raw WhatsApp-export lines into a DataFrame.

    Each exported line looks like "<date>, <time> - <author>: <message>";
    lines that do not start with a date are continuations of the
    previous message.  `format` indexes the candidate timestamp formats
    below; on a parse failure the whole list is re-parsed with the next
    format (IndexError if none matches).

    Returns a DataFrame with columns date_time / author / message
    (author is NaN for system messages without an "author:" part).
    """
    date_format = ['%d/%m/%Y, %I:%M %p', '%d/%m/%y, %I:%M %p', '%m/%d/%y, %I:%M %p']
    date = re.compile(r'\d{1,2}/\d{1,2}/\d{2,4}')
    # Rows are collected in a plain list and converted once at the end:
    # DataFrame.append was removed in pandas 2.0 (and was O(n^2)), and
    # the old chained assignment `df.iloc[-1].message = ...` wrote to a
    # temporary copy, silently dropping continuation lines.
    rows = []
    for chat in list:
        if date.match(chat):
            dat_time, conversation = re.split(' - ', chat, maxsplit=1)
            try:
                aut, msg = re.split(':', conversation, maxsplit=1)
            except ValueError:
                # System message without an "author:" prefix.
                aut = np.nan
                msg = str.strip(conversation)
            try:
                dt = datetime.datetime.strptime(str.strip(dat_time), date_format[format])
            except ValueError:
                # Wrong timestamp format for this export; retry with the next one.
                return list_to_DF(list, format + 1)
            rows.append({'date_time': dt, 'author': aut, 'message': str.strip(msg)})
        elif rows:
            # Continuation line: glue it onto the previous message.
            rows[-1]['message'] = rows[-1]['message'] + ' ' + chat
    return pd.DataFrame(rows, columns=['date_time', 'author', 'message'])
def data_preperation(df):
    """Enrich the parsed chat DataFrame with derived columns, in place.

    Drops rows with a missing author (system messages), then adds:
    date/day/month/year breakdowns of date_time, a 12-hour "HH:MM AM/PM"
    time string, the emojis found in each message, a Media flag for
    "<Media omitted>" placeholders, and a per-message URL count.
    Returns the same (mutated) DataFrame.
    """
    year = lambda x:x.year
    # NOTE(review): emoji.get_emoji_regexp() was removed in emoji>=2.0;
    # this code requires an older pinned `emoji` package -- confirm.
    emoji_extract = lambda x:''.join(re.findall(emoji.get_emoji_regexp(),x))
    url_pattern = r'(https?://\S+)'
    df.dropna(inplace=True)
    df['date'] = df['date_time'].apply(pd.Timestamp.date)
    df['day'] = df['date_time'].apply(pd.Timestamp.day_name)
    df['month'] = df['date_time'].apply(pd.Timestamp.month_name)
    df['year'] = df['date_time'].apply(year)
    # time-of-day reformatted to 12-hour "HH:MM AM/PM" strings.
    df['time'] = df['date_time'].apply(pd.Timestamp.time).apply(lambda x: datetime.datetime.strptime(str(x), "%H:%M:%S")).apply(lambda x: x.strftime("%I:%M %p"))
    df['emoji_used'] = df.message.apply(emoji_extract)
    df['Media'] = df.message.str.contains('<Media omitted>')
    df['urlcount'] = df.message.apply(lambda x: re.findall(url_pattern, x)).str.len()
    return df
if chat_content!=[]:
df=list_to_DF(chat_content)
df=data_preperation(df)
st.subheader("Conversation Stats")
st.write("\n")
st.write("Total Text Messages: ", df.shape[0])
st.write("Total Media Messages: ", df[df['Media']].shape[0])
st.write("Total Emojis: ", sum(df['emoji_used'].str.len()))
st.write("Total Links/URLs: ", np.sum(df.urlcount))
media_messages_df = df[df['message'] == '<Media omitted>']
messages_df = df.drop(media_messages_df.index)
author_value_counts = df['author'].value_counts().to_frame()
fig0 = px.bar(author_value_counts, y='author', x=author_value_counts.index,color='author',color_continuous_scale='Tealgrn' ,labels={'index':'Employees','author':'Overall Participation'}, title="Employees Team Interaction")
st.plotly_chart(fig0)
sort_type = st.selectbox("Sort By:",["Date","Day","Time","Month"])
if sort_type=="Date":
keyword="date"
elif sort_type=="Day":
keyword="day"
elif sort_type=="Time":
keyword = "time"
elif sort_type=="Month":
keyword = "month"
sort_df = messages_df.groupby(keyword).sum()
sort_df['MessageCount'] = messages_df.groupby(keyword).size().values
sort_df.reset_index(inplace=True)
fig = px.line(sort_df, x=keyword, y="MessageCount", title=f"Overall Number of Messages according to {keyword}",)
fig.update_xaxes(nticks=20,showgrid=False)
st.plotly_chart(fig)
author_df = df["author"].value_counts().reset_index()
author_df.rename(columns={"index":"author", "author":"Number of messages"}, inplace=True)
author_df["Total %"] = round(author_df["Number of messages"]*100/df.shape[0], 2)
author_df["Involvement"] = author_df["Total %"].apply(lambda x: talkativeness(x, df["author"].nunique()))
t_author_df = df.copy()
t_author_df["year"] = t_author_df["date"].apply(lambda x: x.year)
t_author_df["month"] = t_author_df["date"].apply(lambda x: x.strftime("%b"))
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
t_author_df['month'] = pd.Categorical(t_author_df['month'], months)
analysis_1_df = t_author_df.pivot_table(index=["month", "year"], columns=["author"], values=["message"], aggfunc="count", fill_value=0)
analysis_1_df.columns = [col_[1] for col_ in analysis_1_df.columns]
analysis_1_df = analysis_1_df.reset_index().sort_values(["year", "month"])
analysis_1_df["month_year"] = analysis_1_df.apply(lambda x: x["month"] + " " + str(x["year"]), axis=1)
analysis_1_df.drop(["month", "year"], axis=1, inplace=True)
analysis_1_df.set_index('month_year',inplace=True)
author_df["Trend"] = author_df["author"].apply(lambda x: trendline(analysis_1_df[x]))
st.write('Overall Team Involvement Trend',author_df)
#emoji distribution
senders = st.selectbox("Select participant:",messages_df.author.unique())
dummy_df = messages_df[messages_df['author'] == senders]
#Individual Line chart
dummy_df1 = dummy_df.groupby(keyword).sum()
dummy_df1['MessageCount'] = dummy_df.groupby(keyword).size().values
dummy_df1.reset_index(inplace=True)
fig2 = px.line(sort_df, x=keyword, y="MessageCount", title=f"Overall Involvement of {senders} in {keyword} wise",)
fig2.update_xaxes(nticks=20,showgrid=False)
st.plotly_chart(fig2)
total_emojis_list = list([a for b in dummy_df.emoji_used for a in b])
emoji_dict = dict(Counter(total_emojis_list))
emoji_dict = sorted(emoji_dict.items(), key=lambda x: x[1], reverse=True)
author_emoji_df = pd.DataFrame(emoji_dict, columns=['emoji', 'count'])
fig5 = px.pie(author_emoji_df, values='count', names='emoji', title=f'Emoji Distribution for {senders}')
fig5.update_traces(textposition='inside', textinfo='percent+label',showlegend=False)
st.plotly_chart(fig5)
comment_words = ''
for val in dummy_df.message:
# typecaste each val to string
val = str(val)
# split the value
tokens = val.split()
# Converts each token into lowercase
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens)+" "
wordcloud = WordCloud(width = 800, height = 800,
background_color ='black',
stopwords = stopwords,min_font_size=6).generate(comment_words)
# plot the WordCloud image
with st.expander("Tap to View Wordcloud"):
fig, ax = plt.subplots(figsize = (10, 10),facecolor = 'k')
ax.imshow(wordcloud,interpolation='bilinear')
ax.axis("off")
plt.tight_layout(pad=0)
st.pyplot(fig)
senti = []
with st.spinner(f'Analyzing Sentiment for {senders}.. (This may take some time depending on size of data)'):
try:
translation = pool.map(translate_request, dummy_df["message"].values)
except Exception as e:
raise e
pool.close()
pool.join()
for i in translation:
if i!=None:
sentiment_dict = sid_obj.polarity_scores(i)
if sentiment_dict['compound'] >= 0.05 :
senti.append("Positive")
elif sentiment_dict['compound'] <= - 0.05 :
senti.append("Negative")
else :
senti.append("Neutral")
all_sents = Counter(senti)
fig6 = px.bar(y=all_sents.values(), x=all_sents.keys(),color=all_sents.keys(),color_discrete_sequence=['green','blue','red'] ,labels={'x':'Sentiment','y':'Interaction'},title=f"Sentiments for {senders}")
fig6.update_layout(showlegend=False)
st.plotly_chart(fig6)
result = max(all_sents,key=all_sents.get)
st.info(f"{senders} mostly conveys {result} behaviour")
st.write("\n")
"""
# This code was made for testing purpose only
if st.checkbox(f"Click to check score for the {senders} (Out of 100)"):
score_df = messages_df[messages_df['author'] == senders]
score_df['MessageCount'] = score_df.shape[0]
if score_df[(score_df['MessageCount'] > 400)].shape[0] > 0:
st.write(f"Score for {senders}: ",random.randint(80,100))
elif score_df[(score_df['MessageCount'] > 300) & (score_df['MessageCount'] < 400)].shape[0] > 0:
st.write(f"Score for {senders}: ",random.randint(70,80))
elif score_df[(score_df['MessageCount'] > 200) & (score_df['MessageCount'] < 300)].shape[0] > 0:
st.write(f"Score for {senders}: ",random.randint(60,70))
elif score_df[(score_df['MessageCount'] > 100) & (score_df['MessageCount'] < 200)].shape[0] > 0:
st.write(f"Score for {senders}: ",random.randint(50,60))
else:
st.write(f"Score for {senders}: ",random.randint(40,50))
"""
st.markdown(' <br><br><center>Developed and Maintained by <b><a href="https://www.linkedin.com/in/anantarun" target="_blank">Anant Arun</a></b></center>',unsafe_allow_html=True)
| RawRapter/Chat-Analytics-Dashboard | chat_analyze.py | chat_analyze.py | py | 11,100 | python | en | code | 0 | github-code | 36 |
6830029168 | import sys
# Memo table shared across calls: caches sol(n) so overlapping
# subproblems of the coin-exchange recursion are computed only once.
sol_list = {}


def sol(num):
    """Return the maximum dollars obtainable from a Bytelandian coin
    worth ``num`` (SPOJ COINS problem).

    A coin worth n can be exchanged for coins worth n//2, n//3 and
    n//4.  For num <= 11 the face value already beats any exchange, so
    it is returned directly; otherwise the exchange is cashed
    recursively with memoization.
    """
    if num <= 11:
        return num
    if num not in sol_list:  # `in dict` -- no need for .keys()
        # Floor division keeps the arithmetic in exact integers;
        # int(num / 2) went through a float and would lose precision
        # for very large num.
        sol_list[num] = sol(num // 2) + sol(num // 3) + sol(num // 4)
    return sol_list[num]
try:
while True:
i = int(input())
print(sol(i))
except :
sys.exit(0)
| thirstycode/competitive-programming | Problems/Bytelandian gold coins/sol.py | sol.py | py | 361 | python | en | code | 1 | github-code | 36 |
21025379752 | from models.video_model import VideoModel
class PublicationModel:
    """Plain record describing one scraped publication and its video.

    Holds identity fields, engagement counters, classification data and
    the attached VideoModel; all values are stored as given.
    """

    def __init__(self, publication_url: str, publication_id: str, author_unique_id: str,
                 desc: str, like_count: int, comment_count: int, view_count: int, share_count: int,
                 category: int, created_at: int, hashtags: list[str], video: VideoModel):
        # Identity / provenance.
        self.publication_url = publication_url
        self.publication_id = publication_id
        self.author_unique_id = author_unique_id
        self.desc = desc
        # Engagement counters.
        self.like_count = like_count
        self.comment_count = comment_count
        self.view_count = view_count
        self.share_count = share_count
        # Classification and timing.
        self.hashtags = hashtags
        self.category = category
        self.created_at = created_at
        # Attached media.
        self.video = video

    def __str__(self):
        # Separator spacing is kept exactly as before (including the
        # missing space after view_count).
        return (
            "<Publication "
            f"publication_url:{self.publication_url}, "
            f"publication_id:{self.publication_id}, "
            f"author_unique_id:{self.author_unique_id}, "
            f"desc:{self.desc}, "
            f"like_count:{self.like_count}, "
            f"comment_count:{self.comment_count}, "
            f"view_count:{self.view_count},"
            f"share_count:{self.share_count}, "
            f"hashtags:{self.hashtags}, "
            f"category:{self.category}, "
            f"created_at:{self.created_at}, "
            f"video:{self.video}"
        )
| MAG135/robot | models/publication_model.py | publication_model.py | py | 1,303 | python | en | code | 0 | github-code | 36 |
75034732264 | import logging
from handlers.detectors import detect_is_admin
from keyboards.default.start import start_admin
from keyboards.inline.admin.success_payment import withdraw_money_balance
from loader import dp, bot, db
from data.config import ADMINS
from keyboards.default.back import back
from states.balance import Balance
from aiogram import types
from aiogram.dispatcher import FSMContext
@dp.callback_query_handler(text="withdraw_money", state=Balance.menu)
async def withdraaw_money_from_balance(call: types.CallbackQuery, state: FSMContext):
    """Entry point of the withdrawal flow.

    If the user's balance is at least 10 000 so'm, asks for a card
    number and advances the FSM to Balance.withdraw; otherwise shows an
    alert explaining the minimum.
    """
    # NOTE(review): function name has a typo ("withdraaw"); renaming is
    # harmless for the dispatcher but left unchanged here.
    user_id = call.from_user.id
    select_user = await db.select_user_data(user_id)
    # NOTE(review): balance is read from column index 1 here, but other
    # handlers in this module read index 2 -- verify the row schema.
    balance = select_user[0][1]
    if balance >= 10000:
        # Prompt (Uzbek): enter a card number; only Humo is supported.
        text = "<b>Pul yechib olish uchun karta raqami kiriting...\n\n" \
               "<i>Mavjud to'lov turlari</i>\n\n▪️Humo</b>"
        await call.message.delete()
        await call.message.answer(text=text, reply_markup=back)
        await Balance.withdraw.set()
    else:
        # Alert (Uzbek): at least 10,000 so'm is required to withdraw.
        text = "⚠️ Pul chiqarish uchun hisobingizda kamida 10.000 so'm bo'lishi shart!"
        await call.answer(text, show_alert=True)
@dp.message_handler(state=Balance.withdraw, content_types=types.ContentType.TEXT)
async def identify_card(message: types.Message, state: FSMContext):
    """Store the card number the user typed and ask for the amount.

    Saves the raw text under 'card_number' in FSM storage and advances
    the flow to Balance.money.
    """
    # NOTE(review): the card number is not validated (length/digits);
    # any text is accepted here.
    msg = message.text
    await message.answer(
        # Prompt (Uzbek): "now type how much money you want to withdraw".
        text="Yaxshi, endi nech pul chiqarib olmoqchi ekanligingizni yozing\n\nMasalan: <code>10000</code>",
        reply_markup=back
    )
    await state.update_data(
        {'card_number': msg}
    )
    await Balance.money.set()
@dp.message_handler(state=Balance.money, content_types=types.ContentType.TEXT)
async def identify_how_much_money(message: types.Message, state: FSMContext):
data = await state.get_data()
card_number = data.get('card_number')
user_id = message.from_user.id
full_name = message.from_user.full_name
user_mention = message.from_user.get_mention(name=full_name)
msg = message.text
select_user = await db.select_user_data(user_id)
balance = select_user[0][2]
try:
summa = int(msg)
if summa >= 10000:
if balance >= summa:
await bot.send_message(
chat_id=message.chat.id,
text="✅ Pul yechish uchun so'rovingiz qabul qilindi, admin tez orada to'lovni amalga oshiradi",
reply_markup=await detect_is_admin(user_id)
)
admin_text = f"👤 {user_mention}\n💳 {card_number}\n💸 {summa} so'm\n💰 {balance}"
await bot.send_message(
chat_id=ADMINS[0],
text=admin_text,
reply_markup=withdraw_money_balance(user_id, summa)
)
await state.reset_state(with_data=False)
elif balance < summa:
await bot.send_message(
chat_id=message.chat.id,
text=f"Hisobingizda {balance} so'm pul va siz {summa} so'm "
f"chiqarishga harakat qilyapsiz, iltimos boshqattan urinib ko'ring\n\n"
f"Masalan: <code>10000</code>",
reply_markup=back
)
else:
text = "⚠️ Eng kamida 10000 so'm chiqarib olish mumkin, summa kiriting\n\nMasalan: <code>10000</code>"
await message.answer(text, reply_markup=back)
except ValueError as VE:
logging.info(VE)
await message.answer(
text='Iltimos, faqat raqamlardan foydalaning\n\nMasalan: <code>10000</code>',
reply_markup=back
)
@dp.callback_query_handler(text_contains="tolandi_", state='*')
async def final_withdraw(call: types.CallbackQuery, state: FSMContext):
data = call.data
splited = data.split('_')
user_id = splited[1]
summa = splited[2]
select_user_data = await db.select_user_data(int(user_id))
balance = select_user_data[0][2]
get_user = await bot.get_chat(user_id)
full_name = get_user.full_name
user_mention = call.from_user.get_mention(full_name)
end_balance = balance - int(summa)
await db.update_user_balancee(end_balance, int(user_id))
await call.message.delete()
await bot.send_message(
chat_id=call.message.chat.id,
text="Foydalanuvchi puli to'lab berildi",
reply_markup=start_admin
)
text = f"✅ Pul toʻlandi: {user_mention}\n" \
f"👤 Foydalanuvchi ID: {user_id}\n" \
f"💸 Miqdor: {summa} soʻm"
await bot.send_message(
chat_id=-1001943689507,
text=text
)
await bot.send_message(
chat_id=user_id,
text=f"Sizning pul yechish so'rovingiz qabul qilindi va {summa} so'm to'lab berildi",
reply_markup=await detect_is_admin(user_id)
)
await state.finish()
@dp.callback_query_handler(text_contains="bekor_qilish_", state='*')
async def final_withdraw(call: types.CallbackQuery, state: FSMContext):
    """Admin pressed "cancel payment": ask for a cancellation reason.

    Parses the target user id and amount out of the callback payload
    ("bekor_qilish_<user_id>_<summa>"), stashes them in FSM storage and
    moves to Balance.cancel_payment, where the reason text is handled.
    """
    # NOTE(review): this redefines the module-level name `final_withdraw`
    # (the "tolandi_" handler above uses the same name).  Both handlers
    # are already registered with the dispatcher, so behavior is
    # unaffected, but renaming one would avoid the shadowing.
    data = call.data
    splited = data.split('_')
    # Payload layout: ['bekor', 'qilish', user_id, summa].
    user_id = splited[2]
    summa = splited[3]
    await call.message.delete()
    await state.update_data(
        {'final_id': user_id, 'final_summa': summa}
    )
    await bot.send_message(
        chat_id=call.message.chat.id,
        # Prompt (Uzbek): "enter the reason the payment was cancelled".
        text="To'lov bekor bo'lishining sababini kiriting",
        reply_markup=back
    )
    await Balance.cancel_payment.set()
@dp.message_handler(state=Balance.cancel_payment, content_types=types.ContentType.TEXT)
async def cancel_payment_user(message: types.Message, state: FSMContext):
data = await state.get_data()
user_id = data.get('final_id')
final_summa = data.get('final_summa')
msg = message.text
await bot.send_message(
chat_id=user_id,
text=f"Sizning {final_summa} so'm chiqarishdagi harakatingiz bekor qilindi\n\nSabab: {msg}"
)
await bot.send_message(
chat_id=message.chat.id,
text="Xabar foydalanuvchiga yuborildi va to'lov harakati bekor qilindi",
reply_markup=start_admin
)
await state.finish() | uzbsobirov/Money-grow-bot | handlers/users/balance/withdraw_money.py | withdraw_money.py | py | 6,079 | python | en | code | 0 | github-code | 36 |
24856816686 | def contact_name(name, family_name, symbol):
result = name + symbol + family_name
print(result)
name = input()
last_name = input()
bond = input()
contact_name(name, last_name, bond)
# Second version
fist_name = input()
last_name = input()
delimiter = input()
print(f'{fist_name}{delimiter}{last_name}') | BorisAtias/SoftUni-Python-Fundamentals-course | Data Types and Variables - Lab/01. Concat Names.py | 01. Concat Names.py | py | 337 | python | en | code | 0 | github-code | 36 |
7165663160 | def smallestRangeI(nums: list[int], k: int) -> int:
max_nums = max(nums) - k
min_nums = min(nums) + k
result = max_nums - min_nums
return result if result >= 0 else 0
nums = [1, 3, 6]
# nums = [10, 0]
# nums = [1]
k = 3
# k = 2
# k = 0
print(smallestRangeI(nums, k))
| SafonovVladimir/mornings | 05 may/04.py | 04.py | py | 288 | python | en | code | 0 | github-code | 36 |
37037529431 | # File: CheckTags.py
"""
This program checks that tags are properly matched in an HTML file.
This version of the program runs in Python; the checktags version runs
directly from the command line.
"""
import html.parser
import urllib.request
import urllib.error
def CheckTags():
    """Prompt the user for a URL and run the tag-matching check on it."""
    checkURL(input("URL: "))
def checkURL(url):
    """Checks whether the tags are balanced in the specified URL.

    Downloads the page, decodes it as UTF-8 and feeds it to
    HTMLTagParser.checkTags; network failures are reported to stdout
    rather than raised.  (A leftover debug print of the response type
    was removed.)
    """
    try:
        response = urllib.request.urlopen(url)
        parser = HTMLTagParser()
        parser.checkTags(response.read().decode("UTF-8"))
    except urllib.error.URLError:
        print("Something went wrong")
class HTMLTagParser(html.parser.HTMLParser):
    """
    This class extends the standard HTML parser and overrides the
    callback methods used for start and end tags.  It reports unmatched
    start/end tags to stdout and collects every href/src link it sees
    so the links can later be probed with brokenLinks().
    """

    # Void elements never receive a closing tag in HTML, so they must
    # not be pushed on the open-tag stack (otherwise <br>, <img>, ...
    # would be falsely reported as missing their end tags).
    _VOID_ELEMENTS = frozenset([
        "area", "base", "br", "col", "embed", "hr", "img", "input",
        "link", "meta", "param", "source", "track", "wbr",
    ])

    def __init__(self):
        """Creates a new HTMLTagParser object."""
        html.parser.HTMLParser.__init__(self)
        self._link_stack = []
        self._stack = []  # open tags as (tag, line-number) pairs

    def brokenLinks(self):
        """Fetches every collected link and reports the ones that fail.

        Pages that do load are additionally run through checkTags().
        """
        for url in self._link_stack:
            try:
                response = urllib.request.urlopen(url)
                self.checkTags(response.read().decode("UTF-8"))
            except urllib.error.URLError:
                print("Link doesn't work")

    def checkTags(self, text):
        """Checks that the tags are balanced in the supplied text."""
        self._stack = []
        self.feed(text)
        # Anything still on the stack never saw its closing tag.
        while len(self._stack) > 0:
            startTag, startLine = self._stack.pop()
            print("Missing </" + startTag + "> for <" + startTag +
                  "> at line " + str(startLine))

    def handle_starttag(self, startTag, attributes):
        """Overrides the callback function for start tags."""
        if startTag not in self._VOID_ELEMENTS:
            startLine, _ = self.getpos()
            self._stack.append((startTag, startLine))
        # Accumulate href/src attribute VALUES for brokenLinks().
        # BUG FIX: the old code tested the string 'href' against the
        # (name, value) tuples -- always False -- and *overwrote* the
        # link list on every tag instead of extending it; it also left
        # a debug print(attributes) in place.
        self._link_stack.extend(
            value for name, value in attributes
            if name in ("href", "src") and value)

    def handle_endtag(self, endTag):
        """Overrides the callback function for end tags."""
        endLine, _ = self.getpos()
        if len(self._stack) == 0:
            print("No <" + endTag + "> for </" + endTag +
                  "> at line " + str(endLine))
        else:
            # Pop until the matching start tag; everything popped on the
            # way down is an unclosed tag.
            while len(self._stack) > 0:
                startTag, startLine = self._stack.pop()
                if startTag == endTag:
                    break
                print("Missing </" + startTag + "> for <" + startTag +
                      "> at line " + str(startLine))
'''
I need to make this code be able to handle broken links in the html page
'''
# Startup code
if __name__ == "__main__":
CheckTags()
| katthomp/networks | checktags.py | checktags.py | py | 3,070 | python | en | code | 0 | github-code | 36 |
11952114818 | from datetime import datetime
import backtrader as bt
import tushare as ts
import pandas as pd
class MyStrategy1(bt.Strategy):
    """Single-SMA trend strategy.

    Buys when the close rises above the `maperiod`-day simple moving
    average while flat, sells when it falls below while holding a
    position.  Log lines are accumulated and flushed to disk in stop().
    """

    # maperiod: SMA lookback window; printlog: emit log lines when True.
    params = (('maperiod', 20),
              ('printlog', False),)

    def __init__(self):
        # Shortcut to the close-price line of the primary data feed.
        self.dataclose = self.datas[0].close
        # Pending order plus last executed buy price / commission.
        self.order = None
        self.buyprice = None
        self.buycomm = None
        # Accumulated log fragments, written out in stop().
        self.process = []
        # Simple moving average used for the entry/exit signal.
        self.sma = bt.indicators.SimpleMovingAverage(self.datas[0], period=self.params.maperiod)

    def next(self):
        """Per-bar strategy core: issue buy/sell orders on SMA crosses."""
        if self.order:  # an order is still pending; wait for it
            return
        if not self.position:  # flat: look for an entry
            # Entry: close rises above the moving average.
            if self.dataclose[0] > self.sma[0]:
                self.log('BUY CREATE, %.2f' % self.dataclose[0])
                self.order = self.buy()
        else:  # holding: look for an exit
            # Exit: close falls below the moving average.
            if self.dataclose[0] < self.sma[0]:
                self.log('SELL CREATE, %.2f' % self.dataclose[0])
                self.order = self.sell()

    def log(self, txt, dt=None, doprint=False):
        """Record (and print) a timestamped trade-log line when enabled."""
        if self.params.printlog or doprint:
            dt = dt or self.datas[0].datetime.date(0)
            print(f'{dt.isoformat()},{txt}')
            self.process.append(dt.isoformat())
            self.process.append(txt)
            self.process.append('\n')

    def notify_order(self, order):
        """Report order execution / failure (backtrader callback)."""
        # Nothing to report while the order is merely submitted/accepted.
        if order.status in [order.Submitted, order.Accepted]:
            return
        # Executed orders: log price / value / commission.
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log(f'买入:\n价格:{order.executed.price},成本:{order.executed.value},手续费:{order.executed.comm}')
                self.buyprice = order.executed.price
                self.buycomm = order.executed.comm
            else:
                self.log(f'卖出:\n价格:{order.executed.price},\
                         成本: {order.executed.value},\
                         手续费{order.executed.comm}')
            self.bar_executed = len(self)
        # Cancelled / margin-call / rejected orders are trade failures.
        elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log('交易失败')
        self.order = None

    def notify_trade(self, trade):
        """Report realized PnL when a round-trip trade closes."""
        if not trade.isclosed:
            return
        self.log(f'策略收益:\n毛收益:{trade.pnl:.2f}, 净收益:{trade.pnlcomm:.2f}')

    def stop(self):
        """Print the final portfolio value and flush the collected log."""
        self.log('(MA均线: %2d日) 期末总资金 %.2f' % (self.params.maperiod, self.broker.getvalue()), doprint=True)
        # BUG FIX: use a context manager so the log file is closed even
        # if writelines raises (the original leaked the handle on error).
        # NOTE(review): the output path is hard-coded and ./static must
        # already exist.
        with open("./static/logs.log", "w") as f:
            f.writelines(self.process)
def back(stock_code, startcash):
    """Backtest the SMA-cross strategy for *stock_code*.

    Pulls 2018-01-01..2021-06-01 daily bars from tushare, runs MyStrategy1
    with a 20-day SMA on an initial capital of *startcash*, prints the
    analyzer report and returns {'all': final value, 'jing': net profit}
    (both truncated to int).
    """
    engine = bt.Cerebro()

    # Fixed backtest window.
    start = '2018-01-01'
    end = '2021-06-01'
    from_dt = datetime.strptime(start, "%Y-%m-%d")
    to_dt = datetime.strptime(end, "%Y-%m-%d")

    # Forward-adjusted daily bars from tushare, shaped for backtrader.
    frame = ts.get_k_data(stock_code, autype='qfq', start=start, end=end)
    frame.index = pd.to_datetime(frame.date)
    frame = frame[['open', 'high', 'low', 'close', 'volume']]
    engine.adddata(bt.feeds.PandasData(dataname=frame, fromdate=from_dt, todate=to_dt))

    # Strategy, cash, commission (0.1%) and fixed position sizing.
    engine.addstrategy(MyStrategy1, maperiod=20, printlog=True)
    engine.broker.setcash(startcash)
    engine.broker.setcommission(commission=0.001)
    engine.addsizer(bt.sizers.FixedSize, stake=1000)

    # Attach the analyzer suite (same set and order as before).
    for analyzer, alias in (
            (bt.analyzers.PyFolio, 'pyfolio'),
            (bt.analyzers.AnnualReturn, '_AnnualReturn'),
            (bt.analyzers.Calmar, '_Calmar'),
            (bt.analyzers.DrawDown, '_DrawDown'),
            (bt.analyzers.GrossLeverage, '_GrossLeverage'),
            (bt.analyzers.PositionsValue, '_PositionsValue'),
            (bt.analyzers.LogReturnsRolling, '_LogReturnsRolling'),
            (bt.analyzers.PeriodStats, '_PeriodStats'),
            (bt.analyzers.Returns, '_Returns'),
            (bt.analyzers.SharpeRatio, '_SharpeRatio'),
            (bt.analyzers.SQN, '_SQN'),
            (bt.analyzers.TimeReturn, '_TimeReturn'),
            (bt.analyzers.TradeAnalyzer, '_TradeAnalyzer'),
            (bt.analyzers.Transactions, '_Transactions'),
            (bt.analyzers.VWR, '_VWR')):
        engine.addanalyzer(analyzer, _name=alias)

    results = engine.run()
    print(results)
    print('eeeeeeeeeeeee')

    portvalue = engine.broker.getvalue()
    fpnl = portvalue - startcash

    # Collect the per-analyzer statistics into one table.
    strat = results[0]
    drawdown = strat.analyzers._DrawDown.get_analysis()
    period = strat.analyzers._PeriodStats.get_analysis()
    performance_dict = {
        'calmar_ratio': list(strat.analyzers._Calmar.get_analysis().values())[-1],
        'average_drawdown_len': drawdown['len'],
        'average_drawdown_rate': drawdown['drawdown'],
        'average_drawdown_money': drawdown['moneydown'],
        'max_drawdown_len': drawdown['max']['len'],
        'max_drawdown_rate': drawdown['max']['drawdown'],
        'max_drawdown_money': drawdown['max']['moneydown'],
        'average_rate': period['average'],
        'stddev_rate': period['stddev'],
        'positive_year': period['positive'],
        'negative_year': period['negative'],
        'nochange_year': period['nochange'],
        'best_year': period['best'],
        'worst_year': period['worst'],
        'sqn_ratio': strat.analyzers._SQN.get_analysis()['sqn'],
        'vwr_ratio': strat.analyzers._VWR.get_analysis()['vwr'],
        'sharpe_info': strat.analyzers._SharpeRatio.get_analysis()['sharperatio'],
    }
    performance = pd.DataFrame(performance_dict, index=[0]).T
    print(performance)

    # Final totals.
    print(f'\n总资金: {portvalue:.2f}')
    print(f'净收益: {round(fpnl, 2)}')

    ans = {'all': int(portvalue), 'jing': int(fpnl)}
    return ans
if __name__ == '__main__':
    # Interactive entry point: prompt for a stock code and starting capital.
    stock_code = input('输入回测股票代码:')
    startcash = float(input('输入回测初始资本:'))
    back(stock_code, startcash)
| Cui-Yusong/NUS_proj | backtest_sma.py | backtest_sma.py | py | 8,940 | python | en | code | 1 | github-code | 36 |
15506027054 |
# https://medium.com/analytics-vidhya/computer-vision-and-deep-learning-part-2-586b6a0d3220 --- main
# https://github.com/Esri/raster-deep-learning/blob/master/docs/writing_deep_learning_python_raster_functions.md
# Compare three binarization approaches on one grayscale image:
# adaptive mean thresholding, plain Otsu, and Otsu after Gaussian blur.
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Flag 0 = load as grayscale. NOTE(review): the path is machine-specific;
# if the file is missing, imread returns None and the calls below fail.
cv_image= cv2.imread("/home/jameshung/Pictures/forest01.jpg",0)
# Per-pixel threshold: mean of the 11x11 neighbourhood minus 2.
one =cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)
# Global Otsu threshold on the raw image (chosen threshold returned in ret2).
ret2, two = cv2.threshold(cv_image,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Otsu again after 5x5 Gaussian smoothing to suppress noise.
blur = cv2.GaussianBlur(cv_image,(5,5),0)
ret3,three = cv2.threshold(blur, 0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# 3x3 figure: each row shows source image, its histogram, thresholded result.
# The literal 0 placeholders are never rendered; histograms come from images[i*3].
images = [cv_image, 0, one,
          cv_image, 0, two,
          blur, 0, three]
titles = ['Original Image','Histogram','Adaptive Mean Thresholding',
          'Original Image','Histogram',"Otsu's Thresholding",
          'Gaussian filtered Image','Histogram',"Otsu's Thresholding of Gaussian Blur Image"]
for i in range(3):
    plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
    plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
    plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
    plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
    plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
    plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
plt.show()
# NOTE(review): waitKey only has an effect while an OpenCV window is open;
# here plt.show() blocks first, so this is effectively a no-op.
cv2.waitKey(1000)
| hssaccord/myTEST | main.py | main.py | py | 1,348 | python | en | code | 0 | github-code | 36 |
6733918320 | def getClickData(clickData):
point = CSafePoint(clickData)
pointData = point.getPoint()
log.print(pointData.get('lat'))
log.print(pointData.get('lon'))
log.print(pointData.get('customdata'))
log.print(pointData.get('pointIndex'))
log.print(pointData.get('pointNumber'))
log.print(pointData.get('curveNumber'))
return str(pointData.getDict())
def clickState(n_clicks, clickData):
    """Return the stored topology-map click value alongside the raw clickData."""
    stored = screenVariables.get('myTopologyMap').getValue()
    return '{} {}'.format(stored, clickData)
def getZoom(n_intervals, figure):
    """Return the current zoom level of the wrapped figure."""
    return CSafeFigure(figure=figure).getZoom()
def getClickedInfo(clickData):
    """Format the clicked point as a human-readable 'Lat/Lon/Data' line."""
    fields = CSafePoint(clickData).getPoint()
    # The final '+' is kept so a non-string customdata still raises TypeError,
    # exactly as the original concatenation chain did.
    return f"Lat: {fields.get('lat')}, Lon: {fields.get('lon')}, Data: " + fields.get('customdata')
clicks = 0  # module-level button-click counter; distinguishes button from map clicks
def setPointCoordinates(n_clicks, clickData, id, lat, lon):
    """Dash callback: move or highlight a point on the topology map.

    If the button click count changed since the last invocation, the trigger
    was the "set coordinates" button: move the selected point to (lat, lon).
    Otherwise the trigger was a click on the figure: highlight that point.
    Returns the figure to render.
    """
    myTopologyMap = screenVariables.get(getNameFromId(id))
    global clicks
    if n_clicks is None:
        n_clicks = 0
    # Debug tracing of the click bookkeeping.
    log.print("HEEEEEEEEEEEEEEERE")
    log.print(clicks)
    log.print(n_clicks)
    if clickData is None:
        # Nothing clicked yet: remember the count and redraw unchanged.
        clicks = n_clicks
        return myTopologyMap.getFigure()
    point = CSafePoint(clickData)
    pointData = point.getPoint()
    # curveNumber identifies the trace; pointIndex the point within it.
    setIndex = pointData.get('curveNumber')
    pointIndex = pointData.get('pointIndex')
    if n_clicks != clicks: #no click on figure, click on button
        clicks = n_clicks
        if lat is None or lon is None:
            return myTopologyMap.getFigure()
        return myTopologyMap.setNewCoordinatesForPoint(setIndex, pointIndex, lat, lon)
    else: #click on figure
        return myTopologyMap.highlight(setIndex, pointIndex)
74147866345 | import pygame, humans, sys, menu
from random import randint
# Schoolyard crowd simulation: 200 students wander an 800x800 pygame window,
# look for friends and collide with each other.
# Throttling counters for the expensive per-student behaviours; starting at
# 300 guarantees every behaviour fires on the very first frame.
scan_delay = 300
coll_delay = 300
move_delay = 300
# Show the start menu first; it may rewrite settings/exit.txt.
menu.valmynd()
skra = open("settings/exit.txt", "r")
for line in skra:
    if line == "True":
        sys.exit()
skra.close()
# settings.txt is whitespace-separated; field 2 is the target frame rate.
settings = (open("settings/settings.txt", "r").read()).split()
fps = int(settings[2])
pygame.init()
student_list = []
# Spawn 200 students at random positions with random speed (1-10).
for stud_num in range(200):
    student_list.append(humans.student(randint(0,800), randint(0,800), randint(1,10), stud_num))
school = pygame.display.set_mode((800, 800))
school.fill((255,255,255))
while 1:#gameplay loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    # NOTE(review): 'target' is recomputed every frame but never used.
    target =randint(0,len(student_list) - 1)
    school.fill((255, 255, 255))
    for student in student_list:
        school.blit(student.image,student.rect)
        # Scanning and path-finding only run on frames where the matching
        # delay counter has reached its threshold (roughly every 11th frame).
        if scan_delay >= 10:
            student.scan_surroundings(student_list)
        if move_delay >= 10:
            student.move(student.moveto(student.find_friend()))
        else:
            student.move(student.move_angle)
        student.check_collide(student_list)
    # Reset-or-increment the throttling counters once per frame.
    if scan_delay >= 10:
        scan_delay = 0
    else:
        scan_delay += 1
    if move_delay >= 10:
        move_delay = 0
    else:
        move_delay += 1
    # NOTE(review): coll_delay is maintained but never consulted above.
    if coll_delay >= 1:
        coll_delay = 0
    else:
        coll_delay += 1
    pygame.display.flip()
    pygame.time.Clock().tick(fps)
| Zepeacedust/skolasim | main.py | main.py | py | 1,408 | python | en | code | 1 | github-code | 36 |
70188062823 | #animation ICMECAT rose scatter plot with HI CME circles and in situ data
from scipy import stats
import scipy.io
from matplotlib import cm
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import sunpy.time
import time
import pickle
import seaborn as sns
import math
#for reading catalogues
def getcat(filename):
    """Read an IDL .sav catalogue file and return the restored variables."""
    print('reading CAT '+filename)
    catalogue = scipy.io.readsav(filename, verbose='true')
    print('done reading CAT')
    return catalogue
def getpositions(filename):
    """Read spacecraft/planet position data from an IDL .sav file."""
    print('reading positions in '+filename)
    positions = scipy.io.readsav(filename, verbose='true')
    print('done reading positions')
    return positions
def time_to_num_cat(time_in):
    """Convert catalogue time byte-strings to matplotlib date numbers.

    *time_in* holds byte strings like b'2007-11-17T07:20:00' or
    b'2007-11-17T07:20Z' (output of scipy.io.readsav) at 1-minute or lower
    resolution.  Returns (time_num, time_str): matplotlib date numbers
    (days since 0001-01-01 UTC plus 1, e.g. 735202.675) and the cleaned
    ISO strings as a numpy array.  Fill values (year 9999) are left as 0
    in time_num.

    Fixes the original loop, which kept a manual counter alongside the
    iterator and contained a dead `time_str[j]` expression statement.
    """
    time_str = ['' for _ in range(len(time_in))]
    time_num = np.zeros(np.size(time_in))
    for j, raw in enumerate(time_in):
        # bytes -> str; keep 'YYYY-MM-DDTHH:MM' and append the seconds.
        time_str[j] = raw[0:16].decode() + ':00'
        year = int(time_str[j][0:4])
        # Only convert valid times (invalid entries use year 9999).
        if year < 2100:
            time_num[j] = mdates.date2num(sunpy.time.parse_time(time_str[j]))
    # time_num is already an array; convert the string list as well so it
    # works with numpy "where" downstream.
    return time_num, np.array(time_str)
def decode_array(bytearrin):
    """Decode an array of byte strings (from scipy.io.readsav) to a numpy
    array of Python strings.

    Fixes an off-by-one in the original loop (range(0, len - 1)) that left
    the final element as an empty string instead of its decoded value.
    Returned as a numpy array so it works with numpy "where" downstream.
    """
    return np.array([b.decode() for b in bytearrin])
def IDL_time_to_num(time_in):
    """Convert an array of IDL time strings to matplotlib date numbers."""
    time_num = np.zeros(np.size(time_in))
    for idx, stamp in enumerate(time_in):
        time_num[idx] = mdates.date2num(sunpy.time.parse_time(stamp))
    return time_num
######################################################
#main program
plt.close('all')
sns.set_context("talk")
sns.set_style("darkgrid")
################## CONTROLS
#how much time is between frames
dayjump=0.25
#either keep or fade detections
fade=1
keep=0
#if keep is selected, the alpha for plotting each dot
keepalpha=0.7
#how long an ARRIVAL stays visible in fade mode
fadedays=30
#how big the circles are on the plot
bscale=4
#half width of the circles
lamda=30
################################
print( 'start icmecat animation program.')
#get ICMECAT
filename_icmecat='ALLCATS/HELCATS_ICMECAT_v10_SCEQ.sav'
i=getcat(filename_icmecat)
#get parameters
bmean=i.icmecat['MO_BMEAN']*bscale #bscale makes circles larger in movie
long=i.icmecat['SC_LONG_HEEQ']*np.pi/180 #heeq longitude converted to radians
rdist=i.icmecat['sc_heliodistance'] #AU
sc=i.icmecat['sc_insitu'] #string
sc=decode_array(sc)
#get indices of events in different spacecraft
vexind=np.where(sc == 'VEX')
staind=np.where(sc == 'STEREO-A')
stbind=np.where(sc == 'STEREO-B')
winind=np.where(sc == 'Wind')
mesind=np.where(sc == 'MESSENGER')
ulyind=np.where(sc == 'ULYSSES')
##################################### read in situ
print( 'read MESSENGER')
#get insitu data
mes= pickle.load( open( "DATACAT/MES_2007to2015_SCEQ_removed.p", "rb" ) )
#time conversion
#mes_time=IDL_time_to_num(mes.time)
print( 'read MESSENGER done.')
print ('read VEX')
#get insitu data
vex= pickle.load( open( "DATACAT/VEX_2007to2014_SCEQ_removed.p", "rb" ) )
#time conversion
#vex_time=IDL_time_to_num(vex.time)
print( 'read VEX done.')
print( 'read Wind')
#get insitu data
wind= pickle.load( open( "DATACAT/WIND_2007to2016_HEEQ.p", "rb" ) )
#time conversion
#wind_time=IDL_time_to_num(wind.time)
print( 'read Wind done.')
print( 'read STEREO-A')
#get insitu data
sta= pickle.load( open( "DATACAT/STA_2007to2015_SCEQ.p", "rb" ) )
#time conversion
#sta_time=IDL_time_to_num(sta.time)
print( 'read STA done.')
print( 'read STEREO-B')
#get insitu data
stb= pickle.load( open( "DATACAT/STB_2007to2014_SCEQ.p", "rb" ) )
#time conversion
#stb_time=IDL_time_to_num(stb.time)
print( 'read STB done.')
#save times
#pickle.dump([vex_time,wind_time,sta_time,stb_time,mes_time], open( "DATACAT/Insitu_times_mdates_2.p", "wb" ) )
#quicker when just reloading times
[vex_time,wind_time,sta_time,stb_time,mes_time]=pickle.load( open( "DATACAT/Insitu_times_mdates_2.p", "rb" ) )
#print 'loaded in situ times'
######################################
#get positions
pos=getpositions('DATACAT/positions_2007_2018_HEEQ_6hours.sav')
[pos_time_num,pos_time_str]=time_to_num_cat(pos.time)
#available as pos.mercury etc.
#get cme apex positions
h=getcat('ALLCATS/hicat_v3_cat_behind_visual.sav')
[h_time_num,h_time_str]=time_to_num_cat(h.all_apex_t_str)
all_apex_s=decode_array(h.all_apex_s)
#make time conversion for all icme_start_time variables
#save it as string
icme_start_time_str=i.icmecat['icme_start_time']
#save it as matplotlib date number
[icme_start_time_num,icme_start_time_str]=time_to_num_cat(icme_start_time_str)
#for each spacecraft, make a zeros array
active_icme_vex=np.zeros(np.size(icme_start_time_num))
active_icme_stb=np.zeros(np.size(icme_start_time_num))
active_icme_sta=np.zeros(np.size(icme_start_time_num))
active_icme_win=np.zeros(np.size(icme_start_time_num))
active_icme_mes=np.zeros(np.size(icme_start_time_num))
active_icme_uly=np.zeros(np.size(icme_start_time_num))
#initiate plot
plt.figure(1, figsize=(12, 6), dpi=100, facecolor='w', edgecolor='w')
#full movie April 2014 Jan 1 until end of November 2014
frame_time_num=mdates.date2num(sunpy.time.parse_time('2007-Apr-1'))
################################### plot over all frames
for k in np.arange(12680/4,(12680+120)/4,dayjump):
#3169 is time in days
start=time.time()
#to current frame time, the days need to be added, so +k is done
#save frame time as string to write on plot
frame_time_str=str(mdates.num2date(frame_time_num+k))
print( 'current frame_time_num+k', frame_time_str)
#for each frame time, check active ICMEs by looking into the full catalogue:
for m in range(0,len(icme_start_time_num)):
#calculate difference in icme_start_time to current frame
icme_diff_to_frame=(frame_time_num+k)-icme_start_time_num[m]
#for all icme_start_times that are later than the current frame,
#make them active for 30 days (fading) or infinite (keeping).
#**********************for fading
if fade > 0:
if icme_diff_to_frame > 0 and icme_diff_to_frame < fadedays:
#check if this active icme belongs to a spacecraft
#in1d compares to arrays; true or 1 if m is contained in vexind
if np.in1d(m,vexind) == 1:
active_icme_vex[m]=icme_diff_to_frame
#same for the other spacecraft
if np.in1d(m,stbind) == 1:
active_icme_stb[m]=icme_diff_to_frame
if np.in1d(m,staind) == 1:
active_icme_sta[m]=icme_diff_to_frame
if np.in1d(m,winind) == 1:
active_icme_win[m]=icme_diff_to_frame
if np.in1d(m,mesind) == 1:
active_icme_mes[m]=icme_diff_to_frame
if np.in1d(m,ulyind) == 1:
active_icme_uly[m]=icme_diff_to_frame
else:
#if no detection, set the index to 0
active_icme_vex[m]=0
active_icme_stb[m]=0
active_icme_sta[m]=0
active_icme_win[m]=0
active_icme_mes[m]=0
active_icme_uly[m]=0
#************************** for keeping
if keep > 0:
if icme_diff_to_frame > 0:
#check if this active icme belongs to a spacecraft
#in1d compares to arrays; true or 1 if m is contained in vexind
if np.in1d(m,vexind) == 1:
active_icme_vex[m]=icme_diff_to_frame
#same for the other spacecraft
if np.in1d(m,stbind) == 1:
active_icme_stb[m]=icme_diff_to_frame
if np.in1d(m,staind) == 1:
active_icme_sta[m]=icme_diff_to_frame
if np.in1d(m,winind) == 1:
active_icme_win[m]=icme_diff_to_frame
if np.in1d(m,mesind) == 1:
active_icme_mes[m]=icme_diff_to_frame
else:
#if no detection, set the index to 0
active_icme_vex[m]=0
active_icme_stb[m]=0
active_icme_sta[m]=0
active_icme_win[m]=0
active_icme_mes[m]=0
#look which ICMEs are active
active_index_vex=np.where(active_icme_vex > 0)
active_index_stb=np.where(active_icme_stb > 0)
active_index_sta=np.where(active_icme_sta > 0)
active_index_win=np.where(active_icme_win > 0)
active_index_mes=np.where(active_icme_mes > 0)
active_index_uly=np.where(active_icme_uly > 0)
#print 'active icme indices are:', active_index_vex
print (' ')
#check for active CME indices from HICAT (with the lists produced in IDL for the apex positions)
#check where time is identical to frame time
cmeind=np.where(h_time_num == frame_time_num+k)
############make plot
# rows - columns, starts with 0
ax = plt.subplot2grid((5,2), (0, 0), rowspan=5, projection='polar')
#ax = plt.subplot(121,projection='polar')
######################## 1 plot all active CME circles
#ax.scatter(h.all_apex_long[cmeind]*np.pi/180,h.all_apex_r[cmeind], s=10, c='black', alpha=1, marker='s')
#plot all active CME circles
#if np.size(cmeind) >0:
for p in range(0,np.size(cmeind)):
#print p, h.all_apex_long[cmeind[0][p]], h.all_apex_r[cmeind[0][p]]
#central d
dir=np.array([np.cos(h.all_apex_long[cmeind[0][p]]*np.pi/180),np.sin(h.all_apex_long[cmeind[0][p]]*np.pi/180)])*h.all_apex_r[cmeind[0][p]]
#points on circle, correct for longitude
circ_ang = ((np.arange(111)*2-20)*np.pi/180)-(h.all_apex_long[cmeind[0][p]]*np.pi/180)
#these equations are from moestl and davies 2013
xc = 0+dir[0]/(1+np.sin(lamda*np.pi/180)) + (h.all_apex_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.sin(circ_ang)
yc = 0+dir[1]/(1+np.sin(lamda*np.pi/180)) + (h.all_apex_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.cos(circ_ang)
#now convert to polar coordinates
rcirc=np.sqrt(xc**2+yc**2)
longcirc=np.arctan2(yc,xc)
#plot in correct color
if all_apex_s[cmeind[0][p]] == 'A':
#make alpha dependent on distance to solar equatorial plane - maximum latitude is -40/+40 -
#so to make also the -/+40 latitude CME visible, divide by 50 so alpha > 0 for these events
ax.plot(longcirc,rcirc, c='red', alpha=1-abs(h.all_apex_lat[cmeind[0][p]]/50), lw=1.5)
if all_apex_s[cmeind[0][p]] == 'B':
ax.plot(longcirc,rcirc, c='royalblue', alpha=1-abs(h.all_apex_lat[cmeind[0][p]]/50), lw=1.5)
####################### 3 plot ICME detections
#fader style plot alpha dependent on time difference - for this loop over each element:
if fade >0:
for y in range(0,np.size(active_index_vex)):
z=active_index_vex[0][y] #access elements in tuple that is produced by where
fadealpha=1-active_icme_vex[z]/(fadedays) #fadedays is maximum difference in time, and alpha from 0 to 1
ax.scatter(long[z], rdist[z], s=bmean[z], c='orange', alpha=fadealpha)
for y in range(0,np.size(active_index_sta)):
z=active_index_sta[0][y]
fadealpha=1-active_icme_sta[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
ax.scatter(long[z], rdist[z], s=bmean[z], c='red', alpha=fadealpha)
for y in range(0,np.size(active_index_stb)):
z=active_index_stb[0][y]
fadealpha=1-active_icme_stb[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
ax.scatter(long[z], rdist[z], s=bmean[z], c='royalblue', alpha=fadealpha)
for y in range(0,np.size(active_index_win)):
z=active_index_win[0][y]
fadealpha=1-active_icme_win[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
ax.scatter(long[z], rdist[z], s=bmean[z], c='mediumseagreen', alpha=fadealpha)
for y in range(0,np.size(active_index_mes)):
z=active_index_mes[0][y]
fadealpha=1-active_icme_mes[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
ax.scatter(long[z], rdist[z], s=bmean[z], c='dimgrey', alpha=fadealpha)
for y in range(0,np.size(active_index_uly)):
z=active_index_uly[0][y]
fadealpha=1-active_icme_uly[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
ax.scatter(long[z], rdist[z], s=bmean[z], c='darkolivegreen', alpha=fadealpha)
if keep >0:
ax.scatter(long[active_index_vex], rdist[active_index_vex], s=bmean[active_index_vex], c='orange', alpha=keepalpha)
ax.scatter(long[active_index_sta], rdist[active_index_sta], s=bmean[active_index_sta], c='red', alpha=keepalpha)
ax.scatter(long[active_index_stb], rdist[active_index_stb], s=bmean[active_index_stb], c='royalblue', alpha=keepalpha)
ax.scatter(long[active_index_win], rdist[active_index_win], s=bmean[active_index_win], c='mediumseagreen', alpha=keepalpha)
ax.scatter(long[active_index_mes], rdist[active_index_mes], s=bmean[active_index_mes], c='dimgrey', alpha=keepalpha)
plt.suptitle('STEREO/HI modeled CMEs (SSEF30) + in situ ICME detections and data HELCATS - HIGEOCAT ICMECAT DATACAT', fontsize=12)
#Sun
ax.scatter(0,0,s=100,c='yellow',alpha=0.8, edgecolors='yellow')
plt.figtext(0.30,0.5,'Sun', fontsize=10, ha='center')
#Earth
plt.figtext(0.30,0.25,'Earth', fontsize=10, ha='center')
#units
#plt.figtext(0.525,0.0735,'HEEQ longitude', fontsize=10, ha='left')
#plt.figtext(0.655,0.164,'AU', fontsize=10, ha='center')
#----------------- legend
plt.figtext(0.05,0.02,'Mercury', color='dimgrey', ha='center', fontsize=12)
plt.figtext(0.15,0.02,'MESSENGER', color='dimgrey', ha='center', fontsize=10)
plt.figtext(0.25 ,0.02,'Venus', color='orange', ha='center',fontsize=12)
plt.figtext(0.35,0.02,'STEREO-A', color='red', ha='center',fontsize=12)
plt.figtext(0.48,0.02,'STEREO-B', color='royalblue', ha='center',fontsize=12)
plt.figtext(0.58,0.02,'Earth', color='mediumseagreen', ha='center',fontsize=12)
plt.figtext(0.65,0.02,'Mars', color='orangered', ha='center',fontsize=10)
plt.figtext(0.71,0.02,'MSL', color='magenta', ha='center', fontsize=10)
plt.figtext(0.76,0.02,'Maven', color='steelblue', ha='center', fontsize=10)
plt.figtext(0.83,0.02,'Ulysses', color='darkolivegreen', ha='center', fontsize=10)
plt.figtext(0.90,0.02,'Rosetta', color='black', ha='center', fontsize=10)
#add legend for bmean
bleg=np.array([10,50,100])*bscale
blegstr=['10 nT','50','100']
blegr=np.zeros(len(bleg))+1.6
blegt=np.radians(range(170,195,10))
ax.scatter(blegt, blegr,s=bleg,c='violet', edgecolor='violet')
for p in range(0,len(bleg)):
ax.annotate(blegstr[p],xy=(blegt[p],blegr[p]-0.2), ha='center', va='center', fontsize=8)
############################## plot positions
#check which index is closest in positions to current time
#frame_time_num+k vs. pos_time_num
timeind=np.where(frame_time_num+k-pos_time_num == min(abs((frame_time_num+k)-pos_time_num)))
#index 1 is longitude, 0 is rdist
ax.scatter(pos.venus[1,timeind], pos.venus[0,timeind], s=50, c='orange', alpha=1, lw=0)
ax.scatter(pos.mercury[1,timeind], pos.mercury[0,timeind], s=50, c='dimgrey', alpha=1,lw=0)
ax.scatter(pos.messenger[1,timeind], pos.messenger[0,timeind], s=25, c='dimgrey', alpha=1,lw=0,marker='s')
ax.scatter(pos.sta[1,timeind], pos.sta[0,timeind], s=25, c='red', alpha=1,lw=0, marker='s')
ax.scatter(pos.stb[1,timeind], pos.stb[0,timeind], s=25, c='royalblue', alpha=1,lw=0, marker='s')
ax.scatter(pos.earth[1,timeind], pos.earth[0,timeind], s=50, c='mediumseagreen', alpha=1,lw=0)
ax.scatter(pos.mars[1,timeind], pos.mars[0,timeind], s=50, c='orangered', alpha=1,lw=0)
ax.scatter(pos.ulysses[1,timeind], pos.ulysses[0,timeind], s=25, c='darkolivegreen', alpha=1,lw=0,marker='s')
ax.scatter(pos.msl[1,timeind], pos.msl[0,timeind], s=25, c='magenta', alpha=1,lw=0,marker='s')
ax.scatter(pos.maven[1,timeind], pos.maven[0,timeind], s=25, c='steelblue', alpha=1,lw=0, marker='s')
ax.scatter(pos.rosetta[1,timeind], pos.rosetta[0,timeind], s=25, c='black', alpha=1,lw=0, marker='s')
#set axes
plt.thetagrids(range(0,360,45),(u'0\u00b0 HEEQ longitude',u'45\u00b0',u'90\u00b0',u'135\u00b0',u'+/- 180\u00b0',u'-135\u00b0',u'-90\u00b0',u'-45\u00b0'), fmt='%d', frac = 1.05,fontsize=10)
ax.set_theta_zero_location('S')
ax.set_ylim(0, 1.8)
plt.rgrids((0.4,0.7,1.0,1.3,1.6),('0.4','0.7','1.0','1.3','1.6 AU'),fontsize=10)
#plot text for date extra so it does not move
#year
plt.figtext(0.47-0.22,0.9,frame_time_str[0:4], fontsize=13, ha='center')
#month
plt.figtext(0.51-0.22,0.9,frame_time_str[5:7], fontsize=13, ha='center')
#day
plt.figtext(0.54-0.22,0.9,frame_time_str[8:10], fontsize=13, ha='center')
#hours
plt.figtext(0.57-0.22,0.9,frame_time_str[11:13], fontsize=13, ha='center')
#mysignature
plt.figtext(0.96,0.01,r'$C. M\ddot{o}stl$', fontsize=7, ha='center')
############# 5 in situ data plots
plotstartdate=mdates.num2date(frame_time_num+k-3)
plotenddate=mdates.num2date(frame_time_num+k+3)
#slicing
#take only those indices where the difference to frame_time_num+k is less than 3
mes_ind_plot=np.where(abs(mes_time-(frame_time_num+k)) < 3)
vex_ind_plot=np.where(abs(vex_time-(frame_time_num+k)) < 3)
stb_ind_plot=np.where(abs(stb_time-(frame_time_num+k)) < 3)
sta_ind_plot=np.where(abs(sta_time-(frame_time_num+k)) < 3)
wind_ind_plot=np.where(abs(wind_time-(frame_time_num+k)) < 3)
#rows - columns
#MESSENGER
ax2 = plt.subplot2grid((5,2), (0, 1))
ax2.plot_date(mes_time[mes_ind_plot],mes.btot[mes_ind_plot],'-k', lw=0.3)
ax2.plot_date(mes_time[mes_ind_plot],mes.bx[mes_ind_plot], '-r',lw=0.3)
ax2.plot_date(mes_time[mes_ind_plot],mes.by[mes_ind_plot],'-g',lw=0.3)
ax2.plot_date(mes_time[mes_ind_plot],mes.bz[mes_ind_plot],'-b',lw=0.3)
plt.tick_params( axis='x', labelbottom='off')
#current time
plt.yticks(fontsize=9)
plt.ylabel('B SCEQ [nT]', fontsize=9)
ax2.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-120,120],'-k', lw=0.5, alpha=0.8)
plt.xlim((plotstartdate, plotenddate))
plt.ylim((-120, 120))
#VEX
ax3 = plt.subplot2grid((5,2), (1, 1))
ax3.plot_date(vex_time[vex_ind_plot],vex.btot[vex_ind_plot],'-k', lw=0.3)
ax3.plot_date(vex_time[vex_ind_plot],vex.bx[vex_ind_plot], '-r',lw=0.3)
ax3.plot_date(vex_time[vex_ind_plot],vex.by[vex_ind_plot],'-g',lw=0.3)
ax3.plot_date(vex_time[vex_ind_plot],vex.bz[vex_ind_plot],'-b',lw=0.3)
plt.tick_params( axis='x', labelbottom='off')
plt.yticks(fontsize=9)
plt.ylabel('B SCEQ [nT]', fontsize=9)
ax3.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
plt.xlim((plotstartdate, plotenddate))
plt.ylim((-50, 50))
#Earth
ax4 = plt.subplot2grid((5,2), (2, 1))
ax4.plot_date(wind_time[wind_ind_plot],wind.btot[wind_ind_plot],'-k', lw=0.3)
ax4.plot_date(wind_time[wind_ind_plot],wind.bx[wind_ind_plot], '-r',lw=0.3)
ax4.plot_date(wind_time[wind_ind_plot],wind.by[wind_ind_plot],'-g',lw=0.3)
ax4.plot_date(wind_time[wind_ind_plot],wind.bz[wind_ind_plot],'-b',lw=0.3)
plt.tick_params( axis='x', labelbottom='off')
plt.yticks(fontsize=9)
plt.ylabel('B SCEQ [nT]', fontsize=9)
ax4.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
plt.xlim((plotstartdate, plotenddate))
plt.ylim((-35, 35))
#STA
ax5 = plt.subplot2grid((5,2), (3, 1))
ax5.plot_date(sta_time[sta_ind_plot],sta.btot[sta_ind_plot],'-k', lw=0.3)
ax5.plot_date(sta_time[sta_ind_plot],sta.bx[sta_ind_plot], '-r',lw=0.3)
ax5.plot_date(sta_time[sta_ind_plot],sta.by[sta_ind_plot],'-g',lw=0.3)
ax5.plot_date(sta_time[sta_ind_plot],sta.bz[sta_ind_plot],'-b',lw=0.3)
plt.tick_params( axis='x', labelbottom='off')
plt.yticks(fontsize=9)
plt.ylabel('B SCEQ [nT]', fontsize=9)
ax5.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
plt.xlim((plotstartdate, plotenddate))
plt.ylim((-35, 35))
#STB
ax6 = plt.subplot2grid((5,2), (4, 1))
ax6.plot_date(stb_time[stb_ind_plot],stb.btot[stb_ind_plot],'-k', lw=0.3)
ax6.plot_date(stb_time[stb_ind_plot],stb.bx[stb_ind_plot], '-r',lw=0.3)
ax6.plot_date(stb_time[stb_ind_plot],stb.by[stb_ind_plot],'-g',lw=0.3)
ax6.plot_date(stb_time[stb_ind_plot],stb.bz[stb_ind_plot],'-b',lw=0.3)
plt.xlim((plotstartdate, plotenddate))
myformat = mdates.DateFormatter('%m-%d')
ax6.xaxis.set_major_formatter(myformat)
plt.yticks(fontsize=9)
plt.ylabel('B SCEQ [nT]', fontsize=9)
ax6.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
plt.ylim((-35, 35))
plt.xticks(fontsize=10)
#labeling of spacecraft and longitude in HEEQ
plt.figtext(0.92,0.82,'MESSENGER',color='dimgrey', fontsize=10, ha='left')
plt.figtext(0.94,0.77,"%d" % (pos.messenger[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
plt.figtext(0.92,0.82-0.165,'VEX',color='orange', fontsize=10, ha='left')
plt.figtext(0.94,0.77-0.165,"%d" % (pos.venus[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
plt.figtext(0.92,0.82-0.165*2,'Wind',color='mediumseagreen', fontsize=10, ha='left')
plt.figtext(0.94,0.77-0.165*2,"%d" % (pos.earth[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
plt.figtext(0.92,0.82-0.165*3,'STEREO-A',color='red', fontsize=10, ha='left')
plt.figtext(0.94,0.77-0.165*3,"%d" % (pos.sta[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
plt.figtext(0.92,0.82-0.165*4,'STEREO-B',color='royalblue', fontsize=10, ha='left')
plt.figtext(0.94,0.77-0.165*4,"%d" % (pos.stb[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
#labeling in situ components
plt.figtext(0.75,0.92,'Bx',color='red', fontsize=10, ha='left')
plt.figtext(0.8,0.92,'By',color='green', fontsize=10, ha='left')
plt.figtext(0.85,0.92,'Bz',color='blue', fontsize=10, ha='left')
#save figure for frame - this starts with zero at the start time
framestr = '%04i' % (k*4)
#framenr=framenr+1
print( 'frame nr.', framestr)
#plt.show()
if fade >0:
plt.savefig('animations/animation_icmecat_6hour_fade_circ_insitu_final_full/icmecat_'+framestr+'.png', dpi=300)
#plt.savefig('animations/animation_icmecat_6hour_fade_circ_insitu_final_full/icmecat_'+framestr+'.jpg', dpi=300)
# if keep >0:
# plt.savefig('animations/animation_icmecat_6hour_keep_circ_insitu_final_full/icmecat_'+framestr+'.jpg', format='jpg', dpi=300)
end=time.time()
print( 'took time in seconds:', (end-start) ,'for this frame')
#clears plot window
plt.clf()
############end of cycle
#make animation convert with automator into jpg before
#os.system('/Users/chris/movie/ffmpeg -r 15 -i /Users/chris/python/catpy/animations/animation_icmecat_6hour_fade_circ_insitu_final_full_jpg/icmecat_%04d.jpg -b 5000k -r 15 animations/icmecat_anim_6hour_fade_circ_insitu_final_full.mp4 -y')
print( 'made movie')
print( 'end icmecat animation program.')
#/Users/chris/movie/ffmpeg -r 15 -i /Users/chris/python/catpy/animations/animation_icmecat_6hour_fade_circ_insitu_all/icmecat_%04d.jpg -b 5000k -r 15 animations/icmecat_anim_6hour_fade_circ_insitu_all.mp4 -y
| cmoestl/heliocats | scripts/icmecat_anim_circles_insitu_final_full.py | icmecat_anim_circles_insitu_final_full.py | py | 23,948 | python | en | code | 10 | github-code | 36 |
31802278489 | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
class Solution(object):
    def alphabetBoardPath(self, target):
        """
        :type target: str
        :rtype: str

        Spell out ``target`` on the alphabet board (a-y in a 5-wide grid,
        z alone below 'u'), starting at 'a'.  Returns the move string made
        of 'U'/'D'/'L'/'R' steps plus '!' to select each letter.
        """
        def locate(ch):
            # Letters fill the board five per row, so the cell follows
            # directly from the alphabet offset.
            return divmod(ord(ch) - ord('a'), 5)

        path = []
        cur = (0, 0)
        for ch in target:
            dest = locate(ch)
            if dest != cur:
                dr = dest[0] - cur[0]
                dc = dest[1] - cur[1]
                if dr >= 0 and dc >= 0:
                    path.append("R" * dc + "D" * dr)
                elif dr >= 0:
                    # Down-left: step left before down so a trip to 'z'
                    # (lone cell in the last row) never leaves the board.
                    path.append("L" * -dc + "D" * dr)
                elif dc <= 0:
                    path.append("L" * -dc + "U" * -dr)
                else:
                    # Up-right: step up before right so a trip away from
                    # 'z' never leaves the board.
                    path.append("U" * -dr + "R" * dc)
            path.append("!")
            cur = dest
        return "".join(path)
def main():
    """Demo: print the board path for two sample words."""
    solver = Solution()
    for word in ("leet", "code"):
        print(solver.alphabetBoardPath(word))
# Run the demo when this file is executed directly.
if __name__ == "__main__":
    main()
| bobcaoge/my-code | python/leetcode/1138_Alphabet_Board_Path.py | 1138_Alphabet_Board_Path.py | py | 1,230 | python | en | code | 0 | github-code | 36 |
18391614264 | import requests
from bs4 import BeautifulSoup
import wikipedia
class unotes:
    """Look up topics on Wikipedia/Google and save the results as one HTML page.

    Workflow: ``unotes('topic a', 'topic b')`` -> ``search()`` (network
    access to Wikipedia and Google) -> ``save(file, results)``.
    """
    def __init__(self,*args):
        # Topics with spaces converted to underscores (Wikipedia-style titles).
        self.data_list = [a.replace(' ','_')for a in args]
        # topic -> Wikipedia summary text, filled by search()
        self.content = {}
        # topic -> list of result URLs scraped from Google, filled by search()
        self.links = {}
    def __str__(self):
        return f"unotes for {self.data_list}"
    def search(self):
        """Fetch a Wikipedia summary and Google result links for every topic.

        Returns a list of dicts {'name', 'content', 'links'} and also caches
        the results on ``self.content`` / ``self.links``.  Performs live
        HTTP requests; failures fall back to an apology string.
        """
        contents = []
        wiki_content = []  # unused -- kept as-is
        sub_results = []  # unused -- kept as-is
        data = self.data_list
        for i in range(len(data)):
            searches = wikipedia.search(data[i])
            # print(searches)
            try:
                # Use the top search hit; auto_suggest off to avoid surprises.
                main_result = wikipedia.summary(searches[0],auto_suggest=False)
            except:
                # Broad catch: any lookup failure produces a placeholder text.
                main_result = f"Some errors. Please consider changing the parameters for {data[i]}. :) "
            self.content[f'{data[i]}'] = main_result
            req = requests.get(f"https://www.google.com/search?q={str(data[i].replace('_','+'))}")
            content = BeautifulSoup(req.text,features='html.parser')
            x = content.find_all('a')
            links = []
            for j in x:
                link = j.attrs['href']
                # Google wraps result targets as /url?q=<target>&...; pull
                # the target out and skip percent-encoded (tracking) URLs.
                if 'url' in link.split("?")[0]:
                    url_ = link.split("?")[1].split("&")[0].split("=")[1]
                    if "%" not in url_:
                        links.append(url_)
            self.links[data[i]] = links
            temp_ = {'name':f'{data[i]}','content':str(main_result),'links':links}
            contents.append(temp_)
        return contents
    def save(self,file_name,data_obj):
        """Render ``data_obj`` (the list returned by search()) into a
        self-contained HTML file with a collapsible link list per topic."""
        # Static page header: styles and a navbar.  The literal is written
        # verbatim into the file, indentation included.
        html_upper = """
        <!DOCTYPE html>
        <html lang="en">
        <head>
        <title>Unotes</title>
        <style>
        .left-cont{
            border-radius: 15px;
            display: block;
            float: left;
            margin: 25px 10px;
            box-shadow: 15px 15px 20px 5px rgb(212, 233, 231);
            padding: 15px;
        }
        .navbar{
            height: 45px;
            background-color: rgb(79, 210, 210);
            border: 1px solid blue;
            text-align: center;
        }
        .nav-text{
            align-self: center;
            margin: 14px 5px;
            font-weight: bold;
            font-family: 'Courier New', Courier, monospace;
            color: white;
        }
        .show-text{
            cursor: pointer;
            font-weight:700;
            border:1px solid black;
            padding:3px;
            background-color:skyblue;
        }
        #links_show{
            display: none;
        }
        h3{
            text-align:center;
        }
        </style>
        </head>
        <body>
        <div class="navbar">
        <div class="nav-text">U-notes</div>
        </div>
        """
        html_lower = f"</body></html>"
        file = open(f'{str(file_name)}','w+',encoding='utf-8')
        file.write(str(html_upper))
        for i in data_obj:
            name = i['name']
            content = i['content']
            links = i['links']
            # Per-topic JS toggle.  The {'{'} / {'}'} pieces emit literal
            # braces inside this f-string -- do not "simplify" them.
            show_function_ = f"<script>function show_{name}(){'{'} {'if'} (document.getElementById('links_show{name}').style.display == 'none') {'{'} document.getElementById('links_show{name}').style.display = 'block';{'}'}else{'{'}document.getElementById('links_show{name}').style.display = 'none';{'}}'}</script>"
            content_ = f"<h3>{name}</h3><p>{content}</p><hr><div class='more'><button type='button' class='show-text' onclick='show_{name}();'>See links ^_^</button><div class='more-links' style='display:none;' id='links_show{name}' >"
            file.write(str(show_function_))
            file.write(str(content_))
            for link in links:
                link_ = f"<div class='link'><a href='{link}' target='_blank'>{link}</a></div>"
                file.write(str(link_))
            content_end = "</div>"
            file.write(str(content_end))
        file.write(str(html_lower))
        file.close()
def main():
    """Example usage: collect notes for two placeholder topics and save them."""
    notebook = unotes('param1','param2')
    notebook.save('file_name.html', notebook.search())
# Run the example when this file is executed directly.
if __name__=='__main__':
    main()
26613636857 | import numpy as np
import cv2
from imgutil import read_img
from scipy.optimize import minimize
from mathutil import Rx, Ry, Rz
# Working image size (height, width): 1080x1920 frames downscaled by 2.
in_size = (1080//2, 1920//2)
# Horizontal field-of-view scale applied in to_vec().
fov_factor = 1
# Hand-annotated correspondences: (camid_a, camid_b, [(pt_in_a, pt_in_b), ...]).
# The arithmetic (1080//2 - v, u - 1920//2) converts the raw annotation
# coordinates into this module's pixel convention.
marks = [
    (1, 2, [((925, 1080//2 - 338), (1131 - 1920//2, 1080//2 - 383)),
            ((946, 1080//2 - 321), (1156 - 1920//2, 1080//2 - 375)),
            ((834, 1080//2 - 390), (1036 - 1920//2, 1080//2 - 398)),
            ((952, 1080//2 - 372), (1154 - 1920//2, 1080//2 - 428)),
            ((808, 1080//2 - 367), (1014 - 1920//2, 1080//2 - 364))]),
    (5, 1, [((891, 1080//2 - 450), (1007 - 1920//2, 1080//2 - 407)),
            ((950, 1080//2 - 331), (1075 - 1920//2, 1080//2 - 313)),
            ((842, 1080//2 - 380), (970 - 1920//2, 1080//2 - 316))])
]
# Start index of each camera's three Euler angles in the optimizer's
# parameter vector; camera 1 is the fixed reference (see gen_matrix).
camid_to_param_offset = {2: 0, 5: 3}
def to_vec(px, py, size=None, factor=None):
    """Convert a pixel coordinate to a unit-length view ray.

    Parameters
    ----------
    px, py : float
        Pixel coordinates; ``px`` is measured against the image width,
        ``py`` against the image height.
    size : tuple, optional
        (height, width) of the image.  Defaults to the module-level
        ``in_size`` (kept for backward compatibility with existing calls).
    factor : float, optional
        Horizontal field-of-view scale; defaults to ``fov_factor``.

    Returns
    -------
    numpy.ndarray
        Normalized direction vector (x, y, 1)/||.|| in camera space.
    """
    if size is None:
        size = in_size
    if factor is None:
        factor = fov_factor
    height, width = size
    # Vertical FOV is scaled by the aspect ratio so pixels stay square.
    fov = factor * np.array([1, height / width])
    x = fov[0] * (2 * px / width - 1)
    y = fov[1] * (2 * py / height - 1)
    vec = np.array([x, y, 1])
    return vec / np.linalg.norm(vec)
def gen_matrix(camid, params):
    """Build the rotation matrix of a camera from the flat parameter vector."""
    if camid == 1:
        # Camera 1 is the fixed reference: a constant 90-degree rotation
        # about the x axis, independent of the optimizer parameters.
        return Rx(np.pi/2)
    base = camid_to_param_offset[camid]
    rz_ang, ry_ang, rx_ang = params[base], params[base + 1], params[base + 2]
    # Compose Rz, then Ry, then Rx (matrix product applies right-to-left).
    return np.matmul(Rx(rx_ang), np.matmul(Ry(ry_ang), Rz(rz_ang)))
def calc_chi2(params):
    """Sum of squared residuals between matched view rays of each camera pair."""
    total = 0
    for cam_a, cam_b, pairs in marks:
        rot_a = gen_matrix(cam_a, params)
        rot_b = gen_matrix(cam_b, params)
        for pt_a, pt_b in pairs:
            # The same physical point seen from both cameras should map to
            # the same world-space direction after rotation.
            residual = np.matmul(rot_a, to_vec(*pt_a)) - np.matmul(rot_b, to_vec(*pt_b))
            total += np.sum(residual ** 2)
    return total
def show_markers():
    """Manual sanity check: draw each annotated marker on its image pair and
    display the two camera views side by side (press 'q' to advance)."""
    for cam_a, cam_b, pairs in marks:
        img_a = read_img(cam_a, in_size)
        img_b = read_img(cam_b, in_size)
        for pt_a, pt_b in pairs:
            cv2.circle(img_a, pt_a, 5, (1, 0, 0))
            cv2.circle(img_b, pt_b, 5, (1, 0, 0))
        # Flip rows so the pair is shown with the vertical axis mirrored,
        # matching the bottom-origin pixel convention used by `marks`.
        cv2.imshow("f", np.flip(np.hstack([img_a, img_b]), axis=0))
        while cv2.waitKey(0) != ord("q"):
            pass
    cv2.destroyAllWindows()
# Preview the annotated markers, then fit the per-camera rotation
# parameters by least squares and print the resulting matrices.
show_markers()
# NOTE(review): x0 sizes as 3 per *camera pair*; with offsets {2: 0, 5: 3}
# this happens to equal the 6 parameters required -- re-check if `marks`
# or `camid_to_param_offset` ever change.
res = minimize(calc_chi2, x0=[0]*3*len(marks))
print(repr(gen_matrix(2, res.x)))
print(repr(gen_matrix(5, res.x)))
| 42Ar/cube_mapper | marker_calc.py | marker_calc.py | py | 2,039 | python | en | code | 0 | github-code | 36 |
15955214108 | #! python3
# sendDuesReminders.py - sends emails based on payment status in spreadsheet
import smtplib
import openpyxl
# Walk the dues spreadsheet: the last column holds the latest month and any
# member whose cell is not 'paid' gets an email reminder.
wb = openpyxl.load_workbook('C:\\Users\\daize\\Desktop\\pythontest\\Automate\\duesRecords.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')
lastCol = sheet.max_column
latestMonth = sheet.cell(row=1, column=lastCol).value

# Collect name -> email for every member who has not paid the latest month.
unpaidMembers = {}
for r in range(2, sheet.max_row+1):
    payment = sheet.cell(row=r, column=lastCol).value
    if payment != 'paid':
        name = sheet.cell(row=r, column=1).value
        email = sheet.cell(row=r, column=2).value
        unpaidMembers[name] = email

smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
smtpObj.ehlo()
smtpObj.starttls() # a TLS connection needs this call to start encryption; SSL does not
smtpObj.login('account', 'passwd')

# BUG FIX: iterating a dict yields only keys, so the original
# `for name, email in unpaidMembers:` raised ValueError -- use .items().
for name, email in unpaidMembers.items():
    # BUG FIX: the header was misspelled "Suject:", so the mail had no
    # subject; it must read "Subject:" to be parsed as a header line.
    body = "Subject: %s dues unpaid.\nDear %s...not paid dues for %s....Thanks" % (latestMonth, name, latestMonth)
    print('Sending email to %s' % email)
    sendmailStatus = smtpObj.sendmail('my account', email, body) # returns an empty dict when all recipients were accepted
    if sendmailStatus != {}:
        print('problem send email to %s:%s' % (email, sendmailStatus))
smtpObj.quit()
72718761064 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Created on Fri Mar 10 12:54:45 2023
"""
import numpy as np
from pyGITR.math_helper import *
from typing import Callable
import matplotlib.pyplot as plt
import pydoc
import netCDF4
import os
def Gaussian(x: np.ndarray = np.linspace(-15000, 15000, 100000), sigma: float = 5.0, mu: float = 120.0, beta: float = 1.0, Normalized=True):
    """Gaussian-shaped profile exp(-((x-mu)/sigma)^2) clipped to x >= 0.

    Note the exponent has no 1/2 factor, so this is narrower than a
    standard normal pdf.  ``beta`` is accepted for signature parity with
    the sibling functions but is not used.  When ``Normalized``, the
    profile is divided by its numerical integral over ``x``.
    """
    amplitude = 1.0 / (sigma * np.sqrt(2.0 * np.pi))
    f = amplitude * np.exp(-(((x - mu) / sigma) ** 2))
    f[x < 0] = 0
    if Normalized:
        f = f / Integrale(f, x, Array=False)
    return f
def Gaussian_test(x: np.ndarray = np.linspace(-15000, 15000, 100000), sigma: float = 20.0, mu: float = 130.0, beta: float = 1.0, Normalized=True):
    """Unnormalized Gaussian-shaped proposal profile, clipped to x >= 0.

    The ``Normalized`` flag is currently ignored (normalization disabled
    in the original); ``beta`` is unused and kept for signature parity.
    """
    amplitude = 1.0 / (sigma * np.sqrt(2.0 * np.pi))
    f = amplitude * np.exp(-(((x - mu) / sigma) ** 2))
    f[x < 0] = 0
    return f
def Gaussian_Jerome(x: np.ndarray = np.linspace(-15000, 15000, 100000), sigma: float = 5.0, mu: float = 120, beta: float = 0.0, Normalized=True):
    """|x|^beta weighted Gaussian exp(-0.5*((x-mu)/sigma)^2).

    For beta > 0 the profile is clipped to x >= 0; when ``Normalized``
    it is divided by its numerical integral over ``x``.
    """
    f = np.abs(x) ** beta * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    if beta > 0:
        f[x < 0] = 0
    if Normalized:
        f = f / Integrale(f, x, Array=False)
    return f
def Thomson(x: np.ndarray = np.linspace(0, 300, 10000), xb: float = 8.64, xc: float = 100, Normalized=True):
    """Thomson-type energy distribution x/(x+xb)^3 * (1 - sqrt((x+xb)/(xc+xb))),
    set to zero above the cutoff ``xc``.

    ``Normalized`` is currently ignored (normalization disabled in the
    original).  Requires xc > xb.
    """
    assert not (xc <= xb), "xc cannot be <= xb"
    shifted = x + xb
    f = x / shifted ** 3 * (1.0 - np.sqrt(shifted / (xc + xb)))
    f[x > xc] = 0.0
    return f
def Levy(x=np.linspace(0.1,10,10000), c=1, mu=0):
    """Levy distribution pdf: sqrt(c/(2*pi)) * exp(-c/(x-mu)) / (x-mu)^1.5."""
    shifted = x - mu
    return np.sqrt(c / (2.0 * np.pi)) * np.exp(-c / shifted) / shifted ** 1.5
# Importance-sampling sanity check: p (narrow, sigma=5) is the target,
# q (wide, sigma=20) the proposal; build the normalized product p*q and
# the ratio (p*q)/q, then plot p and q around their peaks.
x = np.linspace(1, 200, 5005)
pdf_p = Gaussian_Jerome(x)
pdf_q = Gaussian_test(x)
pdf_p_times_q = np.multiply(pdf_p,pdf_q)
#Normalization = 1.0
# Normalize the product numerically over the sampled grid.
Normalization = Integrale(pdf_p_times_q, x, Array=False)
#Normalization = 1.0
pdf_p_times_q = np.divide(pdf_p_times_q,Normalization)
pdf_p_times_q_divide_q = np.divide(pdf_p_times_q,pdf_q)
#Normalization = Integrale(pdf_p_times_q_divide_q, x, Array=False)
#Normalization = 1.0
#print(Normalization)
# NOTE(review): the ratio is divided by the *product's* normalization a
# second time here -- confirm the double division is intended.
pdf_p_times_q_divide_q = np.divide(pdf_p_times_q_divide_q,Normalization)
# #print(pdf_q)
plt.figure()
plt.plot(x,pdf_p,label="p distribution")
plt.plot(x,pdf_q,label="q distribution")
#plt.plot(x,pdf_p_times_q,label="multiplied distribution")
#plt.plot(x,Integrale(np.multiply(pdf_p,1.0), x, Array=True),label="cumulative of p times q")
#plt.plot(x,pdf_p_times_q_divide_q,label="multiplied distribution")
plt.xlim(100,150)
plt.legend()
plt.show()
# Gaussian(np.array([x]))
# Gaussian_Jerome(np.array([x]))
| audide12/DIIIDsurface_pyGITR | pyGITR/importance_sampling_1.py | importance_sampling_1.py | py | 2,626 | python | en | code | 1 | github-code | 36 |
24490756111 | #
class rest_get_action_queue(rest_get_table_handler):
    """GET /actions: list queued agent actions visible to the requester."""
    def __init__(self):
        desc = [
          "List service and node actions posted in the action_queue.",
        ]
        examples = [
          "# curl -u %(email)s -o- https://%(collector)s/init/rest/api/actions?query=status=R",
        ]
        # q_filter presumably scopes rows to nodes the requesting user may
        # see -- confirm against the q_filter helper.
        q = q_filter(node_field=db.action_queue.node_id)
        rest_get_table_handler.__init__(
          self,
          path="/actions",
          tables=["action_queue"],
          q=q,
          desc=desc,
          examples=examples,
        )
#
class rest_post_action_queue(rest_post_handler):
    """POST /actions: update one queue entry; the target id is in the payload."""
    def __init__(self):
        desc = [
          "Modify action queue entries",
        ]
        examples = [
          """# curl -u %(email)s -X POST --header 'Content-Type: application/json' -d @/tmp/list.json -o- https://%(collector)s/init/rest/api/actions"""
        ]
        rest_post_handler.__init__(
          self,
          path="/actions",
          tables=["action_queue"],
          desc=desc,
          examples=examples
        )
    def handler(self, **vars):
        """Pop the mandatory 'id' and delegate to the per-entry POST handler."""
        if 'id' not in vars:
            raise HTTP(400, "The 'id' key must be specified")
        id = vars["id"]
        del(vars["id"])
        return rest_post_action_queue_one().handler(id, **vars)
#
class rest_put_action_queue(rest_put_handler):
    """PUT /actions: enqueue a new agent action and wake the queue runner."""
    def __init__(self):
        desc = [
          "Enqueue an action that will be executed by opensvc agents.",
          "The user must be responsible for the target node or service.",
          "The action is logged in the collector's log.",
        ]
        data = """
- <property>=<value> pairs.
- **node_id**
. The node targeted by the action. If svc_id is not specified, the
  action is run using the nodemgr opensvc agent command
- **svc_id**
. The service targeted by the action. The action is run using the
  svcmgr opensvc agent command on the node specified by **node_id**.
- **action**
. The opensvc agent action to execute.
- **module**
. The compliance module to run **action** on.
- **moduleset**
. The compliance moduleset to run **action** on.
- **rid**
. The service resource id to limit **action** to.
Each action has specific property requirements:
- ``giveback``:green requires **svc_id**
- ``start``:green requires **svc_id**, optionally **node_id**, **rid**
- ``stop``:green requires **svc_id**, optionally **node_id**, **rid**
- ``freeze``:green requires **svc_id**, optionally **node_id**, **rid**
- ``thaw``:green requires **svc_id**, optionally **node_id**, **rid**
- ``compliance_check``:green requires **node_id**, **module** or **moduleset**, optionally
  **svc_id**
- ``compliance_fix``:green requires **node_id**, **module** or **moduleset**, optionally
  **svc_id**
- ``restart``:green requires **node_id**, **svc_id**, optionally **rid**
- ``syncall``:green requires **node_id**, **svc_id**, optionally **rid**
- ``syncnodes``:green requires **node_id**, **svc_id**, optionally **rid**
- ``syncdrp``:green requires **node_id**, **svc_id**, optionally **rid**
- ``enable``:green requires **node_id**, **svc_id**, optionally **rid**
- ``disable``:green requires **node_id**, **svc_id**, optionally **rid**
- ``pushasset``:green requires **node_id**
- ``pushdisks``:green requires **node_id**
- ``pull``:green requires **node_id**
- ``push``:green requires **node_id**
- ``pushpkg``:green requires **node_id**
- ``pushpatch``:green requires **node_id**
- ``pushstats``:green requires **node_id**
- ``checks``:green requires **node_id**
- ``sysreport``:green requires **node_id**
- ``updatecomp``:green requires **node_id**
- ``updatepkg``:green requires **node_id**
- ``rotate_root_pw``:green requires **node_id**
- ``scanscsi``:green requires **node_id**
- ``reboot``:green requires **node_id**
- ``schedule_reboot``:green requires **node_id**
- ``unschedule_reboot``:green requires **node_id**
- ``shutdown``:green requires **node_id**
- ``wol``:green requires **node_id**
"""
        examples = [
          "# curl -u %(email)s -o- -X PUT -d node_id=5c977731-0562-11e6-8c70-7e9e6cf13c8a -d action=pushasset https://%(collector)s/init/rest/api/actions",
        ]
        rest_put_handler.__init__(
          self,
          path="/actions",
          desc=desc,
          data=data,
          examples=examples
        )
    def handler(self, **vars):
        """Validate/insert the action, signal the queue worker, and return
        the freshly created entry."""
        action_id = json_action_one(vars)
        if action_id > 0:
            action_q_event()
        else:
            raise HTTP(500, "Failed to enqueue action")
        return rest_get_action_queue_one().handler(action_id)
#
class rest_get_action_queue_stats(rest_get_handler):
    """GET /actions/stats: aggregate statistics over the action queue."""
    def __init__(self):
        desc = [
          "Display action queue statistics",
        ]
        examples = [
          "# curl -u %(email)s -o- https://%(collector)s/init/rest/api/actions/stats",
        ]
        rest_get_handler.__init__(
          self,
          path="/actions/stats",
          desc=desc,
          examples=examples,
        )
    def handler(self, **vars):
        """Return the same payload pushed over the websocket channel."""
        return dict(data=action_queue_ws_data())
#
class rest_get_action_queue_one(rest_get_line_handler):
    """GET /actions/<id>: show one queued action, scoped to visible nodes."""
    def __init__(self):
        desc = [
          "Display properties of a specific action posted in the action queue.",
        ]
        examples = [
          "# curl -u %(email)s -o- https://%(collector)s/init/rest/api/actions/10",
        ]
        rest_get_line_handler.__init__(
          self,
          path="/actions/<id>",
          tables=["action_queue"],
          desc=desc,
          examples=examples,
        )
    def handler(self, id, **vars):
        """Select the entry by id, apply the visibility filter, and render."""
        q = db.action_queue.id == int(id)
        q = q_filter(q, node_field=db.action_queue.node_id)
        self.set_q(q)
        return self.prepare_data(**vars)
#
class rest_delete_action_queue_one(rest_delete_handler):
    """DELETE /actions/<id>: remove one queued action (NodeManager only)."""
    def __init__(self):
        desc = [
          "Delete an action posted in the action queue.",
        ]
        examples = [
          "# curl -u %(email)s -o- -X DELETE https://%(collector)s/init/rest/api/actions/10",
        ]
        rest_delete_handler.__init__(
          self,
          path="/actions/<id>",
          tables=["action_queue"],
          desc=desc,
          examples=examples,
        )
    def handler(self, id, **vars):
        """Delete the entry after privilege and responsibility checks, log the
        deletion, and notify websocket listeners and the queue runner."""
        check_privilege("NodeManager")
        q = db.action_queue.id == int(id)
        q = q_filter(q, node_field=db.action_queue.node_id)
        row = db(q).select().first()
        if row is None:
            return dict(info="Action %s does not exist in action queue" % id)
        try:
            node_responsible(node_id=row.node_id)
        except HTTP as exc:
            # accept deleting orphaned action
            if exc.status != 404:
                raise
        db(q).delete()
        _log('action_queue.delete',
             'deleted actions %(u)s',
             dict(u=row.command),
             node_id=row.node_id)
        ws_send('action_queue')
        action_q_event()
        return dict(info="Action %s deleted" % id)
#
class rest_post_action_queue_one(rest_post_handler):
    """POST /actions/<id>: change the 'status' of one queued action."""
    def __init__(self):
        desc = [
          "Modify properties of an action posted in the action queue.",
          "The user must be responsible for the node.",
          "The user must be in the NodeExec or CompExec privilege group.",
          "The modification is logged in the collector's log.",
          "A websocket event is sent to announce the change in the table.",
        ]
        data = """
- <property>=<value> pairs.
- Available properties are: ``%(props)s``:green.
""" % dict(props="status")
        examples = [
          '# curl -u %(email)s -o- -X POST -d status="C" https://%(collector)s/init/rest/api/actions/10',
        ]
        rest_post_handler.__init__(
          self,
          path="/actions/<id>",
          desc=desc,
          data=data,
          tables=["action_queue"],
          examples=examples,
        )
    def handler(self, _id, **vars):
        """Validate the requested status change, apply it, log, and notify."""
        check_privilege(["NodeExec", "CompExec"])
        q = db.action_queue.id == int(_id)
        q = q_filter(q, node_field=db.action_queue.node_id)
        row = db(q).select().first()
        if row is None:
            return dict(error="Action %s does not exist in action queue" % _id)
        node_responsible(node_id=row.node_id)
        # NOTE(review): dict.keys() == list only holds on Python 2 (web2py
        # era); under Python 3 this comparison is always unequal and the
        # branch would always trigger -- confirm runtime before porting.
        if vars.keys() != ["status"]:
            invalid = ', '.join(sorted(set(vars.keys())-set(["status"])))
            return dict(error="Permission denied: properties not updateable: %(props)s" % dict(props=invalid))
        if row.status == 'T' and vars.get("status") == "C":
            return dict(error="Can not cancel action %d in %s state" % (row.id, row.status))
        if row.status in ('R', 'W') and vars.get("status") == "W":
            return dict(error="Can not redo action %d in %s state" % (row.id, row.status))
        db(q).update(**vars)
        _log('action_queue.update',
             'update properties %(data)s',
             dict(data=beautify_change(row, vars)),
             svc_id=row.svc_id,
             node_id=row.node_id)
        ws_send('action_queue')
        action_q_event()
        return rest_get_action_queue_one().handler(_id)
| opensvc/collector | init/models/rest/api_action_queue.py | api_action_queue.py | py | 9,175 | python | en | code | 0 | github-code | 36 |
37362500325 | import os
import sys
from pathlib import Path
from typing import Optional
import tempfile
import time
import queue
import subprocess
import threading
class IPythonInterpreter:
    """Drives a persistent IPython subprocess so cells can be executed one
    after another while sharing interpreter state.

    After each submission a sentinel string is echoed; reading stdout up to
    that sentinel delimits one cell's output.  stdout/stderr are drained by
    daemon reader threads because pipe reads block.
    """

    _END_MESSAGE = "__ INTERPRETER END OF EXECUTION __"
    _INTERPRETER_PROMPT = ">>> "
    _LAST_VAR = "_INTERPRETER_last_val"

    def __init__(
        self,
        *,
        working_dir: Path = None,
        ipython_path: Path = None,
        timeout: int = None,
        deactivate_venv: bool = False,
    ):
        """Spawn the interpreter process.

        :param working_dir: cwd for the subprocess (None = inherit).
        :param ipython_path: explicit interpreter binary; defaults to
            "ipython.exe" next to the running Python (Windows layout).
        :param timeout: seconds to wait for a cell before restarting.
        :param deactivate_venv: strip virtualenv activation from the
            child's environment.
        """
        self._working_dir = working_dir
        if ipython_path is None:
            self._ipython_path = Path(sys.executable).parent / "ipython.exe"
        else:
            self._ipython_path = ipython_path
        self._timeout = timeout
        self._deactivate_venv = deactivate_venv
        self._running = False
        self._start()

    def __del__(self):
        self.stop()

    def _get_env(self):
        """Return the child environment, optionally undoing venv activation.

        The venv activate script saves the pre-activation values in the
        _OLD_VIRTUAL_* variables; restore from those.
        """
        env = os.environ.copy()
        if self._deactivate_venv:
            if "VIRTUAL_ENV" in env:
                del env["VIRTUAL_ENV"]
            if "_OLD_VIRTUAL_PROMPT" in env:
                # BUG FIX: restore the saved *value*; the original assigned
                # the literal variable name string instead.
                env["PROMPT"] = env["_OLD_VIRTUAL_PROMPT"]
                del env["_OLD_VIRTUAL_PROMPT"]
            if "_OLD_VIRTUAL_PYTHONHOME" in env:
                env["PYTHONHOME"] = env["_OLD_VIRTUAL_PYTHONHOME"]
                del env["_OLD_VIRTUAL_PYTHONHOME"]
            if "_OLD_VIRTUAL_PATH" in env:
                # BUG FIX: the original set PATH to the literal string
                # "_OLD_VIRTUAL_PATH", clobbering PATH entirely.
                env["PATH"] = env["_OLD_VIRTUAL_PATH"]
                del env["_OLD_VIRTUAL_PATH"]
        return env

    def _start(self):
        """Launch IPython in --classic mode (plain '>>> ' prompts) and start
        the stdout/stderr reader threads."""
        env = self._get_env()
        self._process = subprocess.Popen(
            [str(self._ipython_path), "--classic"],
            text=True,
            cwd=self._working_dir,
            env=env,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        self._p_stdin = self._process.stdin
        self._stop_threads = False
        self._q_stdout = queue.Queue()
        self._t_stdout = threading.Thread(
            target=self._reader_thread,
            args=(self._process.stdout, self._q_stdout),
            daemon=True,
        )
        self._t_stdout.start()
        self._q_stderr = queue.Queue()
        self._t_stderr = threading.Thread(
            target=self._reader_thread,
            args=(self._process.stderr, self._q_stderr),
            daemon=True,
        )
        self._t_stderr.start()
        self._wait_till_started()
        self._running = True

    def stop(self):
        """Kill the subprocess and join the reader threads (idempotent)."""
        if self._running:
            self._process.kill()
            self._stop_threads = True
            self._t_stdout.join()
            self._t_stderr.join()
            self._running = False

    def _reader_thread(self, pipe, q):
        # readline() blocks, hence a dedicated daemon thread per pipe.
        while not self._stop_threads:
            q.put(pipe.readline())

    def _read_stdout(self, timeout: Optional[int]) -> Optional[str]:
        """Collect stdout lines until the sentinel; None on timeout."""
        start = time.time()
        stdout = ""
        while True:
            try:
                line = self._q_stdout.get(timeout=timeout)
            except queue.Empty:
                line = None
            if timeout is not None and time.time() - start > timeout:
                line = None
            if line is None:
                return None
            if self._END_MESSAGE in line:
                break
            stdout += line
        # Strip the leading ">>> " echoed before the first output line.
        return stdout[len(self._INTERPRETER_PROMPT) :]

    def _read_stderr(self) -> str:
        """Drain whatever stderr has accumulated so far (non-blocking)."""
        stderr = ""
        while not self._q_stderr.empty():
            stderr += self._q_stderr.get()
        return stderr

    def _write_stdin(self, text: str):
        self._p_stdin.write(text)
        self._p_stdin.flush()

    def _wait_till_started(self):
        # Echo the sentinel once and wait for it, proving the REPL is up.
        self._write_stdin(f"'{self._END_MESSAGE}'\n")
        self._read_stdout(timeout=10)

    def _create_script(self, script: str) -> Path:
        """Write the cell to a temp .py file; if the last line is an
        expression, capture and print its value (notebook-like behavior)."""
        lines = script.splitlines()
        if len(lines) > 0:
            is_eval = True
            try:
                compile(lines[-1], "<stdin>", "eval")
            except SyntaxError:
                is_eval = False
            if is_eval:
                lines[-1] = f"{self._LAST_VAR} = ({lines[-1]})"
                lines.append(f"if {self._LAST_VAR} is not None:")
                lines.append(f"    print({self._LAST_VAR})")
        script = "\n".join(lines) + "\n"
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(script)
        return Path(f.name)

    def _run_script(self, script_path: Path):
        # %run -i keeps the interpreter namespace across cells; the quoted
        # sentinel afterwards marks end-of-output.
        self._write_stdin(f"%run -i {script_path}\n'{self._END_MESSAGE}'\n")

    def _fetch_result(self) -> Optional[str]:
        """Return combined stdout+stderr, or None (and restart) on timeout."""
        stdout = self._read_stdout(timeout=self._timeout)
        if stdout is None:
            # The cell hung: recycle the whole interpreter to a clean state.
            self.stop()
            self._start()
            return None
        stderr = self._read_stderr()
        return stdout + stderr

    def run_cell(self, script: str) -> Optional[str]:
        """Run the whole cell and return the last result.
        Returns None if the interpreter timed out."""
        script_path = self._create_script(script)
        self._run_script(script_path)
        result = self._fetch_result()
        script_path.unlink()
        return result
| silvanmelchior/IncognitoPilot | services/services/interpreter/ipython_interpreter.py | ipython_interpreter.py | py | 5,216 | python | en | code | 364 | github-code | 36 |
39399879426 | from pyspark import SparkConf, SparkContext
import random
import numpy as np
import time
def mapper1(line, matrix_size=None):
    """Map one matrix entry "name,row,col,value" to partial-product pairs.

    For an M entry (i, k) emit ((i, j, k), value) for every result column j;
    for an N entry (k, j) emit ((i, j, k), value) for every result row i.
    Two entries sharing a key are exactly the factors of one partial product.

    ``matrix_size`` defaults to the module-level MATRIX_SIZE so the existing
    Spark ``flatMap(mapper1)`` call keeps working unchanged.
    """
    if matrix_size is None:
        matrix_size = MATRIX_SIZE
    matrix_name, row, col, num = line.split(",")
    row, col, num = int(row), int(col), int(num)
    mapList = []
    for idx in range(matrix_size):
        if matrix_name == 'M':
            key = (row, idx, col)
        else:
            key = (idx, col, row)
        mapList.append([key, num])
    return mapList
def reducer1(x, y):
    """Multiply the two factors (one from M, one from N) sharing a key."""
    return x * y
def mapper2(x): # x => ((0, 0, 0), 10)
    """Drop the inner index k from the key: ((i, j, k), v) -> [((i, j), v)]."""
    (row, col, _k), value = x
    return [((row, col), value)]
def reducer2(x, y):
    """Sum the partial products that target the same output cell."""
    return x + y
def map_and_reduce(file_path):
    """Run the two-stage MapReduce matrix multiplication on local Spark.

    Stage 1 pairs M/N factors by (row, col, k) and multiplies them; stage 2
    sums the partial products per output cell.  Returns a list of
    ((row, col), value) pairs.

    NOTE(review): a new local SparkContext is created on every call, so a
    second call in the same process will fail -- intended for single use.
    """
    conf = SparkConf().setMaster("local").setAppName("matrix_multiplication")
    sc = SparkContext(conf=conf)
    lines = sc.textFile(file_path).flatMap(mapper1)
    lines = lines.reduceByKey(reducer1)
    lines = lines.flatMap(mapper2)
    lines = lines.reduceByKey(reducer2)
    return lines.collect()
def _write_random_matrix(f, name, matrix_size):
    """Write a random matrix_size x matrix_size matrix (entries 1..10) to f
    as "name,row,col,value" lines and return it as a list of rows."""
    rows = []
    for i in range(matrix_size):
        row = []
        for j in range(matrix_size):
            item = random.randint(1, 10)
            row.append(item)
            f.write('{},{},{},{}\n'.format(name, i, j, item))
        rows.append(row)
    return rows

def gen_test_case(matrix_size):
    """Generate "<size>input.txt" holding two random matrices M and N, plus
    "<size>input_answer.txt" with their product for result verification.

    Refactored: the duplicated M/N generation loops now share the
    _write_random_matrix helper; file formats are unchanged.
    """
    with open("{}input.txt".format(matrix_size), 'w') as f:
        M = _write_random_matrix(f, 'M', matrix_size)
        N = _write_random_matrix(f, 'N', matrix_size)
    answer = np.matmul(np.array(M), np.array(N))
    with open("{}input_answer.txt".format(matrix_size), 'w') as f:
        for i in range(answer.shape[0]):
            for j in range(answer.shape[1]):
                f.write('{},{},{}\n'.format(i, j, answer[i, j]))
def write_results_to_file(results, matrix_size):
    """Sort ((row, col), value) pairs and write them to "<size>output.txt"
    as "row,col,value" lines.  Sorts ``results`` in place, as before."""
    results.sort(key=lambda item: (item[0], item[1]))
    with open("{}output.txt".format(matrix_size), 'w') as out_file:
        for (row, col), value in results:
            out_file.write('{},{},{}\n'.format(row, col, value))
if __name__ == '__main__':
    # Edge length of the square matrices.  mapper1 reads this as a global,
    # which works because Spark runs in local mode inside this process.
    MATRIX_SIZE = 500
    # gen_test_case(MATRIX_SIZE)
    FILE_PATH = '{}input.txt'.format(MATRIX_SIZE)
    start_time = time.time()
    results = map_and_reduce(file_path=FILE_PATH)
    end_time = time.time()
    print('running time: {:.2f} min'.format((end_time - start_time) / 60))
    write_results_to_file(results, MATRIX_SIZE)
| uuuChen/NTHU-Course-BIGDATA | bigData_hw1/hw1.py | hw1.py | py | 2,829 | python | en | code | 0 | github-code | 36 |
74434198503 | import json #to impost post.json
from blog.models import Post
# instance of opening and loading json data
# Load the exported posts and create one Django Post row per entry.
with open('post.json') as f:
    posts_json = json.load(f)
# Loop through JSON data
for post in posts_json:
    """
    input:
    title: the title of the json element
    content: the cotent of the json element
    author_id: the user number of the json element, which is used as the
    ForeignKey to connect the blog site to the User database.
    Still trying to verify, but SQL convention is that the blog primary
    key would author and the foreign key should be author_id.
    output:
    After interation, it will post the JSON elements as new posts in blog
    """
    # NOTE(review): the loop variable `post` (a dict) is rebound to the new
    # Post model instance below -- harmless here, but easy to misread.
    post = Post(title=post['title'],
                content=post['content'],
                author_id = post['user_id'])
    post.save()
| YusufBritton1990/Django_tutorial_backup | django_project/shell_posting.py | shell_posting.py | py | 837 | python | en | code | 0 | github-code | 36 |
1379120770 | import os
import cv2
import dlib
import numpy as np
from eye import Eye
from calibration import Calibration
class EyeTracking(object):
    """
    This class tracks the user's gaze.
    It provides useful information like the position of the eyes
    and pupils and allows to know if the eyes are open or closed
    """

    def __init__(self):
        self.frame = None                 # current BGR frame (set by refresh)
        self.calibration = Calibration()  # shared pupil-threshold calibration
        self.eye_left = None              # Eye object for the left eye, or None
        self.eye_right = None             # Eye object for the right eye, or None
        self.ex_eye_left = None           # previous-frame eyes (used by is_focus)
        self.ex_eye_right = None
        self.is_attention = 100           # exposed via get_attention(); never updated here
        self.method = ""                  # "hog" or "dnn": detector used on the last frame
        # _face_detector is used to detect faces
        self._face_detector = dlib.get_frontal_face_detector()
        # _predictor is used to get facial landmarks of a given face
        cwd = os.path.abspath(os.path.dirname(__file__))
        model_path = os.path.abspath(os.path.join(cwd, "./models/shape_predictor_68_face_landmarks.dat"))
        self._predictor = dlib.shape_predictor(model_path)
        # dnn based landmark
        self.net = cv2.dnn.readNetFromCaffe(
            './models/deploy.prototxt.txt',
            './models/res10_300x300_ssd_iter_140000.caffemodel'
        )
        self.landmark_predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')

    @property
    def pupils_located(self):
        """Check that the pupils have been located"""
        try:
            int(self.eye_left.pupil.x)
            int(self.eye_left.pupil.y)
            int(self.eye_right.pupil.x)
            int(self.eye_right.pupil.y)
            return True
        except Exception:
            return False

    def adjust_gamma(self, image, gamma):
        """Apply gamma correction via a 256-entry lookup table."""
        invGamma = 1.0 / gamma
        table = np.array([((i / 255.0) ** invGamma) * 255
                          for i in np.arange(0, 256)]).astype("uint8")
        return cv2.LUT(image, table)

    def preprocess(self, img):
        """Return a grayscale frame, gamma-corrected when the scene is dark.

        NOTE(review): gamma=1.0 is an identity correction, so the dark-frame
        branch currently has no effect -- confirm the intended gamma value.
        """
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if gray_img.mean() < 130:
            img = self.adjust_gamma(img, 1.0)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return img

    def _analyze(self):
        """Detects the face and initialize Eye objects"""
        # Filtering matters: results differ depending on whether
        # preprocess() is applied before detection.
        # Tuning the gamma value might also help the DNN detector.
        frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        # frame = self.preprocess(self.frame)
        faces = self._face_detector(frame)
        try:
            if faces:
                # Fast path: dlib's HOG detector found a face.
                self.method = "hog"
                landmark = np.empty([68, 2], dtype = int)
                landmarks = self._predictor(frame, faces[0])
                for i in range(68):
                    landmark[i][0] = landmarks.part(i).x
                    landmark[i][1] = landmarks.part(i).y
                # print(landmark)
                self.eye_left = Eye(frame, landmark, 0, self.calibration)
                self.eye_right = Eye(frame, landmark, 1, self.calibration)
            else:
                # Fallback: Caffe SSD face detector on the color frame.
                self.method = "dnn"
                (h, w) = self.frame.shape[:2]
                blob = cv2.dnn.blobFromImage(
                    cv2.resize(self.frame, (300, 300)),
                    1.0,(300, 300), (104.0, 177.0, 123.0
                    )
                )
                self.net.setInput(blob)
                detections = self.net.forward()
                ## bounding box
                list_bboxes = []
                list_confidence = []
                list_dlib_rect = []
                for i in range(0, detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    # Drop low-confidence detections.
                    if confidence < 0.6:
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (l, t, r, b) = box.astype('int')
                    # Shrink the box vertically, then pad horizontally so
                    # the crop becomes square (parity-adjusted on the left).
                    original_vertical_length = b - t
                    t = int(t + original_vertical_length * 0.15)
                    b = int(b - (original_vertical_length) * 0.05)
                    margin = ((b-t) - (r-l))//2
                    l = l - margin if (b-t-r+l)%2 == 0 else l - margin - 1
                    r = r + margin
                    list_bboxes.append([l, t, r, b])
                    list_confidence.append(confidence)
                    rect_bb = dlib.rectangle(left=l, top=t, right=r, bottom=b)
                    list_dlib_rect.append(rect_bb)
                # landmark
                list_landmarks = []
                for rect in list_dlib_rect:
                    points = self.landmark_predictor(self.frame, rect)
                    list_points = list(map(lambda p: (p.x, p.y), points.parts()))
                    list_landmarks.append(list_points)
                # print(list_landmarks)
                # Only the first detected face is used; an empty list here
                # raises IndexError, handled below as "no face".
                list_landmarks = list_landmarks[0]
                self.eye_left = Eye(self.frame, list_landmarks, 0, self.calibration)
                self.eye_right = Eye(self.frame, list_landmarks, 1, self.calibration)
        except IndexError:
            self.eye_left = None
            self.eye_right = None

    def refresh(self, frame):
        """Refreshes the frame and analyzes it.

        Arguments:
            frame (numpy.ndarray): The frame to analyze
        """
        self.frame = frame
        self._analyze()

    def pupil_left_coords(self):
        """Returns the coordinates of the left pupil"""
        if self.pupils_located:
            x = self.eye_left.origin[0] + self.eye_left.pupil.x
            y = self.eye_left.origin[1] + self.eye_left.pupil.y
            return (x, y)

    def pupil_right_coords(self):
        """Returns the coordinates of the right pupil"""
        if self.pupils_located:
            x = self.eye_right.origin[0] + self.eye_right.pupil.x
            y = self.eye_right.origin[1] + self.eye_right.pupil.y
            return (x, y)

    def get_attention(self):
        # NOTE(review): is_attention is initialized to 100 and never
        # updated in this class -- confirm callers expect a constant.
        return self.is_attention

    def get_method(self):
        """Return which detector ("hog" or "dnn") handled the last frame."""
        return self.method

    def horizontal_ratio(self):
        """Returns a number between 0.0 and 1.0 that indicates the
        horizontal direction of the gaze. The extreme right is 0.0,
        the center is 0.5 and the extreme left is 1.0
        """
        if self.pupils_located:
            # The -10 shrink of the denominator looks like an empirical
            # calibration offset -- confirm before changing.
            pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)
            pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)
            return (pupil_left + pupil_right) / 2

    def vertical_ratio(self):
        """Returns a number between 0.0 and 1.0 that indicates the
        vertical direction of the gaze. The extreme top is 0.0,
        the center is 0.5 and the extreme bottom is 1.0
        """
        if self.pupils_located:
            pupil_left = self.eye_left.pupil.y / (self.eye_left.center[1] * 2 - 10)
            pupil_right = self.eye_right.pupil.y / (self.eye_right.center[1] * 2 - 10)
            return (pupil_left + pupil_right) / 2

    def is_right(self):
        """Returns true if the user is looking to the right"""
        if self.pupils_located:
            return self.horizontal_ratio() <= 0.35

    def is_left(self):
        """Returns true if the user is looking to the left"""
        if self.pupils_located:
            return self.horizontal_ratio() >= 0.65

    def is_center(self):
        """Returns true if the user is looking to the center"""
        if self.pupils_located:
            return self.is_right() is not True and self.is_left() is not True

    def is_blinking(self):
        """Returns true if the user closes his eyes"""
        if self.pupils_located:
            # 3.8 is an empirical threshold on the eye aspect ratio-like
            # `blinking` value provided by Eye -- confirm per camera setup.
            blinking_ratio = (self.eye_left.blinking + self.eye_right.blinking) / 2
            return blinking_ratio > 3.8

    def is_focus(self):
        """Return 1 when the mean horizontal pupil position moved less than
        5 px since the previous call, else 0 (also 0 on the first call)."""
        if self.ex_eye_left is None:
            self.eye_position_update()
            return 0
        if self.pupils_located:
            focus = (
                ((self.eye_left.pupil.x + self.eye_right.pupil.x) / 2) -
                ((self.ex_eye_left.pupil.x + self.ex_eye_right.pupil.x) / 2)
            )
            self.eye_position_update()
            if abs(focus) < 5:
                return 1
            else:
                return 0

    def eye_position_update(self):
        """Remember the current eyes for the next is_focus() comparison."""
        self.ex_eye_left = self.eye_left
        self.ex_eye_right = self.eye_right

    def annotated_frame(self):
        """Returns the main frame with pupils highlighted"""
        frame = self.frame.copy()
        if self.pupils_located:
            color = (0, 255, 0)
            x_left, y_left = self.pupil_left_coords()
            x_right, y_right = self.pupil_right_coords()
            # Draw a small green cross on each pupil.
            cv2.line(frame, (x_left - 5, y_left), (x_left + 5, y_left), color)
            cv2.line(frame, (x_left, y_left - 5), (x_left, y_left + 5), color)
            cv2.line(frame, (x_right - 5, y_right), (x_right + 5, y_right), color)
            cv2.line(frame, (x_right, y_right - 5), (x_right, y_right + 5), color)
        return frame
| dead4s/SpaHeron_MachineLearning_UXIS | eye_tracking/eye_tracking.py | eye_tracking.py | py | 9,108 | python | en | code | 3 | github-code | 36 |
8060696161 | # Lint as: python3
"""CoQA: A Conversational Question Answering Challenge"""
# partially taken from https://github.com/NTU-SQUAD/transformers-coqa/blob/2dfd58b70956e935e370989fa421f34bb83bff08/data/processors/coqa.py
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import re
import string
from collections import Counter
import spacy
import datasets
MAX_Q_LEN = 100 # Max length of question
YOUR_LOCAL_DOWNLOAD = "../data"
_CITATION = """\
@article{reddy-etal-2019-coqa,
title = "{C}o{QA}: A Conversational Question Answering Challenge",
author = "Reddy, Siva and
Chen, Danqi and
Manning, Christopher D.",
journal = "Transactions of the Association for Computational Linguistics",
volume = "7",
month = mar,
year = "2019",
url = "https://www.aclweb.org/anthology/Q19-1016",
doi = "10.1162/tacl_a_00266",
pages = "249--266",
}
"""
_DESCRIPTION = """\
CoQA is a large-scale dataset for building Conversational Question Answering systems. \
The goal of the CoQA challenge is to measure the ability of machines to understand a text passage\
and answer a series of interconnected questions that appear in a conversation. \
CoQA is pronounced as coca.
"""
_HOMEPAGE = "https://stanfordnlp.github.io/coqa/"
_URLs = "https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json, https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json"
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start: (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
class Coqa(datasets.GeneratorBasedBuilder):
    """HuggingFace `datasets` builder that flattens CoQA conversations into
    SQuAD-style reading-comprehension examples: one example per question turn,
    with the dialogue history packed (most recent first) into the question."""
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="coqa_rc",
            version=VERSION,
            description="Load CoQA dataset for machine reading comprehension tasks",
        )
    ]
    DEFAULT_CONFIG_NAME = "coqa_rc"
    def _info(self):
        """Declare the feature schema served by this builder."""
        if self.config.name == "coqa_rc":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                            # "answer_end": datasets.Value("int32"),
                        }
                    ),
                    "domain": datasets.Value("string"), # is "source" in CoQA (e.g., wikipedia)
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Yield train/validation splits from pre-downloaded local JSON files."""
        my_urls = _URLs
        # data_dir = dl_manager.download_and_extract(my_urls)
        data_dir = YOUR_LOCAL_DOWNLOAD # point to local dir to avoid downloading the dataset again
        if self.config.name == "coqa_rc":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(
                            data_dir, "coqa/coqa-dev-v1.0.json"
                        ),
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(
                            data_dir, "coqa/coqa-train-v1.0.json"
                        ),
                    },
                ),
            ]
    def _str(self, s):
        """ Convert PTB tokens to normal tokens """
        if (s.lower() == '-lrb-'):
            s = '('
        elif (s.lower() == '-rrb-'):
            s = ')'
        elif (s.lower() == '-lsb-'):
            s = '['
        elif (s.lower() == '-rsb-'):
            s = ']'
        elif (s.lower() == '-lcb-'):
            s = '{'
        elif (s.lower() == '-rcb-'):
            s = '}'
        return s
    def space_extend(self, matchobj):
        """Pad a regex match with spaces so it tokenizes as its own word."""
        return ' ' + matchobj.group(0) + ' '
    def is_whitespace(self, c):
        """True for ASCII whitespace or the narrow no-break space (U+202F)."""
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False
    def pre_proc(self, text):
        """Space out dashes/punctuation and collapse runs of whitespace."""
        text = re.sub(u'-|\u2010|\u2011|\u2012|\u2013|\u2014|\u2015|%|\[|\]|:|\(|\)|/|\t', self.space_extend, text)
        text = text.strip(' \n')
        text = re.sub('\s+', ' ', text)
        return text
    def process(self, parsed_text):
        """Extract words, character offsets and sentence spans from a spaCy doc."""
        output = {'word': [], 'offsets': [], 'sentences': []}
        for token in parsed_text:
            output['word'].append(self._str(token.text))
            output['offsets'].append((token.idx, token.idx + len(token.text)))
        word_idx = 0
        for sent in parsed_text.sents:
            output['sentences'].append((word_idx, word_idx + len(sent)))
            word_idx += len(sent)
        assert word_idx == len(output['word'])
        return output
    def normalize_answer(self, s):
        """Lower text and remove punctuation, articles and extra whitespace."""
        def remove_articles(text):
            regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
            return re.sub(regex, ' ', text)
        def white_space_fix(text):
            return ' '.join(text.split())
        def remove_punc(text):
            exclude = set(string.punctuation)
            return ''.join(ch for ch in text if ch not in exclude)
        def lower(text):
            return text.lower()
        return white_space_fix(remove_articles(remove_punc(lower(s))))
    def get_raw_context_offsets(self, words, raw_text):
        """Map each token back to its (start, end) character span in raw_text."""
        raw_context_offsets = []
        p = 0
        for token in words:
            # Skip whitespace between tokens.
            while p < len(raw_text) and re.match('\s', raw_text[p]):
                p += 1
            if raw_text[p:p + len(token)] != token:
                print('something is wrong! token', token, 'raw_text:',
                      raw_text)
            raw_context_offsets.append((p, p + len(token)))
            p += len(token)
        return raw_context_offsets
    def find_span(self, offsets, start, end):
        """Return the (first, last) token indices covering char span [start, end]."""
        start_index = -1
        end_index = -1
        for i, offset in enumerate(offsets):
            if (start_index < 0) or (start >= offset[0]):
                start_index = i
            if (end_index < 0) and (end <= offset[1]):
                end_index = i
        return (start_index, end_index)
    def find_span_with_gt(self, context, offsets, ground_truth):
        """Return the token span of `context` maximizing F1 against the gold answer.

        Only spans whose endpoints are tokens appearing in the normalized gold
        answer are considered, which keeps the O(n^2) scan tractable.
        """
        best_f1 = 0.0
        best_span = (len(offsets) - 1, len(offsets) - 1)
        gt = self.normalize_answer(self.pre_proc(ground_truth)).split()
        ls = [
            i for i in range(len(offsets))
            if context[offsets[i][0]:offsets[i][1]].lower() in gt
        ]
        for i in range(len(ls)):
            for j in range(i, len(ls)):
                pred = self.normalize_answer(
                    self.pre_proc(
                        context[offsets[ls[i]][0]:offsets[ls[j]][1]])).split()
                # Token-overlap F1, as in the official CoQA/SQuAD evaluation.
                common = Counter(pred) & Counter(gt)
                num_same = sum(common.values())
                if num_same > 0:
                    precision = 1.0 * num_same / len(pred)
                    recall = 1.0 * num_same / len(gt)
                    f1 = (2 * precision * recall) / (precision + recall)
                    if f1 > best_f1:
                        best_f1 = f1
                        best_span = (ls[i], ls[j])
        return best_span
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)["data"]
        for row in data:
            story = row["story"]
            domain = row["source"]
            title = row["filename"]
            # Running dialogue history (questions and gold answers) for this story.
            all_prev_utterances = []
            for i, question in enumerate(row['questions']):
                id_ = str(row['id']) + '_' + str(question['turn_id'])
                all_prev_utterances.append(question['input_text'])
                answers = [{
                    "text": row["answers"][i]["span_text"],
                    "answer_start": row["answers"][i]["span_start"]
                }]
                # Most recent utterance first, truncated to MAX_Q_LEN words.
                question_str = " ".join(
                    list(reversed(all_prev_utterances))
                ).strip()
                question_str = " ".join(question_str.split()[:MAX_Q_LEN])
                # append the original answer into the utterance list
                orig_answer_text = row["answers"][i]["input_text"]
                all_prev_utterances.append(orig_answer_text)
                qa = {
                    "id": id_,
                    "domain": domain,
                    "title": title,
                    "context": story,
                    "question": question_str,
                    "answers": answers,
                }
                yield id_, qa
1923472018 | #!/usr/bin/env python
#coding=utf-8
import json
from lib.sqs import zhihufav_sqs
from lib.tasks import add_note
def get_sqs_queue():
    """Fetch up to 10 messages from the zhihufav SQS queue and dispatch a
    Celery ``add_note`` task for each one (the receipt handle travels with
    the task so the worker can delete the message after processing)."""
    for message in zhihufav_sqs.get_messages(10):
        payload = json.loads(message.get_body())
        add_note.delay(payload.get('api_url'),
                       payload.get('parent_note'),
                       message.receipt_handle)
if __name__=="__main__":
    # Poll the queue five times per invocation; each call drains up to 10 messages.
    for i in range(5):
        get_sqs_queue()
| youqingkui/zhihufav | do_tasks.py | do_tasks.py | py | 553 | python | en | code | 0 | github-code | 36 |
18692006503 | import json
import logging
import sys
from z3 import z3
from teether.constraints import check_model_and_resolve
from teether.evm.exceptions import IntractablePath
from teether.evm.state import LazySubstituteState, SymRead
from teether.project import Project
from teether.util.z3_extra_util import concrete
def set_balance(addr, bal, i_r):
calldata = z3.Array('CALLDATA_%d' % i_r.xid, z3.BitVecSort(256), z3.BitVecSort(8))
new_calldata = z3.Store(z3.Store(z3.Store(z3.Store(calldata, 0, 0x70), 1, 0xa0), 2, 0x82), 3, 0x31)
for num, byte in enumerate(addr.to_bytes(32, 'big'), 4):
new_calldata = z3.Store(new_calldata, num, byte)
subst = [(calldata, new_calldata)]
state = LazySubstituteState(i_r.state, subst)
constraints = [z3.substitute(c, subst) for c in i_r.constraints]
sha_constraints = {sha: z3.simplify(z3.substitute(sha_value, subst)) if not isinstance(sha_value, SymRead)
else sha_value for sha, sha_value in i_r.sha_constraints.items()}
mstart, msz = state.stack[-1], state.stack[-2]
mm = i_r.state.memory.read(mstart, msz)
if not isinstance(mm, SymRead):
if all(concrete(m) for m in mm):
return None
mm = z3.simplify(z3.Concat([m if not concrete(m) else z3.BitVecVal(m, 8) for m in mm]))
extra_constraints = [mm == bal]
try:
model = check_model_and_resolve(constraints + extra_constraints, sha_constraints)
sloads = []
storage = None
for v in model:
if v.name().startswith("SLOAD"):
sloads.append(model.eval(model[v]).as_long())
if v.name().startswith("STORAGE"):
storage = z3.simplify(model[v])
return {sl: model.eval(storage[sl]).as_long() for sl in sloads}
except IntractablePath:
return None
def main(code_path, output_file, target_addrs, target_bals):
if code_path.endswith('.json'):
with open(code_path, 'rb') as f:
jd = json.load(f)
p = Project.from_json(jd)
else:
with open(code_path) as infile:
inbuffer = infile.read().rstrip()
code = bytes.fromhex(inbuffer)
p = Project(code)
with open('%s.project.json' % code_path, 'w') as f:
json.dump(p.to_json(), f)
target_addrs = [int(addr, 16) for addr in target_addrs]
target_bals = [int(bal) for bal in target_bals]
storage_result = dict()
addr, bal = target_addrs[0], target_bals[0]
return_ins = p.cfg.filter_ins('RETURN')
gen_constraints = p.get_constraints(return_ins)
results = []
for _, _, i_r in gen_constraints:
stor = set_balance(addr, bal, i_r)
if stor:
storage_result.update(stor)
results = [i_r] + results
break
results.append(i_r)
else:
logging.warning(f"Could not set balance of {hex(addr)} to {bal}")
for addr,bal in zip(target_addrs[1:], target_bals[1:]):
for i_r in results:
stor = set_balance(addr, bal, i_r)
if stor:
storage_result.update(stor)
break
else:
for _, _, i_r in gen_constraints:
stor = set_balance(addr, bal, i_r)
if stor:
storage_result.update(stor)
results = [i_r] + results
break
results.append(i_r)
else:
logging.warning(f"Could not set balance of {hex(addr)} to {bal}")
with open(output_file, 'w') as f:
json.dump({"0x{0:0{1}X}".format(k, 64): "0x{0:0{1}x}".format(v, 64) for k, v in storage_result.items()}, f)
if __name__ == '__main__':
if len(sys.argv) < 5 or len(sys.argv) % 2 != 1:
print('Usage: %s <code> <output file> <target-address> <target-balance> [<target-address> <target-balance>] ...'
% sys.argv[0], file=sys.stderr)
exit(-1)
code = sys.argv[1]
output_file = sys.argv[2]
target_addresses = sys.argv[3::2]
target_balances = sys.argv[4::2]
main(code, output_file, target_addresses, target_balances) | t-hermanns/coercer | bin/set_balanceOf.py | set_balanceOf.py | py | 4,086 | python | en | code | 1 | github-code | 36 |
24592548396 | """6. Write a program that takes a string as input and returns the string with all vowels removed."""
import tests
import time
# Characters treated as vowels (both cases); note 'y'/'Y' are included on purpose.
vowels = ['a', 'e', 'o', 'u', 'y', 'i', 'A', 'E', 'O', 'U', 'Y', 'I']
# Default text comes from the bundled test cases; the interactive section below
# lets the user override it at runtime.
my_str = tests.cases[0]
#my_str = input('Enter any string: ')
my_str = my_str.strip()
def branch(number_of_repeats=10000):
    """Decorator factory for micro-benchmarking.

    The decorated callable, when invoked, runs the wrapped function
    ``number_of_repeats`` times (discarding its results) and returns the
    elapsed wall-clock time in seconds, rounded to three decimal places.
    """
    def time_of_function(function):
        def wrapped(*args):
            started = time.time()
            for _ in range(number_of_repeats):
                function(*args)
            return round(time.time() - started, 3)
        return wrapped
    return time_of_function
def try1(s):
    """Strip every vowel (both cases, including y/Y) in a single pass using a
    translation table instead of twelve chained str.replace calls."""
    return s.translate(str.maketrans('', '', 'aoeiuyAOEIUY'))
def try2(s):
    """Strip vowels by filtering characters against the module-level ``vowels`` list."""
    kept = [letter for letter in s if letter not in vowels]
    return ''.join(kept)
def try3(s):
    """Strip vowels by removing each entry of ``vowels`` from the string in turn."""
    for vowel in vowels:
        s = s.replace(vowel, '')
    return s
def try4(s):
    """Strip vowels with a generator expression fed straight into str.join."""
    return ''.join(ch for ch in s if ch not in vowels)
# Interactive driver: show the default poem, optionally take user text, strip
# its vowels, then benchmark the four implementations with the timing decorator.
print('\nHello, this program will delete all vowels from Walt Whitman poem "O Captain! My Captain!"\n\n\n')
print(my_str)
print('\n\n\nBut you can choose you own text. Type "u" to eneter user\'s text!')
enter = input('Or anything else to continue with default text: ')
# Accepts Latin 'u' or Cyrillic 'г' (same physical key on a Russian layout).
if enter.lower() in ('u', 'г'):
    my_str = input('Fine! Enter your text: ')
ans = try1(my_str)
print(ans)
print('\n\n\nDo you want to know which functions works faster? I think you do!')
# Each branch()(fn)(my_str) call runs fn 10000 times and prints the elapsed seconds.
print('try1 will delete all vowels 10000 times for: ')
print(branch()(try1)(my_str))
print('try2 will delete all vowels 10000 times for: ')
print(branch()(try2)(my_str))
print('try3 will delete all vowels 10000 times for: ')
print(branch()(try3)(my_str))
print('try4 will delete all vowels 10000 times for: ')
print(branch()(try4)(my_str))
# my_str = input("Enter any string")
70062415785 | '''input
4
dwight jello 51430
creed beans 263
stanley pretzels 45121
pam brushtool 941
'''
# Read a count, then that many "name... price" lines; print the gifts ordered
# by ascending price (duplicate names keep the last price read, as before).
total = int(input())
prices = {}
for _ in range(total):
    tokens = input().split()
    name = ' '.join(tokens[:-1]).strip()
    prices[name] = int(tokens[-1])
for name, price in sorted(prices.items(), key=lambda entry: entry[1]):
    print(f"{name} {price}")
| teamcodedevs/beecrowd-christmas-contest-2021 | AdrianoAlmeida/g.py | g.py | py | 344 | python | en | code | 0 | github-code | 36 |
19250130553 | import http
import requests
import tenacity
from bs4 import BeautifulSoup
from tenacity import retry_if_exception_type, stop_after_attempt
from .exceptions import BadRequest, NetworkError, NotClientError, NotFound, ServerError
from .utils import cache
# Retries up to 5 times, but only on NotClientError (presumably the base of
# ServerError/NetworkError — TODO confirm the hierarchy in .exceptions);
# client-side errors like BadRequest/NotFound are raised immediately.
# Results are cached for 1 second to absorb bursts of identical requests.
@tenacity.retry(
    reraise=True,
    retry=retry_if_exception_type(NotClientError),
    stop=stop_after_attempt(5),
)
@cache(ttl=1)
def get_weather_for_city(city: str) -> str:
    """
    Returns temperature of the city in celsius,
    as the source 'https://world-weather.ru' was used
    """
    url = f'https://world-weather.ru/pogoda/russia/{city}'
    page_text = None
    try:
        response = requests.get(url)
        response.raise_for_status()
        page_text = response.text
    except requests.exceptions.HTTPError as err:
        # Map HTTP status codes onto the project's domain exceptions;
        # `from None` suppresses the noisy requests traceback.
        err_status = err.response.status_code
        if err_status == http.HTTPStatus.BAD_REQUEST:
            raise BadRequest() from None
        if err_status == http.HTTPStatus.NOT_FOUND:
            raise NotFound() from None
        if err_status == http.HTTPStatus.INTERNAL_SERVER_ERROR:
            raise ServerError from None
    except requests.exceptions.ConnectionError:
        raise NetworkError from None
    # Scrape the current temperature element from the page.
    soup = BeautifulSoup(page_text, 'html.parser')
    return soup.find('div', id='weather-now-number').text
| macoyshev/weather_CLI | weather/weather_parser.py | weather_parser.py | py | 1,336 | python | en | code | 0 | github-code | 36 |
74246420903 | import pandas as pd
import glob
from openpyxl import load_workbook
import os
class DataFile:
    """Helpers for merging exported CSV reports and tidying Excel workbooks."""

    def __init__(self, file_path):
        self.file_path = file_path

    @staticmethod
    def read_csv_file(file_path):
        """Read one tab-separated, UTF-16LE encoded *.csv export.

        :param file_path: path to a *.csv file
        :return: a pandas DataFrame built from the file
        """
        with open(file_path, encoding='UTF-16LE') as source:
            return pd.read_csv(source, sep='\t')

    def combine_to_excel(self, input_directory: str, output_file: str) -> None:
        """Concatenate every *.csv in a directory into a single .xlsx file.

        :param input_directory: directory containing the *.csv files
        :param output_file: name of the new file with combined data
        :return: None; writes the merged workbook to disk
        """
        frames = []
        for csv_path in glob.glob(f'{input_directory}/*.csv'):
            frames.append(self.read_csv_file(file_path=csv_path))
        pd.concat(frames).to_excel(output_file, index=False)

    @staticmethod
    def load_invoices(file_path):
        """Return the first column of the active sheet (skipping the header row)
        as a list of strings."""
        workbook = load_workbook(file_path)
        sheet = workbook.active
        invoices = []
        for row in sheet.iter_rows(min_row=2, min_col=1, max_col=1, values_only=True):
            invoices.append(str(row[0]))
        return invoices

    @staticmethod
    def remove_row(file_path):
        """Drop the first row of the active sheet when it contains blank cells
        (i.e. it is junk preceding the real headings).

        :param file_path: path to GFIS excel file
        """
        workbook = load_workbook(file_path)
        sheet = workbook.active
        for first_row in sheet.iter_rows(max_row=1, values_only=True):
            if None in first_row:
                sheet.delete_rows(1)
                workbook.save(file_path)
                print(f'row has been removed in {file_path}')
            else:
                print(f'no rows to remove in {file_path}. GFIS data spreadsheet is OK.')

    @staticmethod
    def remove_temporary_files(file):
        """Best-effort deletion of an intermediate file created during processing."""
        try:
            os.remove(file)
        except FileNotFoundError:
            print('Nothing to remove.')
| DimaZimin/invoice_status_check | datafile.py | datafile.py | py | 2,386 | python | en | code | 0 | github-code | 36 |
28981514805 | from django.conf.urls import patterns, url, include
from django.contrib import admin
# Register every app's ModelAdmin classes with the admin site.
admin.autodiscover()
# Legacy (pre-Django 1.8) string-based URL configuration: the first argument
# is the view-function prefix applied to each relative view name below.
urlpatterns = patterns(
    'sokoban.views',
    url(r'^$', 'index', name='index'),
    url(r'^dashboard/$', 'dashboard', name='dashboard'),
    url(r'^home/$', 'home', name='home'),
    url(r'^403/$', 'alert_login_required', name='login_required'),
    url(r'^(?P<owner>.+)/projects/', 'list_projects', name='project_list'),
    # REST-style project endpoint, with and without an explicit action segment.
    url(r'^project/(?P<name>.*)/(?P<action>.*)/$', 'rest_project',
        name='project_rest'),
    url(r'^project/(?P<name>.*)/$', 'rest_project', name='project_rest'),
    url(r'^middleware/$', 'installed_middle_ware', name="middle_ware_list"),
    # Log retrieval: per-project, or all projects (project_name defaults to None).
    url(r'^log/(?P<project_name>.+)/$', 'get_log', name="get_log"),
    url(r'^logs/$', 'get_log', name="get_logs", kwargs={'project_name': None}),
    url(r'^admin/', include(admin.site.urls)),
    url('^accounts/', include('account.urls')),
    url('^scheduler/', include('scheduler.urls')),
)
| BusyJay/sokoban | src/sokoban/urls.py | urls.py | py | 978 | python | en | code | 3 | github-code | 36 |
11535743077 | import os
import time
# Interactive series-resistor calculator: repeatedly reads three resistor
# values, prints their sum (series equivalent), and exits when a 0 is entered.
requivalente=0.0
fora=True
while(fora):
    os.system("clear")
    resistores=[0.0,0.0,0.0]
    requivalente=0.0
    print("Cálculo de 3 resistores em Série.")
    print("Se um dos valores do resistore for 0 o programa será encerrado.")
    for i in range(0,len(resistores)):
        print("Dibite o R",i+1)
        # NOTE(review): insert() grows the pre-filled list instead of assigning
        # resistores[i]; it still works because index i is read back immediately.
        resistores.insert(i,float(input("Valor: ")))
        if resistores[i]==0:
            # Sentinel value 0 ends the program after a short pause.
            print("Você entrou valor nulo. O programa será finalizado.")
            time.sleep(2)
            fora=False
            break
        requivalente+=resistores[i]
    # Series equivalent resistance is the plain sum of the entered values.
    print("O resistor equivalente é: ", requivalente)
    time.sleep(5)
| oidanieldantas/ProjetosComPythonEArduino | pratica1_4.py | pratica1_4.py | py | 668 | python | pt | code | 0 | github-code | 36 |
30144105909 | import os
from playhouse.sqlite_ext import CSqliteExtDatabase
from peewee import DatabaseProxy
class PDatabaseFactory:
    """Lazily builds and caches peewee database instances by name, binding the
    shared DatabaseProxy to whichever instance was requested last."""

    def __init__(self, config):
        self.cfg = config
        self.instances = {}
        self.defaut_instance = self.cfg.get('db', 'database')
        self.sqlite_db_path = self.cfg.get('sqlite', 'path')
        self.database_proxy = DatabaseProxy()

    def get_instance(self, instance: str = None):
        """Return the database for *instance* (default from config), creating
        and caching it on first use, and point the proxy at it."""
        if not instance:
            instance = self.defaut_instance
        if instance not in self.instances.keys():
            if instance == 'sqlite':
                created = CSqliteExtDatabase(self.sqlite_db_path, autoconnect=False)
            elif instance == 'sqlite-app-test':
                # Test database lives next to the package root.
                PACKAGR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
                created = CSqliteExtDatabase(os.path.join(PACKAGR_DIR, 'mediastrends_test.db'))
            elif instance == 'memory':
                created = CSqliteExtDatabase(':memory:')
            else:
                raise ValueError("Instance %s not defined" % (instance))
            self.instances[instance] = created
        database = self.instances[instance]
        self.database_proxy.initialize(database)
        return database
| prise6/medias-trends | mediastrends/database/peewee/PDatabaseFactory.py | PDatabaseFactory.py | py | 1,277 | python | en | code | 2 | github-code | 36 |
class Solution(object):
    def findRelativeRanks(self, nums):
        """Return each athlete's rank label, preserving the input order.

        The top three scores map to "Gold Medal", "Silver Medal" and
        "Bronze Medal"; everyone else gets their 1-based rank as a string.

        Fixes: the original sorted the caller's list in place (mutating the
        argument) and made a redundant clone; we now rank over a sorted copy.

        :type nums: List[int]
        :rtype: List[str]
        """
        medals = ['Gold Medal', 'Silver Medal', 'Bronze Medal']
        # Map each score to its label; sorted() leaves `nums` untouched.
        rank_of = {}
        for place, score in enumerate(sorted(nums, reverse=True)):
            rank_of[score] = medals[place] if place < 3 else str(place + 1)
        # Emit labels in the original input order.
        return [rank_of[score] for score in nums]
if __name__ == '__main__':
    # Ad-hoc harness: run both sample cases and print expected vs. actual.
    inp_arr = [[5, 4, 3, 2, 1], [10, 3, 8, 9, 4]]
    out_arr = [["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"], ["Gold Medal","5","Bronze Medal","Silver Medal","4"]]
    s = Solution()
    for index, value in enumerate(inp_arr):
        res = s.findRelativeRanks(value)
        print('input s -> %s' % (value))
        print('output\nexpected \t%s\nactual \t%s\n' % (out_arr[index], res))
| acharyarajiv/leetcode | easy/python/relative-ranks.py | relative-ranks.py | py | 1,368 | python | en | code | 0 | github-code | 36 |
35351359576 | # -*- coding: utf-8 -*-
# original author: Ethosa
# modified by: x2nie
import re
from retranslator import Translator
class CSharpToPython(Translator):
    def __init__(self, codeString="", extra=[], useRegex=False):
        """initialize class
        Keyword Arguments:
            codeString {str} -- source code on C# (default: {""})
            extra {list} -- include your own rules (default: {[]})
            useRegex {bool} -- this parameter tells you to use regex (default: {False})
        NOTE(review): the mutable default `extra=[]` is only read (extended
        into a fresh rules list), never mutated, so the shared-default pitfall
        does not bite here.
        """
        self.codeString = codeString
        self.extra = extra
        # Public aliases so callers can use Transform()/compile()/translate()
        # interchangeably.
        self.Transform = self.compile = self.translate # callable objects
        # create little magic ...
        # Rule order matters: built-in RULES, then user extras, then LAST_RULES.
        self.rules = CSharpToPython.RULES[:]
        self.rules.extend(self.extra)
        self.rules.extend(CSharpToPython.LAST_RULES)
        # Translator.__init__(self, codeString, self.rules, useRegex)
        super(CSharpToPython, self).__init__(codeString, self.rules, useRegex)
    def translate(self, src=None):
        """Translate the stored (or given) C# source to Python and return it."""
        if not src is None:
            self.codeString = src
        # Collect property/method names so bare references can be rewritten
        # into explicit self.<name> accesses before rule application.
        self.expliciteSelf()
        self.codeString = self._resolveProperties(self.codeString)
        self.codeString = self._resolveMethods(self.codeString)
        # The base translation runs twice — presumably because some rules only
        # match text produced by earlier rewrites (TODO confirm necessity).
        ret = super(CSharpToPython, self).translate()
        ret = super(CSharpToPython, self).translate()
        # Post-passes: split `int a, b = x` chains, then qualify names of
        # generated @classmethods with `cls.`.
        ret = self.splitMultipleAssignments(ret)
        self._grepStatics(ret)
        ret = self._resolveStatics(ret)
        return ret
    def expliciteSelf(self):
        """Reset and rebuild the property/method substitution tables from the
        current C# source (populates self.properties and self.methods)."""
        self.properties = {}
        self.methods = {}
        self.statics = {}
        self._grepProperties()
        self._grepMethods()
        pass  # no-op left over from development
        # Debug output: prints every property pattern that was collected.
        for p in self.properties:
            print(p)
    def _grepProperties(self):
        """Find C# field declarations (`public Type name;`) and record, for each,
        a regex pattern -> 'self.<name>' replacement in self.properties."""
        rule = r"(?P<blockIndent>[ ]+)(?P<severity>(public|private|protected)[ ]+)(?P<returnType>[^\s]+[ ]+)(?P<methodName>[\w]+)[ ]*;"
        matches = re.finditer(rule, self.codeString, re.MULTILINE)
        for match in matches:
            # self.properties.append(match.groupdict()['methodName'])
            d = match.groupdict()
            rep = r'self.%(methodName)s' % d #*dict
            # Negative lookbehinds skip the declaration itself (preceded by the
            # type) and names that are already attribute accesses (preceded by '.').
            pat = r'\w*(?<!%(returnType)s)(?<!\.)%(methodName)s' % d #*dict
            # Escape brackets that may appear in array types like `int[]`.
            pat = pat.replace('[',r'\[').replace(']',r'\]')
            self.properties[pat] = rep
def _resolveProperties(self, src):
for pat, rep in self.properties.items():
src = re.sub(pat, rep, src, 0, re.MULTILINE)
return src
    def _grepMethods(self):
        """Find C# method declarations and record, for each, a regex pattern
        -> 'self.<name>' replacement in self.methods."""
        rule = r"(?P<start>[\s]+)(?P<severity>(?:public |private |protected |published |override |overload )+)(?P<returnType>\w+[ ]+)(?P<methodName>\w+)[ ]*\((?P<args>[\S ]*)\)"
        matches = re.finditer(rule, self.codeString, re.MULTILINE)
        for match in matches:
            # self.properties.append(match.groupdict()['methodName'])
            d = match.groupdict()
            rep = r'self.%(methodName)s' % d #*dict
            # Lookbehinds skip the declaration (preceded by the return type)
            # and names already qualified with '.'.
            pat = r'\w*(?<!%(returnType)s)(?<!\.)%(methodName)s' % d
            pat = pat.replace('[',r'\[').replace(']',r'\]')
            self.methods[pat] = rep
        # for name, s in match.groupdict().items():
        #     print(f"    Group {name} `{s}`")
def _resolveMethods(self, src):
for pat, rep in self.methods.items():
src = re.sub(pat, rep, src, 0, re.MULTILINE)
return src
    def splitMultipleAssignments(self, src):
        """Rewrite C#-style multi-variable assignments (`int a, b, c = x`) into
        one Python assignment per variable (`a = x`, `b = x`, ...)."""
        # return src
        pat = r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)(?P<varType>[\w\[\]\.]+)[ ]+(?P<varName1>[^, \(]+)(?P<varNames>(?:,[ ]*[\w]+)+)[ ]*=[ ]+(?P<right>[\w]+)"
        def rep(match):
            # Expand one matched declaration into per-variable assignment lines,
            # preserving the original line break and indentation.
            d = match.groupdict()
            start = d['start']
            indent = d['blockIndent']
            value = d['right']
            lines = []
            # varNames = d['varNames'].split(',')
            varNames = d['varName1'] + d['varNames']
            varNames = varNames.split(',')
            # print('varNames:', varNames)
            # varNames = [d['varName1']] + varNames
            for varName in varNames:
                varName = varName.strip()
                lines.append(f"{start}{indent}{varName} = {value}")
            # print('\n'.join(lines))
            return ''.join(lines)
        # src = re.sub(pat, rep, src, 10, re.MULTILINE)
        # Substitute one occurrence at a time, capped at 70 passes to avoid
        # looping forever on pathological input.
        replaceCount = 0
        src = self.r.sub(pat, rep, src, 1)
        while self.r.search(pat, src):
            if replaceCount+1 > 70:
                break
            replaceCount += 1
            src = self.r.sub(pat, rep, src, 1)
        return src
def _grepStatics(self, src):
rule = r"(?P<blockIndent>[ ]+)(?P<classmethod>\@classmethod)[\n\r]+(?P=blockIndent)def (?P<methodName>[\w]+)\("
matches = re.finditer(rule, src, re.MULTILINE)
for match in matches:
# self.properties.append(match.groupdict()['methodName'])
d = match.groupdict()
rep = r'cls.%(methodName)s' % d #*dict
pat = r'\w*(?<!def )(?<!\.)%(methodName)s' % d #*dict
# pat = pat.replace('[',r'\[').replace(']',r'\]')
self.statics[pat] = rep
def _resolveStatics(self, src):
for pat, rep in self.statics.items():
src = re.sub(pat, rep, src, 0, re.MULTILINE)
return src
RULES = [
(r"\)\s+\{", r"){", None, 0), #? strip `) {`
(r"\)[ ]+\{", r"){", None, 0), #? strip `) {`
(r"[ ]+\)", r"(", None, 0), #? strip ` )`
(r"\([ ]+", r"(", None, 0), #? strip `( `
(r"\{[ ]+", r"{", None, 0), #? strip `{ `
(r"\}[ ]+", r"}", None, 0), #? strip `{ `
# true
# True
(r"(?P<left>[\r\n]+(([^\"\r\n]*\"[^\"\r\n]+\"[^\"\r\n]*)+|[^\"\r\n]+))true", r"\g<left>True", None, 0),
# false
# False
(r"(?P<left>[\r\n]+(([^\"\r\n]*\"[^\"\r\n]+\"[^\"\r\n]*)+|[^\"\r\n]+))false", r"\g<left>False", None, 0),
# this
# self
(r"(?P<left>[\r\n]+(([^\"\r\n]*\"[^\"\r\n]+\"[^\"\r\n]*)+|[^\"\r\n]+))this", r"\g<left>self", None, 0),
# ||
# or
(r"\|\|", r"or", None, 0),
# &&
# and
(r"&&", r"and", None, 0),
# !(...)
# not (...)
(r"(?P<left>[\r\n]+(([^\"\r\n]*\"[^\"\r\n]+\"[^\"\r\n]*)+|[^\"\r\n]+))!\((?P<condition>[\S ]+)\)",
r"\g<left>not (\g<condition>)", None, 0),
# // ...
# # ...
(r"//([^\r\n]+)", r"#\1",None, 0),
#? for (int i = 0; i < 5; i+=2){
# ....
# }
# for i in range(0, 5, 2):
# ....
(r"(?P<blockIndent>[ ]*)for[ ]*\((?P<varType>[\S]+)[ ]*(?P<varName>[\w]+)[ ]*=[ ]*(?P<variable>[\S]+)[ ]*;[ ]*(?P=varName)[ ]*[\S]+[ ]*(?P<number>[\S ]+)[ ]*;[ ]*(?P=varName)[ ]*([\+\-]{1}=)[ ]*(?P<number2>[\S]+)[ ]*\)[ ]*{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}",
r'\g<blockIndent>for \g<varName> in range(\g<variable>, \g<number>, \g<number2>):\n\g<body>', None, 70),
#? for (int i = 0; i < width; i++){
# ....
# }
# for i in range(0, width):
# ....
# for ( int x = 0 ; x < width ; x ++) {
(r"(?P<blockIndent>[ ]*)for[ ]*\((?P<varType>[\S]+)[ ]*(?P<varName>[\w]+)[ ]*=[ ]*(?P<start>[^ ;]+)[ ]*;[ ]*(?P=varName)[ ]*\<[ ]*(?P<stop>[^;]+)[ ]*;[ ]*(?P=varName)[ ]*(?P<increment>[\+\-]+)[ ]*\){[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}",
r'\g<blockIndent>for \g<varName> in range(\g<start>, \g<stop>):\n\g<body>', None, 70),
# (r"(?P<blockIndent>[ ]*)for[ ]*\((?P<varType>[\S]+)[ ]*(?P<varName>\w+)[ ]*=[ ]*(?P<start>[\d]+)[ ]*;[ ]*(?P=varName)[ ]*\<[ ]*(?P<stop>\w+)[ ]*;[ ]*(?P=varName)[ ]*(?P<increment>[\+\-]+)[ ]*",
# r'\g<blockIndent>for \g<varName> in range(\g<start>, \g<stop>):HALO\g<increment>UHUY\nCOY', None, 0),
#? foreach (var i in array){
# ....
# }
# for i in array:
# ....
(r"(?P<blockIndent>[ ]*)foreach[ ]*\((?P<varType>[\S]+)[ ]*(?P<varName>[\S]+)[ ]*in[ ]*(?P<array>[\S]+)\){[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}", r'\g<blockIndent>for \g<varName> in \g<array>:\n\g<body>', None, 70),
# /* ... */
# """ ... """
(r"/\*(?P<comment>[\S\s]+)\*/", r'"""\g<comment>"""',None, 0),
#? else if (...){
# ....
# }
# elif ...:
# ....
(r"(?P<blockIndent>[ ]*)else if[ ]*\((?P<condition>[\S ]*)\){[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}",
r'\g<blockIndent>elif \g<condition>:\n\g<body>', None, 70),
#? if (...){
# ....
# }
# if ...:
# ....
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)if[ ]*\((?P<condition>.+?(?=\)\{))\)\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
# (r"\n(?P<blockIndent>[ ]*)if[ ]*\((?P<condition>[^\)]*)\)\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
r'\g<start>\g<blockIndent>if \g<condition>:\n\g<body>', None, 70),
#? else{
# ....
# }
# else:
# ....
(r"(?P<blockIndent>[ ]*)else[ ]*{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}", r'\g<blockIndent>else:\n\g<body>', None, 70),
#? (statement) ? val : alt;
#* val if (statement) else alt:
(r"\((?P<statement>[^\)]+)\)[ ]*\?[ ]*(?P<val>[^:]+):[ ]*(?P<alt>[^;]*);",
r'\g<val>if \g<statement> else \g<alt>', None, 0),
#? switch (map[x,y]) {
# match map[x,y]
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)switch[ ]*\((?P<args>[^\)]*)\)\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
r'\g<start>\g<blockIndent>match \g<args>:\n\g<body>', None, 70),
#? break;
#? case Foo.Bar:
#* case Foo.Bar:
(r"(?P<start>[\r\n]+)(?P<break>[ ]+break[ ]*;[\r\n]+)(?P<blockIndent>[ ]*)case[ ]+(?P<args>[^:]*):",
r'\g<start>\g<blockIndent>case \g<args>:', None, 0),
#? break;
#? default:
#* case _:
(r"(?P<start>[\r\n]+)(?P<break>[ ]+break[ ]*;[\r\n]+)(?P<blockIndent>[ ]*)default[ ]*:",
r'\g<start>\g<blockIndent>case _:', None, 0),
#? case _:
#? break;
#*
(r"(?P<start>[\r\n]+)(?P<break>[ ]*case _:[\r\n]+)(?P<blockIndent>[ ]*)break[ ]*;",
r'', None, 0),
#? do {} while (...)
# while ...:
# ....
(r"(?P<blockIndent>[ ]*)do\s*{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}[ ]*while[ ]*\((?P<condition>[\S ]*)\)",
r'\g<blockIndent>while True:\n\g<body>\g<blockIndent> if not (\g<condition>):\n\g<blockIndent> break', None, 70),
#? while (...){
# ....
# }
# while ...:
# ....
(r"(?P<blockIndent>[ ]*)while[ ]*\((?P<condition>[\S ]*)\){[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}",
r'\g<blockIndent>while \g<condition>:\n\g<body>', None, 70),
#? interface IInterface{
# ....
# }
# class IInterface:
# ....
(r"(?P<blockIndent>[ ]*)interface[ ]*(?P<interfaceName>[a-zA-Z0-9_]+)[ ]*{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}",
r'\g<blockIndent>class \g<interfaceName>:\n\g<body>', None, 70),
(r"(?P<blockIndent>[ ]*)(?P<severity>(?:public |private |protected |published |override |overload |static )+)?class[ ]*(?P<interfaceName>[a-zA-Z0-9_]+)[ ]*{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)}",
r'\g<blockIndent>class \g<interfaceName>:\n\g<body>', None, 70),
#? interface method
# void test();
# def test():
# pass
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)(?P<returnType>\w+)[ ]+(?P<methodName>\w+)[ ]*\((?P<args>[\S ]*)\)\;",
r'\g<start>\g<blockIndent>def \g<methodName>(self, \g<args>):\n\g<blockIndent> pass', None, 0),
#? private static Cell[][] template1 = new Cell[][]{ }
#* @classmethod
#* def template1():
#* return [
#* ]
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)(?P<severity>(?:public |private |protected |published |override |overload )+)static (?P<returnType>[\w]+[\w\<\>\[\]]+)[ ]+(?P<methodName>\w+)[ ]*=(?P<args>[^\{]*)\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
r'\g<start>\g<blockIndent>@classmethod\n\g<blockIndent>def \g<methodName>(cls):\n\g<blockIndent> return [\n\g<body>\n\g<blockIndent> ]', None, 70),
#? public static Template getRandom() {
#* @classmethod
#* def getRandom():
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)(?P<severity>(?:public |private |protected |published |override |overload )+)static (?P<returnType>[\w]+[\w\<\>\[\]]+)[ ]+(?P<methodName>\w+)[ ]*\((?P<args>[^\)]*)\)\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
r'\g<start>\g<blockIndent>@classmethod\n\g<blockIndent>def \g<methodName>(cls, \g<args>):\n\g<body>', None, 70),
#? new Cell [] {Cell.Null, ..},
#* new Cell [] {Cell.Null, ..},
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]+)new (?P<returnType>[^\{]+)\{(?P<valueArray>[^\}]+)\}",
r"\g<start>\g<blockIndent>[\g<valueArray>]", None, 0),
#? public void method(){ }
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)(?P<severity>(?:public |private |protected |published |override |overload )+)(?P<returnType>\w+)[ ]+(?P<methodName>\w+)[ ]*\((?P<args>[\S ]*)\)[ ]*\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
r'\g<start>\g<blockIndent>def \g<methodName>(self, \g<args>):\n\g<body>', None, 70),
#? public ClassName(){ }
#* def __init__( )
(r"(?P<start>[\r\n]+)(?P<blockIndent>[ ]*)public (?P<methodName>\w+)[ ]*\((?P<args>[\S ]*)\)\{[\r\n]+(?P<body>(?P<indent>[ ]*)[^\r\n]+[\r\n]+((?P=indent)[^\r\n]+[\r\n]+)*)(?P=blockIndent)\}",
r'\g<start>\g<blockIndent>def __init__(self, \g<args>):\n\g<body>', None, 70),
#? property / instance var
(r"(?P<blockIndent>[ ]+)(?P<severity>(public|private|protected)[ ]+)(?P<returnType>[^\s]+)[ ]+(?P<methodName>[\w]+)[ ]*",
r"\g<blockIndent>\g<methodName> = None", None, 0),
#? cleanup
(r", [\w\]\[]+ (?P<parameterName>[\w]+)", r", \g<parameterName>", None, 0),
(r"\(self, \):", r"(self):", None, 0),
# garbage delete
# (r"\n\n", r"\n", None, 0),
(r"\n\n\n", r"\n\n", None, 0),
(r"(?P<blockIndent>[ ]*)(?P<blockName>[a-z]+)[ ]*\([ ]*(?P<other>[\S ]*)[ ]*\){[\s]*}",
r"\g<blockIndent>\g<blockName> \g<other>:\n\g<blockIndent> pass", None, 0),
#? int i = 0;
# i = 0;
(r"(?P<blockIndent>[ ]*)(?P<varType>[\w\[\]\.]+)[ ]+(?P<varName>[\w\.]+)[ ]*=[ ]+(?P<right>[\w\'\"]+)",
r'\g<blockIndent>\g<varName> = \g<right>',None, 0),
# int[] i = {1, 2, 3};
# i = [1, 2, 3];
(r"(?P<blockIndent>[ ]*)(?P<varName>[a-zA-Z0-9_]+)[ ]*=[ ]*{(?P<list>[\S ]+)}",
r'\g<blockIndent>\g<varName> = [\g<list>]',None, 0),
#? i++
# i+=1
(r"\+\+", r" += 1",None, 0),
#? i--
# i-=1
(r"\-\-", r" -= 1",None, 0),
#? (int)abc
#* int(abc)
(r"\((?P<varType>[a-zA-Z0-9_]+)\)(?P<varName>[a-zA-Z0-9_\.]+)",
r"\g<varType>(\g<varName>)",None, 0),
#? 98.789f
#* 98.789
(r"(?P<float>\d+\.\d)f",
r"\1",None, 0),
#? range(0, abc[]/2) | range(0, r.Next(5));
#* range(abc)
# (r"\((?P<varType>[a-zA-Z0-9_]+)\)(?P<varName>[a-zA-Z0-9_]+)",
# r"\g<varType>(\g<varName>)",None, 0),
#? new Cell[rows,cols]
#* [[0 for i in range(cols)] for j in range(rows)]
(r"new (?P<varName>[a-zA-Z0-9_]+)\[(?P<rows>[a-zA-Z0-9_\.]+),(?P<cols>[a-zA-Z0-9_\.]+)\]",
r"[[0 for k in range(\g<cols>)] for j in range(\g<rows>)]",None, 0),
# r"[[0 for i\g<cols> in range(\g<cols>)] for j\g<rows> in range(\g<rows>)]",None, 0),
#? new Cell[rows.length][]
#* [[] for j in range(rows)]
(r"new (?P<varName>[a-zA-Z0-9_]+)\[(?P<rows>[^\,]]+)\]\[\]",
r"[[] for j in range(\g<rows>)]",None, 0),
#? new Cell[content[x].Length];
#* [[] for j in range(rows)]
#? ((?P<condition>.+?(?=\)\{))\)\{[\r\n]+(?P<body>(?P<ind
(r"new (?P<varName>[a-zA-Z0-9_]+)\[(?P<rows>.+?(?=\];))\];",
r"[None for j in range(\g<rows>)]",None, 0),
#? new ClassName()
#* ClassName()
(r"new (?P<ClassName>[a-zA-Z0-9_]+)\(",
r"\g<ClassName>(",None, 0),
# better view
# b==a
# b == a
# (r"(\S)(==|!=|<=|<|>|>=|=)(\S)", r"\1 \2 \3", None, 0),
# (r"(\S)[ ]*(==|!=|<=|<|>|>=|=)[ ]*(\S)", r"\1 \2 \3", None, 0),
# (r"not \(([\S ]+)(?!and|or)([\S ]+)\)", r"not \1\2", None, 0),
# ;\n
# \n
(r"(?P<indent>[ ]*)(?P<line>[\S \t]*);\n", r"\g<indent>\g<line>\n",None, 0),
# ;
# \n
# (r"(?P<indent>[ ]*)(?P<line>[\S\t]*);[^\r\n]*;",
# r"\g<indent>\g<line>\n\g<indent>",None, 0),
# ;
# \n
(r"(?P<indent>[ ]*)(?P<line>[\S \t]*);[^\r\n]*#", r"\g<indent>\g<line> #",None, 0),
#? map[x,0] = --> map[x][0] =
#? self.map[y,self.width-1] --> self.map[y][self.width-1]
# \n
# (r"(?P<var>[\w\.]+)\[(?P<one>[^,]+),(?P<two>[^\]]+)\][ ]*=",
# r"\g<var>[\g<one>][\g<two>] =",None, 0),
# (r"(?P<space>[ ]+)\[(?P<one>[\w\.\-]+),(?P<two>[^\]]+)\](?P<right>[^,]+)",
(r"(?P<space>[ \(]+)(?P<var>[\w\.]+)\[(?P<one>[^,]+),(?P<two>[^\]]+)\]",
r"\g<space>\g<var>[\g<one>][\g<two>]",None, 0),
# a.length
# len(a)
(r"([a-zA-Z0-9_\[\]\.]+)[ ]*\.[ ]*[lL]ength", r"len(\1)", None, 0),
]
# Final clean-up pass applied after the main C#->Python conversion rules.
# Each entry is a tuple (regex pattern, replacement, third field, fourth field).
# The third field is always None here and the fourth is an integer; their exact
# meaning is defined by the rule-application code elsewhere in this file —
# TODO(review): confirm (looks like a flags slot and a repeat/priority count).
LAST_RULES = [
    # python methods:
    (r",[ ]*\):", r"):", None, 0),
    (r"Array\.Copy\(", r"ArrayCopy(", None, 0),
    # Map .NET console output onto print().
    (r"Console\.WriteLine\(", r"print(", None, 0),
    (r"Console\.Write\((?P<args>[^\)]+)\)", r"print(\g<args>, end='')", None, 0),
    # Drop C# 'using' directives entirely.
    (r"using[ ]+[^\n\r]+", r"", None, 0),
    # (r"\A", r"import random\nimport math\nimport sys\nfrom utils import *", None, 0),
    # Prepend the shared helper import at the very start of the output (\A anchor).
    (r"\A", r"from utils import *", None, 0),
    # a.contains(b) -> b in a ; a.equals(b) -> a == b
    (r"([a-zA-Z0-9_]+)\.contains\(([\S ]+)\)", r"\2 in \1", None, 0),
    (r"([a-zA-Z0-9_]+)\.equals\(([\S ]+)\)", r"\1 == \2", None, 0),
    # math module: translate .NET Math.* calls to Python's math module.
    (r"Math\.Abs", r"abs", None, 0),
    (r"Math\.Round", r"round", None, 0),
    (r"Math\.PI", r"math.pi", None, 0),
    (r"Math\.E", r"math.e", None, 0),
    # Lower-case the first letter of the remaining Math.* names letter-by-letter
    # (e.g. Math.Sqrt -> math.sqrt); one rule per leading capital.
    (r"Math\.A(?P<name>[a-z]+)", r"math.a\g<name>", None, 0),
    (r"Math\.B(?P<name>[a-z]+)", r"math.b\g<name>", None, 0),
    (r"Math\.C(?P<name>[a-z]+)", r"math.c\g<name>", None, 0),
    (r"Math\.D(?P<name>[a-z]+)", r"math.d\g<name>", None, 0),
    (r"Math\.E(?P<name>[a-z]+)", r"math.e\g<name>", None, 0),
    (r"Math\.F(?P<name>[a-z]+)", r"math.f\g<name>", None, 0),
    (r"Math\.M(?P<name>[a-z]+)", r"math.m\g<name>", None, 0),
    (r"Math\.R(?P<name>[a-z]+)", r"math.r\g<name>", None, 0),
    (r"Math\.P(?P<name>[a-z]+)", r"math.p\g<name>", None, 0),
    (r"Math\.S(?P<name>[a-z]+)", r"math.s\g<name>", None, 0),
    (r"Math\.T(?P<name>[a-z]+)", r"math.t\g<name>", None, 0),
    # random module:
    # (r"new[ ]+Random\(\)\.Next\((?P<first>\d+)[ ]*,[ ]*(?P<second>\d+)\)", r"random.randint(\g<first>, \g<second>+1)", None, 0),
    # (r"new[ ]+Random\(\)\.NextDouble\(\)", r"random.uniform(0, 1)", None, 0)
] | x2nie/PyProceduralSokoban | cs2py.py | cs2py.py | py | 20,455 | python | en | code | 0 | github-code | 36 |
2962500565 | from prettytable import PrettyTable
import sympy as sp
sp.init_printing(use_unicode=True)
def raices_multiples(x0, tolerancia, niter):
    """Multiple-root Newton variant and print an iteration table.

    Iterates x_{k+1} = x_k - f*f' / (f'^2 - f*f'') starting at *x0* until the
    absolute step is <= *tolerancia* or *niter* iterations were performed.
    Prints a PrettyTable with i, x, f, f', f'' and the absolute/relative errors.
    """
    x = sp.symbols('x')
    # Alternative test function: x**4 - 18*x**2 + 81
    f = x**3 - x**2 - x + 1 + sp.sin(x-1)**2
    df = f.diff(x)    # first derivative
    ddf = df.diff(x)  # second derivative

    def evaluate(expr, point):
        return expr.evalf(subs={x: point})

    tabla = PrettyTable(['i', 'xn', 'f(xn)', 'df(xn)', 'ddf(xn)', 'ErrorAbs', 'ErrorRel'])
    xn = x0
    fx = evaluate(f, xn)
    dfx = evaluate(df, xn)
    ddfx = evaluate(ddf, xn)
    tabla.add_row([0, xn, fx, dfx, ddfx, "No hay error", "No hay error"])

    paso = 0
    error = tolerancia + 1.0
    while paso < niter and error > tolerancia:
        x_sig = xn - (fx * dfx) / (dfx**2 - fx*ddfx)
        error = abs(x_sig - xn)
        xn = x_sig
        fx = evaluate(f, xn)
        dfx = evaluate(df, xn)
        ddfx = evaluate(ddf, xn)
        paso = paso + 1
        tabla.add_row([paso, xn, fx, dfx, ddfx, error, abs(error/xn)])
    print(tabla)
# Demo run: seek the multiple root near x = 1 starting from 0.5,
# with tolerance 1e-5 and at most 100 iterations.
raices_multiples(0.5, 1e-5, 100)
| jvalen92/Analisis-Numerico | SolucionEcuancionesUnaVariable/raices_multples.py | raices_multples.py | py | 1,051 | python | en | code | 1 | github-code | 36 |
27688934832 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeClassifier
from evaluate_model import evaluate_model
# CLI: <dataset path> <number of random hyper-parameter draws> <RNG seed>
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])

# Seed NumPy so the random hyper-parameter search is reproducible.
np.random.seed(random_seed)

# Pipeline: robust feature scaling followed by a decision tree.
pipeline_components = [RobustScaler, DecisionTreeClassifier]
pipeline_parameters = {}

# One random draw per candidate pipeline for each hyper-parameter.
min_impurity_decrease_values = np.random.exponential(scale=0.01, size=num_param_combinations)
max_features_values = np.random.choice(list(np.arange(0.01, 1., 0.01)) + ['sqrt', 'log2', None], size=num_param_combinations)
criterion_values = np.random.choice(['gini', 'entropy'], size=num_param_combinations)
max_depth_values = np.random.choice(list(range(1, 51)) + [None], size=num_param_combinations)

all_param_combinations = zip(min_impurity_decrease_values, max_features_values, criterion_values, max_depth_values)
# Fixed random_state inside the tree keeps each candidate's fit deterministic.
pipeline_parameters[DecisionTreeClassifier] = \
   [{'min_impurity_decrease': min_impurity_decrease, 'max_features': max_features, 'criterion': criterion, 'max_depth': max_depth, 'random_state': 324089}
     for (min_impurity_decrease, max_features, criterion, max_depth) in all_param_combinations]

evaluate_model(dataset, pipeline_components, pipeline_parameters)
| rhiever/sklearn-benchmarks | model_code/random_search/DecisionTreeClassifier.py | DecisionTreeClassifier.py | py | 1,283 | python | en | code | 204 | github-code | 36 |
37662096318 | from PyQt5.QtWidgets import QLineEdit, QToolButton, QWidget, QFileDialog, QDialog, QTreeWidget, QRadioButton, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QMessageBox
from pulse.utils import error
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
from pulse.postprocessing.plot_acoustic_data import get_acoustic_frf
import matplotlib.pyplot as plt
import numpy as np
class SnaptoCursor(object):
    """Matplotlib cursor that snaps to the nearest data point of a curve.

    Draws a vertical and a horizontal guide line plus a square marker on *ax*
    and, on every mouse move, snaps them to the plotted sample closest to the
    pointer, updating a small legend with the cursor coordinates.
    """
    def __init__(self, ax, x, y, show_cursor):
        # ax: matplotlib Axes holding the curve; x, y: the plotted data arrays.
        # show_cursor=False makes the instance a no-op.
        self.ax = ax
        self.x = x
        self.y = y
        self.show_cursor = show_cursor
        if show_cursor:
            self.vl = self.ax.axvline(x=x[0], color='k', alpha=0.3, label='_nolegend_') # the vertical line
            self.hl = self.ax.axhline(y=y[0], color='k', alpha=0.3, label='_nolegend_') # the horizontal line
            self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
            # self.marker.set_label("x: %1.2f // y: %4.2e" % (self.x[0], self.y[0]))
            # plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
    def mouse_move(self, event):
        # Callback for matplotlib's 'motion_notify_event'.
        if self.show_cursor:
            if not event.inaxes: return
            x, y = event.xdata, event.ydata
            if x>=np.max(self.x): return  # pointer is past the end of the data
            # NOTE(review): searchsorted assumes self.x is sorted ascending —
            # true for a frequency axis; confirm for other uses.
            indx = np.searchsorted(self.x, [x])[0]
            x = self.x[indx]
            y = self.y[indx]
            self.vl.set_xdata(x)
            self.hl.set_ydata(y)
            self.marker.set_data([x],[y])
            self.marker.set_label("x: %1.2f // y: %1.2f" % (x, y))
            plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
            self.ax.figure.canvas.draw_idle()
class PlotAcousticFrequencyResponseInput(QDialog):
def __init__(self, mesh, analysisMethod, frequencies, solution, list_node_ids, *args, **kwargs):
super().__init__(*args, **kwargs)
uic.loadUi('pulse/uix/user_input/ui/plotAcousticFrequencyResponseInput.ui', self)
icons_path = 'pulse\\data\\icons\\'
self.icon = QIcon(icons_path + 'pulse.png')
self.setWindowIcon(self.icon)
self.userPath = os.path.expanduser('~')
self.save_path = ""
self.mesh = mesh
self.analysisMethod = analysisMethod
self.frequencies = frequencies
self.solution = solution
self.nodeID = 0
self.imported_data = None
self.writeNodes(list_node_ids)
self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
self.toolButton_ExportResults.clicked.connect(self.ExportResults)
self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
self.checkBox_dB = self.findChild(QCheckBox, 'checkBox_dB')
self.pushButton = self.findChild(QPushButton, 'pushButton')
self.pushButton.clicked.connect(self.check)
self.exec_()
def reset_imported_data(self):
self.imported_data = None
self.messages("The plot data has been reseted.")
def writeNodes(self, list_node_ids):
text = ""
for node in list_node_ids:
text += "{}, ".format(node)
self.lineEdit_nodeID.setText(text)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.check()
elif event.key() == Qt.Key_Escape:
self.close()
def radioButtonEvent_YAxis(self):
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
def radioButtonEvent_save_data(self):
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
def messages(self, msg, title = " Information "):
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Information)
msg_box.setText(msg)
msg_box.setWindowTitle(title)
msg_box.exec_()
def choose_path_import_results(self):
self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Dat Files (*.dat)')
self.import_name = basename(self.import_path)
self.lineEdit_ImportResultsPath.setText(str(self.import_path))
def ImportResults(self):
self.imported_data = np.loadtxt(self.import_path, delimiter=",")
self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
self.messages("The results has been imported.")
def choose_path_export_results(self):
self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
self.save_name = basename(self.save_path)
self.lineEdit_SaveResultsPath.setText(str(self.save_path))
def check(self, export=False):
try:
tokens = self.lineEdit_nodeID.text().strip().split(',')
try:
tokens.remove('')
except:
pass
node_typed = list(map(int, tokens))
if len(node_typed) == 1:
try:
self.nodeID = self.mesh.nodes[node_typed[0]].external_index
except:
message = [" The Node ID input values must be\n major than 1 and less than {}.".format(len(self.nodes))]
error(message[0], title = " INCORRECT NODE ID INPUT! ")
return
elif len(node_typed) == 0:
error("Please, enter a valid Node ID!")
return
else:
error("Multiple Node IDs", title="Error Node ID's")
return
except Exception:
error("Wrong input for Node ID's!", title="Error Node ID's")
return
if self.checkBox_dB.isChecked():
self.scale_dB = True
elif not self.checkBox_dB.isChecked():
self.scale_dB = False
if not export:
self.plot()
def ExportResults(self):
if self.lineEdit_FileName.text() != "":
if self.save_path != "":
self.export_path_folder = self.save_path + "/"
else:
error("Plese, choose a folder before trying export the results!")
return
else:
error("Inform a file name before trying export the results!")
return
self.check(export=True)
freq = self.frequencies
self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
if self.save_Absolute:
response = get_acoustic_frf(self.mesh, self.solution, self.nodeID)
header = "Frequency[Hz], Real part [Pa], Imaginary part [Pa], Absolute [Pa]"
data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
elif self.save_Real_Imaginary:
response = get_acoustic_frf(self.mesh, self.solution, self.nodeID)
header = "Frequency[Hz], Real part [Pa], Imaginary part [Pa]"
data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
self.messages("The results has been exported.")
def dB(self, data):
p_ref = 20e-6
return 20*np.log10(data/p_ref)
def plot(self):
fig = plt.figure(figsize=[12,7])
ax = fig.add_subplot(1,1,1)
frequencies = self.frequencies
response = get_acoustic_frf(self.mesh, self.solution, self.nodeID, absolute=self.plotAbs, real=self.plotReal, imag=self.plotImag)
if self.scale_dB :
if self.plotAbs:
response = self.dB(response)
ax.set_ylabel("Acoustic Response - Absolute [dB]", fontsize = 14, fontweight = 'bold')
else:
if self.plotReal:
ax.set_ylabel("Acoustic Response - Real [Pa]", fontsize = 14, fontweight = 'bold')
elif self.plotImag:
ax.set_ylabel("Acoustic Response - Imaginary [Pa]", fontsize = 14, fontweight = 'bold')
self.messages("The dB scalling can only be applied with the absolute \nY-axis representation, therefore, it will be ignored.")
else:
if self.plotAbs:
ax.set_ylabel("Acoustic Response - Absolute [Pa]", fontsize = 14, fontweight = 'bold')
elif self.plotReal:
ax.set_ylabel("Acoustic Response - Real [Pa]", fontsize = 14, fontweight = 'bold')
elif self.plotImag:
ax.set_ylabel("Acoustic Response - Imaginary [Pa]", fontsize = 14, fontweight = 'bold')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
#cursor = Cursor(ax)
cursor = SnaptoCursor(ax, frequencies, response, show_cursor=True)
plt.connect('motion_notify_event', cursor.mouse_move)
legend_label = "Acoustic Pressure at node {}".format(self.nodeID)
if self.imported_data is None:
if self.plotAbs and not self.scale_dB:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
else:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
_legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
else:
data = self.imported_data
imported_Xvalues = data[:,0]
if self.plotAbs:
imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
if self.scale_dB :
imported_Yvalues = self.dB(imported_Yvalues)
elif self.plotReal:
imported_Yvalues = data[:,1]
elif self.plotImag:
imported_Yvalues = data[:,2]
if self.plotAbs and not self.scale_dB:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2)
second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
else:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
_legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
plt.gca().add_artist(_legends)
ax.set_title(('Frequency Response: {} Method').format(self.analysisMethod), fontsize = 18, fontweight = 'bold')
ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
plt.show() | atbrandao/OpenPulse_f | pulse/uix/user_input/plotAcousticFrequencyResponseInput.py | plotAcousticFrequencyResponseInput.py | py | 13,604 | python | en | code | null | github-code | 36 |
24222902053 | import pandas as pd
import corrAnaModule as cam
import numpy as np
pd.options.mode.chained_assignment = None
def count_ratio_every_col_obj(df_raw: pd.DataFrame):
    """Return the columns whose most frequent value covers < 95% of the rows.

    Columns dominated by a single value (share >= 0.95) carry almost no
    information for the downstream correlation ranking and are dropped.
    """
    informative_cols = []
    n_rows = df_raw.shape[0]
    for column in df_raw.columns:
        value_share = df_raw[column].value_counts() / n_rows
        # print(column, max(value_share))
        if max(value_share) < 0.95:
            informative_cols.append(column)
    return informative_cols
def data_generation():
    """Load equipment-history and measurement-label CSVs, join them on SHEET_ID,
    derive a binary defect label (|Y| >= 1 -> 1) and return the informative
    feature columns plus 'label'.

    NOTE(review): file paths are hard-coded to a local F: drive, and columns
    containing the letter 'R' are dropped by substring match — confirm both
    against the actual data source.
    """
    df_equip_history=pd.read_csv("F:\\YIELD\\YoudaOptronics\\Archive(1)\\equip_history.csv",engine="python",sep=',',encoding='GBK')
    df_equip_history['SHEET_ID']=df_equip_history['锘縎HEET_ID'] # modify unidentifiable columns (BOM-mangled header name)
    df_equip_history.drop(columns=['锘縎HEET_ID'],inplace=True)
    df_equip_history.fillna('-1',inplace=True)
    col_list=[]
    # Keep only columns whose name does not contain the letter 'R'.
    for col in list(df_equip_history.columns):
        if 'R' not in col:
            col_list.append(col)
    df_measure_labels=pd.read_csv("F:\\YIELD\\YoudaOptronics\\Archive(1)\\measure_labels.csv",engine="python",sep=',',encoding='GBK')
    df_measure_labels.dropna(inplace=True)
    df_temp=pd.merge(df_equip_history[col_list],df_measure_labels[['SHEET_ID','Y']],how='inner',on='SHEET_ID')
    df_temp['label']=0
    df_temp['label'][(df_temp['Y']>=1)|(df_temp['Y']<=-1)]=1
    col_list.remove('SHEET_ID')
    # Translated comment: SHEET_IDs repeat heavily; any sheet that ever showed
    # a defect is taken as a positive.  Label-1 rows are concatenated first so
    # the drop_duplicates('SHEET_ID', 'first') below keeps the positive record.
    df_label1=df_temp[df_temp['label']==1].copy()
    df_label1.drop_duplicates(inplace=True)
    df_label0=df_temp[df_temp['label']==0].copy()
    df_label0.drop_duplicates(inplace=True)
    df_temp=pd.concat([df_label1,df_label0],axis=0)
    df_temp.drop_duplicates('SHEET_ID','first',inplace=True)
    # Drop near-constant columns; NOTE(review): if the 95% filter already
    # removed SHEET_ID or Y, these remove() calls would raise ValueError.
    columns_list=count_ratio_every_col_obj(df_temp)
    columns_list.remove('SHEET_ID')
    columns_list.remove('Y')
    return df_temp[columns_list]
def correlation_index_rank(df_cluster, corr_funciton_name):
    """Rank all feature columns by one correlation metric from corrAnaModule.

    *corr_funciton_name* is an attribute name on ``cam`` ending in 'index'
    (e.g. 'iv_index'); the returned frame has a 'col' column and a metric
    column (name with the trailing '_index' stripped), sorted descending.
    """
    corr_fn = getattr(cam, corr_funciton_name)
    feature_cols = list(df_cluster.columns)
    feature_cols.remove('label')
    scores = [corr_fn(df_cluster, col) for col in feature_cols]
    metric = corr_funciton_name[:-6]  # strip the '_index' suffix
    ranked = pd.DataFrame({'col': feature_cols, metric: scores})
    ranked.sort_values(by=metric, ascending=False, inplace=True)
    ranked.index = range(ranked.shape[0])
    # print(ranked)
    return ranked
def final_rank_confidence(df_cluster,corr_func_list):
    """Combine several correlation-metric rankings into one final ranking.

    Each metric name in *corr_func_list* (attributes of corrAnaModule) ranks
    the feature columns; the final score is the geometric mean of the rank
    positions (lower = stronger relation to 'label').  Prints and returns the
    sorted frame.
    """
    df_rank = pd.DataFrame({'col': list(df_cluster.columns)})
    # Rank by original column order.  NOTE(review): this 'time_index' column
    # is also folded into the geometric mean below — confirm that is intended.
    df_rank['time_index'] = df_rank.index + 1
    for func in corr_func_list:
        df_rank_temp = correlation_index_rank(df_cluster, corr_funciton_name=func)
        df_rank_temp[func] = df_rank_temp.index + 1  # rank position per metric
        df_rank = pd.merge(df_rank, df_rank_temp[['col',func]], on='col')
    rank_col_list = list(df_rank.columns)
    rank_col_list.remove('col')
    r = len(rank_col_list)
    df_rank['final_rank']=1  # geometric mean: (R1*R2*...*Rn)**(1/n)  (translated)
    for _ in rank_col_list:
        df_rank['final_rank'] = df_rank['final_rank']*df_rank[_]
    df_rank['final_rank']=pow(df_rank['final_rank'], 1 / r)
    df_rank.sort_values('final_rank', ascending=True, inplace=True)
    df_rank.index=range(df_rank.shape[0])
    print(df_rank)
    return df_rank
# Build the dataset and rank features by the IV and chi-square metrics.
df_cluster = data_generation()
final_rank_confidence(df_cluster,['iv_index','chi_square_index'])
# gini_index=getattr(cam, 'gini_index')
#
# for _ in
# df_rank=gini_index(df_cluster,) | samzzyy/RootCauseAnalysisOfProductionLineFailure | RootCauseAna.py | RootCauseAna.py | py | 3,482 | python | en | code | 5 | github-code | 36 |
6679921790 | #!/usr/bin/env python
"""
Download the given htmx version and the extensions we're using.
"""
import argparse
import subprocess
from typing import List, Optional
def main(argv: Optional[List[str]] = None) -> int:
    """Fetch htmx plus the extensions we use, for the requested version."""
    parser = argparse.ArgumentParser()
    parser.add_argument("version", help="e.g. 1.0.1")
    version: str = parser.parse_args(argv).version

    for asset in ("htmx.js", "ext/debug.js", "ext/event-header.js"):
        download_file(version, asset)

    print("✅")
    return 0
def download_file(version: str, name: str) -> None:
    """Download one htmx dist file for *version* into example/static/."""
    print(f"{name}...")
    url = f"https://unpkg.com/htmx.org@{version}/dist/{name}"
    destination = f"example/static/{name}"
    subprocess.run(
        ["curl", "--silent", "--location", url, "-o", destination],
        check=True,
    )
    # Minified dist files ship without a trailing newline; append one so
    # pre-commit's end-of-file fixer has nothing to change.
    if name.endswith(".min.js"):
        with open(destination, "a") as fp:
            fp.write("\n")
# Run as a script: propagate main()'s integer return code as the exit status.
if __name__ == "__main__":
    raise SystemExit(main())
| hernantz/django-htmx-demo | download_htmx.py | download_htmx.py | py | 1,146 | python | en | code | 5 | github-code | 36 |
33873457342 | import tkinter as tk
import tkinter.font as font
from PIL import Image, ImageTk
import QuikSpace as qs
import Add_Task_School_Work as atsw
import Review_Task_School_Work as rtsw
window=""
def quikSpace():
    """Close the current window and return to the QuikSpace main menu."""
    global window
    window.destroy()
    qs.quikspace()
def ADD_Task_School_Work():
    """Close the current window and open the 'add school/work task' screen."""
    global window
    window.destroy()
    atsw.add_task_school_work()
def Review_Task_School_Work():
    """Close the current window and open the 'review school/work tasks' screen."""
    global window
    window.destroy()
    rtsw.review_task_school_work()
def school_Work():
    """Build and run the School/Work menu window (Add / Review / Main Menu)."""
    global window
    window=tk.Tk()
    window.geometry("780x670")
    window.resizable(0,0)
    window.title("School/Work")
    window.config(bg="#FFD9B3")
    font1=font.Font(family="Times New Roman",size=35,weight='bold')#Created the font
    font2=font.Font(family="Times New Roman",size=15)#Created the font
    font3=font.Font(family="Courier New",size=17)
    title=tk.Label(window, text="QuikSpace",font=font1, bg="#FFD9B3")#Created the title
    title.place(x=260,y=25)
    image=Image.open("Scheduler/logo.png")#Loads the image in RAM
    image=image.resize((90,60))
    photo=ImageTk.PhotoImage(image)#Converts the image into the Tkinter image format
    image_label=tk.Label(window,image=photo)
    image_label.place(x=440,y=20)
    slogan=tk.Label(window, text="All your tasks, in one app", font=font2, bg="#FFD9B3")
    slogan.place(x=315,y=90)
    # Navigation buttons: each destroys this window and opens the next screen.
    add_task=tk.Button(window, text="Add Task",width=25, height=5, highlightbackground="#B3D9FF",fg="white",command=ADD_Task_School_Work)
    add_task.place(x=285,y=170)
    review_tasks=tk.Button(window, text="Review Tasks",width=25, height=5, highlightbackground="#B3D9FF",fg="white",command=Review_Task_School_Work)
    review_tasks.place(x=285,y=320)
    main_menu=tk.Button(window, text="Main Menu",width=25, height=5, highlightbackground="#B3D9FF",fg="white",command=quikSpace)
    main_menu.place(x=285,y=470)
    label=tk.Label(window, text="School/Work", font=font2, bg="#FFD9B3")
    label.place(x=360,y=645)
    credits=tk.Label(window, text="Created By: Smyan and Rithwik", font=font3, bg="#FFD9B3")
    credits.place(x=470,y=640)
    window.mainloop() | math12345678/QuikSpace | School_Work.py | School_Work.py | py | 2,133 | python | en | code | 0 | github-code | 36 |
74952188583 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 24 16:58:30 2021
@author: HP
"""
def genPrimes():
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    Incremental trial division: each candidate n is tested only against the
    already-found primes p with p*p <= n (the original divided by *every*
    known prime, doing far more work per candidate for the same output).
    """
    primes = [2]
    yield 2
    n = 2
    while True:
        n += 1
        is_prime = True
        for p in primes:
            if p * p > n:
                # No prime divisor <= sqrt(n) exists, so n is prime.
                break
            if n % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(n)
            yield n
33293843048 | import pandas as pd
import requests
from io import StringIO
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
import streamlit as st
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
@st.cache
def split_data(df):
    """70/30 train/test split of the four iris measurements vs. the variety label."""
    feature_matrix = df[['sepal.length', 'sepal.width', 'petal.length', 'petal.width']].values
    target = df['variety'].values
    return train_test_split(feature_matrix, target, train_size=0.7, random_state=1)
def getData():
    """Read the four flower measurements from sliders; return a (1, 4) array."""
    labels = ("Sepal length", "Sepal width", "Petal length", "Petal width")
    measurements = [st.slider(label, 2, 25, 3) for label in labels]
    print(f"LOG: the prediction input is: {measurements}")
    return np.array(measurements).reshape(1, -1)
def main(df):
    """Train the user-selected classifier on a 70/30 split of *df*, show its
    accuracy and confusion matrix, then classify a slider-defined sample.

    Bug fix: the selectbox previously listed "GaussianPro00cessClassifier"
    (typo), so the GaussianProcessClassifier branch could never be selected.
    The twelve copy-pasted if/elif branches — identical except for the
    estimator construction — are collapsed into a dispatch table.
    """
    X_train, X_test, y_train, y_test = split_data(df)

    # Option label -> estimator factory; insertion order matches the original
    # menu order, and each factory reproduces the original hyper-parameters.
    classifiers = {
        "Decision Tree": lambda: DecisionTreeClassifier(),
        "Support Vector Machine": lambda: SVC(),
        "KNeighborsClassifier": lambda: KNeighborsClassifier(3),
        "Linear SVC": lambda: SVC(kernel="linear", C=0.025),
        "SVC": lambda: SVC(gamma=2, C=1),
        "GaussianProcessClassifier": lambda: GaussianProcessClassifier(1.0 * RBF(1.0)),
        "DecisionTreeClassifier": lambda: DecisionTreeClassifier(max_depth=5),
        "RandomForestClassifier": lambda: RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
        "MLPClassifier": lambda: MLPClassifier(alpha=1, max_iter=1000),
        "AdaBoostClassifier": lambda: AdaBoostClassifier(),
        "GaussianNB": lambda: GaussianNB(),
        "QuadraticDiscriminantAnalysis": lambda: QuadraticDiscriminantAnalysis(),
    }

    choice = st.selectbox('Which algorithm?', list(classifiers.keys()))
    clf = classifiers[choice]()

    clf.fit(X_train, y_train)
    acc = clf.score(X_test, y_test)
    st.write('Accuracy: ', acc)

    predictions = clf.predict(X_test)
    cm = confusion_matrix(y_test, predictions)
    st.write('Confusion matrix: ', cm)

    sample = getData()
    st.write('The classification is: ', clf.predict(sample)[0])
@st.cache
def loadData():
    """Download the Iris CSV from Google Drive and return it as a DataFrame.

    Cached by Streamlit so the network fetch happens only once per session.
    """
    share_url = "https://drive.google.com/file/d/1qgGPtbVk7dUBZxF-Q-F_xP-jMmAh11pV/view?usp=sharing"
    # The file id is the second-to-last path component of the share link.
    drive_id = share_url.split('/')[-2]
    download_url = 'https://drive.google.com/uc?export=download&id=' + drive_id
    csv_text = requests.get(download_url).text
    return pd.read_csv(StringIO(csv_text))
# Load the Iris dataset once (cached) and build the exploration UI.
df = loadData()
st.title('Iris')
if st.checkbox('Show dataframe'):
    st.write(df)
st.subheader('Scatter plot')
# Let the user pick iris varieties to display; default to all of them.
species = st.multiselect('Show iris per variety?', df['variety'].unique())
if not species:
    species = df['variety'].unique()
col1 = st.selectbox('Which feature on x?', df.columns[0:4])
col2 = st.selectbox('Which feature on y?', df.columns[0:4])
# Avoid plotting a feature against itself by falling back to two defaults.
if col1 == col2:
    col1 = df.columns[1]
    col2 = df.columns[0]
new_df = df[(df['variety'].isin(species))]
st.write(new_df)
# create figure using plotly express
fig = px.scatter(new_df, x =col1,y=col2, color='variety')
# Plot!
st.plotly_chart(fig)
st.subheader('Histogram')
feature = st.selectbox('Which feature?', df.columns[0:4])
# Filter dataframe
# NOTE(review): new_df2 is computed but never used — the histogram below is
# built from new_df; confirm whether new_df2 was meant to feed px.histogram.
new_df2 = df[(df['variety'].isin(species))][feature]
fig2 = px.histogram(new_df, x=feature, color="variety", marginal="rug")
st.plotly_chart(fig2)
st.subheader('Machine Learning models')
main(df) | Tudor1415/mlsandbox | main.py | main.py | py | 8,562 | python | en | code | 0 | github-code | 36 |
37272086828 | # https://blog.csdn.net/qq_32149483/article/details/112056845
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from typing import Tuple, List
class Img_Aug:
    """Factory for an imgaug augmentation pipeline.

    Each boolean flag enables one candidate transform family; calling the
    instance builds and returns an ``iaa.Sequential`` that applies a random
    subset (at most 5) of the enabled transforms in random order.
    """
    def __init__(self, prob=0.2, crop=True, blur=True, superpixel=True,
                 space_trans=True, sharpen=True, emboss=True,
                 edge_detect=True, noise=True, dropout=True):
        # prob: maximum crop percentage used when `crop` is enabled.
        self.prob = prob
        self.crop = crop
        self.blur = blur
        self.superpixel = superpixel
        self.space_trans = space_trans
        self.sharpen = sharpen
        self.emboss = emboss
        self.edge_detect = edge_detect
        self.noise = noise
        self.dropout = dropout
    def __call__(self):
        """Build the augmentation sequence from the enabled flags."""
        # The affine jitter (shift/scale/rotate) is always included.
        operations = [iaa.Affine(translate_px={"x": 15, "y": 15},
                                 scale=(0.8, 0.8),
                                 rotate=(-5, 5))]
        if self.crop:
            operations.append(iaa.Crop(percent=(0, self.prob)))
        if self.blur:
            # Exactly one of the three blur variants per image.
            operations.append(iaa.OneOf([iaa.GaussianBlur((0, 3.)),
                                         iaa.AverageBlur(k=(2, 7)),
                                         iaa.MedianBlur(k=(3, 11))]))
        if self.superpixel:
            # Applied only half the time (iaa.Sometimes).
            operations.append(iaa.Sometimes(0.5, iaa.Superpixels(p_replace=(0.005, 0.5),
                                                                 n_segments=(16, 28))))
        if self.space_trans:
            # Hue shift done in HSV space, then converted back to RGB.
            operations.append(iaa.Sequential([iaa.ChangeColorspace(from_colorspace="RGB", to_colorspace="HSV"),
                                              iaa.WithChannels(0, iaa.Add((50, 100))),
                                              iaa.ChangeColorspace(from_colorspace="HSV", to_colorspace="RGB")]))
        if self.sharpen:
            operations.append(iaa.Sharpen(alpha=(0., 1.), lightness=(0.75, 1.5)))
        if self.emboss:
            operations.append(iaa.Emboss(alpha=(0., 1.), strength=(0., 2.)))
        if self.edge_detect:
            operations.append(iaa.OneOf([iaa.EdgeDetect(alpha=(0., 0.75)),
                                         iaa.DirectedEdgeDetect(alpha=(0., 0.75), direction=(0, 1))]))
        if self.noise:
            operations.append(iaa.AdditiveGaussianNoise(scale=(0, 128), per_channel=0.5))
        if self.dropout:
            operations.append(iaa.Dropout(p=(0.01, 0.1), per_channel=0.5))
        # Pick at most 5 of the enabled transforms, in random order.
        lenTrans = len(operations)
        seq = iaa.Sequential([iaa.SomeOf(min(5, lenTrans), operations, random_order=True)])
        return seq
class Augmentation:
    """Applies a prebuilt imgaug sequence to an image (and optional bbox).

    When a bbox is given, returns (augmented_image, bbox_info) where
    bbox_info = {"location": [x1, y1, x2, y2], "label": ..., "shape": ...}.
    Without a bbox only the augmented image is returned.
    """
    def __init__(self, seq):
        # seq: an imgaug augmenter, e.g. the result of Img_Aug()().
        self.seq = seq
    def __call__(self, img, bbox=None, label=None, mode="x1y1x2y2") -> Tuple[np.ndarray, dict]:
        # seq_det = self.seq.to_deterministic()
        # bbox should be in x1x2y1y2 format
        if bbox is None:
            return self.seq(image=img)
        if mode == "x1y1x2y2":
            x1, y1, x2, y2 = bbox
        else:
            # Any other mode is treated as (x, y, w, h) and converted.
            x1, y1, x2, y2 = bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]
        bbs = ia.BoundingBoxesOnImage([ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, label=label)], shape=img.shape)
        # Augment image and box together so both stay consistent.
        image_aug, bbox_aug = self.seq(image=img, bounding_boxes=bbs)
        location = [bbox_aug[0].x1, bbox_aug[0].y1, bbox_aug[0].x2, bbox_aug[0].y2]
        label = bbox_aug[0].label
        shape = bbox_aug.shape
        bbox_info = {"location": location, "label": label, "shape": shape}
        return image_aug, bbox_info
if __name__ == "__main__":
    # Smoke test: build a pipeline and augment one training image with a
    # (deliberately tiny) bounding box in x1y1x2y2 format.
    seq = Img_Aug()()
    import cv2
    img = cv2.imread("mchar_train/000000.png")
    aug_img, aug_bbox = Augmentation(seq)(img, bbox=[0.5, 0.5, 0.5, 0.5], label=0)
| SuperbTUM/machine-learning-practice | Text Recognition/data_aug.py | data_aug.py | py | 3,722 | python | en | code | 0 | github-code | 36 |
8648504297 | from django.shortcuts import render, HttpResponse
from .forms import UserForm
from sms_alert.models import User
def index(request):
    """Render the signup form; on POST, validate and persist a new User.

    Bugs fixed:
    * ``cleaned_data`` is populated only after ``is_valid()`` runs; the
      original read it immediately, raising AttributeError on every POST.
    * The ``User`` instance was built but never saved, so "Successfully
      entered" was reported without writing anything to the database.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        phone_number = request.POST.get('phone_number')
        country = request.POST.get('country')
        user_form = UserForm({
            'timezone': 'America/Los_Angeles',
        })
        # cleaned_data exists only after validation has run.
        if not user_form.is_valid():
            return render(request, 'sms_alert/index.html', {'form': user_form})
        time_zone = user_form.cleaned_data['timezone']
        temp = User(
            name=name,
            time_zone=time_zone,
            phone_number=phone_number,
            country=country
        )
        temp.save()  # actually persist the record
        return HttpResponse("<h1>Successfully entered</h1>")
    else:
        form = UserForm()
        return render(request, 'sms_alert/index.html', {'form': form})
def next(request):
    """Second signup step: echo the submitted fields into next.html.

    NOTE(review): the view name shadows the builtin ``next``; renaming would
    require touching the URLconf, so it is only flagged here.
    """
    if request.method == 'POST':
        # Fix: the original bound an unused ``UserForm(request.POST)`` here;
        # the dead binding is removed (the data was never validated anyway).
        context = {
            'name': request.POST.get('name'),
            'phone_number': request.POST.get('phone_number'),
            'country': request.POST.get('country'),
            'time_zone': request.POST.get('timezone'),
        }
        return render(request, 'sms_alert/next.html', context)
    else:
        return render(request, 'sms_alert/next.html')
def form_save(request):
    """Persist a fully filled-in User, then re-render the signup form.

    Fixes: removed a leftover debug ``print(request.POST)`` and merged the
    duplicated POST/GET tails (both rendered index.html with a fresh form).
    """
    if request.method == 'POST':
        temp = User(
            name=request.POST.get('name'),
            sleep_time=request.POST.get('sleep_time'),
            wake_time=request.POST.get('awake_time'),
            time_zone=request.POST.get('time_zone'),
            phone_number=request.POST.get('phone_number'),
            country=request.POST.get('country')
        )
        temp.save()
    # Shared tail: always return to the signup page with an empty form.
    form = UserForm()
    return render(request, 'sms_alert/index.html', {'form': form})
| prajjwalsinghzz14/Amnesia--Twilio-Notifiactions | Amnesia/sms_alert/views.py | views.py | py | 2,300 | python | en | code | 0 | github-code | 36 |
35411324967 | from requests import get
from webapp.db import db
from webapp.question_service.models import Question_answer
def get_question_jservice(questions_quantity: int=1) -> dict:
    """Fetch `questions_quantity` random trivia questions from jservice.io.

    Queries the public English-language trivia API
    (https://jservice.io/api/random?count=N) and returns the decoded JSON —
    a list of question dicts, despite the historical ``dict`` annotation.
    """
    url = f'https://jservice.io/api/random?count={questions_quantity}'
    return get(url).json()
def save_question_in_db(question: dict) -> None:
    """Persist one jservice question/answer record to the database."""
    record = Question_answer(
        id_question=question['id'],
        text_question=question['question'],
        text_answer=question['answer'],
    )
    db.session.add(record)
    db.session.commit()
def process_check_to_valid_and_save_questions(data_questions_num: int) -> None:
    """Fetch `data_questions_num` questions and store only unique ones.

    If a fetched question already exists in the DB, a replacement batch is
    requested from the public API and processing continues until unique
    questions have been saved.
    """
    try:
        questions_in_jservice = get_question_jservice(data_questions_num)
    except Exception:  # narrowed from a bare except (which also caught SystemExit)
        print('Сайт jservice.io не доступен')
        return
    for question in questions_in_jservice:
        # Skip duplicates already stored; fetch a replacement instead.
        existing = Question_answer.query.filter_by(id_question=question['id']).first()
        if existing is not None:
            # BUG FIX: the API returns a *list* of questions; the original
            # appended the list itself, so the replacement element was a list
            # and question['id'] raised on the next iteration. Extend with
            # its elements instead.
            questions_in_jservice.extend(get_question_jservice())
        else:
            save_question_in_db(question)
| Straigan/webapp_question_service | webapp/services/jservice_service.py | jservice_service.py | py | 2,084 | python | ru | code | 0 | github-code | 36 |
10006400064 | """
Given a set of non-negative integers, and a value sum, determine if there is a subset of the given set with sum equal to given sum.
Examples: set[] = {3, 34, 4, 12, 5, 2}, sum = 9
Output: True //There is a subset (4, 5) with sum 9.
"""
def subset_sum(numbers, total):
    """Return True iff some subset of the non-negative `numbers` sums to `total`.

    Classic exponential recursion: each element is either taken or skipped.
    Improvements over the original:
    * prunes overshot branches (``total < 0``) instead of recursing until
      the pool is exhausted;
    * uses emptiness (``not numbers``) rather than ``not any(numbers)``,
      which conflated an empty pool with an all-zero pool.
    """
    if total == 0:
        return True  # the empty subset always sums to 0
    if total < 0 or not numbers:
        return False
    remaining = numbers.copy()
    last = remaining.pop()
    # Either use `last` or leave it out.
    return subset_sum(remaining, total - last) or subset_sum(remaining, total)
if __name__ == '__main__':
    # Smoke tests from the problem statement; an AssertionError means failure.
    # (Note: {} is an empty *dict* — it still supports .copy()/emptiness.)
    assert subset_sum({3, 34, 4, 12, 5, 2}, 9)
    assert subset_sum({}, 0)
    assert subset_sum({3, 34, 4, 12, 5, 2}, 0)
    assert subset_sum({3, 34, 4, 12, 5, 2}, 2)
    assert not subset_sum({3, 34, 4, 12, 5, 2}, 1)
    assert not subset_sum({}, 2)
| juanjoneri/Bazaar | Interview/Practice/Dynamic-Programming/subset-sum.py | subset-sum.py | py | 771 | python | en | code | 0 | github-code | 36 |
34141163457 | from flask_restful import Resource
from pymongo import MongoClient
from api.starta_flode import MONGODB_CONNECTION
from statistics import median
class Statistik(Resource):
    """REST resource exposing aggregate statistics per 'flode' (flow)."""
    def getFlodeStatistics(self, subjects, flode=None):
        # Fetch all subjects, optionally filtered to one flow.
        # NOTE(review): `flode and {...} or {}` is the legacy and/or ternary;
        # it works because the dict literal is truthy, but
        # `{'flode': flode} if flode else {}` would be clearer.
        allData = [r for r in subjects.find(flode and {'flode': flode} or {})]
        # Durations for subjects that actually finished (end_at set).
        tid = [int(x['end_at'] - x['start_at']) for x in allData if x['end_at'] is not None]
        retobj = {
            'started': len(allData),
            'ended': len([x for x in allData if x['end_at'] is not None]),
            'median': tid and median(tid) or 0
        }
        return retobj
    def getFlodeB(self):
        # Placeholder — not implemented yet.
        pass
    def get(self):
        # A fresh MongoDB connection per request; heroku_ssj5qmzb is the app DB.
        mongo_client = MongoClient(MONGODB_CONNECTION)
        # if not found
        db = mongo_client['heroku_ssj5qmzb']
        subjects = db.subjects
        # Overall stats plus a breakdown for flows 'A' and 'B'.
        statistics = {
            'total': self.getFlodeStatistics(subjects),
            'A': self.getFlodeStatistics(subjects, 'A'),
            'B': self.getFlodeStatistics(subjects, 'B')
        }
return statistics, 200 | svanis71/fkhack-back | api/statistik.py | statistik.py | py | 1,059 | python | en | code | 0 | github-code | 36 |
43694284198 | import glob
def browar(name):
    """Process one brewery-placement instance read from file `name`.

    Cities sit on a ring: z[i] is city i's beer demand and d[i] the distance
    from city i to the next city. The code incrementally maintains a total
    cost c (sum of demand x ring-distance) while sliding the candidate city
    around the ring, and writes "<min cost> <city index>" to the matching
    output file. (Algorithm details inferred from the code — confirm against
    the original task statement.)
    """
    print("Pracuje nad " + name)
    plik = open(name, "r")
    # n - number of cities to analyse
    n = int(plik.readline())
    dane = plik.read().splitlines()
    tab = []
    flat_tab = []
    # Flatten the remaining whitespace-separated tokens into one list.
    for element in dane:
        tab.append(element.split(" "))
    for sublist in tab:
        for item in sublist:
            flat_tab.append(item)
    # z[i] - beer demand of city i
    # d[i] - distance from city i to the next city on the ring
    z = []
    d = []
    index = 0
    for element in flat_tab:
        if index % 2 == 0:
            z.append(int(element))
        else:
            d.append(int(element))
        index += 1
    # Seed the sweep at city 0: l/r walk the ring outwards, accumulating
    # carried demand (zl/zr), covered distance (dl/dr) and total cost c.
    dl, dr, l, r, zl, zr, c = 0, 0, 0, 0, 0, 0, 0
    j = 1
    while j < n:
        if dl+d[(l+n-1) % n] < dr+d[r]:
            dl = dl + d[(l + n - 1) % n]
            l = (l + n - 1) % n
            c = c + dl * z[l]
            zl = zl + z[l]
        else:
            dr = dr + d[r]
            r = (r + 1) % n
            c = c + dr * z[r]
            zr = zr + z[r]
        j += 1
    # NOTE(review): `min` shadows the builtin within this function.
    min = c
    min_n = 0
    # Slide the candidate city around the ring, updating c incrementally
    # instead of recomputing it from scratch for every city.
    for i in range(1, n):
        zl = zl + z[i - 1]
        cc = d[(i + n - 1) % n]
        cc = cc * (zl - zr)
        c = c + cc
        zr = zr - z[i]
        dl = dl + d[(i + n - 1) % n]
        dr = dr - d[(i + n - 1) % n]
        # Rebalance: shift cities from the right span to the left while the
        # left-side distance exceeds the right-side one.
        while dl > dr + d[r]:
            dr = dr + d[r]
            r = (r + 1) % n
            l = (l + 1) % n
            zr = zr + z[r]
            zl = zl - z[r]
            c = c + z[r] * (dr - dl)
            dl = dl - d[r]
        if c < min:
            min = c
            min_n = i
    plik.close()
    # Mirror in\broin*.txt -> out\broout*.txt (Windows separators;
    # NOTE(review): main() globs "in/*.txt" with forward slashes — confirm
    # this replace pattern matches the runtime path separator).
    nameout = name.replace("in\\broin", "out\\broout")
    plikout = open(nameout, "w")
    plikout.write(str(min) + " " + str(min_n))
    plikout.close()
def main():
    """Run browar() on every input file found under in/."""
    for input_path in glob.glob("in/*.txt"):
        browar(input_path)
if __name__ == "__main__":
    main()
| antoniusz22/Script-languages | python1.py | python1.py | py | 2,008 | python | en | code | 0 | github-code | 36 |
42775151544 | from .models import (
ActionList,
Action,
ExpectList,
Expect,
ExportDict,
SkipIfList,
SkipIf,
)
from .validators import validate_boolean, validate_conditionals,validate_export_dict
class ArgType:
    # Base class for argument validators. Subclasses override `type` with a
    # callable that normalizes the raw value, and assign `value` from inside
    # __call__ once validation succeeds.
    type = lambda value: value  # noqa
    # NOTE(review): as a class attribute this lambda would bind as a method
    # (receiving the instance as `value`) if invoked via self.type(); every
    # subclass in this file overrides it with a real class, so the default
    # appears unused — confirm before relying on it.
    value = None
class UsingArgType(ArgType):
    """Validates the `using` argument: a "<namespace>.<collection>" string."""
    type = str
    def __call__(self, value) -> str:
        # Must be a string before its shape can be inspected.
        if not isinstance(value, str):
            raise ValueError(
                f"`using` must be a string, got {type(value).__name__}"
            )
        # Exactly one dot: namespace on the left, collection on the right.
        if value.count(".") != 1:
            raise ValueError(
                f"`using` must be a <namespace>.<collection>, got {value}"
            )
        # TODO: Check if collection is installed
        self.value = self.type(value)
        return value
class ActionsArgType(ArgType):
    """Validates the `actions` argument and normalizes it to an ActionList."""
    type = ActionList
    def __call__(self, actions) -> list:
        """
        1. Check that `actions` is a list (with helpful hints otherwise).
        2. For each entry:
          2.1 Accept either a dict ({"module.action": {params}}) or a
              plain "module.action" string.
          2.2 Build the corresponding Action instance.
          2.3 Collect it into the validated list.
        Returns the original input; the normalized ActionList is stored in
        ``self.value``.
        """
        value = []
        if not isinstance(actions, list):
            msg = f"`actions` must be a list, got {type(actions).__name__}"
            # Suggest the most likely YAML fix for common mistakes.
            if isinstance(actions, dict):
                msg += (
                    f" did you mean `- {list(actions.keys())[0]}:` ?"
                )
            if isinstance(actions, str):
                msg += f" did you mean `[{actions}]` or `- {actions}` ?"
            raise ValueError(msg)
        _valid = ", ".join(Action.Config.valid_ansible_arguments)
        for action in actions:
            if isinstance(action, dict):
                # ensure this is a single key-value pair
                if len(action) != 1:
                    raise ValueError(
                        f"`action` must be single key dictionary,"
                        f" got {action.keys()}"
                    )
                action_key = list(action.keys())[0]
                if "." not in action_key:
                    raise ValueError(
                        f"`action` must be <module>.<action>, got {action_key}"
                    )
                module_name, action_name = action_key.split(".")
                action_value = action[action_key]
                if not isinstance(action_value, dict):
                    raise ValueError(
                        f"`{action_key}:` takes"
                        " {param: value, ...},"
                        f" where `param` is one of {_valid}"
                        f", but got {action_value}"
                    )
                try:
                    action = Action(
                        module_name=module_name,
                        action_name=action_name,
                        **action_value,
                    )
                except TypeError as e:
                    # Surface which keyword(s) Action did not accept.
                    _invalid = set(action_value) - set(
                        Action.Config.valid_ansible_arguments
                    )
                    raise ValueError(
                        f"Unsupported parameters"
                        f" for `{action_key}: {_invalid}`"
                        f". Supported parameters include: {_valid}"
                    ) from e
            elif isinstance(action, str):
                # Bare "module.action" shorthand with no parameters.
                if "." not in action:
                    raise ValueError(
                        f"`action` must be <module>.<action>, got {action}"
                    )
                module, action_name = action.split(".")
                action = Action(module_name=module, action_name=action_name)
            else:
                raise ValueError(
                    f"`actions` must be a list of dicts"
                    f" or strings, got {type(action).__name__}"
                )
            value.append(action)
        self.value = self.type(value)
        return actions
class SkipifArgType(ArgType):
    """Validates `skipif` conditionals, normalizing them to a SkipIfList."""
    type = SkipIfList
    def __call__(self, value) -> str:
        validated = validate_conditionals(value, "skipif", SkipIf, self.type)
        self.value = validated
        return value
class ExpectArgType(ArgType):
    """Validates `expect` conditionals, normalizing them to an ExpectList."""
    type = ExpectList
    def __call__(self, value) -> list:
        validated = validate_conditionals(value, "expect", Expect, self.type)
        self.value = validated
        return value
class ExportArgType(ArgType):
    """Validates the `export` mapping, normalizing it to an ExportDict."""
    type = ExportDict
    def __call__(self, value) -> dict:
        validated = validate_export_dict(value)
        self.value = self.type(validated)
        return value
class RequiredArgType(ArgType):
    """Validates the `required` flag as a boolean."""
    def __call__(self, value) -> bool:
        validated = validate_boolean(value)
        self.value = validated
        return value
| rochacbruno/ansible-test | ansible-test/plugins/module_utils/arg_types.py | arg_types.py | py | 4,795 | python | en | code | 2 | github-code | 36 |
2941292608 | class Solution:
def combinationSum3(self, k: int, n: int) -> List[List[int]]:
res = []
path = []
nums = range(1, 10)
def backtrack(n, k, sp):
if sum(path) > n: # cut branch to speed up
return
if len(path) == k and sum(path) == n:
res.append(path.copy())
return
for i in range(sp, len(nums) , 1):
path.append(nums[i])
backtrack(n, k, i+1)
path.pop()
backtrack(n, k, 0)
return res
# - (k-len(path)) | kai0456/algo_prac | 216_Combination_Sum_III.py | 216_Combination_Sum_III.py | py | 594 | python | en | code | 0 | github-code | 36 |
17169367130 | # Program make a simple calculator that can add, subtract, multiply and divide using functions
# This function adds two numbers
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
# This function subtracts two numbers
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
# This function multiplies two numbers
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
# This function divides two numbers
def divide(x, y):
    """Return x divided by y (true division; raises ZeroDivisionError for y == 0)."""
    quotient = x / y
    return quotient
# This function calculates x to the power of y
def power(x, y):
    """Return x raised to the integer power y.

    Generalized to negative exponents (x**-n == 1 / x**n); the original
    while-loop never executed for y < 0 and silently returned 1.
    Non-negative exponents produce exactly the same results as before.
    """
    if y < 0:
        return 1 / power(x, -y)
    result = 1
    for _ in range(y):
        result = result * x
    return result
# Text-menu calculator: ask for an operation, read two integers, print the
# result. The loop repeats until a valid choice is made (or 0 quits).
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
print("5.Power Of")
isRunning = True
while isRunning:
    choice = input("Enter choice(1/2/3/4/5) or 0 for exit:")
    if choice == '1' or choice == '2' or choice == '3' or choice == '4' or choice == '5':
        num1 = int(input("Enter first number: "))
        num2 = int(input("Enter second number: "))
        # A valid choice ends the prompt loop after the result is printed.
        isRunning = False
    elif choice == '0':
        print("Exiting program..")
        exit()
    else:
        print("Invalid input! Please, try again.")
    # Dispatch on the chosen operation. On an invalid choice num1/num2 are
    # unset, but then every branch below is False so nothing is evaluated.
    if choice == '1':
        print(num1,"+",num2,"=", add(num1,num2))
    elif choice == '2':
        print(num1,"-",num2,"=", subtract(num1,num2))
    elif choice == '3':
        print(num1,"*",num2,"=", multiply(num1,num2))
    elif choice == '4':
        print(num1,"/",num2,"=", divide(num1,num2))
    elif choice == '5':
        print(num1,"to the power of ",num2,"=", power(num1,num2))
| erkanredzheb/Simple-Python-Calculator | SimpleCalc.py | SimpleCalc.py | py | 1,468 | python | en | code | 0 | github-code | 36 |
42366591652 | from email.mime import base
from AES_encipher import CTR_Mode
from OAEP import *
from RSA_key_gen import AES_key_gen
from utility import bit_size
import base64
from math import ceil
class data_msg:
    """ Class that holds the values used in the data transfer. """
    def __init__(self, signature: bytes = None, msg: bytes = None, symmetric_key: bytes = None, nonce_val: int = None):
        # signature: RSA signature bytes; msg: ciphertext; symmetric_key:
        # raw AES key bytes; nonce_val: integer CTR nonce.
        self.signature = signature
        self.msg = msg
        self.symmetric_key = symmetric_key
        self.nonce_val = nonce_val # Used in the AES_CTR mode
    def from_base64(self, base64_str: str):
        """Populate this instance from the newline-separated base64 encoding
        produced by get_base64_encode().

        NOTE(review): field order relies on dict insertion order matching
        __init__; an input with fewer lines than fields raises IndexError.
        """
        dm = data_msg()
        d = vars(dm)
        lines = base64_str.split('\n')
        idx = 0
        for k in d.keys():
            d[k] = lines[idx]
            d[k] = base64.b64decode(d[k])
            idx += 1
        self.__dict__ = d
        # nonce_val travels as big-endian bytes; restore the integer form.
        self.nonce_val = int.from_bytes(self.nonce_val, byteorder='big')
    def int_to_byte(self, val):
        """Return `val` as big-endian bytes; bytes pass through unchanged."""
        if type(val) != type(bytes()):
            l = ceil(bit_size(val) / 8)
            val = val.to_bytes(length=l, byteorder='big')
        return val
    def get_base64_encode(self):
        """Serialize all fields as one base64 string per line (None -> empty).

        nonce_val is temporarily converted to bytes for encoding and then
        restored to its integer form before returning.
        """
        self.nonce_val = self.int_to_byte(self.nonce_val)
        my_vars = vars(self)
        result = str()
        for v in my_vars.values():
            if v == None:
                result += base64.b64encode(bytes()).decode('utf-8') + '\n'
            else:
                result += base64.b64encode(v).decode('utf-8') + '\n'
        self.nonce_val = int.from_bytes(self.nonce_val, byteorder='big')
        return result
class AES_message_cipher:
    """ Does the ciphering of a message, in bytes, of any size. """
    def __init__(self, key_bit_size = 128):
        self.__block_count__ = 16 # the block bytes count
        # Generate a fresh AES key and keep it as big-endian bytes.
        kg = AES_key_gen(key_bit_size)
        self.key = kg.generate_key()
        self.key = self.key.to_bytes(length=kg.bit_size // 8, byteorder='big')
    def encrypt(self, msg: bytes):
        """Encrypt `msg` block-by-block in CTR mode.

        Returns a data_msg carrying the ciphertext, the symmetric key and
        the CTR nonce (needed by the decipher side).
        """
        blocks_count = ceil(len(msg) / self.__block_count__)
        dm = data_msg()
        cipher_blocks = []
        cipher = CTR_Mode(self.key)
        for i in range(blocks_count):
            idx = i * self.__block_count__
            # The last block may be shorter than 16 bytes.
            if i == blocks_count - 1:
                b = msg[idx:]
            else:
                b = msg[idx:idx + self.__block_count__]
            b = cipher.encrypt_block(data=b)
            cipher_blocks += (b)
        cipher_blocks = bytes(cipher_blocks)
        dm.msg = cipher_blocks
        dm.symmetric_key = self.key
        dm.nonce_val = cipher.nonce
        return dm
class AES_message_decipher:
    """ Deciphers a message, in bytes, of any size. """
    def __init__(self):
        self.__block_count__ = 16 # the block bytes count
    def decrypt(self, dm: data_msg):
        """Decrypt dm.msg block-by-block using the key and nonce carried in
        the data_msg, returning the recovered plaintext bytes."""
        blocks_count = ceil(len(dm.msg) / self.__block_count__)
        cipher = CTR_Mode(dm.symmetric_key, nonce=dm.nonce_val)
        msg = []
        for i in range(blocks_count):
            idx = i * self.__block_count__
            # The last block may be shorter than 16 bytes.
            if i == blocks_count - 1:
                b = dm.msg[idx:]
            else:
                b = dm.msg[idx:idx + self.__block_count__]
            b = cipher.decrypt_block(b)
            msg += b
        msg = bytes(msg)
return msg | Cezari0o/Gerador-Assinaturas-RSA | data_msg.py | data_msg.py | py | 3,301 | python | en | code | 0 | github-code | 36 |
42936594523 | import random
from words import words
import string
def get_valid_word(words):
    """Pick a random word with no hyphens or spaces and return it uppercased."""
    word = random.choice(words)
    # Re-draw until the chosen word contains neither '-' nor ' '.
    while any(ch in word for ch in "- "):
        word = random.choice(words)
    return word.upper()
def hangman():
    """Play one interactive round of hangman (Slovak UI strings)."""
    # Secret word plus bookkeeping: letters still to find, letters guessed,
    # and remaining lives.
    word = get_valid_word(words)
    word_letters = set(word)
    alphabet = set(string.ascii_uppercase)
    used_letters = set()
    lives = 7
    # Game loop: until every letter is found or the player runs out of lives.
    while len(word_letters) > 0 and lives > 0:
        print('Mas', lives, 'zivotou a skusal si: ', ' '.join(used_letters))
        # Mask letters not guessed yet with '-'.
        word_list = [letter if letter in used_letters else '-' for letter in word]
        print('Slovo: ', ' '.join(word_list))
        user_letter = input('Hadaj: ').upper()
        if user_letter in alphabet - used_letters:
            used_letters.add(user_letter)
            if user_letter in word_letters:
                word_letters.remove(user_letter)
            else:
                # Wrong letter: lose a life.
                lives = lives - 1
                print('Pismeno,', user_letter, 'tam nepatri')
        elif user_letter in used_letters:
            # Already tried this letter.
            print('To uz si pouzil')
        else:
            # Input was not a single alphabetic letter.
            print('Pismena mas hadat')
    # Loop ended: either out of lives (lost) or the word is complete (won).
    if lives == 0:
        print('Sorry umrel si bolo to', word)
    else:
        print('Dal si to', word, '!!')
# Start a game immediately when the module is executed.
hangman()
| MartinBesko/12 | 12 projects/pici.py | pici.py | py | 1,320 | python | en | code | 0 | github-code | 36 |
29290165497 | import glob
import os
import subprocess
import tempfile
# Location of the resource compiler binary and the output container.
rescompilerPath = 'build-desktop/bin/rescompiler.exe'
containerPath = 'data/build/game.dat'
# Pack everything under data/ except build outputs and raw sources.
excludeFiles = glob.glob('data/build/**/*', recursive=True) + glob.glob('data/sources/**/*', recursive=True)
dataFiles = [os.path.abspath(f) for f in glob.glob('data/**/*', recursive=True) if (not f in excludeFiles and not os.path.isdir(f))]
# Write the file list to a temp file the compiler reads. delete=False lets
# the child process open it; NOTE(review): the temp file is never removed.
listFile = tempfile.NamedTemporaryFile('w+', delete=False)
print(f'Writing {len(dataFiles)} files to {listFile.name}')
listFile.write(os.linesep.join(dataFiles))
listFile.flush()
listFile.close()
# Compiler invocation: <rescompiler> <list file> <output container>.
resArgs = [
    os.path.abspath(rescompilerPath),
    listFile.name,
    os.path.abspath(containerPath)
]
returnCode = subprocess.call(resArgs)
print(f'Completed with code {returnCode}') | Valax321/GAWFramework | rescompile.py | rescompile.py | py | 777 | python | en | code | 0 | github-code | 36 |
29024831808 | from src.algo import rpq
from src import Config
from tests.simple_test import simple_test
import json
import pytest
import os
import random
# Directory holding the test fixtures: <this file's directory>/tests
MAIN_TEST_DIR = \
    os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests')
# One absolute path per fixture sub-directory inside MAIN_TEST_DIR.
TEST_DIRS = list(map(
    lambda dir_name: os.path.join(MAIN_TEST_DIR, dir_name),
    os.listdir(MAIN_TEST_DIR)
))
@pytest.mark.parametrize('test_dir', TEST_DIRS)
def test_small_test(test_dir):
    """Run rpq on each fixture directory, comparing against stored answers.

    The (filename, config-key) pairs tell simple_test which fixture files to
    load; the final lambda normalizes rpq's output for comparison.
    """
    simple_test(
        test_dir,
        [
            ('config.json', 'config'),
            ('data_base.txt', 'data_base'),
            ('query.txt', 'regular_query')
        ],
        rpq,
        lambda j: set(map(tuple, j))
    )
@pytest.mark.parametrize(('max_count_vertexes', 'regex'),
                         [
                             (max_count_vertexes, regex)
                             for max_count_vertexes in [10, 40, 160]
                             for regex in ['a | b', 'a* (b | $)', '(a | b) . (b* | a b)']
                         ])
#for regex in ['a | b']])
def test_big_test(max_count_vertexes, regex):
    """Property test on random graphs: every rpq result pair must start in
    the input-vertex set and end in the output-vertex set."""
    # Random edge list over labels a/b/c (duplicates and self-loops allowed).
    count_edges = random.randint(1, max_count_vertexes ** 2)
    I = [random.randint(0, max_count_vertexes - 1) for _ in range(count_edges)]
    J = [random.randint(0, max_count_vertexes - 1) for _ in range(count_edges)]
    V = [random.choice(['a', 'b', 'c']) for _ in range(count_edges)]
    count_vertexes = max(I + J) + 1
    # Random non-empty input/output vertex subsets (sets dedupe the draws).
    max_count_input_vertexes = random.randint(1, count_vertexes)
    max_count_output_vertexes = random.randint(1, count_vertexes)
    input_vertexes = list({random.randint(0, count_vertexes - 1)
                           for _ in range(max_count_input_vertexes)})
    output_vertexes = list({random.randint(0, count_vertexes - 1)
                            for _ in range(max_count_output_vertexes)})
    config = Config.from_dict(
        {
            'data_base_lists': [I, J, V],
            'regular_query_regex': regex,
            'input_vertexes': input_vertexes,
            'output_vertexes': output_vertexes
        }
    )
    result = rpq(config)
    for V_from, _ in result:
        assert V_from in input_vertexes
    for _, V_to in result:
        assert V_to in output_vertexes
| SergeyKuz1001/formal_languages_autumn_2020 | tests/rpq/test.py | test.py | py | 2,155 | python | en | code | 0 | github-code | 36 |
35798383779 |
from time import perf_counter
import numpy as np
import matplotlib.pyplot as plt
#The program uses the Velocity Verlet method to simulate the
#perihelion precession of Mercury over TT years divided into n
#time steps. In order to avoid problems with insufficient computer
#memory, one year at a time is simulated. When one year is simulated,
#the program finds the positions of Mercury closest to the Sun that year.
#The initial conditions for the next year to be simulated are extracted
#from the last time step of the current year.
#The Velocity Verlet method.
def VelocityVerlet(f,x0,vx0,y0,vy0,t0,dt,n):
    """Integrate a 2-D trajectory with the velocity Verlet scheme.

    f(p, v, k) must return the acceleration (a 2-vector) at step k.
    Returns (t, p, v, cpu_seconds): times t[0..n], positions p[k] = (x, y),
    velocities v[k] = (vx, vy) and the wall-clock time of the integration.
    """
    t = np.zeros(n + 1)
    p = np.zeros((n + 1, 2))
    v = np.zeros((n + 1, 2))
    # Seed the arrays with the initial state.
    t[0] = t0
    p[0] = (x0, y0)
    v[0] = (vx0, vy0)
    tic = perf_counter()
    for k in range(n):
        a_now = f(p, v, k)
        t[k + 1] = t[k] + dt
        p[k + 1] = p[k] + dt * v[k] + 0.5 * dt**2 * a_now
        # NOTE(review): f is evaluated at k+1 before v[k+1] is assigned, so
        # a velocity-dependent force (f_E) sees v[k+1] == 0 there — confirm
        # this is intended before refactoring.
        v[k + 1] = v[k] + 0.5 * dt * (f(p, v, k + 1) + a_now)
    cpu_vv = perf_counter() - tic
    return t, p, v, cpu_vv
#Newtonian gravitational acceleration on Mercury, in AU/(year**2).
#Uses GM_sun = 4*pi**2 in astronomical units (AU, year, solar mass),
#with no relativistic correction.
def f_N(p,v,k):
    """Return the classical (Newtonian) acceleration at step k."""
    x, y = p[k, 0], p[k, 1]
    r = np.sqrt(x**2 + y**2)
    # a = -(4*pi^2 / r^3) * r_vec
    return np.array([-4 * np.pi**2 * x, -4 * np.pi**2 * y]) / float(r**3)
#Acceleration with the leading general-relativistic correction included,
#in AU/(year**2): a = -(4*pi^2/r^3) * (1 + 3*l^2/(r^2*c^2)) * r_vec,
#where l = |x*vy - y*vx| is the specific angular momentum and
#c is the speed of light in AU/year.
def f_E(p,v,k):
    """Return the relativistically corrected acceleration at step k."""
    c = 63241.077084266275
    l = np.abs(p[k, 0] * v[k, 1] - p[k, 1] * v[k, 0])
    r = np.sqrt(p[k, 0]**2 + p[k, 1]**2)
    correction = 1.0 + 3.0 * l**2 / (r**2 * c**2)
    f = np.array([-4 * np.pi**2 * correction * p[k, 0],
                  -4 * np.pi**2 * correction * p[k, 1]])
    return f / float(r**3)
#Initial time t0
t0 = 0
#Initial position of Mercury: x0 = 0.3075 AU and y0 = 0 AU
#(separate copies for the Newtonian _N and relativistic _E runs)
x0_N = 0.3075
y0_N = 0
x0_E = 0.3075
y0_E = 0
#Initial velocity of Mercury: vx0 = 0 AU/year and vy0 = 12.44 AU/year
vx0_N = 0
vy0_N = 12.44
vx0_E = 0
vy0_E = 12.44
#Number of years TT, total number of time steps n and time step length
TT = 100
n = 2*(10**6)
dt = (TT - t0)/float(n*TT)
#Initialization of result lists: perihelion distances, times, positions
perihel_N = []
#kvalue_N = []
perihel_E = []
#kvalue_E = []
tt_N = []
tt_E = []
x_N = []
y_N = []
x_E = []
y_E = []
#Initialization of the year counter printed on screen
#during simulation
teller = 0
#Simulation of one year at the time, without relativistic correction
#(index N for Newton) and with relativistic correction (index E for Einstein)
for i in range(TT):
    #NOTE(review): T is assigned but never used below.
    T = 1 + i
    #Integrate one year, with and without the relativistic correction.
    [t,pvv_N,vvv_N,cpu_vv_N] = VelocityVerlet(f_N,x0_N,vx0_N,y0_N,vy0_N,t0,dt,n)
    [t,pvv_E,vvv_E,cpu_vv_E] = VelocityVerlet(f_E,x0_E,vx0_E,y0_E,vy0_E,t0,dt,n)
    #Initial conditions for simulation of next year
    t0 = t[n]
    x0_N = pvv_N[n,0]
    y0_N = pvv_N[n,1]
    x0_E = pvv_E[n,0]
    y0_E = pvv_E[n,1]
    vx0_N = vvv_N[n,0]
    vy0_N = vvv_N[n,1]
    vx0_E = vvv_E[n,0]
    vy0_E = vvv_E[n,1]
    #Distances between Sun and Mercury (no relativistic correction)
    rr_N = np.sqrt(pvv_N[:,0]**2 + pvv_N[:,1]**2)
    #rmax_N = np.max(rr_N)
    #rmin_N = np.min(rr_N)
    #Distances between Sun and Mercury (relativistic correction included)
    rr_E = np.sqrt(pvv_E[:,0]**2 + pvv_E[:,1]**2)
    #rmax_E = np.max(rr_E)
    #rmin_E = np.min(rr_E)
    #Finding the positions and corresponding time steps where Mercury is
    #closest to the Sun (no relativistic correction): a perihelion is a
    #local minimum of the Sun-Mercury distance.
    for k in range(1,np.size(rr_N)-1):
        if rr_N[k] < rr_N[k-1] and rr_N[k] < rr_N[k+1]:
            perihel_N.append(rr_N[k])
            tt_N.append(t[k])
            x_N.append(pvv_N[k,0])
            y_N.append(pvv_N[k,1])
            #kvalue_N.append(k)
    #perihel_N = np.asarray(perihel_N)
    #kvalue_N = np.asarray(kvalue_N)
    #--------------------------------
    #Finding the positions and corresponding time steps where Mercury is
    #closest to the Sun (relativistic correction included)
    for k in range(1,np.size(rr_E)-1):
        if rr_E[k] < rr_E[k-1] and rr_E[k] < rr_E[k+1]:
            perihel_E.append(rr_E[k])
            tt_E.append(t[k])
            x_E.append(pvv_E[k,0])
            y_E.append(pvv_E[k,1])
            #kvalue_E.append(k)
    #Printing the current year just simulated on screen.
    teller = teller + 1
    print(teller)
#--------------------------------------------
#Making arrays of lists with results from TT years of
#simulation
tt_N = np.asarray(tt_N)
x_N = np.asarray(x_N)
y_N = np.asarray(y_N)
perihel_N = np.asarray(perihel_N)
tt_E = np.asarray(tt_E)
x_E = np.asarray(x_E)
y_E = np.asarray(y_E)
perihel_E = np.asarray(perihel_E)
#print(np.size(tt_N))
#x_N = np.zeros(np.size(kvalue_N))
#y_N = np.zeros(np.size(kvalue_N))
#theta_N = np.zeros(np.size(kvalue_N))
#for k in range(np.size(kvalue_N)):
#    x_N[k] = pvv_N[kvalue_N[k],0]
#    y_N[k] = pvv_N[kvalue_N[k],1]
#    theta_N[k] = np.arctan(y_N[k]/x_N[k])
#------------------------------------------
#x_E = np.zeros(np.size(kvalue_E))
#y_E = np.zeros(np.size(kvalue_E))
#theta_E = np.zeros(np.size(kvalue_E))
#for k in range(np.size(kvalue_E)):
#    x_E[k] = pvv_E[kvalue_E[k],0]
#    y_E[k] = pvv_E[kvalue_E[k],1]
#    theta_E[k] = np.arctan(y_E[k]/x_E[k])
#print(np.size(tt_N))
#print(np.size(tt_E))
#print(np.size(x_N))
#print(np.size(y_N))
#print(np.size(x_E))
#print(np.size(y_E))
#Writing final results to file: one "<time> <x> <y>" line per perihelion.
outfile = open('data5g_N.txt','w')
for i in range(np.size(x_N)):
    outfile.write("""%2.12f %2.12f %2.12f""" % (tt_N[i], x_N[i], y_N[i]))
    outfile.write('\n')
outfile.close()
outfile = open('data5g_E.txt','w')
for i in range(np.size(x_E)):
    outfile.write("""%2.12f %2.12f %2.12f""" % (tt_E[i], x_E[i], y_E[i]))
    outfile.write('\n')
outfile.close()
#Plotting Mercury's orbit after TT years of simulation
#(both with and without relativistic correction) as
#a check that it looks okay.
plott1 = 'ja'
#Plot the orbit of the final simulated year if plott1 = 'ja'
if plott1 == 'ja':
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.plot(pvv_N[:,0], pvv_N[:,1],'b',label = 'Newton')
    ax.plot(pvv_E[:,0], pvv_E[:,1],'r',label = 'Einstein')
    #Increase margins on axes
    ax.set_xmargin(0.1)
    ax.axis('equal')
    #plt.axis('equal')
    ax.set_xlabel('x(t) [AU]', fontsize = 15)
    ax.set_ylabel('y(t) [AU]', fontsize = 15)
    #NOTE(review): the title mentions planet Earth, but this program
    #simulates Mercury — the string looks copied from an earlier exercise.
    ax.set_title('Planet Earth orbiting 2 times around the Sun', fontsize = 16)
    ax.legend(loc='center', fontsize = 14)
    ax.tick_params(labelsize = 14)
    plt.show()
| abjurste/A19-FYS4150 | Project5/Project5g.py | Project5g.py | py | 7,275 | python | en | code | 0 | github-code | 36 |
21618567791 | from __future__ import absolute_import
import logging
import time
import unittest
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.examples import wordcount
from apache_beam.testing.pipeline_verifiers import FileChecksumMatcher
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import delete_files
class WordCountIT(unittest.TestCase):
  """Integration test running the wordcount example end to end, verifying
  final pipeline state and an output-file checksum."""
  # Enable nose tests running in parallel
  _multiprocess_can_split_ = True
  # The default checksum is a SHA-1 hash generated from a sorted list of
  # lines read from expected output. This value corresponds to the default
  # input of WordCount example.
  DEFAULT_CHECKSUM = '33535a832b7db6d78389759577d4ff495980b9c0'
  @attr('IT')
  def test_wordcount_it(self):
    """Run wordcount on the default (non-FnAPI) execution path."""
    self._run_wordcount_it(wordcount.run)
  @attr('IT', 'ValidatesContainer')
  def test_wordcount_fnapi_it(self):
    """Run wordcount through the Fn API (beam_fn_api experiment)."""
    self._run_wordcount_it(wordcount.run, experiment='beam_fn_api')
  def _run_wordcount_it(self, run_wordcount, **opts):
    """Drive one wordcount run: build options, attach verifiers, execute."""
    test_pipeline = TestPipeline(is_integration_test=True)
    extra_opts = {}
    # Set extra options to the pipeline for test purpose
    # Timestamped output location so concurrent runs don't collide.
    test_output = '/'.join([
        test_pipeline.get_option('output'),
        str(int(time.time() * 1000)),
        'results'
    ])
    extra_opts['output'] = test_output
    test_input = test_pipeline.get_option('input')
    if test_input:
      extra_opts['input'] = test_input
    arg_sleep_secs = test_pipeline.get_option('sleep_secs')
    sleep_secs = int(arg_sleep_secs) if arg_sleep_secs is not None else None
    expect_checksum = (
        test_pipeline.get_option('expect_checksum') or self.DEFAULT_CHECKSUM)
    # Verify both pipeline success and the checksum of the produced shards.
    pipeline_verifiers = [
        PipelineStateMatcher(),
        FileChecksumMatcher(
            test_output + '*-of-*', expect_checksum, sleep_secs)
    ]
    extra_opts['on_success_matcher'] = all_of(*pipeline_verifiers)
    extra_opts.update(opts)
    # Register clean up before pipeline execution
    self.addCleanup(delete_files, [test_output + '*'])
    # Get pipeline options from command argument: --test-pipeline-options,
    # and start pipeline job by calling pipeline main function.
    run_wordcount(
        test_pipeline.get_full_options_as_args(**extra_opts),
        save_main_session=False)
if __name__ == '__main__':
  # Verbose logging helps diagnose integration-test failures.
  logging.getLogger().setLevel(logging.DEBUG)
  unittest.main()
| a0x8o/kafka | sdks/python/apache_beam/examples/wordcount_it_test.py | wordcount_it_test.py | py | 2,483 | python | en | code | 59 | github-code | 36 |
def calculate_division(percentage):
    """Map an exam percentage to its division label.

    Inclusive thresholds: 75 -> "1st Division", 60 -> "2nd Division",
    40 -> "3rd Division"; anything below 40 is "Fail".
    """
    bands = ((75, "1st Division"), (60, "2nd Division"), (40, "3rd Division"))
    for cutoff, label in bands:
        if percentage >= cutoff:
            return label
    return "Fail"
# Interactive text menu: compute results repeatedly until the user exits.
while True:
    print("Menu:")
    print("1. Calculate division or result")
    print("2. Exit")
    # NOTE(review): int()/float() raise ValueError on non-numeric input;
    # there is no guard here, so bad input aborts the program -- confirm
    # whether that is acceptable.
    choice = int(input("Enter your choice: "))
    if choice == 1:
        num_subjects = int(input("Enter the number of subjects: "))
        total_marks = 0
        for i in range(num_subjects):
            marks = float(input(f"Enter the marks for subject {i+1}: "))
            total_marks += marks
        total_marks_obtained = total_marks
        # Each subject is assumed to be out of 100 marks.
        total_marks_possible = num_subjects * 100
        percentage = (total_marks_obtained / total_marks_possible) * 100
        division = calculate_division(percentage)
        print(f"You achieved {percentage:.2f}% and got {division}.")
    elif choice == 2:
        print("Exiting the program.")
        break
    else:
        print("Error !Invalid choice. Please select a valid option.")
| Chiro2002/SEM_5_SE | python_and_bash/divisionMarks.py | divisionMarks.py | py | 1,126 | python | en | code | 1 | github-code | 36 |
32422202346 | import wolframalpha
import pprint
import json
class WolframAlpha:
    """Thin wrapper around the wolframalpha client that turns a free-text
    question into a single plaintext answer string."""
    def __init__(self, appId):
        # Client authenticated with the given Wolfram|Alpha app id.
        self.__client = wolframalpha.Client(appId)
        self.__prettyPrinter = pprint.PrettyPrinter()
        self.__pp = self.__prettyPrinter.pprint
    def question(self, query):
        """Query Wolfram|Alpha and return a human-readable answer string.

        Falls back to canned messages for empty queries, transport errors,
        timeouts, and responses without a usable answer pod.
        """
        if len(query.strip()) == 0:
            return "Ask me a question."
        try:
            response = self.__client.query(query.strip())
        except Exception:
            return "Help! Tell SirDavidLudwig or Raine to fix me!"
        if response['@success'] == 'true':
            # Print the response to the terminal for debugging purposes
            # NOTE(review): the json.dumps result is discarded, so nothing
            # is printed on the success path -- only the fallback print in
            # the except branch ever fires. Confirm intended.
            try:
                json.dumps(response, indent=4)
            except Exception as e:
                print(response)
            # Search for primary pod
            for pod in response['pod']:
                if "@primary" in pod and pod["@primary"] == "true":
                    # 'subpod' may be a single dict or a list of dicts,
                    # depending on the response; take the first plaintext.
                    if type(pod['subpod']) == list:
                        return pod['subpod'][0]['plaintext']
                    else:
                        return pod['subpod']['plaintext']
            print("No primary found")
            # Fall back to the first pod's plaintext when none is primary.
            return response['pod'][0]['subpod']['plaintext']
        elif '@timedout' in response:
            return "I cannot tell unfortunately."
        return "I'm sorry, I don't understand what you are asking me here."
| IntercraftMC/InterCraftBot_Deprecated | src/modules/wolframalpha.py | wolframalpha.py | py | 1,122 | python | en | code | 0 | github-code | 36 |
29247550428 | # Asynchronous pipe example using chained Popen
import sys, subprocess, traceback, platform
import asyncoro
import asyncoro.asyncfile
def writer(apipe, inp, coro=None):
    """asyncoro coroutine: feed the contents of file *inp*, line by line,
    into the async pipe's stdin, then close stdin to signal EOF downstream.
    """
    # NOTE(review): fd is never closed explicitly; it is reclaimed by GC.
    fd = open(inp)
    while True:
        line = fd.readline()
        if not line:
            break
        # 'yield' suspends until asyncoro completes the async write.
        yield apipe.stdin.write(line.encode())
    apipe.stdin.close()
def line_reader(apipe, coro=None):
    """asyncoro coroutine: read lines from the pipe's output until EOF,
    printing each one.

    The line count is returned via ``raise StopIteration(nlines)`` --
    asyncoro's convention for returning a value from a coroutine.
    """
    nlines = 0
    while True:
        try:
            line = yield apipe.readline()
        except:
            asyncoro.logger.debug('read failed')
            asyncoro.logger.debug(traceback.format_exc())
            break
        # NOTE(review): the counter is bumped before the EOF check, so the
        # final empty read is counted too (nlines is one high) -- confirm.
        nlines += 1
        if not line:
            break
        print(line.decode())
    raise StopIteration(nlines)
# asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
# Build the "grep -i error | wc" pipeline; on Windows the Cygwin binaries
# and asyncoro's Popen wrapper are required for async pipe I/O.
if platform.system() == 'Windows':
    # asyncfile.Popen must be used instead of subprocess.Popen
    p1 = asyncoro.asyncfile.Popen([r'\cygwin64\bin\grep.exe', '-i', 'error'],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p2 = asyncoro.asyncfile.Popen([r'\cygwin64\bin\wc.exe'], stdin=p1.stdout, stdout=subprocess.PIPE)
    async_pipe = asyncoro.asyncfile.AsyncPipe(p1, p2)
    asyncoro.Coro(writer, async_pipe, r'\tmp\grep.inp')
    asyncoro.Coro(line_reader, async_pipe)
else:
    # On POSIX the plain subprocess.Popen objects can be wrapped directly.
    p1 = subprocess.Popen(['grep', '-i', 'error'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['wc'], stdin=p1.stdout, stdout=subprocess.PIPE)
    async_pipe = asyncoro.asyncfile.AsyncPipe(p1, p2)
    asyncoro.Coro(writer, async_pipe, '/var/log/syslog')
    asyncoro.Coro(line_reader, async_pipe)
    # alternate example:
    # p1 = subprocess.Popen(['tail', '-f', '/var/log/kern.log'], stdin=None, stdout=subprocess.PIPE)
    # p2 = subprocess.Popen(['grep', '--line-buffered', '-i', 'error'],
    #                       stdin=p1.stdout, stdout=subprocess.PIPE)
    # async_pipe = asyncoro.asyncfile.AsyncPipe(p2)
    # asyncoro.Coro(line_reader, async_pipe)
| pgiri/asyncoro | examples/pipe_grep.py | pipe_grep.py | py | 1,985 | python | en | code | 51 | github-code | 36 |
31728760633 | """
These settings are here to use during tests, because django requires them.
In a real-world use case, apps in this project are installed into other
Django applications, so these settings will not be used.
"""
# Debug/test-only flags; this settings module exists so Django can import.
DEBUG = True
TEST_MODE = True
# NOTE(review): several settings below are set to {} as throwaway
# placeholders (Django normally expects a bool/str/sequence here); the
# test suite apparently never exercises them -- confirm before reuse.
TRANSACTIONS_MANAGED = {}
USE_TZ = False
TIME_ZONE = {}
# Dummy secret key -- acceptable only because this is never deployed.
SECRET_KEY = 'SHHHHHH'
# Single local SQLite database file for the test run.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'milestones.db'
    },
}
# Minimal app set: the Django contrib apps plus the app under test.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'milestones'
)
MIDDLEWARE_CLASSES = {}
| openedx/edx-milestones | settings.py | settings.py | py | 660 | python | en | code | 4 | github-code | 36 |
3458969657 |
class Solution(object):
    def minNumber(self, nums):
        """Concatenate the integers in nums so the result reads as the
        smallest possible number.

        Sorts with a custom order in which a precedes b iff
        str(a) + str(b) < str(b) + str(a), then joins the pieces.

        :type nums: List[int]
        :rtype: str
        """
        if not nums:
            # Declared return type is str, so an empty input yields ""
            # (previously this returned [], breaking the contract).
            return ""
        ordered = self.merge_sort(nums)
        return "".join(str(ele) for ele in ordered)
    def merge_sort(self, nums):
        """Stable merge sort of nums under the concatenation order."""
        if len(nums) < 2:
            return nums
        mid = len(nums) // 2
        return self.merge(self.merge_sort(nums[:mid]), self.merge_sort(nums[mid:]))
    def merge(self, left, right):
        """Merge two lists already sorted under the concatenation order."""
        res = []
        i = j = 0
        # Index-based merge avoids the O(n) cost of list.pop(0) per element.
        while i < len(left) and j < len(right):
            if self.verse(left[i], right[j]):
                res.append(left[i])
                i += 1
            else:
                res.append(right[j])
                j += 1
        res.extend(left[i:])
        res.extend(right[j:])
        return res
    def verse(self, a, b):
        """Return True when a should come before b, i.e. when the
        concatenation a+b reads smaller than b+a."""
        a = str(a)
        b = str(b)
        return a + b < b + a
if __name__ == '__main__':
    # Quick manual checks; only the last assignment to nums is used.
    nums = [111,11,1]
    nums = [10,2]
    nums = [3, 30, 34, 5, 9]
    print(Solution().minNumber(nums))
| pi408637535/Algorithm | com/study/algorithm/offer/剑指 Offer 45. 把数组排成最小的数.py | 剑指 Offer 45. 把数组排成最小的数.py | py | 1,188 | python | en | code | 1 | github-code | 36 |
75220903145 | import argparse
from . import test_data
from .. import music
from .. import key_recognition
def parse_args():
    """Parse command-line options for the key-recognition test run.

    Returns the argparse namespace with ``verbose_factor_threshold`` and
    ``verbose_duration_threshold``, both optional floats (None when unset).
    """
    parser = argparse.ArgumentParser(
        description='Test getting key from sounds')
    threshold_options = (
        ('--verbose_factor_threshold', '-Vft',
         'Color red and print full mismatches list for tests with match factor lesser than VERBOSE_FACTOR_THRESHOLD'),  # noqa
        ('--verbose_duration_threshold', '-Vdt',
         'Print only mismatches with duration greater than VERBOSE_DURATION_THRESHOLD'),  # noqa
    )
    # Both options share everything except flag names and help text.
    for long_flag, short_flag, help_text in threshold_options:
        parser.add_argument(long_flag, short_flag,
                            required=False,
                            help=help_text,
                            type=float)
    return parser.parse_args()
class Mismatch(music.Key):
    """A computed key that disagrees with the expected (model) key.

    Extends music.Key with the expected note/kind so each mismatch can be
    reported alongside the computed value.
    """

    # Note names indexed by semitone; 'H' is the German name for B.
    _NOTE_SYMBOLS = ('C', 'C#', 'D', 'D#', 'E',
                     'F', 'F#', 'G', 'G#', 'A', 'A#', 'H')

    def __init__(self, note_computed, note_model, kind_computed, kind_model,
                 timestamp, duration_ms):
        super().__init__(note_computed, timestamp, duration_ms, kind_computed)
        self.note_model = note_model
        self.kind_model = kind_model

    @property
    def key_model(self):
        """Human-readable "<note>-<kind>" label of the expected key."""
        if self.note_model is None:
            name = 'None'
        else:
            name = self._NOTE_SYMBOLS[self.note_model]
        return f"{name}-{self.kind_model}"

    def __str__(self):
        start = round(self.timestamp, 3)
        end = round(self.end_timestamp, 3)
        return f"{start} - {end}: Expected {self.key_model}, got {super().__str__()}"  # noqa
def run_tests():
    """Compare key_recognition output against every stored test model and
    print a colored pass/fail line per test file."""
    # NOTE(review): parse_args() is called twice (argv parsed twice), and
    # the two thresholds below are computed but never used in this
    # function -- confirm whether verbose reporting was removed.
    verbose_factor_threshold = parse_args().verbose_factor_threshold
    verbose_duration_threshold = parse_args().verbose_duration_threshold
    if verbose_factor_threshold is None:
        verbose_factor_threshold = 0.5
    if verbose_duration_threshold is None:
        verbose_duration_threshold = 0.1
    tests = test_data.get_all_test_models()
    print("-----------TONATIONS TEST-----------------")
    for test in tests:
        key = key_recognition.get_key(test.sounds)
        match = key == test.key
        if match:
            # Green ANSI escape for a match, reset with \033[0m.
            color = '\033[92m'
            print(f"{test.file_path}: {color} {key} matches {test.key} \033[0m") # noqa
        else:
            # Red ANSI escape for a mismatch.
            color = '\033[91m'
            print(f"{test.file_path}: {color}{key} doesn't match {test.key} \033[0m") # noqa
| JakubBilski/tonations-recognition | src/tests/key_from_sounds_test.py | key_from_sounds_test.py | py | 2,381 | python | en | code | 1 | github-code | 36 |
25552501869 | import turtle
import random
import time
# On-console instructions.
print('Welcome to Flappy Bird')
print('Press UP arrow or w to jump')
print('Press Space Bar to pause / play')
print('Press Esc to exit anytime')
# Create the high-score file with an initial score of 0 on first run;
# an existing file is left untouched (mode 'x' raises FileExistsError).
try:
    f = open('flappy_bird.txt', 'x')
    f.close()
    f = open('flappy_bird.txt', 'w')
    f.write('0')
    f.close()
except FileExistsError:
    pass
# Global game state shared by the handlers and loops below.
bird_jump_time = time.time()
bird_jump = False
bird_animation_time = time.time()
bird_animation_index = 0
wait = False
game_start = False
score = 0
real_score = 0
# 450x600 window with manual frame updates (tracer(0)) and gif sprites.
screen = turtle.Screen()
screen.setup(450, 600)
screen.title('Flappy Bird')
screen.bgpic('bg2.gif')
screen.tracer(0)
screen.register_shape('ground1.gif')
screen.register_shape('heading1.gif')
screen.register_shape('bird_down.gif')
# Flap-animation frames, cycled by the game loops.
images = ('bird1.gif', 'bird2.gif', 'bird3.gif', 'bird4.gif')
for i in images:
    screen.register_shape(i)
# 'pen' draws UI panels/buttons, 't' the pause label, 'score_pen' the score.
pen = turtle.Turtle()
pen.penup()
pen.hideturtle()
pen.pencolor('#111111')
pen.fillcolor('#FF3333')
pen.pensize(3)
t = turtle.Turtle()
t.penup()
t.hideturtle()
score_pen = turtle.Turtle()
score_pen.penup()
score_pen.hideturtle()
score_pen.pencolor('brown')
heading = turtle.Turtle()
heading.penup()
heading.goto(-50, 150)
heading.shape('heading1.gif')
# Sprites: the bird, two pipe pairs (a/b; 1 = top, 2 = bottom), the ground.
bird = turtle.Turtle()
wall_a1 = turtle.Turtle()
wall_a2 = turtle.Turtle()
wall_b1 = turtle.Turtle()
wall_b2 = turtle.Turtle()
ground = turtle.Turtle()
ground.shape('ground1.gif')
ground.penup()
ground.setpos(0, -260)
bird.penup()
bird.goto(-150, -150)
wall_a1.pencolor('brown')
wall_a2.pencolor('brown')
wall_b1.pencolor('brown')
wall_b2.pencolor('brown')
wall_a1.pensize(3)
wall_a2.pensize(3)
wall_b1.pensize(3)
wall_b2.pensize(3)
wall_a1.penup()
wall_a2.penup()
wall_b1.penup()
wall_b2.penup()
wall_a1.hideturtle()
wall_a2.hideturtle()
wall_b1.hideturtle()
wall_b2.hideturtle()
# Scroll offsets: gx for the ground, x1/x2 pipe x-positions; each pipe
# pair leaves a 150 px vertical gap (y1+y2+150 == 600, likewise y3/y4).
gx = 0
x1 = 700
x2 = 1050
y1 = random.randint(100, 300)
y2 = 600 - y1 - 150
y3 = random.randint(100, 300)
y4 = 600 - y3 - 150
def rect(t, l, b):
    """Draw a filled green pipe rectangle (width l, height b) with turtle t.

    The turtle finishes back at its start point with the pen up and its
    heading unchanged (four 90-degree right turns make a full rotation).
    """
    t.fillcolor('#00CC00')
    t.pencolor('#223311')
    t.pendown()
    t.begin_fill()
    # Trace the four sides, alternating the long and short edge.
    for side in (l, b, l, b):
        t.forward(side)
        t.right(90)
    t.end_fill()
    t.penup()
def up():
    """Key handler (Up/w): start a jump, only while a round is running
    and the game is not paused."""
    global bird_jump, bird_jump_time, game_start
    if game_start and wait is False:
        bird_jump = True
        bird_jump_time = time.time()
        # Immediate upward kick; main_game keeps lifting while bird_jump
        # stays True (for ~0.1 s after bird_jump_time).
        bird.sety(bird.ycor() + 10)
def exit_game():
    """Key handler (Esc): close the turtle window and terminate."""
    screen.bye()
    exit(0)
def pause():
    """Key handler (space): toggle the pause flag and the on-screen label."""
    global wait
    if wait:
        # Un-pause: remove the "Game Paused" text.
        t.clear()
        wait = False
    elif wait is False:
        wait = True
        t.goto(-80, -280)
        t.write('Game Paused', font=('Comic Sans Ms', 20, 'normal'))
# Bind the controls: Up/w jump (on both press and release), space pauses,
# Esc quits; listen() gives the window keyboard focus.
screen.onkeypress(up, 'Up')
screen.onkeyrelease(up, 'Up')
screen.onkeypress(up, 'w')
screen.onkeyrelease(up, 'w')
screen.onkey(pause, 'space')
screen.onkey(exit_game, 'Escape')
screen.listen()
def replay_check(x, y):
    """Mouse handler on the game-over screen: restart when the click lands
    inside the Replay button (x in [-50, 50], y in [-180, -150])."""
    if x <= 50 and x >= -50:
        if y <= -150 and y >= -180:
            welcome_screen()
def end_screen():
    """Game-over sequence: drop the bird, persist a new high score, draw
    the score/medal panel with a Replay button, then wait for clicks."""
    bird.shape('bird_down.gif')
    medals = 'none'
    score_pen.clear()
    global real_score, high_score, wait
    # Persist a new best score to flappy_bird.txt.
    if real_score > int(high_score):
        high_score = real_score
        f = open('flappy_bird.txt', 'w')
        f.write(str(real_score))
        f.close()
    # Animate the bird falling down to the ground.
    while bird.ycor() >= -170:
        bird.goto(bird.xcor(), bird.ycor() - 5)
        screen.update()
        time.sleep(0.01)
    bird.hideturtle()
    # Medal tiers by final score (highest matching tier wins).
    if real_score >= 25:
        medals = 'bronze !'
    if real_score >= 50:
        medals = 'silver !!'
    if real_score >= 100:
        medals = 'gold !!!'
    # "Game Over" banner.
    pen.goto(-200, 200)
    pen.fillcolor('brown')
    pen.pencolor('black')
    box_option_rect(pen, 400, 50)
    pen.goto(-140, 150)
    pen.pencolor('white')
    pen.write('Game Over', font=('ravie', 30, 'normal'))
    # Score / best-score / medal panel.
    pen.goto(-150, 100)
    pen.fillcolor('yellow')
    pen.pencolor('brown')
    box_option_rect(pen, 300, 200)
    pen.goto(-100, 30)
    pen.write('Your Score : ', font=('Comic Sans Ms', 20, 'normal'))
    pen.goto(70, 30)
    pen.write(str(real_score), font=('Comic Sans Ms', 20, 'normal'))
    pen.goto(-100, -20)
    pen.write('Best Score : ', font=('Comic Sans Ms', 20, 'normal'))
    pen.goto(70, -20)
    pen.write(str(high_score), font=('Comic Sans Ms', 20, 'normal'))
    pen.goto(-100, -70)
    pen.write('Medals : ', font=('Bauhaus 93', 20, 'normal'))
    pen.goto(20, -70)
    pen.write(medals, font=('Comic Sans Ms', 20, 'normal'))
    # Replay button (outer red box plus inner white border).
    pen.goto(-50, -150)
    pen.fillcolor('#FF3333')
    pen.pencolor('black')
    box_option_rect(pen, 100, 30)
    pen.pencolor('white')
    pen.goto(-47, -153)
    box_option_rect(pen, 94, 24)
    pen.goto(-29, -179)
    pen.write('Replay', font=('Comic Sans Ms', 15, 'normal'))
    screen.onclick(replay_check)
    screen.mainloop()
def main_game():
    """Run one round: scroll the ground and pipes, animate and move the
    bird, track the score, and hand off to end_screen() on any collision.

    Reads and writes the module-level game state (score, pipe positions,
    jump/animation flags).
    """
    global score, real_score, high_score, wait
    score = 0
    real_score = 0
    delay_time = 0.01
    heading.hideturtle()
    bird.showturtle()
    global bird_jump, bird_animation_time, bird_animation_index, y1, y2, y3, y4, x1, x2, gx
    score_pen.goto(-10, 250)
    score_pen.write(real_score, font=('Comic Sans Ms', 20, 'normal'))
    screen.update()
    while True:
        score_pen.clear()
        score_pen.write(real_score, font=('Comic Sans Ms', 20, 'normal'))
        # NOTE(review): only the difficulty update is gated by `wait`; the
        # rest of the loop keeps running while paused -- confirm intended.
        if wait is False:
            # Speed up with the score, but never go negative:
            # time.sleep() raises ValueError for negative values, which
            # previously crashed the game once real_score exceeded 100.
            delay_time = max(0.01 - real_score * 0.0001, 0.0)
        # One point per 15 loop iterations survived past a pipe.
        real_score = int(score / 15)
        # Scroll the ground, wrapping its offset.
        ground.clear()
        ground.setpos(gx, -260)
        gx -= 4.5
        if gx <= -20:
            gx = 20
        # Collision with pipe pair A (bird inside the pipe's x-range and
        # outside the 150 px gap, with a small tolerance).
        if bird.xcor() <= x1 + 100 + 30 and bird.xcor() >= x1 - 30:
            if bird.ycor() >= 300 - y1 - 20 or bird.ycor() <= 300 - y1 - 150 + 20:
                end_screen()
                # screen.bye ()
        # Collision with pipe pair B.
        if bird.xcor() <= x2 + 100 + 30 and bird.xcor() >= x2 - 30:
            if bird.ycor() >= 300 - y3 - 20 or bird.ycor() <= 300 - y3 - 150 + 20:
                end_screen()
        # Collision with ground or ceiling.
        if bird.ycor() <= -300 + 60 or bird.ycor() >= 300 - 20:
            end_screen()
        # Count score ticks once a pipe has scrolled past the bird.
        if x1 < -150 or x2 < -150:
            if bird.xcor() > x1 + 120 or bird.xcor() > x2 + 120:
                score += 1
        # Cycle the flap-animation frame every 0.1 s.
        if time.time() - bird_animation_time >= 0.1:
            bird.shape(images[bird_animation_index])
            if bird_animation_index == 3:
                bird_animation_index = 0
            else:
                bird_animation_index += 1
            bird_animation_time = time.time()
        # A jump lifts the bird for 0.1 s; afterwards gravity pulls it down.
        if time.time() - bird_jump_time >= 0.1:
            bird_jump = False
        if bird_jump is False:
            bird.sety(bird.ycor() - 3)
        if bird_jump:
            bird.sety(bird.ycor() + 2)
        # Redraw both pipe pairs at their new x positions; wrap a pair back
        # to the right edge with a fresh random gap once it leaves the screen.
        wall_a1.clear()
        wall_a2.clear()
        wall_b1.clear()
        wall_b2.clear()
        x1 -= 5
        x2 -= 5
        if x1 <= -350:
            x1 = 350
            y1 = random.randint(100, 300)
            y2 = 600 - y1 - 150
        if x2 <= -350:
            x2 = 350
            y3 = random.randint(100, 300)
            y4 = 600 - y3 - 150
        wall_a1.goto(x1, 300)
        rect(wall_a1, 100, y1 - 50)
        wall_a1.seth(0)
        wall_a1.goto(x1 - 10, 300 - y1 + 50)
        rect(wall_a1, 120, 50)
        wall_a2.goto(x1, -210)
        wall_a2.seth(90)
        rect(wall_a2, y2 - 50 - 90, 100)
        wall_a2.goto(x1 - 10, -(300 - y2 + 50))
        rect(wall_a2, 50, 120)
        wall_b1.goto(x2, 300)
        rect(wall_b1, 100, y3 - 50)
        wall_b1.seth(0)
        wall_b1.goto(x2 - 10, 300 - y3 + 50)
        rect(wall_b1, 120, 50)
        wall_b2.goto(x2, -210)
        wall_b2.seth(90)
        rect(wall_b2, y4 - 50 - 90, 100)
        wall_b2.goto(x2 - 10, -(300 - y4 + 50))
        rect(wall_b2, 50, 120)
        time.sleep(delay_time)
        screen.update()
def box_option_rect(t, l, b):
    """Outline and fill an l-by-b rectangle with turtle t, using whatever
    pen/fill colors are currently set on t. Ends with the pen up."""
    t.begin_fill()
    t.pendown()
    # Four sides, alternating the long and short edge, turning right each time.
    for edge in (l, b, l, b):
        t.forward(edge)
        t.right(90)
    t.end_fill()
    t.penup()
def main_game_start(x, y):
    """Mouse handler on the welcome screen: the START button launches a
    round, the SCORE button pops up the stored high score."""
    global game_start
    if x >= -140 and x <= -40:
        if y >= -180 and y <= -150:
            # START button clicked: clear the menu and begin a round.
            game_start = True
            pen.clear()
            bird.goto(-150, 150)
            heading.hideturtle()
            main_game()
    elif x >= 40 and x <= 140:
        if y >= -180 and y <= -150:
            # SCORE button clicked: draw the high-score panel.
            pen.goto(-100, 50)
            pen.pencolor('brown')
            pen.fillcolor('yellow')
            box_option_rect(pen, 200, 100)
            pen.goto(-80, -10)
            pen.write('High Score : ', font=('Comic Sans Ms', 15, 'bold'))
            pen.goto(50, -10)
            pen.write(high_score, font=('Comic Sans Ms', 15, 'bold'))
def welcome_screen():
    """Show the title screen: load the high score, reset the round state,
    draw the START/SCORE buttons, and animate the bobbing bird forever."""
    global high_score
    # Load the persisted best score (kept as a string, compared via int()).
    f = open('flappy_bird.txt', 'r')
    high_score = f.read()
    f.close()
    global x1, x2
    # Reset pipe positions and wipe any leftovers from a previous round.
    x1 = 350
    x2 = 700
    score_pen.clear()
    pen.clear()
    wall_a1.clear()
    wall_a2.clear()
    wall_b1.clear()
    wall_b2.clear()
    global game_start
    game_start = False
    dir = +1
    gx = 0
    global bird_animation_time, bird_animation_index
    bird.setpos(140, 150)
    bird.showturtle()
    heading.showturtle()
    heading.setpos(-50, 150)
    # Draw the START and SCORE buttons (red boxes with white inner borders).
    pen.pencolor('black')
    pen.fillcolor('#FF3333')
    pen.goto(-140, -150)
    box_option_rect(pen, 100, 30)
    pen.goto(40, -150)
    box_option_rect(pen, 100, 30)
    pen.pencolor('white')
    pen.goto(-137, -153)
    box_option_rect(pen, 94, 24)
    pen.goto(43, -153)
    box_option_rect(pen, 94, 24)
    pen.pencolor('white')
    pen.goto(-124, -179)
    pen.write('START', font=('Comic Sans Ms', 15, 'normal'))
    pen.goto(56, -179)
    pen.write('SCORE', font=('Comic Sans Ms', 15, 'normal'))
    pen.goto(-10, 250)
    # Idle animation: scroll the ground, flap the bird, bob bird + heading.
    while True:
        ground.clear()
        ground.setpos(gx, -260)
        gx -= 4.5
        if gx <= -20:
            gx = 20
        if time.time() - bird_animation_time >= 0.1:
            bird.shape(images[bird_animation_index])
            if bird_animation_index == 3:
                bird_animation_index = 0
            else:
                bird_animation_index += 1
            bird_animation_time = time.time()
        # Reverse the bobbing direction at the 140..160 band edges.
        if bird.ycor() <= 140 or bird.ycor() >= 160:
            dir *= -1
        bird.sety(bird.ycor() + dir / 2)
        heading.sety(heading.ycor() + dir / 2)
        screen.update()
        time.sleep(0.01)
    # NOTE(review): this line sits after a `while True` with no break, so
    # it is unreachable as written -- confirm it was meant to be at module
    # level (clicks would otherwise never be routed to main_game_start here).
    screen.onclick(main_game_start)
# Entry point: show the welcome screen (runs its own animation loop forever).
welcome_screen()
# screen.mainloop () | tank-king/My-Games | python-turtle/flappy bird/bird.py | bird.py | py | 11,072 | python | en | code | 4 | github-code | 36 |
8513285906 | from flet import *;
def settings_subview(page):
#
from ..components.title import title;
from ..components.label_field import label_field;
from ..components.fill_btn import fill_btn;
from ..methods.color_method import inverse_color_method;
from ..alerts.change_color_alert import change_color_alert;
from ..settings.settings import Settings
def on_click(any=None):
ans = change_color_alert(page, container, "Oops!", "Please select a valid option.")
if ans:
Settings(page)
#
mydropdown = Dropdown(
options=[
dropdown.Option("AMBER"),
dropdown.Option("BLUE"),
dropdown.Option("RED"),
dropdown.Option("GREEN"),
],
border="underline",
label="Select one of these:",
);
mydropdown.value = inverse_color_method(page.client_storage.get("color"));
container = Container(
content=Column(
[
title("Settings"),
mydropdown,
fill_btn(page, "Change", icons.UPDATE, on_click, True),
Text("Developed By On3l7d15h", size=12, color=colors.with_opacity(.5, colors.PRIMARY))
],
expand=5,
horizontal_alignment=CrossAxisAlignment.CENTER,
alignment=MainAxisAlignment.CENTER
)
)
return container; | On3l7d15h/Flet_EWallet | functions/subviews/settings_subview.py | settings_subview.py | py | 1,385 | python | en | code | 1 | github-code | 36 |
73476726505 | '''simple port scan to scan all 65353 ports Developed by iamth3g33k17
'''
import socket
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host = input("[*] Please enter your host: ")
def Portscanner(port):
if sock.connect_ex((host,port)):
print("[-]Port is %d closed" %(port))
else:
print("[*]Port is %d open" %(port))
for port in range(1,65354):
Portscanner(port)
| Dave360-crypto/Practical-Security | Ethical_ hackin/Vulnerability_scanning/Port_Scanning/port_scanner_to_scan_all_tcp_ports.py | port_scanner_to_scan_all_tcp_ports.py | py | 404 | python | en | code | 0 | github-code | 36 |
39268376088 | class Solution:
# @param s, a string
# @return an integer
def minCut(self, s):
n = len(s)
if (n == 0):
return -1
isP = [[False for _ in range(n)] for _ in range(n)]
# isP[i,j] represent wether s[i:j+1] is Palindrome
for i in range(n):
isP[i][i] = True
for i in range(n-1, -1, -1):
for j in range(i+1, n):
if j == i+1:
isP[i][j] = (s[i] == s[j])
else:
isP[i][j] = (isP[i+1][j-1] and (s[i] == s[j]))
#print isP
dp = [0]*n
for i in range(n):
if isP[0][i]:
dp[i] = 0
else:
dp[i] = i
#dp[i] reprent the mincut for s[0:i+1]
for i in range(1, n):
for j in range(i, 0, -1):
if isP[j][i]:
dp[i] = min(dp[i], dp[j-1] + 1)
#print dp
return dp[-1]
| JessCL/LintCode | 108_palindrome-partitioning-ii/palindrome-partitioning-ii.py | palindrome-partitioning-ii.py | py | 1,026 | python | en | code | 0 | github-code | 36 |
506094790 | """
Analise de vizinhanca
"""
def slope_neighbor(dem_folder, neightable, cellid, neighbor, outfolder):
"""
Join file with neighbor files
"""
import os
from glass.pys.oss import lst_ff, fprop
from glass.rd import tbl_to_obj
from glass.rst.mos import rsts_to_mosaic
dems = lst_ff(dem_folder, file_format='.tif')
neigh_df = tbl_to_obj(neightable)
for dem in dems:
demfullname = fprop(dem, 'fn')
demnamelst = demfullname.split('_')
demname = "_".join(demnamelst[:-1])
# Get cell_id from raster file
cid = int(demnamelst[-1])
# Get neighbors rasters
fneigh = neigh_df[neigh_df[cellid] == cid]
vizinhos = fneigh[neighbor].tolist()
rsts_viz = [os.path.join(
dem_folder, f"{demname}_{str(v)}.tif"
) for v in vizinhos]
# Get mosaic
mos = rsts_to_mosaic([dem] + rsts_viz, os.path.join(
outfolder, demfullname + '.tif'
), api='rasterio')
return outfolder
| jasp382/glass | glass/dtt/neigh.py | neigh.py | py | 1,043 | python | en | code | 2 | github-code | 36 |
73065938023 | # Script: 14 - Python Malware Analysis
# Author: Robert Gregor
# Date of latest revision: 302030FMAR23
# Objectives
# Perform an analysis of the Python-based code given below
# Insert comments into each line of the script explaining in your own words what the virus is doing on this line
# Insert comments above each function explaining what the purpose of this function is and what it hopes to carry out
# Insert comments above the final three lines explaining how the functions are called and what this script appears to do
# My Sources:
# [Python | os.listdir() method](https://www.geeksforgeeks.org/python-os-listdir-method/)
# [Python For Loops](https://www.w3schools.com/python/python_for_loops.asp)
# [Python | os.path.isdir() method](https://www.geeksforgeeks.org/python-os-path-isdir-method/)
# [Python List extend()](https://www.programiz.com/python-programming/methods/list/extend)
# [Python break statement](https://www.tutorialspoint.com/python/python_break_statement.htm)
# [Python: os.path.abspath() method with example](https://www.geeksforgeeks.org/python-os-path-abspath-method-with-example/)
# [Python File close() Method](https://www.tutorialspoint.com/python/file_close.htm)
# [Python File Write](https://www.w3schools.com/python/python_file_write.asp)
# Main
#!/usr/bin/python3
# Imports os and datetime modules
import os
import datetime
# Sets the SIGNATURE variable equal to the string VIRUS
SIGNATURE = "VIRUS"
# Function takes path variable as parameter and searches for files to target
def locate(path):
# Declares empty list
files_targeted = []
# Sets filelist equal to list of all files and directories in path variable
filelist = os.listdir(path)
# for loop used to apply coonditionals below to each item in filelist
for fname in filelist:
# Conditional checks if path is an existing directory or not
if os.path.isdir(path+"/"+fname):
# If true, execute the locate function to append additional files to end of list
files_targeted.extend(locate(path+"/"+fname))
# Conditional checks if any of the files are python files
elif fname[-3:] == ".py":
# If true, set the infected variable equal to False
infected = False
# for loop used to open each file in the given path
for line in open(path+"/"+fname):
# Conditional checks if SIGNATURE string "VIRUS" is in any line in file
if SIGNATURE in line:
# If true, sets infected variable to True and exits current for loop
infected = True
break
# Conditional check if infected is False
if infected == False:
# If true, add file to given directory for targetting
files_targeted.append(path+"/"+fname)
# return all files that are not yet infected
return files_targeted
# Function used to infect all files identified by locate function
def infect(files_targeted):
# Set virus variable equal to pathname to path passed as parameter
virus = open(os.path.abspath(__file__))
# Sets virusstring variable to an empty string
virusstring = ""
# for loop used to enumerate through each line in virus variable
for i,line in enumerate(virus):
# Conditional checks if line is greater then or equl to zero AND less then 39
if 0 <= i < 39:
# If true, add line to virusstring
virusstring += line
# Closed virus file
virus.close
# for loop used to executes commands on each item in files_targeted
for fname in files_targeted:
# Opens file
f = open(fname)
# Set temp variable equal to contents of file opened
temp = f.read()
# Closes file
f.close()
# Open file to write to
f = open(fname,"w")
# Write over previous file with current file
f.write(virusstring + temp)
# Closes file
f.close()
# Function used to print "You have been hacked" at a specified time
def detonate():
# Conditional checks if current month is equal to 5 and current date is equal to 9
if datetime.datetime.now().month == 5 and datetime.datetime.now().day == 9:
# If true, prints statement
print("You have been hacked")
# Declares file_targeted variable equal to current directory files
files_targeted = locate(os.path.abspath(""))
# Runs infect function on files_targeted
infect(files_targeted)
# Runs detonate function
detonate()
# End | RobG-11/Ops301-Code-Challenges | 14_malware_analysis.py | 14_malware_analysis.py | py | 4,617 | python | en | code | 0 | github-code | 36 |
11342229951 | import io
import os
import random
from PIL import Image
import imageio
import requests
import seventv
def get_response(message: str):
p_message = message.lower()
if p_message[:3] == ("add"):
url = p_message.split(" ")[1]
return addGif(url)
if p_message == "help":
return helpText()
return 'I didn\'t understand what you wrote. Try typing "help".'
def helpText():
return "`?add <7tv url> to add a 7tv emoji to your server`"
def addGif(url):
webpUrl = seventv.get_webp_url(url)
try:
webp_data = requests.get(webpUrl).content
except requests.exceptions.RequestException:
return "Invalid URL"
# Extract the name from the url and replace .webp extension with .gif
gif_name = seventv.get_emote_name(url)
# Open webp image with PIL
image = Image.open(io.BytesIO(webp_data))
# If image is an animated webp, PIL will open it as a sequence of frames
frames = []
try:
while True:
frames.append(image.copy())
image.seek(len(frames)) # Skip to next frame
except EOFError:
pass # We have read all the frames from the image now
# Create a byte buffer and save the gif data into it
gif_data_buffer = io.BytesIO()
# Set the duration of each frame based on the original image's frame rate
duration_per_frame = image.info.get(
"duration", 100
) # Default to 100ms if no duration is set
imageio.mimwrite(
gif_data_buffer, frames, "GIF", duration=duration_per_frame, loop=0
)
# Get the gif data as bytes
gif_data = gif_data_buffer.getvalue()
return gif_data, gif_name
| JimenezJC/discord-7tv-emoji-app | responses.py | responses.py | py | 1,667 | python | en | code | 1 | github-code | 36 |
39056132329 | from numpy import genfromtxt,linspace, meshgrid,c_,where
from mudpy.view import plot_grd
from matplotlib import pyplot as plt
from scipy.interpolate import griddata
fault=genfromtxt('/Users/dmelgar/Slip_inv/Melinka_usgs/output/inverse_models/models/gsi_vr2.6.0011.inv.total')
grdfile='/Users/dmelgar/code/GMT/Melinka/lock.grd'
Xlock,Ylock,lock=plot_grd(grdfile,[0,1],plt.cm.magma,flip_lon=False,return_data=True)
#Interpolate
x=linspace(-75,-73,100)
y=linspace(-44,-42,100)
X,Y=meshgrid(x,y)
z = griddata(fault[:,1:3], (fault[:,8]**2+fault[:,9]**2)**0.5, (X, Y), method='linear',fill_value=0)
#get 1m contour
plt.contour(x, y, z,levels=[1,2,3,4,5,6],lw=0.5)
cs=plt.contour(x, y, z,levels=[1],lw=10)
plt.xlim([-75,-73])
plt.ylim([-44,-42])
path=cs.collections[0].get_paths()
p=path[1]
points=c_[Xlock.ravel(),Ylock.ravel()]
i=where(p.contains_points(points)==True)[0]
m=lock.ravel()[i].mean()
plt.title('Mean locking inside 1m contour is %.2f' % (m))
plt.figure()
plt.scatter(points[i,0],points[i,1],c=lock.ravel()[i],lw=0,vmin=0,vmax=1.0,cmap=plt.cm.magma)
plt.colorbar()
plt.title('Locking inside 1m contour')
plt.xlim([-75,-73])
plt.ylim([-44,-42])
plt.show() | Ogweno/mylife | Melinka/get_avg_locking.py | get_avg_locking.py | py | 1,173 | python | en | code | 0 | github-code | 36 |
19452480257 | class Solution:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
m, n = len(matrix), len(matrix[0])
i, j = m - 1, 0
while i >= 0 and j < n:
val = matrix[i][j]
if val == target:
return True
elif val > target:
i -= 1
else:
j += 1
return False | whocaresustc/Leetcode-Summary | 240. Search a 2D Matrix II.py | 240. Search a 2D Matrix II.py | py | 537 | python | en | code | 0 | github-code | 36 |
5216881963 | #!/usr/bin/env python3
import sys
from functools import reduce
import math
def sum(nums):
return reduce(lambda x, y: x + y, nums, 0)
def avg(nums):
return sum(nums) / len(nums)
def stddev(nums):
numerator = reduce(lambda x, y: (y - avg(nums))*(y - avg(nums)) + x, nums, 0)
return math.sqrt(numerator / (len(nums) - 1))
def summary(filename):
nums = []
with open(filename, 'r') as f:
for line in f:
try:
nums.append(float(line))
except ValueError:
continue
return (sum(nums), avg(nums), stddev(nums))
def main():
filenames = sys.argv[1:]
for f in filenames:
s = summary(f)
print(f'File: {f} Sum: {s[0]:.6f} Average: {s[1]:.6f} Stddev: {s[2]:.6f}')
if __name__ == "__main__":
main()
| lawrencetheabhorrence/Data-Analysis-2020 | hy-data-analysis-with-python-2020/part02-e05_summary/src/summary.py | summary.py | py | 759 | python | en | code | 0 | github-code | 36 |
15193992527 | __author__ = "Mathijs Maijer"
__email__ = "m.f.maijer@gmail.com"
class PropertyFunction(object):
'''
Class used to specify property functions,
that are meta analysis functions that can be ran during any iteration
to run custom values.
E.g: Calculate the network cluster coeffecient every 5 iterations
'''
def __init__(self, name, function, iteration_interval, params):
'''
Initialise the property function
:param str name: The name of the property function
:param function function: the function to run
:param int iteration_interval: The interval between executions of the function
:param dict params: A dictionary containing the arguments to provide for the function
'''
self.name = name
self.fun = function
self.iteration_interval = iteration_interval
self.params = params
def execute(self):
'''
Executes the function with the specified arguments and returns the result
:return: The result of the property function
'''
return self.fun(**self.params)
| Tensaiz/DyNSimF | dynsimf/models/components/PropertyFunction.py | PropertyFunction.py | py | 1,121 | python | en | code | 4 | github-code | 36 |
27632896757 | import socket
import gui.main_gui as main_gui
import configparser
import lib.package
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from game_class import Game_class
""" Загрузка параметров """
config = configparser.ConfigParser()
config.read("config.ini")
# Main application window
class MainApp(QtWidgets.QMainWindow, main_gui.Ui_MainWindow):
    """Main client window: lobby chat plus connection to a game room.

    All server communication goes over a single UDP socket using the small
    text protocol implemented in ``lib.package``.
    """
    def __init__(self, login):
        super().__init__()
        self.setupUi(self)
        self.login = login
        self.label.setText("Ваш логин: " + login)
        self.lineEdit.setText(config["Server"]["host"])
        self.lineEdit_4.setText(config["Server"]["port_client"])
        self.pushButton.clicked.connect(self.connect)
        self.pushButton_2.clicked.connect(self.set_config)
        self.pushButton_3.clicked.connect(self.post_message)
        self.pushButton_4.clicked.connect(self.get_message)
        self.textEdit.setReadOnly(True)
        self.comboBox.addItem("маг")
        self.comboBox.addItem("воин")
        self.comboBox.addItem("убийца")
        # UDP socket used for both sending requests and receiving replies;
        # bound to an OS-chosen free port.
        self.sor = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sor.bind(('', 0))
        self.get_message()
    def message(self, icon, title, text, button):
        """Show a modal message box with the given icon, title, text and button."""
        msg = QMessageBox()
        msg.setIcon(icon)
        msg.setWindowTitle(title)
        msg.setText(text)
        msg.setStandardButtons(button)
        msg.exec_()
    def connect(self):
        """Ask the server to join a room; on success start the game client."""
        # Build the request
        data = lib.package.set_package(command='c', login=self.login)
        # Send the request to the server
        self.sor.sendto(data.encode('utf-8'), (config["Server"]["host"], int(config["Server"]["port_client"])))
        # Receive the server's reply (blocking)
        data = self.sor.recv(1024).decode('utf-8')
        # '0' means the server accepted the room connection
        if data == '0':
            self.close()
            type = self.comboBox.currentText()
            game = Game_class(self.login, type)
            game.start()
        else:
            self.message(QMessageBox.Critical, "Ошибка", "Ошибка ", QMessageBox.Cancel)
    def set_config(self):
        """Persist the host/port entered in the UI back to config.ini."""
        host = self.lineEdit.text()
        port_client = self.lineEdit_4.text()
        config.set('Server', 'host', str(host))
        config.set('Server', 'port_client', str(port_client))
        with open('config.ini', 'w') as configfile:
            config.write(configfile)
        print("done")
    def post_message(self):
        """Send the chat message from the input field, then refresh the view."""
        message = self.lineEdit_5.text()
        # Build the request
        data = lib.package.set_package(command='m', login=self.login, message=message)
        # Send the request to the server
        self.sor.sendto(data.encode('utf-8'), (config["Server"]["host"], int(config["Server"]["port_client"])))
        # Refresh the chat window and clear the input field
        self.get_message()
        self.lineEdit_5.setText("")
    def get_message(self):
        """Fetch the chat history from the server and render it as HTML."""
        # Build the request
        data = lib.package.set_package(command='g')
        # Send the request to the server
        self.sor.sendto(data.encode('utf-8'), (config["Server"]["host"], int(config["Server"]["port_client"])))
        # Receive the server's reply
        data = self.sor.recv(1024).decode('utf-8')
        # Decode the server's reply
        map = lib.package.get_package(data)
        data = map["message"]
        # Re-format the raw payload into HTML.  Wire format (as parsed here):
        # '/'-separated alternation of login and text fields, terminated by
        # '!'; flag 0 = currently reading a login, flag 1 = reading text.
        # NOTE(review): own messages are colored red, others green — confirm
        # field meaning against the server implementation.
        message = ""
        login = ""
        text = ""
        flag = 0
        for i in data:
            if i == "/":
                if flag == 0:
                    flag = 1
                    if self.login == login:
                        message += "<font color=\"Red\">" + login + "</font>: "
                    else:
                        message += "<font color=\"Green\">" + login + "</font>: "
                    login = ""
                else:
                    flag = 0
                    message += "<font color=\"Black\">" + text + "</font><br>"
                    text = ""
                continue
            if i == "!":
                break
            if flag == 0:
                login += i
            if flag == 1:
                text += i
        # Render the assembled HTML in the chat view
        self.textEdit.setHtml(message)
22355127515 | import ast
import os
import pathlib
import tempfile
from typing import Tuple
from mlrun import MLClientCtx
from mlrun.package.packagers.python_standard_library_packagers import (
BoolPackager,
BytearrayPackager,
BytesPackager,
DictPackager,
FloatPackager,
FrozensetPackager,
IntPackager,
ListPackager,
NonePackager,
PathPackager,
SetPackager,
StrPackager,
TuplePackager,
)
from mlrun.package.utils import ArchiveSupportedFormat, StructFileSupportedFormat
from tests.package.packager_tester import (
COMMON_OBJECT_INSTRUCTIONS,
PackagerTester,
PackTest,
PackToUnpackTest,
UnpackTest,
)
# ----------------------------------------------------------------------------------------------------------------------
# builtins packagers:
# ----------------------------------------------------------------------------------------------------------------------
NoneType = type(None) # TODO: Replace with types.NoneType from python 3.10
def pack_none() -> NoneType:
    """Return the None sample to be packed."""
    return None
def validate_none(result: NoneType) -> bool:
    """Check that the logged result matches the None sample."""
    # TODO: None values should not be casted to strings when casted to results, once it is implemented in
    # 'execution._cast_result`, change this validation to `return result is None`.
    return result == "None"
class NonePackagerTester(PackagerTester):
    """
    A tester for the `NonePackager`.
    """
    PACKAGER_IN_TEST = NonePackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_none",
            log_hint="my_result",
            validation_function=validate_none,
        ),
        PackToUnpackTest(
            pack_handler="pack_none",
            log_hint="my_result",
        ),
    ]
# Integer sample shared by the pack/validate/unpack handlers below.
_INT_SAMPLE = 7


def pack_int() -> int:
    """Return the integer sample to be packed."""
    return _INT_SAMPLE


def validate_int(result: int) -> bool:
    """Check that a logged result equals the integer sample."""
    return result == _INT_SAMPLE


def unpack_int(obj: int):
    """Assert that an unpacked object is the original integer sample."""
    assert isinstance(obj, int) and obj == _INT_SAMPLE
class IntPackagerTester(PackagerTester):
    """
    A tester for the `IntPackager`.
    """
    PACKAGER_IN_TEST = IntPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_int",
            log_hint="my_result",
            validation_function=validate_int,
        ),
        PackToUnpackTest(
            pack_handler="pack_int",
            log_hint="my_result",
        ),
        PackToUnpackTest(
            pack_handler="pack_int",
            log_hint="my_result: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": int.__module__,
            },
            unpack_handler="unpack_int",
        ),
    ]
# Float sample shared by the pack/validate/unpack handlers below.
_FLOAT_SAMPLE = 0.97123


def pack_float() -> float:
    """Return the float sample to be packed."""
    return _FLOAT_SAMPLE


def validate_float(result: float) -> bool:
    """Check that a logged result equals the float sample."""
    return result == _FLOAT_SAMPLE


def unpack_float(obj: float):
    """Assert that an unpacked object is the original float sample."""
    assert isinstance(obj, float) and obj == _FLOAT_SAMPLE
class FloatPackagerTester(PackagerTester):
    """
    A tester for the `FloatPackager`.
    """
    PACKAGER_IN_TEST = FloatPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_float",
            log_hint="my_result",
            validation_function=validate_float,
        ),
        PackToUnpackTest(
            pack_handler="pack_float",
            log_hint="my_result",
        ),
        PackToUnpackTest(
            pack_handler="pack_float",
            log_hint="my_result: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": float.__module__,
            },
            unpack_handler="unpack_float",
        ),
    ]
# Boolean sample shared by the pack/validate/unpack handlers below.
_BOOL_SAMPLE = True


def pack_bool() -> bool:
    """Return the boolean sample to be packed.

    Bug fix: the return annotation previously said ``-> float`` (copy-paste
    from the float section above); the function returns a bool.
    """
    return _BOOL_SAMPLE


def validate_bool(result: bool) -> bool:
    """Check that a logged result is the boolean sample (identity check)."""
    return result is _BOOL_SAMPLE


def unpack_bool(obj: bool):
    """Assert that an unpacked object is the original boolean sample."""
    assert isinstance(obj, bool)
    assert obj is _BOOL_SAMPLE
class BoolPackagerTester(PackagerTester):
    """
    A tester for the `BoolPackager`.
    """
    PACKAGER_IN_TEST = BoolPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_bool",
            log_hint="my_result",
            validation_function=validate_bool,
        ),
        PackToUnpackTest(
            pack_handler="pack_bool",
            log_hint="my_result",
        ),
        PackToUnpackTest(
            pack_handler="pack_bool",
            log_hint="my_result: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": bool.__module__,
            },
            unpack_handler="unpack_bool",
        ),
    ]
_STR_RESULT_SAMPLE = "I'm a string."
_STR_FILE_SAMPLE = "Something written in a file..."
_STR_DIRECTORY_FILES_SAMPLE = "I'm text file number {}"
def pack_str() -> str:
    """Return the string sample to be packed as a result."""
    return _STR_RESULT_SAMPLE
def pack_str_path_file(context: MLClientCtx) -> str:
    """Write the file sample into the run's artifact path; return its path."""
    file_path = os.path.join(context.artifact_path, "my_file.txt")
    with open(file_path, "w") as file:
        file.write(_STR_FILE_SAMPLE)
    return file_path
def pack_str_path_directory(context: MLClientCtx) -> str:
    """Create a directory of 5 sample text files; return the directory path."""
    directory_path = os.path.join(context.artifact_path, "my_directory")
    os.makedirs(directory_path)
    for i in range(5):
        with open(os.path.join(directory_path, f"file_{i}.txt"), "w") as file:
            file.write(_STR_DIRECTORY_FILES_SAMPLE.format(i))
    return directory_path
def validate_str_result(result: str) -> bool:
    """Check that a logged result equals the string sample."""
    return result == _STR_RESULT_SAMPLE
def unpack_str(obj: str):
    """Assert that an unpacked object is the original string sample."""
    assert isinstance(obj, str)
    assert obj == _STR_RESULT_SAMPLE
def unpack_str_path_file(obj: str):
    """Assert that an unpacked path points at the sample file content."""
    assert isinstance(obj, str)
    with open(obj, "r") as file:
        file_content = file.read()
    assert file_content == _STR_FILE_SAMPLE
def unpack_str_path_directory(obj: str):
    """Assert that an unpacked path holds the 5 expected sample files."""
    assert isinstance(obj, str)
    for i in range(5):
        with open(os.path.join(obj, f"file_{i}.txt"), "r") as file:
            file_content = file.read()
        assert file_content == _STR_DIRECTORY_FILES_SAMPLE.format(i)
def prepare_str_path_file() -> Tuple[str, str]:
    """Write the file sample to a temp dir; return (file path, temp dir)."""
    temp_directory = tempfile.mkdtemp()
    file_path = os.path.join(temp_directory, "my_file.txt")
    with open(file_path, "w") as file:
        file.write(_STR_FILE_SAMPLE)
    return file_path, temp_directory
class StrPackagerTester(PackagerTester):
    """
    A tester for the `StrPackager`.
    """
    PACKAGER_IN_TEST = StrPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness;
    # covers results, single files, and archived directories.
    TESTS = [
        PackTest(
            pack_handler="pack_str",
            log_hint="my_result",
            validation_function=validate_str_result,
            pack_parameters={},
        ),
        UnpackTest(
            prepare_input_function=prepare_str_path_file,
            unpack_handler="unpack_str_path_file",
        ),
        PackToUnpackTest(
            pack_handler="pack_str",
            log_hint="my_result",
        ),
        PackToUnpackTest(
            pack_handler="pack_str",
            log_hint="my_result: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": str.__module__,
            },
            unpack_handler="unpack_str",
        ),
        PackToUnpackTest(
            pack_handler="pack_str_path_file",
            log_hint="my_file: path",
            expected_instructions={"is_directory": False},
            unpack_handler="unpack_str_path_file",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_str_path_directory",
                log_hint={
                    "key": "my_dir",
                    "artifact_type": "path",
                    "archive_format": archive_format,
                },
                expected_instructions={
                    "is_directory": True,
                    "archive_format": archive_format,
                },
                unpack_handler="unpack_str_path_directory",
            )
            for archive_format in ArchiveSupportedFormat.get_all_formats()
        ],
    ]
_DICT_SAMPLE = {"a1": {"a2": [1, 2, 3], "b2": [4, 5, 6]}, "b1": {"b2": [4, 5, 6]}}
def pack_dict() -> dict:
return _DICT_SAMPLE
def unpack_dict(obj: dict):
assert isinstance(obj, dict)
assert obj == _DICT_SAMPLE
def validate_dict_result(result: dict) -> bool:
return result == _DICT_SAMPLE
def prepare_dict_file(file_format: str) -> Tuple[str, str]:
temp_directory = tempfile.mkdtemp()
file_path = os.path.join(temp_directory, f"my_file.{file_format}")
formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
formatter.write(obj=_DICT_SAMPLE, file_path=file_path)
return file_path, temp_directory
class DictPackagerTester(PackagerTester):
    """
    A tester for the `DictPackager`.
    """
    PACKAGER_IN_TEST = DictPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_dict",
            log_hint="my_dict",
            validation_function=validate_dict_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_dict_file,
                unpack_handler="unpack_dict",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_dict",
            log_hint="my_dict",
        ),
        PackToUnpackTest(
            pack_handler="pack_dict",
            log_hint="my_dict: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": dict.__module__,
            },
            unpack_handler="unpack_dict",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_dict",
                log_hint={
                    "key": "my_dict",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_dict",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# List sample shared by the pack/validate/unpack handlers below.
_LIST_SAMPLE = [1, 2, 3, {"a": 1, "b": 2}]
def pack_list() -> list:
    """Return the list sample to be packed."""
    return _LIST_SAMPLE
def unpack_list(obj: list):
    """Assert that an unpacked object equals the original list sample."""
    assert isinstance(obj, list)
    assert obj == _LIST_SAMPLE
def validate_list_result(result: list) -> bool:
    """Check that a logged result equals the list sample."""
    return result == _LIST_SAMPLE
def prepare_list_file(file_format: str) -> Tuple[str, str]:
    """Write the list sample to a temp file; return (file path, temp dir)."""
    temp_directory = tempfile.mkdtemp()
    file_path = os.path.join(temp_directory, f"my_file.{file_format}")
    formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    formatter.write(obj=_LIST_SAMPLE, file_path=file_path)
    return file_path, temp_directory
class ListPackagerTester(PackagerTester):
    """
    A tester for the `ListPackager`.

    Fix: the expected ``object_module_name`` previously referenced
    ``tuple.__module__`` (copy-paste); for a list object it should be
    ``list.__module__`` (same runtime value, ``builtins``, but now correct).
    """
    PACKAGER_IN_TEST = ListPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_list",
            log_hint="my_list",
            validation_function=validate_list_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_list_file,
                unpack_handler="unpack_list",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_list",
            log_hint="my_list",
        ),
        PackToUnpackTest(
            pack_handler="pack_list",
            log_hint="my_list: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": list.__module__,
            },
            unpack_handler="unpack_list",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_list",
                log_hint={
                    "key": "my_list",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_list",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# Tuple sample shared by the pack/validate/unpack handlers below.
_TUPLE_SAMPLE = (1, 2, 3)
def pack_tuple() -> tuple:
    """Return the tuple sample to be packed."""
    return _TUPLE_SAMPLE
def unpack_tuple(obj: tuple):
    """Assert that an unpacked object equals the original tuple sample."""
    assert isinstance(obj, tuple)
    assert obj == _TUPLE_SAMPLE
def validate_tuple_result(result: list) -> bool:
    """Check a logged result against the tuple sample."""
    # Tuples are serialized as lists:
    return tuple(result) == _TUPLE_SAMPLE
def prepare_tuple_file(file_format: str) -> Tuple[str, str]:
    """Write the tuple sample (as a list) to a temp file; return (path, dir)."""
    temp_directory = tempfile.mkdtemp()
    file_path = os.path.join(temp_directory, f"my_file.{file_format}")
    formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    formatter.write(obj=list(_TUPLE_SAMPLE), file_path=file_path)
    return file_path, temp_directory
class TuplePackagerTester(PackagerTester):
    """
    A tester for the `TuplePackager`.
    """
    PACKAGER_IN_TEST = TuplePackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_tuple",
            log_hint="my_tuple",
            validation_function=validate_tuple_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_tuple_file,
                unpack_handler="unpack_tuple",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_tuple",
            log_hint="my_tuple",
        ),
        PackToUnpackTest(
            pack_handler="pack_tuple",
            log_hint="my_tuple: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": tuple.__module__,
            },
            unpack_handler="unpack_tuple",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_tuple",
                log_hint={
                    "key": "my_tuple",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_tuple",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# Set sample shared by the pack/validate/unpack handlers below.
_SET_SAMPLE = {1, 2, 3}
def pack_set() -> set:
    """Return the set sample to be packed."""
    return _SET_SAMPLE
def unpack_set(obj: set):
    """Assert that an unpacked object equals the original set sample."""
    assert isinstance(obj, set)
    assert obj == _SET_SAMPLE
def validate_set_result(result: list) -> bool:
    """Check a logged result against the set sample."""
    # Sets are serialized as lists:
    return set(result) == _SET_SAMPLE
def prepare_set_file(file_format: str) -> Tuple[str, str]:
    """Write the set sample (as a list) to a temp file; return (path, dir)."""
    temp_directory = tempfile.mkdtemp()
    file_path = os.path.join(temp_directory, f"my_file.{file_format}")
    formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    formatter.write(obj=list(_SET_SAMPLE), file_path=file_path)
    return file_path, temp_directory
class SetPackagerTester(PackagerTester):
    """
    A tester for the `SetPackager`.
    """
    PACKAGER_IN_TEST = SetPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_set",
            log_hint="my_set",
            validation_function=validate_set_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_set_file,
                unpack_handler="unpack_set",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_set",
            log_hint="my_set",
        ),
        PackToUnpackTest(
            pack_handler="pack_set",
            log_hint="my_set: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": set.__module__,
            },
            unpack_handler="unpack_set",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_set",
                log_hint={
                    "key": "my_set",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_set",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# Frozenset sample shared by the pack/validate/unpack handlers below.
_FROZENSET_SAMPLE = frozenset([1, 2, 3])
def pack_frozenset() -> frozenset:
    """Return the frozenset sample to be packed."""
    return _FROZENSET_SAMPLE
def unpack_frozenset(obj: frozenset):
    """Assert that an unpacked object equals the original frozenset sample."""
    assert isinstance(obj, frozenset)
    assert obj == _FROZENSET_SAMPLE
def validate_frozenset_result(result: list) -> bool:
    """Check a logged result against the frozenset sample."""
    # Frozen sets are serialized as lists:
    return frozenset(result) == _FROZENSET_SAMPLE
def prepare_frozenset_file(file_format: str) -> Tuple[str, str]:
    """Write the frozenset sample (as a list) to a temp file; return (path, dir)."""
    temp_directory = tempfile.mkdtemp()
    file_path = os.path.join(temp_directory, f"my_file.{file_format}")
    formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    formatter.write(obj=list(_FROZENSET_SAMPLE), file_path=file_path)
    return file_path, temp_directory
class FrozensetPackagerTester(PackagerTester):
    """
    A tester for the `FrozensetPackager`.

    Fix: the expected ``object_module_name`` previously referenced
    ``set.__module__`` (copy-paste); for a frozenset object it should be
    ``frozenset.__module__`` (same runtime value, ``builtins``, but correct).
    """
    PACKAGER_IN_TEST = FrozensetPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_frozenset",
            log_hint="my_frozenset",
            validation_function=validate_frozenset_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_frozenset_file,
                unpack_handler="unpack_frozenset",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_frozenset",
            log_hint="my_frozenset",
        ),
        PackToUnpackTest(
            pack_handler="pack_frozenset",
            log_hint="my_frozenset: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": frozenset.__module__,
            },
            unpack_handler="unpack_frozenset",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_frozenset",
                log_hint={
                    "key": "my_frozenset",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_frozenset",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# Bytearray sample shared by the pack/validate/unpack handlers below.
_BYTEARRAY_SAMPLE = bytearray([1, 2, 3])
def pack_bytearray() -> bytearray:
    """Return the bytearray sample to be packed."""
    return _BYTEARRAY_SAMPLE
def unpack_bytearray(obj: bytearray):
    """Assert that an unpacked object equals the original bytearray sample."""
    assert isinstance(obj, bytearray)
    assert obj == _BYTEARRAY_SAMPLE
def validate_bytearray_result(result: str) -> bool:
    """Check a logged (stringified) result against the bytearray sample."""
    # Byte arrays are serialized as strings (not decoded):
    return bytearray(ast.literal_eval(result)) == _BYTEARRAY_SAMPLE
def prepare_bytearray_file(file_format: str) -> Tuple[str, str]:
    """Write the bytearray sample (as a list) to a temp file; return (path, dir)."""
    temp_directory = tempfile.mkdtemp()
    file_path = os.path.join(temp_directory, f"my_file.{file_format}")
    formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    formatter.write(obj=list(_BYTEARRAY_SAMPLE), file_path=file_path)
    return file_path, temp_directory
class BytearrayPackagerTester(PackagerTester):
    """
    A tester for the `BytearrayPackager`.

    Fix: the expected ``object_module_name`` previously referenced
    ``set.__module__`` (copy-paste); for a bytearray object it should be
    ``bytearray.__module__`` (same runtime value, ``builtins``, but correct).
    """
    PACKAGER_IN_TEST = BytearrayPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_bytearray",
            log_hint="my_bytearray",
            validation_function=validate_bytearray_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_bytearray_file,
                unpack_handler="unpack_bytearray",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_bytearray",
            log_hint="my_bytearray",
        ),
        PackToUnpackTest(
            pack_handler="pack_bytearray",
            log_hint="my_bytearray: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": bytearray.__module__,
            },
            unpack_handler="unpack_bytearray",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_bytearray",
                log_hint={
                    "key": "my_bytearray",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_bytearray",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_BYTES_SAMPLE = b"I'm a byte string."
def pack_bytes() -> bytes:
return _BYTES_SAMPLE
def unpack_bytes(obj: bytes):
assert isinstance(obj, bytes)
assert obj == _BYTES_SAMPLE
def validate_bytes_result(result: str) -> bool:
# Bytes are serialized as strings (not decoded):
return ast.literal_eval(result) == _BYTES_SAMPLE
def prepare_bytes_file(file_format: str) -> Tuple[str, str]:
temp_directory = tempfile.mkdtemp()
file_path = os.path.join(temp_directory, f"my_file.{file_format}")
formatter = StructFileSupportedFormat.get_format_handler(fmt=file_format)
formatter.write(obj=list(_BYTES_SAMPLE), file_path=file_path)
return file_path, temp_directory
class BytesPackagerTester(PackagerTester):
    """
    A tester for the `BytesPackager`.

    Fix: the expected ``object_module_name`` previously referenced
    ``set.__module__`` (copy-paste); for a bytes object it should be
    ``bytes.__module__`` (same runtime value, ``builtins``, but correct).
    """
    PACKAGER_IN_TEST = BytesPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_bytes",
            log_hint="my_bytes",
            validation_function=validate_bytes_result,
        ),
        *[
            UnpackTest(
                prepare_input_function=prepare_bytes_file,
                unpack_handler="unpack_bytes",
                prepare_parameters={"file_format": file_format},
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(
            pack_handler="pack_bytes",
            log_hint="my_bytes",
        ),
        PackToUnpackTest(
            pack_handler="pack_bytes",
            log_hint="my_bytes: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": bytes.__module__,
            },
            unpack_handler="unpack_bytes",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_bytes",
                log_hint={
                    "key": "my_bytes",
                    "artifact_type": "file",
                    "file_format": file_format,
                },
                expected_instructions={
                    "file_format": file_format,
                },
                unpack_handler="unpack_bytes",
            )
            for file_format in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# ----------------------------------------------------------------------------------------------------------------------
# pathlib packagers:
# ----------------------------------------------------------------------------------------------------------------------
_PATH_RESULT_SAMPLE = pathlib.Path("I'm a path.")
def pack_path() -> pathlib.Path:
return _PATH_RESULT_SAMPLE
def pack_path_file(context: MLClientCtx) -> pathlib.Path:
file_path = pathlib.Path(context.artifact_path) / "my_file.txt"
with open(file_path, "w") as file:
file.write(_STR_FILE_SAMPLE)
return file_path
def pack_path_directory(context: MLClientCtx) -> pathlib.Path:
directory_path = pathlib.Path(context.artifact_path) / "my_directory"
os.makedirs(directory_path)
for i in range(5):
with open(directory_path / f"file_{i}.txt", "w") as file:
file.write(_STR_DIRECTORY_FILES_SAMPLE.format(i))
return directory_path
def validate_path_result(result: pathlib.Path) -> bool:
return pathlib.Path(result) == _PATH_RESULT_SAMPLE
def unpack_path(obj: pathlib.Path):
assert isinstance(obj, pathlib.Path)
assert obj == _PATH_RESULT_SAMPLE
def unpack_path_file(obj: pathlib.Path):
assert isinstance(obj, pathlib.Path)
with open(obj, "r") as file:
file_content = file.read()
assert file_content == _STR_FILE_SAMPLE
def unpack_path_directory(obj: pathlib.Path):
assert isinstance(obj, pathlib.Path)
for i in range(5):
with open(obj / f"file_{i}.txt", "r") as file:
file_content = file.read()
assert file_content == _STR_DIRECTORY_FILES_SAMPLE.format(i)
class PathPackagerTester(PackagerTester):
    """
    A tester for the `PathPackager`.
    """
    PACKAGER_IN_TEST = PathPackager()
    # Declarative matrix of pack/unpack scenarios run by the shared harness.
    TESTS = [
        PackTest(
            pack_handler="pack_path",
            log_hint="my_result: result",
            validation_function=validate_path_result,
            pack_parameters={},
        ),
        UnpackTest(
            prepare_input_function=prepare_str_path_file,  # Using str preparing method - same thing
            unpack_handler="unpack_path_file",
        ),
        PackToUnpackTest(
            pack_handler="pack_path",
            log_hint="my_result: result",
        ),
        PackToUnpackTest(
            pack_handler="pack_path",
            log_hint="my_result: object",
            expected_instructions={
                **COMMON_OBJECT_INSTRUCTIONS,
                "object_module_name": pathlib.Path.__module__,
            },
            unpack_handler="unpack_path",
        ),
        PackToUnpackTest(
            pack_handler="pack_path_file",
            log_hint="my_file",
            expected_instructions={"is_directory": False},
            unpack_handler="unpack_path_file",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_path_directory",
                log_hint={
                    "key": "my_dir",
                    "archive_format": archive_format,
                },
                expected_instructions={
                    "is_directory": True,
                    "archive_format": archive_format,
                },
                unpack_handler="unpack_path_directory",
            )
            for archive_format in ArchiveSupportedFormat.get_all_formats()
        ],
    ]
| mlrun/mlrun | tests/package/packagers_testers/python_standard_library_packagers_testers.py | python_standard_library_packagers_testers.py | py | 27,189 | python | en | code | 1,129 | github-code | 36 |
14075716492 | import matplotlib
matplotlib.rc('text', usetex = True)
from pylab import *
import os
#Use the dt distributions from Crescent City
#tide gauge to make example plots
# Load the precomputed cumulative-probability table; column 0 is the tide
# stage, the remaining columns are the distributions for different dt values.
d = loadtxt('cumulative_probs.yearly.100.txt')
figure(1,(12,9))
clf()
# --- Lower panel: cumulative distributions for the dt-method ---
axes((.1,.1,.8,.38))
#Add 1.13 to get s referenced to MSL
s = d[:,0] - 2. +1.13
plot([-2.6,0.6,0.6,1.0],[1,1,0,0],'k')
for i in [7,6,5,3,1]:
    plot(s,d[:,i])
# Bug fix: passing the legend location as a bare positional string was
# deprecated in Matplotlib 3.1 and removed in 3.3; use the loc= keyword.
legend(['dt = infinity','dt = 24 hours', 'dt = 12 hours', 'dt = 4 hours', \
    'dt = 2 hour', 'dt = 0'], loc='lower left')
title('Cumulative Distributions for dt-Method',fontsize=18)
# Red dashed guide lines marking the desired probability level
plot([-1.08,-1.08,-3],[1.05,0.776,0.776],'r--',linewidth=3)
plot([-1.08],[0.776],'ro')
text(-2.8,0.8,'desired probability',fontsize=17)
ylim(-0.1,1.1)
xticks([-2,-1,0,0.6,1.],['MLW','MSL','MHW','HHW',r'${\bf \hat{\xi}}$'],fontsize=18)
yticks(fontsize=18)
ylabel('probability ',fontsize=18)
xlabel(r'tide stage ${\bf \hat{\xi}}$',fontsize=18)
# --- Upper panel: GeoClaw simulation curve ---
axes((.1,.55,.8,.38))
s = linspace(-3,0.6,101)
def h(s):
    """Piecewise GeoClaw response curve: zero below s = -1.4, quadratic above.

    Accepts scalars or numpy arrays; evaluated elementwise via ``where``.
    """
    quadratic = (s + 1.4) + (s + 1.6) ** 2
    return where(s < -1.4, 0, quadratic)
# Sample points where GeoClaw runs were performed (blue dots)
srun = linspace(-2.,0,6)
plot([-1.4],[0.0],'bo')
plot(srun,h(srun),'bo')
#Now plot the black line
# Interpolation nodes: endpoints plus the kink at s = -1.4
srun2=zeros(9)
srun2[0]=-3.; srun2[1:3]=srun[0:2]; srun2[3]=-1.4; srun2[4:8]=srun[2:6];
srun2[8]=.6;
plot(srun2,h(srun2),'k')
# Red dashed guide lines marking the exceedance level at the chosen stage
plot([-3,-1.08,-1.08],[0.59,0.59,-0.5],'r--',linewidth=3)
ylim(-0.5,5)
xticks([-2,-1,0,0.6,1.],['MLW','MSL','MHW','HHW',r'${\bf \hat{\xi}}$'],fontsize=18)
yticks([0,2,4],['0','1','2'],fontsize=18)
plot([-1.08],[0.59],'ro')
text(-2.8,0.8,r'exceedance level ${\bf \zeta_i}$',fontsize=17)
text(-1.2,-.45,r'${\bf \hat{\xi_i}}$',fontsize=15)
xlim(-3,1)
ylabel(r'Quantity of Interest ${\bf \zeta}$',fontsize=18)
title(r"${\bf E_{jk}}$'s GeoClaw Simulation Curve at one Location",fontsize=18)
savefig('tidepofzeta_dt.png')
| rjleveque/pattern-method-paper | programs/tidepofzeta_dt.py | tidepofzeta_dt.py | py | 1,796 | python | en | code | 0 | github-code | 36 |
37760975497 | from rest_framework import renderers
from teslacoil.encoders import TeslaEncoder
class TeslaRenderer(renderers.JSONRenderer):
    """JSON renderer that wraps the serialized payload in a Tesla envelope.

    The response body becomes ``{"meta": <model admin>, "objects": <data>}``,
    encoded with :class:`TeslaEncoder`.
    """
    encoder_class = TeslaEncoder

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Cleanup: the previous version also bound ``renderer_context['view'].model``
        # and ``renderer_context['request']`` to locals that were never used.
        model_admin = renderer_context['view'].model_admin
        response_wrapper = {
            'meta': model_admin,
            'objects': data,
        }
        return super(TeslaRenderer, self).render(
            response_wrapper, accepted_media_type, renderer_context)
| celerityweb/django-teslacoil | teslacoil/renderers.py | renderers.py | py | 614 | python | en | code | 5 | github-code | 36 |
6689931275 | import json
import os
from signal import SIGKILL
from statistics import mean
from typing import List, Optional
from sebs.cache import Cache
from sebs.local.function import LocalFunction
from sebs.storage.minio import Minio, MinioConfig
from sebs.utils import serialize, LoggingBase
class Deployment(LoggingBase):
    """Serializable record of a local SeBS deployment.

    Tracks started local functions, their benchmark inputs, the Minio storage
    instance, and any memory-measurement processes.  The record round-trips
    through JSON so a later invocation can shut everything down and aggregate
    the collected measurements.
    """
    @property
    def measurement_file(self) -> Optional[str]:
        # Path of the temporary file that measurement processes append to.
        return self._measurement_file
    @measurement_file.setter
    def measurement_file(self, val: Optional[str]):
        self._measurement_file = val
    def __init__(self):
        super().__init__()
        self._functions: List[LocalFunction] = []
        self._storage: Optional[Minio]
        self._inputs: List[dict] = []
        self._memory_measurement_pids: List[int] = []
        self._measurement_file: Optional[str] = None
    def add_function(self, func: LocalFunction):
        """Register a function; track its memory-measurement PID if present."""
        self._functions.append(func)
        if func.memory_measurement_pid is not None:
            self._memory_measurement_pids.append(func.memory_measurement_pid)
    def add_input(self, func_input: dict):
        """Record one benchmark input configuration."""
        self._inputs.append(func_input)
    def set_storage(self, storage: Minio):
        """Attach the Minio storage instance used by the deployment."""
        self._storage = storage
    def serialize(self, path: str):
        """Write the deployment description as JSON to ``path``."""
        with open(path, "w") as out:
            config: dict = {
                "functions": self._functions,
                "storage": self._storage,
                "inputs": self._inputs,
            }
            if self._measurement_file is not None:
                config["memory_measurements"] = {
                    "pids": self._memory_measurement_pids,
                    "file": self._measurement_file,
                }
            out.write(serialize(config))
    @staticmethod
    def deserialize(path: str, cache_client: Cache) -> "Deployment":
        """Recreate a Deployment from a JSON file written by :meth:`serialize`."""
        with open(path, "r") as in_f:
            input_data = json.load(in_f)
            deployment = Deployment()
            for input_cfg in input_data["inputs"]:
                deployment._inputs.append(input_cfg)
            for func in input_data["functions"]:
                deployment._functions.append(LocalFunction.deserialize(func))
            if "memory_measurements" in input_data:
                deployment._memory_measurement_pids = input_data["memory_measurements"]["pids"]
                deployment._measurement_file = input_data["memory_measurements"]["file"]
            deployment._storage = Minio.deserialize(
                MinioConfig.deserialize(input_data["storage"]), cache_client
            )
            return deployment
    def shutdown(self, output_json: str):
        """Stop measurement processes, aggregate memory data, stop functions.

        Per-container mean/max memory usage (and the full byte profile) is
        written to ``output_json``; the temporary measurement file is removed.
        """
        if len(self._memory_measurement_pids) > 0:
            self.logging.info("Killing memory measurement processes")
            # kill measuring processes; robustness fix: a process may already
            # have exited, which previously aborted the whole shutdown
            for proc in self._memory_measurement_pids:
                try:
                    os.kill(proc, SIGKILL)
                except ProcessLookupError:
                    self.logging.info(f"Measurement process {proc} already exited")
        if self._measurement_file is not None:
            self.logging.info(f"Gathering memory measurement data in {output_json}")
            # create dictionary with the measurements
            measurements: dict = {}
            precision_errors = 0
            with open(self._measurement_file, "r") as file:
                for line in file:
                    if line == "precision not met\n":
                        precision_errors += 1
                        # bug fix: skip the marker line explicitly; previously
                        # the code fell through and relied on the ValueError
                        # handlers below to discard it
                        continue
                    line_content = line.split()
                    if len(line_content) == 0:
                        continue
                    if not line_content[0] in measurements:
                        try:
                            measurements[line_content[0]] = [int(line_content[1])]
                        except ValueError:
                            continue
                    else:
                        try:
                            measurements[line_content[0]].append(int(line_content[1]))
                        except ValueError:
                            continue
            for container in measurements:
                measurements[container] = {
                    "mean mem. usage": f"{mean(measurements[container])/1e6} MB",
                    "max mem. usage": f"{max(measurements[container])/1e6} MB",
                    "number of measurements": len(measurements[container]),
                    "full profile (in bytes)": measurements[container],
                }
            # write to output_json file
            with open(output_json, "w") as out:
                if precision_errors > 0:
                    measurements["precision_errors"] = precision_errors
                json.dump(measurements, out, indent=6)
            # remove the temporary file the measurements were written to
            os.remove(self._measurement_file)
        for func in self._functions:
            func.stop()
| spcl/serverless-benchmarks | sebs/local/deployment.py | deployment.py | py | 4,773 | python | en | code | 97 | github-code | 36 |
34983405141 | #!/usr/bin/env python3
import pyowm
from pyowm.exceptions import OWMError
import sys, argparse
from datetime import datetime
import os
# NOTE: removed two commented-out lines that hard-coded an API key into the
# source (leaked credential); configuration comes from the CLI or environment.
def main(argv):
    """Query OpenWeatherMap for the current weather and print one metrics line.

    The API key and city come from the ``-k``/``-p`` options, falling back to
    the OPENWEATHER_API_KEY and CITY_NAME environment variables when omitted.
    Exits with status 2 on an OWM API error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-k', required=False, metavar='your_api_key')
    parser.add_argument('-p', required=False, metavar='\"Some Place,US\"')
    # Bug fix: the argv parameter was ignored (parse_args() re-read sys.argv)
    # and the parsed options were never used; honor -k/-p when supplied.
    args = parser.parse_args(argv)
    api_key = args.k if args.k else str(os.environ['OPENWEATHER_API_KEY'])
    place = args.p if args.p else str(os.environ['CITY_NAME'])
    print ('Using key ' + api_key + ' to query temperature in \"' + place + '\"...' )
    owm = pyowm.OWM(api_key)
    try:
        observation = owm.weather_at_place(place)
    except OWMError as err:
        print (err)
        sys.exit(2)
    w = observation.get_weather()
    p = observation.get_location()
    # One-line key=value output suitable for log/metrics ingestion
    print ( 'source=openweathermap ' + 'city=' + '"' + p.get_name() + '"' + ' description=' + '"' + str(w.get_status()) + '"' + \
    ' temp=' + str(w.get_temperature('celsius')['temp']) +'C' + ' humidity=' + str(w.get_humidity()) )
if __name__ == "__main__":
main(sys.argv[1:])
| vasooo/pannet | exercise_1/getweather.py | getweather.py | py | 1,169 | python | en | code | 0 | github-code | 36 |
34102793225 | import pyperclip, shelve, sys
# Multi-clipboard: save the clipboard under a keyword, list stored keywords,
# or copy a stored entry back to the clipboard.
# Usage: python bufer.py save <keyword> | list | <keyword>
mcbShelf = shelve.open('mcb')
# Save the current clipboard contents under the given keyword
if len(sys.argv)==3 and sys.argv[1].lower() == 'save':
    mcbShelf[sys.argv[2]] = pyperclip.paste()
    print(sys.argv)
elif len(sys.argv) == 2:
    if sys.argv[1].lower() == 'list':
        # Copy (and print) the list of stored keywords
        pyperclip.copy(str(list(mcbShelf.keys())))
        print(str(list(mcbShelf.keys())))
    elif sys.argv[1] in mcbShelf:
        # Load a stored entry back into the clipboard
        t=mcbShelf[sys.argv[1]]
        pyperclip.copy(t)
        print(mcbShelf[sys.argv[1]])
mcbShelf.close()
| alex3287/PyCharmProjects | a_b_s/bufer.py | bufer.py | py | 555 | python | ru | code | 1 | github-code | 36 |
24974716765 | #!/usr/bin/python3
"""Defines a rectangle class which inherits from Base."""
from models.base import Base
class Rectangle(Base):
    """A rectangle class"""
    def __init__(self, width, height, x=0, y=0, id=None):
        """Initiates an instance of the rectangle class.
        Args:
            width (int): the width of the rectangle
            height (int): height of the rectangle
            x (int): x coordinate
            y (int): y coordinate
            id (int): identity of the instance (auto-assigned when None)
        """
        self.width = width
        self.height = height
        self.x = x
        self.y = y
        super().__init__(id)
    @property
    def width(self):
        """sets / gets the width"""
        return self.__width
    @width.setter
    def width(self, val):
        if type(val) != int:
            raise TypeError("width must be an integer")
        if val <= 0:
            raise ValueError("width must be > 0")
        self.__width = val
    @property
    def height(self):
        """sets / gets the height"""
        return self.__height
    @height.setter
    def height(self, val):
        if type(val) != int:
            raise TypeError("height must be an integer")
        if val <= 0:
            raise ValueError("height must be > 0")
        self.__height = val
    @property
    def x(self):
        """sets / gets the x coordinate"""
        return self.__x
    @x.setter
    def x(self, val):
        if type(val) != int:
            raise TypeError("x must be an integer")
        if val < 0:
            raise ValueError("x must be >= 0")
        self.__x = val
    @property
    def y(self):
        """sets / gets the y coordinate"""
        return self.__y
    @y.setter
    def y(self, val):
        if type(val) != int:
            raise TypeError("y must be an integer")
        if val < 0:
            raise ValueError("y must be >= 0")
        self.__y = val
    def area(self):
        """Returns area of the rectangle"""
        return self.__width * self.__height
    def display(self):
        """prints the rectangle using '#', offset by y blank lines
        and x leading spaces."""
        if self.__width == 0 or self.__height == 0:
            print("")
            return
        [print() for y in range(self.y)]
        for h in range(self.__height):
            for x in range(self.x):
                print(" ", end="")
            for w in range(self.__width):
                print("#", end="")
            print()
    def __str__(self):
        """print() and str() representation of rectangle"""
        return "[Rectangle] ({}) {}/{} - {}/{}".format(self.id,
                                                       self.x, self.y,
                                                       self.width, self.height)
    def update(self, *args, **kwargs):
        """updates the rectangle.
        Args:
            args (int): positional values assigned in the order
                id, width, height, x, y; kwargs used only when
                args is empty
        """
        if args:
            n_args = len(args)
            if n_args >= 1:
                if args[0] is None:
                    # None id: re-run __init__ so a fresh id is assigned
                    self.__init__(self.width, self.height, self.x, self.y)
                else:
                    self.id = args[0]
            if n_args >= 2:
                self.width = args[1]
            if n_args >= 3:
                self.height = args[2]
            if n_args >= 4:
                self.x = args[3]
            if n_args > 4:
                self.y = args[4]
        elif kwargs and len(kwargs) != 0:
            for key, value in kwargs.items():
                if key == "id":
                    if value is None:
                        self.__init__(self.width, self.height, self.x, self.y)
                    else:
                        self.id = value
                elif key == "width":
                    self.width = value
                elif key == "height":
                    self.height = value
                elif key == "x":
                    self.x = value
                elif key == "y":
                    self.y = value
    def to_dictionary(self):
        """Returns a dictionary description of rectangle"""
        return {
            "id": self.id,
            "width": self.width,
            "height": self.height,
            "x": self.x,
            "y": self.y
        }
| Ikechukwu-Miracle/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/rectangle.py | rectangle.py | py | 4,224 | python | en | code | 0 | github-code | 36 |
5134070317 | from PyQt5.QtWidgets import QListWidget, QListWidgetItem
from PyQt5.QtWidgets import QWidget, QVBoxLayout
from whispering_assistant.window_managers.windows.base_window_template import BaseWindowTemplate
class ChoiceWindow(BaseWindowTemplate):
    """List window that lets the user pick one choice by double-clicking it.
    Each element of ``choices`` is expected to be a dict with a
    ``'display_text'`` key; the full dict of the chosen row is handed to
    ``process_cb`` and the window closes itself.
    """
    def __init__(self, parent=None, choices=[], process_cb=None):
        # NOTE(review): mutable default `choices=[]` is shared across calls —
        # harmless here only while it is never mutated; confirm before reuse.
        super().__init__(parent, choices=choices, process_cb=process_cb)
    def initUI(self, choices, process_cb):
        """Build the list widget from `choices` and show the window.
        Presumably called by BaseWindowTemplate during construction — TODO confirm.
        """
        self.setWindowTitle("Select the desired link")
        self.setGeometry(1000, 500, 1600, 200)
        self.selected_index = None
        self.choices = choices
        self.process_cb = process_cb
        central_widget = QWidget(self)
        layout = QVBoxLayout(central_widget)
        self.list_widget = QListWidget(central_widget)
        # One row per choice, showing its display_text.
        for choice in choices:
            display_text = choice['display_text']
            list_item = QListWidgetItem(display_text)
            self.list_widget.addItem(list_item)
        self.list_widget.itemDoubleClicked.connect(self.on_item_double_clicked)
        layout.addWidget(self.list_widget)
        central_widget.setLayout(layout)
        self.setCentralWidget(central_widget)
        self.show()
    def on_item_double_clicked(self, item):
        """Resolve the clicked row to its choice dict, invoke the callback, close."""
        self.selected_index = self.list_widget.row(item)
        print("self.selected_index", self.selected_index, self.choices[self.selected_index])
        selected_item = self.choices[self.selected_index]
        if self.process_cb:
            self.process_cb(selected_item)
        self.close()
| engrjabi/WhisperingAssistant | whispering_assistant/window_managers/windows/choice_window.py | choice_window.py | py | 1,535 | python | en | code | 2 | github-code | 36 |
16733527950 | def teste(b):
    # Demo of Python scoping: `global` vs. local names.
    global a # do not create a local `a`; rebind the module-level `a` instead
    a = 8 # this assignment now targets the global `a`
    b += 4 # parameter is local: 5 + 4 = 9, the caller's value is untouched
    c = 2  # purely local variable
    print(f'Var A dentro vale {a}')
    print(f'Var B dentro vale {b}')
    print(f'Var C dentro vale {c}')
a = 5
teste(a)
print(f'Var A fora vale {a}') # thanks to `global a` inside teste(),
# the module-level a=5 has become a=8!
11527788837 | a = ""
# Simple console "car" toy: start/stop the car with text commands until "quit".
started = False
while True:
    command = input("command: ").lower()
    if command == "quit":
        break
    if command == "start":
        # Starting twice just warns; either way the car ends up started.
        print("Car already started.." if started else "Car Started..")
        started = True
    elif command == "stop":
        print("Car Stopped." if started else "Car already stopped.")
        started = False
    elif command == "help":
        print("""
start -> to start the car
stop -> to stop the car
quit -> to quit
        """)
    else:
        print("Sorry, I don't Understand")
494409517 | # pylint doesn't understand pytest fixtures
# pylint: disable=unused-argument
from click.testing import CliRunner
from dagster_airflow.cli import scaffold
def test_build_dags(clean_airflow_home):
    '''Generate Airflow DAGs for several example pipelines and verify Airflow parses them.

    Each scaffold invocation writes a DAG file to $AIRFLOW_HOME/dags.
    Instantiating DagBag() afterwards forces Airflow to re-parse that folder;
    any parse failure is recorded in DagBag.import_errors.  This guards our
    codegen against emitting DAG files Airflow cannot load.
    '''
    runner = CliRunner()
    scaffold_invocations = [
        ['--module-name', 'dagster_examples.toys.log_spew', '--pipeline-name', 'log_spew'],
        ['--module-name', 'dagster_examples.toys.many_events', '--pipeline-name', 'many_events'],
        [
            '--module-name',
            'dagster_examples.toys.error_monster',
            '--pipeline-name',
            'error_monster',
            '--preset',
            'passing',
        ],
        [
            '--module-name',
            'dagster_examples.toys.resources',
            '--pipeline-name',
            'resource_pipeline',
        ],
        ['--module-name', 'dagster_examples.toys.sleepy', '--pipeline-name', 'sleepy_pipeline'],
    ]
    for invocation in scaffold_invocations:
        runner.invoke(scaffold, invocation)

        # Constructing DagBag triggers an Airflow DAG refresh;
        # see https://stackoverflow.com/a/50356956/11295366
        from airflow.models import DagBag

        dag_bag = DagBag()

        # Airflow records any import error here instead of raising.
        assert not dag_bag.import_errors
        assert invocation[-1] in dag_bag.dags
| helloworld/continuous-dagster | deploy/dagster_modules/dagster-airflow/dagster_airflow_tests/test_build_dags.py | test_build_dags.py | py | 1,760 | python | en | code | 2 | github-code | 36 |
15548149698 | from sys import stdin
n = None
def solve(G, s):
    """Return the vertices (0-based) NOT reachable from s by a nonempty path.

    Iterative DFS from s over adjacency lists G, using the module-level
    vertex count ``n``.  Note: s itself is deliberately never pre-marked,
    so s is reported as unreachable unless some path leads back to it.
    """
    visited = [0] * n
    stack = [s]
    while stack:
        u = stack.pop()
        for v in G[u]:
            if not visited[v]:
                visited[v] = 1
                stack.append(v)
    return [v for v in range(n) if not visited[v]]
def main():
    """Driver: parse graphs and queries from stdin until a 0 vertex count.

    Per graph: a line with n, then adjacency lines starting with a source
    vertex (terminated by a line whose first token is 0), then one query
    line "q s1 ... sq".  For each start vertex, print the count of
    unreachable vertices followed by their 1-based labels.
    """
    global n
    n = int(stdin.readline().strip())
    while n!=0:
        G = [ list() for _ in range(n) ]
        line = list(map(int,stdin.readline().split()))
        while line[0]!=0:
            # line = [u, v1, ..., vk, last]; the final token is skipped.
            # NOTE(review): range stops at len(line)-1, so the last number of
            # every edge line is treated as a sentinel — confirm input format.
            for i in range(1,len(line)-1):
                G[line[0]-1].append(line[i]-1)
            line = list(map(int,stdin.readline().split()))
        #print(G)
        line = list(map(int,stdin.readline().split()))
        for j in range(1,line[0]+1):
            l = solve(G,line[j]-1)
            print(len(l),end=" ")
            for u in l: print(u+1,end=" ")
            print(" ")
        n = int(stdin.readline().strip())
    return
main() | jhoanseb/UvaJudge | vertex.py | vertex.py | py | 1,003 | python | en | code | 0 | github-code | 36 |
37635441110 | # Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it can trap after raining.
# Example 1:
# Input: height = [0,1,0,2,1,0,1,3,2,1,2,1]
# Output: 6
# Explanation: The above elevation map (black section) is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped.
# Example 2:
# Input: height = [4,2,0,3,2,5]
# Output: 9
# Constraints:
# n == height.length
# 0 <= n <= 3 * 104
# 0 <= height[i] <= 105
# Accepted
# 745,494
# Submissions
# 1,434,921
class Solution:
    # Fix: the original annotated with typing.List, which is never imported in
    # this file and raises NameError at class-definition time when run on its
    # own; the builtin generic list[int] (PEP 585, Python 3.9+) needs no import.
    def trap(self, height: list[int]) -> int:
        """Return the units of rain water trapped between the bars of `height`.

        For every index i the water level is min(tallest bar strictly to the
        left, tallest bar strictly to the right); the water held at i is that
        level minus the bar height, clamped at 0.  O(n) time, O(n) space.
        """
        # left_max[i]: tallest bar strictly left of i (0 for the first bar).
        left_max = [0]
        for h in height[:-1]:
            left_max.append(max(h, left_max[-1]))
        # right_max[i]: tallest bar strictly right of i (0 for the last bar).
        right_max = [0] * len(height)
        for i in range(len(height) - 1, 0, -1):
            right_max[i - 1] = max(height[i], right_max[i])
        return sum(max(min(l, r) - h, 0)
                   for h, l, r in zip(height, left_max, right_max))
| sunnyyeti/Leetcode-solutions | 42 Trapping Rain Water.py | 42 Trapping Rain Water.py | py | 1,147 | python | en | code | 0 | github-code | 36 |
14369897418 | from compilador.sintactico import Sintactico
# Read one expression from the user and run it through the parser.
codigo = input('Ingresa una expresion: ')
sin = Sintactico(codigo=codigo)
# Valid only when the grammar start rule succeeds AND no errors were collected.
if sin.PROGRAMA() and len(sin.errores.coleccion) == 0:
    print('Programa valido')
else:
    print('Programa invalido')
    for error in sin.errores.coleccion:
        print(error) | dannyX21/compilador | test.py | test.py | py | 302 | python | es | code | 0 | github-code | 36 |
15719814735 | from PyQt5.QtWidgets import QCheckBox, QDialog, QDialogButtonBox, QTextEdit
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
from src.logic import QFrameBase, error_dialog
from src.frames.default import Ui_FrameDefault
from src.frames.utils.info import Ui_FrameInfo
from src.frames.utils.kde import Ui_FrameKde
from src.frames.utils.image import Ui_FrameImage
from src.frames.utils.text import Ui_FrameText
from src.frames.utils.standard_type import Ui_FrameStandardType
from src.frames.dialogs.stds import Ui_DialogStds
from src.reports import print_report
from src.science import FACTORS_ALL
from src.science.classes import Standard
class QFrameDefault(QFrameBase, Ui_FrameDefault):
    """Start-page frame that shows the (Russian) user manual in a read-only editor."""
    # User-facing instructions text (rendered at runtime — do not translate).
    INSTRUCTIONS = \
"""Терминология:
    * параметры погоды называются Эталонами,
    * медицинские данные пациентов называются Образцами,
    * отдельные факторы Образцов называются фактор-образцами
ВСЕ СТОЛБЦЫ В xlsx ФАЙЛАХ ОБЯЗАТЕЛЬНЫ К ЗАПОЛНЕНИЮ
С помощью кнопки «Добавить Пациента» заполняем данные о пациенте (все данные обязательны к заполнению) после этого, нажав кнопку «Добавить»
можно добавить пациента в базу пациентов.
С помощью кнопки «Добавить Показатели Пациента» можем выбрать пациента и базы пациентов и добавить для него показатели из xlsx файла,
нажав кнопку «Добавить показатели». При нажатии на эту кнопку можно выбрать xlsx файл с медицинскими данными пациента, они загружаются сразу четырьмя временными рядами (по факторам «без нагрузки», с физической нагрузкой», «с эмоциональной нагрузкой», «после отдыха») из xlsx-файла НА ДАННЫЙ МОМЕНТ МОЖНО ИССЛЕДОВАТЬ ТОЛЬКО КОЭФФИЦИЕНТ СИММЕТРИИ (symmetry). Столбец A – Дата в формате дд.мм.гггг и так далее столбцы BCD - параметры. Каждая ячейка – дробное число, кроме ячеек ПЕРВОГО столбца - это дата и ПОСЛЕДНЕГО - это физическое состояние пациента (строка).
С помощью кнопки «Добавить Эталон» можем выбрать xlsx файл с параметрами погоды. Столбец A – Дата в формате дд.мм.гггг и так далее столбцы BCD - параметры. Каждая ячейка – дробное число, кроме ячеек ПЕРВОГО столбца - это дата.
«Эталоны» – параметры погоды в xlsx файле расположенные в следующем порядке (Название парамера: столбец в xlsx файле):
'Дата исследования': 'date' (строка в формате дд.мм.гггг)
'Приземная скорость ветра': 'surface_wind_speed' (вещественное число, дробная часть отделяется от целой символом точки)
'Приземная температура': 'surface_temp' (вещественное число, дробная часть отделяется от целой символом точки)
'Приземная влажность': 'surface_wet' (вещественное число, дробная часть отделяется от целой символом точки)
'Приземное давление': 'surface_press' (вещественное число, дробная часть отделяется от целой символом точки)
'BX': 'bx_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'BY': 'by_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'BZ': 'bz_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'B-Vector': 'b_vector_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'Плотность протонов солнеччного ветра': 'proton_density' (вещественное число, дробная часть отделяется от целой символом точки)
'Скорость плазмы солнечного ветра': 'plasma_speed' (вещественное число, дробная часть отделяется от целой символом точки)
'Давление солнечного ветра': 'press_sun_wind' (вещественное число, дробная часть отделяется от целой символом точки)
'КР': 'kp_index' (вещественное число, дробная часть отделяется от целой символом точки)
'Радиоизлучение': 'radio_emission' (вещественное число, дробная часть отделяется от целой символом точки)
'Рентгеновское излучение Солнца-1': 'xray_sun_one' (вещественное число, дробная часть отделяется от целой символом точки)
'Рентгеновское излучение Солнца-2': 'xray_sun_two' (вещественное число, дробная часть отделяется от целой символом точки)
'Ультрофиолет-A': 'ultraviolet_a' (вещественное число, дробная часть отделяется от целой символом точки)
'Ультрофиолет-B': 'ultraviolet_b' (вещественное число, дробная часть отделяется от целой символом точки)
'Ультрофиолет-C': 'ultraviolet_c' (вещественное число, дробная часть отделяется от целой символом точки)
«Пациенты» – медицинские данные пациентов в xlsx файле расположенные в следующем порядке (Название параметра: cтолбец в xlsx файле):
'Дата исследования': 'date' (строка в формате дд.мм.гггг)
'Коэффициент симметрии': 'symmetry' (вещественное число, дробная часть отделяется от целой символом точки)
'Верхнее артериальное давление': 'upper_arterial_pressure' (вещественное число, дробная часть отделяется от целой символом точки)
'Нижнее арториальное давление': 'lower_arterial_pressure' (вещественное число, дробная часть отделяется от целой символом точки)
'chss': 'chss' (вещественное число, дробная часть отделяется от целой символом точки)
'variab': 'variab' (вещественное число, дробная часть отделяется от целой символом точки)
'Угол': 'angle' (вещественное число, дробная часть отделяется от целой символом точки)
'Состояние пациента при исследованиии': 'patients_state' (1 - без нагрузки, 2 - с физической нагрузкой, 3 - с эмоциональной нагрузкой, 4 - после отдыха)
'Физическое состояние пациента': 'physical_state' (любая строка)
В окне «Ведущий ряд» выбирается любой из загруженных файлов, затем в окне «Ведомый ряд» выбирается также любой из загруженных файлов (ряды распределения расстояний формируются от максимумов Ведомого ряда до максимумов Ведущего ряда)
В основном окне показываются фрагменты анализа в зависимости от выбранных кнопок «Все факторы», «Без нагрузки», «С физической нагрузкой», «С эмоциональной нагрузкой», «После отдыха», «Визуализация», «Статистика», «Тестирование нормальности», «4-х ядерные оценки» (график показывает ядерные оценки плотности распределений для всех факторов), «3-х ядерные оценки» (график показывает ядерные оценки плотности распределений расстояний от максимумов факторов «С физической нагрузкой», «С эмоциональной нагрузкой», «После отдыха» до фактора «Без нагрузки»)
Для формирования файла отчета (в формате docx) по выбранному эталону необходимо нажать кнопку «Сформировать отчет», в открывшемся окне выбрать фактор или все факторы и нажать кнопку «Сохранить» – будет предложено выбрать название файла и место для сохранения.
Для формирования файла отчета (в формате docx) по группе эталонов необходимо нажать кнопку «Сформировать групповой отчет», в открывшемся окне выбрать группу эталонов и фактор и нажать кнопку «Сохранить» – будет предложено выбрать название файла и место для сохранения.
"""
    def __init__(self, parent):
        """Populate the instructions editor and style it (font, scrollbar)."""
        QFrameBase.__init__(self, parent, Ui_FrameDefault)
        self.add_text(QFrameDefault.INSTRUCTIONS, self.instructions_edit)
        # Drop the auto-update hook add_text installed: this text is static.
        delattr(self.instructions_edit, "c_updating")
        font = QFont("Times New Roman", 11)
        self.instructions_edit.setFont(font)
        self.instructions_edit.verticalScrollBar().setEnabled(True)
class QFrameInfo(QFrameBase, Ui_FrameInfo):
    """Three-tab frame for one report: image, statistics and normality test."""
    def __init__(self, parent, report, val_type: str = "val"):
        QFrameBase.__init__(self, parent, Ui_FrameInfo)
        self.report = report
        self.val_type = val_type
        self.frames = [
            QFrameImage(self, self.report, self.val_type),
            QFrameText(self, self.report, self.val_type, 'stat'),
            QFrameText(self, self.report, self.val_type, 'ntest'),
        ]
        # Mount each sub-frame at the top of its corresponding tab page.
        for tab_idx, frame in enumerate(self.frames):
            self.tabs.widget(tab_idx).layout().insertWidget(0, frame)
# TODO: remove this class — merge it into a single class with QFrameInfo
class QFrameInfoKde(QFrameBase, Ui_FrameInfo):
    """Variant of QFrameInfo whose first tab shows the KDE plots instead of one image."""
    def __init__(self, parent, report, val_type: str = "val"):
        QFrameBase.__init__(self, parent, Ui_FrameInfo)
        self.report = report
        self.val_type = val_type
        self.frames = [
            QFrameKde(self, self.report),
            QFrameText(self, self.report, self.val_type, 'stat'),
            QFrameText(self, self.report, self.val_type, 'ntest'),
        ]
        # Mount each sub-frame at the top of its corresponding tab page.
        for tab_idx, frame in enumerate(self.frames):
            self.tabs.widget(tab_idx).layout().insertWidget(0, frame)
# TODO: the KDE handling would be better moved out of here
class QFrameKde(QFrameBase, Ui_FrameKde):
    """Two-tab frame holding the 4-factor ("kde") and 3-factor ("kde3") KDE plots."""
    def __init__(self, parent, report):
        QFrameBase.__init__(self, parent, Ui_FrameKde)
        self.report = report
        self.frames = [QFrameImage(self, self.report, "kde"),
                       QFrameImage(self, self.report, "kde3")]
        # Mount each plot frame at the top of its tab page.
        for tab_idx, frame in enumerate(self.frames):
            self.tabs.widget(tab_idx).layout().insertWidget(0, frame)
class QFrameImage(QFrameBase, Ui_FrameImage):
    """Frame that renders one of the report's plots, selected by va_type."""
    def __init__(self, parent, report, va_type: str):
        QFrameBase.__init__(self, parent, Ui_FrameImage)
        self.report = report
        self.va_type = va_type
        self.va, self.image_name = self.get_va()
        self.add_image(self.va, self.image, self.image_name)

    def get_va(self):
        """Map va_type to (report plot object, target label widget name)."""
        sources = {
            "val": ("va", 'va_img1'),
            "apl": ("va_apl", 'va_img2'),
            "kde": ("kde", 'label_kde_img'),
            "kde3": ("kde3", 'label_kde3_img'),
        }
        entry = sources.get(self.va_type)
        if entry is None:
            # Unknown va_type: mirror the original implicit-None return.
            return None
        attr, label = entry
        return getattr(self.report, attr), label
class QFrameStandardType(QFrameBase, Ui_FrameStandardType):
    """Two-tab frame: distance-based ("val") and amplitude-based ("apl") info views."""
    def __init__(self, parent, report):
        QFrameBase.__init__(self, parent, Ui_FrameStandardType)
        self.report = report
        self.frames = [QFrameInfo(self, self.report, "val"),
                       QFrameInfo(self, self.report, "apl")]
        # Mount each info frame at the top of its tab page.
        for tab_idx, frame in enumerate(self.frames):
            self.tabs.widget(tab_idx).layout().insertWidget(0, frame)
class QFrameText(QFrameBase, Ui_FrameText):
    """Frame rendering one textual report section chosen by (val_type, func_name)."""
    def __init__(self, parent, report, val_type: str, func_name: str):
        QFrameBase.__init__(self, parent, Ui_FrameText)
        self.report = report
        self.val_type = val_type
        self.func_name = func_name
        self.add_text(print_report('ui', self.get_func()), self.text_edit)

    def get_func(self):
        """Pick the report callable matching this frame's value type."""
        selectors = {
            "val": self.func_val,
            "apl": self.func_apl,
            "kde": self.func_kde,
        }
        selector = selectors.get(self.val_type)
        return selector() if selector is not None else None

    def func_val(self):
        """Distance-based report: statistics or normality test."""
        return self.report.get_report_stat if self.func_name == "stat" else self.report.get_report_ntest

    def func_apl(self):
        """Amplitude-based report: statistics or normality test."""
        return self.report.get_report_stat_apl if self.func_name == "stat" else self.report.get_report_ntest_apl

    def func_kde(self):
        """Three-factor KDE report: statistics or normality test."""
        return self.report.get_report_stat3 if self.func_name == "stat" else self.report.get_report_ntest3
class QDialogStds(QDialog, Ui_DialogStds):
    """Dialog for picking a factor (or all factors) and, optionally, a set of standards.

    ``dimension`` is how many values :meth:`settings` returns:
    1 (factor only) or 2 (factor + list of standard names).
    """
    def __init__(self, parent, **kwargs):
        # noinspection PyArgumentList
        QDialog.__init__(self, parent)
        Ui_DialogStds.setupUi(self, self)
        self.dimension = 1
        self.result = None
        # Standards (weather reference series): only shown when requested.
        self.get_stds = kwargs.get("get_stds", False)
        self.std_main = kwargs.get("std_main", None)
        if self.get_stds:
            self.dimension += 1
        # Factors: "all factors" is the default selection.
        self.btn_all.setChecked(True)
        # Standards: one checkbox per known standard, std_main pre-checked.
        if self.get_stds:
            self.cbs = []
            for v in reversed(sorted(list(Standard.standards.keys()))):
                self.cbs.append(QCheckBox(v, self))
                if self.std_main is None or self.std_main != v:
                    self.cbs[-1].setChecked(False)
                else:
                    self.cbs[-1].setChecked(1)
                self.layout_stds.insertWidget(0, self.cbs[-1])
        else:
            self.layout().removeItem(self.layout_stds)
        self.buttons.button(QDialogButtonBox.Save).setText("Сохранить")
        self.buttons.button(QDialogButtonBox.Cancel).setText("Отмена")
        self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)
    def accept(self):
        """Validate the selection, store it in ``self.result``, then close.

        result is [factor] or [factor, [standard names]] where factor is
        FACTORS_ALL or an index 0..3.
        """
        self.result = []
        # Factors
        if self.btn_all.isChecked():
            factor = FACTORS_ALL
        else:
            for idx in range(4):
                if self.__dict__["btn_{}".format(idx)].isChecked():
                    factor = idx
                    break
            else:
                # for/else: no factor button was checked at all
                error_dialog("Не выбран ни один фактор/все факторы")
                return
        self.result.append(factor)
        # Standards
        if self.get_stds:
            stds = [cb.text() for cb in self.cbs if cb.isChecked()]
            if not len(stds):
                error_dialog("Выберите по крайней мере один эталон")
                return
            self.result.append(stds)
        QDialog.accept(self)
    @staticmethod
    def settings(parent, **kwargs):
        """Run the dialog modally; return the chosen value(s), or None(s) on cancel."""
        dialog = QDialogStds(parent, **kwargs)
        if dialog.exec():
            res = dialog.result
        else:
            res = [None] * dialog.dimension
        return res[0] if len(res) == 1 else res
| qooteen/health-weather_correlation-master | src/logic/utils.py | utils.py | py | 17,580 | python | ru | code | 0 | github-code | 36 |
10367871599 | def ad(p, arr, arr2):
    # Append to arr2 the sum of every contiguous window of length p+1 in arr.
    # A window is taken only while it fully fits: i + p < len(arr).
    i = 0
    j = 0
    s = 0
    l = len(arr)
    while(i < l):
        if(i+p < l):
            j = i
            while(j<=i + p):
                s += arr[j]
                j += 1
            arr2.append(s)
            s = 0
        i += 1
def arr_prep(at):
    """Return the sorted list of sums of every contiguous subarray of `at`.

    Uses a running sum per start index: O(n^2) additions instead of the
    previous O(n^3) re-summation of each window (and drops the dead
    Low/High locals), with identical output.
    """
    sums = []
    for start in range(len(at)):
        running = 0
        for end in range(start, len(at)):
            running += at[end]
            sums.append(running)
    sums.sort()
    return sums
def eachcase():
    """Handle one test case read from stdin.

    First line: "<array length> <query count>"; second line: the array.
    Precomputes the sorted list of all contiguous-subarray sums once, then
    answers each range query via finalout().
    """
    ne_arr_init = input().split()
    ne_arr = int(ne_arr_init[0])
    query = int(ne_arr_init[1])
    a = 0
    ast = input().split()
    arr = []
    arr2 = []
    while (a<ne_arr):
        arr.append(int(ast[a]))
        a += 1
    arr2 = arr_prep(arr)
    qd = 0
    while (qd < query):
        finalout(arr2)
        qd += 1
def finalout(arr2):
    """Read a 1-based inclusive range "L H" from stdin and print sum(arr2[L-1:H])."""
    tokens = input().split()
    low = int(tokens[0])
    high = int(tokens[1])
    total = 0
    for idx in range(low - 1, high):
        total += arr2[idx]
    print(total)
# Read the number of test cases, then label and process each one.
case = int(input())
vd = 1
while vd <= case:
    print("Case #" + str(vd) + ": ")
    eachcase()
    vd += 1 | ayushmanbt/MyPythonStuff | COMPETETIVE CHALLANGES/GOOGLE CODE JAM KICKSTART/2018 Round Practice/Sum Of Sums - (Unsolved)/main.py | main.py | py | 1,129 | python | en | code | 0 | github-code | 36 |
71354347625 | #!/bin/python
# ===========================================================
# Created By: Richard Barrett
# Organization: DVISD
# DepartmenT: Data Services
# Purpose: Test Score & 3rd Party Website Data Pull Automation
# Date: 02/12/2020
# ===========================================================
import pandas as pd
df1 = pd.read_excel('EOCTestTrackerReport (2).xls')
df2 = pd.read_excel('SKR3437679V0A1Y4N1321699.xlsx')
difference = df1[df1!=df2]
print(difference)
| aiern/ITDataServicesInfra | Python/Analyses/Pandas/student_missing_eoc_discovery.py | student_missing_eoc_discovery.py | py | 473 | python | en | code | 0 | github-code | 36 |
23403442036 | import random
import pygame
from pygame.locals import *
import logging
import numpy as np
import itertools
# logging.basicConfig(filename = 'Result.log', level=logging.INFO)
# copy Rohit Agrawal's work and modify for py_game version. It doesn't work really well but the concept has been applied.
pygame.init()
# Colors (RGB).
BLUE = (0, 0, 255)
GREEN = (0 ,255, 0)
RED = (255, 0 , 0)
# Window geometry: 300x300 window, so each board cell is 100x100 pixels.
width = 300
height = 300
line_width = 6
font = pygame.font.SysFont(None,20)
screen = pygame.display.set_mode((width, height))
# Board state: 3x3 grid, 0 = empty, 1 = human (X), -1 = AI (O).
markers = [[0,0,0],[0,0,0],[0,0,0]]
clicked = False
pos = []
player = 1
winner = 0
game_over = False
# "Play Again?" button rectangle.
again_rect = Rect(width//2 - 80,height//2,160,50)
# Session scoreboard: wins per player, games played, last result banner.
win = [0,0]
totalgame = 0
win_text = ''
winrateCalculated = False
states_dict = {}
def copy_game_state(state):
    """Return an independent 3x3 copy of a board (no row lists are shared)."""
    return [[state[row][col] for col in range(3)] for row in range(3)]
def play_move(state, player, block_num):
    """Place `player` in cell block_num (1-9, row-major) if that cell is empty."""
    row = int((block_num - 1) / 3)
    col = (block_num - 1) % 3
    if state[row][col] == 0:
        state[row][col] = player
def draw_grid():
    """Fill the background and draw the two pairs of 3x3 grid lines."""
    gr = (50,50,50)
    bg = (255,255,200)
    screen.fill(bg)
    for x in range(1,3):
        # One horizontal and one vertical line per divider.
        pygame.draw.line(screen,gr,(0,x*(width/3)), (width,x * (width/3)), line_width)
        pygame.draw.line(screen,gr,(x*(height/3),0), (x * (height/3),height), line_width)
def draw_marker():
    """Draw an X (value 1, green) or O (value -1, red) for every claimed cell in `markers`."""
    x_pos = 0
    for x in markers:
        y_pos =0
        for y in x:
            if y == 1:
                # X: two crossing diagonals inside the 100x100 cell.
                pygame.draw.line(screen,GREEN,(y_pos*100+15,x_pos*100+15),(y_pos*100+85,x_pos*100+85))
                pygame.draw.line(screen,GREEN,(y_pos*100+15,x_pos*100+85),(y_pos*100+85,x_pos*100+15))
            if y == -1:
                # O: circle outline centered in the cell.
                pygame.draw.circle(screen,RED,(y_pos*100+50,x_pos*100+50),30,line_width)
            y_pos +=1
        x_pos +=1
def check_winner():
    """Scan `markers` for a completed row/column/diagonal; set globals winner/game_over.

    Human marks are +1 (winner=1); AI marks are -1 (winner=2).
    """
    global winner
    global game_over
    y_pos = 0
    for x in markers:
        # x is the y_pos-th row; y_pos doubles as the column index below
        # (valid only because the board is square, 3x3).
        if sum(x) == 3:
            winner = 1
            game_over =True
        if sum(x) == -3:
            winner = 2
            game_over =True
        if markers[0][y_pos] + markers[1][y_pos] + markers[2][y_pos] == 3:
            winner = 1
            game_over = True
        if markers[0][y_pos] + markers[1][y_pos] + markers[2][y_pos] == -3:
            winner = 2
            game_over= True
        y_pos +=1
    # Both diagonals.
    if markers[0][0] + markers[1][1] +markers[2][2] == 3 or markers[2][0] + markers[1][1] +markers[0][2] == 3:
        winner = 1
        game_over = True
    if markers[0][0] + markers[1][1] +markers[2][2] == -3 or markers[2][0] + markers[1][1] +markers[0][2] == -3:
        winner = 2
        game_over = True
def draw_winner(winner):
    """Update the session score once per finished game and draw the result banner.

    winrateCalculated guards the counters so repeated calls from the event
    loop only count each game once.
    """
    global totalgame, win_text, winrateCalculated
    if winner == 0 and winrateCalculated == False:
        totalgame += 1
        win_text = 'Draw!'
    else:
        if winner == 1 and winrateCalculated == False:
            totalgame += 1
            win[0] += 1
            # NOTE(review): winrate1 is computed but never used or logged.
            winrate1 = str((win[0]/totalgame)*100)
            win_text = 'Player 1 wins!'
            logging.info(win_text)
        if winner == 2 and winrateCalculated == False:
            totalgame += 1
            win[1] += 1
            # NOTE(review): winrate2 is computed but never used or logged.
            winrate2 = str((win[1]/totalgame)*100)
            win_text = 'Player 2 wins!'
            logging.info(win_text)
    # Result banner and "Play Again?" button.
    win_img = font.render(win_text, True, BLUE)
    pygame.draw.rect(screen,GREEN,(width//2 -100,height//2 -60,200,50))
    screen.blit(win_img,(width//2 -100,height//2 -50))
    again_text = 'Play Again?'
    again_img = font.render(again_text, True, BLUE)
    pygame.draw.rect(screen,GREEN,again_rect)
    screen.blit(again_img,(width//2 -80,height//2 +10))
    winrateCalculated = True
def getBestMove(state, player):
    '''
    Reinforcement Learning Algorithm
    Greedy policy over the trained state-value table: simulate `player`
    playing each empty cell, look the resulting position up in
    state_values_for_AI, and return the 1-9 cell number with the highest
    value — or -1 when the board is full.
    '''
    moves = []
    curr_state_values = []
    empty_cells = []
    #find all empty cells
    for i in range(3):
        for j in range(3):
            if state[i][j] == 0:
                empty_cells.append(i*3 + (j+1))
    if not empty_cells:
        return -1
    for empty_cell in empty_cells:
        moves.append(empty_cell)
        new_state = copy_game_state(state)
        play_move(new_state, player, empty_cell)
        # NOTE(review): O(3^9) linear reverse lookup per candidate move — it
        # works, but a precomputed state->index map would be far faster.
        next_state_idx = list(states_dict.keys())[list(states_dict.values()).index(new_state)]
        curr_state_values.append(state_values_for_AI[next_state_idx])
        print('next state value', state_values_for_AI[next_state_idx])
    print('Possible moves = ' + str(moves))
    print('Move values = ' + str(curr_state_values))
    print('markers:',markers)
    best_move_idx = np.argmax(curr_state_values)
    # print('state:',states_dict[best_move_idx])
    best_move = moves[best_move_idx]
    return best_move
def print_board(game_state):
    """Pretty-print the 3x3 board to stdout with '|' and dashed separators."""
    for row in game_state:
        print('----------------')
        print('| ' + ' || '.join(str(cell) for cell in row) + ' |')
    print('----------------')
#LOAD TRAINED STATE VALUES
state_values_for_AI = np.loadtxt('trained_state_values_O.txt', dtype=np.float64)
# Enumerate all 3^9 cell-value assignments; states_dict maps index -> board.
players = [1,-1,0]
states_dict = {}
all_possible_states = [[list(i[0:3]),list(i[3:6]),list(i[6:10])] for i in itertools.product(players, repeat = 9)]
n_states = len(all_possible_states) # 2 players, 9 space values
n_actions = 9 # 9 spaces
for i in range(n_states):
    states_dict[i] = all_possible_states[i]
# Main event loop: human (player == 1) clicks cells; AI (player == -1)
# replies using the greedy value-table policy.
run = True
while run:
    draw_grid()
    draw_marker()
    draw = 0
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if game_over == 0:
            if event.type == pygame.MOUSEBUTTONDOWN:
                clicked = True
            if event.type == pygame.MOUSEBUTTONUP and clicked == True:
                clicked = False
                pos = pygame.mouse.get_pos()
                # Pixel -> cell: 100 px per cell; pos is (x, y), rows use y.
                cell_x = pos[1]
                cell_y = pos[0 ]
                print(cell_x//100,cell_y//100)
                if markers[cell_x//100][cell_y//100] == 0:
                    markers[cell_x//100][cell_y//100] = player
                    player *= -1
                    check_winner()
            if player == -1 and game_over == False:
                smartMove = getBestMove(markers,-1)
                print(smartMove)
                if smartMove == -1:
                    #draw
                    player = 0
                    game_over = True
                else:
                    # NOTE(review): indices here are (col, row) while
                    # play_move()/getBestMove() use (row, col) — confirm this
                    # transposition is intentional.
                    markers[(smartMove-1)%3][int((smartMove-1)/3)] = player
                    print_board(markers)
                    player *= -1
                    check_winner()
        if game_over == True:
            draw_winner(winner)
            if event.type == pygame.MOUSEBUTTONDOWN:
                clicked = True
            if event.type == pygame.MOUSEBUTTONUP and clicked == True:
                pos = pygame.mouse.get_pos()
                # "Play Again?" clicked: reset the board and turn state.
                if again_rect.collidepoint(pos):
                    markers = []
                    pos = []
                    player = 1
                    winner = 0
                    game_over = False
                    for x in range(3):
                        row = [0]*3
                        markers.append(row)
    pygame.display.update()
pygame.quit() | nguyenkhanhhung91/Python_ReinforcementLearning | HumanVsQlearningBot.py | HumanVsQlearningBot.py | py | 6,564 | python | en | code | 0 | github-code | 36 |
23007605938 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 22:37:54 2020
@author: GGX
"""
# Print all permutations of 1..n (depth-first search)
class Solution():
    """Print every permutation of 1..n via depth-first search with backtracking."""
    def __init__(self, x):
        # n: permutation size; book[i] == 1 means value i+1 is still unused;
        # res holds the permutation under construction (-1 = slot unfilled).
        self.n = x
        self.book = [1] * x
        self.res = [-1] * x
    def fun(self, step):
        """Fill slot `step`; print `res` once every slot is filled."""
        if step == self.n:
            print(self.res)
            return
        for value in range(self.n):
            if self.book[value] != 1:
                continue
            self.res[step] = value + 1
            self.book[value] = 0
            self.fun(step + 1)
            self.book[value] = 1  # backtrack: value becomes usable again
# Demo: print all 24 permutations of 1..4.
Solution(4).fun(0)
| xmu-ggx/coding-in-offer | aha算法/dfs.py | dfs.py | py | 643 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.