diff --git "a/3893.jsonl" "b/3893.jsonl" new file mode 100644--- /dev/null +++ "b/3893.jsonl" @@ -0,0 +1,758 @@ +{"seq_id":"89678849","text":"contacts = {\"read_me\": (\"783981\", \"32786 address\", \"email@internet.com\"),\n\t\t\t\"delete_me\": (\"3789y89\", \"6783j address\", \"email@internet.biz\"),\n\t\t\t\"update_me\": (\"0\", \"123 address\", \"email@internet.net\")}\n\n\n\nuser_intent = raw_input(\"what do you want to do?\")\n\nif user_intent == \"view\":\n\tprint(contacts)\n\nif user_intent == \"add\":\n\tnew_contact_name = raw_input(\"what is their name?\")\n\tnew_contact_phone = raw_input(\"what is their phone number?\")\n\tnew_contact_address = raw_input(\"what is their address?\")\n\tnew_contact_email = raw_input(\"what is their email?\")\n\t# print(new_contact_email, new_contact_address, \n\t# \tnew_contact_phone, new_contact_name)\n\tcontacts[new_contact_name] = (new_contact_phone, \n\t\t\t\t\t\t\t\t\tnew_contact_address, \n\t\t\t\t\t\t\t\t\tnew_contact_email)\n\tprint(contacts)\n\nif user_intent == \"delete\":\n\tdelete_contact_name = raw_input(\"Who do you want to delete?\")\n\t# print(delete_contact_name)\n\tcontacts.pop(delete_contact_name)\n\tprint(contacts)\n\nif user_intent == \"update\":\n\tupdate_contact = raw_input(\"Who do you want to update?\")\n\tupdate_contact_phone = raw_input(\"what is their new number?\")\n\tupdate_contact_address = raw_input(\"what is their new address?\")\n\tupdate_contact_email = raw_input(\"what is their new email?\")\n\tcontacts[update_contact] = (update_contact_phone,\n\t\t\t\t\t\t\t\t update_contact_address, \n\t\t\t\t\t\t\t\t update_contact_email)\n\tprint(contacts)","sub_path":"test_book.py","file_name":"test_book.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440068440","text":"def pour_problem(X, Y, goal, start=(0, 0)):\r\n # X,Y are the capacity of glasses;(x,y)are the current fill levels and represent a state.\r\n # the goal is a level that can be in either glass.Start at start start state and follow\r\n # successors until we reach the goal. 
keep track of frontier and\r\n # previously explored;fail when no frontier.\r\n if goal in start:\r\n return [start]\r\n explored = set() # set of a states we have visited\r\n frontier = [[start]] # ordered list of paths we have blazed\r\n while frontier:\r\n path = frontier.pop(0)\r\n (x, y) = path[-1] # last state in the first path of frontier\r\n for (state, action) in successors(x, y, X, Y).items():\r\n if state not in explored:\r\n explored.add(state)\r\n path2 = path + [action, state]\r\n if goal in state:\r\n return path2\r\n else:\r\n frontier.append(path2)\r\n return Fail\r\n\r\n\r\nFail = []\r\n\r\n\r\ndef successors(x, y, X, Y):\r\n \"\"\"\r\n return a dict of { state:action} pairs describing what can be reached from the\r\n (x,y) state ,and how\r\n :param x: glass level of x\r\n :param y: glass level of y\r\n :param X: glass sizes of X\r\n :param Y: glass sizes of Y\r\n :return:\r\n \"\"\"\r\n assert x <= X and y <= Y\r\n return{((0, y + x) if y + x <= Y else(x - (Y - y), Y)): 'X->Y',\r\n ((y + x, 0) if y + x <= X else (X, y - (X - x))): 'Y->X',\r\n (X, y): 'fill X', (x, Y): 'fill Y',\r\n (0, y): 'empty X', (x, 0): 'empty Y'}\r\n\r\nprint(pour_problem(4,9,6))","sub_path":"CS212/pouring_problem.py","file_name":"pouring_problem.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"218048766","text":"from django.urls import path\nfrom donations import views\n\nurlpatterns = [\n path('register/', views.register_donations, name='register_donations'),\n # path('complete/', views.complete, name='complete'),\n path('mydonations/',views.donation_list, name='donation_list'),\n path('project//add',views.project_select, name='project_select'),\n path('project/create/', views.create_project, name='create_project'),\n path('project/my/', views.project_list, name='project_list'),\n path('tracking/', views.tracking, name='tracking'),\n path('api/qrbinding/', views.qrcode_binding, name='qrbinding'),\n path('api/qrcode_delete/', views.qrcode_delete, name='qrcode_delete'),\n path('feedback//', views.feedback_by_qrcode, name='feedback_by_qrcode'),\n path('track_donations/', views.track_donations, name='track_donations'),\n path('confirm///', views.confirm_donation, name='confirm_donation'),\n\n path('api/feedback/donation//', views.feedback_api.as_view(), name='feedback_api'),\n path('api/project/', views.project_api.as_view(), name='project_api'),\n path('api/requiretype/', views.requiretype_api.as_view(), name='requiretype_api'),\n\n\n]","sub_path":"IODT/donations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"424696172","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nimage = cv2.imread('rectangle.jpg',0) \r\n\r\nimg_float32 = np.float32(image) \r\n\r\ndft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT) \r\ndft_shift = np.fft.fftshift(dft) \r\n\r\nmagnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])) \r\n\r\nfig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)\r\n\r\nax1.imshow(image, cmap = 'gray')\r\nax1.set_title('Input Image')\r\nax1.set_xticks([])\r\nax1.set_yticks([])\r\n\r\nax2.imshow(magnitude_spectrum, cmap = 'gray')\r\nax2.set_title('Magnitude 
Spectrum')\r\nax2.set_xticks([])\r\nax2.set_yticks([])\r\n\r\nfig.savefig(\"rectangleresult.jpg\")\r\n","sub_path":"AboutMath/recandcircle.py","file_name":"recandcircle.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"361889842","text":"num = []\nr = ''\nwhile True:\n num.append(int(input('Digite um valor: ')))\n r = str(input('Deseja continuar?[S/N] ')).upper().strip()[0]\n while r not in 'SN':\n r = str(input('Opção inválida, tente novamente[S/N]: ')).strip().upper()[0]\n if r == 'N':\n break\nprint(f'Foram digitados {len(num)} números')\nnum.sort(reverse=True)\nprint(num)\nif 5 in num:\n f = 0\n for i in num:\n if i == 5:\n f += 1\n print(f'O número 5 foi digitado {f} vez(es) e está na(s) posição(ões): ', end='')\n for p, i in enumerate(num):\n if i == 5:\n print(f'{p}', end=' ')\nelse:\n print('O número 5 não foi digitado nenhuma vez')\n\n","sub_path":"Desafios/des081.py","file_name":"des081.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"253944667","text":"# -*- coding: utf-8 -*-\n# This code classifies handwritten digits\n# Also known as MNIST - Modified National Institute of Standards and Technology database\n# This configuration produced 98.01% accuracy for test set whereas it produced 99.77% accuracy for trainset.\n# Producing close accuracy rates is expected for re-run (random initialization causes to produce different results each time)\n# blog post: https://sefiks.com/2017/09/11/handwritten-digit-classification-with-tensorflow/\n# -----------------------------------------------\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# -----------------------------------------------\n\n# variables\nmnist_data = \"E:\\gitworkspace\\gitworkspace\"\n# -----------------------------------------------\n\n# data process and transformation\nMNIST_DATASET = input_data.read_data_sets(mnist_data) #(55000, 784) (55000,)\n# MNIST_DATASET = input_data.read_data_sets(mnist_data,one_hot=True) (55000, 784) (55000, 10)\ntrain_data = np.array(MNIST_DATASET.train.images, 'float32')\ntrain_target = np.array(MNIST_DATASET.train.labels, 'int64')\nprint(\"training set consists of \", len(MNIST_DATASET.train.images), \" instances\")\nprint(MNIST_DATASET.train.images.shape,MNIST_DATASET.train.labels.shape)\ntest_data = np.array(MNIST_DATASET.test.images, 'float32')\ntest_target = np.array(MNIST_DATASET.test.labels, 'int64')\nprint(\"test set consists of \", len(MNIST_DATASET.test.images), \" instances\")\nprint(MNIST_DATASET.test.images.shape,MNIST_DATASET.test.labels.shape)\n# -----------------------------------------------\n\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=len(MNIST_DATASET.train.images[1]))]\nlearningRate = 0.1\nclassifier = tf.contrib.learn.DNNClassifier(\n feature_columns=feature_columns\n , n_classes=10 # 0 to 9 - 10 classes\n , hidden_units=[128, 32] # 2 hidden layers consisting of 128 and 32 units respectively\n , optimizer=tf.train.ProximalAdagradOptimizer(learning_rate=learningRate)\n , activation_fn=tf.nn.relu\n # , activation_fn = tf.nn.softmax\n , model_dir='E:/tensorflow/tmp/test1'\n)\n# ----------------------------------------\n\n# training\ntrainForRandomSet = True\nepoch = 15000\nbatch_size = 120\n\nif trainForRandomSet == False:\n # train on all trainset\n 
classifier.fit(train_data, train_target, steps=epoch)\nelse:\n def generate_input_fn(data, label):\n image_batch, label_batch = tf.train.shuffle_batch(\n [data, label]\n , batch_size=batch_size\n , capacity=8 * batch_size\n , min_after_dequeue=4 * batch_size\n , enqueue_many=True\n )\n return image_batch, label_batch\n def input_fn_for_train():\n return generate_input_fn(train_data, train_target)\n # train on small random selected dataset\n classifier.fit(input_fn=input_fn_for_train, steps=epoch)\n# print(\"\\\\---training is over...\")\n# ----------------------------------------\n\n# apply to make predictions\npredictions = classifier.predict_classes(test_data)\nindex = 0\n\nfor i in predictions:\n if index < 10: # visualize first 10 items on test set\n print(\"actual: \", test_target[index], \", prediction: \", i)\n pred = MNIST_DATASET.test.images[index]\n pred = pred.reshape([28, 28]);\n plt.gray()\n plt.imshow(pred)\n plt.show()\n index = index + 1\n\n# ----------------------------------------\n\n# calculationg overall accuracy\n# print(\"\\n---evaluation...\")\naccuracy_score = classifier.evaluate(test_data, test_target, steps=epoch)['accuracy']\nprint(\"accuracy: \", 100 * accuracy_score, \"%\")\n","sub_path":"tensorflow-nn-5.py","file_name":"tensorflow-nn-5.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"158189736","text":"from os import truncate\r\nimport socket\r\nimport time\r\nimport _thread\r\nimport database\r\nfrom utils import *\r\n# Server Configuration\r\n\r\nMAX_USERS = 1000\r\nserver_ip = \"127.0.0.1\" # Server ip\r\nserver_port = 12345 # Server Port\r\n\r\n# Loading the Database and the users list\r\n# DATABASE = load_data(\"database.pkl\")\r\ndatabase.load_data()\r\nuser_list = list(database.DATABASE.keys())\r\n\r\n# Declaring Functions\r\ndef home_screen(username, socket_client):\r\n while(True):\r\n star = ''\r\n if(len(database.DATABASE[username]['pending_friend_requests'])):\r\n star = '*'\r\n socket_client.send(\r\n\"\"\"\r\nWelcome to Mini-Face\r\nOptions: (Reply with)\r\n1: Friends\r\n2: Messages\r\n3: Pending Friend Requests{}\r\n4: Feed\r\n5: Upload post\r\n6: Delete post\r\n7: See your Timeline\r\n0: Exit Mini-Face\r\n\"\"\".format(star).encode())\r\n option = socket_client.recv(1024).decode()\r\n\r\n if(option == \"1\"):\r\n friend_options(username, socket_client)\r\n \r\n elif(option == \"2\"):\r\n messages_options(username, socket_client)\r\n \r\n elif(option == \"3\"):\r\n get_pending_requests(username,socket_client)\r\n \r\n elif(option == \"4\"):\r\n get_feed(username,socket_client)\r\n\r\n elif(option == \"5\"):\r\n upload_post(username,socket_client)\r\n \r\n elif(option == \"6\"):\r\n delete_post(username,socket_client)\r\n \r\n elif(option == \"7\"):\r\n get_timeline(username,socket_client)\r\n \r\n elif(option == \"0\"):\r\n return\r\n \r\n else:\r\n socket_client.send(\"Invalid Option!\".encode())\r\n\r\ndef find_friend(username, socket_client):\r\n while True: \r\n socket_client.send(\r\n\"\"\" \r\nFind Friends \r\n1: Search for Friends\r\n2: See Friends of Friends\r\n0: Go to Friend Options\r\n\"\"\".encode())\r\n option = socket_client.recv(1024).decode()\r\n if(option == \"0\"):\r\n return\r\n \r\n if(option == \"1\"):\r\n search_user(username,socket_client,user_list)\r\n\r\n if(option == \"2\"):\r\n if(len(database.DATABASE[username]['friends']) < 2):\r\n response = \"Make more friends!\"\r\n socket_client.send(response.encode())\r\n 
else:\r\n get_friends_of_friends(username,socket_client)\r\n \r\ndef friend_options(username, socket_client):\r\n while True: \r\n socket_client.send(\r\n\"\"\" \r\nFriend Options \r\n1: See your Friends\r\n2: Find new Friends\r\n3: Remove Friends\r\n0: Go to Home Screen\r\n\"\"\".encode())\r\n option = socket_client.recv(1024).decode()\r\n if(option == \"0\"):\r\n return\r\n \r\n if(option == \"1\"):\r\n see_friends(username,socket_client)\r\n \r\n if(option == \"2\"):\r\n find_friend(username, socket_client)\r\n\r\n if(option == '3'):\r\n remove_friend(username, socket_client)\r\n\r\n\r\ndef client_thread(socket_client, address):\r\n try:\r\n user = login(user_list,socket_client)\r\n database.DATABASE[user][\"is_online\"] = True\r\n home_screen(user, socket_client)\r\n database.DATABASE[user][\"is_online\"] = False\r\n print(\"closing thread: \", address)\r\n socket_client.send(\"Thank you for using Mini-Face\".encode())\r\n time.sleep(0.5)\r\n write_database(\"database.pkl\")\r\n socket_client.close()\r\n except Exception as e:\r\n print(\"ENDING\", e)\r\n database.DATABASE[user][\"is_online\"] = False\r\n write_database(\"database.pkl\")\r\n socket_client.close()\r\n\r\n \r\n\r\nif(__name__ == \"__main__\"): \r\n\r\n socket_tcp = socket.socket(family = socket.AF_INET,type =socket.SOCK_STREAM)\r\n\r\n # Creating Socket for TCP\r\n socket_tcp.bind((server_ip, server_port)) # Binding is necessary in TCP\r\n socket_tcp.listen(MAX_USERS) # Listents to the clients sending connection request\r\n\r\n print(\"Server is up and running\")\r\n\r\n # creating thread for each new client\r\n while(True):\r\n socket_client, address = socket_tcp.accept()\r\n print(\"Client Connected:\", address)\r\n\r\n _thread.start_new_thread(client_thread, (socket_client, address))\r\n \r\n\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"447564448","text":"#-----------------------------\nimport sys\nimport ImgExplorer as imgex\nfrom PyQt4 import QtGui, QtCore\n#-----------------------------\n\ndef main():\n\n app = QtGui.QApplication(sys.argv)\n\n w = imgex.ImgExplorer(None)\n w.move(QtCore.QPoint(10,10))\n #w.get_image( w.icp.imageCurrent )\n w.set_image_array( imgex.getRandomWithRing2DArray() ) # if you need in a single image only...\n w.show()\n\n app.exec_() \n\n#-----------------------------\nif __name__ == \"__main__\" :\n main()\n sys.exit ('End of test')\n#-----------------------------\n","sub_path":"PlotsWithGUI/trunk/src/Example1.py","file_name":"Example1.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"415125129","text":"# -*- coding: utf-8 -*-\n\"\"\"\"\nCreated on Wed Feb 21 10:20:44 2018\n\n@author: Sebastian\n\"\"\"\n\nimport datetime\nimport itertools\n\nimport os\nimport shutil\nimport sys\nimport glob\nimport pickle\nimport numpy as np\n\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport matplotlib.colors as mcolors\nimport matplotlib.mlab as mlab\nfrom mpl_toolkits import axes_grid1\n\nimport scipy\nimport scipy.io as sio\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.spatial.distance import pdist\nfrom scipy import misc\nfrom scipy import ndimage\nfrom scipy import stats\nfrom scipy.stats import norm\nfrom scipy.spatial.distance import squareform, pdist\nfrom 
scipy.signal import argrelextrema\n\nfrom klusta.kwik.model import KwikModel\n\nfrom statsmodels.stats.multicomp import pairwise_tukeyhsd\nfrom statsmodels.stats.multicomp import MultiComparison\n\nimport PIL\nfrom PIL import Image\nfrom PIL import ImageFilter\nfrom PIL import ImageOps\n\nfrom skimage import feature\nfrom skimage import filters\n\nfrom math import exp,sqrt\nZ = norm.ppf\n\nsys.path.append('C:/Users/Sebastian/owncloud/Python 3.5/imgca-master')\nimport elphy_read\nsys.path.append(\"C:/Users/Sebastian/ownCloud/Python 3.5/Python-UNIC\")\nimport elphy_reader as ertd\n\n#%%\n#Make order of files\n#Order of files Behavior --> behavior_folder --> mouse_name --> DATfiles --> protocol_name\n\n#main='G:/'\n#behavior_folder='Test'\n#experiment_folder='June2019'\n#mouse_name='M1'\n#create_dicto_from_files_in_folder(main,behavior_folder,experiment_folder,mouse_name)\n\n#main='G:/'\n#behavior_folder='Behavior'\n#new_folder='June2019'\n#new_dir=main + behavior_folder + '/' + new_folder\n#\n#from_folder='G:/Test/June2019/'\n#create_folder_with_files_in_order(from_folder,from_folder,new_dir)\n\ndef print_peo(word):\n print(word)\n \ndef create(folder):\n try:\n os.mkdir(folder)\n except FileExistsError:\n print('Already there',folder)\n pass\n\ndef get_licks(trial,thr=100):\n gra = np.gradient(trial)\n gra[gra=StimDelay) for ev in events])\n #here there are all licks, in elphy the licks are counted only after the stim\n i+=100\n\n return events,lrecord\n\ndef check_recordings(recordings):\n\n try:\n rec_shape=recordings.shape\n except AttributeError:\n rec_shape=np.array(recordings)\n rec_shape=recordings.shape\n\n if len(rec_shape)==3:\n recordings=np.array([np.ravel(trace) for trace in recordings])\n\n return recordings\n\ndef read_dat_file(exp_name,file):\n# exp_name=experiment_folder\n# file=file_full_name\n print('reading',file)\n file = file.replace('\\\\', '/')\n\n try:\n\n print('reading with alex elphy read',file)\n f=open(file,'rb')\n recordings, dates, vectors, menupar, xpar, epinfo = elphy_read.Read(f)\n f.close()\n StimDelays=[]\n if exp_name=='2P':\n try:\n StimDelays.extend(np.ravel([epinfo[ep]['CurrentStimDelay'] for ep in np.arange(len(recordings))]))\n except KeyError:\n pass\n read_info='Alex_ElphyRead'\n\n except TypeError :\n # if exp_name=='Opto_2C_L200' or exp_name=='Opto_3C_L56' or exp_name=='Opto_3C_L100' or\n # exp_name=='2019_Lesions':\n print('reading with Thomas elphy read',file)\n recordings, vectors_thomas, xpar = ertd.read_behavior(file)\n epinfo=None\n read_info='Thomas_ElphyRead'\n dates=vectors_thomas['dates']\n vectors={}\n vectors['trecord']=vectors_thomas['TRECORD']\n vectors['crecord']=vectors_thomas['CRECORD']\n vectors['lickrecord']=vectors_thomas['LICKRECORD']\n\n except IndexError :\n\n recordings=None\n dates=None\n vectors=None\n xpar=None\n StimDelays=None\n\n return recordings,dates,vectors,xpar,epinfo,read_info\n\ndef take_recorded_data_from_vectors(recordings,vectors,xpar,epinfo):\n\n ntrials = len(recordings)\n\n #delete the weird trials that are 0 and try with epinfo\n try:\n trecord=np.ravel([int(t) for t in vectors['trecord'][:ntrials]])\n crecord=np.ravel([int(t) for t in vectors['crecord'][:ntrials]])\n lrecord=np.ravel([int(l) for l in vectors['lickrecord'][:ntrials]])\n read_info_licks='from vectors'\n #here there is no way to know the number of licks so I do it summing the events for get_licks\n\n except KeyError:\n #when there is no vectors, I can try from epinfo\n trecord=np.ravel([epinfo[ep]['iCond'] for ep in 
np.arange(ntrials)])\n crecord=np.ravel([epinfo[ep]['correct'] for ep in np.arange(ntrials)])\n StimDelay=int(xpar['fix']['StimDelay'])\n lrecord=get_licks_from_recordings(recordings,StimDelay)\n read_info_licks='from epinfo'\n\n# if ntrials!=np.sum(vectors['trecord'][:ntrials]!=0):\n# trecord=np.ravel([epinfo[ep]['iCond'] for ep in np.arange(ntrials)])\n# crecord=np.ravel([epinfo[ep]['correct'] for ep in np.arange(ntrials)])\n# StimDelay=int(xpar['fix']['StimDelay'])\n# lrecord=get_licks_from_recordings(recordings,StimDelay)\n# read_info_licks='from epinfo'\n\n\n if ntrials != len(trecord):\n raise NameError (\"not same number of ntrials and trecord\")\n if ntrials != len(crecord):\n raise NameError (\"not same number of ntrials and crecord\")\n if ntrials != len(lrecord):\n raise NameError (\"not same number of ntrials and lrecord\")\n\n return trecord,crecord,lrecord,read_info_licks\n\ndef extract_stimuli_names(xpar):\n\n xpar_keys=np.sort(list(xpar.keys()))\n\n if xpar['fix']['Auditory']:\n key='soundlist'\n elif xpar['fix']['Visual']:\n key='imagelist'\n else:\n pass\n\n if key in xpar_keys:\n stimuli_used=xpar[key]\n stimuli_used_list=[]\n for s in stimuli_used:\n stim_file=s.split('\\\\')[-1]\n stimuli_used_list.append(stim_file)\n else:\n raise NameError(\"key not in xpar_keys: means that stimulus key like sound or image is not on the xpar extracted\")\n\n return stimuli_used_list\n\ndef create_dicto_from_files_in_folder(main,behavior_folder,experiment_folder):\n\n folder_to_order=main + behavior_folder + '/' + experiment_folder + '/'\n#\n# #Folder to save log and files\n# saving_main='G:/'\n# saving_base_folder='/Python_analysis_files/'\n# saving_folder_to_save='/Behavior/'\n# saving_path= saving_main + saving_base_folder + saving_folder_to_save + behavior_folder + '/' + experiment_folder\n# create(saving_path)\n#\n f= open(folder_to_order +'/Log_' + experiment_folder + '.txt',\"w+\")\n f.write(str(datetime.datetime.today()) + \"\\r\\n\")\n f.write('Folder: ' + folder_to_order + \"\\r\\n\")\n\n list_of_files=os.listdir(folder_to_order)\n nfiles=len(list_of_files)\n\n stimuli_x_file={}\n fc=0\n\n for file in list_of_files:\n # file=list_of_files[0]\n\n f.write(file + \"\\r\\n\")\n\n #Files must be YY-MM-DD_MOUSENAME_PROTOCOL_FILENUNMER\n try:\n\n date,mouse_name,protocol,filenumber=file.split('_')\n year=date[:2]\n if len(year)==2:\n year='20' + year\n month=date[3:5]\n day=date[6:8]\n\n stimuli_x_file[fc]={}\n stimuli_x_file[fc]['File_Name']=file\n stimuli_x_file[fc]['Stimuli']=[]\n\n stimuli_x_file[fc]['Mouse_Name']=mouse_name\n\n file_full_name= folder_to_order + '/' + file\n ending=file[-4:]\n\n if ending == '.DAT':\n\n _,_,_,xpar,_,_=read_dat_file(behavior_folder,file_full_name)\n\n if xpar == None or len(xpar)== 0:\n print('FCKNG ERROR', file_full_name)\n f.write('Error with xpar' + \"\\r\\n\")\n pass\n else:\n stimuli_used_list=extract_stimuli_names(xpar)\n stimuli_x_file[fc]['Stimuli']=stimuli_used_list\n f.write('ok' + \"\\r\\n\")\n else:\n\n print('FILE NOT READ: ',file)\n f.write('Not read' + \"\\r\\n\")\n pass\n\n fc+=1\n\n except ValueError:\n\n print('ERROR reading DATfile',file)\n f.write('Not read' + \"\\r\\n\")\n pass\n\n saving_file_name='LogDict_' + experiment_folder\n saving_final_file_name= folder_to_order + '/' + saving_file_name\n pickle.dump(stimuli_x_file, open(saving_final_file_name, \"wb\"))\n print('Dict saved',saving_final_file_name)\n\n f.close()\n\ndef sort_by_groups(list_of_items,array_to_sort):\n# array_to_sort=array_mouse_names\n xx = 
np.arange(len(array_to_sort))\n yy = array_to_sort\n\n groups={}\n gc=0\n for group in itertools.groupby(iter(xx), lambda x: yy[x]):\n groups[gc]={}\n groups[gc]['type']=group[0]\n groups[gc]['idxs_items']=list(group[1])\n groups[gc]['items']=np.array(list_of_items)[np.array(groups[gc]['idxs_items'])]\n gc+=1\n\n return groups\n\ndef define_groups(stimuli_x_file):\n\n nfiles=len(stimuli_x_file)\n\n #array that contains all stimuli of each sound\n array_mouse_names=[stimuli_x_file[i]['Mouse_Name'] for i in np.arange(nfiles)]\n list_of_files=[stimuli_x_file[i]['File_Name'] for i in np.arange(nfiles)]\n #dictionary with different groups found based on stimuli\n groups=sort_by_groups(list_of_files,array_mouse_names)\n #'idxs_items' , 'items' , 'type'\n mouse_names=np.sort([[m,groups[m]['type']] for m in np.arange(len(groups))])\n\n mouse_and_stimulus={}\n for mouse in mouse_names:\n key=int(mouse[0])\n name=mouse[1]\n idxs=groups[key]['idxs_items']\n array_stimuli=[]\n for i in idxs:\n array_stimuli.append(stimuli_x_file[i]['Stimuli'])\n groups_xstimuli=sort_by_groups(groups[key]['items'],array_stimuli)\n mouse_and_stimulus[name]=groups_xstimuli\n\n return mouse_and_stimulus\n\ndef create_folder_with_files_in_order(mouse_and_stimulus,from_folder,new_dir):\n\n list_of_mice=np.sort(list(mouse_and_stimulus.keys()))\n\n #check if there is a mouse name two times\n if len(list_of_mice)!=len(np.unique(list_of_mice)):\n raise NameError ('repeated mouse name check dictionary')\n\n final_dict_mouse_in_folder={}\n for mouse in list_of_mice:\n final_dict_mouse_in_folder[mouse]={}\n final_dict_mouse_in_folder[mouse]['List_of_files']=[]\n final_dict_mouse_in_folder[mouse]['Stimuli']=[]\n\n gkeys=np.sort(list(mouse_and_stimulus[mouse].keys()))\n for gk in gkeys:\n # dicto with 'idxs_items','items','type'\n\n final_dict_mouse_in_folder[mouse]['List_of_files'].append(mouse_and_stimulus[mouse][gk]['items'])\n final_dict_mouse_in_folder[mouse]['Stimuli'].append(mouse_and_stimulus[mouse][gk]['type'])\n\n create(new_dir)\n all_mice=np.sort(list(final_dict_mouse_in_folder.keys()))\n\n for mouse in all_mice:\n print(mouse)\n new_mouse_dir= new_dir + '/' + mouse\n create(new_mouse_dir)\n\n new_mouse_dir_datfiles = new_mouse_dir + '/DATfiles'\n create(new_mouse_dir_datfiles)\n\n dict_mouse=final_dict_mouse_in_folder[mouse]\n nprots=len(dict_mouse['List_of_files'])\n\n for i in np.arange(nprots):\n print(mouse,i)\n prot_name='protocol_' + str(i)\n new_mouse_dir_prot=new_mouse_dir_datfiles + '/' + prot_name\n create(new_mouse_dir_prot)\n\n f= open(new_mouse_dir_prot + '/Log.txt',\"w+\")\n f.write(str(datetime.datetime.today()) + \"\\r\\n\")\n f.write('Mouse_Name: ' + mouse + \"\\r\\n\")\n f.write('Protocol_id: ' + str(i) + \"\\r\\n\")\n f.write(str(dict_mouse['Stimuli'][i]))\n f.close()\n\n\n files=dict_mouse['List_of_files'][i]\n nfiles=len(files)\n\n for nf in np.arange(nfiles):\n\n old_location=from_folder + files[nf]\n new_location=new_mouse_dir_prot + '/' + files[nf]\n shutil.copy(old_location,new_location)\n\ndef same_stims(Stims,array):\n# Stims=trials\n# array=k_values\n\n array=np.array(array)\n D = squareform(pdist(array.T))\n nstim = array.shape[1]\n ref = np.zeros(nstim)-1\n for i in np.arange(nstim)[::-1]: # pour avoir en priorités les faibles valeurs\n idx = np.where(D[i,:]==0)[0]\n ref[idx] = i\n\n S_arr=np.zeros(len(Stims))*np.nan\n\n for i in np.arange(len(ref)):\n S_arr[Stims==i+1] = ref[i]\n S_arr+=1\n\n ref = np.unique(ref).astype(int)\n xpar=[]\n for a in np.arange(len(array)):\n 
xpar.append(np.take(array[a],ref))\n xpar.append(ref)\n# print('same_stims',S_arr)\n\n return xpar,S_arr\n\ndef take_table_keys(xpar_list,key):\n\n nfiles=len(xpar_list)\n array_of_keys=[]\n for f in np.arange(nfiles):\n array_of_keys.append(str(np.sort(list(xpar_list[f][key].keys()))))\n return array_of_keys\n\ndef take_table_values(xpar_list,key):\n\n nfiles=len(xpar_list)\n array_of_values=[]\n for f in np.arange(nfiles):\n array_of_values_file=[]\n keys_on_table = np.sort(list(xpar_list[f][key].keys()))\n\n for tkey in keys_on_table:\n array_of_values_file.append(xpar_list[f][key][tkey])\n array_of_values.append(array_of_values_file)\n\n return array_of_values\n\ndef sort_by_keys(goodfiles,array_of_keys):\n #in this case items is list of good files\n\n xx = np.arange(len(array_of_keys))\n yy = array_of_keys\n\n groups={}\n gc=0\n for group in itertools.groupby(iter(xx), lambda x: yy[x]):\n groups[gc]={}\n groups[gc]['items']=group[0]\n groups[gc]['idxs']=list(group[1])\n groups[gc]['files']=np.array(goodfiles)[np.array(groups[gc]['idxs'])]\n\n gc+=1\n\n return groups\n","sub_path":"Def_DATfiles.py","file_name":"Def_DATfiles.py","file_ext":"py","file_size_in_byte":14228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"283169903","text":"# coding:utf-8\nimport pyodbc\n\n\ndef getConnect(sql):\n conn = pyodbc.connect('DRIVER={SQL Server};SERVER=127.0.0.1;DATABASE=dd;UID=sa;PWD=123456;charset=utf8')\n # 打开游标\n cursor = conn.cursor()\n\n if not cursor:\n raise Exception('数据库连接失败!')\n else:\n print(\"数据库链接成功\")\n cursor.execute(sql)\n # row = cursor.fetchone()\n # 返回一个连接用于关闭,返回一个游标,\n # 在查询的的时候使用:cursor.fetchone()返回一个元组的结果集\n # 在插入的时候直接传入一个sql语句就行了不用使用后面的语句 : cursor.execute(\"insert into tableName( , ) values ('', ' ')\")\n # conn.commit()\n return conn, cursor\n\n\ndef breakConnect(conn):\n conn.commit()\n conn.close()\n print(\"数据库链接已关闭\")\n# getConnect( \"select * from 知识产权局更新记录表 where id = (select max(id) from 知识产权局更新记录表 )\")\n\n\"\"\"\"\n\"\"\"\n# if __name__ == '__main__':\n#\n# sql = \"update table1 set name='ss' where id=1;update table1 set name='ss' where id=2;update table1 set name='ss' where id=1;update table1 set name='ss' where id=3;\"\n# ss = getConnect(sql)\n# conn = ss[0]\n# cursor = ss[1]\n# breakConnect(conn)","sub_path":"project/B.大邑县卫计局/工具包/链接数据库.py","file_name":"链接数据库.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"208268400","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Code\nimport time\n\ndef countdown(b):\n while b > 0:\n print('T-minus', b);\n b -= 1\n time.sleep(5)\n\nfrom threading import Thread\nt = Thread(target = countdown, args = (10,))\nt.start()\n","sub_path":"base/proccess_ctrl/tick.py","file_name":"tick.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"642499491","text":"# faculty.signals\n# Signals used by faculty models - imported by the apps.py configuration.\n#\n# Author: Benjamin Bengfort \n# Created: Mon Dec 30 16:39:09 2019 -0600\n#\n# Copyright (C) 2019 Georgetown University\n# For license information, see LICENSE.txt\n#\n# ID: signals.py [] benjamin@bengfort.com $\n\n\"\"\"\nSignals used by faculty models - imported by the apps.py configuration.\n\"\"\"\n\n##########################################################################\n## 
Imports\n##########################################################################\n\nfrom django.dispatch import receiver\nfrom django.db.models.signals import pre_save\n\nfrom faculty.models import Assignment\n\n\n@receiver(pre_save, sender=Assignment, dispatch_uid=\"check_assignment_defaults\")\ndef check_assignment_defaults(sender, instance, **kwargs):\n \"\"\"\n Ensure that the assignment defaults are correctly set from blank fields before save.\n \"\"\"\n # This does not validate, it just assigns the cohort to the course cohort if it can\n if not instance.cohort and instance.course is not None:\n instance.cohort = instance.course.cohort\n\n # Assign the start date of the assignment to the course or cohort default.\n if not instance.start:\n if instance.course is not None:\n instance.start = instance.course.start\n elif instance.cohort is not None:\n instance.start = instance.cohort.start\n\n # Assign the end date of the assignment to the course or cohort default.\n # This must be separate from start to ensure that the default is used correctly.\n if not instance.end:\n if instance.course is not None:\n instance.end = instance.course.end\n elif instance.cohort is not None:\n instance.end = instance.cohort.end\n\n # Assign the number of hours if available on the course\n if not instance.hours and instance.course is not None:\n instance.hours = instance.course.hours\n","sub_path":"faculty/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"338151492","text":"# Sockets\n\n# La forma de comunicar programas/equipos bajo la misma red se resuelve con los llamados Sockets. El Socket en su forma mas simple es un canal/flujo de comunicacion entre dos puntos terminales. \n\n# Para crear un socket se necesita una ip y puerto. 
\n\nimport socket\n\nsocketServidor = socket.socket()\n\n# Ip local: localhost o 127.0.0.1\n\nsocketServidor.bind((\"localhost\", 9001))\n\nprint(\"Esperando conexiones...\")\n\nsocketServidor.listen(1) # Detiene el programa hasta que alguien intenta conectarse\n\nsocketCliente, direccion = socketServidor.accept() # Espera una conexion entrante\n\nprint(\"Conectado desde: \", direccion[0])\n\nwhile True:\n\tmensaje = socketCliente.recv(1024).decode()\n\tif mensaje == \"salir\":\n\t\tbreak\n\tprint(\"Mensaje: \" + mensaje)\n\trespuesta = input(\"Ingrese mensaje: \")\n\tsocketCliente.send(respuesta.encode())\n\nsocketCliente.close()\nsocketServidor.close()\n","sub_path":"programacion-concurrente/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"105620313","text":"from Globals import *\r\nfrom Rectangle import Rectangle\r\n\r\nPicS = enum('IDLE', 'WAIT_TX', 'TX', 'TXING', 'TX_END', encoding=\"one_hot\")\r\nPicRxPort = 57766\r\nPicTxPort = 57788\r\nPicRxDataLen = 7\r\nPicWidth = 1024\r\nPicHeight = 512\r\nPktLength = PicWidth\r\nPktNum = PicHeight * 3 #PicWidth * PicHeight * 3 / PktLength\r\nPicEndGap = 1024\r\n\r\n\r\n@block\r\ndef TxPic(clk, RxEnd, RxIP_Protocol, RxdesPort, RxDataLength, TxIDLE, TxEnd, PicTxEn, PicTxing, TxdesPort, SendDataEn, PicTxData, PicTxDataLength, PicGapLength, FrameRate):\r\n State = Signal(PicS.IDLE)\r\n PktCNT, EndCNT = (Signal(modbv(0)[16:]) for i in range(2))\r\n FrameCNT = Signal(modbv(0)[32:])\r\n EndPktCNT = Signal(modbv(0)[2:])\r\n TxingCNT = Signal(modbv(0)[16:])\r\n CNTx3, CNTy3 = (Signal(modbv(0)[2:]) for i in range(2))\r\n X, Y = (Signal(modbv(0)[WHb:]) for i in range(2))\r\n Rect1OEn = Signal(bool(0))\r\n Rect1OColor = COLOR()\r\n Rect1Size = SIZE(PicWidth,PicHeight)\r\n CNT1S = (Signal(modbv(0)[28:]))\r\n FrameRateCNT = Signal(modbv(0)[8:])\r\n\r\n\r\n\r\n @always_seq(clk.posedge, reset=None)\r\n def Seq():\r\n if State == PicS.IDLE:\r\n PicTxEn.next = 0\r\n #PicTxData.next = 2\r\n PktCNT.next = 0\r\n EndCNT.next = 0\r\n PicGapLength.next = 0\r\n EndPktCNT.next = 0\r\n PicTxing.next = 0\r\n CNTx3.next = 0\r\n X.next = 0\r\n Y.next = 0\r\n\r\n if RxEnd == 1 and RxIP_Protocol == IP_Protocal_UDP and RxdesPort == PicRxPort and RxDataLength == PicRxDataLen:\r\n State.next = PicS.TX\r\n PicTxing.next = 1\r\n\r\n if State == PicS.TX:\r\n PicTxDataLength.next = PktLength\r\n if TxIDLE and PktCNT < PktNum:\r\n PicTxEn.next = 1\r\n #PktCNT.next = PktCNT + 1\r\n State.next = PicS.TXING\r\n else:\r\n if PktCNT == PktNum:\r\n State.next = PicS.TX_END\r\n PktCNT.next = 0\r\n PicTxEn.next = 0\r\n\r\n if State == PicS.TXING:\r\n PicTxEn.next = 0\r\n TxingCNT.next = TxingCNT + 1\r\n if TxingCNT > 64 and TxIDLE == 1:\r\n State.next = PicS.TX\r\n PktCNT.next = PktCNT + 1\r\n TxingCNT.next = 0\r\n if CNTy3 == 2:\r\n CNTy3.next = 0\r\n Y.next = Y + 1\r\n else:\r\n CNTy3.next = CNTy3 + 1\r\n\r\n if SendDataEn:\r\n if CNTx3 == 2:\r\n CNTx3.next = 0\r\n if X == PicWidth - 1:\r\n X.next = 0\r\n else:\r\n X.next = X + 1\r\n else:\r\n CNTx3.next = CNTx3 + 1\r\n\r\n\r\n\r\n\r\n if State == PicS.TX_END:\r\n Y.next = 0\r\n PicTxDataLength.next = 1\r\n PicGapLength.next = PicEndGap\r\n EndCNT.next = EndCNT + 1\r\n if EndCNT > PicEndGap and TxIDLE and EndPktCNT < 2:\r\n PicTxEn.next = 1\r\n else:\r\n PicTxEn.next = 0\r\n\r\n if TxEnd == 1:\r\n EndPktCNT.next = EndPktCNT + 1\r\n\r\n if TxIDLE and EndPktCNT >= 2:\r\n State.next = PicS.IDLE\r\n 
FrameCNT.next = FrameCNT + 1\r\n PicTxEn.next = 0\r\n EndPktCNT.next = 0\r\n FrameRateCNT.next = FrameRateCNT + 1\r\n if FrameCNT > 20:\r\n if Rect1Size.W < 10:\r\n Rect1Size.W.next = PicWidth\r\n else:\r\n Rect1Size.W.next = Rect1Size.W - 5\r\n\r\n if CNT1S == 125000000 - 1:\r\n CNT1S.next = 0\r\n FrameRateCNT.next = 0\r\n FrameRate.next = FrameRateCNT\r\n else:\r\n CNT1S.next = CNT1S + 1\r\n\r\n\r\n iRect1 = Rectangle(clk, X, Y, LOC(0, 0), Rect1Size, 0, COLOR(255,0,255), COLOR(0,0,0), Rect1OEn, Rect1OColor)\r\n\r\n\r\n @always_comb\r\n def assign():\r\n if PicTxing:\r\n TxdesPort.next = PicTxPort\r\n if CNTx3 == 1: PicTxData.next = Rect1OColor.R\r\n if CNTx3 == 2: PicTxData.next = Rect1OColor.G\r\n if CNTx3 == 0: PicTxData.next = Rect1OColor.B\r\n\r\n\r\n\r\n return instances()","sub_path":"FPGA Ethernet Mac.py/TxPic.py","file_name":"TxPic.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"578720097","text":"# while (condition):\n# Action\n# Action 2\n# Action 3\n\ncounter = 1\ns = 0\n\nwhile counter <= 100:\n print(counter)\n s += counter\n counter += 1\n\nprint(s)\n\nletters = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n\nindex = 0\nwhile index < len(letters):\n print(index)\n print(letters[index])\n index += 1\nprint()\n\nheight = 5213\nvelocity = 48\ntime = 0\n\nwhile height > 0:\n height -= velocity\n time += 1\n\nprint(height)\nprint(time)\n","sub_path":"02.) Loops - Part 2 (While).py","file_name":"02.) Loops - Part 2 (While).py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"134779057","text":"#! /usr/bin/env python\n## Hey, Python: encoding=utf-8\n#\n# Copyright (c) 2007-2008, 2010 Adeodato Simó (dato@net.com.org.es)\n# Licensed under the terms of the MIT license.\n\nimport justrok\nfrom justrok.threaded_worker import ThreadedWorker\nimport mutagen\nimport mutagen.easyid3\nimport mutagen.asf\nimport mutagen.id3\nimport mutagen.mp3\n\nclass TagReader(ThreadedWorker):\n def __init__(self):\n ThreadedWorker.__init__(self, lambda item: self.tags(item.path))\n \n @staticmethod\n def tags(path):\n justrok.logger.info(\"Reading tag for \\\"\" + path + \"\\\":\")\n result = {}\n try:\n simple_info = None\n complex_info = mutagen.File(path)\n if complex_info is None:\n justrok.logger.warning('could not read tags from %s: mutagen.File() returned None', path)\n else:\n if isinstance(complex_info, mutagen.mp3.MP3) is True:\n try:\n simple_info = mutagen.easyid3.EasyID3(path)\n except mutagen.id3.ID3NoHeaderError:\n pass\n justrok.logger.debug(\" Simple: \" + str(simple_info))\n justrok.logger.debug(\" Complex: \" + str(complex_info))\n # Track\n if \"WM/TrackNumber\" in complex_info and type(complex_info[\"WM/TrackNumber\"][0]) is mutagen.asf.ASFUnicodeAttribute:\n result[\"track\"] = complex_info[\"WM/TrackNumber\"][0].value\n elif simple_info is not None and \"tracknumber\" in simple_info and \"tracktotal\" in simple_info:\n result[\"track\"] = simple_info[\"tracknumber\"][0] + '/' + simple_info[\"tracktotal\"][0]\n elif \"tracknumber\" in complex_info and \"tracktotal\" in complex_info:\n result[\"track\"] = complex_info[\"tracknumber\"][0] + '/' + complex_info[\"tracktotal\"][0]\n elif simple_info is not None and \"tracknumber\" in simple_info:\n result[\"track\"] = simple_info[\"tracknumber\"][0]\n elif \"tracknumber\" in complex_info:\n result[\"track\"] = complex_info[\"tracknumber\"][0]\n elif 
\"track\" in complex_info:\n result[\"track\"] = complex_info[\"track\"][0]\n # Disc\n if \"WM/PartOfSet\" in complex_info and type(complex_info[\"WM/PartOfSet\"][0]) is mutagen.asf.ASFUnicodeAttribute:\n result[\"disc\"] = complex_info[\"WM/PartOfSet\"][0].value\n elif simple_info is not None and \"discnumber\" in simple_info and \"disctotal\" in simple_info:\n result[\"disc\"] = simple_info[\"discnumber\"][0] + \"/\" + simple_info[\"disctotal\"][0]\n elif \"discnumber\" in complex_info and \"disctotal\" in complex_info:\n result[\"disc\"] = complex_info[\"discnumber\"][0] + \"/\" + complex_info[\"disctotal\"][0]\n elif simple_info is not None and \"discnumber\" in simple_info:\n result[\"disc\"] = simple_info[\"discnumber\"][0]\n elif \"discnumber\" in complex_info:\n result[\"disc\"] = complex_info[\"discnumber\"][0]\n # Date\n if \"WM/Year\" in complex_info and type(complex_info[\"WM/Year\"][0]) is mutagen.asf.ASFUnicodeAttribute:\n result[\"date\"] = complex_info[\"WM/Year\"][0].value\n elif simple_info is not None and \"date\" in simple_info:\n result[\"date\"] = simple_info[\"date\"][0]\n elif \"date\" in complex_info:\n result[\"date\"] = complex_info[\"date\"][0]\n elif \"year\" in complex_info:\n result[\"date\"] = complex_info[\"year\"][0]\n # Album\n if \"WM/AlbumTitle\" in complex_info and type(complex_info[\"WM/AlbumTitle\"][0]) is mutagen.asf.ASFUnicodeAttribute:\n result[\"album\"] = complex_info[\"WM/AlbumTitle\"][0].value\n elif simple_info is not None and \"album\" in simple_info :\n result[\"album\"] = simple_info[\"album\"][0]\n elif \"album\" in complex_info :\n result[\"album\"] = complex_info[\"album\"][0]\n # Title\n if \"Title\" in complex_info and type(complex_info[\"Title\"][0]) is mutagen.asf.ASFUnicodeAttribute:\n result[\"title\"] = complex_info[\"Title\"][0].value\n elif simple_info is not None and \"title\" in simple_info:\n result[\"title\"] = simple_info[\"title\"][0]\n elif \"title\" in complex_info:\n result[\"title\"] = complex_info[\"title\"][0]\n # Artist\n if \"WM/AlbumArtist\" in complex_info and type(complex_info[\"WM/AlbumArtist\"][0]) is mutagen.asf.ASFUnicodeAttribute:\n result[\"artist\"] = complex_info[\"WM/AlbumArtist\"][0].value\n elif simple_info is not None and \"artist\" in simple_info:\n result[\"artist\"] = simple_info[\"artist\"][0]\n elif \"artist\" in complex_info:\n result[\"artist\"] = complex_info[\"artist\"][0]\n # Commentary\n if \"comment\" in complex_info:\n result[\"comment\"] = complex_info[\"comment\"][0]\n elif \"description\" in complex_info:\n result[\"comment\"] = complex_info[\"description\"][0]\n elif isinstance(complex_info, mutagen.mp3.MP3) == True and complex_info.tags is not None:\n comment_frames = complex_info.tags.getall(\"COMM\")\n if len(comment_frames) > 0:\n result[\"comment\"] = comment_frames[0].text[0]\n # Length\n try:\n result['length'] = int(complex_info.info.length)\n except AttributeError:\n pass\n except Exception as exception:\n if path in str(exception): # Mutagen included the path in the exception.\n msg = 'could not read tags: %s' % exception\n else:\n msg = 'could not read tags from %s: %s' % (path, exception)\n justrok.logger.warning(msg)\n justrok.logger.info(\" Result: \" + str(result))\n return result\n","sub_path":"justrok/tag_reader.py","file_name":"tag_reader.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"325921466","text":"from flask import Flask, render_template, request, redirect, 
flash\nfrom werkzeug.utils import secure_filename\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor\n\napp = Flask(__name__)\napp.secret_key = 'random string'\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/rfr.html')\ndef index():\n return render_template('rfr.html')\n\n\n@app.route('/import', methods = ['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['file']\n f.save(secure_filename(f.filename))\n\n upload_file.dataSet = pd.read_csv(f.filename)\n flash('Imported '+ f.filename+ ' successfully')\n return render_template('rfr.html', dataSet = upload_file.dataSet)\n\n\n@app.route('/divide', methods=['GET', 'POST'])\ndef func():\n func.X = upload_file.dataSet.iloc[:, 1:2].values\n func.Y = upload_file.dataSet.iloc[:, 2].values\n flash('Divided into Dependent and Independent variables.')\n return render_template('rfr.html', dataSet = upload_file.dataSet)\n\n\n@app.route('/rfc', methods=['Get', 'POST'])\ndef rfc():\n yoe = request.form['YOE']\n rfc_n_estimators = request.form['n_estimators']\n regressor = RandomForestRegressor(n_estimators=int(rfc_n_estimators), random_state=0)\n regressor.fit(func.X, func.Y)\n y_pred = regressor.predict(yoe)\n return render_template('rfr.html', dataSet = upload_file.dataSet, prediction=y_pred)\n\n\nif __name__ == '__main__':\n app.run(debug = True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"44586238","text":"\"\"\"\nThe Python standard library's 'calendar' module allows you to \nrender a calendar to your terminal.\nhttps://docs.python.org/3.6/library/calendar.html\n\nWrite a program that accepts user input of the form\n `calendar.py month [year]`\nand does the following:\n - If the user doesn't specify any input, your program should \n print the calendar for the current month. The 'datetime'\n module may be helpful for this.\n - If the user specifies one argument, assume they passed in a\n month and render the calendar for that month of the current year.\n - If the user specifies two arguments, assume they passed in\n both the month and the year. Render the calendar for that \n month and year.\n - Otherwise, print a usage statement to the terminal indicating\n the format that your program expects arguments to be given.\n Then exit the program.\n\"\"\"\n\nimport sys\nimport calendar\nfrom datetime import datetime\n\nuser = ()\n\n\ndef prompt():\n global user\n user = input(\n \"Please provide a month, or a month and year separated by a comma.\").split(',')\n user = [int(i) for i in user if i != '']\n render_data(user)\n\n\ndef render_data(user_input):\n today = datetime.now()\n if not user_input:\n # Print current month\n print(calendar.month(today.year, today.month))\n prompt()\n elif len(user_input) == 1:\n # Print month in current year\n print(calendar.month(today.year, user_input[0]))\n prompt()\n elif len(user_input) == 2:\n # Print month and year provided\n print(calendar.month(user_input[1], user_input[0]))\n prompt()\n else:\n error_handler()\n\n\ndef error_handler():\n print(\"You provided an invalid input. 
Please provide a month, or a month and year separated by a comma.\")\n prompt()\n\n\nprompt()\n","sub_path":"src/14_cal.py","file_name":"14_cal.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"71107122","text":"from loguru import logger\nfrom util import util\nfrom db import dao\nfrom models.bakchod import Bakchod\nfrom telegram import ParseMode\n\n\ndef handle(update, context):\n\n util.log_chat(\"about\", update)\n\n if update.message.reply_to_message:\n query_id = update.message.reply_to_message.from_user[\"id\"]\n else:\n query_id = update.message.from_user[\"id\"]\n\n bakchod = dao.get_bakchod_by_id(query_id)\n\n if bakchod is None:\n bakchod = Bakchod.fromUpdate(update)\n dao.insert_bakchod(bakchod)\n\n update.message.reply_text(\n text=generate_about_response(bakchod), parse_mode=ParseMode.MARKDOWN\n )\n\n\ndef generate_about_response(bakchod):\n\n about_response = \"*About \" + util.extract_pretty_name_from_bakchod(bakchod) + \":* \\n\"\n about_response = about_response + \"~ ID: `{}` \\n\".format(bakchod.id)\n about_response = about_response + \"~ ₹okda: `{}` \\n\".format(round(bakchod.rokda, 2))\n\n return about_response\n","sub_path":"src/handlers/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"87373129","text":"import numpy as np\nimport pandas as pd\nimport scipy.interpolate as si\nimport plotly.plotly as py\n# import plotly.graph_objs as go\nfrom plotly.graph_objs import *\n\nfrom ..config import *\n\nclass rho:\n def __init__(self, df=None, wl=None):\n self.df = df\n self.aot = 0.1\n self.ws = 2\n self.wl = wl\n self.rhosoaa_fine_file = rhosoaa_fine_file\n self.rhosoaa_coarse_file = rhosoaa_coarse_file\n self.M1999_file = M1999_file\n self.M2015_file = M2015_file\n #self.load_rho_lut()\n\n def load_rho_lut(self):\n self.rhosoaa_fine = pd.read_csv(self.rhosoaa_fine_file, index_col=[0, 1, 2, 3, 4, 5])\n self.rhosoaa_coarse = pd.read_csv(self.rhosoaa_coarse_file, index_col=[0, 1, 2, 3, 4, 5])\n self.rhoM1999 = pd.read_csv(self.M1999_file, skiprows=7)\n self.rhoM2015 = pd.read_csv(self.M2015_file, skiprows=8)\n\n def get_rho_values(self, ws=2, aot=0.1, sza=[30], wl=None):\n\n if all(wl != None):\n self.wl = wl\n\n grid = self.rho.rho.index.levels\n # convert pandas dataframe into 6D array of the tabulated rho values for interpolation\n rho_ = reshape().df2ndarray(self.rho, 'rho')\n\n rho_wl = calc().spline_4d(grid, rho_[:, :, :, :, 1, 1], ([ws], [aot], self.wl, sza))\n return rho_wl\n\n def process(self, ws=2, aot=0.1):\n\n wl = self.wl\n df = self.df\n rho = self.get_rho_values(wl=wl, sza=df['sza'].values.mean())\n self.Rrs = (df.loc[:, (\"Lt\")] - rho * df.loc[:, (\"Lsky\")]) / df.loc[:, (\"Ed\")]\n self.Rrs.columns = pd.MultiIndex.from_product([['Rrs(awr)'], self.Rrs.columns], names=['param', 'wl'])\n\n return self.Rrs, rho\n\n\nclass calc:\n def __init__(self):\n pass\n\n def earth_sun_correction(self, dayofyear):\n '''\n Earth-Sun distance correction factor for adjustment of mean solar irradiance\n\n :param dayofyear:\n :return: correction factor\n '''\n theta = 2. 
* np.pi * dayofyear / 365\n d2 = 1.00011 + 0.034221 * np.cos(theta) + 0.00128 * np.sin(theta) + \\\n 0.000719 * np.cos(2 * theta) + 0.000077 * np.sin(2 * theta)\n return d2\n\n def bidir(self, sza, vza, azi):\n\n bidir = 1\n\n return bidir\n\n def spline_4d(self, gin, lut, gout):\n '''\n Interpolation with two successive bicubic splines on a regular 4D grid.\n Designed for interpolation in radiative transfer look-up tables with the two last dimensions\n (i.e., wavelength and solar zenith angle) of the same length.\n Those dimensions are then reduced/merged to a single one to get interpolated data on a 3D grid.\n\n :param gin: regular 4D grid of the tabulated data (tuple/array/list of arrays)\n :param lut: tabulated data\n :param gout: new 4D grid on which data are interpolated (with dims 2 and 3 of the same length);\n (tuple/array/list of arrays)\n :return: Interpolated data (1D or 3D array depending on the dimension shapes of gout\n '''\n import scipy.interpolate as si\n\n N = gin[0].__len__(), gin[1].__len__(), gin[2].__len__(), gin[3].__len__()\n Nout = gout[0].__len__(), gout[1].__len__(), gout[2].__len__()\n tmp = np.zeros([N[0], N[1], Nout[2]])\n\n for i in range(N[0]):\n for j in range(N[1]):\n tmp[i, j, :] = si.RectBivariateSpline(gin[2], gin[3], lut[i, j, :, :])(gout[2], gout[3], grid=False)\n if Nout[0] == Nout[1] == 1:\n interp = np.ndarray(Nout[2])\n for iband in range(Nout[2]):\n interp[iband] = si.RectBivariateSpline(gin[0], gin[1], tmp[:, :, iband])(gout[0], gout[1], grid=False)\n else:\n interp = np.ndarray([Nout[0], Nout[1], Nout[2]])\n for iband in range(Nout[2]):\n interp[:, :, iband] = si.RectBivariateSpline(gin[0], gin[1], tmp[:, :, iband])(gout[0], gout[1],\n grid=True)\n\n return interp\n\n\nclass reshape:\n def __init__(self):\n pass\n\n def ndarray2df(self, arr, grid, names):\n arr = np.column_stack(list(map(np.ravel, np.meshgrid(*grid))) + [arr.ravel()])\n df = pd.DataFrame(arr, columns=names) # e.g., names=['wind','aot','wl','sza','azi','vza','rho','rho_g'])\n return df\n\n def df2ndarray(self, df, name):\n shape = map(len, df.index.levels)\n arr = np.full(shape, np.nan)\n # fill it using Numpy's advanced indexing\n arr[df.index.labels] = df[name].values.flat\n return arr\n","sub_path":"build/lib/gen_rho/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"404914695","text":"def compatible(a, b):\n\tif len(a) != len(b):\n\t\treturn False\n\tn = len(a)\n\tfor i in range(n):\n\t\tif a[i] != -1 and b[i] != -1 and a[i] != b[i]:\n\t\t\treturn False\n\treturn True\n\ndef partition(comp):\n\tn = len(comp)\n\trest = range(n)\n\tres = [-1 for i in range(n)]\n\tt = 0\n\n\twhile len(rest) > 0:\n\t\tflag = 1\n\t\tfor i in rest:\n\t\t\tfor j in rest:\n\t\t\t\tif not comp[i][j]:\n\t\t\t\t\tnodes = [i, j]\n\t\t\t\t\tflag = 0\n\t\t\t\t\tbreak\n\t\t\tif not flag:\n\t\t\t\tbreak\n\t\t\n\t\tif flag: \n\t\t\tfor i in rest:\n\t\t\t\tres[i] = t\n\t\t\t\tt += 1\n\t\t\tbreak\n\n\t\tfor i in rest:\n\t\t\tflag = 1\n\t\t\tfor node in nodes:\n\t\t\t\tif comp[i][j]:\n\t\t\t\t\tflag = 0\n\t\t\t\t\tbreak\n\t\t\tif flag:\n\t\t\t\tnodes.append(i)\n\t\n\t\ts = [[i] for i in nodes]\n\t\trestt = []\n\t\tfor i in rest:\n\t\t\tfor idx, group in enumerate(s):\n\t\t\t\tflag = 1\n\t\t\t\tfor node in group:\n\t\t\t\t\tif not comp[i][node]:\n\t\t\t\t\t\tflag = 0\n\t\t\t\t\t\tbreak\n\t\t\t\tif flag:\n\t\t\t\t\tres[i] = t + idx\n\t\t\t\t\tif group[0] != 
i:\n\t\t\t\t\t\tgroup.append(i)\n\t\t\t\t\tbreak\n\t\t\tif res[i] == -1:\n\t\t\t \trestt.append(i)\n\n\t\tt += len(s)\n\t\trest = restt\n\t\n\treturn res\n\nif __name__ == \"__main__\":\n\twith open(\"test2.in\", \"r\") as f:\n\t\tlines = f.readlines()\n\n\tdata = [[int(term) for term in line.strip().split()] for line in lines]\n\n\tn, m = data[0]\n\tnext, output = [], []\n\tfor i in range(1, n + 1):\n\t\tnext.append(data[i][0:m])\n\t\toutput.append(data[i][m:])\n\n\tcomp = [[1 for i in range(n)] for i in range(n)]\n\tflag = 0\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tif not compatible(output[i], output[j]):\n\t\t\t\tflag = 1\n\t\t\t\tcomp[i][j] = 0\n\t\n\twhile flag:\n\t\tflag = 0\n\t\tfor i in range(n):\n\t\t\tfor j in range(n):\n\t\t\t\tif comp[i][j]:\n\t\t\t\t\tfor k in range(m):\n\t\t\t\t\t\tif next[i][k] != -1 and next[j][k] != -1 and not comp[next[i][k]][next[j][k]]:\n\t\t\t\t\t\t\tflag = 1\n\t\t\t\t\t\t\tcomp[i][j] = 0\n\t\t\t\t\t\t\tbreak\n\t\n\tres = partition(comp)\n\tstr = \" \".join([str(term) for term in res])\n\twith open(\"test2.out\", \"w\") as f:\n\t\tf.write(str)\n","sub_path":"homework3/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"520356609","text":"# REQUIREMENTS = ['wideq']\n# DEPENDENCIES = ['smartthinq']\n\nimport json\nimport logging\nimport voluptuous as vol\nfrom datetime import timedelta\nimport time\n\nfrom .wideq.device import (\n OPTIONITEMMODES,\n STATE_OPTIONITEM_ON,\n STATE_OPTIONITEM_OFF,\n DeviceType,\n)\n\nfrom .wideq.washer import WasherDevice\n\nfrom homeassistant.components import sensor\nimport homeassistant.helpers.config_validation as cv\n\nfrom homeassistant.const import STATE_ON, STATE_OFF\nfrom .const import DOMAIN, CLIENT, LGE_DEVICES\nfrom . 
import LGEDevice\n\nATTR_CURRENT_STATUS = \"current_status\"\nATTR_RUN_STATE = \"run_state\"\nATTR_PRE_STATE = \"pre_state\"\nATTR_REMAIN_TIME = \"remain_time\"\nATTR_INITIAL_TIME = \"initial_time\"\nATTR_RESERVE_TIME = \"reserve_time\"\nATTR_CURRENT_COURSE = \"current_course\"\nATTR_ERROR_STATE = \"error_state\"\nATTR_ERROR_MSG = \"error_message\"\nATTR_SPIN_OPTION_STATE = \"spin_option_state\"\nATTR_WATERTEMP_OPTION_STATE = \"watertemp_option_state\"\nATTR_CREASECARE_MODE = \"creasecare_mode\"\nATTR_CHILDLOCK_MODE = \"childlock_mode\"\nATTR_STEAM_MODE = \"steam_mode\"\nATTR_STEAM_SOFTENER_MODE = \"steam_softener_mode\"\nATTR_DOORLOCK_MODE = \"doorlock_mode\"\nATTR_PREWASH_MODE = \"prewash_mode\"\nATTR_REMOTESTART_MODE = \"remotestart_mode\"\nATTR_TURBOWASH_MODE = \"turbowash_mode\"\nATTR_TUBCLEAN_COUNT = \"tubclean_count\"\nATTR_WASH_COMPLETED = \"wash_completed\"\n\nSENSORMODES = {\n \"ON\": STATE_ON,\n \"OFF\": STATE_OFF,\n}\n\n_LOGGER = logging.getLogger(__name__)\nSCAN_INTERVAL = timedelta(seconds=30)\n\n\ndef setup_platform(hass, config, async_add_entities, discovery_info=None):\n pass\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up the LGE Washer components.\"\"\"\n _LOGGER.info(\"Starting smartthinq sensors...\")\n\n client = hass.data[DOMAIN][CLIENT]\n lge_sensors = []\n\n for device in client.devices:\n device_id = device.id\n device_name = device.name\n device_mac = device.macaddress\n model_name = device.model_name\n\n if device.type == DeviceType.WASHER:\n\n base_name = device_name\n\n w = LGEWasherDevice(client, device, base_name)\n lge_sensors.append(w)\n hass.data[DOMAIN][LGE_DEVICES][w.unique_id] = w\n\n _LOGGER.info(\n \"LGE Washer added. Name: %s - Model: %s - Mac: %s - ID: %s\",\n base_name,\n model_name,\n device_mac,\n device_id,\n )\n\n if lge_sensors:\n async_add_entities(lge_sensors)\n\n return True\n\n\nclass LGEWasherDevice(LGEDevice):\n \"\"\"A sensor to monitor LGE Washer devices\"\"\"\n\n def __init__(self, client, device, name):\n\n \"\"\"initialize a LGE Washer Device.\"\"\"\n super().__init__(WasherDevice(client, device), name)\n\n @property\n def icon(self):\n return \"mdi:washing-machine\"\n\n @property\n def state_attributes(self):\n \"\"\"Return the optional state attributes.\"\"\"\n data = {\n ATTR_WASH_COMPLETED: self._wash_completed,\n ATTR_ERROR_STATE: self._error_state,\n ATTR_ERROR_MSG: self._error_msg,\n ATTR_RUN_STATE: self._current_run_state,\n ATTR_PRE_STATE: self._pre_state,\n ATTR_CURRENT_COURSE: self._current_course,\n ATTR_SPIN_OPTION_STATE: self._spin_option_state,\n ATTR_WATERTEMP_OPTION_STATE: self._watertemp_option_state,\n ATTR_TUBCLEAN_COUNT: self._tubclean_count,\n ATTR_REMAIN_TIME: self._remain_time,\n ATTR_INITIAL_TIME: self._initial_time,\n ATTR_RESERVE_TIME: self._reserve_time,\n ATTR_CREASECARE_MODE: self._creasecare_mode,\n ATTR_CHILDLOCK_MODE: self._childlock_mode,\n ATTR_STEAM_MODE: self._steam_mode,\n ATTR_STEAM_SOFTENER_MODE: self._steam_softener_mode,\n ATTR_DOORLOCK_MODE: self._doorlock_mode,\n ATTR_PREWASH_MODE: self._prewash_mode,\n ATTR_REMOTESTART_MODE: self._remotestart_mode,\n ATTR_TURBOWASH_MODE: self._turbowash_mode,\n }\n return data\n\n # @property\n # def is_on(self):\n # if self._state:\n # return self._state.is_on\n\n @property\n def _wash_completed(self):\n if self._state:\n if self._state.is_wash_completed:\n return SENSORMODES[\"ON\"]\n\n return SENSORMODES[\"OFF\"]\n\n @property\n def _current_run_state(self):\n if self._state:\n if self._state.is_on:\n run_state 
= self._state.run_state\n return run_state\n\n return \"-\"\n\n # @property\n # def run_list(self):\n # return list(RUNSTATES.values())\n\n @property\n def _pre_state(self):\n if self._state:\n pre_state = self._state.pre_state\n if pre_state == STATE_OPTIONITEM_OFF:\n return \"-\"\n else:\n return pre_state\n\n return \"-\"\n\n @property\n def _remain_time(self):\n if self._state:\n if self._state.is_on:\n remain_hour = self._state.remaintime_hour\n remain_min = self._state.remaintime_min\n remaintime = [remain_hour, remain_min]\n if int(remain_min) < 10:\n return \":0\".join(remaintime)\n else:\n return \":\".join(remaintime)\n return \"0:00\"\n\n @property\n def _initial_time(self):\n if self._state:\n if self._state.is_on:\n initial_hour = self._state.initialtime_hour\n initial_min = self._state.initialtime_min\n initialtime = [initial_hour, initial_min]\n if int(initial_min) < 10:\n return \":0\".join(initialtime)\n else:\n return \":\".join(initialtime)\n return \"0:00\"\n\n @property\n def _reserve_time(self):\n if self._state:\n if self._state.is_on:\n reserve_hour = self._state.reservetime_hour\n reserve_min = self._state.reservetime_min\n reservetime = [reserve_hour, reserve_min]\n if int(reserve_min) < 10:\n return \":0\".join(reservetime)\n else:\n return \":\".join(reservetime)\n return \"0:00\"\n\n @property\n def _current_course(self):\n if self._state:\n course = self._state.current_course\n smartcourse = self._state.current_smartcourse\n if self._state.is_on:\n if course == \"Download course\":\n return smartcourse\n elif course == \"OFF\":\n return \"-\"\n else:\n return course\n\n return \"-\"\n\n @property\n def _error_state(self):\n if self._state:\n if self._state.is_on:\n if self._state.is_error:\n return SENSORMODES[\"ON\"]\n\n return SENSORMODES[\"OFF\"]\n\n @property\n def _error_msg(self):\n if self._state:\n if self._state.is_on:\n error = self._state.error_state\n return error\n\n return \"-\"\n\n @property\n def _spin_option_state(self):\n if self._state:\n spin_option = self._state.spin_option_state\n if spin_option == \"OFF\":\n return \"-\"\n else:\n return spin_option\n else:\n return \"-\"\n\n @property\n def _watertemp_option_state(self):\n if self._state:\n watertemp_option = self._state.water_temp_option_state\n if watertemp_option == \"OFF\":\n return \"-\"\n else:\n return watertemp_option\n else:\n return \"-\"\n\n @property\n def _creasecare_mode(self):\n if self._state:\n mode = self._state.creasecare_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _childlock_mode(self):\n if self._state:\n mode = self._state.childlock_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _steam_mode(self):\n if self._state:\n mode = self._state.steam_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _steam_softener_mode(self):\n if self._state:\n mode = self._state.steam_softener_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _prewash_mode(self):\n if self._state:\n mode = self._state.prewash_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _doorlock_mode(self):\n if self._state:\n mode = self._state.doorlock_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _remotestart_mode(self):\n if self._state:\n mode = self._state.remotestart_state\n return OPTIONITEMMODES[mode]\n 
else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _turbowash_mode(self):\n if self._state:\n mode = self._state.turbowash_state\n return OPTIONITEMMODES[mode]\n else:\n return OPTIONITEMMODES[\"OFF\"]\n\n @property\n def _tubclean_count(self):\n if self._state:\n return self._state.tubclean_count\n\n return \"N/A\"\n","sub_path":"custom_components/smartthinq_washer/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":9578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"139488440","text":"\n##################################################\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom treeitems import *\nfrom functools import partial\n\n##################################################\n\nclass ModelTreeWidget(QTreeWidget):\n\n ##################################################\n\n def __init__(self, parent=None):\n\n super(ModelTreeWidget, self).__init__(parent)\n self.setIconSize(QSize(23,23))\n self.setupHeader()\n self.addPoliticalBlockFolder()\n\n ##################################################\n\n def setupHeader(self):\n\n modelTreeRoot = QTreeWidgetItem()\n modelTreeRoot.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_earthglobe_icon.svg\"))\n modelTreeRoot.setText(0, \"Stock-flow Studio\")\n self.setHeaderItem(modelTreeRoot)\n\n ##################################################\n\n def addPoliticalBlockFolder(self):\n\n self.politicalBlockFolder = QTreeWidgetItem()\n self.politicalBlockFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_political_block_icon.svg\"))\n self.politicalBlockFolder.setText(0, \"Political Blocks\")\n self.addTopLevelItem(self.politicalBlockFolder)\n\n ##################################################\n\n def addPoliticalBlock(self):\n\n inputDialog = QInputDialog()\n blockName = QInputDialog.getText(inputDialog, \"Add Political Block\", \"Name:\")\n blockName = str(blockName[0])\n item = QTreeWidgetItem()\n item.setText(0,blockName)\n\n politicalBlock = QTreeWidgetItem()\n politicalBlock.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_political_block_icon.svg\"))\n politicalBlock.setText(0, blockName)\n\n naturalResourcesFolder = QTreeWidgetItem()\n naturalResourcesFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_natural_resources_icon.svg\"))\n naturalResourcesFolder.setText(0, \"Natural Resources\")\n politicalBlock.addChild(naturalResourcesFolder)\n\n governmentFolder = QTreeWidgetItem()\n governmentFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_government_icon.svg\"))\n governmentFolder.setText(0, \"Governments\")\n politicalBlock.addChild(governmentFolder)\n\n centralBankFolder = QTreeWidgetItem()\n centralBankFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_centralbank_icon.svg\"))\n centralBankFolder.setText(0, \"Central Banks\")\n politicalBlock.addChild(centralBankFolder)\n\n privateBankFolder = QTreeWidgetItem()\n privateBankFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_private_banks_icon.svg\"))\n privateBankFolder.setText(0, \"Private Banks\")\n politicalBlock.addChild(privateBankFolder)\n\n publicSectorsFolder = QTreeWidgetItem()\n publicSectorsFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_public_sector_icon.svg\"))\n publicSectorsFolder.setText(0, \"Public Sectors\")\n politicalBlock.addChild(publicSectorsFolder)\n\n privateSectorsFolder = QTreeWidgetItem()\n privateSectorsFolder.setIcon(0, 
QIcon(\"resources/modeltree-icons/modeltree_private_sector_icon.svg\"))\n privateSectorsFolder.setText(0, \"Private Sectors\")\n politicalBlock.addChild(privateSectorsFolder)\n\n populationsFolder = QTreeWidgetItem()\n populationsFolder.setIcon(0, QIcon(\"resources/modeltree-icons/modeltree_population_icon.svg\"))\n populationsFolder.setText(0, \"Populations\")\n politicalBlock.addChild(populationsFolder)\n\n self.politicalBlockFolder.addChild(politicalBlock)\n\n self.setCurrentItem(politicalBlock)\n\n\n##################################################\n\n def contextMenuEvent(self, cursor):\n\n if self.currentItem().text(0) == \"Political Blocks\":\n self.showPoliticalBlocksFolderMenu()\n\n elif self.currentItem().text(0) == \"Natural Resources\":\n self.showNaturalResourcesFolderMenu()\n\n elif self.currentItem().text(0) == \"Governments\":\n self.showGovernmentsFolderMenu()\n\n elif self.currentItem().text(0) == \"Central Banks\":\n self.showCentralBanksFolderMenu()\n\n elif self.currentItem().text(0) == \"Private Banks\":\n self.showPrivateBanksFolderMenu()\n\n elif self.currentItem().text(0) == \"Public Sectors\":\n self.showCentralBanksFolderMenu()\n\n elif self.currentItem().text(0) == \"Private Sectors\":\n self.showPrivateBanksFolderMenu()\n\n elif self.currentItem().text(0) == \"Populations\":\n self.showPopulationsFolderMenu()\n\n else: pass\n\n ##################################################\n\n # def showPoliticalBlocksFolderMenu(self):\n #\n # folderName = self.currentItem().text(0)\n # self.showPoliticalBlocksFolderMenu(folderName)\n\n ##################################################\n\n def showCentralBanksFolderMenu(self):\n\n folderName = self.currentItem().text(0)\n self.showObjectFolderMenu(folderName)\n\n ##################################################\n\n def showPrivateBanksFolderMenu(self):\n\n folderName = self.currentItem().text(0)\n self.showObjectFolderMenu(folderName)\n\n ##################################################\n\n def showGovernmentsFolderMenu(self):\n\n folderName = self.currentItem().text(0)\n self.showObjectFolderMenu(folderName)\n\n ##################################################\n\n def showPopulationsFolderMenu(self):\n\n folderName = self.currentItem().text(0)\n self.showObjectFolderMenu(folderName)\n\n ##################################################\n\n def showObjectFolderMenu(self, folderName):\n\n cursor = QCursor()\n menu = QMenu()\n\n objectFolderAdd = QAction(\"Add \" + folderName, self, triggered=partial(self.addObjectFolder, folderName))\n menu.addAction(objectFolderAdd)\n\n mousePos = cursor.pos()\n mousePos.setX(mousePos.x() + 5)\n mousePos.setY(mousePos.y() + 5)\n menu.move(mousePos)\n\n menu.exec_()\n\n ##################################################\n\n def showPoliticalBlocksFolderMenu(self):\n\n cursor = QCursor()\n menu = QMenu()\n\n politicalBlockAdd = QAction(\"Add Block\", self, triggered=partial(self.addPoliticalBlock))\n menu.addAction(politicalBlockAdd)\n\n mousePos = cursor.pos()\n mousePos.setX(mousePos.x() + 5)\n mousePos.setY(mousePos.y() + 5)\n menu.move(mousePos)\n\n menu.exec_()\n\n ##################################################\n\n def addObjectFolder(self, folderName):\n\n inputDialog = QInputDialog()\n folder_name = QInputDialog.getText(inputDialog, \"Add \" + folderName, \"Name:\")\n folder_name = str(folder_name[0])\n item = QTreeWidgetItem()\n item.setText(0,folder_name)\n\n # instrumentsIssuedFolder = CentralBankInstrumentsIssuedFolder()\n # item.addChild(instrumentsIssuedFolder)\n 
#\n        # instrumentsHeldFolder = CentralBankInstrumentsHeldFolderItem()\n        # item.addChild(instrumentsHeldFolder)\n\n        # accountsOfferedFolder = CentralBankAccountsOfferedFolderItem()\n        # item.addChild(accountsOfferedFolder)\n\n        accountsHeldFolder = CentralBankAccountsHeldFolder()\n        item.addChild(accountsHeldFolder)\n\n        variablesFolder = CentralBankVariablesFolderItem()\n        item.addChild(variablesFolder)\n\n        parametersFolder = CentralBankParametersFolderItem()\n        item.addChild(parametersFolder)\n\n        # transactionsFolder = CentralBankTransactionsFolderItem()\n        # item.addChild(transactionsFolder)\n\n        parentItem = self.findItems(folderName, Qt.MatchCaseSensitive, 0)[0]\n        parentItem.addChild(item)\n\n    ##################################################\n\n    def showCentralBankObjectMenu(self):\n\n        objectName = self.currentItem().text(0)\n        self.showModelObjectMenu(objectName)\n\n    ##################################################\n\n    def showPrivateBankObjectMenu(self):\n\n        objectName = self.currentItem().text(0)\n        self.showModelObjectMenu(objectName)\n\n    ##################################################\n\n    def showGovernmentObjectMenu(self):\n\n        objectName = self.currentItem().text(0)\n        self.showModelObjectMenu(objectName)\n\n    ##################################################\n\n    def showPopulationObjectMenu(self):\n\n        objectName = self.currentItem().text(0)\n        self.showModelObjectMenu(objectName)\n\n    ##################################################\n\n    def showModelObjectMenu(self, objectName):\n\n        cursor = QCursor()\n        menu = QMenu()\n\n        variableAdd = QAction(\"Add Variable\", self, triggered=self.addVariable)\n        menu.addAction(variableAdd)\n\n        parameterAdd = QAction(\"Add Parameter\", self, triggered=self.addParameter)\n        menu.addAction(parameterAdd)\n\n        selectionDel = QAction(\"Delete \" + objectName, self, triggered=self.deleteSelection)\n        menu.addAction(selectionDel)\n\n        mousePos = cursor.pos()\n        mousePos.setX(mousePos.x() + 5)\n        mousePos.setY(mousePos.y() + 5)\n        menu.move(mousePos)\n\n        menu.exec_()\n\n##################################################","sub_path":"gui/modeltree.py","file_name":"modeltree.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"225701711","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom urbansim.models.regression import RegressionModel\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport orca\nfrom urbansim.utils import misc\nimport sys\nimport time\nfrom tqdm import tqdm\nimport yaml\n\nfrom dcm_ard_libs import minimize, neglog_DCM\nfrom fit_large_MNL_LCM import run_large_MNL\nfrom urbansim_templates import modelmanager as mm\nmm.initialize('configs/hlcm_2050')\n\n# from guppy import hpy; h=hpy()\n# import pymrmr\n\n# suppress sklearn warnings by replacing warnings.warn with a no-op\nimport warnings\ndef warn(*args, **kwargs):\n    pass\nwarnings.warn = warn\n\nos.chdir(\"/home/da/semcog_urbansim\")\n\n# import utils\n# data_path = r\"/home/da/share/U_RDF2050/model_inputs/base_hdf\"\ndata_path = r'/home/da/share/urbansim/RDF2050/model_inputs/base_hdf'\nhdf_list = [\n    (data_path + \"/\" + f)\n    for f in os.listdir(data_path)\n    if (\"forecast_data_input\" in f) & (f[-3:] == \".h5\")\n]\nhdf_last = max(hdf_list, key=os.path.getctime)\nhdf = pd.HDFStore(hdf_last, \"r\")\n# hdf = pd.HDFStore(data_path + \"/\" +\"forecast_data_input_091422.h5\", \"r\")\nprint(\"HDF data: \", hdf_last)\n\nvar_validation_list = [\n    (data_path + \"/\" + f)\n    for f in os.listdir(data_path)\n    if (\"variable_validation\" in f) & (f[-5:] == 
\".yaml\")\n]\nvar_validation_last = max(var_validation_list, key=os.path.getctime)\nwith open(var_validation_last, \"r\") as f:\n vars_config = yaml.load(f, Loader=yaml.FullLoader)\nvalid_b_vars = vars_config[\"buildings\"][\"valid variables\"]\nvalid_hh_vars = vars_config[\"households\"][\"valid variables\"]\n\n\ndef apply_filter_query(df, filters=None):\n if filters:\n if isinstance(filters, str):\n query = filters\n else:\n query = \" and \".join(filters)\n return df.query(query)\n else:\n return df\n\n\ndef load_hlcm_df(hh_var, b_var):\n # load both hh\n hh = households.to_frame(hh_var)\n b = buildings.to_frame(b_var)\n return hh, b\n\ndef columns_in_vars(vars):\n hh_columns, b_columns = [], []\n for varname in vars:\n if ':' in varname:\n vs = varname.split(':')\n hh_columns.append(vs[0].strip())\n b_columns.append(vs[1].strip())\n else:\n if varname in valid_hh_vars:\n hh_columns.append(varname.strip())\n elif varname in valid_b_vars:\n b_columns.append(varname.strip())\n else:\n print(varname, \" not found in both hh and buildings table\")\n return hh_columns, b_columns\n \n\ndef get_interaction_vars( df, varname):\n \"\"\"Get interaction variables from variable name\n\n Args:\n varname (string): name of the interaction variable\n \"\"\"\n if \":\" in varname:\n var1, var2 = varname.split(\":\")\n var1, var2 = var1.strip(), var2.strip()\n return (df[var1] * df[var2]).values.reshape(-1, 1)\n else:\n return df[varname].values.reshape(-1, 1)\n\n\nused_vars = pd.read_excel(\"/home/da/share/urbansim/RDF2050/model_estimation/configs_hlcm_2050_update3.xlsx\", sheet_name=2)\nv1 = used_vars[~used_vars[\"new variables 1\"].isna()][\"new variables 1\"].unique()\nv2 = used_vars[~used_vars[\"new variables 2\"].isna()][\"new variables 2\"].unique()\nvars_to_use = np.array(list(set(v1.tolist()).union(v2.tolist())))\n# vars_to_use = used_vars[0].unique()\n\n# config\nchoice_column = \"building_id\"\n# hh_sample_size = 10000\n# estimation_sample_size = 50\n# LARGE_AREA_ID = 147\nhh_filter_columns = [\"building_id\", \"large_area_id\", \"mcd_model_quota\", \"year_built\", \"residential_units\"]\nb_filter_columns = [\"large_area_id\", \"mcd_model_quota\", \"residential_units\"]\n# load variables\nRELOAD = False\nif RELOAD:\n # from notebooks.models_test import *\n import models\n buildings = orca.get_table(\"buildings\")\n households = orca.get_table(\"households\")\n orca.add_injectable('year', 2020)\n orca.run([\"build_networks_2050\"])\n orca.run([\"neighborhood_vars\"])\n # set year to 2050 \n orca.add_injectable('year', 2050)\n orca.run([\"mcd_hu_sampling\"])\n# TODO: get vars from vars list from last forecast\n hh_columns, b_columns = columns_in_vars(vars_to_use)\n\n\n hh_var = hh_columns + hh_filter_columns\n b_var = b_columns + b_filter_columns\n hh_region, b_region = load_hlcm_df(hh_var, b_var)\n hh_region.to_csv('hh.csv')\n b_region.to_csv('b_hlcm.csv')\nelse:\n hh_region = pd.read_csv('hh.csv', index_col=0)\n b_region = pd.read_csv('b_hlcm.csv', index_col=0)\n orca.add_table('households', hh_region)\n orca.add_table('buildings', b_region)\n\ndef estimation(LARGE_AREA_ID):\n hh_sample_size = 10000\n estimation_sample_size = 50\n # sampling hh\n # from the new move-ins, last 5-10 years\n # weighted by mcd_quota\n hh = hh_region[hh_region.large_area_id == LARGE_AREA_ID]\n hh = hh[hh.building_id > 1]\n hh = hh[hh.residential_units > 0]\n hh = hh[hh.year_built > 2005]\n # exclude hh in pseudo buildings\n hh = hh[hh.building_id < 90000000]\n hh[\"mcd_model_quota\"] += 1 # add 1 to all hh's 
mcd_model_quota for weights\n # if total number of hh is less than hh_sample_size\n hh_sample_size = min(hh_sample_size, hh.shape[0])\n hh = hh.sample(hh_sample_size, weights=\"mcd_model_quota\") # hh = hh.sample(hh_sample_size)\n hh = hh.reset_index()\n hh = hh.fillna(0)\n # sampling b\n # sample buildings from the chosen HH's buildings list\n uhh_id = hh.building_id.unique()\n sampled_b_id = []\n for _ in range(estimation_sample_size-1):\n for j in hh.building_id:\n sampled_b_id.append(np.random.choice(uhh_id[uhh_id!=j]))\n b_sample = b_region.loc[sampled_b_id]\n b_sample = pd.concat([b_region.loc[hh.building_id], b_sample])\n b_sample = b_sample.reset_index()\n b_sample = b_sample.fillna(0)\n # remove unnecessary col in HH\n hh = hh[[col for col in hh.columns if col not in hh_filter_columns+[\"household_id\"] or col in ['year_built']]]\n # remove unnecessary col in buildings\n b_sample = b_sample[[col for col in b_sample.columns if col not in b_filter_columns]]\n\n X_df = pd.concat(\n [pd.concat([hh]*estimation_sample_size).reset_index(drop=True), b_sample], axis=1)\n # Y: 1 for the building picked\n # Y = X_df.building_id.isin(picked_bid).astype(int).values\n # Y: set first hh_sample_size item 1\n Y = np.zeros((hh_sample_size*estimation_sample_size,1), dtype=int)\n Y[:hh_sample_size,0] = 1\n # remove extra cols\n X_df = X_df[[col for col in X_df.columns if col not in ['building_id']]]\n # create interaction variables\n newX_cols_name = vars_to_use\n X_wiv = np.array([])\n for varname in newX_cols_name:\n if X_wiv.size > 0:\n X_wiv = np.concatenate((X_wiv, get_interaction_vars(X_df, varname)), axis=1)\n else:\n X_wiv = get_interaction_vars(X_df, varname)\n\n # df to ndarray\n X = X_wiv\n\n # col index with 0 variation\n used_val = np.arange(X.shape[1])[np.std(X, axis=0, dtype=np.float64) > 0]\n unused_val = np.array([x for x in range(X.shape[1]) if x not in used_val])\n\n # only keep variables with variation\n X = X[:, np.std(X, axis=0, dtype=np.float64) > 0]\n # standardize X\n X = (X - np.mean(X, axis=0)) / np.std(X, axis=0, dtype=np.float64)\n # shuffle X\n shuffled_index = np.arange(Y.size)\n np.random.shuffle(shuffled_index)\n X = X[shuffled_index, :].astype(float)\n Y = Y[shuffled_index].reshape(-1, 1)\n # TODO: Y_onehot\n Y_onehot = Y\n # availablechoice is 1\n available_choice = np.ones((X.shape[0], 1))\n\n # theta: m x 1\n theta = np.zeros((X.shape[1], 1))\n\n # dtypes conversion\n X = {0:X, 1:X}\n theta = {0:theta, 1:theta}\n Y = 1 - Y # 0 means picked, 1 means not picked\n Y_onehot = np.concatenate((Y_onehot, 1-Y_onehot), axis=1)\n available_choice = np.concatenate((available_choice, available_choice), axis=1)\n\n t0 = time.time()\n theta_optim_full = minimize(theta, neglog_DCM, -10000, X, Y, Y_onehot, available_choice)\n t1 = time.time()\n print(\"minimizer finished in \", t1-t0)\n\n # exporting theta\n out_theta = pd.DataFrame(theta_optim_full[0], columns=['theta'])\n out_theta.index = newX_cols_name[used_val]\n out_theta = out_theta.loc[out_theta.theta.abs().sort_values(ascending=False).index]\n out_theta.to_csv('configs/hlcm_2050/thetas/out_theta_%s_%s.txt' % (LARGE_AREA_ID, estimation_sample_size))\n\n print(\"Warning: variables with 0 variation\")\n print(newX_cols_name[unused_val])\n print('ARD-DCM done')\n\nif __name__ == \"__main__\":\n la_estimation_configs = {\n 3: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 40\n },\n 5: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 40\n },\n 93: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 50\n 
},\n 99: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 40\n },\n 115: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 50\n },\n 125: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 40\n },\n 147: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 40\n },\n 161: {\n 'skip_estimation': False,\n 'number_of_var_to_use': 50\n },\n }\n for la_id, la_config in la_estimation_configs.items():\n if not la_config['skip_estimation']:\n estimation(la_id)\n run_large_MNL(hh_region, b_region, la_id, la_config['number_of_var_to_use'])","sub_path":"HLCM_estimation.py","file_name":"HLCM_estimation.py","file_ext":"py","file_size_in_byte":9483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"212522980","text":"\"\"\"\nUnit tests for input-related operations.\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom seq2seq import inputs\nfrom seq2seq.test import utils as test_utils\n\n\nclass VocabInfoTest(tf.test.TestCase):\n \"\"\"Tests VocabInfo class\"\"\"\n\n def setUp(self):\n super(VocabInfoTest, self).setUp()\n tf.logging.set_verbosity(tf.logging.INFO)\n self.vocab_list = [\"Hello\", \".\", \"Bye\"]\n self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)\n\n def tearDown(self):\n super(VocabInfoTest, self).tearDown()\n self.vocab_file.close()\n\n def test_vocab_info(self):\n vocab_info = inputs.get_vocab_info(self.vocab_file.name)\n self.assertEqual(vocab_info.vocab_size, 3)\n self.assertEqual(vocab_info.path, self.vocab_file.name)\n self.assertEqual(vocab_info.special_vocab.OOV, 3)\n self.assertEqual(vocab_info.special_vocab.SEQUENCE_START, 4)\n self.assertEqual(vocab_info.special_vocab.SEQUENCE_END, 5)\n self.assertEqual(vocab_info.total_size, 6)\n\n\nclass ReadFromDataProviderTest(tf.test.TestCase):\n \"\"\"\n Tests Data Provider operations.\n \"\"\"\n\n def setUp(self):\n super(ReadFromDataProviderTest, self).setUp()\n tf.logging.set_verbosity(tf.logging.INFO)\n\n def test_read_from_data_provider(self):\n file = test_utils.create_temp_tfrecords(\n source=\"Hello World .\", target=\"Bye\")\n data_provider = inputs.make_data_provider([file.name], num_epochs=5)\n features = inputs.read_from_data_provider(data_provider)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n with tf.contrib.slim.queues.QueueRunners(sess):\n res = sess.run(features)\n\n self.assertEqual(res[\"source_len\"], 3)\n self.assertEqual(res[\"target_len\"], 1)\n np.testing.assert_array_equal(res[\"source_tokens\"].astype(\"U\"),\n [\"Hello\", \"World\", \".\"])\n np.testing.assert_array_equal(res[\"target_tokens\"].astype(\"U\"), [\"Bye\"])\n\n\nclass CreateVocabularyLookupTableTest(tf.test.TestCase):\n \"\"\"\n Tests Vocabulary lookup table operations.\n \"\"\"\n\n def setUp(self):\n super(CreateVocabularyLookupTableTest, self).setUp()\n tf.logging.set_verbosity(tf.logging.INFO)\n self.vocab_list = [\"Hello\", \".\", \"Bye\"]\n self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)\n\n def tearDown(self):\n super(CreateVocabularyLookupTableTest, self).tearDown()\n self.vocab_file.close()\n\n def test_lookup_table(self):\n\n vocab_to_id_table, id_to_vocab_table, vocab_size = \\\n inputs.create_vocabulary_lookup_table(self.vocab_file.name)\n\n self.assertEqual(vocab_size, 3)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n 
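# tf.initialize_all_tables() is the legacy spelling kept here; later TF 1.x releases expose the same op as tf.tables_initializer()\n      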
sess.run(tf.initialize_all_tables())\n\n ids = vocab_to_id_table.lookup(\n tf.convert_to_tensor([\"Hello\", \".\", \"Bye\", \"??\", \"xxx\"]))\n ids = sess.run(ids)\n np.testing.assert_array_equal(ids, [0, 1, 2, 3, 3])\n\n words = id_to_vocab_table.lookup(\n tf.convert_to_tensor(\n [0, 1, 2, 3], dtype=tf.int64))\n words = sess.run(words)\n np.testing.assert_array_equal(\n words.astype(\"U\"), [\"Hello\", \".\", \"Bye\", \"UNK\"])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"Github/IPS-2/seq2seq-fix/seq2seq/test/inputs_test.py","file_name":"inputs_test.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"499721733","text":"from __future__ import print_function\n\nfrom config import config\nimport os\nimport argparse\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import roc_auc_score, accuracy_score\n\nparser = argparse.ArgumentParser(description='PyTorch LNM bags Pipeline')\nparser.add_argument('--foldNum', type=int, default=6, metavar='fN',\n help='number of folds for cross validation (default: 6)')\nparser.add_argument('--currFold', type=int, default=1, metavar='cF',\n help='current fold of the cross validation (default: 1)')\nargs = parser.parse_args()\nstep = args.currFold\n\n#%%\ndef calculate_ROCAUC(Y_ture, Y_prob):\n ROCAUC = roc_auc_score(Y_ture, Y_prob) if len(np.unique(Y_ture)) > 1 else 0.0\n return ROCAUC\n# train\ndef train_and_predict(datastore,tempStore):\n \n print('-'*30)\n print('Loading and preprocessing train data...')\n print('-'*30) \n \n aux_train = np.load(os.path.join(datastore, 'aux_train_{}.npy'.format(step))) \n aux_train = aux_train.astype('float32')\n print('aux_train: shape{}'.format(aux_train.shape)) \n # convert class vectors to binary class matrices\n y_train = np.load(os.path.join(datastore, 'y_train_{}.npy'.format(step)))\n print('y_train: shape{}'.format(y_train.shape))\n # nb_classes = len(np.unique(y_train))\n # Y_train = np_utils.to_categorical(y_train, nb_classes)\n\n print('-'*30)\n print('Loading and preprocessing test data...')\n print('-'*30)\n \n aux_test = np.load(os.path.join(datastore, 'aux_test_{}.npy'.format(step)))\n aux_test = aux_test.astype('float32')\n y_test = np.load(os.path.join(datastore, 'y_test_{}.npy'.format(step)))\n # convert class vectors to binary class matrices\n # Y_test = np_utils.to_categorical(y_test, nb_classes)\n \n print('-'*30)\n print('Creating and compiling model...')\n print('-'*30)\n #---------------------------------#\n model = SVC(C=1, \n kernel='linear', \n degree=3, \n gamma='auto', \n coef0=0.0, \n shrinking=True, \n probability=True, \n tol=0.001, \n cache_size=200, \n class_weight=None, \n verbose=False, \n max_iter=-1, \n random_state=None\n )\n #---------------------------------#\n print('-'*30)\n print('Fitting model...')\n print('-'*30)\n\n model.fit(aux_train, y_train)\n\n print('-'*30)\n print('Predicting masks on test data...')\n print('-'*30)\n y_possibility = model.predict_proba(aux_test)\n np.save(os.path.join(tempStore,'Y_predict.npy'), y_possibility) \n\n # evaluate\n print('-'*60)\n print('best test result')\n # predict result\n Y_predict = np.argmax(y_possibility, axis=1) \n print ('Y_predict_type:{}'.format(Y_predict.dtype))\n print ('Y_predi:{}'.format(Y_predict))\n\n # ground truth\n Y_test = np.squeeze(y_test)\n Y_test= np.int64(Y_test)\n print ('Y_test_:{}'.format(Y_test))\n print 
('Y_test_type:{}'.format(Y_test.dtype))\n\n    #classification_report\n    print('Accuracy:{}'.format(accuracy_score(y_true=Y_test, y_pred=Y_predict)))\n    print('ROCAUC:{}'.format(calculate_ROCAUC(Y_ture=Y_test, Y_prob = y_possibility[:,1])))\n    target_names = ['Y', 'N']\n    print(classification_report(Y_test, Y_predict, target_names=target_names))\n\nif __name__ == '__main__':\n    datastore = './dataStore'\n    tempStore = './tempData'\n    if not os.path.exists(tempStore):\n        os.mkdir(tempStore)\n    train_and_predict(datastore,tempStore)","sub_path":"HuashanHand_new_copy/Old/train_domain_knowledge.py","file_name":"train_domain_knowledge.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"13896573","text":"from pelicula import Pelicula\r\nfrom CatalogoPelicula import Catalogo\r\npelicula=None\r\nNear=True \r\n\r\nwhile(Near):\r\n    \r\n    print(\"\\t\\t<<-Movies->>\")\r\n    print(\"1-Add movie\")\r\n    print(\"2-Print movies\")\r\n    print(\"3-Delete\")\r\n    print(\"X-Exit\")\r\n    opcion=input(\"Choose: \")\r\n    \r\n    if opcion==\"X\":\r\n        Near=False\r\n        print(\"Exit\")\r\n    \r\n    elif opcion != \"1\" and opcion !=\"2\" and opcion !=\"3\":\r\n        print(\"Please type again\")\r\n    \r\n    elif opcion==\"1\":\r\n        name_movie=input(\"Enter the movie name: \")\r\n        pelicula=Pelicula(name_movie)\r\n        Catalogo.agregar_pelicula(pelicula) \r\n    elif opcion==\"2\":\r\n        if pelicula is None:\r\n            name_movie=input(\"Enter the movie name: \")\r\n            pelicula=Pelicula(name_movie)\r\n            Catalogo.agregar_pelicula(pelicula)\r\n        else:\r\n            print(\"Movie catalog\")\r\n            Catalogo.lista()\r\n    \r\n    \r\n    elif opcion==\"3\": \r\n        Catalogo.eliminar()","sub_path":"A_Programas/catalogoMovies/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"157021783","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef prepare_data(filename, delimiter='\\t'):\n    features = []\n    labels = []\n    with open(filename) as f:\n        for each_row in f.readlines():\n            each_row = each_row.strip().split(delimiter)\n            features.append([float(i) for i in each_row[:-1]])\n            labels.append(int(float(each_row[-1])))\n    features = np.mat(features)\n    labels = np.mat(labels).T\n    return features, labels\n\n\ndef stump_classify(data, feature_idx, thresh_val, thresh_ineq):\n    \"\"\"\n    return predicted values given a decision stump\n    \"\"\"\n    res_array = np.ones((np.shape(data)[0], 1))\n    # If we choose \"<=\", data with corresponding feature value lower than\n    # the given threshold will be classified as -1, i.e. 
binary classification\n if thresh_ineq == 'lower_than':\n res_array[data[:, feature_idx] <= thresh_val] = -1.0\n else:\n res_array[data[:, feature_idx] > thresh_val] = -1.0\n return res_array\n\n\ndef build_stump(data, labels, D, num_steps=10):\n \"\"\"\n build a decision dump with the lowest error\n :param D: weights vector for each data point, shape:(m, 1)\n \"\"\"\n m = data.shape[0]\n best_stump = {}\n best_estimation = np.mat(np.zeros((m, 1)))\n # init error sum, to +infinity\n min_error = np.inf\n # loop over all features\n for feature_idx in range(data.shape[1]):\n # define the step length\n feature_min_val = data[:, feature_idx].min()\n feature_max_val = data[:, feature_idx].max()\n step_size = (feature_max_val - feature_min_val) / num_steps\n # loop over all range in current feature, we add -1 and int(numSteps)+1\n # because we need to consider both front and tail.\n for j in range(-1, int(num_steps) + 1):\n # go over less than and greater than\n for inequal in ['lower_than', 'greater_than']:\n # define threshold(split value) used to split current feature\n thresh_val = (feature_min_val + float(j) * step_size)\n # predicted_labels: -1 -> F; +1 -> T\n predicted_labels = stump_classify(data, feature_idx,\n thresh_val, inequal)\n # compare predicted_labels with true labels\n error_arr = np.mat(np.ones((m, 1)))\n # error_arr: 0 -> correct; 1 -> mistake\n error_arr[predicted_labels == labels] = 0\n # calculate weighted error: inner product\n weighted_error = D.T * error_arr\n # save the best decision stump\n if weighted_error < min_error:\n min_error = weighted_error\n best_estimation = predicted_labels.copy()\n best_stump['dim'] = feature_idx\n best_stump['thresh'] = thresh_val\n best_stump['ineq'] = inequal\n print(f\"split: feature {best_stump['dim']}, \"\n f\"thresh {best_stump['thresh']}, \"\n f\"thresh ineqal: {best_stump['ineq']}, \"\n f\"the weighted error is {min_error}\")\n return best_stump, min_error, best_estimation\n\n\ndef adaboost_train(data, labels, nun_iter=40):\n weak_learner_arr = []\n m = data.shape[0]\n # init weights_arr to all equal\n weights_arr = np.mat(np.ones((m, 1)) / m)\n agg_estimation = np.mat(np.zeros((m, 1)))\n for i in range(nun_iter):\n # build Stump\n best_stump, min_error, best_estimation = \\\n build_stump(data, labels, weights_arr)\n # print(\"weights_arr:\", weights_arr.T)\n # calculate alpha: (1/2)*log((1-e)/e);\n # throw in max(error,eps) to account for error=0\n alpha = float(0.5*np.log((1.0-min_error)/max(min_error, 1e-16)))\n best_stump['alpha'] = alpha\n # store Stump Params in Array\n weak_learner_arr.append(best_stump)\n # print(\"classEst: \", best_estimation.T)\n # If a data point has been wrongly classified,\n # multiply its weight by exp(a).\n # Otherwise, multiply its weights by exp(-a)\n # we use labels*predicted_labels to define whether\n # a data point has been classified correctly\n expon = np.multiply(-1 * alpha * labels, best_estimation)\n weights_arr = np.multiply(weights_arr, np.exp(expon))\n weights_arr = weights_arr / weights_arr.sum()\n # calc training error of all classifiers,\n # if this is 0 quit for loop early\n agg_estimation += alpha * best_estimation\n # print(\"aggClassEst: \", agg_estimation.T)\n agg_errors = np.multiply(np.sign(agg_estimation) != labels,\n np.ones((m, 1)))\n error_rate = agg_errors.sum() / m\n # print(\"total error: \", error_rate)\n if error_rate == 0.0:\n break\n return weak_learner_arr, agg_estimation\n\n\ndef ada_classify(data_point, classifier_arr):\n data = np.mat(data_point)\n m = 
np.shape(data)[0]\n    agg_estimation = np.mat(np.zeros((m, 1)))\n    # make predictions on each classifier and get a weighted sum\n    for i in range(len(classifier_arr)):\n        best_estimation = stump_classify(data, classifier_arr[i]['dim'],\n                                         classifier_arr[i]['thresh'],\n                                         classifier_arr[i]['ineq'])\n        agg_estimation += classifier_arr[i]['alpha']*best_estimation\n        # print(agg_estimation)\n    return np.sign(agg_estimation)\n\n\ndef plotROC(predStrengths, classLabels):\n    \"\"\"\n    There is a more intuitive way to draw the ROC curve: set the x-axis tick\n    interval to 1/N and the y-axis tick interval to 1/P, where N and P are the\n    numbers of negative and positive samples. Then sort the samples by the\n    model's output in descending order and walk through them, drawing the curve\n    from 0: each time a positive sample is met, draw one tick interval along\n    the y-axis; each time a negative sample is met, draw one tick interval\n    along the x-axis. Once all samples have been visited, the curve is complete.\n    Link: https://www.jianshu.com/p/2ca96fce7e81\n    \"\"\"\n    # cursor\n    cur = (1.0, 1.0)\n    # variable to calculate AUC\n    y_sum = 0.0\n    num_pos_class = sum(np.array(classLabels) == 1.0)\n    y_step = 1 / float(num_pos_class)\n    x_step = 1 / float(len(classLabels)-num_pos_class)\n    # get sorted index, it's reverse\n    sorted_indicies = predStrengths.argsort()\n    fig = plt.figure()\n    fig.clf()\n    ax = plt.subplot(111)\n    # loop through all the values, drawing a line segment at each point\n    for index in sorted_indicies.tolist()[0]:\n        if classLabels[index] == 1.0:\n            del_x = 0\n            del_y = y_step\n        else:\n            del_x = x_step\n            del_y = 0\n            y_sum += cur[1]\n        # draw line from cur to (cur[0]-delX,cur[1]-delY)\n        ax.plot([cur[0], cur[0]-del_x], [cur[1], cur[1]-del_y], c='b')\n        cur = (cur[0]-del_x, cur[1]-del_y)\n    ax.plot([0, 1], [0, 1], 'b--')\n    plt.xlabel('False positive rate')\n    plt.ylabel('True positive rate')\n    plt.title('ROC curve for AdaBoost horse colic detection system')\n    ax.axis([0, 1, 0, 1])\n    plt.show()\n    print(\"the Area Under the Curve is: \", y_sum * x_step)\n\n\nif __name__ == '__main__':\n    data, labels = prepare_data('resources/horseColicTraining2.txt')\n    weak_learner_arr, agg_estimation = adaboost_train(data, labels, 10)\n    plotROC(agg_estimation.T, labels)\n","sub_path":"ML_in_Action/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"62753295","text":"import logging\nimport nltk\nimport os\nimport pickle\nimport pymysql\nimport sqlite3\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn_evaluation import ClassifierEvaluator, plot  # plot is used by visualize_with_plot() below\n\nfrom classification import conf\n\n\nnltk.download('stopwords')\nLOG = logging.getLogger(__name__)\nENG_STOPWORDS = set(nltk.corpus.stopwords.words('english'))\nNON_ENG_STOPWORDS = set(nltk.corpus.stopwords.words()) - ENG_STOPWORDS\n\n\ndef is_english(text):\n    \"\"\"Detect whether the text is English or not (otherwise assumed Vietnamese).\"\"\"\n    text = text.lower()\n    words = set(nltk.wordpunct_tokenize(text))\n    return len(words & ENG_STOPWORDS) > len(words & NON_ENG_STOPWORDS)\n\n\ndef get_stop_words(is_eng=True):\n    \"\"\"Get language's stopwords\n\n    :param boolean is_eng: True if English.\n    \"\"\"\n    if is_eng:\n        return 'english'\n    else:\n        with open(conf.VN_SW) as f:\n            vn_stopwords = f.read().splitlines()\n        return vn_stopwords\n\n\ndef bag_of_words(is_eng=True):\n    \"\"\"Bag of words vectorize.\n\n    :param boolean is_eng: True if English.\n    \"\"\"\n    return CountVectorizer(stop_words=get_stop_words(is_eng),\n                           token_pattern=r'\\b[^\\W\\d_]+\\b')\n\n\ndef td_idf(is_eng=True):\n    \"\"\"Tf Idf vectorize.\n\n    :param boolean is_eng: True if English.\n    \"\"\"\n    return 
TfidfVectorizer(stop_words=get_stop_words(is_eng),\n token_pattern=r'\\b[^\\W\\d_]+\\b')\n\n\ndef get_data_from_db(query, rows, engine=conf.DEFAULT_DB_ENGINE):\n \"\"\"Connect to database and get data from it.\n\n :param str query: sql query.\n :param int rows: number of rows.\n :param str engine: database engine.\n default is MySQL.\n :rtype: list\n \"\"\"\n # Initialize `connection` var to None. In case we could not\n # create a connection to the database(for ex the disk is full)\n # we would not have a connection var defined.\n connection = None\n try:\n if engine.lower() == 'mysql':\n connection = pymysql.connect(host=conf.MYSQL_HOST,\n port=conf.MYSQL_PORT,\n user=conf.MYSQL_USER,\n password=conf.MYSQL_PASSWORD,\n db=conf.MYSQL_DB,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.\n DictCursor)\n elif engine.lower() == 'sqlite':\n connection = sqlite3.connect(conf.SQLITE_DB)\n LOG.info('Connected to {}!' . format(engine))\n\n cursor = connection.cursor()\n cursor.execute(query)\n result = cursor.fetchmany(rows)\n LOG.info('Get random data from database.')\n\n contents = []\n labels = []\n if engine.lower() == 'mysql':\n for r in result:\n contents.append(r['content'])\n labels.append(r['label'])\n else:\n for r in result:\n contents.append(r[0].strip())\n labels.append(r[1])\n return (contents, labels)\n except Exception as e:\n LOG.exception('Failed when connecting to database: {}. ' . format(e))\n finally:\n if connection:\n LOG.info('Close database connection.')\n connection.close()\n\n\ndef save(obj, path):\n \"\"\"Save Classifier object to pickle file.\"\"\"\n if os.path.isfile(path):\n LOG.info('File existed! Use load() method.')\n else:\n pickle.dump(obj, open(path, 'wb'), pickle.HIGHEST_PROTOCOL)\n\n\ndef load(path):\n \"\"\"Load Classifier object from pickle file\"\"\"\n if not os.path.isfile(path):\n LOG.info('File doesnt existed!')\n raise IOError()\n else:\n return pickle.load(open(path, 'rb'))\n\n\ndef split_train_test(X, y, estimator, test_size=0.4):\n LOG.info('Evaluate with test_size is %2.0f%%' % (test_size * 100))\n # Split into a training and testing set\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=test_size)\n estimator.fit(X_train, y_train)\n y_pred = estimator.predict(X_test)\n y_score = estimator.predict_proba(X_test)\n result = {\n 'X_train': X_train,\n 'X_test': X_test,\n 'y_train': y_train,\n 'y_test': y_test,\n 'y_pred': y_pred,\n 'y_score': y_score\n }\n return result\n\n\ndef evaluate(X, y, estimator, test_size=0.4, confusion=False):\n \"\"\"Evaluate algorithm.\n :param numpy matrix X: dataset.\n :param numpy matrix y: label matrix.\n :param float test_size: testset's size/alldataset's size.\n :param boolean confusion: use confusion matrix or not.\n \"\"\"\n result = split_train_test(X, y, estimator, test_size)\n\n if not confusion:\n return classification_report(result['y_test'], result['y_pred'])\n else:\n return confusion_matrix(result['y_test'], result['y_pred'])\n\n\ndef visualize_with_plot(X, y, estimator, test_size=0.4):\n result = split_train_test(X, y, estimator, test_size)\n target_names = ['business', 'entertainment', 'health', 'politics',\n 'sports', 'technology']\n plot.confusion_matrix(result['y_test'], result['y_pred'],\n target_names=target_names)\n\n\ndef visualize(X, y, estimator, test_size=0.4, html=False):\n result = split_train_test(X, y, estimator, test_size)\n target_names = ['business', 'entertainment', 'health', 'politics',\n 'sports', 'technology']\n ce = ClassifierEvaluator(estimator, 
result['y_test'], result['y_pred'],\n result['y_score'], target_names=target_names)\n if html:\n template = '''\n # Report\n {estimator_type}\n {date}\n {confusion_matrix}\n {roc}\n {precision_recall}\n '''\n\n ce.generate_report(template, path='report.html')\n else:\n ce.confusion_matrix\n","sub_path":"machine_learning/classification/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"460268897","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__version__ = \"0.1.0\"\n\n__copyright__ = \"\"\"\n pyObjUtils - Object file library for Python.\n\n (C) 2010-2016 by Christoph Schueler \n\n All Rights Reserved\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License along\n with this program; if not, write to the Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\"\"\"\n\nimport enum\nfrom objutils.elf import defs\n\n# Processor specific flags for the ELF header e_flags field.\nEF_ARM_RELEXEC = 0x01\nEF_ARM_HASENTRY = 0x02\nEF_ARM_INTERWORK = 0x04\nEF_ARM_APCS_26 = 0x08\nEF_ARM_APCS_FLOAT = 0x10\nEF_ARM_PIC = 0x20\nEF_ARM_ALIGN8 = 0x40 # 8-bit structure alignment is in use.\nEF_ARM_NEW_ABI = 0x80\nEF_ARM_OLD_ABI = 0x100\nEF_ARM_SOFT_FLOAT = 0x200\nEF_ARM_VFP_FLOAT = 0x400\nEF_ARM_MAVERICK_FLOAT = 0x800\n\n# Frame unwind information\nPT_ARM_EXIDX = defs.PT_LOPROC + 1\n\n# Other constants defined in the ARM ELF spec. version B-01.\nEF_ARM_SYMSARESORTED = 0x04 # NB conflicts with EF_INTERWORK.\nEF_ARM_DYNSYMSUSESEGIDX = 0x08 # NB conflicts with EF_APCS26.\nEF_ARM_MAPSYMSFIRST = 0x10 # NB conflicts with EF_APCS_FLOAT.\nEF_ARM_EABIMASK = 0xFF000000\n\n# New constants defined in the ARM ELF spec. 
version XXX.\n# Only valid in conjunction with EF_ARM_EABI_VER5.\nEF_ARM_ABI_FLOAT_SOFT = 0x200 # NB conflicts with EF_ARM_SOFT_FLOAT.\nEF_ARM_ABI_FLOAT_HARD = 0x400 # NB conflicts with EF_ARM_VFP_FLOAT.\n\n# Constants defined in AAELF.\nEF_ARM_BE8 = 0x00800000\nEF_ARM_LE8 = 0x00400000\n\nEF_ARM_EABI_UNKNOWN = 0x00000000\nEF_ARM_EABI_VER1 = 0x01000000\nEF_ARM_EABI_VER2 = 0x02000000\nEF_ARM_EABI_VER3 = 0x03000000\nEF_ARM_EABI_VER4 = 0x04000000\nEF_ARM_EABI_VER5 = 0x05000000\n\n# Local aliases for some flags to match names used by COFF port.\nF_INTERWORK = EF_ARM_INTERWORK\nF_APCS26 = EF_ARM_APCS_26\nF_APCS_FLOAT = EF_ARM_APCS_FLOAT\nF_PIC = EF_ARM_PIC\nF_SOFT_FLOAT = EF_ARM_SOFT_FLOAT\nF_VFP_FLOAT = EF_ARM_VFP_FLOAT\n\n# Additional symbol types for Thumb.\nSTT_ARM_TFUNC = defs.STT_LOPROC # A Thumb function.\nSTT_ARM_16BIT = defs.STT_HIPROC # A Thumb label.\n\n# /* Additional section types.\nSHT_ARM_EXIDX = 0x70000001 # Section holds ARM unwind info.\nSHT_ARM_PREEMPTMAP = 0x70000002 # Section pre-emption details.\nSHT_ARM_ATTRIBUTES = 0x70000003 # Section holds attributes.\nSHT_ARM_DEBUGOVERLAY = 0x70000004 # Section holds overlay debug info.\nSHT_ARM_OVERLAYSECTION = 0x70000005 # Section holds GDB and overlay integration info.\n\n# ARM-specific values for sh_flags.\nSHF_ENTRYSECT = 0x10000000 # Section contains an entry point.\nSHF_COMDEF = 0x80000000 # Section may be multiply defined in the input to a link step.\n\n# ARM-specific program header flags.\nPF_ARM_SB = 0x10000000 # Segment contains the location addressed by the static base.\nPF_ARM_PI = 0x20000000 # Segment is position-independent.\nPF_ARM_ABS = 0x40000000 # Segment must be loaded at its base address.\n\n# /* Values for the Tag_CPU_arch EABI attribute.\nTAG_CPU_ARCH_PRE_V4 = 0\nTAG_CPU_ARCH_V4 = 1\nTAG_CPU_ARCH_V4T = 2\nTAG_CPU_ARCH_V5T = 3\nTAG_CPU_ARCH_V5TE = 4\nTAG_CPU_ARCH_V5TEJ = 5\nTAG_CPU_ARCH_V6 = 6\nTAG_CPU_ARCH_V6KZ = 7\nTAG_CPU_ARCH_V6T2 = 8\nTAG_CPU_ARCH_V6K = 9\nTAG_CPU_ARCH_V7 = 10\nTAG_CPU_ARCH_V6_M = 11\nTAG_CPU_ARCH_V6S_M = 12\nTAG_CPU_ARCH_V7E_M = 13\nTAG_CPU_ARCH_V8 = 14\nMAX_TAG_CPU_ARCH = 14\n# Pseudo-architecture to allow objects to be compatible with the subset of\n# armv4t and armv6-m. 
This value should never be stored in object files.\nTAG_CPU_ARCH_V4T_PLUS_V6_M = (MAX_TAG_CPU_ARCH + 1)\n\n\nclass ElfArmRelocType(enum.IntEnum):\n\n# AAELF official names and numbers.\n R_ARM_NONE = 0\n R_ARM_PC24 = 1 # deprecated\n R_ARM_ABS32 = 2\n R_ARM_REL32 = 3\n R_ARM_LDR_PC_G0 = 4\n R_ARM_ABS16 = 5\n R_ARM_ABS12 = 6\n R_ARM_THM_ABS5 = 7\n R_ARM_ABS8 = 8\n R_ARM_SBREL32 = 9\n R_ARM_THM_CALL = 10\n R_ARM_THM_PC8 = 11\n R_ARM_BREL_ADJ = 12\n R_ARM_TLS_DESC = 13\n R_ARM_THM_SWI8 = 14 # obsolete\n R_ARM_XPC25 = 15 # obsolete\n R_ARM_THM_XPC22 = 16 # obsolete\n R_ARM_TLS_DTPMOD32 = 17\n R_ARM_TLS_DTPOFF32 = 18\n R_ARM_TLS_TPOFF32 = 19\n R_ARM_COPY = 20 # Copy symbol at runtime.\n R_ARM_GLOB_DAT = 21 # Create GOT entry.\n R_ARM_JUMP_SLOT = 22 # Create PLT entry.\n R_ARM_RELATIVE = 23 # Adjust by program base.\n R_ARM_GOTOFF32 = 24 # 32 bit offset to GOT.\n R_ARM_BASE_PREL = 25 # 32 bit PC relative offset to GOT.\n R_ARM_GOT_BREL = 26 # 32 bit GOT entry.\n R_ARM_PLT32 = 27 # deprecated - 32 bit PLT address.\n R_ARM_CALL = 28\n R_ARM_JUMP24 = 29\n R_ARM_THM_JUMP24 = 30\n R_ARM_BASE_ABS = 31\n R_ARM_ALU_PCREL7_0 = 32 # obsolete\n R_ARM_ALU_PCREL15_8 = 33 # obsolete\n R_ARM_ALU_PCREL23_15 = 34 # obsolete\n R_ARM_LDR_SBREL_11_0 = 35 # deprecated, should have _NC suffix\n R_ARM_ALU_SBREL_19_12 = 36 # deprecated, should have _NC suffix\n R_ARM_ALU_SBREL_27_20 = 37 # deprecated, should have _CK suffix\n R_ARM_TARGET1 = 38\n R_ARM_SBREL31 = 39 # deprecated\n R_ARM_V4BX = 40\n R_ARM_TARGET2 = 41\n R_ARM_PREL31 = 42\n R_ARM_MOVW_ABS_NC = 43\n R_ARM_MOVT_ABS = 44\n R_ARM_MOVW_PREL_NC = 45\n R_ARM_MOVT_PREL = 46\n R_ARM_THM_MOVW_ABS_NC = 47\n R_ARM_THM_MOVT_ABS = 48\n R_ARM_THM_MOVW_PREL_NC = 49\n R_ARM_THM_MOVT_PREL = 50\n R_ARM_THM_JUMP19 = 51\n R_ARM_THM_JUMP6 = 52\n R_ARM_THM_ALU_PREL_11_0 = 53\n R_ARM_THM_PC12 = 54\n R_ARM_ABS32_NOI = 55\n R_ARM_REL32_NOI = 56\n R_ARM_ALU_PC_G0_NC = 57\n R_ARM_ALU_PC_G0 = 58\n R_ARM_ALU_PC_G1_NC = 59\n R_ARM_ALU_PC_G1 = 60\n R_ARM_ALU_PC_G2 = 61\n R_ARM_LDR_PC_G1 = 62\n R_ARM_LDR_PC_G2 = 63\n R_ARM_LDRS_PC_G0 = 64\n R_ARM_LDRS_PC_G1 = 65\n R_ARM_LDRS_PC_G2 = 66\n R_ARM_LDC_PC_G0 = 67\n R_ARM_LDC_PC_G1 = 68\n R_ARM_LDC_PC_G2 = 69\n R_ARM_ALU_SB_G0_NC = 70\n R_ARM_ALU_SB_G0 = 71\n R_ARM_ALU_SB_G1_NC = 72\n R_ARM_ALU_SB_G1 = 73\n R_ARM_ALU_SB_G2 = 74\n R_ARM_LDR_SB_G0 = 75\n R_ARM_LDR_SB_G1 = 76\n R_ARM_LDR_SB_G2 = 77\n R_ARM_LDRS_SB_G0 = 78\n R_ARM_LDRS_SB_G1 = 79\n R_ARM_LDRS_SB_G2 = 80\n R_ARM_LDC_SB_G0 = 81\n R_ARM_LDC_SB_G1 = 82\n R_ARM_LDC_SB_G2 = 83\n R_ARM_MOVW_BREL_NC = 84\n R_ARM_MOVT_BREL = 85\n R_ARM_MOVW_BREL = 86\n R_ARM_THM_MOVW_BREL_NC = 87\n R_ARM_THM_MOVT_BREL = 88\n R_ARM_THM_MOVW_BREL = 89\n R_ARM_TLS_GOTDESC = 90\n R_ARM_TLS_CALL = 91\n R_ARM_TLS_DESCSEQ = 92\n R_ARM_THM_TLS_CALL = 93\n R_ARM_PLT32_ABS = 94\n R_ARM_GOT_ABS = 95\n R_ARM_GOT_PREL = 96\n R_ARM_GOT_BREL12 = 97\n R_ARM_GOTOFF12 = 98\n R_ARM_GOTRELAX = 99\n R_ARM_GNU_VTENTRY = 100 # deprecated - old C++ abi\n R_ARM_GNU_VTINHERIT = 101 # deprecated - old C++ abi\n R_ARM_THM_JUMP11 = 102\n R_ARM_THM_JUMP8 = 103\n R_ARM_TLS_GD32 = 104\n R_ARM_TLS_LDM32 = 105\n R_ARM_TLS_LDO32 = 106\n R_ARM_TLS_IE32 = 107\n R_ARM_TLS_LE32 = 108\n R_ARM_TLS_LDO12 = 109\n R_ARM_TLS_LE12 = 110\n R_ARM_TLS_IE12GP = 111\n # 112 - 127 private range\n R_ARM_ME_TOO = 128 # obsolete\n R_ARM_THM_TLS_DESCSEQ = 129\n\n R_ARM_IRELATIVE = 160\n\n # Extensions? 
R=read-only?\n    R_ARM_RXPC25 = 249\n    R_ARM_RSBREL32 = 250\n    R_ARM_THM_RPC22 = 251\n    R_ARM_RREL32 = 252\n    R_ARM_RABS32 = 253\n    R_ARM_RPC24 = 254\n    R_ARM_RBASE = 255\n\n    # Unofficial names for some of the relocs.\n    R_ARM_GOTOFF = R_ARM_GOTOFF32   # 32 bit offset to GOT.\n    R_ARM_THM_PC22 = R_ARM_THM_CALL\n    R_ARM_THM_PC11 = R_ARM_THM_JUMP11\n    R_ARM_THM_PC9 = R_ARM_THM_JUMP8\n\n    # Relocs with both a different name, and (apparently) different meaning in\n    # GNU usage.\n    R_ARM_GOTPC = R_ARM_BASE_PREL   # 32 bit PC relative offset to GOT.\n    R_ARM_GOT32 = R_ARM_GOT_BREL    # 32 bit GOT entry.\n    R_ARM_ROSEGREL32 = R_ARM_SBREL31    # ???\n    R_ARM_AMP_VCALL9 = R_ARM_BREL_ADJ   # Thumb-something. Not used.\n\n    R_ARM_max = 256\n\n\ndef decodeARMMachineFlags(flags):\n    result = ''\n    unknown = False\n    eabi = flags & EF_ARM_EABIMASK\n    flags &= ~EF_ARM_EABIMASK\n\n    if flags & EF_ARM_RELEXEC:\n        result += \", relocatable executable\"\n        flags &= ~ EF_ARM_RELEXEC\n\n    if flags & EF_ARM_HASENTRY:\n        result += \", has entry point\"\n        flags &= ~ EF_ARM_HASENTRY\n\n    if eabi == EF_ARM_EABI_VER1:\n        result += \", Version1 EABI\"\n        while flags:\n            flag = flags & -flags\n            flags &= ~ flag\n            if flag == EF_ARM_SYMSARESORTED:\n                result += \", sorted symbol tables\"\n            else:\n                unknown = True\n    elif eabi == EF_ARM_EABI_VER2:\n        result += \", Version2 EABI\"\n        while flags:\n            flag = flags & -flags\n            flags &= ~ flag\n            if flag == EF_ARM_SYMSARESORTED:    # Conflicts with EF_ARM_INTERWORK.\n                result += \", sorted symbol tables\"\n            elif flag == EF_ARM_DYNSYMSUSESEGIDX:\n                result += \", dynamic symbols use segment index\"\n            elif flag == EF_ARM_MAPSYMSFIRST:\n                result += \", mapping symbols precede others\"\n            else:\n                unknown = True\n    elif eabi == EF_ARM_EABI_VER3:\n        result += \", Version3 EABI\"\n    elif eabi == EF_ARM_EABI_VER4:\n        result += \", Version4 EABI\"\n        while flags:\n            flag = flags & -flags\n            flags &= ~ flag\n            if flag == EF_ARM_BE8:\n                result += \", BE8\"\n            elif flag == EF_ARM_LE8:\n                result += \", LE8\"\n            else:\n                unknown = True\n    elif eabi == EF_ARM_EABI_VER5:\n        result += \", Version5 EABI\"\n        while flags:\n            flag = flags & -flags\n            flags &= ~ flag\n            if flag == EF_ARM_BE8:\n                result += \", BE8\"\n            elif flag == EF_ARM_LE8:\n                result += \", LE8\"\n            elif flag == EF_ARM_ABI_FLOAT_SOFT: # Conflicts with EF_ARM_SOFT_FLOAT.\n                result += \", soft-float ABI\"\n            elif flag == EF_ARM_ABI_FLOAT_HARD: # Conflicts with EF_ARM_VFP_FLOAT.\n                result += \", hard-float ABI\"\n            else:\n                unknown = True\n    elif eabi == EF_ARM_EABI_UNKNOWN:\n        result += \", GNU EABI\"\n        while flags:\n            flag = flags & -flags\n            flags &= ~ flag\n            if flag == EF_ARM_INTERWORK:\n                result += \", interworking enabled\"\n            elif flag == EF_ARM_APCS_26:\n                result += \", uses APCS/26\"\n            elif flag == EF_ARM_APCS_FLOAT:\n                result += \", uses APCS/float\"\n            elif flag == EF_ARM_PIC:\n                result += \", position independent\"\n            elif flag == EF_ARM_ALIGN8:\n                result += \", 8 bit structure alignment\"\n            elif flag == EF_ARM_NEW_ABI:\n                result += \", uses new ABI\"\n            elif flag == EF_ARM_OLD_ABI:\n                result += \", uses old ABI\"\n            elif flag == EF_ARM_SOFT_FLOAT:\n                result += \", software FP\"\n            elif flag == EF_ARM_VFP_FLOAT:\n                result += \", VFP\"\n            elif flag == EF_ARM_MAVERICK_FLOAT:\n                result += \", Maverick FP\"\n            else:\n                unknown = True\n\n    else:\n        result += \", <unrecognized EABI>\"\n        if flags:\n            unknown = True\n    if unknown:\n        result += \", <unknown>\"\n    return result\n\n","sub_path":"objutils/armabi.py","file_name":"armabi.py","file_ext":"py","file_size_in_byte":13370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"597191449","text":"# 모든 알파벳을 오름차순으로 정렬하여 이어서 출력한 뒤에, 그 뒤에 모든 숫자를 더한 값을 출력하기 \nN = input()\n\nNUM = []\n\nALPA = \"\"\n\nfor i in N:\n if i.isdigit():\n NUM.append(int(i))\n else:\n ALPA += i\n\nprint(''.join(sorted(ALPA))+str(sum(NUM)))\n","sub_path":"BAEKJOON/구현/문자열 재정렬.py","file_name":"문자열 재정렬.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"285032024","text":"# -*- coding: mbcs -*-\r\nfrom __future__ import division\r\nfrom part import *\r\nfrom material import *\r\nfrom section import *\r\nfrom assembly import *\r\nfrom step import *\r\nfrom interaction import *\r\nfrom load import *\r\nfrom mesh import *\r\nfrom optimization import *\r\nfrom job import *\r\nfrom sketch import *\r\nfrom visualization import *\r\nfrom connectorBehavior import *\r\nfrom odbAccess import *\r\nimport numpy \r\nimport mesh\r\nimport math\r\n\r\n############ parametres geometriques\r\n## attention a la coherence des parametres entre eux\r\nm= mdb.Model(name='Pantographe', modelType=STANDARD_EXPLICIT)\r\nbb =1.0 \r\n\r\nk=1.0 # longeur connexion\r\npp=2*bb/10 # epaisseur connexion\r\nc=2.0 # diagonale carre \r\n\r\n# r = 0.0003 #0.0005 # 0.0003# rayon du cercle inscrit virtuel dont dependent tous les RdC\r\n# dimensions cellule\r\nl1 = 2*bb+k+(c/2)\r\nl2 = 2*bb # l2 doit etre de meme taille que le 1er terme de l1\r\n\r\n# beta = atan (pp/(k/2)) en radian = 180* ( atan (pp/(k/2)) ) / pi en degres \r\n# angles pr RDC en radian\r\nalpha1= atan( ((2*bb)-(c/2))/ 2*bb ) \r\nalpha2= (pi + alpha1 - atan (pp/(k/2)) - pi*0.5 ) /2\r\nalpha3= (0.5*pi-atan (pp/(k/2))-alpha1)/2\r\nalpha4= pi*45.0/180\r\nalpha5= (pi*90/180) - alpha1 \r\n\r\nr1=(( sqrt((r*tan(alpha1))**2 + r**2) + r*tan(alpha1) ) / cos(alpha1) ) - r\r\nr2=(( sqrt((r*tan(alpha2))**2 + r**2) + r*tan(alpha2) ) / cos(alpha2) ) - r\r\nr3=(( sqrt((r*tan(alpha3))**2 + r**2) + r*tan(alpha3) ) / cos(alpha3) ) - r\r\nr4=(( sqrt((r*tan(alpha4))**2 + r**2) + r*tan(alpha4) ) / cos(alpha4) ) - r\r\nr5=(( sqrt((r*tan(alpha5))**2 + r**2) + r*tan(alpha5) ) / cos(alpha5) ) - r\r\n\r\n############ parametres materiaux\r\nEv = 0.001\r\nYoung = 1 \r\nPoisson = 0.3\r\n\r\n############ parametres maillage\r\nn=5.0\r\nminSizeMesh = 2*pi*r/(3*n) # r ? r/n ? 
\r\nmaxSizeMesh = l1/50\r\nmaxSizeMesh2 = l1/100 # for the connectors\r\nforme = QUAD # element shape ( TRI for triangular )\r\nelemcodtyp1 = CPE4 # plane-strain elements ( CPE8R for quadratic , CPE4R for linear)\r\nelemcodtyp2 = CPE3 # plane-strain elements ( CPE6M for quadratic , CPE3 for linear)\r\n\r\n############ post-processing parameters\r\nAmp = 1.0\r\nBCSym = 1\r\n\r\n############ PART rectangle ##############################\r\nm.ConstrainedSketch(name='__profile__', sheetSize=200.0)\r\npr = mdb.models['Pantographe'].sketches['__profile__']\r\npr.rectangle(point1=(0.0, l2),point2=(l1, 0.0))\r\nm.Part(dimensionality=TWO_D_PLANAR, name='Part-1', type= DEFORMABLE_BODY)\r\nm.parts['Part-1'].BaseShell(sketch= pr)\r\n\r\n#### parameterisation of the rectangle edges \r\np = mdb.models['Pantographe'].parts['Part-1']\r\ne = p.edges\r\n\r\n# reference: grid point at bottom left\r\nedges = e.findAt(((l1/2,l2, 0.0),))\r\np.Set(edges=edges, name='haut')\r\n \r\nedges = e.findAt(((l1,l2/2, 0.0),))\r\np.Set(edges=edges, name='droite')\r\n \r\nedges = e.findAt(((l1/2,0.0, 0.0),))\r\np.Set(edges=edges, name='bas')\r\n \r\nedges = e.findAt(((0.0,l2/2, 0.0),))\r\np.Set(edges=edges, name='gauche')\r\n\r\n### partition of the internal geometry \r\nm.ConstrainedSketch(gridSpacing=5.19, name='__profile__',\r\nsheetSize=207.84, transform= p.MakeSketchTransform(\r\n    sketchPlane=p.faces[0], \r\n    sketchPlaneSide=SIDE1, sketchOrientation=RIGHT, origin=(0.0, 0.0, 0.0)))\r\np.projectReferencesOntoSketch(filter=COPLANAR_EDGES, sketch=m.sketches['__profile__'])\r\nmdb.models['Pantographe'].sketches['__profile__'].sketchOptions.setValues(\r\n    gridOrigin=(-l1/2, -l2/2))\r\npr = mdb.models['Pantographe'].sketches['__profile__']\r\ns = pr.geometry\r\n\r\n## partition reference is always the bottom-left point, unlike the star version\r\npr.Line(point1=(0.0,c/2),point2=(c/2,0.0)) #a (A-F)\r\npr.Line(point1=(0.0,-c/2),point2=(c/2,0.0)) #b (A3-F)\r\npr.Line(point1=(0.0,c/2),point2=(-c/2,0.0)) #c (A-F2)\r\n\r\n#* curve a-b c3\r\npr.FilletByRadius(curve1= s.findAt(( (c/2)/2, (c/2)/2 )),\r\ncurve2=s.findAt(( (c/2)/2, -(c/2)/2 )), \r\nnearPoint1=((c/2)/2, (c/2)/2), nearPoint2=((c/2)/2, -(c/2)/2), radius=r4)\r\n \r\n#* curve c-a c10\r\npr.FilletByRadius(curve1= s.findAt(( (c/2)/2, (c/2)/2 )),\r\ncurve2=s.findAt(( -(c/2)/2, (c/2)/2 )), \r\nnearPoint1=((c/2)/2, (c/2)/2), nearPoint2=(-(c/2)/2, (c/2)/2), radius=r4) \r\n \r\npr.Line(point1=((2*bb)+k+(c/2),2*bb-(c/2) ) , point2=( (2*bb)+k,2*bb )) #d (D-C)\r\npr.Line(point1=((2*bb)+k+(c/2),2*bb-(c/2) ) , point2=( (2*bb)+k+c,2*bb )) #e (D-C2)\r\npr.Line(point1=((2*bb)+k,2*bb) , point2=((2*bb)+k+(c/2),(2*bb)+(c/2) ) ) #f (C-D3) \r\n \r\n#* curve d-e c9 \r\npr.FilletByRadius(curve1= s.findAt(( ((2*bb)+k+(c/2)+(2*bb)+k)/2, (2*bb-(c/2)+2*bb)/2 )),\r\ncurve2=s.findAt(( ((2*bb)+k+(c/2)+(2*bb)+k+c)/2, (2*bb-(c/2)+2*bb)/2 )), \r\nnearPoint1=(( (2*bb)+k+(c/2)+(2*bb)+k)/2, (2*bb-(c/2)+2*bb)/2), \r\nnearPoint2=(((2*bb)+k+(c/2)+(2*bb)+k+c)/2, (2*bb-(c/2)+2*bb)/2 ), radius=r4) \r\n \r\n#* curve d-f c4\r\npr.FilletByRadius(curve1= s.findAt(( ( (2*bb)+k+(c/2)+(2*bb)+k)/2, (2*bb-(c/2)+2*bb)/2 )),\r\ncurve2=s.findAt(( ((2*bb)+k+ (2*bb)+k+(c/2))/2, (2*bb+(2*bb)+(c/2))/2 )), \r\nnearPoint1=(( (2*bb)+k+(c/2)+(2*bb)+k)/2, (2*bb-(c/2)+2*bb)/2), \r\nnearPoint2=(((2*bb)+k+ (2*bb)+k+(c/2))/2, (2*bb+(2*bb)+(c/2))/2 ), radius=r4) \r\n \r\npr.Line(point1=(k+(c/2),0.0),point2=((2*bb)+k+(c/2),2*bb-(c/2))) #m (E-D) \r\npr.Line(point1=(2*bb+k+(c/2),(2*bb)-(c/2)),point2=((c/2)+k+(4*bb),0.0)) #o (D-E2) 
\r\npr.Line(point1=(0.0,c/2),point2=(2*bb,2*bb)) #p (A-B) \r\npr.Line(point1=(0.0,c/2),point2=(-(2*bb),2*bb)) #q (A-B2)\r\npr.Line(point1=(k+(c/2),0.0),point2=((2*bb)+k+(c/2),-(2*bb)+(c/2))) #n (E-D2)\r\npr.Line(point1=(2*bb,2*bb),point2=(0.0,(4*bb)-(c/2))) #r (B-A2)\r\n \r\n# curve m-o c11 \r\npr.FilletByRadius(curve1= s.findAt(( (k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+2*bb-(c/2))/2 )),\r\ncurve2=s.findAt(( (2*bb+k+(c/2)+(c/2)+k+(4*bb))/2, ((2*bb)-(c/2)+0.0)/2 )), \r\nnearPoint1=((k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+2*bb-(c/2))/2), \r\nnearPoint2=((2*bb+k+(c/2)+(c/2)+k+(4*bb))/2, ((2*bb)-(c/2)+0.0)/2 ), radius=r5) \r\n \r\n# curve q-p c12 \r\npr.FilletByRadius(curve1= s.findAt(( (0.0+2*bb)/2, ((c/2)+2*bb)/2 )),\r\ncurve2=s.findAt(( (0.0-(2*bb))/2, (c/2+2*bb)/2 )), \r\nnearPoint1=((0.0+2*bb)/2, ((c/2)+2*bb)/2), \r\nnearPoint2=((0.0-(2*bb))/2, (c/2+2*bb)/2 ), radius=r5) \r\n \r\n# curve m-n c2\r\npr.FilletByRadius(curve1= s.findAt(( (k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+-(2*bb)+(c/2))/2 )),\r\ncurve2=s.findAt(( (k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+2*bb-(c/2))/2 )), \r\nnearPoint1=( (k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+-(2*bb)+(c/2))/2 ) , \r\nnearPoint2=( (k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+2*bb-(c/2))/2 ) , radius=r1) \r\n\r\n# curve p-r c1 \r\npr.FilletByRadius(curve1= s.findAt(( (0.0+2*bb)/2, (c/2+2*bb)/2 )),\r\ncurve2=s.findAt(( (2*bb+0.0)/2, (2*bb+(4*bb)-(c/2))/2 )), \r\nnearPoint1=( (0.0+2*bb)/2, (c/2+2*bb)/2 ) , \r\nnearPoint2=( (2*bb+0.0)/2, (2*bb+(4*bb)-(c/2))/2 ) , radius=r1) \r\n \r\npr.Line(point1=(c/2,0.0),point2=(2*bb,2*bb)) #g (F-B)\r\npr.Line(point1=(c/2,0.0),point2=((c/2)+(k/2),pp)) #i (F-G)\r\npr.Line(point1=(k+(c/2),0.0),point2=((2*bb)+k,2*bb)) #h (E-C)\r\npr.Line(point1=((2*bb)+(k/2),(2*bb)-pp),point2=((2*bb)+k,2*bb)) #l (H-C)\r\npr.Line(point1=((c/2)+(k/2),pp),point2=(k+(c/2),0.0)) #j (G-E)\r\npr.Line(point1=(2*bb,2*bb),point2=((2*bb)+(k/2),(2*bb)-pp)) #k (B-H) \r\n \r\n#curve g-i c7\r\npr.FilletByRadius(curve1= s.findAt(( (c/2+2*bb)/2, (0.0+2*bb)/2 )),\r\ncurve2=s.findAt(( (c/2+(c/2)+(k/2))/2, (0.0+pp)/2 )), \r\nnearPoint1=( (c/2+2*bb)/2, (0.0+2*bb)/2 ) , nearPoint2=( (c/2+(c/2)+(k/2))/2, (0.0+pp)/2 ) , radius=r3) \r\n \r\n###curve h-l c6\r\npr.FilletByRadius(curve1= s.findAt(( (k+(c/2)+(2*bb)+k)/2, (0.0+2*bb)/2 )),\r\ncurve2=s.findAt(( ((2*bb)+(k/2)+(2*bb)+k)/2, ((2*bb)-pp+2*bb)/2 )), \r\nnearPoint1=( (k+(c/2)+(2*bb)+k)/2, (0.0+2*bb)/2 ) , nearPoint2=( ((2*bb)+(k/2)+(2*bb)+k)/2, ((2*bb)-pp+2*bb)/2 ) , radius=r3) \r\n \r\n###curve g-k c5 \r\npr.FilletByRadius(curve1= s.findAt(( (c/2+2*bb)/2, (0.0+2*bb)/2 )),\r\ncurve2=s.findAt(( (2*bb+(2*bb)+(k/2))/2, (2*bb+(2*bb)-pp)/2 )), \r\nnearPoint1=( (c/2+2*bb)/2, (0.0+2*bb)/2 ) , nearPoint2=( (2*bb+(2*bb)+(k/2))/2, (2*bb+(2*bb)-pp)/2 ) , radius=r2) \r\n\r\n###curve j-h c8\r\npr.FilletByRadius(curve1= s.findAt(( ((c/2)+(k/2)+k+(c/2))/2, (pp+0.0)/2 )),\r\ncurve2=s.findAt(( (k+(c/2)+(2*bb)+k)/2, (0.0+2*bb)/2 )), \r\nnearPoint1=( ((c/2)+(k/2)+k+(c/2))/2, (pp+0.0)/2 ) , nearPoint2=( (k+(c/2)+(2*bb)+k)/2, (0.0+2*bb)/2 ) , radius=r2)\r\n \r\n### partitions des diamants \r\npr.Line(point1=(c/2-r,0.0),point2=(c/2+r,0.0)) \r\npr.Line(point1=(c/2+k-r,0.0),point2=(c/2+k+r,0.0))\r\npr.Line(point1=(2*bb-r,2*bb),point2=(2*bb+r,2*bb))\r\npr.Line(point1=(2*bb+k-r,2*bb),point2=(2*bb+k+r,2*bb))\r\n \r\n \r\n \r\np = mdb.models['Pantographe'].parts['Part-1']\r\np.PartitionFaceBySketch(faces=p.faces.findAt(((0.0, 0.0, 0.0), )),sketch=pr)\r\n###### supprimer la partie vide ######\r\nf = p.faces\r\nindex_pantographe = 
mdb.models['Pantographe'].parts['Part-1'].faces.findAt((l1/4,l2/2,0.0),).index\r\nmdb.models['Pantographe'].parts['Part-1'].RemoveFaces(deleteCells=False, \r\n faceList=f[0:index_pantographe]+f[index_pantographe+1:6])\r\n\r\n################## proprietes ###########\r\nm = mdb.models['Pantographe']\r\np = m.parts['Part-1']\r\nm.Material(name='Material-plein')\r\nm.materials['Material-plein'].Elastic(table=((Young, Poisson),))\r\nm.HomogeneousSolidSection(material='Material-plein', \r\nname='plein', thickness=None)\r\n\r\n## repere point bas gauche \r\np.Set(faces=m.parts['Part-1'].faces.findAt(((c/2, bb/4, 0.0), )), name='Cellule')\r\np.SectionAssignment(offset=0.0, offsetField='', offsetType=MIDDLE_SURFACE, region=p.sets['Cellule'], sectionName='plein',thicknessAssignment=FROM_SECTION)\r\n\r\n################ Assembly #############\r\nm.rootAssembly.DatumCsysByDefault(CARTESIAN)\r\nm.rootAssembly.Instance(dependent=ON, name='Part-1-1', part=m.parts['Part-1']) \r\n \r\n###################### mesh ##################\r\n# repere Grid ( bas gauche ) \r\n \r\n#g\r\npickedEdges = e.findAt((( (c/2+2*bb)/2, (0.0+2*bb)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh, constraint=FINER) \r\n# i\r\npickedEdges1 = e.findAt((( (c/2+(c/2)+(k/2))/2, (0.0+pp)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=SINGLE, end1Edges=pickedEdges1, minSize=minSizeMesh, maxSize=maxSizeMesh2, constraint=FINER) \r\n# j\r\npickedEdges2 = e.findAt((( ((c/2)+(k/2)+k+(c/2))/2, (pp+0.0)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=SINGLE, end2Edges=pickedEdges2, minSize=minSizeMesh, maxSize=maxSizeMesh2, constraint=FINER)\r\n# k\r\npickedEdges2 = e.findAt((( (2*bb+(2*bb)+(k/2))/2, (2*bb+(2*bb)-pp)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=SINGLE, end1Edges=pickedEdges1, minSize=minSizeMesh, maxSize=maxSizeMesh2, constraint=FINER)\r\n# l\r\npickedEdges1 = e.findAt((( ((2*bb)+(k/2)+(2*bb)+k)/2, ((2*bb)-pp+2*bb)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=SINGLE, end2Edges=pickedEdges2, minSize=minSizeMesh, maxSize=maxSizeMesh2, constraint=FINER)\r\n#h\r\npickedEdges = e.findAt((( (k+(c/2)+(2*bb)+k)/2, (0.0+2*bb)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh, constraint=FINER) \r\n# p\r\npickedEdges = e.findAt((( (0.0+2*bb)/2, ((c/2)+2*bb)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh, constraint=FINER)\r\n# m\r\npickedEdges = e.findAt((( (k+(c/2)+(2*bb)+k+(c/2))/2, (0.0+2*bb-(c/2))/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh, constraint=FINER)\r\n# a\r\npickedEdges = e.findAt((( (c/2)/2, (c/2)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh, constraint=FINER)\r\n# d\r\npickedEdges = e.findAt((( ((2*bb)+k+(c/2)+(2*bb)+k)/2, (2*bb-(c/2)+2*bb)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh, constraint=FINER)\r\n\r\n### arretes connexion x et y\r\npickedEdges = e.findAt((( (c/2+(c/2)+k)/2, (0.0+0.0)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh2, constraint=FINER)\r\npickedEdges = e.findAt((( (2*bb+2*bb+k)/2, (2*bb+2*bb)/2, 0.0 ),))\r\np.seedEdgeByBias(biasMethod=DOUBLE, endEdges=pickedEdges, minSize=minSizeMesh, maxSize=maxSizeMesh2, constraint=FINER)\r\n\r\n#### elements 
constants sur partitions part et dautre des arrets x y\r\npickedEdges = e.findAt((( (c/2-r+c/2+r)/2, (0.0+0.0)/2, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\npickedEdges = e.findAt((( (c/2+k-r+c/2+k+r)/2, (0.0+0.0)/2, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\npickedEdges = e.findAt((( (2*bb-r+2*bb+r)/2, (2*bb+2*bb)/2, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER) \r\npickedEdges = e.findAt((( (2*bb+k-r+2*bb+k+r)/2, (2*bb+2*bb)/2, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER) \r\n\r\n## elements constants sur les rdc\r\n#c1\r\npickedEdges = e.findAt((( 2*bb-r, 2*bb-0.00001, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER) \r\n#c2\r\npickedEdges = e.findAt((( (c/2)+k+r, 0.00001, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n#c3\r\npickedEdges = e.findAt((( (c/2)-r, 0.00001, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n#c4\r\npickedEdges = e.findAt((( 2*bb+k+r, 2*bb-0.00001, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n###c5\r\npickedEdges = e.findAt((( 2*bb+r*cos(atan ( pp/(k/2) )+alpha2),2*bb-r*sin(atan ( pp/(k/2) )+alpha2) , 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n###c6\r\npickedEdges = e.findAt((( 2*bb+k-r*cos(atan ( pp/(k/2) )+alpha3) ,2*bb-r*sin(atan ( pp/(k/2) )+alpha3) , 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n###c7\r\npickedEdges = e.findAt((( c/2+r*cos(alpha3+atan ( pp/(k/2) )) ,r*sin(alpha3+atan ( pp/(k/2) )),0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n###c8\r\npickedEdges = e.findAt((( c/2+k-r*cos(alpha2+atan (pp/(k/2))) ,r*sin(alpha2+atan ( pp/(k/2) )) , 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n#c9\r\npickedEdges = e.findAt((( (2*bb)+k+(c/2)-0.00001, 2*bb-(c/2)+r, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n#c10\r\npickedEdges = e.findAt((( 0.00001, (c/2)-r, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n#c11\r\npickedEdges = e.findAt((( (2*bb)+k+(c/2)-0.00001, 2*bb-(c/2)-r, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n#c12 \r\npickedEdges = e.findAt((( 0.00001, (c/2)+r, 0.0 ),))\r\np.seedEdgeBySize(edges=pickedEdges, size=minSizeMesh, deviationFactor=0.1,constraint=FINER)\r\n\r\n###*******\r\nelemType1 = mesh.ElemType(elemCode=elemcodtyp1, elemLibrary=STANDARD, # CPE4R lin / CPE8R quad\r\nsecondOrderAccuracy=OFF, hourglassControl=DEFAULT, \r\ndistortionControl=DEFAULT)\r\nelemType2 = mesh.ElemType(elemCode=elemcodtyp2, elemLibrary=STANDARD) # CPE3 lin / CPE6M quad\r\np = mdb.models['Pantographe'].parts['Part-1']\r\nf = p.faces\r\nfaces = f.findAt( ( (c/2, bb/4, 0.0), ) )\r\npickedRegions =(faces, )\r\np.setElementType(regions=pickedRegions, elemTypes=(elemType1, elemType2))\r\npickedRegions = f.findAt( ( (c/2, bb/4, 0.0), 
))\r\np.setMeshControls(regions=pickedRegions, elemShape=forme) \r\np = mdb.models['Pantographe'].parts['Part-1']\r\np.generateMesh()\r\ncc = mdb.models['Pantographe']\r\nt= cc.rootAssembly.instances['Part-1-1']\r\n\r\n# referencepoint = (c/2 - r , 0 , 0)\r\nreferencepoint = (0 , 0 , 0)\r\nMatrix1_C = isotrope2d(Young,Poisson)\r\nMaterialSets_C={'Cellule':Matrix1_C}\r\nMatrix2_C = isotrope2d_nsym(Young,Poisson)\r\nNewMaterialSets_C = {'Cellule':Matrix2_C}\r\nMatrix_S = isotrope2d_inverse(Young,Poisson)\r\nMaterialSets_S={'Cellule':Matrix_S}\r\nmdb.models[modelname].rootAssembly.Set(name='Set-1', vertices=t.vertices.findAt(((c/2 - r, 0.0, 0.0), )))","sub_path":"Pantographe.py","file_name":"Pantographe.py","file_ext":"py","file_size_in_byte":17664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"575371632","text":"\"\"\"\nSettings helper for Flask\n\"\"\"\n\nimport os\nimport sys\n\nENV_CONFIG = [\n ('APP_NAME', 'OPENSHIFT_APP_NAME', 'feeder', None),\n ('SECRET_KEY', 'SECRET_KEY', 'my secret', None),\n ('MONGODB_SETTINGS.DB', 'OPENSHIFT_APP_NAME', 'feeder', None),\n ('MONGODB_SETTINGS.HOST', 'OPENSHIFT_MONGODB_DB_HOST', 'localhost', None),\n ('MONGODB_SETTINGS.PORT', 'OPENSHIFT_MONGODB_DB_PORT', '27017', int),\n ('MONGODB_SETTINGS.USERNAME', 'OPENSHIFT_MONGODB_DB_USERNAME', None, None),\n ('MONGODB_SETTINGS.PASSWORD', 'OPENSHIFT_MONGODB_DB_PASSWORD', None, None),\n ]\n\nTHIS_MODULE = sys.modules[__name__]\nfor (conf, env, default, type) in ENV_CONFIG:\n d = None\n splits = conf.split('.')\n for s in splits[0:-1]:\n if hasattr(THIS_MODULE, s):\n d = getattr(THIS_MODULE, s)\n else:\n d = {}\n setattr(THIS_MODULE, s, d)\n\n value = os.environ.get(env, default)\n if type:\n value = type(value)\n\n if d is not None:\n d[splits[-1]] = value\n else:\n setattr(THIS_MODULE, conf, os.environ.get(env, default))\n","sub_path":"feeder/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"299004911","text":"# encoding: utf-8\nfrom __future__ import absolute_import, unicode_literals\nimport os\nfrom celery import Celery\nfrom kombu import Queue, Exchange\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbtsite.settings')\n\napp = Celery('dbtsite')\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n\n# configurations\napp.conf.result_expires = 10 * 60\napp.conf.timezone = 'Asia/Shanghai'\napp.conf.result_serializer = 'json'\napp.conf.task_serializer = 'json'\napp.conf.accept_content = ['json']\napp.conf.worker_max_tasks_per_child = 10\napp.conf.broker_url = 'amqp://myuser:mypassword@127.0.0.1:5672/myvhost'\napp.conf.result_backend = 'redis://127.0.0.1:6379/3'\n\n# queues\napp.conf.task_default_queue = 'default'\napp.conf.task_queues = (\n Queue('cron', exchange=Exchange('cron', type='direct'), routing_key='cron'),\n Queue('mon', exchange=Exchange('mon', type='direct'), routing_key='mon'),\n Queue('job', exchange=Exchange('job', type='direct'), routing_key='job'),\n)\napp.conf.task_routes = {\n 'tasks.tasks.assign_task': {\n 'queue': 
'job'\n },\n 'tasks.tasks.mon_task': {\n 'queue': 'mon'\n },\n 'tasks.tasks.cron_task': {\n 'queue': 'cron'\n },\n}\n\n# scheduler\napp.conf.beat_scheduler = 'django_celery_beat.schedulers:DatabaseScheduler'\n","sub_path":"dbtsite/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"616783294","text":"from flask import Flask, request, abort\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage, ImageSendMessage, VideoSendMessage, StickerSendMessage, AudioSendMessage, JoinEvent, LeaveEvent\n)\nimport os\nimport random\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\ndb = SQLAlchemy(app)\nimport time\n\nclass Room(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n group_id = db.Column(db.String(80))\n phase = db.Column(db.String(80))\n player = db.Column(db.Integer)\n roomact = db.Column(db.Integer)\n\n def __init__(self, group_id,phase, player, roomact):\n self.group_id = group_id\n self.phase = phase\n self.player = player\n self.roomact = roomact \n\nclass Player(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n player_id = db.Column(db.String(80))\n name = db.Column(db.String(80))\n ans = db.Column(db.String(80))\n playact = db.Column(db.Integer)\n vote = db.Column(db.Integer)\n\n def __init__(self, player_id,name, ans, playact, vote):\n self.player_id = player_id\n self.name =name\n self.ans = ans\n self.playact = playact \n self.vote = vote \n \n \n# def thisRoom():\n# rooms = session.query(Room).filter(Room.id==event.source.group_id).all()\n# return rooms(0)\n\n\n#環境変数取得\nLINE_CHANNEL_ACCESS_TOKEN = os.environ[\"LINE_CHANNEL_ACCESS_TOKEN\"]\nLINE_CHANNEL_SECRET = os.environ[\"LINE_CHANNEL_SECRET\"]\n\nline_bot_api = LineBotApi(LINE_CHANNEL_ACCESS_TOKEN)\nhandler = WebhookHandler(LINE_CHANNEL_SECRET)\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n return 'OK'\n\ndef show_room(event):\n # all_rooms = db.session.query(Room).all()\n # print(all_rooms)\n groupId = event.source.group_id\n return groupId\n\ndef record_room(event):\n groupId = event.source.group_id\n match_rooms = db.session.query(Room).filter(Room.group_id==groupId)\n if len(match_rooms) ==0:\n db.session.add(Room(groupId, \"suspend\", 0, 0))\n db.session.commit()\n message = \"グループID({})をDBに保存しました\".format(groupId) \n else:\n message=\"該当グループidが存在します\" \n return message + \"開始する場合は「開始」と入力してください\"\n\ndef receive_message(event):\n groupId = event.source.group_id\n userMessage = event.message.text\n profile = line_bot_api.get_profile(event.source.user_id)\n f = db.session.query(Room).filter(Room.group_id == groupId)[0]\n if userMessage == \"開始\":\n f.phase = \"invite\"\n db.session.commit()\n message = \"参加者は「参加」と入力してください\"\n if f.phase == \"invite\":\n if userMessage == \"参加\":\n db.session.add(Player(profile.user_id,profile.displey_name,\"\",0,0))\n\n elif userMessage == \"締切り\":\n messsage = \"sixtupai\" \n else: \n message = 
\"不正な入力です\"\n return message\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n message = receive_message(event)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text = message))\n\n@handler.add(JoinEvent)\ndef handle_join(event):\n message = record_room(event)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text = message))\n\nif __name__ == \"__main__\":\n# app.run()\n port = int(os.getenv(\"PORT\", 5000))\n app.run(host=\"0.0.0.0\", port=port)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"506096484","text":"import unittest\nimport os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nos.chdir(parentdir)\nsys.path.insert(0,os.getcwd())\n\nfrom src.main.draft_check import check_draft\nfrom lib.file_management.file_management_lib import DirManagement, FileEditor\n\nclass TestDraftCheck(unittest.TestCase):\n def setUp(self) -> None:\n \"\"\"\n create env in test dir:\n - ~/ta\n - ~/ta/draft.json\n \"\"\"\n self.ta_dir = os.path.join(currentdir,\"ta\")\n DirManagement().create_dir(self.ta_dir)\n FileEditor().create_file(self.ta_dir,\"draft.json\")\n return super().setUp()\n\n def test_check_draft(self):\n \"\"\"call function check_draft need to be return True\n \"\"\"\n self.assertTrue(check_draft(currentdir))\n \n def tearDown(self) -> None:\n \"\"\"\n remove ~/ta when the test is finish\n \"\"\"\n DirManagement().remove_dir(currentdir+r\"\\ta\")\n return super().tearDown()\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test/test_draft_check.py","file_name":"test_draft_check.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"283529797","text":"import os\nop = os.path\n\nimport numpy as num\n\nfrom pyrocko import trace, util, model, pz\nfrom pyrocko.fdsn import station\nfrom pyrocko.guts import Object, Tuple, String, Timestamp, Float\n\nclass Channel(Object):\n nslc = Tuple.T(4, String.T())\n tmin = Timestamp.T(optional=True)\n tmax = Timestamp.T(optional=True)\n lat = Float.T()\n lon = Float.T()\n elevation = Float.T()\n depth = Float.T()\n dip = Float.T()\n azimuth = Float.T()\n input_unit = String.T()\n output_unit = String.T()\n response = trace.FrequencyResponse.T()\n\n def spans(self, *args):\n if len(args) == 0:\n return True\n elif len(args) == 1:\n return ((self.tmin is None or\n self.tmin <= args[0]) and\n (self.tmax is None or\n args[0] <= self.tmax))\n\n elif len(args) == 2:\n return ((self.tmin is None or\n args[1] >= self.tmin) and\n (self.tmax is None or\n self.tmax >= args[0]))\n\nclass EnhancedSacPzError(Exception):\n pass\n\n\ndef read_enhanced_sac_pz(filename):\n zeros, poles, constant, comments = pz.read_sac_zpk(filename=filename, get_comments=True)\n d = {}\n for line in comments:\n toks = line.split(':', 1)\n if len(toks) == 2:\n temp = toks[0].strip('* \\t')\n for k in ('network', 'station', 'location', 'channel', 'start', 'end', \n 'latitude', 'longitude', 'depth', 'elevation', 'dip', 'azimuth',\n 'input unit', 'output unit'):\n if temp.lower().startswith(k):\n d[k] = toks[1].strip()\n\n response = trace.PoleZeroResponse(zeros, poles, constant)\n\n try:\n channel = Channel(\n nslc=(d['network'], d['station'], d['location'], d['channel']),\n 
tmin=util.str_to_time(d['start'], format='%Y-%m-%dT%H:%M:%S'),\n tmax=util.str_to_time(d['end'], format='%Y-%m-%dT%H:%M:%S'),\n lat=float(d['latitude']),\n lon=float(d['longitude']),\n elevation=float(d['elevation']),\n depth=float(d['depth']),\n dip=float(d['dip']),\n azimuth=float(d['azimuth']),\n input_unit=d['input unit'],\n output_unit=d['output unit'],\n response=response)\n except:\n raise EnhancedSacPzError('cannot get all required information from file %s' % filename)\n\n return channel\n\nclass MetaData:\n\n def __init__(self):\n self._content = {}\n\n def add_channel(self, channel):\n nslc = channel.nslc\n if nslc not in self._content:\n self._content[nslc] = []\n\n self._content[nslc].append(channel)\n\n def get_pyrocko_response(\n self, nslc, time=None, timespan=None, fake_input_units=None):\n\n tt = ()\n if time is not None:\n tt = (time,)\n elif timespan is not None:\n tt = timespan\n\n candidates = [c for c in self._content.get(nslc, []) if c.spans(*tt)]\n\n if not candidates:\n raise station.NoResponseInformation('%s.%s.%s.%s' % nslc)\n elif len(candidates) > 1:\n raise station.MultipleResponseInformation('%s.%s.%s.%s' % nslc)\n\n channel = candidates[0]\n if fake_input_units:\n if channel.input_unit != fake_input_units:\n raise station.NoResponseInformation(\n 'cannot convert between units: %s, %s'\n % (fake_input_units, channel.input_unit))\n\n return channel.response\n\n def get_pyrocko_stations(self, time=None, timespan=None,\n inconsistencies='warn'):\n \n tt = ()\n if time is not None:\n tt = (time,)\n elif timespan is not None:\n tt = timespan\n\n by_nsl = {}\n for nslc in self._content.keys():\n nsl = nslc[:3]\n for channel in self._content[nslc]:\n if channel.spans(*tt):\n if nsl not in by_nsl:\n by_nsl[nsl] = []\n by_nsl[nsl].append(channel)\n\n pstations = []\n for nsl, channels in by_nsl.iteritems():\n vals = []\n for channel in channels:\n vals.append((channel.lat, channel.lon, channel.depth, channel.elevation))\n\n lats, lons, depths, elevations = zip(*vals)\n same = station.same\n inconsistencies = not (same(lats) and same(lons) and same(depths) and same(elevations))\n\n if inconsistencies == 'raise':\n raise InconsistentChannelLocations(\n 'encountered inconsistencies in channel '\n 'lat/lon/elevation/depth '\n 'for %s.%s.%s: \\n%s' % (nsl + (info,)))\n\n elif inconsistencies == 'warn':\n logger.warn(\n 'cannot create station object for '\n '%s.%s.%s due to inconsistencies in '\n 'channel lat/lon/elevation/depth\\n%s'\n % (nsl + (info,)))\n\n continue\n\n pchannels = []\n for channel in channels:\n pchannels.append(model.Channel(\n channel.nslc[-1],\n azimuth=channel.azimuth,\n dip=channel.dip))\n\n pstations.append(model.Station(\n *nsl,\n lat=num.mean(lats),\n lon=num.mean(lons),\n elevation=num.mean(elevations),\n depth=num.mean(depths),\n channels=pchannels))\n \n return pstations\n\n @property\n def nslc_code_list(self):\n return list(self._content.keys())\n\n\ndef load(dirnames):\n if isinstance(dirnames, basestring):\n dirnames = [dirnames]\n\n m = MetaData()\n for dn in dirnames:\n for fn in os.listdir(dn):\n channel = read_enhanced_sac_pz(op.join(dn, fn))\n m.add_channel(channel)\n\n return m\n","sub_path":"modules/pz_archive.py","file_name":"pz_archive.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"160730437","text":"from flask import render_template, redirect, url_for, request, g, make_response\nfrom profapp.forms.article import 
ArticleForm\nfrom profapp.models.articles import Article, ArticleCompany, ArticlePortalDivision\nfrom profapp.models.users import User\n# from profapp.models.company import Company\n# from db_init import db_session\nfrom .blueprints_declaration import article_bp\nfrom .request_wrapers import ok, object_to_dict\nfrom ..constants.ARTICLE_STATUSES import ARTICLE_STATUS_IN_COMPANY, ARTICLE_STATUS_IN_PORTAL\n# import os\nfrom .pagination import pagination\nfrom config import Config\nfrom .views_file import crop_image, update_croped_image\nfrom ..models.files import ImageCroped, File\nfrom ..models.pr_base import PRBase\nfrom utils.db_utils import db\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom ..models.translate import TranslateTemplate\n\n\n\n@article_bp.route('/translate/', methods=['POST'])\n@ok\ndef translate(json):\n translation = TranslateTemplate.getTranslate(request.json['template'], request.json['phrase'])\n return {'phrase': translation}\n\n\n@article_bp.route('/save_translate/', methods=['POST'])\n@ok\ndef save_translate(json):\n return TranslateTemplate.getTranslate(request.json['template'], request.json['phrase'], request.json['url'])\n\n@article_bp.route('/update_last_accessed/', methods=['POST'])\n@ok\ndef update_last_accessed(json):\n return TranslateTemplate.update_last_accessed(request.json['template'], request.json['phrase'])\n\n@article_bp.route('/list/', methods=['GET'])\ndef show_mine():\n return render_template(\n 'article/list.html',\n angular_ui_bootstrap_version='//angular-ui.github.io/bootstrap/ui-bootstrap-tpls-0.14.2.js')\n\n\n@article_bp.route('/list/', methods=['POST'])\n@ok\ndef load_mine(json):\n current_page = json.get('pages')['current_page'] if json.get('pages') else 1\n chosen_company_id = json.get('chosen_company')['id'] if json.get('chosen_company') else 0\n params = {'search_text': json.get('search_text'), 'user_id': g.user_dict['id']}\n original_chosen_status = None\n article_status = json.get('chosen_status')\n if chosen_company_id:\n params['company_id'] = chosen_company_id\n if article_status and article_status != 'All':\n params['status'] = original_chosen_status = article_status\n subquery = ArticleCompany.subquery_user_articles(**params)\n\n articles, pages, current_page = pagination(subquery,\n page=current_page)\n\n all, companies = ArticleCompany.get_companies_where_user_send_article(g.user_dict['id'])\n statuses = {status: status for status in ARTICLE_STATUS_IN_COMPANY.all}\n statuses['All'] = 'All'\n\n articles_with_time = []\n\n for (article, time) in articles.all():\n article_dict = article.get_client_side_dict()\n article_dict['md_tm'] = time\n articles_with_time.append({'article': article_dict,\n 'company_count': len(article_dict['submitted_versions']) + 1})\n\n return {'articles': articles_with_time,\n 'companies': companies,\n 'search_text': json.get('search_text') or '',\n 'original_search_text': json.get('search_text') or '',\n 'chosen_company': json.get('chosen_company') or all,\n 'pages': {'total': pages,\n 'current_page': current_page,\n 'page_buttons': Config.PAGINATION_BUTTONS},\n 'chosen_status': json.get('chosen_status') or statuses['All'],\n 'original_chosen_status': original_chosen_status,\n 'statuses': statuses}\n\n\n@article_bp.route('/create/', methods=['GET'])\ndef show_form_create():\n return render_template('article/create.html')\n\n\n@article_bp.route('/create/', methods=['POST'])\n@ok\ndef load_form_create(json):\n action = g.req('action', allowed=['load', 'validate', 'save'])\n if action == 'load':\n return 
{'id': '', 'title': '', 'short': '', 'long': '', 'coordinates': '',\n 'ratio': Config.IMAGE_EDITOR_RATIO}\n if action == 'validate':\n del json['coordinates'], json['ratio']\n\n return Article.save_new_article(g.user_dict['id'],\n **g.filter_json(json, 'title,short,long,keywords')).mine_version.validate(\n 'insert')\n else:\n image_id = json.get('image_file_id')\n if image_id:\n json['image_file_id'] = crop_image(image_id, json.get('coordinates'))\n del json['coordinates'], json['ratio']\n article = Article.save_new_article(g.user_dict['id'], **json)\n g.db.add(article)\n return article.get_client_side_dict()\n\n\n@article_bp.route('/update//', methods=['GET'])\ndef show_form_update(article_company_id):\n return render_template('article/update.html',\n article_company_id=article_company_id)\n\n\n@article_bp.route('/update//', methods=['POST'])\n@ok\ndef load_form_update(json, article_company_id):\n action = g.req('action', allowed=['load', 'save', 'validate'])\n article = ArticleCompany.get(article_company_id)\n if action == 'load':\n article = article.get_client_side_dict()\n article.update(ratio=Config.IMAGE_EDITOR_RATIO)\n image_id = article.get('image_file_id')\n if image_id:\n try:\n article['image_file_id'], coordinates = ImageCroped. \\\n get_coordinates_and_original_img(image_id)\n article.update(coordinates)\n except NoResultFound:\n pass\n return article\n else:\n article.attr({key: val for key, val in json.items() if key in\n ['keywords', 'title', 'short', 'long']})\n if action == 'save':\n image_id = json.get('image_file_id')\n coordinates = json.get('coordinates')\n if image_id:\n if db(ImageCroped, original_image_id=image_id).count():\n update_croped_image(image_id, coordinates)\n else:\n article.image_file_id = crop_image(image_id, coordinates)\n article.save()\n\n article = article.get_client_side_dict()\n # print(article['image_file_id'])\n return article\n else:\n # return {'errors': {}, 'warnings': {}, 'notices': {}}\n article.detach()\n return article.validate('update')\n\n\n@article_bp.route('/save//', methods=['POST'])\n@ok\ndef save(json, article_company_id):\n pass\n # return ret.get_client_side_dict()\n\n\n@article_bp.route('/details//', methods=['GET'])\ndef details(article_id):\n return render_template('article/details.html',\n article_id=article_id)\n\n\n@article_bp.route('/details//', methods=['POST'])\n@ok\ndef details_load(json, article_id):\n return Article.get(article_id).get_client_side_dict()\n\n\n@article_bp.route('/search_for_company_to_submit/', methods=['POST'])\n@ok\ndef search_for_company_to_submit(json):\n companies = Article().search_for_company_to_submit(\n g.user_dict['id'], json['article_id'], json['search'])\n return companies\n\n\n@article_bp.route('/submit_to_company//', methods=['POST'])\n@ok\ndef submit_to_company(json, article_id):\n a = Article.get(article_id)\n a.mine_version.clone_for_company(json['company_id']).save()\n return {'article': a.get(article_id).get_client_side_dict(),\n 'company_id': json['company_id']}\n\n\n@article_bp.route('/resubmit_to_company//', methods=['POST'])\n@ok\ndef resubmit_to_company(json, article_company_id):\n a = ArticleCompany.get(article_company_id)\n if not a.status == ARTICLE_STATUS_IN_COMPANY.declined:\n raise Exception('article should have %s to be resubmited' %\n ARTICLE_STATUS_IN_COMPANY.declined)\n a.status = ARTICLE_STATUS_IN_COMPANY.submitted\n return {'article': 
a.save().get_client_side_dict()}\n","sub_path":"profapp/controllers/views_article.py","file_name":"views_article.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"520164861","text":"import pytest\nimport numpy as np\nimport nway.meta_registration as mr\nimport nway.transforms as tf\n\n\n@pytest.mark.parametrize(\"include_original\", [True, False])\ndef test_MetaRegistration_init(include_original):\n m = mr.MetaRegistration(include_original=include_original)\n names = []\n for candidate in m.candidates:\n assert isinstance(candidate, tf.TransformList)\n names.append([i.__class__.__name__\n for i in candidate.transforms])\n if include_original:\n assert ['CLAHE', 'ECC'] in names\n if not include_original:\n assert ['CLAHE', 'ECC'] not in names\n\n assert isinstance(m.contrast, tf.CLAHE)\n assert callable(m.measure)\n\n\ndef test_MetaRegistration_estimate():\n candidates = [\n tf.TransformList(\n transforms=[tf.Crop(edge_buffer=1), tf.Transform()]),\n tf.TransformList(\n transforms=[tf.Crop(edge_buffer=257), tf.Transform()]),\n tf.TransformList(transforms=[tf.Transform(), tf.Transform()])]\n src = np.ones((512, 512)).astype('uint8')\n dst = np.ones((512, 512)).astype('uint8')\n\n # transforms do not have matrix attribute before estmation\n for candidate in candidates:\n for tform in candidate.transforms:\n assert not hasattr(tform, 'matrix')\n candidates, failed = mr.MetaRegistration.estimate(candidates, src, dst)\n\n # intentionally made one raise an exception (the large crop)\n assert True in failed\n\n # successful estimations now have matrix attribute\n for fail, candidate in zip(failed, candidates):\n if not fail:\n for tform in candidate.transforms:\n assert hasattr(tform, 'matrix')\n\n\ndef test_MetaRegistration_evaluate():\n candidates = [\n tf.TransformList(\n transforms=[tf.Crop(edge_buffer=1), tf.Transform()]),\n tf.TransformList(\n transforms=[tf.Crop(edge_buffer=257), tf.Transform()]),\n tf.TransformList(transforms=[tf.Transform(), tf.Transform()])]\n src = np.ones((512, 512)).astype('uint8')\n dst = np.ones((512, 512)).astype('uint8')\n candidates, failed = mr.MetaRegistration.estimate(candidates, src, dst)\n\n def measure(src, dst):\n return 1.0\n contrast = tf.CLAHE(CLAHE_grid=24, CLAHE_clip=2.5)\n\n scores = mr.MetaRegistration.evaluate(candidates, failed,\n 'MOTION_EUCLIDEAN',\n contrast, measure, src, dst)\n assert len(scores) == len(candidates)\n for fail, score in zip(failed, scores):\n if fail:\n assert score is None\n else:\n assert score == 1.0\n\n\n@pytest.mark.parametrize(\n \"scores, expected\",\n [\n ([0.1, 0.2, 0.3, 0.14], 2),\n ([0.1, 0.2, None, 0.14], 1)])\ndef test_MetaRegistration_select(scores, expected):\n index = mr.MetaRegistration.select(scores)\n assert index == expected\n\n\ndef test_MetaRegistration_call():\n src = np.ones((512, 512)).astype('uint8')\n dst = np.ones((512, 512)).astype('uint8')\n\n m = mr.MetaRegistration()\n m(src, dst)\n for att in ['failed', 'scores', 'best_matrix', 'best_candidate']:\n assert hasattr(m, att)\n","sub_path":"tests/test_meta_registration.py","file_name":"test_meta_registration.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"188434453","text":"# ugly but functional\r\n# a rewrite would look nothing like this\r\n\r\n\r\nlens = []\r\nseq = []\r\ninp = []\r\n\r\ncur_ind = 0\r\nskip = 0\r\n\r\nwith open(\"input10.txt\") as 
f:\r\n\tfor line in f.readlines():\r\n\t\tinp = line.strip()\r\n\t\tfor itm in line.strip().split(\",\"):\r\n\t\t\tlens.append(int(itm))\r\n\r\nseq = list(range(256))\r\n\r\ndef sparse_hash(sequence, lengths, cur_ind=0, skip=0):\r\n\t\r\n\tfor itm in lengths:\r\n\t\tif itm == 0:\r\n\t\t\tpass\r\n\t\t\t\r\n\t\telif cur_ind + itm > 256:\r\n\t\t\tend_len = 256 - cur_ind\r\n\t\t\tstart_len = itm - end_len\r\n\t\t\t\r\n\t\t\tsub_sequence = list(reversed(sequence[cur_ind:] + sequence[:start_len]))\r\n\t\t\t\r\n\t\t\tsequence = sub_sequence[end_len:] + sequence[start_len:cur_ind] + sub_sequence[:end_len]\r\n\t\telse:\r\n\t\t\tsub_sequence = sequence[cur_ind:cur_ind + itm]\r\n\t\t\r\n\t\t\tsequence = sequence[:cur_ind] + list(reversed(sub_sequence)) + sequence[cur_ind + itm:]\r\n\t\t\r\n\t\tcur_ind += itm + skip\r\n\t\tskip += 1\r\n\t\t\r\n\t\tcur_ind %= 256\r\n\treturn sequence, cur_ind, skip\r\nseq, cur_ind, skip = sparse_hash(seq, lens)\r\n\r\nprint(\"First two numbers, multiplied: {} x {} = {}\".format(seq[0], seq[1], seq[0] * seq[1]))\r\n\r\n# part 2\r\nprint()\r\n\r\n\r\ninp = \"flqrgnkx-0\"\r\n\r\n# convert input to list of ints\r\nlens = list(int(ord(x)) for x in inp)\r\n\r\n# append standard suffix values\r\nlens += [17, 31, 73, 47, 23]\r\n\r\n# re-init\r\nseq = list(range(256))\r\n\r\n# re-init, preserve between rounds\r\nskip = 0\r\ncur_ind = 0\r\n\r\n# sparse_hash 64 times\r\nfor n in range(64):\r\n\tseq, cur_ind, skip = sparse_hash(seq, lens, cur_ind, skip)\r\n\r\n# bitwise xor to get 16 numbers\r\nbitted = []\r\ncur_num = 0\r\nfor i in range(len(seq)):\r\n\tif i % 16 == 0:\r\n\t\tcur_num = seq[i]\r\n\telse:\r\n\t\tcur_num = cur_num ^ seq[i]\r\n\t\tif i % 16 == 15:\r\n\t\t\tbitted.append(cur_num)\r\n\r\n# hex\r\nbitted = [str(hex(x))[2:].zfill(2) for x in bitted]\r\n\r\n# make string\r\nprint(\"Hashed string from \\\"{}\\\": \\\"{}\\\"\".format(inp, \"\".join(bitted)))\r\n","sub_path":"2017/day10/advent10.py","file_name":"advent10.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"216667181","text":"import unittest\n\nfrom solutions.utils.utils import grid_to_str, Location\nfrom solutions.day15.day15 import scan_input, Creature\n\nSMALL_CAVE: str = \"\"\"\n#######\n#.G.E.#\n#E.G.E#\n#.G.E.#\n#######\n\"\"\".strip()\n\nSMALL_CAVE_MAP: str = \"\"\"\n#######\n#.....#\n#.....#\n#.....#\n#######\n\"\"\".strip()\n\n\nclass TestSmallCave(unittest.TestCase):\n def test_small_cave_map_can_be_extracted(self):\n self.assertEqual(\n grid_to_str(scan_input(SMALL_CAVE).cave_map),\n SMALL_CAVE_MAP\n )\n\n def test_small_cave_creatures_can_be_extracted(self):\n self.assertEqual(\n scan_input(SMALL_CAVE).creatures,\n [\n Creature('G', Location(2, 1)),\n Creature('E', Location(4, 1)),\n Creature('E', Location(1, 2)),\n Creature('G', Location(3, 2)),\n Creature('E', Location(5, 2)),\n Creature('G', Location(2, 3)),\n Creature('E', Location(4, 3))\n ]\n )\n\n def test_small_cave_str_returns_input(self):\n self.assertEqual(\n str(scan_input(SMALL_CAVE)),\n SMALL_CAVE\n )\n\n\nDESTINATION_PRACTICE_CAVE: str = \"\"\"\n#######\n#E..G.#\n#...#.#\n#.G.#G#\n#######\n\"\"\".strip()\n\n\nclass TestDestination(unittest.TestCase):\n def test_find_targets(self):\n subject = Creature('E', Location(1, 1))\n self.assertListEqual(\n scan_input(DESTINATION_PRACTICE_CAVE).find_targets(subject),\n [\n Location(4, 1),\n Location(2, 3),\n Location(5, 3)\n ]\n )\n\n def test_find_potential_destinations(self):\n subject = 
Creature('E', Location(1, 1))\n self.assertEqual(\n scan_input(DESTINATION_PRACTICE_CAVE).find_potential_destinations(subject),\n [\n Location(3, 1),\n Location(5, 1),\n Location(2, 2),\n Location(5, 2),\n Location(1, 3),\n Location(3, 3)\n ]\n )\n","sub_path":"solutions/day15/day15_test.py","file_name":"day15_test.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"213068383","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.sql import func, extract, between\n\nfrom webapp.config import Config\nfrom webapp.models import Sales\n\nengine = create_engine(Config.SQLALCHEMY_DATABASE_URI)\n\nDBSession = sessionmaker(bind=engine)\n\nsession = DBSession()\n\nyear = [2010, 2010]\nregion = 'Nunavut'\n\nquery = (session.query(func.date_trunc('month', Sales.order_date).label('date'),\n func.sum(Sales.sales).label('sales'))\n .group_by(func.date_trunc('month', Sales.order_date))\n .order_by(func.date_trunc('month', Sales.order_date)))\n\nif year:\n query = query.filter(between(extract('year', Sales.order_date), year[0], year[1]))\n\nif region:\n query = query.filter(Sales.region == 'Nunavut')\n\nfor row in query:\n print(row)\n","sub_path":"testing DB.py","file_name":"testing DB.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"528078714","text":"import logging\nimport os\n\nLOG = logging.getLogger(__name__)\n\nfrom avi.netscaler_converter.ns_service_converter import ServiceConverter\nfrom avi.netscaler_converter.monitor_converter import MonitorConverter\nfrom avi.netscaler_converter.lbvs_converter import LbvsConverter\nfrom avi.netscaler_converter.csvs_converter import CsvsConverter\nfrom avi.netscaler_converter import ns_util\nfrom avi.netscaler_converter.profile_converter import ProfileConverter\nfrom avi.netscaler_converter.lbvs_converter import tmp_avi_config\n\ndef convert(ns_config_dict, tenant, version, output_dir, input_dir,\n skipped_cmds, vs_state):\n\n status_file = output_dir + os.path.sep + \"ConversionStatus.csv\"\n csv_file = open(status_file, 'w')\n ns_util.add_csv_headers(csv_file)\n LOG.debug('Conversion Started')\n try:\n avi_config = {\n \"META\": {\n \"supported_migrations\": {\n \"versions\": [\n \"14_2\",\n \"15_1\",\n \"15_1_1\",\n \"15_2\",\n \"15_2_3\",\n \"15_3\",\n \"current_version\"\n ]\n },\n \"version\": {\n \"Product\": \"controller\",\n \"Version\": version,\n \"min_version\": 15.2,\n \"ProductName\": \"Avi Cloud Controller\"\n },\n \"upgrade_mode\": False,\n \"use_tenant\": tenant\n }\n }\n\n monitor_converter = MonitorConverter()\n monitor_converter.convert(ns_config_dict, avi_config, input_dir)\n\n profile_converter = ProfileConverter()\n profile_converter.convert(ns_config_dict, avi_config, input_dir)\n\n service_converter = ServiceConverter()\n service_converter.convert(ns_config_dict, avi_config)\n\n lbvs_converter = LbvsConverter()\n lbvs_converter.convert(ns_config_dict, avi_config, vs_state)\n\n csvs_converter = CsvsConverter()\n csvs_converter.convert(ns_config_dict, avi_config, vs_state)\n\n ns_util.update_status_for_skipped(skipped_cmds)\n LOG.debug('Conversion completed successfully')\n\n ns_util.cleanup_config(tmp_avi_config)\n\n except:\n LOG.error('Error in config conversion', exc_info=True)\n\n csv_file.close()\n return 
avi_config","sub_path":"python/avi/netscaler_converter/netscaler_config_converter.py","file_name":"netscaler_config_converter.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"307371376","text":"\"\"\" The mapping from urls to views\"\"\"\nfrom django.conf.urls.defaults import patterns, url, include\nfrom django.conf import settings\nfrom smra.smra_portal.views import SMRAAboutView \nfrom django.views.generic.simple import redirect_to \n\n\n \nfrom django.contrib import admin\nadmin.autodiscover()\n \n \nurlpatterns = patterns('',\n \n url(r'^test/$','smra.smra_portal.views.test'),\n \n url(r'^$', redirect_to, {'url': '%s/virts/' % settings.SUB_SITE}), \n url(r'^%s/$' % settings.SUB_SITE, redirect_to,\n {'url': '/%s/virts/' % settings.SUB_SITE}), \n \n # Retrieve virtual collections\n url(r'^%s/virts/$' % settings.SUB_SITE,\n 'smra.smra_portal.views.collections',\n name=\"publicvirts\"),\n \n # Retrieve virtual collections\n url(r'^%s/mycollections/$' % settings.SUB_SITE, \n 'smra.smra_portal.views.my_collections',\n name=\"virts\"), \n \n # Create and update virtual collection \n url(r'^%s/virt/(?P\\d+)/$' % settings.SUB_SITE,\n 'smra.smra_portal.views.update_virt',\n name=\"virt\"), \n \n # Create and edit metadata for existing virtual colletion \n url(r'^%s/virt/(?P\\d+)/meta/$' % settings.SUB_SITE,\n 'smra.smra_portal.views.edit_virt_metadata',\n name=\"virtmeta\"),\n \n \n # Create and edit metadata for existing virtual colletion \n url(r'^%s/virt/(?P\\d+)/publish/$' % settings.SUB_SITE,\n 'smra.smra_portal.views.publish',\n name=\"virtpublish\"),\n \n url(r'^%s/rif_cs/$' % settings.SUB_SITE,'smra.smra_portal.views.rif_cs',\n name=\"rifcs\"),\n \n url(r'^%s/about/' % settings.SUB_SITE, SMRAAboutView.as_view(),\n name=\"about\"),\n \n url(r'^%s/virt/$' % settings.SUB_SITE,\n redirect_to, {'url': '/%s/virts/' % settings.SUB_SITE}),\n\n # Retrieve media objects\n #url(r'^%s/mediaobjects/page(?P[0-9]+)/$' % settings.SUB_SITE, \n # MediaObjectListView.as_view(), \n # name=\"browse\"), \n \n # Retrieve media objects\n url(r'^%s/mediaobjects/' % settings.SUB_SITE, \n 'smra.smra_portal.views.media_object_list',\n name=\"browse\"),\n \n url(r'^%s/mediaobject/(?P\\d+)/$' % settings.SUB_SITE, \n\t 'smra.smra_portal.views.mediaobject_details',\n name=\"details\"),\n \n \n\n url(r'^%s/admin/doc/' % settings.SUB_SITE,\n include('django.contrib.admindocs.urls')),\n \n url(r'^%s/admin/' % settings.SUB_SITE, include(admin.site.urls)),\n \n url(r'^%s/accounts/login/$' % settings.SUB_SITE,\n 'django.contrib.auth.views.login', \n\t {'template_name': 'smra_portal/login.html'}),\n url(r'^%s/accounts/logout/$' % settings.SUB_SITE, \n 'django.contrib.auth.views.logout',{'next_page':'/'}),\n \n \n url(r'site_media/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.STATIC_DOC_ROOT}),\n \n \n url(r'^%s/links/$' % settings.SUB_SITE, \n 'smra.smra_portal.views.checklinks',name='deadlinks') \n \n #url(r'^%s/makelinkcheck/$' % settings.SUB_SITE, \n # 'smra.smra_portal.views.make_link_check',name='makelinkcheck')\n \n \n #url(r'%s/mint/$' % settings.SUB_SITE,\n # 'smra.smra_portal.views.mint_handle',name='mint')\n )","sub_path":"smra/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"614839185","text":"#This programme cacolates the payload(weight) an s92 helicopter can carry 
for a \r\n#given mission. results displayed in a terminal window/\r\n\r\nclass dispatch():\r\n def payloadavailable(self,aps,crew,fuel): # calculate disposable weight\r\n return 26500-aps-crew-fuel\r\n\r\n\r\n def zerofuelweight(self,aps,crew): # calculate Zero fuel weight\r\n return aps + crew\r\n\r\n\r\n def mslsload(self,paxw,bag,freight,zfw,fuel): #calculate MSLS Loaad (paxw=pax weight)\r\n return paxw + bag + freight + zfw + fuel\r\n\r\ndef main():\r\n d = dispatch()\r\n #collect all required data\r\n \r\n aps = int(input(\"enter APS weight as seen on MSLS:\"))\r\n pilot = int(input(\"enter Pilot weight:\"))\r\n copilot = int(input(\"enter copilot weight:\"))\r\n crew = pilot + copilot\r\n fuel = int(input(\"enter fuel loaded:\"))\r\n paxw = int(input(\"enter pax weight:\"))\r\n bag = int(input(\"enter baggage weight:\"))\r\n freight = int(input(\"enter freight weight:\"))\r\n luggage = bag + freight\r\n question = input(\"What would you like to calculate?:\")\r\n\r\n if question == \"payload\":\r\n p = str(d.payloadavailable(aps,crew,fuel))\r\n print(p +\"lbs\") \r\n \r\n elif question == \"take-off weight\":\r\n l = str(luggage)\r\n if luggage > 1000:\r\n print(l + \"lbs\",\"exceeds the maximum allowable (1000lbs) in the baggage compartment!\")\r\n print(\"please reduce luggage!!\")\r\n \r\n zfw = d.zerofuelweight(aps,crew)\r\n TO_weight = d.mslsload(paxw,bag,freight,zfw,fuel)\r\n t = str(TO_weight)\r\n if TO_weight > 26500:\r\n print(t + \"lbs\",\"exceeds the maximum alloawable(26500lbs) take-off weight\")\r\n print(\"It is illegal to fly!\")\r\n else:\r\n print(t + \"lbs\") \r\n print(\"Enjoy your flight!\")\r\n else:\r\n print(\"invalid entry, restart program and enter 'payload' or 'take-off weight'\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"pilots flight dispatch.py","file_name":"pilots flight dispatch.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"518301697","text":"import os\nfrom menu import Menu\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\n# initialize coffee machine\ncoffee_machine = CoffeeMaker()\nmenu = Menu()\nmoney_bank = MoneyMachine()\nexit_machine = False\n\n\ndef main():\n while not exit_machine:\n cls()\n boot_screen()\n service()\n exit_console()\n\n\ndef boot_screen():\n '''Boot screen with custom logos a resource list and menu items'''\n coffee_machine.boot_screen()\n print(menu.print_menu())\n\n\ndef service():\n # Ask User what they'd like\n while True:\n try:\n user_input = int(input('\\nWhat would you like?: '))\n order = menu.find_drink(user_input)\n break\n except IndexError:\n print('\\nInvalid input. Try again')\n continue\n\n if coffee_machine.is_resource_sufficient(order) is True:\n money_bank.make_payment(order.cost)\n coffee_machine.make_coffee(order)\n\n\ndef exit_console():\n global exit_machine\n while True:\n user_input = input('\\nExit machine? 
(Y/N): ').upper()\n if user_input == 'Y':\n exit_machine = True\n break\n elif user_input == 'N':\n break\n else:\n print('\\nInvalid Input')\n continue\n\n\n# Clear Screen\ndef cls():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\nmain()\n","sub_path":"Coffee-Machine-OOP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"231839332","text":"#!/usr/bin/env python\n#%%\nimport sys\nimport os \n\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn import preprocessing\n\n\nWORK_DIR=\"./backflow_workdir/\"\n\n\nclass Backflow(object):\n \"\"\"Backflow class handles the handling of the the model, and\n interaction with the SUT.\n\n The constructor expects a working directory. The directory should\n contain a file 'train.csv' with the data to be used for training\n the model. The folder 'tf_model' will be created to persist the\n state of the model. The file 'adversarial.csv' will contain all\n generated adversarial examples.\n \"\"\"\n\n def __init__(self, work_dir):\n self._work_dir = work_dir\n self._model_dir = os.path.join(self._work_dir, \"tf_model\")\n self._meta_graph_fname = os.path.join(self._model_dir, \"tf_model.meta\")\n self._orig_data_fname = os.path.join(self._work_dir, \"train.csv\")\n self._adv_data_fname = os.path.join(self._work_dir, \"adversarial.csv\")\n self._train_data = None\n self._val_data = None\n self._scaler = None\n self._fields = None\n\n # NOTE: We currently use the global graph. We should find a\n # way to store the graph internally to this object.\n # It seems that many functions in TF modify global structures\n self._tf_sess = None\n self._x_hat = None\n self._y_hat = None\n self._out = None\n\n def load_model(self):\n \"\"\"Load the model from the model_dir\n\n If the model does not exist, it is created using the\n train_data set\n \"\"\"\n self.parse_dataset()\n if not os.path.exists(self._model_dir):\n # If the model dir does not exist, we need to create the\n # initial model\n print(\"Model not found. Creating new model.\")\n self.fit(self._train_data, self._val_data)\n\n tf.reset_default_graph()\n with tf.Session().as_default() as sess:\n self._tf_sess = sess\n saver = tf.train.import_meta_graph(self._meta_graph_fname)\n saver.restore(sess,self._model_dir)\n if __debug__:\n # This shows that we are able to load the model correctly\n #\n # Obtain the expressions for the input and output layer.\n # NOTE: get_collection returns a list. 
We know that there\n # is only one element in those lists.\n output_layer = tf.get_collection(\"my_output_layer\")[0]\n input_layer = tf.get_collection(\"my_input_layer\")[0]\n predictions = sess.run([output_layer], \n feed_dict={input_layer: self._val_data[0]})\n eval_predictions(predictions[0], self._val_data)\n\n # Rebuild graph by using x_hat as input layer\n x_hat = tf.Variable(tf.random_uniform((1 ,9)), name=\"x_hat\")\n y_hat = tf.placeholder(tf.float32, [1, 1])\n out = self._build_model(x_hat, load=True)\n # Show that we recreated the graph correctly\n predictions = []\n for val_example in self._val_data[0]:\n val_example = np.reshape(val_example, (1,9))\n p = sess.run([out], \n feed_dict={x_hat: val_example})\n predictions.append(p)\n predictions = np.asarray(predictions)\n eval_predictions(predictions, self._val_data)\n\n # Initialize x_hat with a purely random value\n sess.run(tf.variables_initializer([x_hat]))\n self._x_hat = x_hat\n self._y_hat = y_hat\n self._out = out\n\n def save_model(self, sess):\n \"\"\"Save the model in the model directory.\"\"\"\n # Saves all variables. \n saver = tf.train.Saver()\n saver.save(sess, self._model_dir)\n saver.export_meta_graph(filename=self._meta_graph_fname)\n print('Model saved to: {}'.format(self._model_dir))\n\n def _build_model(self, input_layer, load=False):\n \"\"\"Creates the NN model using the given input layer.\n \n Returns the output_layer.\n \"\"\"\n # Network shape : Relu on hidden layer but Identity on the output\n if not load:\n with tf.variable_scope(\"main\") as main_scope:\n w_hidden = tf.Variable(tf.random_uniform((9, 5)), name=\"w_hidden\")\n b_hidden = tf.Variable(tf.random_uniform((5,)), name=\"b_hidden\")\n w_output = tf.Variable(tf.random_uniform((5, 1)), name=\"w_output\")\n b_output = tf.Variable(tf.random_uniform((1,)), name=\"b_output\")\n else:\n # TODO: Find a proper way to reload these variables\n # convert variables in a dict for easy access\n vars_ = dict((x.name, x) for x in tf.global_variables()\n if x.name.startswith('main'))\n mk_const = lambda x: tf.constant(self._tf_sess.run([x])[0])\n w_hidden = mk_const(vars_['main/w_hidden:0'])\n b_hidden = mk_const(vars_['main/b_hidden:0'])\n w_output = mk_const(vars_['main/w_output:0'])\n b_output = mk_const(vars_['main/b_output:0'])\n\n in_hidden = tf.matmul(input_layer, w_hidden) + b_hidden\n in_hidden = tf.nn.relu(in_hidden)\n output_layer = tf.matmul(in_hidden, w_output) + b_output\n\n return output_layer\n\n def parse_dataset(self, split=0.8, include_adversarial=False):\n \"\"\"Parse the datasets and return a tuple (train, validate).\n\n split indicates the percentage of datapoints to be used for\n training. 
E.g., split=0.8 will use 80% of the datapoints for\n training and 20% for validation.\n\n include_adversarial indicates whether the adversarial examples\n generated by backflow should be included in the dataset.\n \"\"\"\n from csv import DictReader\n out_field = 'braking_distance'\n self._scaler = preprocessing.StandardScaler()\n\n # Original Data\n data_in = []\n data_out = []\n with open(self._orig_data_fname, 'r') as fin:\n reader = DictReader(fin)\n fields = sorted(reader.fieldnames)\n for r in reader:\n data_out.append(float(r[out_field]))\n data_in.append([float(r[field]) for field in fields\\\n if field != out_field])\n\n # Adv Data\n if include_adversarial:\n with open(self._adv_data_fname, 'r') as fin:\n reader = DictReader(fin)\n # print(len(fields), fields)\n for r in reader:\n data_out.append(float(r[out_field]))\n data_in.append([float(r[field]) for field in fields\\\n if field != out_field])\n self._fields = reader.fieldnames\n self._fields.remove(out_field)\n\n\n # TODO: Shuffle\n # Currently only reverse, so that adv examples come first\n data_in = data_in[::-1]\n data_out = data_out[::-1]\n\n # Split data\n train_size = int(len(data_in) * split)\n train_data = (data_in[:train_size], data_out[:train_size])\n assert len(train_data[0]) == train_size\n val_data = (data_in[train_size:], data_out[train_size:])\n assert len(train_data[0])+len(val_data[0]) == len(data_in)\n\n # Normalize train data\n sk_in = np.array(train_data[0])\n self._scaler.fit(sk_in)\n sk_in = self._scaler.transform(sk_in)\n sk_out = np.array(train_data[1])\n train_data = (sk_in, sk_out)\n \n # Normalize val data\n sk_in = np.array(val_data[0])\n sk_in = self._scaler.transform(sk_in)\n sk_out = np.array(val_data[1])\n val_data = (sk_in, sk_out)\n\n # Split into train and validate\n self._train_data = train_data\n self._val_data = val_data\n\n\n def fit(self, train, validate):\n \"\"\"Fit the model using the given datasets.\"\"\"\n input_layer = tf.placeholder(tf.float32, (None, 9))\n output_layer = self._build_model(input_layer)\n target_output = tf.placeholder(tf.float32, (None))\n # TODO: Define asymmetric loss function\n loss_fun = tf.losses.mean_squared_error(target_output, output_layer,\n reduction=tf.losses.Reduction.SUM)\n def train_data_fn(epochs=200):\n # TODO: Use proper functionalities of TF to make this\n # function smarter.\n in_data, out_data = self._train_data\n for i in range(epochs):\n yield {input_layer: np.reshape(np.asarray(in_data), (len(in_data), 9)),\n target_output: np.reshape(np.asarray(out_data), (len(out_data), 1))}\n\n learning_rate = tf.placeholder(tf.float32, ())\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_fun)\n with tf.Session().as_default() as sess:\n learning_rate_val = 0.1\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n # Start Training\n for i, data in enumerate(train_data_fn(epochs=2000)):\n data[learning_rate] = learning_rate_val\n _, loss_val = sess.run([optimizer, loss_fun], feed_dict=data)\n if i % 100 == 0:\n print(loss_val)\n # Run a prediction\n predictions = sess.run([output_layer], \n feed_dict={input_layer: self._val_data[0]})\n eval_predictions(predictions[0], self._val_data)\n # Name the input and output layer expressions in order to\n # be able to restore it later on.\n tf.add_to_collection(\"my_output_layer\", output_layer)\n tf.add_to_collection(\"my_input_layer\", input_layer)\n self.save_model(sess)\n\n def compute_input(self, output, \n learning_rate_val=1e-4, steps=300):\n \"\"\"Returns a 
pair (input, loss_value) compatible with the output.\"\"\"\n print(\"Target output value : \" + str(output))\n y_hat = self._y_hat\n x_hat = self._x_hat\n out = self._out\n with self._tf_sess.as_default() as sess:\n assert np.shape(output) == (1,1)\n loss = tf.abs(out - y_hat)\n learning_rate = tf.placeholder(tf.float32, ())\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n optim_step = optimizer.minimize(loss, var_list=[x_hat])\n\n # Execute the search\n for i in range(steps):\n # gradient descent step\n _, loss_value = sess.run([optim_step, loss],\n feed_dict={y_hat: output,\n learning_rate: learning_rate_val})\n if (i+1) % (steps/10) == 0:\n # Reduce learning rate to improve convergence\n learning_rate_val = learning_rate_val/2\n print('step %d, loss=%g' % (i+1, loss_value))\n\n # Get adversarial data\n adv = x_hat.eval()\n adv_out = sess.run(out, feed_dict={x_hat: adv})\n print(\"Adversarial output: %f\" % adv_out)\n print(\"Adversarial input:\")\n print(adv)\n # \n re_scaled = list(self._scaler.inverse_transform(adv)[0])\n adv_dict = dict(zip(self._fields, re_scaled))\n print(adv_dict)\n return (adv_dict, adv, loss_value)\n\n#%%\n\ndef eval_predictions(predictions, val_data):\n predictions = np.reshape(predictions, (predictions.shape[0],))\n accuracy = r2_score(predictions, val_data[1])\n print(\"TF: Accuracy %0.2f\" % accuracy)\n abs_error = val_data[1] - predictions\n rel_error = abs(abs_error) / val_data[1]\n print(\"TF: avg %0.2f, min %0.2f, max %0.2f\" % \n (sum(abs_error)/len(abs_error), min(abs_error), max(abs_error)))\n return accuracy\n\n\ndef r2_score(pred, true):\n u = ((true - pred) ** 2).sum()\n v = ((true - true.mean()) ** 2).sum()\n accuracy = (1 - u/v)\n return accuracy\n \ndef demo1():\n bf = Backflow(\"bf_workdir\")\n # bf_workdir must contain a file called train.csv with the data\n bf.load_model()\n target_output = ([208.564966],)\n adv_dict, target_input, loss_val = bf.compute_input(target_output)\n from obu import braking_distance\n print(braking_distance(**adv_dict))\n \n\nif __name__ == \"__main__\":\n demo1()\n","sub_path":"Jupyter-Playground/ai-testing/OBU/backflow.py","file_name":"backflow.py","file_ext":"py","file_size_in_byte":12512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"59177445","text":"string = input()\nalphabets = digits = special = 0\nfor i in range(len(string)):\n if(string[i].isalpha()):\n alphabets = alphabets + 1\n elif(string[i].isdigit()):\n digits = digits + 1\n else:\n special = special + 1\nprint('',special)\n","sub_path":"countspecialcharacters.py","file_name":"countspecialcharacters.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"431263647","text":"# This Python 3 environment comes with many helpful analytics libraries installed\r\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\r\n# For example, here's several helpful packages to load in \r\n\r\nimport numpy as np # linear algebra\r\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\r\nimport matplotlib.pyplot as plt\r\n\r\nimport plotly.plotly as py\r\nimport plotly.graph_objs as go\r\nfrom plotly.offline import init_notebook_mode, iplot # plotly offline mode\r\ninit_notebook_mode(connected=True) \r\n\r\nimport seaborn as sns\r\nimport cv2\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout, Flatten\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D,MaxPool2D\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.optimizers import Adam\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n# Input data files are available in the \"../input/\" directory.\r\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\r\n\r\nimport os\r\nprint(os.listdir(\"../input/flowers/flowers\"))\r\n\r\n# Any results you write to the current directory are saved as output.\r\n\r\n# plotting a random image\r\nimg = plt.imread(\"../input/flowers/flowers/daisy/100080576_f52e8ee070_n.jpg\")\r\nimg = cv2.resize(img,(124,124))\r\nplt.imshow(img)\r\nplt.axis(\"off\")\r\nplt.show()\r\n\r\nx_ = list()\r\ny = list()\r\nIMG_SIZE = 256\r\nfor i in os.listdir(\"../input/flowers/flowers/daisy\"):\r\n try:\r\n path = \"../input/flowers/flowers/daisy/\"+i\r\n img = plt.imread(path)\r\n img = cv2.resize(img,(IMG_SIZE,IMG_SIZE))\r\n x_.append(img)\r\n y.append(0)\r\n except:\r\n None\r\nfor i in os.listdir(\"../input/flowers/flowers/dandelion\"):\r\n try:\r\n path = \"../input/flowers/flowers/dandelion/\"+i\r\n img = plt.imread(path)\r\n img = cv2.resize(img,(IMG_SIZE,IMG_SIZE))\r\n x_.append(img)\r\n y.append(1)\r\n except:\r\n None\r\nfor i in os.listdir(\"../input/flowers/flowers/rose\"):\r\n try:\r\n path = \"../input/flowers/flowers/rose/\"+i\r\n img = plt.imread(path)\r\n img = cv2.resize(img,(IMG_SIZE,IMG_SIZE))\r\n x_.append(img)\r\n y.append(2)\r\n except:\r\n None\r\nfor i in os.listdir(\"../input/flowers/flowers/sunflower\"):\r\n try:\r\n path = \"../input/flowers/flowers/sunflower/\"+i\r\n img = plt.imread(path)\r\n img = cv2.resize(img,(IMG_SIZE,IMG_SIZE))\r\n x_.append(img)\r\n y.append(3)\r\n except:\r\n None\r\nfor i in os.listdir(\"../input/flowers/flowers/tulip\"):\r\n try:\r\n path = \"../input/flowers/flowers/tulip/\"+i\r\n img = plt.imread(path)\r\n img = cv2.resize(img,(IMG_SIZE,IMG_SIZE))\r\n x_.append(img)\r\n y.append(4)\r\n except:\r\n None\r\nx_ = np.array(x_)\r\n\r\n#plottin one of all flower types in data\r\nplt.figure(figsize = (20,20))\r\nfor i in range(5):\r\n img = x_[950*i]\r\n plt.subplot(1,5,i+1)\r\n plt.imshow(img)\r\n plt.axis(\"off\")\r\n plt.title(y[950*i])\r\n\r\n# for replacement process i'll use keras.to_categorical \r\nfrom keras.utils.np_utils import to_categorical\r\ny = to_categorical(y,num_classes = 5)\r\n\r\n# test split\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(x_,y,test_size = 0.15,random_state = 42)\r\n\r\n# validation and trains split\r\nx_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size = 0.15,random_state = 42)\r\n\r\nplt.figure(figsize = (20,20))\r\nfor i in range(5):\r\n img = x_train[600*i]\r\n plt.subplot(1,5,i+1)\r\n plt.imshow(img)\r\n plt.axis(\"off\")\r\n plt.title(y_train[600*i])\r\nplt.show()\r\n\r\nx_train.shape # look traing shape\r\n\r\nmodel = Sequential()\r\n# 1st Convolutional Layer\r\nmodel.add(Conv2D(filters=64, 
kernel_size=(3,3),padding=\"Same\",activation=\"relu\" , input_shape = (IMG_SIZE,IMG_SIZE,3)))\r\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dropout(0.2))\r\n# 2nd Convolutional Layer\r\nmodel.add(Conv2D(filters=128, kernel_size=(3,3),padding=\"Same\",activation=\"relu\"))\r\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dropout(0.3))\r\n# 3rd Convolutional Layer\r\nmodel.add(Conv2D(filters=128, kernel_size=(3,3),padding=\"Same\",activation=\"relu\"))\r\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dropout(0.3))\r\n# 4th Convolutional Layer\r\nmodel.add(Conv2D(filters=256,kernel_size = (3,3),padding=\"Same\",activation=\"relu\"))\r\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dropout(0.2))\r\n# 5th Convolutional Layer\r\nmodel.add(Conv2D(filters=512,kernel_size = (3,3),padding=\"Same\",activation=\"relu\"))\r\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dropout(0.3))\r\n\r\nmodel.add(Flatten())\r\n# 1st Fully Connected Layer\r\nmodel.add(Dense(1024,activation=\"relu\"))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(BatchNormalization())\r\n# Add output layer\r\nmodel.add(Dense(5,activation=\"softmax\"))\r\n\r\nmodel.summary() # print summary my model\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy']) #compile model\r\n\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(lr=0.001),\r\n metrics=['accuracy'])\r\n\r\nepoch = 50 \r\nbatch_size = 64\r\n\r\ndatagen = ImageDataGenerator(\r\n featurewise_center=False, # set input mean to 0 over the dataset\r\n samplewise_center=False, # set each sample mean to 0\r\n featurewise_std_normalization=False, # divide inputs by std of the dataset\r\n samplewise_std_normalization=False, # divide each input by its std\r\n rotation_range=60, # randomly rotate images in the range (60, 0 to 180)\r\n zoom_range = 0.1, # Randomly zoom image \r\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\r\n height_shift_range=0.1,\r\n shear_range=0.1,\r\n fill_mode = \"reflect\"\r\n ) \r\ndatagen.fit(x_train)\r\n\r\nhistory = model.fit_generator(datagen.flow(x_train,y_train,batch_size=batch_size),\r\n epochs = epoch,\r\n\t\t\t\t\t\t\t validation_data = (x_val,y_val),\r\n steps_per_epoch = x_train.shape[0] // batch_size\r\n )\r\n\r\nprint(\"Test Accuracy: {0:.2f}%\".format(model.evaluate(x_test,y_test)[1]*100)) #get score acording to test datas\r\n\r\nx_ = np.array(range(len(history.history['loss']))) # get loss values from the history\r\ntrace1 = go.Scatter(\r\n x = x_,\r\n y = history.history['loss'], # get loss values from the history\r\n mode = \"lines\",\r\n marker = dict(color = \"rgba(0,255,0,0.9)\"),\r\n text = \"Loss\"\r\n)\r\ntrace2 = go.Scatter(\r\n x = x_,\r\n y = history.history['acc'],# get accuracy values from the history\r\n mode = \"lines\",\r\n marker = dict(color = \"rgba(0,0,255,0.9)\"),\r\n text = \"Accuracy\"\r\n)\r\ndata = [trace1,trace2]\r\nlayout = dict(title = \"Training Accuracy and Loss\")\r\nfig = dict(data = data,layout=layout)\r\niplot(fig)\r\n\r\nx_ = np.array(range(len(history.history['val_loss'])))# get validation loss values from the history\r\ntrace1 = go.Scatter(\r\n x = x_,\r\n y = history.history['val_loss'], # get validation loss values from the history\r\n mode = 
\"lines\",\r\n marker = dict(color = \"rgba(0,0,0,0.9)\"),\r\n text = \"Validation Loss\"\r\n)\r\ntrace2 = go.Scatter(\r\n x = x_,\r\n y = history.history['val_acc'],# get validation accuracy values from the history\r\n mode = \"lines\",\r\n marker = dict(color = \"rgba(255,0,0,0.9)\"),\r\n text = \"Validation Accuracy\"\r\n)\r\ndata = [trace1,trace2]\r\nlayout = dict(title = \"Validation Accuracy and Loss\")\r\nfig = dict(data = data,layout=layout)\r\niplot(fig)\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nY_pred = model.predict(x_val)\r\nY_pred_classes = np.argmax(Y_pred,axis = 1)\r\nY_true = np.argmax(y_val,axis = 1)\r\nconfusion_mtx = confusion_matrix(Y_true,Y_pred_classes)\r\nf,ax = plt.subplots(figsize = (8,8))\r\nsns.heatmap(confusion_mtx,annot=True,linewidths = 0.01,cmap=\"Reds\",\r\n linecolor = \"gray\",fmt = \".2f\",ax=ax\r\n )\r\nplt.xlabel(\"Predicted label\")\r\nplt.ylabel(\"True Label\")\r\nplt.title(\"Confusion matrix\")\r\nplt.show()\r\n\r\nmodel.save('../output/cusstom_cnn_model.h5') # Save a model as HDF5 file\r\n\r\nloaded_model = load_model('../output/cusstom_cnn_model.h5') # Load the saved model for predictions\r\n","sub_path":"Custom_CNN_Model.py","file_name":"Custom_CNN_Model.py","file_ext":"py","file_size_in_byte":8539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"49455251","text":"from Gaudi.Configuration import *\n\n# Data service\nfrom Configurables import FCCDataSvc\nbatch_dir = \"/eos/user/t/toprice/private/FCC/FCCSW/v8.0/XYZ/\"\nfccsw_version = \"FCCSW0.8\"\ndet_config = \"\"\nrun_config = \"\"\nfile = \"\"\nfile = file[7:]\n#inputfile = batch_dir+\"/\"+det_config+\"_\"+fccsw_version+\"/\"+run_config+\"/\"+file\ninputfile = batch_dir+\"/\"+det_config+\"/\"+run_config+\"/output_\"+file\npodiosvc = FCCDataSvc(\"EventDataSvc\", input=inputfile)\n\n\nfrom Configurables import PodioInput\npodioinput = PodioInput(\"PodioReader\", collections=[\"positionedCaloHits\", \"GenParticles\"], OutputLevel=DEBUG)\n\n# DD4hep geometry service\nfrom Configurables import GeoSvc\ngeoservice = GeoSvc(\"GeoSvc\", detectors=[ 'file:/afs/cern.ch/user/t/toprice/private/FCC/FCCSW/Detector/DetFCChhBaseline1/compact/FCChh_DectEmptyMaster.xml',\n 'file:/afs/cern.ch/user/t/toprice/private/FCC/FCCSW/Detector/DetFCChhECalDigital/compact/FCChh_DECalBarrel_'+det_config[:-9]+'.xml'\n],\n OutputLevel = INFO)\n\n\nfrom Configurables import FilterSiliconEcalHits\nfiltered = FilterSiliconEcalHits(\"FilterSiEcal\",\n readoutName = \"BarDECal_Readout\",\n digitalFlag = 1)\nfiltered.deposits.Path=\"positionedCaloHits\"\nfiltered.filtered.Path=\"filteredCaloHits\" \n\n# add a processor which generates noise hits\n# input - noise level\n# - threshold level\n# - segmentation name\n\n# add a processor which adds noise event in to genuine hits\n# take input of noise hits and genuine hits\n# output a combination for the event\n\nfrom Configurables import RedoSegmentation\nresegment = RedoSegmentation(\"ReSegmentation\",\n # old bitfield (readout)\n oldReadoutName = \"BarDECal_Readout\",\n # specify which fields are going to be deleted\n oldSegmentationIds = [\"x\",\"y\",\"z\"],\n # new bitfield (readout), with new segmentation\n newReadoutName=\"BarDECal_Pads\",\n OutputLevel = INFO)\n# clusters are needed, with deposit position and cellID in bits\nresegment.inhits.Path = \"filteredCaloHits\"\nresegment.outhits.Path = \"newCaloHits\"\n\nfrom Configurables import CreateCaloCells\ncreatecells = CreateCaloCells(\"CreateCaloCells\",\n 
doCellCalibration = False,\n addCellNoise = False, filterCellNoise = False, sumPixelsPerCell = False,\n OutputLevel = INFO)\ncreatecells.hits.Path=\"newCaloHits\"\ncreatecells.cells.Path=\"newCaloCells\"\n\nfrom Configurables import DECalAnalysis\nhist = DECalAnalysis(\"DECalAnalysis\", \n pixelReadoutName = \"BarDECal_Readout\",\n padReadoutName = \"BarDECal_Pads\",\n layerFieldName = \"layer\",\n numLayers = 50, # one more because index starts at 1 - layer 0 will be always empty\n OutputLevel = INFO)\nhist.pixels.Path=\"filteredCaloHits\"\nhist.pads.Path=\"newCaloCells\"\nhist.truth.Path=\"GenParticles\"\n\n#THistSvc().Output = [\"rec DATAFILE='\"+batch_dir+\"/\"+det_config+\"_\"+fccsw_version+\"/\"+run_config+\"/' TYP='ROOT' OPT='RECREATE'\"]\nTHistSvc().Output = [\"rec DATAFILE='\"+batch_dir+\"/\"+det_config+\"/\"+run_config+\"/digital_\"+file+\"' TYP='ROOT' OPT='RECREATE'\"]\nTHistSvc().PrintAll=False\nTHistSvc().AutoSave=True\nTHistSvc().AutoFlush=True\nTHistSvc().OutputLevel=INFO\n\n#CPU information\nfrom Configurables import AuditorSvc, ChronoAuditor\nchra = ChronoAuditor()\naudsvc = AuditorSvc()\naudsvc.Auditors = [chra]\nhist.AuditExecute = True\n\nfrom Configurables import FCCDataSvc, PodioOutput\n#podiosvc = FCCDataSvc(\"EventDataSvc\")\npodioout = PodioOutput(\"out\", filename=batch_dir+\"/\"+det_config+\"/\"+run_config+\"/digital_podio_\"+file)\npodioout.outputCommands = [\"keep *\"]\n\n# ApplicationMgr\nfrom Configurables import ApplicationMgr\nApplicationMgr( TopAlg = [podioinput, filtered, resegment, createcells,hist, podioout],\n EvtSel = 'NONE',\n # EvtMax = 10,\n # order is important, as GeoSvc is needed by G4SimSvc\n ExtSvc = [podiosvc,geoservice, audsvc],\n OutputLevel = INFO\n)\n","sub_path":"Detector/DetStudies/tests/options/decalAnalysis_DEcal_batch.py","file_name":"decalAnalysis_DEcal_batch.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"291850066","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n # path('', views.home, name=\"home\"),\n path('', views.index, name=\"index\"),\n path('index/', views.index, name=\"index\"),\n path('registration/', views.registration, name=\"registration\"),\n path('about/', views.about, name=\"about\"),\n path('login/', views.login, name=\"login\"),\n path('logout/', views.logout, name=\"logout\"),\n path('ajax/load-cities', views.load_city, name=\"load_city\"),\n path('ajax/load-state', views.load_state, name=\"load_state\"),\n path('ajax/fetch_pdt', views.fetch_pdt, name=\"fetch_pdt\"),\n path('ajax/load-creator/', views.load_creator, name=\"load_creator\"),\n path('contact/', views.contact, name=\"contact\"),\n path('designs/', views.designs, name=\"designs\"),\n path('user_dashboard/', views.user_dashboard, name=\"user_dashboard\"),\n path('admin/design_element/', views.design_element, name=\"design_element\"),\n path('product_list/', views.product_list, name=\"product_list\"),\n path('product/', views.product, name=\"product\"),\n path('cart/', views.cart, name=\"cart\"),\n path('product/', views.product, name=\"product\"),\n path('cart/', views.cart, name=\"cart\"),\n path('ajax/addtocart', views.addtocart, name=\"addtocart\"),\n path('ajax/addtocart/update', views.update_addtocart, name=\"update_addtocart\"),\n path('product_list/', views.product_list, name=\"product_list\"),\n path('admin/design_element/', views.design_element, name=\"design_element\"),\n path('checkout/', views.checkout, name=\"checkout\"),\n path('order/', views.order, name=\"order\"),\n path('addAddress/', views.addAddress, name=\"addAddress\"),\n path('placeOrder/', views.placeOrder, name=\"placeOrder\"),\n path('designProduct/', views.designProduct, name=\"designProduct\"),\n # path('order_test/', views.order_test, name=\"order\"),\n path('designer/login/', views.designer_login, name=\"designer_login\"),\n path('chat/', views.chat, name=\"chat\"),\n path('designer/chat/', views.designer_chat, name=\"designer_chat\"),\n path('ajax/chat/getMsgs/', views.getMsgs, name=\"getMsgs\"),\n path('ajax/chat/getChatList/', views.getChatList, name=\"getChatList\"),\n path('ajax/chat/send_msg/', views.send_msg, name=\"send_msg\"),\n path('ajax/chat/getUnseenMsg/', views.getUnseenMsg, name=\"getUnseenMsg\"),\n path('ajax/chat/unseenCnt/', views.unseenCnt, name=\"unseenCnt\"),\n path('ajax/chat/sendAttach/', views.sendAttach, name=\"sendAttach\"),\n path('designer_dashboard/', views.designer_dashboard, name=\"designer_dashboard\"),\n path('designer_design/', views.designer_design, name=\"designer_design\"),\n path('designer_design/add', views.designer_design_add, name=\"designer_design_add\"),\n path('designer_design/edit/', views.designer_design_edit, name=\"designer_design_edit\"),\n path('designer_design/delete/', views.designer_design_delete, name=\"designer_design_delete\"),\n path('generate/', views.GeneratePdf.as_view(), name=\"generate\"),\n]\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"463274563","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\n\nfrom lxml import etree\n\nfrom odoo.loglevels import ustr\nfrom odoo.tools import misc, view_validation\nfrom odoo.modules.module import get_resource_path\n\n_logger = logging.getLogger(__name__)\n\n_gantt_validator = None\n\n\n@view_validation.validate('gantt')\ndef schema_gantt(arch, **kwargs):\n global _gantt_validator\n\n if 
_gantt_validator is None:\n with misc.file_open(os.path.join('web_gantt_view', 'views', 'gantt.rng')) as f:\n # gantt.rng needs to include common.rng from the `base/rng/` directory. The idea\n # here is to set the base url of lxml lib in order to load relative file from the\n # `base/rng` directory.\n base_url = os.path.join(get_resource_path('base', 'rng'), '')\n _gantt_validator = etree.RelaxNG(etree.parse(f, base_url=base_url))\n\n if _gantt_validator.validate(arch):\n return True\n\n for error in _gantt_validator.error_log:\n _logger.error(ustr(error))\n return False\n","sub_path":"web_gantt_view/view_validation.py","file_name":"view_validation.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"33118112","text":"import os\nimport logging\nfrom datetime import (\n datetime, \n\ttimedelta\n)\n\n#########################################################\n#\n# Load Environment Variables\n#\n#########################################################\n\n#AWS_ACCESS_KEY_ID = os.getenv(\"AWS_ACCESS_KEY_ID\")\n#AWS_SECRET_ACCESS_KEY = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\n\n\n########################################################\n#\n# DAG Settings\n#\n#########################################################\n\nfrom airflow import DAG\n\ndag_default_args = {\n 'owner': 'BDE_LAB_6',\n 'start_date': datetime.now() - timedelta(days=1),\n 'email': [],\n 'email_on_failure': True,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=60),\n 'depends_on_past': False,\n 'wait_for_downstream': False,\n}\n\ndag = DAG(\n dag_id='exercise_1',\n default_args=dag_default_args,\n schedule_interval='@hourly',\n catchup=True,\n max_active_runs=1,\n concurrency=5\n)\n\n\n#########################################################\n#\n# Custom Logics for Operator\n#\n#########################################################\n\n\ndef print_bde():\n logging.info(\"Big Data Engineering\")\n\ndef addition():\n logging.info(f\"1 + 1 = {1+1}\")\n\ndef subtraction():\n logging.info(f\"6 -2 = {6-2}\")\n\ndef division():\n logging.info(f\"10 / 2 = {int(20/2)}\")\n\n\n#########################################################\n#\n# DAG Operator Setup\n#\n#########################################################\n\nfrom airflow.operators.python_operator import PythonOperator\n\n\nprint_bde_task = PythonOperator(\n task_id=\"print_bde_task_id\",\n python_callable=print_bde,\n dag=dag)\n\naddition_task = PythonOperator(\n task_id=\"addition_task_id\",\n python_callable=addition,\n dag=dag)\n\nsubtraction_task = PythonOperator(\n task_id=\"subtraction_task_id\",\n python_callable=subtraction,\n dag=dag)\n\ndivision_task = PythonOperator(\n task_id=\"division_task_id\",\n python_callable=division,\n dag=dag)\n\nprint_bde_task >> addition_task\nprint_bde_task >> subtraction_task\nsubtraction_task >> division_task\naddition_task >> division_task\n\n\n\n","sub_path":"dags/dag_exercise_1_solution.py","file_name":"dag_exercise_1_solution.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"79934767","text":"from setuptools import setup\nimport securitycenter\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nsetup(\n name=\"pySecurityCenter\",\n version=securitycenter.__version__,\n description=\"Security Center API Library\",\n long_description=long_description,\n author=\", 
\".join(securitycenter.__authors__),\n author_email=\"steve@chigeek.com\",\n url=\"https://github.com/SteveMcGrath/pySecurityCenter\",\n packages=[\n \"securitycenter\",\n \"securitycenter.orm\",\n \"securitycenter.orm.modules\",\n ],\n install_requires=[\"requests\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.4\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\"\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"359440067","text":"\nimport tensorflow as tf\nimport tensorflow.contrib as contrib\n\n\"\"\"\nDataset与placeholder不同的地方:\nplaceholder: 用placeholder从内存中读取数据时,首先是定义占位符,然后通过feed_dict将数据喂入占位符来输入数据, 喂入占位符的数据必须是已经经过处理的数据,\n 如已经通过了shuffle,batch处理\nDataset: Dataset从内存中读取数据, 只需要用内存中的数据创建一个dataset对象,然后生成迭代器读取即可\n\"\"\"\n# placeholder 的训练方式\nx = tf.placeholder(dtype=tf.float32, shape=[None, 227, 227, 3], name='images')\ny = tf.placeholder(dtype=tf.int64, shape=[None, 1], name='labels')\n\nlogits = model(x)\nloss = loss_function(logits, y)\ntrain_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(train_op, feed_dict={\n x: images,\n y: labels\n })\n\n# Dataset的训练方式\ndataset = contrib.data.Dataset.from_tensor_slices(\n (np.array([1.0, 2.0, 3.0, 4.0, 5.0]), np.random.uniform(size=(5, 2)))\n)\n# 根据map里面的函数处理数据\ndataset = dataset.map(...)\n# 乱序\ndataset = dataset.shuffle(buffer_size=10000)\n# 以batchsize的大小打包数据\ndataset = dataset.batch(32)\n# 重复多少个epochs\ndataset = dataset.repeat(num_epochs)\n\n# 创建迭代器iterator\niterator = dataset.make_one_shot_iterator()\n# 得到训练数据的input和label\nnext_images, next_labels = iterator.get_next()\n\nlogits = model(next_images)\nloss = loss_function(logits, next_labels)\ntrain_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(train_op)\n\n\n\n\"\"\"\nDataset读取硬盘文件与队列不同的地方:\n\n\"\"\"","sub_path":"Dataset/Dataset_compared.py","file_name":"Dataset_compared.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"401680760","text":"#!/usr/bin/env python\n\n\"\"\"Simple python macro to run ibd_main.cc\"\"\"\n\nfrom load import ROOT as R\nfrom stdvector import stdvector\n\ndef main(args):\n R.ibd_main(stdvector(args.input), args.output)\n\n print('Done processing file', args.input)\n print('Write output file', args.output)\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('input', nargs='+', default=(), help='input files')\n parser.add_argument('-o', '--output', required=True, help='output file name')\n\n main(parser.parse_args())\n","sub_path":"macro/ibd_main_cpp.py","file_name":"ibd_main_cpp.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"579740334","text":"from sgMaya import sgCmds\nimport pymel.core\n\nsels = pymel.core.ls( sl=1 
)\nnewObjs = []\nfor sel in sels:\n newObj = sgCmds.putObject( sel, 'locator' )\n sgCmds.constrain_parent( sel, newObj )\n newObjs.append( newObj )\npymel.core.select( newObjs )","sub_path":"maya_menus/_MAINMENU_PMC_Rigging/05.Put Object-(RP[N])/13.Put Constrained/02.Locators.py","file_name":"02.Locators.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"335363723","text":"from collections import deque\n\nopen_list = ['(', '{', '[']\nclose_list = [')', '}', ']']\ndef check(mystr):\n stack = deque()\n for i in mystr:\n if i in open_list:\n stack.append(i)\n elif i in close_list:\n pos = close_list.index(i)\n if((len(stack)>0) and (open_list[pos] == stack[len(stack)-1])):\n stack.pop()\n else:\n return \"unbalanced\"\n if(len(stack)==0):\n return \"balanced\"\n else:\n return \"unbalanced\"\nstr = '([])'\nprint(check(str))\n \n \n\n","sub_path":"stack/parenthesis_check.py","file_name":"parenthesis_check.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"480485445","text":"## Project: Money Does Grow On Trees ##\n## Authors: Aranya Sutharsan, Rinoa Malapaya Noshin Rahman, Carol Altimas ##\n\nimport time # Imports a module to add a pause\n\n# User Responses\nanswer_A = [\"A\", \"a\"]\nanswer_B = [\"B\", \"b\"]\nanswer_C = [\"C\", \"c\"]\nyes = [\"Y\", \"y\", \"yes\"]\nno = [\"N\", \"n\", \"no\"]\n\n# Variables for the game\ngreen_points = 0\nchequings_acc = 1000\nsavings_acc = 0\nmoney = 0\n\n\n## Introduction ##\n\n\n## Morning ##\n\n\n## Afternoon ##\n\n## Night ##\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"171194882","text":"from sqlalchemy import Column, DateTime, Integer, String, func\n\nfrom src.models.base import Base\nfrom src.models.model_utils import RepresentableMixin\n\n\nclass SPLTokenBackfillTransaction(Base, RepresentableMixin):\n __tablename__ = \"spl_token_backfill_txs\"\n last_scanned_slot = Column(Integer, primary_key=True, nullable=False)\n signature = Column(String, nullable=False)\n created_at = Column(DateTime, nullable=False, default=func.now())\n updated_at = Column(\n DateTime, nullable=False, default=func.now(), onupdate=func.now()\n )\n","sub_path":"discovery-provider/src/models/indexing/spl_token_backfill_transaction.py","file_name":"spl_token_backfill_transaction.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"316717686","text":"import requests\nimport pytest\n\n\ntest_data_zip_codes = [\n(\"us\", \"90210\", \"Beverly Hills\"),\n(\"ca\", \"B2A\", \"North Sydney South Central\"),\n(\"it\", \"50123\", \"Firenze\")\n]\n\n@pytest.mark.parametrize(\"country_code, zip_code, expected_place_name\", test_data_zip_codes)\ndef test_using_test_data_object_get_locations_data_check_place_name(country_code, zip_code, expected_place_name):\n response = requests.get(f\"http://api.zippopotam.us/{country_code}/{zip_code}\")\n response_body = response.json()\n assert response_body[\"places\"][0][\"place name\"] == 
expected_place_name\n\n","sub_path":"softwaretesting_examples/test_using_data.py","file_name":"test_using_data.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"589922712","text":"# Написать функцию strong_enough(earthquake, age), которая вычисляет достаточно ли безопасное здание,\n# чтобы выдержать землетрясение. Здание рухнет, если сила землетрясения будет больше, чем сила здания.\n# Earthquake – список, состоящий из спсика ударных волн.\n# Вычисление силы землетрясения для [[5,3,7], [3,3,1], [4,1,2]]\n# -> ((5 + 3 + 7) * (3 + 3 + 1) * (4 + 1 + 2)) = 735.\n# Прочность нового здания 1000, при этом это значение уменьшается на 1% каждый год\n\n\nimport traceback\n\n\ndef strong_enough(earthquake, age):\n wava_summa = 0\n waves_result = 1\n strength = 1000\n for wave_data in earthquake:\n for number in wave_data:\n wava_summa = wava_summa + number\n if wava_summa != 0:\n # print(wava_summa)\n waves_result = waves_result * wava_summa\n # print(waves_result)\n wava_summa = 0\n\n strength = (strength / 100) * (100 - age)\n if strength < waves_result:\n return False\n else:\n return True\n\n\n# strong_enough([[2,3,1],[3,1,1],[1,1,2]], 2)\n\n\n# Тесты\ntry:\n assert strong_enough([[2,3,1],[3,1,1],[1,1,2]], 2) == True\n assert strong_enough([[5,8,7],[3,3,1],[4,1,2]], 2) == True\n assert strong_enough([[5,8,7],[3,3,1],[4,1,2]], 3) == False\nexcept AssertionError:\n print(\"TEST ERROR\")\n traceback.print_exc()\nelse:\n print(\"TEST PASSED\")\n","sub_path":"task388.py","file_name":"task388.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"637353703","text":"from flask import Flask, render_template, request, redirect, url_for, flash, jsonify\napp = Flask(__name__)\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n\n#Fake Restaurants\n# restaurant = {'name': 'The CRUDdy Crab', 'id': '1'}\n\n# restaurants = [{'name': 'The CRUDdy Crab', 'id': '1'}, {'name':'Blue Burgers', 'id':'2'},{'name':'Taco Hut', 'id':'3'}]\n\n\n# #Fake Menu Items\n# items = [ {'name':'Cheese Pizza', 'description':'made with fresh cheese', 'price':'$5.99','course' :'Entree', 'id':'1'}, {'name':'Chocolate Cake','description':'made with Dutch Chocolate', 'price':'$3.99', 'course':'Dessert','id':'2'},{'name':'Caesar Salad', 'description':'with fresh organic vegetables','price':'$5.99', 'course':'Entree','id':'3'},{'name':'Iced Tea', 'description':'with lemon','price':'$.99', 'course':'Beverage','id':'4'},{'name':'Spinach Dip', 'description':'creamy dip with fresh spinach','price':'$1.99', 'course':'Appetizer','id':'5'} ]\n# item = {'name':'Cheese Pizza','description':'made with fresh cheese','price':'$5.99','course' :'Entree'}\n\n\n#ADD JSON API ENDPOINT HERE\n\n@app.route('/restaurants/JSON/')\ndef restaurantJSON():\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\trestaurants = session.query(Restaurant).all()\n\treturn jsonify(Restaurants=[i.serialize for i in restaurants])\n\n@app.route('/restaurants//menu/JSON/')\ndef restaurantMenuJSON(restaurant_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\trestaurant = 
session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titems = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()\n\tMenuItems=[i.serialize for i in items]\n\treturn jsonify(MenuItems=[i.serialize for i in items])\n\n@app.route('/restaurants//menu//JSON/')\ndef menuItemJSON(restaurant_id, menu_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\tmenuItem = session.query(MenuItem).filter_by(id=menu_id).one()\n\treturn jsonify(MenuItem=menuItem.serialize)\n\n\n#RESTAURANTS CODE\n\n\n@app.route('/restaurants/')\ndef showRestaurants():\n\t#return \"this page will show all restaurants\"\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\trestaurants = session.query(Restaurant).all()\n\treturn render_template('restaurants.html', restaurants = restaurants)\n\n@app.route('/restaurants/new', methods=['GET','POST'])\ndef newRestaurant():\n\t#return \"this page will be for making new restaurants\"\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\tif request.method == 'POST':\n\t\tnewRestaurant = Restaurant(name = request.form['name'])\n\t\tsession.add(newRestaurant)\n\t\tsession.commit()\n\t\tflash(\"new restaurant created!\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('newrestaurant.html')\n\n@app.route('/restaurants//edit', methods=['GET','POST'])\ndef editRestaurant(restaurant_id):\n\t#return \"this page will be for editing restaurant %s\" %restaurant_id\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\teditedRestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\tif request.method == 'POST':\n\t\tif request.form['name']:\n\t\t\teditedRestaurant.name = request.form['name']\n\t\tsession.add(editedRestaurant)\n\t\tsession.commit()\n\t\tflash(\"restaurant name edited!\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('editrestaurant.html', restaurant = editedRestaurant)\n\n\n@app.route('/restaurants//delete', methods=['GET','POST'])\ndef deleteRestaurant(restaurant_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\tdeleteRestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\tif request.method == 'POST':\n\t\tsession.delete(deleteRestaurant)\n\t\tsession.commit()\n\t\t#flash(\"restaurant deleted!\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('deleterestaurant.html', restaurant=deleteRestaurant)\n\n\n#RESTAURANT MENU CODE\n\n\n\n@app.route('/restaurants//')\n@app.route('/restaurants//menu')\ndef restaurantMenu(restaurant_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\trestaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n\titems = session.query(MenuItem).filter_by(restaurant_id = restaurant_id)\n\t#print items.first()\n\tif items.first() != None:\n\t\treturn render_template('menu.html', restaurant=restaurant, items = items, restaurant_id = restaurant_id)\n\telse:\n\t\treturn render_template('emptymenu.html', restaurant=restaurant)\n\n\n\n@app.route('/restaurants//new', methods=['GET','POST'])\ndef newMenuItem(restaurant_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\tif request.method == 'POST':\n\t\t#newItem = MenuItem(name = request.form['name'], restaurant_id = restaurant_id)\n\t\tnewItem = MenuItem(name = request.form['name'], \n\t\t\tdescription = request.form['description'], \n\t\t\tprice = request.form['price'], \n\t\t\tcourse = 
request.form['course'], \n\t\t\trestaurant_id = restaurant_id)\n\t\t#print request.form['course']\n\t\tsession.add(newItem)\n\t\tsession.commit()\n\t\tflash(\"new menu item created!\")\n\t\treturn redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n\telse:\n\t\treturn render_template('newmenuitem.html', restaurant_id = restaurant_id)\n\n\n\n\n@app.route('/restaurants///edit', methods = ['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\teditedItem = session.query(MenuItem).filter_by(id = menu_id).one()\n\tif request.method == 'POST':\n\t\tif request.form['name']:\n\t\t\teditedItem.name = request.form['name']\n\t\tif request.form['description']:\n\t\t\teditedItem.description = request.form['description']\n\t\t\t#editedItem.description = request.form['name']\n\t\tif request.form['price']:\n\t\t\teditedItem.price = request.form['price']\n\t\tif request.form['course']:\n\t\t\teditedItem.course = request.form['course']\n\t\tsession.add(editedItem)\n\t\tsession.commit()\n\t\tflash(\"menu item edited!\")\n\n\t\treturn redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n\telse:\n\t\t\n\t\treturn render_template('editmenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, item = editedItem)\n\t\n\n\n@app.route('/restaurants///delete', methods = ['GET','POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n\tDBSession = sessionmaker(bind=engine)\n\tsession = DBSession()\n\titemToDelete = session.query(MenuItem).filter_by(id = menu_id).one() \n\tif request.method == 'POST':\n\t\tsession.delete(itemToDelete)\n\t\tsession.commit()\n\t\tflash(\"menu item deleted!\")\n\t\treturn redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n\telse:\n\t\treturn render_template('deletemenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, item = itemToDelete)\n\n\n\n\n\nif __name__ == '__main__':\n\tapp.secret_key = 'super_secret_key'\n\tapp.debug = True\n\tapp.run(host = '0.0.0.0', port = 5000)\n\n\n","sub_path":"vagrant/finalproject.py","file_name":"finalproject.py","file_ext":"py","file_size_in_byte":7157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440144157","text":"#!/usr/bin/env python\n#coding:utf-8\n\n'''\n循环嵌套\npython3 中, / 计算的可为浮点数\n'''\n\ni = 2\nwhile (i < 100):\n j = 2\n while (j <= (i//j)):\n print(i % j)\n if not (i%j): break\n j += 1\n if (j > i//j):\n print(i, '质数')\n i += 1\n\n","sub_path":"python_02/control/3_loop_nest.py","file_name":"3_loop_nest.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"397544039","text":"from app.models import Recipe\nfrom app import db\n\n_id_cnt = 0\n_known_recipes = {} \n \ndef get_recipe(recipe_id):\n return Recipe.query.get(recipe_id)\n\ndef add_recipe(recipe):\n global _id_cnt \n #_id_cnt += 1\n #recipe.id = _id_cnt\n #_known_recipes[_id_cnt] = recipe\n db.session.add(recipe)\n db.session.flush()\n id = recipe.id\n db.session.commit()\n return id\n\ndef edit_recipe(recipe_id, edit):\n recipe = get_recipe(recipe_id)\n edit.id = recipe_id\n recipe.name = edit.name\n recipe.description = edit.description\n recipe.steps = edit.steps\n recipe.ingredients = edit.ingredients\n recipe.location = edit.location\n recipe.locationtype = edit.locationtype\n edit.views = recipe.views\n recipe.tags = edit.tags\n db.session.commit()\n return recipe_id\n\ndef 
delete_recipe(recipe_id):\n recipe = get_recipe(recipe_id)\n db.session.delete(recipe)\n db.session.commit()\n return\n\ndef get_recipes():\n #return _known_recipes.values()\n return Recipe.query.all()\n\ndef add_recipe_view(recipe_id):\n recipe = get_recipe(recipe_id)\n recipe.views += 1\n db.session.commit()\n return recipe.views\n","sub_path":"app/dbaccess.py","file_name":"dbaccess.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"551722270","text":"\"\"\"States for managing CKAN extensions.\"\"\"\n\nimport os\n\nimport yaml\n\n\n__virtualname__ = 'ckanext'\n\n\ndef __virtual__():\n return __virtualname__\n\n\ndef _ckan():\n # XXX duplicate the whole map.jinja logic\n stream = __salt__['cp.get_file_str']('salt://ckan/defaults.yaml')\n default_settings = yaml.load(stream)\n os_family_map = __salt__['grains.filter_by'](\n {'Debian': {},\n 'RedHat': {}},\n grain=\"os_family\",\n merge=__salt__['pillar.get']('ckan:lookup')\n )\n default_settings['ckan'].update(os_family_map)\n ckan = __salt__['pillar.get'](\n 'ckan',\n default=default_settings['ckan'],\n merge=True\n )\n ckan['venv_path'] = ckan['ckan_home'] + '/venv'\n return ckan\n\n\ndef installed(name, repourl=None, rev=None, requirements_file=None):\n \"\"\"Install the `name` CKAN extension.\n \"\"\"\n ckan = _ckan()\n if rev is None:\n rev = ckan['extensions'].get(name, {}).get('rev', None)\n if rev is None:\n rev = 'master'\n if requirements_file is None:\n requirements_file = 'requirements.txt'\n ret = {\n 'changes': {},\n 'comment': '',\n 'name': name,\n 'result': None,\n }\n fullname = 'ckanext-' + name\n if repourl is None:\n repourl = ckan['extensions'].get(name, {}).get('repourl', None)\n if repourl is None:\n repourl = 'https://github.com/ckan/' + fullname\n srcdir = os.path.join(ckan['src_dir'], fullname)\n user, group = ckan['ckan_user'], ckan['ckan_group']\n bin_env = ckan['venv_path']\n if __opts__['test']:\n ret['comment'] = (\n 'would install {0} CKAN extention into {1} virtualenv'.format(\n name, bin_env))\n ret['result'] = None\n return ret\n\n def log(change_ctx, msg):\n ret['changes'][change_ctx] = msg\n\n def failed(change_ctx, res):\n msg = 'failed'\n if isinstance(res, dict):\n msg = '\\n'.join(\n [msg + ':', res.get('stderr', ''), res.get('stdout', '')])\n else:\n msg = ': '.join([msg, str(res)])\n log(change_ctx, msg)\n ret['result'] = False\n return ret\n\n def git_checkout():\n ret['changes']['sources checkout'] = __salt__['git.checkout'](\n cwd=srcdir, rev=rev)\n\n if os.path.isdir(srcdir):\n res = __salt__['git.fetch'](cwd=srcdir, opts='origin ' + rev)\n if isinstance(res, dict) and res.get('retcode'):\n return failed('sources update', res)\n log('sources fetch', res)\n git_checkout()\n res = __salt__['git.merge'](cwd=srcdir, opts='origin/' + rev)\n if isinstance(res, dict) and res.get('retcode'):\n return failed('sources update', res)\n log('sources update', res)\n else:\n ret['changes']['sources clone'] = __salt__['git.clone'](\n cwd=srcdir, repository=repourl)\n git_checkout()\n res = __salt__['file.chown'](srcdir, user=user, group=group)\n if res is not None:\n return failed('sources ownership', res)\n res = __salt__['pip.install'](editable=srcdir, user=user, bin_env=bin_env)\n if res['retcode']:\n return failed('pip install', res)\n log('pip install', 'pip installed {0}'.format(fullname))\n requirements_file = os.path.join(srcdir, requirements_file)\n if os.path.exists(requirements_file):\n res = 
__salt__['pip.install'](\n requirements=requirements_file, user=user, bin_env=bin_env)\n if res['retcode']:\n return failed('pip install dependencies', res)\n log('pip install dependencies',\n 'install {0} dependencies from {1}'.format(\n fullname, requirements_file))\n ret['comment'] = ' successfully installed CKAN extension {0}'.format(name)\n ret['result'] = True\n return ret\n","sub_path":"_states/ckanext.py","file_name":"ckanext.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"443973005","text":"#!/usr/bin/python3\nimport json\nimport os\nfrom PIL import Image\nimport numpy as np\n\n\nclass PixelCipher:\n\n def __init__(self):\n self.cfg = self.load_config()\n\n def load_config(self):\n with open('config/decoding_table.json', 'r') as msg:\n cfg = json.load(msg)\n return cfg\n\n def img_to_pixels_array(self, img_path):\n if not os.path.exists(img_path):\n raise FileExistsError\n img = Image.open(img_path)\n array = np.array(img)\n return array\n\n def pixels_to_img(self, pixels_array):\n img = Image.fromarray(pixels_array)\n return img\n\n def pair_pixels(self, row):\n return zip(row[0::2], row[1::2])\n\nclass CodePixel(PixelCipher):\n def __init__(self, img, msg):\n super().__init__()\n self.letter_to_number = self.cfg[\"letter_to_number\"]\n self.pixels_array = self.coded_img(img, msg)\n self.save_img(self.pixels_to_img(self.pixels_array))\n\n def path_to_save_convert_image(self):\n path = os.path.join(self.cfg[\"path_to_save_coded_img\"])\n if os.path.exists(path):\n path = os.path.join(path, self.cfg[\"name_convert_img\"] + \".\" + self.cfg[\"extension\"])\n return path\n else:\n raise FileExistsError\n\n def save_img(self, img):\n path = os.path.join(self.path_to_save_convert_image())\n img.save(path)\n\n def is_rgb_overload(self, pixel_first, pixel_second):\n if any([elem > 254 for elem in [*pixel_first, *pixel_second]]):\n return True\n return False\n\n def coded_letter_to_bin(self, msg):\n for sign in msg:\n yield '{:06b}'.format(self.letter_to_number[sign])\n\n def coded_pixels(self, bin_letter, first_pixel, second_pixel):\n iteration = 0\n for bit in bin_letter:\n if bit == '1':\n if iteration > 2:\n second_pixel[iteration-3] += 1\n else:\n first_pixel[iteration] += 1\n iteration += 1\n\n def coded_img(self, img_path, msg):\n pixels_array = self.img_to_pixels_array(img_path)\n get_bin_letter = iter(self.coded_letter_to_bin(msg))\n for row in pixels_array:\n for first_pixel, second_pixel in self.pair_pixels(row):\n if not self.is_rgb_overload(first_pixel, second_pixel):\n try:\n self.coded_pixels(next(get_bin_letter), first_pixel, second_pixel)\n except StopIteration:\n return pixels_array\n\nclass DecodedPixel(PixelCipher):\n def __init__(self, img_orginal, img_coded):\n super().__init__()\n self.number_to_letter = self.cfg[\"number_to_letter\"]\n self.decoded_msg = self.decoded_img(img_orginal, img_coded)\n\n def __str__(self):\n return self.decoded_msg\n\n def decoded_pixel(self, pair_pixel, pair_coded_pixel):\n original = [*pair_pixel[0], *pair_pixel[1]]\n coded = [*pair_coded_pixel[0], *pair_coded_pixel[1]]\n bin_code = ''\n for org, cod in zip(original, coded):\n bin_code += (str(cod - org))\n if str(int(bin_code, 2)) in self.number_to_letter.keys():\n decoded_pixel = self.number_to_letter[str((int(bin_code, 2)))]\n else:\n decoded_pixel = ''\n return decoded_pixel\n\n def decoded_img(self, img_path, coded_img_path):\n pixels_array = self.img_to_pixels_array(img_path)\n 
coded_img_array = self.img_to_pixels_array(coded_img_path)\n encrypted_msg = ''\n for row, coded_row in zip(pixels_array, coded_img_array):\n for pair_pixel, pair_pixel_coded in zip(self.pair_pixels(row), self.pair_pixels(coded_row)):\n encrypted_msg += self.decoded_pixel(pair_pixel, pair_pixel_coded)\n return encrypted_msg\n\nclass Message:\n def __init__(self, cipher_message):\n self.msg = cipher_message\n self.code_table = self.get_code_table()\n\n def __str__(self):\n return 'message length: {}\\nmessage content: \"{}\"'.format(len(self.msg), self.msg)\n\n def get_code_table(self):\n with open('config/decoding_table.json', 'r') as msg:\n cfg = json.load(msg)\n return cfg[\"letter_to_number\"].keys()\n\n def in_coded_table(self, msg):\n for elem in msg:\n if elem not in self.code_table:\n raise Exception(\"Can't coded sign:\", elem)\n return True\n","sub_path":"pixel_cipher.py","file_name":"pixel_cipher.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"6462444","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 21 21:41:12 2020\n@author: hwan\n\"\"\"\nimport sys\nimport os\nsys.path.insert(0, os.path.realpath('../../src'))\nsys.path.append('../')\n\nimport yaml\nfrom attrdict import AttrDict\nimport scipy.sparse as sparse\n\n# For generating vtk files\nfrom dolfin import *\n\n# Import src code\nfrom utils_mesh.construct_mesh_rectangular import construct_mesh\nfrom utils_prior.smoothness_prior_autocorr import smoothness_prior_autocorr\nfrom utils_prior.gaussian_field import construct_matern_covariance\nfrom utils_io.prior import load_prior\nfrom utils_prior.draw_from_distribution import draw_from_distribution\nfrom utils_io.load_dataset import load_dataset\nfrom utils_fenics.plot_fem_function_fenics_3d import plot_fem_function_fenics_3d\nfrom utils_fenics.construct_prematrix import construct_prematrix\nfrom utils_fenics.construct_boundary_matrices_and_load_vector import\\\n construct_boundary_matrices_and_load_vector\nfrom utils_io.load_fem_matrices import load_boundary_matrices_and_load_vector\nfrom utils_fenics.convert_array_to_dolfin_function import\\\n convert_array_to_dolfin_function\nfrom utils_misc.positivity_constraints import positivity_constraint_exp,\\\n positivity_constraint_log_exp\nfrom utils_mesh.observation_points import form_interior_observation_points,\\\n form_observation_data\n\n# Import project utilities\nfrom utils_project.filepaths import FilePaths\nfrom utils_project.weak_forms import stiffness\nfrom utils_project.solve_poisson_3d import solve_pde_prematrices\n\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\n###############################################################################\n# Driver #\n###############################################################################\nif __name__ == \"__main__\":\n\n ##################\n # Setting Up #\n ##################\n #=== Options ===#\n with open('config_files/options.yaml') as f:\n options = yaml.safe_load(f)\n options = AttrDict(options)\n options.num_nodes = (options.num_nodes_x + 1) * (options.num_nodes_y + 1)\\\n *(options.num_nodes_z + 1)\n\n #=== File Paths ===#\n filepaths = FilePaths(options)\n\n #=== Creating Directory ===#\n if not os.path.exists(filepaths.directory_dataset):\n os.makedirs(filepaths.directory_dataset)\n\n ############\n # Mesh #\n ############\n #=== Construct Mesh ===#\n fe_space, meta_space,\\\n nodes, dof_fe, dof_meta = 
construct_mesh(options)\n\n ############################\n # Prior and Parameters #\n ############################\n #=== Construct Prior ===#\n if options.construct_prior == 1:\n if options.prior_type_AC == 1:\n smoothness_prior_autocorr(filepaths,\n nodes,\n options.prior_mean_AC,\n options.prior_variance_AC,\n options.prior_corr_AC)\n if options.prior_type_matern == 1:\n construct_matern_covariance(filepaths,\n nodes,\n options.prior_kern_type,\n options.prior_cov_length)\n\n #=== Load Prior ===#\n prior_mean, _, _, prior_covariance_cholesky, _ = load_prior(filepaths, dof_meta)\n\n #=== Draw Parameters from Prior ===#\n if options.draw_and_save_parameters == 1:\n draw_from_distribution(filepaths,\n prior_mean, prior_covariance_cholesky, dof_meta,\n options.save_standard_gaussian_draws,\n num_samples = options.num_data)\n\n #=== Load Parameters ===#\n parameters = load_dataset(filepaths.parameter, dof_meta, options.num_data)\n\n #=== Plot Parameters ===#\n if options.plot_parameters == 1:\n for n in range(0, options.num_data):\n plot_fem_function_fenics_3d(meta_space, parameters[n,:],\n 'parameter',\n filepaths.directory_figures + 'parameter_%d.png' %(n),\n 90, 270,\n (5,5))\n\n ###################\n # FEM Objects #\n ###################\n #=== Construct or Load Prematrices ===#\n if options.construct_and_save_prematrices == 1:\n if not os.path.exists(filepaths.directory_dataset):\n os.makedirs(filepaths.directory_dataset)\n prestiffness = construct_prematrix(options,\n fe_space, meta_space,\n dof_fe, dof_meta,\n stiffness, test=False)\n sparse.save_npz(filepaths.prestiffness + '.npz', prestiffness)\n prestiffness = sparse.load_npz(filepaths.prestiffness + '.npz')\n\n #=== Construct or Load Boundary Matrix and Load Vector ===#\n if options.construct_and_save_boundary_matrices == 1:\n construct_boundary_matrices_and_load_vector(filepaths,\n fe_space, options.boundary_matrix_constant, options.load_vector_constant)\n boundary_matrix, load_vector = load_boundary_matrices_and_load_vector(filepaths, dof_fe)\n\n ##########################\n # Computing Solution #\n ##########################\n #=== Solve PDE with Prematrices ===#\n state = solve_pde_prematrices(options, filepaths,\n positivity_constraint_exp(parameters),\n prestiffness, boundary_matrix, load_vector)\n\n #=== Plot Solution ===#\n if options.plot_solutions == 1:\n for n in range(0, options.num_data):\n plot_fem_function_fenics_3d(meta_space, state[n,:],\n 'state',\n filepaths.directory_figures + 'state_%d.png' %(n),\n 90, 270,\n (5,5))\n\n #=== Form Observation Data ===#\n obs_indices, _ = form_interior_observation_points(options, filepaths, meta_space)\n form_observation_data(filepaths, state, obs_indices)\n\n ######################\n # Paraview Plots #\n ######################\n # Note that we use fenics_env to produce the vtk files, but the paraview\n # module was not installed under fenics. 
So we use a separate script from\n # this to produce the paraview plots\n if options.save_vtk_files == 1:\n for n in range(0, options.num_data):\n parameter_fe = convert_array_to_dolfin_function(meta_space, parameters[n,:])\n state_fe = convert_array_to_dolfin_function(meta_space, state[n,:])\n vtkfile_parameter = File(filepaths.figure_vtk_parameter + '_%d.pvd'%(n))\n vtkfile_parameter << parameter_fe\n vtkfile_state = File(filepaths.figure_vtk_state + '_%d.pvd'%(n))\n vtkfile_state << state_fe\n","sub_path":"projects/poisson_3d/driver_poisson_3d_generate_data_prematrices.py","file_name":"driver_poisson_3d_generate_data_prematrices.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"28352804","text":"\"\"\"\n\nhttps://github.com/topics/hypothesis-testing?l=python&o=desc&s=stars\n\nhttps://pypi.org/project/pysie/#description\n\n\"\"\"\nimport os, sys, io, pandas as pd, numpy as np\nimport typing\nimport urllib.request\nimport urllib.parse\nimport validators\nimport cv2\nimport matplotlib.pyplot as plt\nimport tifffile.tifffile\n\n\ndef log(*s):\n print(s)\n\n\ndef read_image(filepath_or_buffer: typing.Union[str, io.BytesIO]):\n \"\"\"Read a file into an image object\n Args:\n filepath_or_buffer: The path to the file, a URL, or any object\n with a `read` method (such as `io.BytesIO`)\n \"\"\"\n if isinstance(filepath_or_buffer, np.ndarray):\n return filepath_or_buffer\n if hasattr(filepath_or_buffer, 'read'):\n image = np.asarray(bytearray(filepath_or_buffer.read()), dtype=np.uint8)\n image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)\n elif isinstance(filepath_or_buffer, str):\n if validators.url(filepath_or_buffer):\n return read(urllib.request.urlopen(filepath_or_buffer))\n assert os.path.isfile(filepath_or_buffer), \\\n 'Could not find image at path: ' + filepath_or_buffer\n if filepath_or_buffer.endswith('.tif') or filepath_or_buffer.endswith('.tiff'):\n image = tifffile.imread(filepath_or_buffer)\n else:\n image = cv2.imread(filepath_or_buffer)\n\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\n# helper function for data visualization\ndef visualize_in_row(**images):\n \"\"\"Plot images in one row.\"\"\"\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n\n\n# Resizes a image and maintains aspect ratio\ndef maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):\n # Grab the image size and initialize dimensions\n dim = None\n (h, w) = image.shape[:2]\n\n # Return original image if no need to resize\n if width is None and height is None:\n return image\n\n # We are resizing height if width is none\n if width is None:\n # Calculate the ratio of the height and construct the dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n # We are resizing width if height is none\n else:\n # Calculate the ratio of the width and construct the dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # Return the resized image\n return cv2.resize(image, dim, 
interpolation=inter)\n\n#############################################################################\n#############################################################################\n","sub_path":"utilmy/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"423730943","text":"import pandas as pd\nimport numpy as np\n\ndef prefilter_items(data, take_n_popular=5000, item_features=None, bad_departments=None):\n \"\"\"\n Бизнес-ограничения:\n - Нельзя рекомендовать top 3 самых популярных товаров\n - Нельзя рекомендовать товары, которые стоят < 1$\n - Нельзя рекомендовать товары, которые не продавались последние 12 месяцев\n - Нельзя рекомендовать товары, с общим числом продаж < 50\n\n Параметры:\n - item_features: если передать фичи, то убрет категории, где мало товаров, по умолчанию менее 150\n - bad_departments: принимает лист с категориями, которые здесь можно явно исключить\n \"\"\"\n\n # Уберем товары с общим числом продаж < 50\n items_by_quantity = data.groupby('item_id')['quantity'].sum().reset_index()\n items_by_quantity = items_by_quantity.loc[items_by_quantity['quantity'] < 50, 'item_id'].tolist()\n # data = data[~data['item_id'].isin(items_by_quantity)]\n\n # Уберем самые популярные товары (их и так купят)\n popularity = data.groupby('item_id')['user_id'].nunique().reset_index()\n popularity['user_id'] = popularity['user_id'] / data['user_id'].nunique() # Какая доля юзеров из общего числа покупала этот товар\n popularity.rename(columns={'user_id': 'share_unique_users'}, inplace=True)\n\n # top_popular = popularity[popularity['share_unique_users'] > 0.2].item_id.tolist()\n # data = data[~data['item_id'].isin(top_popular)]\n top_3_popular = popularity.sort_values('share_unique_users', ascending=False).head(3).item_id.tolist()\n #data = data[~data['item_id'].isin(top_3_popular)]\n\n # Уберем самые НЕ популярные товары (их и так НЕ купят)\n # top_notpopular = popularity[popularity['share_unique_users'] < 0.02].item_id.tolist()\n # data = data[~data['item_id'].isin(top_notpopular)]\n\n # Уберем товары, которые не продавались за последние 12 месяцев (это примерно 52 недели)\n old_items = data.loc[data['week_no'] < data['week_no'].max() - 52, 'item_id'].tolist()\n # data = data[~data['item_id'].isin(old_items)]\n\n # Уберем не интересные для рекоммендаций категории (department)\n # if item_features is not None:\n # # Обработаем категории с маленьким числом товаров\n # department_size = pd.DataFrame(item_features.\\\n # groupby('department')['item_id'].nunique().\\\n # sort_values(ascending=False)).reset_index()\n\n # department_size.columns = ['department', 'n_items']\n # rare_departments = department_size[department_size['n_items'] < 150].department.tolist()\n # items_in_rare_departments = item_features[item_features['department'].isin(rare_departments)].item_id.unique().tolist()\n # data = data[~data['item_id'].isin(items_in_rare_departments)]\n\n # # Можно также явно указать категории, которые не интересны\n # if bad_departments is not None: \n # items_in_bad_departments = []\n # for dept in bad_departments: #bad_departments = ['KIOSK-GAS', 'PASTRY'] - передать как параметры в функцию\n # items_in_bad_departments.extend(item_features.loc[item_features['department'] == dept, 'item_id'].tolist()) # Именно extend, чтобы добавить не списки, а их значения\n\n # data = data[~data['item_id'].isin(items_in_bad_departments)]\n\n # Уберем слишком дешевые товары (на них не 
заработаем). 1 покупка из рассылок стоит 60 руб.\n data['price'] = data['sales_value'] / (np.maximum(data['quantity'], 1))\n cheap_items = data.loc[data['price'] < 1, 'item_id'].unique().tolist()\n #data = data[data['price'] >= 1] # Оставляем с ценой не менее 1$\n\n # Уберем слишком дорогие товары\n # data = data[data['price'] < 50]\n\n # Возьмем топ по популярности (по умолчанию take_n_popular=5000)\n popularity = data.groupby('item_id')['quantity'].sum().reset_index()\n popularity.rename(columns={'quantity': 'n_sold'}, inplace=True)\n\n top_n = popularity.sort_values('n_sold', ascending=False).head(take_n_popular).item_id.tolist()\n \n # Заведем фиктивный item_id (если юзер покупал товары из топ-5000, то он \"купил\" такой товар)\n data.loc[~data['item_id'].isin(top_n), 'item_id'] = 999999\n\n # Собираю лист с товарами, которые нельзя рекоммендовать\n items_to_filter = list(set(items_by_quantity + top_3_popular + old_items + cheap_items + [999999])) # Drop dublicates\n\n return data, items_to_filter\n\n\ndef postfilter_items(user_id, recommednations):\n \"\"\"Бизнес-ограничения:\n - Каждому пользователю нужно порекомендовать 5 товаров, один из них должен быть обязательно товаром, который данный пользователь никогда не покупал\n \"\"\"\n pass","sub_path":"data/src/utils_project.py","file_name":"utils_project.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"290789392","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 26 17:29:00 2019\n\n@author: bao\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nnp.random.seed(seed=0)\ntorch.manual_seed(0) \n\nclass MLP_(nn.Module):\n def __init__(self,dim,hidden,dropout_rate):\n super(MLP_, self).__init__()\n self.layers = nn.Sequential(\n nn.Linear(dim, hidden),\n nn.BatchNorm1d(hidden),\n nn.ReLU(),\n nn.Dropout(dropout_rate),\n nn.Linear(hidden, 2)\n )\n nn.init.xavier_uniform_(self.layers[0].weight)\n nn.init.xavier_uniform_(self.layers[-1].weight)\n def forward(self, x):\n x = self.layers(x)\n return x\n \n\nclass MLP():\n def __init__(self,dim,hidden = 37,dropout_rate = 0.9,weight = [1.0,1.0],l2 = (0.03,0.09)):\n self.mlp = MLP_(dim,hidden,dropout_rate).cuda()\n self.EPOCHS = 30\n self.batch_size = 25\n self.opt = torch.optim.Adam(self.mlp.parameters(), lr=0.001)\n self.loss_fn = nn.CrossEntropyLoss(weight = torch.Tensor(weight)).cuda()\n self.l2 = l2\n def fit(self,X_train,y_train):\n for e in range(self.EPOCHS):\n for b in range(0,X_train.shape[0],self.batch_size):\n self.mlp.train()\n start = b\n end = min(X_train.shape[0],b+self.batch_size)\n batch_data = X_train[start:end,:]\n batch_label = y_train[start:end]\n \n output = self.mlp(torch.Tensor(batch_data).cuda())\n batch_label = torch.LongTensor(batch_label).cuda()\n loss = self.loss_fn(output,batch_label)\n\n l2_norm = torch.norm(self.mlp.layers[0].weight, p=2)\n loss += l2_norm*self.l2[0]\n \n l2_norm = torch.norm(self.mlp.layers[-1].weight, p=2)\n loss += l2_norm*self.l2[1]\n \n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n print (\"[MLP]\",end = \"\")\n return self\n def predict_proba(self,X_test):\n self.mlp.eval()\n pred_mlp = []\n with torch.no_grad():\n for b in range(0,X_test.shape[0],self.batch_size):\n start = b\n end = min(X_test.shape[0],b+self.batch_size)\n batch_data = X_test[start:end,:]\n batch_data = torch.Tensor(batch_data)\n pred_mlp_batch = torch.nn.Softmax(dim = 
1)(self.mlp(batch_data.cuda())).cpu().data.numpy()\n pred_mlp.append(pred_mlp_batch.copy())\n return np.concatenate(pred_mlp,axis = 0)\n \n \n \n\n \n","sub_path":"model_dev/mlp2.py","file_name":"mlp2.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"549869490","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, TimeDistributed, RepeatVector\nfrom keras.layers import LSTM as lstm\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, TensorBoard\nfrom keras.models import load_model\nfrom keras import regularizers\nimport pickle\n\nclass LSTM:\n\tdef buildModel(shape, lstm_layers, lstm_units, dense_layers, dense_units):\n\t\tprint(shape)\n\t\tmodel = Sequential()\n\t\t# LSTM layers\t\t\n\t\tfor l in range(lstm_layers):\n\t\t\tif l == 0:\n\t\t\t\tthis_shape = (shape[1], shape[2])\n\t\t\telse:\n\t\t\t\tthis_shape = (lstm_units, 1)\n\n\t\t\tif l == lstm_layers - 1:\n\t\t\t\tthis_return = False\n\t\t\telse:\n\t\t\t\tthis_return = True\n\n\t\t\tmodel.add(lstm(lstm_units, input_shape=this_shape, return_sequences=this_return))\t\t\t\t\n\t\t\tmodel.add(Activation('relu'))\n\n\t\t# Dense layers\n\t\tfor l in range(dense_layers):\t\t\t\t\n\t\t\tmodel.add(Dense(dense_units))\n\t\t\tmodel.add(Activation('relu'))\n\t\t\tmodel.add(Dropout(0.1))\t\n\t\t\n\t\tmodel.add(Dense(1))\t\t\t\t\n\t\tmodel.compile(loss=\"mse\", optimizer=\"adam\", metrics=['mae'])\n\t\tmodel.summary()\n\t\treturn model\n\n\tdef train(x_train, y_train, x_val, y_val, model, model_name):\n\t\tcallback_earlystop = EarlyStopping(monitor=\"loss\", patience=10, verbose=1, mode=\"auto\")\n\t\tcallback_tensorboard = TensorBoard(log_dir='../logs/{}'.format(model_name))\n\t\tmodel.fit(x_train, y_train, epochs=40, batch_size=128, \n\t\t\tvalidation_data=(x_val, y_val), callbacks=[callback_tensorboard])\n\t\treturn model\n\n\tdef saveModel(model, filename):\n\t\tmodel.save(filename)\n\t\tprint('Saved model to ' + filename)\n\n\tdef loadModel(filename):\t\t\n\t\treturn load_model(filename)\n\t\tprint('Loaded model from ' + filename)\n\n","sub_path":"scripts/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42136935","text":"# Copyright (c) 2015 Metaswitch Networks\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom subprocess import call, check_output, check_call\nimport socket\nimport logging\nimport logging.handlers\nimport os\nimport errno\nimport sys\nimport uuid\n\nfrom netaddr import IPNetwork, IPAddress\n\nfrom pycalico.datastore import IF_PREFIX\nfrom pycalico.datastore_datatypes import Endpoint, VETH_NAME\n\n_log = logging.getLogger(__name__)\n\nHOSTNAME = socket.gethostname()\n\nROOT_NETNS = \"1\"\n\"\"\"The pid of the root namespace. 
On almost all systems, the init system is\npid 1\n\"\"\"\n\nPREFIX_LEN = {4: 32, 6: 128}\n\"\"\"The IP address prefix length to assign, by IP version.\"\"\"\n\nPROC_ALIAS = \"/proc_host\"\n\"\"\"The alias for /proc. This is useful when the filesystem is containerized.\n\"\"\"\n\n\ndef setup_logging(logfile, level=logging.INFO):\n _log.setLevel(level)\n formatter = logging.Formatter(\n '%(asctime)s [%(levelname)s] %(name)s %(lineno)d: %(message)s')\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(formatter)\n _log.addHandler(handler)\n handler = logging.handlers.TimedRotatingFileHandler(logfile,\n when='D',\n backupCount=10)\n handler.setLevel(level)\n handler.setFormatter(formatter)\n _log.addHandler(handler)\n\n\ndef remove_endpoint(ep_id):\n \"\"\"\n Remove an endpoint.\n\n :param ep_id: The endpoint ID to remove\n :return: Nothing\n \"\"\"\n iface = IF_PREFIX + ep_id[:11]\n call(\"ip link delete %s\" % iface, shell=True)\n\n\ndef add_ip_to_interface(container_pid, ip, interface_name,\n proc_alias=PROC_ALIAS):\n \"\"\"\n Add an IP to an interface in a container.\n\n :param container_pid: The PID of the namespace to operate in.\n :param ip: The IPAddress to add.\n :param interface_name: The interface to add the address to.\n :param proc_alias: The location of the /proc filesystem on the host.\n :return: None. raises CalledProcessError on error.\n \"\"\"\n with NamedNamespace(container_pid, proc=proc_alias) as ns:\n ns.check_call(\"ip -%(version)s addr add %(addr)s/%(len)s \"\n \"dev %(device)s\" %\n {\"version\": ip.version,\n \"len\": PREFIX_LEN[ip.version],\n \"addr\": ip,\n \"device\": interface_name},\n shell=True)\n\n\ndef remove_ip_from_interface(container_pid, ip, interface_name,\n proc_alias=PROC_ALIAS):\n \"\"\"\n Remove an IP from an interface in a container.\n\n :param container_pid: The PID of the namespace to operate in.\n :param ip: The IPAddress to remove.\n :param interface_name: The interface to remove the address from.\n :param proc_alias: The location of the /proc filesystem on the host.\n :return: None. raises CalledProcessError on error.\n \"\"\"\n with NamedNamespace(container_pid, proc=proc_alias) as ns:\n ns.check_call(\"ip -%(version)s addr del %(addr)s/%(len)s \"\n \"dev %(device)s\" %\n {\"version\": ip.version,\n \"len\": PREFIX_LEN[ip.version],\n \"addr\": ip,\n \"device\": interface_name},\n shell=True)\n\n\ndef set_up_endpoint(ip, hostname, orchestrator_id, workload_id, cpid, next_hop_ips,\n veth_name=VETH_NAME,\n proc_alias=PROC_ALIAS,\n mac=None):\n \"\"\"\n Set up an endpoint (veth) in the network namespace identified by the PID.\n\n :param ip: The IP address to assign to the endpoint (veth) as Netaddr\n IPAddress.\n :param hostname: The host that this endpoint's workload resides on.\n :param orchestrator_id: The orchestrator_id that this endpoint was created on.\n :param workload_id: The workload_id that this endpoint resides on.\n :param cpid: The PID of a process currently running in the namespace.\n :param next_hop_ips: Dict of {version: IPAddress} for the next hops of the\n default routes namespace, as opposed to the root namespace. If so, this\n method also moves the other end of the veth into the root namespace.\n :param veth_name: The name of the interface inside the container namespace,\n e.g. eth1\n :param proc_alias: The location of the /proc filesystem on the host.\n :param mac: The interface MAC to use. 
Set to None to auto assign a MAC.\n :return: An Endpoint describing the veth just created.\n \"\"\"\n assert isinstance(ip, IPAddress)\n\n # Generate a new endpoint ID.\n ep_id = uuid.uuid1().hex\n\n iface = IF_PREFIX + ep_id[:11]\n iface_tmp = \"tmp\" + ep_id[:11]\n\n # Provision the networking. We create a temporary link from the proc\n # alias to the /var/run/netns to provide a named namespace. If we don't\n # do this, when run from the calico-node container the PID of the\n # container process is not recognised by `ip link set netns `\n # command because that uses /proc rather than the proc alias to\n # dereference the PID.\n with NamedNamespace(cpid, proc=proc_alias) as ns:\n # Create the veth pair and move one end into container:\n check_call(\"ip link add %s type veth peer name %s\" %\n (iface, iface_tmp),\n shell=True)\n check_call(\"ip link set %s up\" % iface, shell=True)\n check_call(\"ip link set %s netns %s\" % (iface_tmp, ns.name),\n shell=True)\n\n if mac:\n ns.check_call(\"ip link set dev %s name %s address %s\" %\n (iface_tmp, veth_name, str(mac)),\n shell=True)\n else:\n ns.check_call(\"ip link set dev %s name %s\" %\n (iface_tmp, veth_name),\n shell=True)\n ns.check_call(\"ip link set %s up\" % veth_name, shell=True)\n\n # Add an IP address.\n add_ip_to_interface(cpid, ip, veth_name, proc_alias=proc_alias)\n\n with NamedNamespace(cpid, proc=proc_alias) as ns:\n # Connected route to next hop & default route.\n next_hop = next_hop_ips[ip.version]\n ns.check_call(\"ip -%(version)s route replace\"\n \" %(next_hop)s dev %(device)s\" %\n {\"version\": ip.version,\n \"device\": veth_name,\n \"next_hop\": next_hop},\n shell=True)\n ns.check_call(\"ip -%(version)s route replace\"\n \" default via %(next_hop)s dev %(device)s\" %\n {\"version\": ip.version,\n \"device\": veth_name,\n \"next_hop\": next_hop},\n shell=True)\n\n # Get the MAC address.\n mac = ns.check_output(\n \"ip link show %s | grep ether | awk '{print $2}'\" %\n (veth_name), shell=True).strip()\n\n # Return an Endpoint.\n network = IPNetwork(IPAddress(ip))\n ep = Endpoint(hostname=hostname,\n orchestrator_id=orchestrator_id,\n workload_id=workload_id,\n endpoint_id=ep_id,\n state=\"active\",\n mac=mac)\n ep.if_name = veth_name\n if network.version == 4:\n ep.ipv4_nets.add(network)\n ep.ipv4_gateway = next_hop\n else:\n ep.ipv6_nets.add(network)\n ep.ipv6_gateway = next_hop\n return ep\n\n\ndef reinstate_endpoint(cpid, old_endpoint, next_hop_ips,\n proc_alias=PROC_ALIAS):\n \"\"\"\n Re-instate and endpoint that has been removed.\n :param hostname: The hostname this endpoint resides in\n :param cpid: The PID of the namespace to operate in.\n :param old_endpoint: The old endpoint that is being re-instated.\n :param next_hop_ips: Dict of {version: IPAddress} for the next hops of the\n default routes namespace.\n :param proc_alias: The location of the /proc filesystem on the host.\n :return: A new Endpoint replacing the old one.\n \"\"\"\n nets = old_endpoint.ipv4_nets | old_endpoint.ipv6_nets\n if_name = old_endpoint.if_name\n net = nets.pop()\n new_endpoint = set_up_endpoint(ip=net.ip,\n hostname=old_endpoint.hostname,\n orchestrator_id=old_endpoint.orchestrator_id,\n workload_id=old_endpoint.workload_id,\n cpid=cpid,\n next_hop_ips=next_hop_ips,\n veth_name=if_name,\n proc_alias=proc_alias,\n ep_id=old_endpoint.endpoint_id,\n mac=old_endpoint.mac)\n for net in nets:\n add_ip_to_interface(cpid, net.ip, if_name, proc_alias=proc_alias)\n\n # Copy across the IP and profile data from the old endpoint since this is\n # 
unchanged.\n new_endpoint.ipv4_nets = set(old_endpoint.ipv4_nets)\n new_endpoint.ipv6_nets = set(old_endpoint.ipv6_nets)\n new_endpoint.profile_ids = old_endpoint.profile_ids\n\n return new_endpoint\n\n\nclass NamedNamespace(object):\n \"\"\"\n Create a named namespace to allow commands to be run within the namespace\n in both the calico-node and within the root namespace.\n \"\"\"\n def __init__(self, cpid, proc=PROC_ALIAS):\n self.name = uuid.uuid1().hex\n self.pid_dir = \"%s/%s/ns/net\" % (proc, cpid)\n self.nsn_dir = \"/var/run/netns/%s\" % self.name\n if not os.path.exists(self.pid_dir):\n raise NamespaceError(\"Namespace pseudofile %s does not exist.\" %\n self.pid_dir)\n\n def __enter__(self):\n \"\"\"\n Add the appropriate configuration to name the namespace. This links\n the PID to the namespace name.\n \"\"\"\n _log.debug(\"Creating link between namespace %s and PID %s\",\n self.name, self.pid_dir)\n try:\n os.makedirs(\"/var/run/netns\")\n except os.error as oserr:\n if oserr.errno != errno.EEXIST:\n _log.error(\"Unable to create /var/run/netns dir\")\n raise\n os.symlink(self.pid_dir, self.nsn_dir)\n return self\n\n def __exit__(self, _type, _value, _traceback):\n try:\n os.unlink(self.nsn_dir)\n except BaseException:\n _log.exception(\"Failed to remove link: %s\", self.nsn_dir)\n return False\n\n def check_call(self, command, shell=False):\n \"\"\"\n Run a command within the named namespace.\n :param command: The command to run.\n :param shell: Whether this is a shell command.\n \"\"\"\n _log.debug(\"Run command: %s\", command)\n check_call(\"ip netns exec %s %s\" % (self.name, command), shell=shell)\n\n def check_output(self, command, shell=False):\n \"\"\"\n Run a command within the named namespace.\n :param command: The command to run.\n :param shell: Whether this is a shell command.\n \"\"\"\n _log.debug(\"Run command: %s\", command)\n return check_output(\"ip netns exec %s %s\" % (self.name, command),\n shell=shell)\n\nclass NamespaceError(Exception):\n \"\"\"\n Error creating or manipulating a network namespace.\n \"\"\"\n pass\n","sub_path":"calico_containers/pycalico/netns.py","file_name":"netns.py","file_ext":"py","file_size_in_byte":11906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"132933895","text":"#!/usr/bin/env python3\nimport aiwolfpy\nimport aiwolfpy.contentbuilder as cb\n\nimport sys\nimport os\nfrom collections import namedtuple\nimport itertools\nimport random\n\nimport numpy as np\n\nclass Agent(object):\n ROLE5_LIST = ['SEER','VILLAGER','POSSESSED','WEREWOLF']\n VILLAGE = ['VILLAGER', 'SEER', 'MEDIUM', 'BODYGUARD']\n WOLF = ['WEREWOLF', 'POSSESSED']\n\n def __init__(self, agent_name):\n self.agent_name = agent_name\n\n def getName(self):\n return self.agent_name\n\n def update(self, base_info, diff_data, request):\n self.base_info = base_info\n self.diff_data = diff_data\n if request == 'DAILY_INITIALIZE':\n for i in range(diff_data.shape[0]):\n if diff_data.type[i] == 'identify':\n self.not_reported = True\n self.my_result = diff_data.text[i]\n if diff_data.type[i] == 'divine':\n self.not_reported = True\n self.my_result = diff_data.text[i]\n if diff_data.type[i] == 'guard':\n self.my_result = diff_data.text[i]\n if self.my_role == 'POSSESSED':\n self.not_reported = True\n\n for i in range(diff_data.shape[0]):\n if diff_data.type[i] == 'talk':\n talk = diff_data.text[i].split()\n talker = diff_data.agent[i]\n if talk[0] == 'COMINGOUT':\n self.coMap[talker] = talk[2]\n elif talk[0] == 
'DIVINED':\n if talker not in self.divineMap.keys():\n self.divineMap[talker] = dict()\n self.divineMap[talker][talk[1]] = talk[2]\n elif talk[0] == 'IDENTIFIED':\n if talker not in self.mediumMap.keys():\n self.mediumMap[talker] = dict()\n self.mediumMap[talker][talk[1]] = talk[2]\n elif diff_data.type[i] == 'execute':\n self.executedAgents.append(diff_data.agent[i])\n self.aliveOthers.remove(diff_data.agent[i])\n elif diff_data.type[i] == 'dead':\n self.killedAgents.append(diff_data.agent[i])\n self.aliveOthers.remove(diff_data.agent[i])\n\n def initialize(self, base_info, diff_data, game_setting):\n self.base_info = base_info\n # game_setting\n self.game_setting = game_setting\n # print(\"initialize\")\n # print(base_info)\n # print(diff_data)\n\n self.comingout = ''\n self.my_result = ''\n self.not_reported = False\n self.vote_declare = 0\n self.talk_turn = 0\n self.day = -1\n self.my_id = base_info['agentIdx']\n self.my_role = base_info['myRole']\n self.agent_num = len(base_info['statusMap'])\n self.coMap = dict()\n self.divineMap = dict()\n self.mediumMap = dict()\n self.aliveOthers = list(range(1,16))\n self.aliveOthers.remove(self.my_id)\n self.divine_list = list(range(1,16))\n self.divine_list.remove(self.my_id)\n self.executedAgents = []\n self.killedAgents = []\n self.talkQueue = []\n self.whisperQueue = []\n self.humans = []\n self.werewolves = []\n\n self.finished = False\n\n def dayStart(self):\n self.day += 1\n self.vote_declare = 0\n self.talk_turn = 0\n return None\n\n def talk(self):\n self.talk_turn += 1\n # comingout\n if self.comingout == '':\n if self.my_role == 'SEER':\n self.comingout = 'SEER'\n elif self.my_role == 'MEDIUM':\n self.comingout = 'MEDIUM'\n elif self.my_role == 'POSSESSED':\n self.comingout = 'SEER'\n return cb.comingout(self.my_id, self.comingout)\n\n #report\n if self.not_reported:\n if self.my_role == 'SEER':\n self.not_reported = False\n return self.my_result\n elif self.my_role == 'MEDIUM':\n self.not_reported = False\n return self.my_result\n elif self.my_role == 'POSSESSED':\n self.not_reported = False\n agent = random.choice(self.aliveOthers)\n self.my_result = 'DIVINED Agent['+\"{0:02d}\".format(agent)+'] HUMAN'\n return self.my_result\n\n #declare vote\n if self.vote_declare != self.vote():\n self.vote_declare = self.vote()\n return cb.vote(self.vote_declare)\n\n #skip\n if self.talk_turn <= 10:\n return cb.skip()\n\n return cb.over()\n\n def whisper(self):\n return cb.over()\n\n def vote(self):\n return random.choice(self.aliveOthers)\n\n def attack(self):\n return random.choice(self.aliveOthers)\n\n def divine(self):\n list = self.and_list(self.aliveOthers, self.divine_list)\n if len(list) == 0:\n return self.my_id\n agent = random.choice(list)\n self.divine_list.remove(agent)\n return agent\n\n def guard(self):\n agent = random.choice(list(self.divineMap.keys()))\n if agent not in self.aliveOthers:\n agent = random.choice(self.aliveOthers)\n return agent\n\n def finish(self):\n if self.finished is True:\n return None\n self.finished = True\n\n return None\n\n def daily_finish(self):\n pass\n\n def diff_list(self, list1, list2):\n return list(set(list1) - set(list2))\n\n def and_list(self, list1, list2):\n return list(set(list1) & set(list2))\n\nagent = Agent('Flex5')\n\n# run\nif __name__ == '__main__':\n 
aiwolfpy.connect_parse(agent)\n","sub_path":"AIWolf-server/agent/GAT2018/flex/flex15agent.py","file_name":"flex15agent.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"356216397","text":"# -*- coding: utf-8 -*-\n\nimport importlib.util as iu\nimport logging\n\nfrom discord import File, Embed\n\ntbl = logging.getLogger('TBL')\n\n\ndef setup_badges(client):\n badges = []\n path = client.paths['badges']\n scripts = [script for script in path.iterdir() if str(script).endswith('.py')]\n for script in scripts:\n spec = iu.spec_from_file_location(script.name.split('.')[0], script)\n if spec:\n mod = iu.module_from_spec(spec)\n spec.loader.exec_module(mod)\n badges += [mod.Badge()]\n tbl.info(\"%s badge loaded.\" % badges[-1].name)\n\n return badges\n\n\nasync def check_badges(client, ctx):\n if not client.badges:\n return\n for badge in client.badges:\n award = await badge.trigger(ctx)\n if award:\n msg = \"%s has leveled up a badge!\\n%s\\n%s\" % (award[0],\n badge.flavor,\n badge.levels[award[1]]['award']['text'])\n e = Embed(title=\"%s has leveled up a badge!\" % award[0])\n e.add_field(name=\"badge: \",\n value=\"%s\" % badge.flavor,\n inline=False)\n e.add_field(name=\"lvl: %s - \" % award[1],\n value=\"%s\" % badge.levels[award[1]]['award']['text'],\n inline=False)\n await ctx.message.channel.send(embed=e)\n\n if badge.levels[award[1]]['award']['image']:\n path = client.paths['badges'].joinpath('awards').joinpath(badge.levels[award[1]]['award']['image'])\n with path.open('rb') as f:\n fname = File(f)\n await ctx.message.channel.send(\":trophy:\", file=fname)\n\n\nasync def brag(ctx):\n badges = ctx.client.profiles['users'][ctx.message.author.name]['badges']\n title = ctx.client.profiles['users'][ctx.message.author.name]['title']\n flair = ctx.client.profiles['users'][ctx.message.author.name]['flair']\n e = Embed(title=\"%s\" % ctx.message.author.name)\n if title:\n e.add_field(name=\"Title: \",\n value=title,\n inline=False)\n if flair:\n e.add_field(name=\"Flair: \",\n value=flair,\n inline=False)\n for key in badges:\n e.add_field(name=\"%s: %s\" % (key, badges[key]['title']),\n value=\"lvl: %s\\nxp: %s\\n%s\" % (badges[key]['lvl'], badges[key]['xp'], badges[key]['flair']),\n inline=False)\n await ctx.message.channel.send(embed=e)","sub_path":"tbot/badges.py","file_name":"badges.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"532016564","text":"import cudf,cugraph\nfrom datetime import datetime\nfrom sys import argv \n\nfname = argv[1]\ntries = int(argv[2])\n\nedge_df = cudf.read_csv(fname, names=['out','in'], sep=' ')\ngraph = cugraph.from_edgelist(edge_df,'out','in')\n\nfor r in range(tries):\n start_time = datetime.now()\n ccxx_results = cugraph.connected_components(graph)\n end_time = datetime.now()\n secs = (end_time - start_time).total_seconds()\n\n print(f'total ccxx time: {secs}')","sub_path":"benchmark/ccxx/ccxx_cugraph.py","file_name":"ccxx_cugraph.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"495972920","text":"# -*- coding:utf-8 -*-\n\n\nclass Stack(object):\n\n def __init__(self):\n self.list = []\n\n def push(self, val):\n self.list.append(val)\n\n def pop(self):\n return self.list.pop(len(self.list) - 1)\n\n def isEmpty(self):\n return len(self.list) == 0\n\n\ndef 
valid_parentheses(array):\n s = Stack()\n for i in array:\n # 左括号入栈\n if i in ['(', '[', '{']:\n s.push(i)\n continue\n # 匹配右括号\n if i == ')':\n if not s.isEmpty() and s.pop() == '(':\n continue\n else:\n return False\n if i == ']':\n if not s.isEmpty() and s.pop() == '[':\n continue\n else:\n return False\n\n if i == '}':\n if not s.isEmpty() and s.pop() == '{':\n continue\n else:\n return False\n\n return True\n\n\nif __name__ == '__main__':\n assert valid_parentheses('()[]{}') is True\n assert valid_parentheses('([)]') is False\n","sub_path":"leetcode/0020.valid-parentheses/valid-parentheses.py","file_name":"valid-parentheses.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"17649139","text":"'''\nCreated on Oct 12, 2016\n\n@author: mwitt_000\n'''\nimport queue\nimport threading\n\n\n## wrapper class for a queue of packets\nclass Interface:\n ## @param maxsize - the maximum size of the queue storing packets\n def __init__(self, maxsize=0):\n self.queue = queue.Queue(maxsize);\n \n ##get packet from the queue interface\n def get(self):\n try:\n return self.queue.get(False)\n except queue.Empty:\n return None\n \n ##put the packet into the interface queue\n # @param pkt - Packet to be inserted into the queue\n # @param block - if True, block until room in queue, if False may throw queue.Full exception\n def put(self, pkt, block=False):\n self.queue.put(pkt, block)\n \n## Implements a network layer packet (different from the RDT packet \n# from programming assignment 2).\n# NOTE: This class will need to be extended to for the packet to include\n# the fields necessary for the completion of this assignment.\nclass NetworkPacket:\n ## packet encoding lengths \n dst_addr_S_length = 5\n #1 if this this a fragment, 0 otherwise (or if it is the end of the fragmented message)\n frag_flag = 0\n frag_flag_L = 1\n #packets are, by default, 50 characters. 
5 of them are the address, 45 are the content\n packet_length = 50\n packet_length_L = 5\n #set to the network_packet_id\n packet_id = 0\n packet_id_L = 4\n #the offset is the number of bits/8 (i.e., it is the number of Bytes)\n offset = 0\n offset_L = 5\n\n data_S = ''\n dst_addr = ''\n\n def set_frag_flag(self, frag_flag):\n self.frag_flag = frag_flag\n def set_packet_length(self, new_length):\n self.packet_length = new_length\n def set_offset(self, new_offset):\n self.offset = new_offset\n def set_data(self, new_data):\n self.data_S = new_data\n\n ##@param dst_addr: address of the destination host\n # @param data_S: packet payload\n def __init__(self, dst_addr, packet_id, data_S):\n self.packet_id = packet_id\n self.dst_addr = dst_addr\n self.data_S = data_S\n \n ## called when printing the object\n def __str__(self):\n return self.to_byte_S()\n \n ## convert packet to a byte string for transmission over links\n #packet is: (dest addr (5 char) packet_id (4 char) frag_flag (1 char) packet_length (5 char) offset(5 char) data)\n def to_byte_S(self):\n byte_S = str(self.dst_addr).zfill(self.dst_addr_S_length)\n byte_S += str(self.packet_id).zfill(self.packet_id_L)\n byte_S += str(self.frag_flag).zfill(self.frag_flag_L)\n byte_S += str(self.packet_length).zfill(self.packet_length_L)\n byte_S += str(self.offset).zfill(self.offset_L)\n byte_S += self.data_S\n return byte_S\n \n ## extract a packet object from a byte string\n # @param byte_S: byte string representation of the packet\n @classmethod\n def from_byte_S(self, byte_S):\n start_index = 0\n end_index = NetworkPacket.dst_addr_S_length\n dst_addr = int(byte_S[start_index : end_index])\n start_index = end_index\n end_index += NetworkPacket.packet_id_L\n self.packet_id = int(byte_S[start_index : end_index])\n start_index = end_index\n end_index += NetworkPacket.frag_flag_L\n self.frag_flag = int(byte_S[start_index : end_index])\n start_index = end_index\n end_index += NetworkPacket.packet_length_L\n self.packet_length = int(byte_S[start_index : end_index])\n start_index = end_index\n end_index += NetworkPacket.offset_L\n self.offset = int(byte_S[start_index : end_index])\n start_index = end_index\n\n data_S = byte_S[start_index : ]\n return self(dst_addr, self.packet_id, data_S)\n \n def get_data_S(self):\n return self.data_S\n def get_dst_addr(self):\n return self.dst_addr \n def get_packet_id(self):\n return self.packet_id\n def get_packet_frag_flag(self):\n return self.frag_flag\n def get_packet_offset(self):\n return self.offset\n def get_packet_length(self):\n return self.packet_length\n\n## Implements a network host for receiving and transmitting data\nclass Host:\n packet_count = 0\n receiving_fragmented_packets = 0\n receiving_byte_S = ''\n ##@param addr: address of this node represented as an integer\n def __init__(self, addr):\n self.addr = addr\n self.in_intf_L = [Interface()]\n self.out_intf_L = [Interface()]\n self.stop = False #for thread termination\n \n ## called when printing the object\n def __str__(self):\n return 'Host_%s' % (self.addr)\n \n ## create a packet and enqueue for transmission\n # @param dst_addr: destination address for the packet\n # @param data_S: data being transmitted to the network layer\n def udt_send(self, dst_addr, data_S):\n print(\"Sending packet... 
%s\" % data_S)\n # Should eventually update to check the max length the next link takes instead of using a magic number\n data_strings = []\n header_length = 20\n if len(data_S) > 50:\n data_strings = self.data_split(data_S, 30)\n else:\n data_strings.append(data_S)\n for data in data_strings:\n p = NetworkPacket(dst_addr, self.packet_count, data)\n print(\"Sam - \"+str(self.out_intf_L[0]))\n self.out_intf_L[0].put(p.to_byte_S()) #send packets always enqueued successfully\n print('%s: sending packet \"%s\"' % (self, p))\n self.packet_count += 1\n\n def data_split(self, data_S, max_len):\n data_strings = []\n # chop data string until it meets the max size\n while len(data_S) >= max_len:\n data_strings.append(data_S[0 : max_len])\n print(\"Data partial string = \" + data_S[0 : max_len])\n data_S = data_S[max_len : ]\n #add remaining string\n if len(data_S) > 0:\n data_strings.append(data_S)\n return data_strings\n \n ## receive packet from the network layer\n def udt_receive(self):\n pkt_S = self.in_intf_L[0].get()\n if pkt_S is not None:\n p = NetworkPacket.from_byte_S(pkt_S)\n if p.get_packet_frag_flag() == 1:\n self.receiving_fragmented_packets = 1\n self.receiving_byte_S += p.get_data_S()\n print(\"Received fragmented packet, appending '\"+p.get_data_S()+\"' onto our string\")\n print(\"Current appendage: \" + self.receiving_byte_S)\n elif self.receiving_fragmented_packets == 1 and p.get_packet_frag_flag() == 0: #self.receiving_fragmented_packets == 1:\n print(\"Received final fragmented packet\")\n self.receiving_fragmented_packets = 0\n self.receiving_byte_S += p.get_data_S()\n p_final = NetworkPacket(p.get_dst_addr(), p.get_packet_id(), self.receiving_byte_S)\n p_final.set_packet_length(20 + len(self.receiving_byte_S))\n p_final.set_offset(0)\n print('%s: received packet \"%s\"' % (self, p_final))\n self.receiving_byte_S = ''\n else:\n self.receiving_byte_S = ''\n print('%s: received packet \"%s\"' % (self, pkt_S))\n \n ## thread target for the host to keep receiving data\n def run(self):\n print (threading.currentThread().getName() + ': Starting')\n while True:\n #receive data arriving to the in interface\n self.udt_receive()\n #terminate\n if(self.stop):\n print (threading.currentThread().getName() + ': Ending')\n return\n \n\n\n## Implements a multi-interface router described in class\nclass Router:\n \n ##@param name: friendly router name for debugging\n # @param intf_count: the number of input and output interfaces \n # @param max_queue_size: max queue length (passed to Interface)\n def __init__(self, name, intf_count, max_queue_size, outgoing_l_mtu):\n self.stop = False #for thread termination\n self.name = name\n #create a list of interfaces\n self.in_intf_L = [Interface(max_queue_size) for _ in range(intf_count)]\n self.out_intf_L = [Interface(max_queue_size) for _ in range(intf_count)]\n self.outgoing_l_mtu = outgoing_l_mtu\n\n ## called when printing the object\n def __str__(self):\n return 'Router_%s' % (self.name)\n\n ## look through the content of incoming interfaces and forward to\n # appropriate outgoing interfaces\n def forward(self):\n for i in range(len(self.in_intf_L)):\n pkt_S = None\n try:\n #get packet from interface i\n pkt_S = self.in_intf_L[i].get()\n #if packet exists make a forwarding decision\n if pkt_S is not None:\n p = NetworkPacket.from_byte_S(pkt_S) #parse a packet out\n data_S = p.data_S\n offset = 0\n #break up our packet here if the outgoing link's mtu is too small\n if len(p.to_byte_S()) > self.outgoing_l_mtu:\n print (\"PACKET TOO BIG, mtu only 
\"+str(self.outgoing_l_mtu))\n p_new = NetworkPacket(p.get_dst_addr(), p.get_packet_id(), p.get_data_S())\n p_new.set_data(data_S[0:self.outgoing_l_mtu - 20])\n p_new.set_frag_flag(1)\n p_new.set_offset(offset)\n count = 0\n while len(data_S)+20 > self.outgoing_l_mtu:\n self.out_intf_L[i].put(p_new.to_byte_S(), True)\n print('%s: forwarding packet \"%s\" from interface %d to %d' % (self, p_new, i, i))\n p_new = NetworkPacket(p.get_dst_addr(), p.get_packet_id(), p.get_data_S())\n data_S = data_S[self.outgoing_l_mtu - 20:]\n offset += self.outgoing_l_mtu - 20\n p_new.set_data(data_S[0:self.outgoing_l_mtu - 20])\n p_new.set_frag_flag(1)\n p_new.set_offset(offset)\n if count > 10:\n break\n count+=1\n p_new.set_frag_flag(0)\n p_new.set_packet_length(len(data_S) + 20)\n self.out_intf_L[i].put(p_new.to_byte_S(), True)\n print('%s: forwarding packet \"%s\" from interface %d to %d' % (self, p_new, i, i))\n else:\n # HERE you will need to implement a lookup into the \n # forwarding table to find the appropriate outgoing interface\n # for now we assume the outgoing interface is also i\n self.out_intf_L[i].put(p.to_byte_S(), True)\n print('%s: forwarding packet \"%s\" from interface %d to %d' % (self, p, i, i))\n except queue.Full:\n print('%s: packet \"%s\" lost on interface %d' % (self, p, i))\n pass\n \n ## thread target for the host to keep forwarding data\n def run(self):\n print (threading.currentThread().getName() + ': Starting')\n while True:\n self.forward()\n if self.stop:\n print (threading.currentThread().getName() + ': Ending')\n return\n ","sub_path":"part2/network_2.py","file_name":"network_2.py","file_ext":"py","file_size_in_byte":11553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"19021184","text":"import re\nimport subprocess\nimport sqlite3\n\nDB_FILE = '/home/peter/projects/jdk/db/jdk_entries.db'\nCURRENT_DATESTAMP = \"DATETIME(CURRENT_TIMESTAMP, 'localtime')\" # special sqlite3 syntax\nTEMP_FILE = '/home/peter/projects/jdk/db/temp.jdk'\n\ndef db_execute(sql, quote_tuple, expect_return_values=False):\n db_connection = sqlite3.connect(DB_FILE) \n db_cursor = db_connection.cursor()\n db_cursor.execute(sql, quote_tuple)\n \n if (expect_return_values):\n return_values = db_cursor.fetchall()\n else:\n return_values = None\n db_connection.commit()\n\n db_connection.close()\n\n return return_values \n\ndef db_execute_old(sql, expect_return_values=False):\n db_connection = sqlite3.connect(DB_FILE) \n db_cursor = db_connection.cursor()\n db_cursor.execute(sql)\n \n if (expect_return_values):\n return_values = db_cursor.fetchall()\n else:\n return_values = None\n db_connection.commit()\n\n db_connection.close()\n\n return return_values \n\nclass Entry:\n\n entry_id = None\n title = None\n body = None\n date_created = None\n date_last_modified = None\n writeable = True \n\n def __init__(self, entry_id=None, title=None, writeable=True):\n \n if (entry_id):\n self.entry_id = entry_id \n self.populate_entry_data()\n self.writeable = writeable\n\n else:\n self.create_entry()\n self.set_entry_id()\n\n if(title):\n self.title = title\n self.update_title()\n\n @staticmethod\n def get_home_screen_data():\n quote_tuple = ()\n sql = \"SELECT jdk_entries.id, jdk_entries.title \" + \\\n \"FROM jdk_entries \" + \\\n \"ORDER BY date_last_modified DESC \" + \\\n \"LIMIT 30;\" \n\n return db_execute(sql, quote_tuple, True)\n\n @staticmethod\n def search_existing_entries(keyword=None, from_date=None, to_date=None):\n total_parameters = 0\n 
quote_tuple = ()\n\n if (keyword):\n keyword_string = '(jdk_entries.title LIKE ? OR ' + \\\n 'jdk_entries.body LIKE ?) '\n total_parameters += 1\n keyword = '%' + keyword + '%'\n quote_tuple += keyword, keyword\n else:\n keyword_string = \"\" \n \n if (from_date):\n from_date_string = \"jdk_entries.date_last_modified >= ? \"\n total_parameters += 1\n quote_tuple += from_date,\n else:\n from_date_string = \"\"\n\n if (to_date):\n to_date_string = \"jdk_entries.date_last_modified <= ? \"\n total_parameters += 1\n quote_tuple += to_date,\n else:\n to_date_string = \"\"\n\n sql = \"SELECT jdk_entries.id, jdk_entries.title \" + \\\n \"FROM jdk_entries \" + \\\n \"WHERE \" + \\\n keyword_string + \\\n (\"AND \" if (keyword_string and (from_date or to_date)) else \"\") + \\\n from_date_string + \\\n (\"AND \" if (from_date and to_date) else \"\") + \\\n to_date_string + \\\n \"LIMIT 30;\"\n\n return db_execute(sql, quote_tuple, True)\n\n def populate_entry_data(self):\n quote_tuple = self.entry_id,\n\n sql = \"SELECT jdk_entries.title, jdk_entries.body \" + \\\n \"FROM jdk_entries \" + \\\n \"WHERE jdk_entries.id = ?;\" \n\n self.title, self.body = db_execute(sql, quote_tuple, True)[0] # returns array\n \n return None\n \n def create_entry(self):\n quote_tuple = CURRENT_DATESTAMP, CURRENT_DATESTAMP\n\n sql = \"INSERT INTO jdk_entries \" + \\\n \"(title, body, date_created, date_last_modified)\" + \\\n \"VALUES ('', '', ?, ?);\"\n\n db_execute(sql, quote_tuple)\n\n return None\n\n def set_entry_id(self):\n quote_tuple = ()\n \n sql = \"SELECT MAX(id) FROM jdk_entries;\"\n\n # returns a nested list, need to get at it with array syntax\n self.entry_id = str(db_execute(sql, quote_tuple, True)[0][0]); \n\n return None\n\n def update_title(self, title = None):\n if (not self.title):\n self.title = title\n\n quote_tuple = self.title, self.entry_id\n\n # This will fall to a sql injection \n sql = \"UPDATE jdk_entries SET title = ?\" + \\\n \"WHERE jdk_entries.id = ?;\" \n\n db_execute(sql, quote_tuple)\n \n self.update_date_modified()\n\n return None\n\n def update_date_modified(self):\n quote_tuple = CURRENT_DATESTAMP, self.entry_id\n\n sql = \"UPDATE jdk_entries \" + \\\n \"SET date_last_modified = ? \" + \\\n \"WHERE jdk_entries.id = ?;\"\n \n db_execute(sql, quote_tuple)\n\n return None\n\n def edit_entry(self):\n self.create_temp_file()\n if (self.body is not None):\n self.write_body_to_temp_file() \n \n self.open_temp_file() # opens vim here\n \n body_new = self.get_temp_file_data()\n \n if (body_new != self.body):\n self.body = body_new \n \n quote_tuple = self.body, self.entry_id\n\n sql = \"UPDATE jdk_entries SET body = ? 
\" + \\\n \"WHERE jdk_entries.id = ?;\"\n\n db_execute(sql, quote_tuple)\n self.update_date_modified()\n \n self.remove_temp_file()\n \n return None\n\n def create_temp_file(self):\n subprocess.call(['touch', TEMP_FILE])\n\n return None\n\n def write_body_to_temp_file(self):\n temp_file = open(TEMP_FILE, \"w\")\n temp_file.write(self.body) \n temp_file.close()\n\n return None\n\n def open_temp_file(self):\n params = ['vim', TEMP_FILE]\n\n if (not self.writeable):\n params.insert(1, '-R') # vim syntax for read only\n\n subprocess.call(params)\n\n return None\n\n def get_temp_file_data(self):\n temp_file = open(TEMP_FILE, \"r\")\n temp_text = temp_file.read() \n temp_file.close()\n\n return temp_text\n\n def remove_temp_file(self):\n params = ['rm', TEMP_FILE]\n\n subprocess.call(params)\n \n return None\n \n def export_entry(self, location = None):\n if (not location):\n export_location = \"/home/peter/Desktop/\" + str(self.entry_id) + \".txt\"\n #subprocess.call(['touch', export_location])\n export_file = open(export_location, \"w\")\n #export_file.write(\"Date Created \" + self.date_created)\n #export_file.write(\"Date Last Modified \" + self.date_last_modified)\n export_file.write(\"id \" + str(self.entry_id) + \"\\n\")\n export_file.write(\"title \" + self.title + \"\\n\")\n export_file.write(self.body)\n export_file.close()\n\n return None\n","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"432749290","text":"\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom scipy.io import loadmat\r\n\r\n#%%\r\ndef conv(layer_name, x, out_channels, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=True):\r\n '''Convolution op wrapper, use RELU activation after convolution\r\n Args:\r\n layer_name: e.g. conv1, pool1...\r\n x: input tensor, [batch_size, height, width, channels]\r\n out_channels: number of output channels (or comvolutional kernels)\r\n kernel_size: the size of convolutional kernel, VGG paper used: [3,3]\r\n stride: A list of ints. 1-D of length 4. VGG paper used: [1, 1, 1, 1]\r\n is_pretrain: if load pretrained parameters, freeze all conv layers. 
\r\n Depending on different situations, you can just set part of conv layers to be freezed.\r\n the parameters of freezed layers will not change when training.\r\n Returns:\r\n 4D tensor\r\n '''\r\n \r\n in_channels = x.get_shape()[-1]\r\n \r\n with tf.variable_scope(layer_name):\r\n w = tf.get_variable(name='weights',\r\n trainable=is_pretrain,\r\n shape=[kernel_size[0], kernel_size[1], in_channels, out_channels],\r\n initializer=tf.contrib.layers.xavier_initializer()) # default is uniform distribution initialization\r\n b = tf.get_variable(name='biases',\r\n trainable=is_pretrain,\r\n shape=[out_channels],\r\n initializer=tf.constant_initializer(0.0))\r\n x = tf.nn.conv2d(x, w, stride, padding='SAME', name='conv')\r\n x = tf.nn.bias_add(x, b, name='bias_add')\r\n x = batch_norm(x) \r\n x = tf.nn.relu(x, name='relu')\r\n return x\r\n\r\n#%%\r\ndef pool(layer_name, x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True):\r\n '''Pooling op\r\n Args:\r\n x: input tensor\r\n kernel: pooling kernel, VGG paper used [1,2,2,1], the size of kernel is 2X2\r\n stride: stride size, VGG paper used [1,2,2,1]\r\n padding:\r\n is_max_pool: boolen\r\n if True: use max pooling\r\n else: use avg pooling\r\n '''\r\n if is_max_pool:\r\n x = tf.nn.max_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)\r\n else:\r\n x = tf.nn.avg_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)\r\n return x\r\n\r\n#%%\r\ndef batch_norm(x):\r\n\r\n epsilon = 1e-3\r\n batch_mean, batch_var = tf.nn.moments(x, [0])\r\n x = tf.nn.batch_normalization(x,\r\n mean=batch_mean,\r\n variance=batch_var,\r\n offset=None,\r\n scale=None,\r\n variance_epsilon=epsilon)\r\n return x\r\n\r\n#%%\r\ndef FC_layer(layer_name, x, out_nodes):\r\n '''Wrapper for fully connected layers with RELU activation as default\r\n Args:\r\n layer_name: e.g. 
'FC1', 'FC2'\r\n x: input feature map\r\n out_nodes: number of neurons for current FC layer\r\n '''\r\n shape = x.get_shape()\r\n if len(shape) == 4:\r\n size = shape[1].value * shape[2].value * shape[3].value\r\n else:\r\n size = shape[-1].value\r\n\r\n with tf.variable_scope(layer_name):\r\n w = tf.get_variable('weights',\r\n shape=[size, out_nodes],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n b = tf.get_variable('biases',\r\n shape=[out_nodes],\r\n initializer=tf.constant_initializer(0.0))\r\n flat_x = tf.reshape(x, [-1, size]) # flatten into 1D\r\n \r\n x = tf.nn.bias_add(tf.matmul(flat_x, w), b)\r\n x = tf.nn.relu(x)\r\n return x\r\n\r\n#%%\r\ndef loss(logits, labels):\r\n '''Compute loss\r\n Args:\r\n logits: logits tensor, [batch_size, n_classes]\r\n labels: one-hot labels\r\n '''\r\n lamda=0.01\r\n with tf.name_scope('loss') as scope:\r\n #cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels,name='cross-entropy')\r\n# loss = tf.reduce_mean(cross_entropy, name='loss')//lable is one-hot\r\n# tf.summary.scalar(scope+'/loss', loss)\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy_per_example')# lable is int\r\n loss = tf.reduce_mean(cross_entropy, name='loss')\r\n# print(\"loss1\",loss)\r\n total_vars = tf.trainable_variables()\r\n# print(\"total_vars\",total_vars)\r\n# weights_name_list = [var for var in total_vars if 'bias' not in var.name ]\r\n# loss_holder = []\r\n# for w in range(len(weights_name_list)):\r\n# l2_loss = tf.nn.l2_loss(weights_name_list[w])\r\n# loss_holder.append(l2_loss)\r\n# regular_loss = tf.reduce_mean(loss_holder)*lamda\r\n# loss = loss + regular_loss\r\n print(\"loss2\",loss)\r\n tf.summary.scalar(scope+'/loss', loss)\r\n return loss\r\n \r\n#%%\r\ndef accuracy(logits, labels):\r\n \"\"\"Evaluate the quality of the logits at predicting the label.\r\n Args:\r\n logits: Logits tensor, float - [batch_size, NUM_CLASSES].\r\n labels: Labels tensor, \r\n \"\"\"\r\n with tf.name_scope('accuracy') as scope:\r\n# correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))\r\n# correct = tf.cast(correct, tf.float32)\r\n# accuracy = tf.reduce_mean(correct)*100.0\r\n# tf.summary.scalar(scope+'/accuracy', accuracy)\r\n# max_index = np.argmax(logits)\r\n print(\"logits is \", logits)\r\n n=np.array(labels)\r\n print(\"labels is \", n)\r\n correct = tf.nn.in_top_k(logits, labels, 1) \r\n correct = tf.cast(correct, tf.float16) \r\n accuracy = tf.reduce_mean(correct)\r\n# compute the mean of correct([0,0,0,0,0,0,1,1,1,1,0,0,0,0,0])16 \r\n #accuracy=4/16\r\n tf.summary.scalar(scope+'/accuracy', accuracy) \r\n return accuracy \r\n \r\n\r\n\r\n\r\n#%%\r\ndef num_correct_prediction(logits, labels):\r\n \"\"\"Evaluate the quality of the logits at predicting the label.\r\n Return:\r\n the number of correct predictions\r\n \"\"\"\r\n# correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))\r\n# correct = tf.cast(correct, tf.int32)\r\n# n_correct = tf.reduce_sum(correct)\r\n# #Computes the sum of elements across dimensions of a tensor. 
\r\n# return n_correct\r\n correct = tf.nn.in_top_k(logits, labels, 1) \r\n correct = tf.cast(correct, tf.float16) \r\n accuracy = tf.reduce_mean(correct)\r\n# compute the mean of correct([0,0,0,0,0,0,1,1,1,1,0,0,0,0,0])16 \r\n tf.summary.scalar('/num_correct_prediction', accuracy) \r\n return accuracy \r\n\r\n\r\n\r\n#%%\r\ndef optimize(loss, learning_rate, global_step):\r\n '''optimization, use Gradient Descent as default\r\n '''\r\n with tf.name_scope('optimizer'):\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\r\n # optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate)\r\n global_step = tf.Variable(0, name='global_step', trainable=False)\r\n train_op = optimizer.minimize(loss, global_step= global_step)\r\n return train_op\r\n \r\n\r\n\r\n \r\n#%%\r\n \r\n\r\n#%% \r\ndef test_load():\r\n data_path = './/vgg-face.mat'\r\n x = loadmat(data_path)\r\n \r\n\r\n layers = x['layers']\r\n## current = input_maps\r\n# network = {}\r\n for layer in layers[0]:\r\n# print(\"layer************\\n\",layer)\r\n# \r\n name = layer[0]['name'][0][0]\r\n# # conv1_1 / relu1_1/ conv1_2/ relu1_2/conv2_1/relu2_1/conv2_2/\r\n print(\"name/n\",name)\r\n layer_type = layer[0]['type'][0][0]\r\n# \r\n if layer_type == 'conv':\r\n\r\n kernel,bias = layer[0]['weights'][0][0]\r\n \r\n \r\n print(\"kernel.shape\\n\",kernel.shape)\r\n print(\"*******************************\\n\")\r\n# print(\"kernel\\n\",kernel)\r\n #kernel = /home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation recognition/data1/training/np.squeeze(kernel)\r\n# print(\"***********************new kernel\\n\",kernel)\r\n print(\"***********************new kernel.shape\\n\",kernel.shape)\r\n print(\"*******************************\\n\")\r\n print(\"bias.shape\\n\",bias.shape)\r\n print(\"*******************************\\n\")\r\n# print(\"bias/n\",bias)\r\n\r\n\r\n \r\n#%% \r\n\r\n\r\ndef load_with_skip(data_path, session, skip_layer):\r\n \r\n x = loadmat(data_path)\r\n# layers = x['layers']\r\n layers = x['layers']\r\n \r\n for layer in layers[0]:\r\n layer_type = layer[0]['type'][0][0]\r\n name = layer[0]['name'][0][0]\r\n if name not in skip_layer: \r\n with tf.variable_scope(name, reuse=True):\r\n if layer_type == 'conv':\r\n# kernel, bias =layer[0]['weights'][0][0]\r\n# kernel = np.squeeze(kernel)\r\n# bias = np.squeeze(bias).reshape(-1)\r\n# for b in bias:\r\n kernel,bias =layer[0]['weights'][0][0]\r\n# for w in weight:\r\n kernel = np.squeeze(kernel)\r\n# print(\"*********************name\\n\",name) \r\n# print(\"**********************K\\n\",kernel)\r\n bias=np.squeeze(bias).reshape(-1)\r\n# print(\"***********************b\\n\",bias)\r\n# for subkey, data in zip(('weights'),kernel):\r\n session.run(tf.get_variable('weights').assign(kernel))\r\n session.run(tf.get_variable('biases').assign(bias))\r\n\r\n\r\n\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n#%%\r\ndef weight(kernel_shape, is_uniform = True):\r\n ''' weight initializer\r\n Args:\r\n shape: the shape of weight\r\n is_uniform: boolen type.\r\n if True: use uniform distribution initializer\r\n if False: use normal distribution initizalizer\r\n Returns:\r\n weight tensor\r\n '''\r\n w = tf.get_variable(name='weights',\r\n shape=kernel_shape,\r\n initializer=tf.contrib.layers.xavier_initializer()) \r\n return w\r\n\r\n#%%\r\ndef bias(bias_shape):\r\n '''bias initializer\r\n '''\r\n b = tf.get_variable(name='biases',\r\n shape=bias_shape,\r\n initializer=tf.constant_initializer(0.0))\r\n return 
b\r\n\r\n#%%\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n ","sub_path":"tools (1).py","file_name":"tools (1).py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"414110483","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport argparse\nimport pickle\nimport h5py\nimport os\n\nimport tensorflow as tf\nimport keras\nfrom keras.preprocessing import sequence\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.utils.np_utils import to_categorical\n#from keras.utils.vis_utils import plot_model\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import *\nfrom keras.engine.topology import Layer\n\nfrom model import *\nfrom utils import restore_from_save, tensors_key_in_file, prepare_data_for_emb, load_class_embedding, datagen\n\nparser = argparse.ArgumentParser(description='LEAM implementation in Keras')\n\nparser.add_argument('--fold', type=str, default='', help='current cross valid fold number')\nparser.add_argument('--data_path', type=str, default='dataset/', help='directory containing data')\nparser.add_argument('--emb_path', type=str, default='dataset/en_vector_google.pkl', help='directory containing word embeddings')\nparser.add_argument('--save_path', type=str, default='results/', help='path to save results')\n\nparser.add_argument('--batch_size', type=int, default=16, help='input batch size for training')\nparser.add_argument('--embedding_size', type=int, default=300, help='dimension of embeddings')\nparser.add_argument('--sentence_size', type=int, default=15, help='dimension of embeddings')\nparser.add_argument('--maxlen', type=int, default=146, help='maximum length of a sentence')\nparser.add_argument('--token_size', type=int, default=856701, help='vocabulary size')\nparser.add_argument('--class_num', type=int, default=2, help='number of classification classes')\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate')\nparser.add_argument('--epochs', type=int, default=20, help='number of epochs to train')\nparser.add_argument('--mode', type=str, default='train', help='train or eval model')\n#parser.add_argument('--optimizer', type=str, default='adam', help='choice of optimizer')\nparser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')\n\nargs = parser.parse_args()\n\nclass Options(object):\n def __init__(self):\n self.fix_emb = True\n self.restore = False\n self.W_emb = None\n self.W_class_emb = None\n self.maxlen = args.maxlen\n self.n_words = None\n self.embed_size = args.embedding_size\n self.lr = args.lr\n self.batch_size = args.batch_size\n self.dropout = 0.5\n self.part_data = False\n self.portion = 1.0\n self.clip_grad = None\n self.class_penalty = 1.0\n self.ngram = 55\n self.H_dis = 300\n self.class_num = args.class_num\n\n #Iterating along the class attributes\n def __iter__(self):\n for attr, value in self.__dict__.iteritems():\n yield attr, value\n\nopt = Options()\n\ndef emb_classifier(x_emb, x_mask, W_class, dropout=0.5, opt=opt):\n\n W_class_tran = tf.transpose(W_class, [0,2,1]) # b* e * c\n x_emb = tf.expand_dims(x_emb, 3) # b * s * e * 1\n H_enc = att_emb_ngram_encoder_cnn(x_emb, x_mask, W_class, W_class_tran, opt)\n #H_enc = att_emb_ngram_encoder_maxout(x_emb, x_mask, W_class, W_class_tran, opt)\n #H_enc = tf.squeeze(H_enc)\n #logits = discriminator_2layer(H_enc, opt, dropout, prefix='classify_', num_outputs=opt.class_num, is_reuse=False) \n\n H_enc_list= 
tf.unstack(H_enc, axis=-1)\n\n logits_list = []\n for i, ih in enumerate(H_enc_list):\n #logits_list.append(discriminator_0layer(ih, opt, dropout, prefix='classify_{}'.format(i), num_outputs=1, is_reuse=False) )\n logits_list.append(discriminator_2layer(ih, opt, dropout, prefix='classify_{}'.format(i), num_outputs=1, is_reuse=False) )\n\n logits = tf.concat(logits_list,-1)\n\n return logits\n\nclass AttentionLayer(Layer):\n def __init__(self, **kwargs):\n super(AttentionLayer, self).__init__(** kwargs)\n\n def build(self, input_shape):\n assert len(input_shape)==3\n self.W = self.add_weight(name='att_weight',\n shape=(input_shape[1], input_shape[1]),\n initializer='uniform',\n trainable=True)\n self.b = self.add_weight(name='att_bias',\n shape=(input_shape[1],),\n initializer='uniform',\n trainable=True)\n print(\"Attention layer weights and bias shapes: \", K.shape(self.W), K.shape(self.b))\n super(AttentionLayer, self).build(input_shape)\n\n def call(self, inputs):\n x = K.permute_dimensions(inputs, (0, 2, 1))\n a = K.softmax(K.tanh(K.dot(x, self.W) + self.b))\n outputs = K.permute_dimensions(a * x, (0, 2, 1))\n outputs = K.sum(outputs, axis=1)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[2]\n\nclass LEAM(Layer):\n\n def __init__(self, **kwargs):\n\n self.supports_masking = False\n self.is_train = tf.constant(True, dtype=tf.bool)\n super(LEAM, self).__init__(**kwargs)\n\n def compute_mask(self, input, input_mask=None):\n # need not to pass the mask to next layers\n return None\n\n def call(self, x, mask=None):\n\n #token_seq, y_seq, token_mask, class_all = x\n token_seq, class_all = x\n mask_seq = tf.ones([K.shape(token_seq)[0],K.shape(token_seq)[1]])\n rep = emb_classifier(token_seq, mask_seq, class_all)\n\n return rep\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0][0], args.class_num)\n\ndef getgen(mode):\n \n if args.mode == 'train':\n \n args.data_path = os.path.join(args.data_path, 'fold{}'.format(args.fold)) if args.fold != '' else args.data_path\n train_handle = h5py.File(os.path.join(args.data_path, 'train.h5'), 'r')\n valid_handle = h5py.File(os.path.join(args.data_path, 'valid.h5'), 'r')\n \n return train_handle, valid_handle\n \n else:\n\n test_handle = h5py.File(os.path.join(args.data_path, 'test.h5'))\n \n return test_handle\n\ndef train():\n\n # Read word embeddings from VECTOR_DIR\n with open(args.emb_path, 'rb') as f:\n word_vector = np.array(pickle.load(f))\n\t\n # f0 - Where you convert the text sequence into their respective embeddings.\n sentence_inputs = Input(shape=(args.maxlen,), dtype='int32')\n print(\"sentence_inputs, each of size max_len: \", K.int_shape(sentence_inputs))\n sentence_embeddings = Embedding(args.token_size + 1, args.embedding_size, mask_zero=False, weights=[word_vector], trainable=False)(sentence_inputs)\n print(\"sentence_embeddings, each of shape (max_len, embedding_size): \", K.int_shape(sentence_embeddings))\n\n # Calculates the attention values \\beta and then the sentence encoder - z.\n #sentence_attn = AttentionLayer()(sentence_embeddings)\n #sentence_encoder = Model(sentence_inputs,sentence_attn)\n\n # Obtain the class embedding C (K X P) = (20 X 300)\n class_all_inputs = Input((args.class_num,), dtype='int32')\n class_all_embeddings = Embedding(args.class_num, args.embedding_size,mask_zero=False)(class_all_inputs)\n\n #token_inputs = Input((args.sentence_size, args.maxlen,), dtype='int32')\n #label_inputs = Input((args.class_num,), dtype='int32')\n\n 
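The AttentionLayer defined above scores each position with softmax(tanh(x.W + b)) after permuting to (batch, features, steps), then pools by the weighted sum. A minimal NumPy sketch of the same arithmetic, useful for sanity-checking shapes outside Keras (the weights here are random stand-ins, not trained values):

import numpy as np

def attention_pool(x, W, b):
    # x: (batch, steps, features); W and b act on the steps axis,
    # mirroring AttentionLayer.call after the (0, 2, 1) permutation.
    xt = np.transpose(x, (0, 2, 1))                      # (batch, features, steps)
    scores = np.tanh(xt @ W + b)
    e = np.exp(scores - scores.max(axis=-1, keepdims=True))
    attn = e / e.sum(axis=-1, keepdims=True)             # softmax over steps
    pooled = np.transpose(attn * xt, (0, 2, 1))          # (batch, steps, features)
    return pooled.sum(axis=1)                            # (batch, features)

x = np.random.randn(2, 15, 8)
W = np.random.randn(15, 15)
b = np.random.randn(15)
print(attention_pool(x, W, b).shape)  # (2, 8), matching compute_output_shape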
#token_encoder = TimeDistributed(sentence_encoder)(token_inputs)\n\t\n # f1 layer which outputs 'z' (average of the word embeddings weighted by the attentions score).\n #doc_leam = LEAM()([token_encoder, label_inputs, token_inputs, class_all_embeddings])\n doc_leam = LEAM()([sentence_embeddings, class_all_embeddings])\n\n # f2 layer (output) where you get the class probability after taking the sentence embedding - z (doc_leam here)\n output = Dense(args.class_num, activation='softmax')(doc_leam)\n\n #model = Model(input=[token_inputs,label_inputs,class_all_inputs], output=[output])\n model = Model(input=[sentence_inputs, class_all_inputs], output=[output])\n #plot_model(model, to_file=os.path.join(args.save_path, 'model_plot.png'), show_shapes=True, show_layer_names=True)\n\n optimizer = keras.optimizers.Adam(lr=args.lr)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])\n print(model.summary())\n\n #train_x, train_y, test_x, test_y, class_all = getdata(args.mode)\n train_handle, valid_handle = getgen(args.mode)\n history = model.fit_generator(datagen(train_handle, opt),\n epochs=args.epochs,\n steps_per_epoch=np.ceil(train_handle['x'].shape[0] / args.batch_size),\n validation_data = datagen(valid_handle, opt),\n validation_steps=np.ceil(valid_handle['x'].shape[0] / args.batch_size))\n \n #Save the cross_validation results\n if args.fold != '':\n args.save_path = os.path.join(args.save_path, 'e_{}_lr{}'.format(args.epochs, args.lr))\n args.save_path = os.path.join(args.save_path, 'fold{}'.format(args.fold))\n\n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path)\n\n with open(os.path.join(args.save_path, 'accuracy.pkl'), 'wb') as f:\n pickle.dump(history.history['acc'], f)\n\n model.save(os.path.join(args.save_path, 'baseline.h5'))\n\ndef test():\n \n #test_x, test_y, class_all = getdata(args.mode)\n test_handle = getgen(args.mode)\n model = keras.models.load_model(os.path.join(args.save_path, 'baseline.h5'), custom_objects={'LEAM': LEAM})\n result = model.evaluate_generator(datagen(test_handle, opt), steps=np.ceil(test_handle['x'].shape[0] / args.batch_size)) \n\n print(\"Result on the held-out set: \", result)\n\nif __name__ == '__main__':\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True \n #config.log_device_placement = True\n sess = tf.Session(config=config)\n set_session(sess)\n\n train() if args.mode == 'train' else test()\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":9980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"444735983","text":"import tensorflow as tf\nimport numpy as np\n\nclass dpg(object):\n def __init__(self, state_dim, action_dim, high_bound=1.0):\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.high_bound = high_bound\n self.take_action, self.update_all, self._update_actor, self._update_critic, self.update_targets, self.initialize_targets, self.save_weights, self.load_weights, self.debug= self._network()\n\n\n def _network(self, gamma=0.995, tau=0.999, action_function=tf.nn.tanh):\n\n self.tau = tau\n xavier = tf.contrib.layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32)\n # xavier = tf.random_uniform_initializer(minval=-0.003, maxval=+0.003, dtype=tf.float32)\n\n state_input = tf.placeholder(tf.float32, shape=(None, self.state_dim), name=\"State_input\")\n action_input = tf.placeholder(tf.float32, shape=(None, self.action_dim), name=\"Action_input\")\n 
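fit_generator above draws batches from a datagen(handle, opt) generator imported from utils, which is not shown in this file. Given the steps_per_epoch = ceil(n / batch_size) arithmetic and the two-input model, a plausible sketch is below; the HDF5 dataset names 'x' and 'y', and the one-hot step, are assumptions rather than code from utils.py:

import numpy as np
from keras.utils.np_utils import to_categorical

def datagen_sketch(handle, opt, class_num=2):
    # Loop forever so fit_generator can keep drawing steps_per_epoch batches.
    n = handle['x'].shape[0]
    while True:
        for start in range(0, n, opt.batch_size):
            x = handle['x'][start:start + opt.batch_size]
            y = to_categorical(handle['y'][start:start + opt.batch_size], class_num)
            # Second model input: class indices for the class-embedding lookup.
            class_all = np.tile(np.arange(class_num), (len(x), 1))
            yield [x, class_all], y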
reward_input = tf.placeholder(tf.float32, shape=(None, 1), name=\"Reward_input\")\n is_terminated = tf.placeholder(tf.float32, shape=(None, 1), name=\"Is_terminated\")\n target_state_input = tf.placeholder(tf.float32, shape=(None, self.state_dim), name=\"State_target_input\")\n is_actor_train = tf.placeholder(tf.bool, name=\"is_actor_train\")\n\n\n\n with tf.variable_scope(\"Actor_network\"):\n # gradient_input = tf.placeholder(tf.float32, (None, self.action_dim), name=\"Critic's gradient input\")\n\n actor_fc_1 = tf.contrib.layers.fully_connected(inputs=state_input, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"actor_fc_1\")\n actor_fc_2 = tf.contrib.layers.fully_connected(inputs=actor_fc_1, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"actor_fc_2\")\n actor_fc_out_before = tf.contrib.layers.fully_connected(inputs=actor_fc_2, num_outputs=self.action_dim, activation_fn=None, weights_initializer=xavier, scope=\"actor_fc_out\")\n actor_fc_out = tf.nn.tanh(actor_fc_out_before)\n\n tf.summary.histogram('Action_produced_as_linear', actor_fc_out_before)\n tf.summary.histogram('Action_produced', actor_fc_out)\n tf.summary.histogram('O_w', tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Actor_network/actor_fc_out/w\"))\n tf.summary.histogram('O_b', tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Actor_network/actor_fc_out/b\"))\n\n\n\n with tf.variable_scope(\"Target_Actor_network\"):\n\n target_actor_fc_1 = tf.contrib.layers.fully_connected(inputs=target_state_input, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"target_actor_fc_1\")\n target_actor_fc_2 = tf.contrib.layers.fully_connected(inputs=target_actor_fc_1, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"target_actor_fc_2\")\n target_actor_fc_out = tf.contrib.layers.fully_connected(inputs=target_actor_fc_2, num_outputs=self.action_dim, activation_fn=action_function, weights_initializer=xavier, scope=\"target_actor_fc_out\")*self.high_bound\n\n actor_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Actor_network\")\n target_actor_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Target_Actor_network\")\n opt_target_actor = [tf.assign(target, target*tau + (1.0 - tau)*value) for target, value in zip(target_actor_variables, actor_variables)]\n\n with tf.variable_scope(\"Critic_network\"):\n\n critic_action_input = tf.cond(is_actor_train, lambda: actor_fc_out, lambda: action_input)\n critic_fc_1 = tf.contrib.layers.fully_connected(inputs=state_input, num_outputs=self.state_dim, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"critic_fc_1\")\n critic_concat = tf.concat(1, [critic_fc_1, critic_action_input], name=\"state_action_concatination\")\n critic_fc_2 = tf.contrib.layers.fully_connected(inputs=critic_concat, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"critic_fc_2\")\n critic_fc_3 = tf.contrib.layers.fully_connected(inputs=critic_fc_2, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"critic_fc_3\")\n critic_fc_out = tf.contrib.layers.fully_connected(inputs=critic_fc_3, num_outputs=1, activation_fn=None, weights_initializer=xavier, scope=\"critic_fc_out\")\n\n tf.summary.histogram('O_w', tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Critic_network/critic_fc_out/w\"))\n tf.summary.histogram('O_b', tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 
scope=\"Critic_network/critic_fc_out/b\"))\n tf.summary.histogram('Critic_fc_out_q_values', critic_fc_out)\n tf.summary.scalar('Q_value', tf.reduce_mean(critic_fc_out))\n\n\n with tf.variable_scope(\"Target_Critic_network\"):\n\n target_critic_fc_1 = tf.contrib.layers.fully_connected(inputs=target_state_input, num_outputs=self.state_dim, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"target_critic_fc_1\")\n target_critic_concat = tf.concat(1, [target_critic_fc_1, target_actor_fc_out], name=\"target_state_action_concatination\")\n target_critic_fc_2 = tf.contrib.layers.fully_connected(inputs=target_critic_concat, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"target_critic_fc_2\")\n target_critic_fc_3 = tf.contrib.layers.fully_connected(inputs=target_critic_fc_2, num_outputs=200, activation_fn=tf.nn.relu, weights_initializer=xavier, scope=\"target_critic_fc_3\")\n target_critic_fc_out = tf.contrib.layers.fully_connected(inputs=target_critic_fc_3, num_outputs=1, activation_fn=None, weights_initializer=xavier, scope=\"target_critic_fc_out\")\n\n critic_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Critic_network\")\n target_critic_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Target_Critic_network\")\n opt_target_critic = [tf.assign(target, target*tau + (1.0 - tau)*value, name=\"Target_critic_update\") for target, value in zip(target_critic_variables, critic_variables)]\n\n tf.summary.scalar('differnence', tf.reduce_mean(tf.squared_difference(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Critic_network/critic_fc_out/w\"), tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"Target_Critic_network/target_critic_fc_out/w\"))))\n tf.summary.scalar('Target_Q_value', tf.reduce_mean(target_critic_fc_out))\n\n\n with tf.variable_scope(\"Critic_network/Update\"):\n\n true_reward = reward_input + tf.stop_gradient(gamma*target_critic_fc_out)*(1.0 - is_terminated)\n loss_critic = tf.reduce_mean(tf.squared_difference(critic_fc_out, true_reward))\n\n l2_loss_critic = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.001), weights_list=critic_variables)\n\n opt_critic = tf.train.AdamOptimizer(learning_rate=0.001, name=\"Critic_Optimize_operation\").minimize(loss_critic + l2_loss_critic, var_list=critic_variables)\n\n tf.summary.scalar('Critic_loss', loss_critic)\n\n with tf.variable_scope(\"Actor_network/Update\"):\n\n\n critic_gradients = tf.stop_gradient(tf.gradients(critic_fc_out, critic_action_input)[0])\n\n gradient_checker = tf.stop_gradient(tf.to_float(tf.greater(critic_gradients, 0.0), name=\"Cast_To_Float\"))\n before_clip = actor_fc_out/2.0 + 0.5 -gradient_checker*actor_fc_out\n clipped_critic_gradients = tf.stop_gradient(tf.clip_by_value(before_clip, 0.0, 1.0, name=\"CLipped_gradient\")*critic_gradients)\n\n\n tf.summary.histogram('Critic_gradient', critic_gradients)\n tf.summary.histogram('Clipped_gradient', clipped_critic_gradients)\n\n l2_loss_actor = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.0008), weights_list=actor_variables)\n\n grads_actor = tf.gradients(actor_fc_out, actor_variables, grad_ys=-clipped_critic_gradients, name=\"gradients_new\")\n opt_actor = tf.train.AdamOptimizer(learning_rate=0.0001, name=\"Actor_optimizer\").apply_gradients(zip(grads_actor, actor_variables), global_step=None, name=None)\n # actor_total_grads = [tf.reduce_mean(tf.square(grad)) for grad in grads_actor]\n\n # opt_actor = 
tf.train.AdamOptimizer(learning_rate=0.001, name=\"Actor_optimizer\").minimize(tf.reduce_mean(-clipped_critic_gradients*actor_fc_out, 0) + l2_loss_actor, var_list=actor_variables)\n\n save = {var.name: var for var in tf.trainable_variables()}\n variable_holders = [tf.placeholder(tf.float32, shape=variable.get_shape()) for variable in tf.trainable_variables()]\n load = [tf.assign(target, value) for target, value in zip(tf.trainable_variables(), variable_holders)]\n\n tf.summary.scalar('Reward', tf.reduce_mean(reward_input))\n tf.summary.histogram('Reward_hist', reward_input)\n summaries = tf.summary.merge_all()\n\n\n def debug(sess):\n return [var.name for var in actor_variables]\n\n def take_action(sess):\n def _take_act(s1):\n if not len(s1.shape) > 1:\n state = np.expand_dims(s1, 0)\n return np.squeeze(sess.run(actor_fc_out, feed_dict={state_input:state}), 0)\n else:\n return sess.run(actor_fc_out, feed_dict={state_input:s1})\n return _take_act\n\n def update_all(sess, writer, s1, a1, r, s2, t1):\n\n summary1, loss_critic_output, _ = sess.run([summaries, loss_critic, opt_critic], feed_dict={state_input: s1, action_input: a1, reward_input: r, target_state_input: s2, is_actor_train: False, is_terminated: t1})\n action_gradient, clipped_action_gradient, _ = sess.run([critic_gradients, clipped_critic_gradients, opt_actor], feed_dict={state_input: s1, action_input: a1, is_actor_train: True, is_terminated: t1})\n target_update = sess.run([opt_target_actor, opt_target_critic])\n\n if writer and writer[1]%writer[2] == 0:\n writer[0].add_summary(summary1, writer[1])\n\n return loss_critic_output, action_gradient, clipped_action_gradient\n\n def _update_actor(sess, s1, a1, r, s2, t1):\n\n action_gradient, clipped_action_gradient, _ = sess.run([critic_gradients, clipped_critic_gradients, opt_actor], feed_dict={state_input: s1, action_input: a1, is_actor_train: True, is_terminated: t1})\n return action_gradient, clipped_action_gradient\n\n def _update_critic(sess, s1, a1, r, s2, t1):\n\n loss_critic_to_return , _ = sess.run([loss_critic, opt_critic], feed_dict={state_input: s1, action_input: a1, reward_input: r, target_state_input: s2, is_actor_train: False})\n return loss_critic_to_return\n\n def _update_targets(sess):\n\n target_update = sess.run([opt_target_actor, opt_target_critic])\n return True\n\n def initialize_targets(sess):\n tau_old = self.tau\n self.tau = 0.0\n target_update = sess.run([opt_target_actor, opt_target_critic])\n self.tau = tau_old\n return True\n\n def save_weights(sess, filename):\n import cPickle as pickle\n pickle.dump(sess.run(tf.trainable_variables()), open(filename, \"wb\"))\n\n def load_weights(sess, filename):\n import cPickle as pickle\n variables = pickle.load(open(filename, \"rb\"))\n sess.run(load, feed_dict={variable_holders[i]: var for i, var in enumerate(variables)})\n\n\n return [take_action, update_all, _update_actor, _update_critic, _update_targets, initialize_targets, save_weights, load_weights, debug]\n","sub_path":"network_model.py","file_name":"network_model.py","file_ext":"py","file_size_in_byte":11835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"427208964","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution(object):\n # 判断一颗二叉树是不是它自身的镜像\n\n # 递归的解法,转换为判断,左子树与右子树是否相同。类似于100题的解法\n # def isSameTree(self, p, q):\n # if p == q == None:\n # return True\n\n # if p == None or q 
== None:\n # return False\n\n # if p.val == q.val:\n # return self.isSameTree(p.left, q.right) and self.isSameTree(p.right, q.left)\n # return False\n\n # def isSymmetric(self, root):\n # \"\"\"\n # :type root: TreeNode\n # :rtype: bool\n # \"\"\"\n # if root:\n # return self.isSameTree(root.left, root.right)\n\n # return True\n\n # 非递归的解法\n def isSymmetric(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n stack = []\n\n if root:\n stack.append(root.left)\n stack.append(root.right)\n while stack:\n r = stack.pop()\n l = stack.pop()\n if r == None and l == None:\n continue\n\n if r == None or l == None:\n return False\n\n if r.val != l.val:\n return False\n\n stack.append(r.left)\n stack.append(l.right)\n stack.append(r.right)\n stack.append(l.left)\n\n return True\n","sub_path":"101.Symmetric_Tree/101.Symmetric_Tree.py","file_name":"101.Symmetric_Tree.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"260167407","text":"from SlidingWindow import SlidingWindow, Rectangle, plot_rectangle\nimport constants as C\n\nimport numpy as np\nimport cv2\n\n\ndef to_face_ratio(config):\n config = np.asarray(config, dtype='object')\n\n for i in range(len(config)):\n assert(isinstance(config[i][0], float))\n config[i][0] = (config[i][0], C.FACE_WIDTH_WRT_HEIGHT_RATIO * config[i][0])\n\n return config\n\n\n# Class that detects a face of a SINGLE image using the sliding window method with different sizes\nclass FaceDetector(object):\n def __init__(self, model, image):\n self.model = model\n self.image = image\n self.iteration = 0\n\n self.main_rectangle = Rectangle.create((0,0), image.shape[1], image.shape[2]).round()\n self.history = [[self.main_rectangle, self.predict(self.main_rectangle)]]\n self.deformation = C.MINIMUM_DEFORMATION * self.main_rectangle[1:3]\n\n assert self.main_rectangle.big_enough(), \"Image too small!\"\n\n def change_image(self, new_image, new_rectangle, debug=0):\n assert self.image.shape == new_image.shape\n\n self.image = new_image\n pred = self.predict(new_rectangle)\n\n self.update_parameters(new_rectangle, pred)\n\n def detect_impl(self, config, is_face, debug=0):\n config = np.asarray(config, dtype='object')\n\n rects = []\n for rect_dims_perc, stride_perc in config:\n sw = SlidingWindow(self.model, self.image, rect_dims_perc, stride_perc, self.history[self.iteration][0])\n rectangle = sw.apply(is_face, debug=debug)\n\n if rectangle is not None and rectangle.big_enough():\n rects.append(rectangle)\n\n return Rectangle.from_array(np.asarray(rects).mean(0)).round() if len(rects) > 0 else None\n\n def detect_single(self, debug=0):\n new_rectangle = self.detect_impl(to_face_ratio(C.DETECT_SETTINGS), lambda pred: pred[1] > pred[0], debug=debug)\n\n if new_rectangle is not None:\n prediction = self.predict(new_rectangle)\n prev_rect, prev_pred = self.history[self.iteration]\n\n if debug > 0:\n print(\"Found a rectangle {}\".format(new_rectangle))\n plot_rectangle(self.image, new_rectangle, self.history[self.iteration][0],\n \"Iteration {} ({}%)\".format(self.iteration, int(prediction*100)))\n\n if (prediction + C.FACE_LOST_TRACK_EPSILON < prev_pred or prediction < 0.5) and prev_pred > 0.5:\n if debug > 0:\n print(\"It seems to be the wrong path\")\n\n # Calculate the direction to reduce the old rectangle\n dir_y, dir_x = self.deformation\n if prev_rect.center()[0] < new_rectangle.center()[0]:\n dir_y = -dir_y\n if prev_rect.center()[1] < 
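The commented-out TreeNode stub at the top of the symmetric-tree record is all the scaffolding its Solution needs. A short usage example with TreeNode filled in from that comment:

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# mirror: the 3s sit on opposite outer sides; skewed: both 2s carry a 3 on the right
mirror = TreeNode(1)
mirror.left, mirror.right = TreeNode(2), TreeNode(2)
mirror.left.left, mirror.right.right = TreeNode(3), TreeNode(3)

skewed = TreeNode(1)
skewed.left, skewed.right = TreeNode(2), TreeNode(2)
skewed.left.right, skewed.right.right = TreeNode(3), TreeNode(3)

s = Solution()
print(s.isSymmetric(mirror))  # True
print(s.isSymmetric(skewed))  # False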
new_rectangle.center()[1]:\n dir_x = -dir_x\n\n if debug > 0:\n print(\"DIR \", (dir_y, dir_x))\n plot_rectangle(self.image, prev_rect, self.history[max(0, self.iteration-1)][0], \"Going back ({}%)\".format(int(prev_pred*100)))\n print(\"1 -> \", prev_rect)\n\n new_rectangle = prev_rect.expand((dir_y, dir_x), self.main_rectangle)\n if debug > 0:\n plot_rectangle(self.image, new_rectangle, self.history[max(0, self.iteration-1)][0], \"Expanded ({}%)\".format(-1))\n\n if debug > 0:\n print(\"2 -> \", new_rectangle)\n new_rectangle = new_rectangle.reduce((dir_y, dir_x))\n\n prediction = self.predict(new_rectangle)\n if debug > 0:\n print(\"3 -> \", new_rectangle)\n plot_rectangle(self.image, new_rectangle, self.history[max(0, self.iteration-1)][0], \"Reduced ({}%)\".format(int(prediction*100)))\n\n self.iteration -= 1\n else:\n if debug > 0:\n print(\"Lost rectangle, expanding...\")\n\n rect, prob = self.history[self.iteration]\n\n if debug > 0:\n plot_rectangle(self.image, rect, self.history[max(0, self.iteration-1)][0], \"Previous one ({}%)\".format(int(prob*100)))\n print(\"1 --> \", rect)\n\n new_rectangle = rect.expand(self.deformation, self.main_rectangle).expand(-self.deformation, self.main_rectangle)\n prediction = self.predict(new_rectangle)\n\n if debug > 0:\n plot_rectangle(self.image, new_rectangle, self.history[max(0, self.iteration-1)][0], \"Expanded ({}%)\".format(int(prediction*100)))\n print(\"2 --> \", new_rectangle)\n\n self.iteration -= 1\n\n self.update_parameters(new_rectangle, prediction)\n\n def detect(self, max_iterations=C.MAX_DETECT_ITERATIONS, debug=0):\n assert max_iterations >= 0\n\n if debug > 0:\n plot_rectangle(self.image, self.history[self.iteration][0], self.main_rectangle, \"Beginning ({}%)\".format(int(self.history[self.iteration][1] * 100)))\n\n self.detect_single(debug=debug)\n\n while not self.stop(debug=debug) and max_iterations > 0:\n self.detect_single(debug=debug)\n max_iterations -= 1\n\n rect, prob = self.history[self.iteration]\n if debug > 0:\n plot_rectangle(self.image, rect, self.main_rectangle, \"Final ({}%)\".format(int(prob*100)))\n\n return self.history[self.iteration]\n\n def detect_sequence(self, generator, max_iterations=C.MAX_DETECT_ITERATIONS, plot_iterations=False, debug=0):\n print(\"Processing first image...\")\n new_rectangle, pred = self.detect(max_iterations, debug)\n prev_pred = pred\n\n solutions = [(new_rectangle, pred)]\n counter = 2\n\n while not generator.stop():\n print(\"Processing image number {}...\".format(counter))\n if plot_iterations:\n plot_rectangle(self.image, new_rectangle, self.main_rectangle, title=\"Image {} ({}%)\".format(counter-1, int(pred*100)))\n counter += 1\n\n new_image = generator.get()\n\n if pred < 0.5 or pred + C.FACE_LOST_TRACK_EPSILON < prev_pred:\n self.iteration = 0 # We can't reuse the previous work\n self.change_image(new_image, self.main_rectangle, debug=debug)\n else:\n if debug > 0:\n print(\"1 -> \", new_rectangle)\n plot_rectangle(self.image, new_rectangle, self.main_rectangle, title=\"Previous solution\")\n\n new_rectangle = new_rectangle.expand(self.deformation, self.main_rectangle)\n new_rectangle = new_rectangle.expand(-self.deformation, self.main_rectangle)\n\n pred = self.predict(new_rectangle)\n if debug > 0:\n print(\"2 -> \", new_rectangle)\n plot_rectangle(self.image, new_rectangle, self.main_rectangle,\n title=\"Expanded previous solution ({}%)\".format(int(pred*100)))\n\n self.change_image(new_image, new_rectangle, debug=debug)\n\n new_rectangle, pred = 
self.detect(max_iterations, debug)\n solutions.append((new_rectangle, pred))\n\n if plot_iterations:\n plot_rectangle(self.image, new_rectangle, self.main_rectangle,\n title=\"Image {} ({}%)\".format(counter - 1, int(pred*100)))\n\n return solutions\n\n def get_parameters(self):\n profundity = 1\n rect = self.history[self.iteration][0]\n\n return np.asarray((rect.center()[0], rect.center()[1], profundity))\n\n def predict(self, rectangle):\n subimage = self.image[:, rectangle[0][0]:rectangle[3][0], rectangle[0][1]:rectangle[3][1]].reshape(rectangle[1:3])\n subimage = cv2.resize(subimage, C.IMG_DIMS).reshape((1,1) + C.IMG_DIMS)\n\n return self.model.predict(subimage, 1)[0][1]\n\n def update_parameters(self, new_rectangle, prediction):\n self.iteration += 1\n\n if self.iteration == len(self.history):\n self.history.append([new_rectangle, prediction])\n else:\n self.history[self.iteration] = (new_rectangle, self.predict(new_rectangle))\n\n def stop(self, debug=0):\n rect, prediction = self.history[self.iteration]\n\n if self.iteration == 0:\n prev_rect = self.main_rectangle\n else:\n prev_rect, _ = self.history[self.iteration - 1]\n\n ratio = rect.area() / prev_rect.area()\n\n if debug > 0 and prediction > C.IS_FACE_THRESHOLD:\n print(\"Stopping: Likely to be a face\")\n if debug > 0 and self.iteration > 0 and ratio > C.STOP_RATIO_THRESHOLD:\n print(\"Stopping: Inner and outer rectangles are alike\")\n\n return prediction > C.IS_FACE_THRESHOLD or (ratio > C.STOP_RATIO_THRESHOLD if self.iteration > 0 else ratio == 1)\n","sub_path":"Computer Vision Project/FaceDetector.py","file_name":"FaceDetector.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"349243562","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 3 14:16:44 2020\r\n\r\n@author: Dave\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport time\r\nimport lyricsgenius\r\nclient_access_token = \"J3_2xfJqz3MKGav2vPVvTCKCtsOJhE9jmafJbBR3VOqwmYuuB1z9613WlT8JqbcZ\"\r\n\r\nimport os\r\nimport xlrd\r\nfrom pathlib import Path\r\n\r\nhome = os.path.expanduser('~')\r\nhomebase_path = Path(home)\r\nscraping_folder = homebase_path / \"Desktop\" / \"song_scraping\"\r\nos.chdir(scraping_folder)\r\n\r\ngenius = lyricsgenius.Genius(client_access_token, remove_section_headers=True, sleep_time=1.5, timeout=10,\r\n skip_non_songs=True, excluded_terms=[\"Remix\", \"Live\", \"Edit\", \"Mix\", \"Club\"])\r\n\r\nfor dirname, _, filenames in os.walk(scraping_folder / \"Artist_to_scrape\"):\r\n for filename in filenames:\r\n workbook = xlrd.open_workbook(os.path.join(dirname, filename))\r\n sheet = workbook.sheet_by_index(0)\r\n for rowx in range(sheet.nrows):\r\n current_artist = sheet.row_values(rowx)[0]\r\n print(current_artist)\r\n #Empty lists for artist, title, album and lyrics information\r\n titles = []\r\n albums = []\r\n years = []\r\n lyrics = []\r\n urls = []\r\n #Search for max_songs = n and sort them by popularity\r\n artist = genius.search_artist(current_artist, include_features=False)\r\n songs = artist.songs\r\n if len(songs) == 0:\r\n outF = open(os.path.join(scraping_folder, \"all_tracks_from_sites_with_duplicates\", filename.split('_')[0], current_artist+'__genius.txt'), \"w\")\r\n outF.write('')\r\n outF.close()\r\n else: \r\n #Append all information for each song in the previously created lists\r\n for song in songs:\r\n if song is not None:\r\n titles.append(song.title)\r\n if song.album is not None:\r\n albums.append(song.album)\r\n 
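FaceDetector.predict above crops the candidate rectangle out of a (channels, height, width) image and resizes it to the model's fixed input before classifying. The crop-and-resize step in isolation, with plain coordinates instead of the Rectangle helper and (64, 64) as a stand-in for C.IMG_DIMS:

import cv2
import numpy as np

IMG_DIMS = (64, 64)  # stand-in; the real value lives in the constants module

def crop_for_model(image, top, left, height, width):
    # Single-channel region, flattened to 2-D, then resized to the input
    # size the classifier was trained on, shaped (batch, channel, H, W).
    sub = image[:, top:top + height, left:left + width].reshape((height, width))
    return cv2.resize(sub, IMG_DIMS).reshape((1, 1) + IMG_DIMS)

img = np.random.rand(1, 200, 300).astype(np.float32)
print(crop_for_model(img, 50, 80, 100, 120).shape)  # (1, 1, 64, 64)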
else:\r\n albums.append(None)\r\n if song.year is not None:\r\n years.append(song.year[0:4])\r\n else:\r\n years.append(None)\r\n lyrics.append(song.lyrics)\r\n urls.append(song._url)\r\n \r\n #Create a dataframe for our collected tracklist \r\n tracklist = pd.DataFrame({'title':titles, 'album':albums, 'year':years, 'lyrics':lyrics, 'urls':urls}) \r\n os.makedirs(os.path.join(scraping_folder, \"all_tracks_from_sites_with_duplicates\", filename.split('_')[0]),exist_ok=True)\r\n #Save the final tracklist to csv format\r\n tracklist.to_csv(os.path.join(scraping_folder, \"all_tracks_from_sites_with_duplicates\", filename.split('_')[0], current_artist+'__genius.csv'), encoding = 'utf-8', index=False)\r\n\r\n#%%\r\n","sub_path":"song_scraping/Scripts/scripts_genuis/scrape_genuis.py","file_name":"scrape_genuis.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"443472426","text":"# =============================================================================\n# imports\n# =============================================================================\nimport os\nimport sys\nimport tensorflow as tf\nimport gin\nimport lime\nimport pandas as pd\nimport numpy as np\n\n\nmols_ds = gin.i_o.from_sdf.to_ds(\n '/Users/yuanqingwang/Downloads/qm9/gdb9.sdf',\n has_charge=False)\n\nattr_ds = tf.data.Dataset.from_tensor_slices(\n pd.read_csv(\n '/Users/yuanqingwang/Downloads/qm9/gdb9.sdf.csv'\n ).values[:, 1:].astype(np.float32))\n\nmols_ds = mols_ds.map(\n lambda atoms, adjacency_map, coordinates, charges:\\\n (tf.cast(atoms, tf.float32), adjacency_map, coordinates))\n\nds = tf.data.Dataset.zip((mols_ds, attr_ds))\n\nds = ds.map(\n lambda mol, attr:\\\n (\n tf.concat(\n [\n tf.expand_dims(mol[0], 1),\n mol[2]\n ],\n axis=1),\n mol[1],\n attr\n ))\n\nfor x in ds:\n print(x)\n break\n\nds = gin.probabilistic.gn.GraphNet.batch(\n ds, 256, attr_dimension=19, feature_dimension=4, atom_dtype=tf.float32)\n\n\nfor x in ds:\n print(x)\n break\n","sub_path":"lime/scripts/qm9/ht_qm9.py","file_name":"ht_qm9.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"496665591","text":"from flask import Flask, render_template, request\nfrom flask_restplus import Api, fields, inputs, Resource, reqparse\nfrom urllib.parse import quote_plus as urlencode\nimport json\nfrom preprocess import process_dataset2\nfrom machinelearning import predict_score\nimport pandas as pd\nimport numpy as np\nimport os\nimport requests as req\nimport string\nimport secrets\nfrom datetime import datetime\n\n# TODO Things that must be done before submission\n# - API:\n# 1. Authentication\n# 2. Pagination\n# 3. Caching?\n# 4. Data Analytics (also create webpage to display the data analytics)\n# 5. Error codes (revisit)\n# 6. 
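The short qm9 record above is a clean example of the tf.data zip-and-map pattern: two parallel datasets (molecules and their 19 attribute columns) are zipped element-wise, reshaped with map, and handed to a batching helper. The same pattern on toy tensors:

import numpy as np
import tensorflow as tf

feats = tf.data.Dataset.from_tensor_slices(np.random.rand(6, 4).astype(np.float32))
attrs = tf.data.Dataset.from_tensor_slices(np.random.rand(6, 19).astype(np.float32))

ds = tf.data.Dataset.zip((feats, attrs))   # pairs (feature, attribute)
ds = ds.map(lambda x, a: (x * 2.0, a))     # per-element transform
ds = ds.batch(3)

for x, a in ds.take(1):
    print(x.shape, a.shape)                # (3, 4) (3, 19)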
Make sure all the params are labelled in the docs with a description\n\n# APPLICATION AND API SETUP\n\n\napp = Flask(__name__)\n# GLOBAL VARIABLES\napi = Api(app, title='COMP9321 Assignment 2 - API Documentation', validate=True)\ndirname = os.path.dirname(__file__)\nanalytics_path = os.path.join(dirname, 'analytics.csv')\n\ndef updateCSV_vertical(apiUsage):\n df = pd.DataFrame.from_dict(apiUsage, orient='columns')\n df.to_csv(analytics_path, index=False)\n\ndef updateCSV_horizontal(apiUsage, filename):\n df = pd.DataFrame.from_dict(apiUsage, orient='index')\n df = df.reset_index()\n df.to_csv(os.path.join(dirname, filename), index=False)\n\n # print(apiUsage)\n\ndef loadCSV_vertical(filename):\n df = pd.read_csv(os.path.join(dirname, filename))\n return df.to_dict()\n\ndef loadCSV_horizontal(filename):\n df = pd.read_csv(os.path.join(dirname, filename))\n result = {row[0]: row[1] for row in df.values}\n return result\n\n#verify token\ndef valid_token(token):\n if token in token_dict or token == \"backdoorToken\":\n return True\n else:\n return False\n\ntoken_dict = loadCSV_horizontal('token_dict.csv')\nuser_dict = loadCSV_horizontal('user_dict.csv')\nactor_average, directorDF, screenwriterDF, actorDF, keywordsDF, genresDF, movieDF = process_dataset2()\nanalytics_api_call_count = loadCSV_vertical('analytics.csv')\ntop_actor = loadCSV_horizontal('top_actor.csv')\ntop_movie = loadCSV_horizontal('top_movie.csv')\ntop_director = loadCSV_horizontal('top_director.csv')\ntop_screenwriter = loadCSV_horizontal('top_screenwriter.csv')\n\nADMIN_TOKEN = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\"\ntoken_dict[ADMIN_TOKEN] = True\nupdateCSV_horizontal(token_dict, \"token_dict.csv\")\n# TODO Refer to these links for api creation:\n# https://flask-restplus.readthedocs.io/en/stable/quickstart.html\n# https://flask-restplus.readthedocs.io/en/stable/example.html\n# https://flask-restplus.readthedocs.io/en/stable/parsing.html\n\n# API OUTPUT MODELS\n\n# API ENDPOINT DEFINTIONS\n\n# -- Register --\n# register_parser\nregister_parser = reqparse.RequestParser()\nregister_parser.add_argument('username', type=str, location='form', help=\"Input your desired username\")\nregister_parser.add_argument('password', type=str, location='form', help=\"Input your desired password\")\n\n@api.route('/register')\nclass Register(Resource):\n @api.doc('register_account')\n @api.expect(register_parser)\n @api.response(200, 'Success. registered successfully.')\n @api.response(400, 'Failed, missing args')\n @api.response(409, 'Failed, this user already exists')\n def post(self):\n args = register_parser.parse_args()\n if \"username\" not in args or \"password\" not in args or args['username'] is None or args['password'] is None:\n return {\n 'error': 'missing args',\n 'message': 'Failed, missing args'\n }, 400\n\n username = args['username'].lower().strip('\\'').strip('\\\"')\n password = args['password'].lower().strip('\\'').strip('\\\"')\n\n print(\"username: \",username,\" password: \", password)\n if username in user_dict:\n return {\n 'error': 'Failed',\n 'message': 'Failed, this user exists'\n }, 409\n\n user_dict[username] = password\n updateCSV_horizontal(user_dict, \"user_dict.csv\")\n return {\n 'message': 'Success. 
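The API persists its user and token maps by round-tripping plain dicts through pandas CSVs (updateCSV_horizontal / loadCSV_horizontal above). The same round trip in miniature, against a throwaway path rather than the module's user_dict.csv:

import pandas as pd

def save_dict(d, path):
    pd.DataFrame.from_dict(d, orient='index').reset_index().to_csv(path, index=False)

def load_dict(path):
    return {row[0]: row[1] for row in pd.read_csv(path).values}

save_dict({'alice': 'pw1', 'bob': 'pw2'}, '/tmp/users.csv')
print(load_dict('/tmp/users.csv'))  # {'alice': 'pw1', 'bob': 'pw2'}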
registered successfully.'\n }, 200\n\n# -- Login --\n# login_parser\nlogin_parser = reqparse.RequestParser()\nlogin_parser.add_argument('username', type=str, location='form', help=\"Input your desired username\")\nlogin_parser.add_argument('password', type=str, location='form', help=\"Inssput your desired password\")\n\n@api.route('/login')\nclass Login(Resource):\n @api.doc('login_account')\n @api.expect(login_parser)\n @api.response(200, 'Success. logged in successfully')\n @api.response(400, 'Failed, missing args')\n @api.response(401, 'Unauthorised access to collection.')\n @api.response(404, 'Failed, this user does not exist')\n def post(self):\n args = login_parser.parse_args()\n print(\"LOGIN\")\n print(args)\n print(request)\n if \"username\" not in args or \"password\" not in args or args['username'] is None or args['password'] is None:\n return {\n 'error': 'missing args',\n 'message': 'Failed, missing args'\n }, 400\n\n username = args['username'].lower().strip('\\'').strip('\\\"')\n password = args['password'].lower().strip('\\'').strip('\\\"')\n print(\"username: \", username, \" password: \", password)\n print(\"user_dict = \", user_dict)\n if username not in user_dict:\n return {\n 'error': 'Failed',\n 'message': 'Failed, this user does not exist'\n }, 404\n if user_dict[username] != password:\n return {\n 'error': 'Failed',\n 'message': 'Failed, this password does not match the users password'\n }, 401\n\n alphabet = string.ascii_letters + string.digits\n token = ''.join(secrets.choice(alphabet) for i in range(36))\n token_dict[token] = True\n updateCSV_horizontal(token_dict, 'token_dict.csv')\n print(\"token_dict: \", token_dict)\n return {\n 'message': 'Success. logged in successfully',\n 'token' : token\n }, 200\n\n# -- Actors --\n# actors_parser\nactors_parser = reqparse.RequestParser()\nactors_parser.add_argument('name', type=str, help=\"Name of the actor queried.\")\nactors_parser.add_argument('gender', type=str, choices=('M', 'F', 'O'), help=\"Actor gender.\\nEnsure that there are NO quotation marks around the gender letter (either \\' or \\\" ).\")\nactors_parser.add_argument('offset', type=int, help=\"An integer indicating the distance between the first record and the input offset record.\\nDefault value: 0.\")\nactors_parser.add_argument('limit', type=int, help=\"Number of results returned per query.\\nDefault value: 20 records.\")\nactors_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/actors', doc={\n \"description\" : \"Endpoint which gets all actors and each of their corresponding information, or accepts parameters to refine the list of actors returned.\"\n})\nclass Actors(Resource):\n @api.doc('get_actors')\n @api.expect(actors_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised. Invalid token.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. 
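Login above mints a 36-character session token by drawing from letters and digits with the secrets module, the CSPRNG-backed way to do this. The same idea in two lines, plus the stdlib shorthand with comparable entropy (noted as an alternative, not what this file uses):

import secrets
import string

alphabet = string.ascii_letters + string.digits
token = ''.join(secrets.choice(alphabet) for _ in range(36))
print(len(token))                  # 36

print(secrets.token_urlsafe(27))   # 36 url-safe characters in one call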
Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global actorDF\n global analytics_api_call_count\n global top_actor\n analytics_api_call_count['actors'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n args = actors_parser.parse_args()\n actor_record = actorDF\n\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n\n # If name param is set\n if 'name' in args and args['name'] is not None:\n actor_name = args['name'].lower().strip('\\'').strip('\\\"')\n\n # Old way (just in case we need it)\n # q = 'actor_name == \\'' + actor_name + '\\''\n # actor_record = actor_record.query(q)\n\n actor_record = actor_record[actor_record['actor_name'].str.contains(actor_name) == True]\n\n # If gender param is set:\n if 'gender' in args and args['gender'] is not None:\n gender = args['gender'].upper()\n\n q = 'gender == \\'' + gender + '\\''\n actor_record = actor_record.query(q)\n\n actor_record, response_message, response_code = pagination(request, args, actor_record)\n\n if actor_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n elif response_code != 200:\n return response_message, response_code\n\n first_actor = actor_record['actor_name'].iloc[0]\n if first_actor in top_actor:\n top_actor[first_actor] += 1\n else:\n top_actor[first_actor] = 1\n updateCSV_horizontal(top_actor, 'top_actor.csv')\n # print(top_actor)\n # print(top_actor)\n # print(actor_record['actor_name'].iloc[0])\n if(len(actor_record.index) == 1):\n response_message['actor'] = actor_record.to_dict(orient='index')\n else :\n response_message['actors'] = actor_record.to_dict(orient='index')\n\n return response_message, 200\n\n# -- Specific Actor --\nspec_actor_parser = reqparse.RequestParser()\nspec_actor_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n@api.route('/actors/', doc={\n \"description\" : \"Endpoint which gets a specific actor and their corresponding information based on a unique id number.\"\n})\nclass SpecificActor(Resource):\n @api.doc('get_specific_actor')\n @api.expect(spec_actor_parser)\n @api.response(200, 'Success. Collection entry retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collection.')\n @api.response(403, 'Forbidden access to collection.')\n @api.response(404, 'Not found. 
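Every collection endpoint routes its DataFrame through a pagination(request, args, df) helper that is not defined in this file. From the call sites (a possibly-sliced frame, a message dict the caller extends with the records, and a status code), a hypothetical reconstruction could look like this; the defaults and message fields are guesses modeled on the Analytics response:

def pagination(request, args, df, default_limit=20):
    # Hypothetical offset/limit slicing over a DataFrame.
    offset = args['offset'] if args['offset'] is not None else 0
    limit = args['limit'] if args['limit'] is not None else default_limit
    if offset < 0 or limit <= 0:
        return df.iloc[0:0], {'error': 'Bad request',
                              'message': 'offset/limit out of range'}, 400
    page = df.iloc[offset:offset + limit]
    message = {
        'href': request.base_url,
        'offset': offset,
        'limit': limit,
        'results_shown': len(page),
        'total_results': len(df),
    }
    return page, message, 200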
Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self, actor_id):\n args = spec_actor_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n global analytics_api_call_count\n analytics_api_call_count['specific actor'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n if not actorDF.index.isin([actor_id]).any():\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n\n actor_record = actorDF.iloc[[actor_id]]\n\n return {\n 'actors': actor_record.to_dict(orient='index')\n }, 200\n\n# -- Analytics --\nanalytics_api_call_count_parser = reqparse.RequestParser()\nanalytics_api_call_count_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/analytics_api_call_count', doc={\n \"description\": \"Endpoint which returns API usage metrics, such as number of times an endpoint has been called.\"\n})\nclass Analytics(Resource):\n @api.doc('get_analytics')\n @api.expect(analytics_api_call_count_parser)\n @api.response(200, 'Success. Collection retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collection.')\n @api.response(403, 'Forbidden access to collection.')\n @api.response(404, 'Not found. Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global analytics_api_call_count\n # print(json.dumps(analytics))\n response = { \"href\": request.base_url,\n \"results_shown\": len(analytics_api_call_count),\n \"total_results\": len(analytics_api_call_count),\n \"analytics_api_call_count\": \"\"\n }\n response['analytics_api_call_count'] = analytics_api_call_count\n return response, 200\n\n# -- Directors --\n# director_parser\ndirector_parser = reqparse.RequestParser()\ndirector_parser.add_argument('name', type=str, help=\"Name of the director queried.\")\ndirector_parser.add_argument('offset', type=int, help=\"An integer indicating the distance between the first record and the input offset record.\\nDefault value: 0.\")\ndirector_parser.add_argument('limit', type=int, help=\"Number of results returned per query.\\nDefault value: 20 records.\")\ndirector_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/directors', doc={\n \"description\" : \"Endpoint which gets all directors and each of their corresponding information, or accepts parameters to refine the list of directors returned.\"\n})\nclass Director(Resource):\n @api.doc('get_directors')\n @api.expect(director_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collections.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. 
Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global directorDF\n global analytics_api_call_count\n global top_director\n analytics_api_call_count['directors'][0] += 1\n updateCSV_vertical(analytics_api_call_count)\n\n args = director_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n director_record = directorDF\n if 'name' in args and args['name'] is not None:\n director_name = args['name'].lower().strip('\\'').strip('\\\"')\n director_record = director_record[director_record['director_name'].str.contains(director_name) == True]\n\n director_record, response_message, response_code = pagination(request, args, director_record)\n\n if director_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n elif response_code != 200:\n return response_message, response_code\n\n first_director = director_record['director_name'].iloc[0]\n if first_director in top_director:\n top_director[first_director] += 1\n else:\n top_director[first_director] = 1\n updateCSV_horizontal(top_director, 'top_director.csv')\n if(len(director_record.index) == 1):\n response_message['director'] = director_record.to_dict(orient='index')\n else :\n response_message['directors'] = director_record.to_dict(orient='index')\n\n return response_message, 200\n\n\n# -- Specific Director --\nspec_dir_parser = reqparse.RequestParser()\nspec_dir_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/directors/', doc={\n \"description\": \"Endpoint which gets a specific director and their corresponding information based on a unique id number.\"\n})\nclass SpecificDirector(Resource):\n @api.doc('get_specific_director')\n @api.expect(spec_dir_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(401, 'Unauthorised. Invalid token.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(404, 'Not found. 
Collection not found.')\n def get(self, director_id):\n args = spec_dir_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n global analytics_api_call_count\n analytics_api_call_count['specific director'][0] += 1\n updateCSV_vertical(analytics_api_call_count)\n\n if not directorDF.index.isin([director_id]).any():\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n\n director_record = directorDF.iloc[[director_id]]\n\n return {\n 'director': director_record.to_dict(orient='index')\n }, 200\n\n# -- Genres --\n# genre_parser\ngenre_parser = reqparse.RequestParser()\ngenre_parser.add_argument('offset', type=int, help=\"An integer indicating the distance between the first record and the input offset record.\\nDefault value: 0.\")\ngenre_parser.add_argument('limit', type=int, help=\"Number of results returned per query.\\nDefault value: 20 records.\")\ngenre_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/genres', doc={\n \"description\": \"Endpoint which retrieves all movie genres.\"\n})\nclass Genres(Resource):\n @api.doc('get_genres')\n @api.expect(genre_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collections.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. Collections not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global genresDF\n global analytics_api_call_count\n analytics_api_call_count['genres'][0] += 1\n updateCSV_vertical(analytics_api_call_count)\n\n genres_record = genresDF\n args = genre_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n genres_record, response_message, response_code = pagination(request, args, genres_record)\n\n if genres_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n elif response_code != 200:\n return response_message, response_code\n\n if(len(genres_record.index) == 1):\n response_message['genre'] = genres_record.to_dict(orient='index')\n else :\n response_message['genres'] = genres_record.to_dict(orient='index')\n\n return response_message, 200\n\n# -- IMDB Score Prediction --\n# imdb_score_parser\nimdb_score_parser = reqparse.RequestParser()\nimdb_score_parser.add_argument('director_name', type=str, help=\"Director Name queried\", required=True)\nimdb_score_parser.add_argument('actor_1_name', type=str, help=\"Actor 1 Name queried\", required=True)\nimdb_score_parser.add_argument('actor_2_name', type=str, help=\"Actor 2 Name queried\", required=False)\nimdb_score_parser.add_argument('actor_3_name', type=str, help=\"Actor 3 Name queried\", required=False)\nimdb_score_parser.add_argument('budget', type=int, help=\"Budget Amount\", required=True)\nimdb_score_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n\n@api.route('/imdb_score_prediction', doc={\n \"description\": \"Endpoint which returns an IMDB score prediction for a given director name, at least one actor name and a given movie budget 
amount.\"\n})\nclass IMDBScorePredictor(Resource):\n @api.doc('get_imdb_score_prediction')\n @api.expect(imdb_score_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collections.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. Collections not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global directorDF\n global actorDF\n global analytics_api_call_count\n\n director_record = directorDF\n actor_record = actorDF\n analytics_api_call_count['score predictor'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n args = imdb_score_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n\n # budget\n budget = args['budget']\n\n # DIRECTOR\n director = args['director_name'].lower().strip('\\'').strip('\\\"')\n q = 'director_name == \\'' + director + '\\''\n director = director_record.query(q)\n\n if director_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Director Not Found'\n }, 404\n director_likes = director['facebook_likes'].iloc[0]\n\n # ACTOR 1\n actor1 = args['actor_1_name'].lower().strip('\\'').strip('\\\"')\n q = 'actor_name == \\'' + actor1 + '\\''\n actor1 = actor_record.query(q)\n if actor1.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Actor 1 Not Found'\n }, 404\n actor1_likes = actor1['facebook_likes'].iloc[0]\n\n # ACTOR 2\n if 'actor_2_name' in args and args['actor_2_name'] is not None:\n actor2 = args['actor_2_name'].lower().strip('\\'').strip('\\\"')\n q = 'actor_name == \\'' + actor2 + '\\''\n actor2 = actor_record.query(q)\n if actor2.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Actor 2 Not Found'\n }, 404\n actor2_likes = actor2['facebook_likes'].iloc[0]\n else :\n actor2_likes = actor_average\n\n # ACTOR 3\n if 'actor_3_name' in args and args['actor_3_name'] is not None:\n actor3 = args['actor_3_name'].lower().strip('\\'').strip('\\\"')\n q = 'actor_name == \\'' + actor3 + '\\''\n actor3 = actor_record.query(q)\n if actor3.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Actor 3 Not Found'\n }, 404\n actor3_likes = actor3['facebook_likes'].iloc[0]\n else :\n actor3_likes = actor_average\n\n return {\n 'movie_prediction_score': predict_score(director_likes,actor1_likes,actor2_likes,actor3_likes,budget)[1:-1]\n }, 200\n\n# -- Keywords --\n# keyword_parser\nkeyword_parser = reqparse.RequestParser()\nkeyword_parser.add_argument('offset', type=int, help=\"An integer indicating the distance between the first record and the input offset record.\\nDefault value: 0.\")\nkeyword_parser.add_argument('limit', type=int, help=\"Number of results returned per query.\\nDefault value: 20 records.\")\nkeyword_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n\n@api.route('/keywords', doc={\n \"description\": \"Endpoint which retrieves all the keywords ever used to classify IMDB movies.\"\n})\nclass Keywords(Resource):\n @api.doc('get_keywords')\n @api.expect(keyword_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. 
Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collections.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. Collections not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global keywordsDF\n global analytics_api_call_count\n analytics_api_call_count['keywords'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n keywords_record = keywordsDF\n args = keyword_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n keywords_record, response_message, response_code = pagination(request, args, keywords_record)\n\n if keywords_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n elif response_code != 200:\n return response_message, response_code\n\n if(len(keywords_record.index) == 1):\n response_message['keyword'] = keywords_record.to_dict(orient='index')\n else :\n response_message['keywords'] = keywords_record.to_dict(orient='index')\n\n return response_message, 200\n\n# -- Movie --\n# movie_parser\nmovie_parser = reqparse.RequestParser()\nmovie_parser.add_argument('name', type=str, help=\"Name of the movie queried.\")\nmovie_parser.add_argument('actor', type=str, help=\"Name of the actor(s) in the movie.\") # Multiple actors (union / intersection ?)\nmovie_parser.add_argument('director', type=str, help=\"Name of the director of the movie.\")\nmovie_parser.add_argument('screenwriter', type=str, help=\"Name of the screenwriter of the movie.\")\nmovie_parser.add_argument('keyword', type=str, help=\"Movie keywords.\")\nmovie_parser.add_argument('genre', type=str, help=\"Movie genres.\")\nmovie_parser.add_argument('budget', type=int, help=\"Movie budget.\")\nmovie_parser.add_argument('revenue', type=int, help=\"Movie revenue.\")\nmovie_parser.add_argument('offset', type=int, help=\"An integer indicating the distance between the first record and the input offset record.\\nDefault value: 0.\")\nmovie_parser.add_argument('limit', type=int, help=\"Number of results returned per query.\\nDefault value: 20 records.\")\nmovie_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n\n@api.route('/movies', doc={\n \"description\": \"Endpoint which gets all movies and each of their corresponding information, or accepts parameters to refine the list of movies returned.\"\n})\nclass Movies(Resource):\n @api.doc('get_all_movies')\n @api.expect(movie_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collections.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. 
Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global movieDF\n global analytics_api_call_count\n global top_movie\n analytics_api_call_count['movies'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n movie_record = movieDF\n expr = '(?=.*{})'\n args = movie_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n\n\n if 'name' in args and args['name'] is not None:\n words = args['name'].lower().strip('\\'').strip('\\\"')\n movie_record = movie_record[movie_record[\"title\"].str.contains(words) == True]\n\n if 'actor' in args and args['actor'] is not None:\n words = args['actor'].lower().strip('\\'').strip('\\\"').split(',')\n movie_record = movie_record[movie_record[\"cast\"].str.contains(r''.join(expr.format(w) for w in words), regex=True)]\n\n if 'director' in args and args['director'] is not None:\n words = args['director'].lower().strip('\\'').strip('\\\"').split(',')\n movie_record = movie_record[movie_record[\"directors\"].str.contains(r''.join(expr.format(w) for w in words), regex=True)]\n\n if 'screenwriter' in args and args['screenwriter'] is not None:\n words = args['screenwriter'].lower().strip('\\'').strip('\\\"').split(',')\n movie_record = movie_record[movie_record[\"screenwriters\"].str.contains(r''.join(expr.format(w) for w in words), regex=True)]\n\n if 'keyword' in args and args['keyword'] is not None:\n words = args['keyword'].lower().strip('\\'').strip('\\\"').split(',')\n movie_record = movie_record[movie_record[\"keywords\"].str.contains(r''.join(expr.format(w) for w in words), regex=True)]\n\n if 'genre' in args and args['genre'] is not None:\n words = args['genre'].lower().strip('\\'').strip('\\\"').split(',')\n movie_record = movie_record[movie_record[\"genres\"].str.contains(r''.join(expr.format(w) for w in words), regex=True)]\n\n # TODO Discuss whether budget should be <= or >=\n if 'budget' in args and args['budget'] is not None:\n movie_record = movie_record[movie_record[\"budget\"] <= args['budget']]\n\n if 'revenue' in args and args['revenue'] is not None:\n movie_record = movie_record[movie_record[\"revenue\"] >= args['revenue']]\n\n movie_record, response_message, response_code = pagination(request, args, movie_record)\n\n if movie_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n elif response_code != 200:\n return response_message, response_code\n\n first_movie = movie_record['title'].iloc[0]\n if first_movie in top_movie:\n top_movie[first_movie] += 1\n else:\n top_movie[first_movie] = 1\n updateCSV_horizontal(top_movie, 'top_movie.csv')\n if(len(movie_record.index) == 1):\n response_message['movie'] = movie_record.to_dict(orient='index')\n else :\n response_message['movies'] = movie_record.to_dict(orient='index')\n\n return response_message, 200\n\n\n# -- Specific Movie --\nspec_movie_parser = reqparse.RequestParser()\nspec_movie_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/movies/', doc={\n \"description\": \"Endpoint which gets a specific movie and their corresponding information based on a unique id number.\"\n})\nclass SpecificMovie(Resource):\n @api.doc('get_specific_movie')\n @api.expect(spec_movie_parser)\n @api.response(200, 'Success. 
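The Movies endpoint above turns a comma-separated filter into an AND-match by chaining regex lookaheads, '(?=.*a)(?=.*b)', into str.contains, so a row must contain every term to survive. The trick in isolation:

import pandas as pd

expr = '(?=.*{})'
df = pd.DataFrame({'cast': ['tom hanks,meg ryan', 'tom hardy', 'meg ryan']})

words = 'tom,meg'.split(',')
pattern = r''.join(expr.format(w) for w in words)    # (?=.*tom)(?=.*meg)
print(df[df['cast'].str.contains(pattern, regex=True)])
# keeps only the row containing both 'tom' and 'meg'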
Collection entry retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collection.')\n @api.response(403, 'Forbidden access to collection.')\n @api.response(404, 'Not found. Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self, movie_id):\n args = spec_movie_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n global analytics_api_call_count\n analytics_api_call_count['specific movie'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n if not movieDF.index.isin([movie_id]).any():\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n\n movie_record = movieDF.iloc[[movie_id]]\n\n\n return {\n 'movie': movie_record.to_dict(orient='index')\n }, 200\n\n\n# -- Writers --\n# writer_parser\nwriter_parser = reqparse.RequestParser()\nwriter_parser.add_argument('name', type=str, help=\"Name of the screenwriter queried.\")\nwriter_parser.add_argument('offset', type=int, help=\"An integer indicating the distance between the first record and the input offset record.\\nDefault value: 0.\")\nwriter_parser.add_argument('limit', type=int, help=\"Number of results returned per query.\\nDefault value: 20 records.\")\nwriter_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n\n@api.route('/screenwriters', doc={\n \"description\": \"Endpoint which gets all screenwriters and each of their corresponding information, or accepts parameters to refine the list of screenwriters returned.\"\n})\nclass Screenwriter(Resource):\n @api.doc('get_screenwriters')\n @api.expect(writer_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collections.')\n @api.response(403, 'Forbidden access to collections.')\n @api.response(404, 'Not found. 
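One detail worth noting in SpecificMovie.get: it guards with movieDF.index.isin([movie_id]) -- a label test -- but then fetches the row with iloc, which is positional, so the two only agree while the index happens to equal the row positions. A short illustration with a toy frame:

import pandas as pd

df = pd.DataFrame({'title': ['heat', 'ronin']}, index=[10, 11])
print(df.loc[[10]])  # label-based lookup: returns the 'heat' row
try:
    df.iloc[[10]]    # position-based: only positions 0 and 1 exist
except IndexError as exc:
    print('iloc failed:', exc)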
Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self):\n global screenwriterDF\n global analytics_api_call_count\n analytics_api_call_count['screenwriters'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n args = writer_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n writer_record = screenwriterDF\n\n args = writer_parser.parse_args()\n writer_record = screenwriterDF\n if 'name' in args and args['name'] is not None:\n writer_name = args['name'].lower().strip('\\'').strip('\\\"')\n writer_record = writer_record[writer_record['writer_name'].str.contains(writer_name) == True]\n\n # OLD\n # q = 'writer_name == \\'' + writer_name + '\\''\n # writer_record = screenwriterDF.query(q)\n\n writer_record, response_message, response_code = pagination(request, args, writer_record)\n\n if writer_record.empty:\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n elif response_code != 200:\n return response_message, response_code\n\n first_screenwriter = writer_record['writer_name'].iloc[0]\n # print(first_screenwriter)\n if first_screenwriter in top_screenwriter:\n top_screenwriter[first_screenwriter] += 1\n else:\n top_screenwriter[first_screenwriter] = 1\n updateCSV_horizontal(top_screenwriter, 'top_screenwriter.csv')\n if(len(writer_record.index) == 1):\n response_message['writer'] = writer_record.to_dict(orient='index')\n else :\n response_message['writers'] = writer_record.to_dict(orient='index')\n\n return response_message, 200\n\n# -- Specific Writer --\nspec_screenwrit_parser = reqparse.RequestParser()\nspec_screenwrit_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/screenwriters/', doc={\n \"description\": \"Endpoint which gets a specific screenwriter and their corresponding information based on a unique id number.\"\n})\nclass SpecificScreenwriter(Resource):\n @api.doc('get_specific_screenwriter')\n @api.expect(spec_screenwrit_parser)\n @api.response(200, 'Success. Collection entry retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised access to collection.')\n @api.response(403, 'Forbidden access to collection.')\n @api.response(404, 'Not found. 
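Each search endpoint bumps a per-name hit counter (top_screenwriter, top_movie, ...) and persists it through updateCSV_horizontal. The same pattern reduced to its core, with a plain csv writer standing in for the module's own helper:

import csv

def record_hit(name, counts, path='top_hits.csv'):
    # Increment the counter for this name, creating it on first sight.
    counts[name] = counts.get(name, 0) + 1
    # Persist one row per name so an analytics view can reload it later.
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        for key, value in counts.items():
            writer.writerow([key, value])

hits = {}
record_hit('Aaron Sorkin', hits)
record_hit('Aaron Sorkin', hits)
print(hits)  # {'Aaron Sorkin': 2}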
Collection not found.')\n @api.response(500, 'Internal Service Error.')\n def get(self, screenwriter_id):\n args = spec_screenwrit_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n global analytics_api_call_count\n global top_screenwriter\n analytics_api_call_count['specific screenwriter'][0]+= 1\n updateCSV_vertical(analytics_api_call_count)\n\n if not screenwriterDF.index.isin([screenwriter_id]).any():\n\n return {\n 'error': 'Not Found',\n 'message': 'Collection was not found'\n }, 404\n\n\n screenwriter_record = screenwriterDF.iloc[[screenwriter_id]]\n\n\n return {\n 'screenwriter': screenwriter_record.to_dict(orient='index')\n }, 200\n\n\n\n# Handles query pagination\ndef pagination(request, args, record):\n\n offset = 0\n limit = 20\n if 'offset' in args and args['offset'] is not None and args['offset'] > 0:\n offset = args['offset']\n elif 'offset' in args and args['offset'] is not None and args['offset'] < 0:\n return record, {\n 'error': \"Bad request.\",\n 'message': \"Invalid offset input. Offset should be >= 0.\"\n }, 400\n\n if 'limit' in args and args['limit'] is not None and args['limit'] > 0:\n limit = args['limit']\n elif 'limit' in args and args['limit'] is not None and args['limit'] < 0:\n return record, {\n 'error': \"Bad request.\",\n 'message': \"Invalid limit input. Limit should be >= 0.\"\n }, 400\n\n qsize = len(record.index)\n record = record.iloc[offset : offset + limit]\n qpagesize = len(record.index)\n\n querystring = \"\"\n for key in args.keys():\n if key == 'offset' or key == 'limit': continue\n if args[key] is not None:\n querystring += key + \"=\" + urlencode(str(args[key])) + \"&\"\n baseURL = request.base_url + \"?\" + querystring\n print(baseURL)\n firstURL = baseURL + 'limit=' + str(limit) + \"&\"\n lastURL = baseURL\n prevURL = baseURL\n nextURL = baseURL\n\n if offset - limit >= 0: # if there's nothing previous then it's just the original url\n prevURL += 'offset=' + str((offset - limit)) + '&limit=' + str(limit) + \"&\"\n else:\n prevURL = None\n\n if offset + limit < qsize:\n nextURL += 'offset=' + str((offset + limit)) + \"&\"\n if offset + limit + limit > qsize:\n nextURL += 'limit=' + str(limit) + \"&\"\n lastURL = nextURL\n else:\n nextURL += 'limit=' + str(limit) + \"&\"\n lastURL += 'offset=' + str((qsize - (qsize % limit))) + '&limit=' + str(limit) + \"&\"\n else:\n nextURL = None\n lastURL += 'offset=' + str((qsize - (qsize % limit))) + '&limit=' + str(limit) + \"&\"\n\n if firstURL is not None : firstURL = firstURL[:-1]\n if lastURL is not None : lastURL = lastURL[:-1]\n if prevURL is not None : prevURL = prevURL[:-1]\n if nextURL is not None : nextURL = nextURL[:-1]\n\n return record, {\n 'href' : request.url,\n 'offset': offset,\n 'limit' : limit,\n 'results_shown' : qpagesize,\n 'total_results' : qsize,\n 'first' : {\n 'href' : firstURL\n },\n 'prev' : {\n 'href' : prevURL\n },\n 'next' : {\n 'href' : nextURL\n },\n 'last' : {\n 'href' :lastURL\n }\n }, 200\n\n\ntop_act_parser = reqparse.RequestParser()\ntop_act_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/analytics_top_actor')\nclass TopActorAnalytics(Resource):\n @api.doc('get_analytics_top_actor')\n @api.expect(top_act_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. 
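pagination() slices the frame with iloc[offset : offset + limit] and derives first/prev/next/last links from the window position. The link arithmetic on its own, mirroring the original's rules (including its qsize - qsize % limit formula for the last page); the base URL is a placeholder:

def page_links(total, offset=0, limit=20, base='/movies?'):
    links = {'first': base + 'limit=%d' % limit}
    # A previous page exists only if stepping back stays non-negative.
    links['prev'] = (base + 'offset=%d&limit=%d' % (offset - limit, limit)
                     if offset - limit >= 0 else None)
    # A next page exists only if this window stops before the last record.
    links['next'] = (base + 'offset=%d&limit=%d' % (offset + limit, limit)
                     if offset + limit < total else None)
    links['last'] = base + 'offset=%d&limit=%d' % (total - total % limit, limit)
    return links

print(page_links(total=45, offset=20, limit=20))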
Incorrect syntax.')\n @api.response(401, 'Unauthorised. Invalid token.')\n @api.response(404, 'Not found. Collection not found.')\n def get(self):\n global top_actor\n args = top_act_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n # print(top_actor)\n # sorted(top_actor, key=numbermap.__getitem__)\n response = { \"href\": \"http://127.0.0.1:5000/analytics_top_actor\",\n \"results_shown\": len(top_actor),\n \"total_results\": len(top_actor),\n \"analytics_top_actor\": \"\"\n }\n response['analytics_top_actor'] = top_actor\n return response, 200\n\ntop_movAnal_parser = reqparse.RequestParser()\ntop_movAnal_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/analytics_top_movie')\nclass TopMovieAnalytics(Resource):\n @api.doc('get_analytics_top_movie')\n @api.expect(top_movAnal_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised. Invalid token.')\n @api.response(404, 'Not found. Collection not found.')\n def get(self):\n global top_movie\n args = top_movAnal_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n # print(top_actor)\n # sorted(top_actor, key=numbermap.__getitem__)\n response = { \"href\": \"http://127.0.0.1:5000/analytics_top_movie\",\n \"results_shown\": len(top_movie),\n \"total_results\": len(top_movie),\n \"analytics_top_movie\": \"\"\n }\n response['analytics_top_movie'] = top_movie\n return response, 200\n\ntop_dir_parser = reqparse.RequestParser()\ntop_dir_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/analytics_top_director')\nclass TopMovieAnalytics(Resource):\n @api.doc('get_analytics_top_director')\n @api.expect(top_dir_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised. Invalid token.')\n @api.response(404, 'Not found. Collection not found.')\n def get(self):\n global top_director\n args = top_dir_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n # print(top_actor)\n # sorted(top_actor, key=numbermap.__getitem__)\n response = { \"href\": \"http://127.0.0.1:5000/analytics_top_director\",\n \"results_shown\": len(top_director),\n \"total_results\": len(top_director),\n \"analytics_top_director\": \"\"\n }\n response['analytics_top_director'] = top_director\n return response, 200\n\n\ntop_screen_parser = reqparse.RequestParser()\ntop_screen_parser.add_argument('token', type=str, help=\"Token, use your login and login at /login for a token.\\nIf you don't have a login, you can register at /register\", required=True)\n\n@api.route('/analytics_top_screenwriter')\nclass TopScreenwriterAnalytics(Resource):\n @api.doc('get_analytics_top_screenwriter')\n @api.expect(top_screen_parser)\n @api.response(200, 'Success. Collection entries retrieved.')\n @api.response(400, 'Bad request. Incorrect syntax.')\n @api.response(401, 'Unauthorised. 
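The four analytics resources repeat the same body almost verbatim, and two of them even share the class name TopMovieAnalytics while the screenwriter response reuses the director's href -- both read like copy-paste slips. A hedged sketch of collapsing them into a single factory (throwaway app; the endpoint keyword keeps flask_restx's generated route names unique):

from flask import Flask
from flask_restx import Api, Resource

app = Flask(__name__)
api = Api(app)

def register_analytics(route, label, counter):
    @api.route(route, endpoint=label)
    class Analytics(Resource):
        def get(self):
            return {'href': route,
                    'results_shown': len(counter),
                    'total_results': len(counter),
                    label: counter}, 200

register_analytics('/analytics_top_movie', 'analytics_top_movie', {'Heat': 3})
register_analytics('/analytics_top_director', 'analytics_top_director', {'Michael Mann': 2})

print(app.test_client().get('/analytics_top_director').get_json())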
Invalid token.')\n @api.response(404, 'Not found. Collection not found.')\n def get(self):\n global top_screenwriter\n args = top_screen_parser.parse_args()\n if 'token' not in args or not valid_token(args[\"token\"]):\n return {\n 'error': 'Unauthorised',\n 'message': ' Invalid token'\n }, 401\n # print(top_actor)\n # sorted(top_actor, key=numbermap.__getitem__)\n response = { \"href\": \"http://127.0.0.1:5000/analytics_top_director\",\n \"results_shown\": len(top_screenwriter),\n \"total_results\": len(top_screenwriter),\n \"analytics_top_screenwriter\": \"\"\n }\n response['analytics_top_screenwriter'] = top_screenwriter\n return response, 200\n\n# APP ROUTING FUNCTIONS\n@app.route('/application/home', methods=['GET'])\ndef index():\n\n return render_template('index.html', directors=list(directorDF['director_name']),\n actors=list(actorDF['actor_name']),\n genres=list(genresDF['genres']))\n\n@app.route('/application/imdbscoreprediction_ui', methods=['GET', 'POST'])\ndef imdbscoreprediction_ui():\n form = request.form\n if request.method == 'POST':\n url = str(request.url_root) + 'imdb_score_prediction'+'?token='+ADMIN_TOKEN\n if 'url' in form and form['url'] is not None:\n url = form['url']\n else :\n input_actors = []\n for key in form:\n if key in form.keys():\n if len(form.getlist(key)) > 1 :\n separator = ','\n url += \"&\" + key + \"=\" + separator.join(form.getlist(key))\n elif form[key] is not None and form[key] != \"\":\n if key == 'budget':\n url += \"&\" + key + \"=\" + form[key]\n else :\n url += \"&\" + key + \"=\" + str(form[key])\n\n if key == 'budget':\n input_budget = form[key]\n elif key == 'director_name':\n input_director = form[key]\n elif key == 'actor_1_name' or key == 'actor_2_name' or key == 'actor_3_name':\n input_actors.append(form[key])\n\n print(url)\n result = req.get(url).json()\n print(result)\n return render_template('imdbscoreprediction.html', directors=list(directorDF['director_name']),\n actors=list(actorDF['actor_name']),\n genres=list(genresDF['genres']),\n score=result,\n input_director=input_director,\n input_actors=', '.join(input_actors),\n input_budget=input_budget)\n return render_template('imdbscoreprediction.html', directors=list(directorDF['director_name']),\n actors=list(actorDF['actor_name']),\n genres=list(genresDF['genres']))\n\n@app.route('/application/genres_ui', methods=['GET', 'POST'])\ndef genres_ui():\n global ADMIN_TOKEN\n\n if request.method == 'GET':\n return render_template('genres.html')\n elif request.method == 'POST':\n # Get perform API call\n url = str(request.url_root) + 'genres'+'?token='+ADMIN_TOKEN\n result = req.get(url).json()\n return render_template('genres.html', genres_dict=result)\n\n@app.route('/application/directors_ui', methods=['GET', 'POST'])\ndef directors_ui():\n form = request.form\n # print(form)\n director_name = \"\"\n if request.method == 'POST':\n if 'name' in form and form['name'] is not None and form['name'] != \"\":\n url = str(request.url_root) + 'directors'+'?token='+ADMIN_TOKEN + '&name=' + str(form['name'])\n director_name = form['name']\n elif 'url' in form and form['url'] is not None:\n url = form['url']\n else :\n url = str(request.url_root) + 'directors'+'?token='+ADMIN_TOKEN\n result = req.get(url).json()\n return render_template('directors.html', directors_dict=result, director_name=director_name)\n\n return render_template('directors.html')\n\n@app.route('/application/actors_ui', methods=['GET', 'POST'])\ndef actors_ui():\n form = request.form\n # print(form)\n\n actor_name = \"\"\n if 
request.method == 'POST':\n url = str(request.url_root) + 'actors'+'?token='+ADMIN_TOKEN\n if 'url' in form and form['url'] is not None:\n url = form['url']\n else :\n if 'name' in form and form['name'] is not None and form['name'] != \"\":\n url += '&name=' + form['name']\n actor_name = form['name']\n if 'gender' in form and form['gender'] is not None and form['gender'] != \"\":\n url += '&gender=' + form['gender']\n print(url)\n\n result = req.get(url).json()\n return render_template('actors.html', actors_dict=result, actor_name=actor_name)\n\n return render_template('actors.html')\n\n@app.route('/application/keywords_ui', methods=['GET','POST'])\ndef keywords_ui():\n global ADMIN_TOKEN\n form = request.form\n\n if request.method == 'GET':\n return render_template('keywords.html')\n elif request.method == 'POST':\n # Get perform API call\n if 'url' in form and form['url'] is not None:\n url = form['url']\n else :\n url = str(request.url_root) + 'keywords'+'?token='+ADMIN_TOKEN\n result = req.get(url).json()\n\n return render_template('keywords.html', keywords_dict=result)\n\n\n\n@app.route('/application/register_ui', methods=['GET', 'POST'])\ndef register_ui():\n global ADMIN_TOKEN\n\n if request.method == 'GET':\n return render_template('register.html')\n\n form = request.form\n files = {\n 'content-type': 'form-data'\n }\n # Get perform API call\n if 'username' in form and form['username'] is not None:\n files['username'] = form['username']\n\n if 'password' in form and form['password'] is not None:\n files['password'] = form['password']\n\n url = str(request.url_root) + 'register'\n result = req.post(url, data=files).json()\n return render_template('dashboard.html', register_result=result)\n\n\n@app.route('/application/analytics_ui', methods=['GET'])\ndef analytics_ui():\n\n# analytics_api_call_count = loadCSV_vertical('analytics.csv')\n# top_actor = loadCSV_horizontal('top_actor.csv')\n# top_movie = loadCSV_horizontal('top_movie.csv')\n# top_director = loadCSV_horizontal('top_director.csv')\n# top_screenwriter = loadCSV_horizontal('top_screenwriter.csv')\n return render_template('analytics.html', analytics=analytics_api_call_count)\n\n@app.route('/application/login_ui', methods=['GET', 'POST'])\ndef login_ui():\n global ADMIN_TOKEN\n\n if request.method == 'GET':\n return render_template('login.html')\n\n form = request.form\n files = {\n 'content-type': 'form-data'\n }\n # Get perform API call\n if 'username' in form and form['username'] is not None:\n files['username'] = form['username']\n\n if 'password' in form and form['password'] is not None:\n files['password'] = form['password']\n\n url = str(request.url_root) + 'login'\n result = req.post(url, data=files).json()\n return render_template('dashboard.html', login_result=result)\n\n\n@app.route('/application/movies_ui', methods=['GET', 'POST'])\ndef movies_ui():\n form = request.form\n print(form)\n movie_name = \"\"\n if request.method == 'POST':\n url = str(request.url_root) + 'movies'+'?token='+ADMIN_TOKEN\n if 'url' in form and form['url'] is not None:\n url = form['url']\n else :\n for key in form:\n if key in form.keys():\n if len(form.getlist(key)) > 1 :\n separator = ','\n url += \"&\" + key + \"=\" + separator.join(form.getlist(key))\n elif form[key] is not None and form[key] != \"\":\n url += \"&\" + key + \"=\" + form[key]\n if key == 'name':\n movie_name = form['name']\n print(url)\n result = req.get(url).json()\n return render_template('movies.html', directors=list(directorDF['director_name']),\n 
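These UI views assemble each API call by concatenating '&key=value' fragments onto the URL by hand, which leaves spaces and commas unescaped. urllib.parse.urlencode builds the same query string with escaping handled; a small sketch with placeholder values:

from urllib.parse import urlencode

params = {'token': 'ADMIN_TOKEN', 'name': 'de niro', 'genre': 'crime,drama'}
url = 'http://127.0.0.1:5000/movies?' + urlencode(params)
print(url)  # ...movies?token=ADMIN_TOKEN&name=de+niro&genre=crime%2Cdrama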
actors=list(actorDF['actor_name']),\n genres=list(genresDF['genres']),\n keywords=list(keywordsDF['keywords']),\n screenwriter=list(screenwriterDF['writer_name']),\n movie_dict=result,\n movie_name=movie_name\n )\n\n return render_template('movies.html', directors=list(directorDF['director_name']),\n actors=list(actorDF['actor_name']),\n genres=list(genresDF['genres']),\n keywords=list(keywordsDF['keywords']),\n screenwriter=list(screenwriterDF['writer_name'])\n )\n\n@app.route('/application/screenwriters_ui', methods=['GET', 'POST'])\ndef screenwriters_ui():\n form = request.form\n # print(form)\n screenwriter_name = \"\"\n if request.method == 'POST':\n if 'name' in form and form['name'] is not None:\n url = str(request.url_root) + 'screenwriters'+'?token='+ADMIN_TOKEN + '&name=' + form['name']\n screenwriter_name = form['name']\n print(url)\n elif 'url' in form and form['url'] is not None:\n url = form['url']\n else :\n url = str(request.url_root) + 'screenwriters'+'?token='+ADMIN_TOKEN\n result = req.get(url).json()\n return render_template('screenwriters.html', screenwriters_dict=result, screenwriter_name=screenwriter_name)\n\n return render_template('screenwriters.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":54909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"525332816","text":"import scipy as sp\nfrom scipy import matmul, rand,float64,float32,float16,int16,int32,int64, zeros, linalg\n\nimport numpy as np\nfrom numpy import double, half, single, longdouble, fill_diagonal\n\nimport matplotlib.ticker\nfrom matplotlib import pyplot as plt\n\nfrom time import perf_counter\n\n\nimport sys\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning) \n\nprint (\"running\")\n\ndef lapmat(N,dtype=float32):\n MATRIZ = zeros((N,N),dtype=dtype)\n fill_diagonal(MATRIZ,2)\n for i in range(N):\n for j in range(N):\n if i+1 == j or i-1 == j:\n MATRIZ[i][j]=-1\n return (MATRIZ)\n\n\nMNL = [2,10,50,100,300,800,1000,1500,2000]\nMTL = []\nMML = []\n\ntipoDeDato = sp.single\n\n\nNcorridas = 10 \n\nfor corrida in range(Ncorridas):\n \n MTL_temp = []\n MML_temp = []\n\n for N in MNL: \n A = lapmat(N,tipoDeDato)\n t1 = perf_counter()\n C = sp.linalg.inv(A,overwrite_a=True) \n t2 = perf_counter()\n dt = t2 - t1\n \n MTL_temp.append(dt)\n totalmemory = 0\n for i in range(N):\n for j in range(N):\n totalmemory += sys.getsizeof(A[i][j])\n size = totalmemory # Ya se incluye el N*N*BytesDelTipoDeDato\n MML_temp.append(size)\n \n MTL.append(MTL_temp)\n MML.append(MML_temp)\n \n\n#----------------------\n# PLOTEO DE GRAFICOS\n#----------------------\n\nfig, axes = plt.subplots(2, 1, figsize=(8,10))\n\n\n\nfor i in range(Ncorridas):\n axes[0].plot(MNL,MTL[i],\"--o\")\n axes[0].set_xscale(\"log\")\n x = MNL\n \n axes[0].set_xticks(x)\n axes[0].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\n axes[0].set_xticklabels(\" \")\n\n y1 = [0.1e-3, 1e-3,1e-2,0.1,1,10,60,60*10]\n yl = ['0.1 ms',\"1 ms\",\"10 ms\",\"0.1 s\",\"1s\",\"10 s\", \"1 min\"]\n axes[0].set_yscale(\"log\")\n axes[0].set_yticks(y1)\n axes[0].set_yticklabels(yl,fontweight = 'bold')\n\n \n axes[1].plot(MNL,MML[i],\"--o\")\n axes[1].set_xscale(\"log\")\n x = MNL\n axes[1].set_xticks(x)\n axes[1].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\n 
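lapmat in the timing benchmark above fills the N x N tridiagonal Laplacian with an O(N^2) Python loop; numpy can assemble the same matrix from its three diagonals in one vectorized expression. A sketch of the equivalent construction:

import numpy as np

def lapmat_vectorized(N, dtype=np.float32):
    # 2 on the main diagonal, -1 on the first super- and sub-diagonals.
    return (2 * np.eye(N, dtype=dtype)
            - np.eye(N, k=1, dtype=dtype)
            - np.eye(N, k=-1, dtype=dtype))

print(lapmat_vectorized(4))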
axes[1].set_xticklabels(x,rotation=45,fontweight = 'bold')\n \n y1 = [ 10**3, 10**4 ,10**5 , 10**6 ,10**7 ,10**8 ,10**9 ,8*10**9 ,10**11]\n yl = ['1 KB',\"10 KB\",\"100 KB\",\"1 MB\",\"10 MB\",\"100 MB\", \"1 GB\", \"8 GB\",\"100 GB\"]\n axes[1].set_yscale(\"log\")\n axes[1].set_yticks(y1)\n axes[1].set_yticklabels(yl,fontweight = 'bold')\n\n\naxes[1].axhline(8*(10**9),0,1000,ls=\"--\",c=\"r\",lw=3)\naxes[0].set_title('RENDIMIENTO CASO 3 - SINGLE \\n [Max Poblete – Macbook Pro 13\"]', fontsize=15,fontweight = 'bold')\naxes[0].set_ylabel('Tiempo Transcurrido',fontsize=15,fontweight = 'bold')\naxes[1].set_ylabel('Uso de Memoria',fontsize=15,fontweight = 'bold')\naxes[1].set_xlabel('Tamaño de la Matriz (N)',fontsize=15,fontweight = 'bold')\naxes[0].grid()\naxes[1].grid()\n\naxes[1].get_yticklabels()[7].set_color('red') \n\nplt.savefig('timing_inv_caso_3_single.png', dpi=400)\nplt.show()\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"timing_inv_caso_3_single.py","file_name":"timing_inv_caso_3_single.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"460300543","text":"c={}\r\nfor i in range(int(input())):\r\n a=input()\r\n b=float(input())\r\n c[a]=b\r\nz=[]\r\na=list(c.values())\r\na=a[0]\r\nfor i in list(c.values())[1:]:\r\n if ia and i Pending Delete\n if repo_dict[img_key]['state'] == \"Pending Delete\":\n new_dict[repo_key][img_key]['state'] = \"Pending Delete\"\n except KeyError:\n #Doesn't exist in the new one yet\n # if it was a pending delete\n if repo_dict[img_key]['state'] == \"Pending Delete\":\n new_dict[repo_key][img_key] = repo_dict[img_key]\n # if it was a pending transfer and it still doesnt exist: add as Pending Xfer\n if repo_dict[img_key]['state'] == \"Pending Transfer\":\n new_dict[repo_key][img_key] = repo_dict[img_key]\n\n # we also need to check for changes in the hidden status of images\n # the simple way is to just assign the old value to the new dict\n # however if the image is newly created it won't yet have a hidden attribute\n # new images will always be private and recieve \"False\" for the hidden attribute\n try:\n new_dict[repo_key][img_key]['hidden'] = repo_dict[img_key]['hidden']\n except:\n # need a try block here incase we get here when an image was deleted\n # faster than we could provide the state change. 
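The reconciliation pass in the glint utilities carries in-flight states from the previous snapshot into the freshly polled one: a surviving image keeps its Pending Delete mark, and a pending delete or transfer that has not yet settled is re-added when the new poll misses it. The same rule reduced to plain dicts keyed by image id:

def merge_pending_states(old_repo, new_repo):
    for img_id, old in old_repo.items():
        if img_id in new_repo:
            # Still present: a delete that is in flight stays pending.
            if old['state'] == 'Pending Delete':
                new_repo[img_id]['state'] = 'Pending Delete'
        elif old['state'] in ('Pending Delete', 'Pending Transfer'):
            # Missing from the fresh poll, but an operation is still in flight.
            new_repo[img_id] = old
    return new_repo

old = {'a': {'state': 'Pending Delete'}, 'b': {'state': 'Pending Transfer'}}
new = {'a': {'state': 'Present'}}
print(merge_pending_states(old, new))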
It will be gone already\n # so we can just ignore it and not worry about adding to the dictionary\n try:\n new_dict[repo_key][img_key]['hidden'] = False\n except:\n pass\n\n return json.dumps(new_dict)\n\n# returns a jsonified python dictionary containing the image list for a given project\n# If the image list doesn't exist in redis it returns False\n# Redis info should be moved to a config file\ndef get_images_for_group(group_name):\n try:\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n return red.get(group_name)\n except KeyError:\n logger.error(\"Couldnt find image list for group %s\", group_name)\n return False\n\n# accepts a project as key string and a jsonified dictionary of the images and stores them in redis\n# Redis info should be moved to a config file\ndef set_images_for_group(group_name, json_img_dict):\n try:\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.set(group_name, json_img_dict)\n\n except Exception:\n logger.error(\"Unknown exception while trying to set images for: %s\", group_name)\n\n\n# returns dictionary containing any conflicts for a given account name\ndef get_conflicts_for_group(group_name):\n if group_name is None:\n logger.info(\"Couldnt find conflict list; no group provided.\")\n return None\n try:\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n conflict_key = group_name + \"_conflicts\"\n json_conflict_dict = red.get(conflict_key)\n if json_conflict_dict is not None:\n return json.loads(json_conflict_dict)\n else:\n return None\n except KeyError:\n logger.info(\"Couldnt find conflict list for group %s\", group_name)\n return None\n\ndef set_conflicts_for_group(group_name, conflict_dict):\n try:\n json_conflict_dict = json.dumps(conflict_dict)\n conflict_key = group_name + \"_conflicts\"\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.set(conflict_key, json_conflict_dict)\n\n except Exception:\n logger.error(\"Unknown exception while trying to set conflicts for: %s\", group_name)\n\n\n# Returns a unique list of (image, name) tuples that are not hidden in glint\n# May be a problem if two sites have the same image (id) but with different names\n# as the tuple will no longer be unique\ndef get_unique_image_list(group_name):\n image_dict = json.loads(get_images_for_group(group_name))\n image_set = set()\n # make a dictionary of all the images in the format key:value = image_id:list_of_repos\n # start by making a list of the keys, using a set will keep them unique\n for repo_key in image_dict:\n for image_id in image_dict[repo_key]:\n# if not image_dict[repo_key][image_id]['hidden']:\n image_set.add(image_dict[repo_key][image_id]['name'])\n return sorted(image_set, key=lambda s: s.lower())\n\n\n# similar to \"get_unique_image_list\", this function returns a set of tuples\n# representing all the images in glint such that their hidden status can be toggled\ndef get_hidden_image_list(group_name):\n image_dict = json.loads(get_images_for_group(group_name))\n image_set = set()\n # make a dictionary of all the images in the format key:value = image_id:list_of_repos\n # start by making a list of the keys, using a set will keep them unique\n for repo_key in image_dict:\n for image_id in image_dict[repo_key]:\n image_set.add(image_dict[repo_key][image_id]['name'])\n return sorted(image_set, key=lambda s: s.lower())\n\n\n# accepts image dictionary and returns a dictionary that 
inverses the format to\n# repo1{\n# img_name: img_key\n# ...\n#}\n# repo2{\n# img_name: img_key\n# ...\n#}\ndef build_id_lookup_dict(image_dict):\n reverse_dict = {}\n for repo in image_dict:\n reversed_repo = {}\n for image in image_dict[repo]:\n reversed_repo[image_dict[repo][image]['name']] = image\n reverse_dict[repo] = reversed_repo\n return reverse_dict\n\n\n# Accepts the image dictionary and checks if there are any repos that contain conflicts\n#\n# Type 1 - Image1 and Image2 have the same name but are different images.\n# Type 2 - Image1 and Image2 have the same name and are the same image.\n# Type 3 - Image1 and Image2 have different names but are the same image.\n\ndef check_for_image_conflicts(json_img_dict):\n image_dict = json.loads(json_img_dict)\n conflicts_dict = {}\n for repo in image_dict:\n conflicts = list()\n for image in image_dict[repo]:\n if image_dict[repo][image]['checksum'] == \"No Checksum\":\n continue\n for image2 in image_dict[repo]:\n if image_dict[repo][image2]['checksum'] == \"No Checksum\":\n continue\n if image is not image2:\n try:\n #Check for name conflicts (type 1/type 2)\n if image_dict[repo][image]['name'] == image_dict[repo][image2]['name']:\n # Mayday we have a duplicate\n # check if it is type 1 or type 2 conflint\n\n if image_dict[repo][image]['checksum'] == image_dict[repo][image2]['checksum']:\n logging.error(\"Type 2 image conflict detected.\")\n # Type 2\n conflict = {\n 'type': 2,\n 'image_one': image,\n 'image_one_name': image_dict[repo][image]['name'],\n 'image_one_visibility': image_dict[repo][image]['visibility'],\n 'image_two': image2,\n 'image_two_name': image_dict[repo][image2]['name'],\n 'image_two_visibility': image_dict[repo][image2]['visibility']\n }\n duplicate_entry = False\n for entry in conflicts:\n if entry['image_one'] == conflict['image_two'] and entry['image_two'] == conflict['image_one']:\n duplicate_entry = True\n break\n if not duplicate_entry:\n conflicts.append(conflict)\n\n else:\n logging.error(\"Type 1 image conflict detected.\")\n # Type 1\n conflict = {\n 'type': 1,\n 'image_one': image,\n 'image_one_name': image_dict[repo][image]['name'],\n 'image_one_visibility': image_dict[repo][image]['visibility'],\n 'image_two': image2,\n 'image_two_name': image_dict[repo][image2]['name'],\n 'image_two_visibility': image_dict[repo][image2]['visibility']\n }\n duplicate_entry = False\n for entry in conflicts:\n if entry['image_one'] == conflict['image_two'] and entry['image_two'] == conflict['image_one']:\n duplicate_entry = True\n break\n if not duplicate_entry:\n conflicts.append(conflict)\n\n # Check for checksum conflicts\n # (type 3, since type 2 will be caught by the first check)\n if image_dict[repo][image]['checksum'] == image_dict[repo][image2]['checksum']:\n logging.error(\"Type 3 image conflict detected.\")\n # Type 3\n conflict = {\n 'type': 3,\n 'image_one': image,\n 'image_one_name': image_dict[repo][image]['name'],\n 'image_one_visibility': image_dict[repo][image]['visibility'],\n 'image_two': image2,\n 'image_two_name': image_dict[repo][image2]['name'],\n 'image_two_visibility': image_dict[repo][image2]['visibility']\n }\n duplicate_entry = False\n for entry in conflicts:\n if entry['image_one'] == conflict['image_two'] and entry['image_two'] == conflict['image_one']:\n duplicate_entry = True\n break\n if not duplicate_entry:\n conflicts.append(conflict)\n except Exception as exc:\n logger.error(\"Error when checking for conflicts on images: %s and %s\",\\\n image, image2)\n logger.error(exc)\n 
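check_for_image_conflicts compares every image pair inside a repo: same name and same checksum is type 2, same name with different checksums is type 1, and the same checksum under two names is type 3. The classification rule on its own, over minimal (name, checksum) records:

from itertools import combinations

def classify(a, b):
    same_name = a['name'] == b['name']
    same_sum = a['checksum'] == b['checksum']
    if same_name and same_sum:
        return 2     # true duplicate
    if same_name:
        return 1     # name collision, different content
    if same_sum:
        return 3     # same content under two names
    return None      # no conflict

images = [{'name': 'x', 'checksum': 'aaa'},
          {'name': 'x', 'checksum': 'bbb'},
          {'name': 'y', 'checksum': 'aaa'}]
for a, b in combinations(images, 2):
    print(a['name'], b['name'], '->', classify(a, b))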
logger.error(image_dict)\n if conflicts:\n conflicts_dict[repo] = conflicts\n\n\n if conflicts_dict:\n return conflicts_dict\n else:\n return None\n\n# Accepts a list of images (names), a project and a repo\n# Cross references the image repo in redis against the given image list\n# Either returns a list of transactions or posts them to redis to be\n# picked up by another thread.\ndef parse_pending_transactions(group_name, cloud_name, image_list, user):\n try:\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n proj_dict = json.loads(red.get(group_name))\n repo_dict = proj_dict[cloud_name]\n\n # This function takes a repo dictionary and returns a dictionary that has the format:\n # image_name: image_id\n # This is needed since we are now using image name as the unique identifier not the img id\n img_translation = __get_image_ids(repo_dict)\n\n for image in image_list:\n # If image is not in the image list we need to make a pending transfer\n if not img_translation.get(image, False):\n # MAKE TRANSFER\n # We need to get disk_format and container_format\n # from another repo that has this image\n img_details = __get_image_details(group_name=group_name, image=image)\n disk_format = img_details[0]\n container_format = img_details[1]\n transaction = {\n 'user': user,\n 'action': 'transfer',\n 'group_name': group_name,\n 'cloud_name': cloud_name,\n 'image_name': image,\n 'disk_format': disk_format,\n 'container_format': container_format\n }\n trans_key = group_name + \"_pending_transactions\"\n red.rpush(trans_key, json.dumps(transaction))\n increment_transactions()\n #else it is already there and do nothing\n else:\n pass\n\n # Now we need to check deletes\n for image_key in repo_dict:\n #If the key exists but it isn't in the image list make a pending delete unless it is hidden\n if repo_dict[image_key]['name'] not in image_list and repo_dict[image_key]['hidden'] is False:\n # if its pending already we don't need to touch it\n if repo_dict[image_key].get('state') not in {'Pending Delete', 'Pending Transfer'}:\n # MAKE DELETE\n transaction = {\n 'user': user,\n 'action': 'delete',\n 'group_name': group_name,\n 'cloud_name': cloud_name,\n 'image_id': image_key,\n 'image_name': repo_dict[image_key].get('name')\n }\n trans_key = group_name + \"_pending_transactions\"\n red.rpush(trans_key, json.dumps(transaction))\n increment_transactions()\n\n except KeyError as exc:\n logger.error(exc)\n logger.error(\"Couldnt find image list for group %s\", group_name)\n return False\n\n\n# This function reads pending transactions from a redis queue and spawns celery\n# tasks to perform the file transfers. 
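parse_pending_transactions queues work as JSON blobs on a redis list -- rpush from the web thread, lpop in the worker until the list runs dry. The queue idiom in isolation (connection values are illustrative, and a reachable redis server is assumed):

import json
import redis

red = redis.StrictRedis(host='localhost', port=6379, db=0)

def enqueue(key, transaction):
    red.rpush(key, json.dumps(transaction))

def drain(key):
    # lpop returns None once the list is empty, which ends the loop.
    while True:
        raw = red.lpop(key)
        if raw is None:
            break
        yield json.loads(raw)

enqueue('demo_pending_transactions', {'action': 'delete', 'image_id': '42'})
for txn in drain('demo_pending_transactions'):
    print(txn)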
Since our repo dictionaries are using the\n# uuid as the image key we need to connect to the repo and create a placeholder\n# image and retrieve the img id (uuid) to use as the repo image key\n# Then finally we can call the asynch celery tasks\ndef process_pending_transactions(group_name, json_img_dict):\n from .celery_app import transfer_image, delete_image, upload_image\n from .db_util import get_db_base_and_session\n\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n trans_key = group_name + '_pending_transactions'\n img_dict = json.loads(json_img_dict)\n\n # seems like there is no assignment in while conditionals for python\n # so We will have to be smart and use break\n while True:\n # setup database objects\n Base, session = get_db_base_and_session()\n Group_Resources = Base.classes.csv2_clouds\n trans = red.lpop(trans_key)\n if trans is None:\n break\n transaction = json.loads(trans)\n # Update global dict and create transfer or delete task\n if transaction['action'] == 'transfer':\n # First we need to create a placeholder img and get the new image_id\n # This may cause an error if the same repo is added twice, perhaps we\n # can screen for this when repos are added\n repo_obj = session.query(Group_Resources).filter(Group_Resources.group_name == transaction['group_name'], Group_Resources.cloud_name == transaction['cloud_name']).first()\n\n\n rcon = repo_connector(\n auth_url=repo_obj.authurl,\n project=repo_obj.project,\n username=repo_obj.username,\n password=repo_obj.password,\n user_domain_name=repo_obj.user_domain_name,\n project_domain_name=repo_obj.project_domain_name)\n new_img_id = rcon.create_placeholder_image(\n transaction['image_name'],\n transaction['disk_format'],\n transaction['container_format'])\n # Make a new img dict\n new_img_dict = {\n 'name': transaction['image_name'],\n 'state': 'Pending Transfer',\n 'disk_format': transaction['disk_format'],\n 'container_format': transaction['container_format'],\n 'checksum': \"No Checksum\"\n }\n img_dict[transaction['cloud_name']][new_img_id] = new_img_dict\n\n # queue transfer task\n transfer_image.delay(\n image_name=transaction['image_name'],\n image_id=new_img_id,\n group_name=group_name,\n auth_url=repo_obj.authurl,\n project_tenant=repo_obj.project,\n username=repo_obj.username,\n password=repo_obj.password,\n requesting_user=transaction['user'],\n cloud_name=repo_obj.cloud_name,\n user_domain_name=repo_obj.user_domain_name,\n project_domain_name=repo_obj.project_domain_name)\n\n elif transaction['action'] == 'delete':\n # First check if it exists in the redis dictionary, if it doesn't exist we can't delete it\n if img_dict[transaction['cloud_name']].get(transaction['image_id']) is not None:\n # Set state and queue delete task\n repo_obj = session.query(Group_Resources).filter(Group_Resources.group_name == transaction['group_name'], Group_Resources.cloud_name == transaction['cloud_name']).first()\n\n img_dict[transaction['cloud_name']][transaction['image_id']]['state'] = 'Pending Delete'\n delete_image.delay(\n image_id=transaction['image_id'],\n image_name=transaction['image_name'],\n group_name=group_name,\n auth_url=repo_obj.authurl,\n project_tenant=repo_obj.project,\n username=repo_obj.username,\n password=repo_obj.password,\n requesting_user=transaction['user'],\n cloud_name=repo_obj.cloud_name,\n user_domain_name=repo_obj.user_domain_name,\n project_domain_name=repo_obj.project_domain_name)\n\n elif transaction['action'] == 'upload':\n req_user = 
transaction['user']\n img_name = transaction['image_name']\n image_path = transaction['local_path']\n disk_format = transaction['disk_format']\n container_format = transaction['container_format']\n repo_obj = session.query(Group_Resources).filter(Group_Resources.group_name == transaction['group_name'], Group_Resources.cloud_name == transaction['cloud_name']).first()\n upload_image.delay(\n image_name=img_name,\n image_path=image_path,\n auth_url=repo_obj.authurl,\n project_tenant=repo_obj.project,\n username=repo_obj.username,\n password=repo_obj.password,\n requesting_user=req_user,\n disk_format=disk_format,\n container_format=container_format,\n user_domain_name=repo_obj.user_domain_name,\n project_domain_name=repo_obj.project_domain_name)\n\n return json.dumps(img_dict)\n\n\n# Accepts a list of images (names), a project and a repo\n# Cross references the image repo in redis against the given image list\n# to toggle the hidden status of images\ndef parse_hidden_images(group_name, cloud_name, image_list, user):\n try:\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n proj_dict = json.loads(red.get(group_name))\n repo_dict = proj_dict[cloud_name]\n\n #if the image isn't in the image_list, hidden=False\n for image_key in repo_dict:\n if repo_dict[image_key]['name'] not in image_list:\n if repo_dict[image_key]['hidden'] is True:\n #queue state change for hidden status (set to False)\n queue_state_change(\n group_name,\n cloud_name,\n image_key,\n repo_dict[image_key]['state'], False)\n else:\n # hidde should be true\n if repo_dict[image_key]['hidden'] is False:\n #queue state change for hidden status (set to True)\n queue_state_change(\n group_name,\n cloud_name,\n image_key,\n repo_dict[image_key]['state'], True)\n except:\n logger.error(\"Error occured when parsing hidden status of images.\")\n return True\n\n# Queues a state change in redis for the periodic task to perform\n# Key will take the form of project_pending_state_changes\n# and thus there will be a seperate queue for each project\ndef queue_state_change(group_name, cloud_name, img_id, state, hidden):\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n state_key = group_name + '_pending_state_changes'\n if hidden is not None:\n state_change = {\n 'state': state,\n 'image_id': img_id,\n 'cloud_name':cloud_name,\n 'hidden': hidden\n }\n increment_transactions()\n else:\n state_change = {\n 'state': state,\n 'image_id': img_id,\n 'cloud_name':cloud_name,\n }\n red.rpush(state_key, json.dumps(state_change))\n return True\n\n\n\ndef process_state_changes(group_name, json_img_dict):\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n state_key = group_name + '_pending_state_changes'\n img_dict = json.loads(json_img_dict)\n while True:\n raw_state_change = red.lpop(state_key)\n if raw_state_change is None:\n break\n state_change = json.loads(raw_state_change)\n #check if it is a hidden state change or a image state change\n if 'hidden' in state_change:\n #hidden state change\n img_dict[state_change['cloud_name']][state_change['image_id']]['hidden'] = state_change['hidden']\n # only hidden state changes count as transactions since it is the only way to kick\n # the server out of dormant state to proccess the chagnes\n decrement_transactions()\n else:\n #image state change\n if state_change['state'] == \"deleted\":\n # Remove the key\n img_dict[state_change['cloud_name']].pop(state_change['image_id'], 
None)\n else:\n # Update the state\n img_dict[state_change['cloud_name']][state_change['image_id']]['state'] = state_change['state']\n\n return json.dumps(img_dict)\n\n# This function accepts a project and an image name and looks through the image\n# dictionary until it finds a match where state='present' and returns a tuple of\n# (auth_url, tenant, username, password, img_id)\ndef find_image_by_name(group_name, image_name):\n # setup database objects\n from .db_util import get_db_base_and_session\n Base, session = get_db_base_and_session()\n Group_Resources = Base.classes.csv2_clouds\n\n image_dict = json.loads(get_images_for_group(group_name))\n for cloud in image_dict:\n for image in image_dict[cloud]:\n if image_dict[cloud][image]['name'] == image_name:\n #if image_dict[cloud][image]['state'] == 'Present' and image_dict[cloud][image]['hidden'] is False:\n if image_dict[cloud][image]['state'] == 'Present':\n repo_obj = session.query(Group_Resources).filter(Group_Resources.group_name == group_name, Group_Resources.cloud_name == cloud).first()\n return (repo_obj.authurl, repo_obj.project, repo_obj.username,\\\n repo_obj.password, image, image_dict[cloud][image]['checksum'],\\\n repo_obj.user_domain_name, repo_obj.project_domain_name)\n return False\n\n# This function accepts info to uniquely identify an image as well as\n# the local location of the image such that the image can be used for\n# download or a transfer without having to download the image again.\n# Tuple format: (image_name, image_checksum, full_path, current_time)\ndef add_cached_image(image_name, image_checksum, full_path):\n current_time = int(time.time())\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n img_tuple = (image_name, image_checksum, full_path, current_time)\n red.rpush(\"glint_img_cache\", img_tuple)\n return False\n\n# This function accepts a tuple representing an item in the cache and removes it from the list\ndef del_cached_image(img_tuple):\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.lrem(\"glint_img_cache\", 0, str(img_tuple))\n return True\n\n# This function checks the cache for a local copy of a given image file\n# if found it updates the timestamp and returns the filepath\n# Tuple format: (image_name, image_checksum, full_path, current_time)\ndef check_cached_images(image_name, image_checksum):\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n cache_tuple_list = red.lrange(\"glint_img_cache\", 0, -1)\n\n for img_tuple in cache_tuple_list:\n img_tuple = literal_eval(str(img_tuple))\n if image_name == str(img_tuple[0]) and image_checksum == str(img_tuple[1]):\n #update entry and return path\n red.lrem(\"glint_img_cache\", 0, str(img_tuple))\n new_tuple = (img_tuple[0], img_tuple[1], img_tuple[2], int(time.time()))\n red.rpush(\"glint_img_cache\", new_tuple)\n return img_tuple[2]\n\n return None\n\n# This function checks all the cache folders for files that aren't\n# in the image cache and removes them\ndef do_cache_cleanup():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n cache_tuple_list = red.lrange(\"glint_img_cache\", 0, -1)\n for x in range(0, 11):\n #this is to clean top level folder, range will need to be adjusted once it is configurable\n if x == 10:\n path = \"/var/www/glintv2/scratch\"\n else:\n path = \"/var/www/glintv2/scratch/\" + str(x)\n files = os.listdir(path)\n for f in files:\n #check if it 
is in cache\n file_found = False\n for cached_item in cache_tuple_list:\n cached_item = literal_eval(str(cached_item))\n if path + \"/\" + f == cached_item[2]:\n #file in cache, break and go onto next file\n file_found = True\n break\n if not file_found:\n try:\n os.remove(path + \"/\" + f)\n logger.info(\"No cache entry found, removed: \" + path + \"/\" + f)\n except OSError:\n # Catch attempts to delete directories\n pass\n\n # now that the initial cleanup is done we need to check for\n # any items in the cache that are missing locally\n # while we're at it let us check for any expiring items\n expire_time = config.cache_expire_time\n current_time = int(time.time())\n for cached_item in cache_tuple_list:\n cached_item = literal_eval(str(cached_item))\n if not os.path.exists(cached_item[2]):\n #file is missing, remove it from cache\n logger.error(\"Cached file missing at %s, removing cache entry.\", cached_item[2])\n del_cached_image(cached_item)\n elif (int(current_time-cached_item[3])) > expire_time:\n #item has expired and remove it\n logger.info(\"Cached image %s has expired, removing from cache.\", cached_item[0])\n os.remove(cached_item[2])\n del_cached_image(cached_item)\n\n return None\n\n# This function accepts account name, a list of cloud names and an image name\n# Using the image dictionary it checks the provided clouds for the given image name\n# It returns a list of cloud names where the image was found, if none were found it returns empty list\ndef check_for_existing_images(group_name, cloud_name_list, image_name):\n json_dict = get_images_for_group(group_name)\n image_dict = json.loads(json_dict)\n\n image_found_cloud_name = list()\n\n for cloud in cloud_name_list:\n for image in image_dict[cloud]:\n if image_dict[cloud][image]['name'] == image_name:\n image_found_cloud_name.append(cloud)\n\n return image_found_cloud_name\n\n\n# Applys the delete rules and returns True if its ok to delete, False otherwise\n# Rule 1: Can't delete a shared image\n# Rule 2: Can't delete the last copy of an image.\ndef check_delete_restrictions(image_id, group_name, cloud_name):\n json_dict = get_images_for_group(group_name)\n image_dict = json.loads(json_dict)\n\n # Rule 1: check if image is shared\n if image_dict[cloud_name][image_id]['visibility'] is \"public\":\n return False\n\n # Rule 2: check if its the last copy of the image\n for repo in image_dict:\n if repo is not cloud_name:\n for image in image_dict[repo]:\n if image_dict[repo][image]['name'] is image_dict[cloud_name][image_id]['name']:\n #found one, its ok to delete\n return True\n\n return False\n\n# This function checks if image collection has started so we don't accidentally queue\n# multiple image collection jobs\ndef check_collection_task():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n state = red.get(\"collection_started\")\n if state is None:\n return False\n elif state:\n return True\n else:\n return False\n\ndef set_collection_task(state):\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.set(\"collection_started\", state)\n\n\n#\n#THESE FUNCTIONS ARE UNUSED BUT MAY BE USEFULL TO PROVIDE REAL TIME FEEDBACK ABOUT TRANSFERS\n#\n#def post_transfer_progress(key, progress):\n# r = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n# r.set(key, progress)\n#\n#def get_transfer_progress(key):\n# r = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n# 
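One bug worth flagging in check_delete_restrictions above: it compares strings with 'is' (visibility is "public", repo is not cloud_name). 'is' tests object identity rather than value -- recent Pythons even emit a SyntaxWarning for it -- so the rule can misfire when the strings are equal but distinct objects; == is the comparison the logic needs. A two-line demonstration:

visibility = ''.join(['pub', 'lic'])  # value 'public', but a distinct object
print(visibility == 'public')         # True  -- compares values
print(visibility is 'public')         # False -- compares identities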
progress = r.get(key)\n# return progress\n\ndef increment_transactions():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.incr(\"num_transactions\", 1)\n return True\n\ndef decrement_transactions():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.decr(\"num_transactions\", 1)\n return True\n\ndef get_num_transactions():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n num_tx = red.get(\"num_transactions\")\n if num_tx is None:\n num_tx = 0\n red.set(\"num_transactions\", num_tx)\n if int(num_tx) < 0:\n num_tx = 0\n red.set(\"num_transactions\", num_tx)\n return int(num_tx)\n\ndef repo_modified():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.incr(\"repos_modified\")\n return True\n\ndef check_for_repo_changes():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n result = red.get(\"repos_modified\")\n if result is None:\n return False\n elif int(result) > 0:\n return True\n else:\n return False\n\ndef repo_proccesed():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.set(\"repos_modified\", 0)\n\ndef delete_keypair(key_name, cloud):\n sess = _get_keystone_session(cloud)\n nova = _get_nova_client(sess)\n\n keys = nova.keypairs.list()\n for key in keys:\n if key.name == key_name:\n nova.keypairs.delete(key)\n return True\n\n return False\n\ndef get_keypair(keypair_key, cloud):\n sess = _get_keystone_session(cloud)\n nova = _get_nova_client(sess)\n\n split_key = keypair_key.split(\";\")\n fingerprint = split_key[0]\n key_name = split_key[1]\n\n keys = nova.keypairs.list()\n for key in keys:\n if key.name == key_name:\n return key\n return None\n\ndef transfer_keypair(keypair, cloud):\n sess = _get_keystone_session(cloud)\n nova = _get_nova_client(sess)\n\n nova.keypairs.create(name=keypair.name, public_key=keypair.public_key)\n return True\n\ndef create_keypair(key_name, key_string, cloud):\n sess = _get_keystone_session(cloud)\n nova = _get_nova_client(sess)\n\n try:\n new_key = nova.keypairs.create(name=key_name, public_key=key_string)\n except Exception as exc:\n raise\n return new_key\n\n\ndef create_new_keypair(key_name, cloud):\n sess = _get_keystone_session(cloud)\n nova = _get_nova_client(sess)\n\n try:\n new_key = nova.keypairs.create(name=key_name)\n except Exception as exc:\n raise\n return new_key\n\ndef check_and_transfer_image_defaults(db_session, json_img_dict, group, defaults_class_obj):\n #get csv2_group_defaults from db\n #get image matrix from parameter\n #check all cloud resources for default_image\n defaults = db_session.query(defaults_class_obj).get(group)\n if defaults.vm_image is None or defaults.vm_image==\"\":\n logger.info(\"No default image set, skipping...\")\n return False\n if json_img_dict is None:\n logger.info(\"Image dict is empty, returning..\")\n return False\n grp_dict = json.loads(json_img_dict)\n try:\n for repo_key in grp_dict:\n logger.info(\"checking %s fo default image %s..\" % (repo_key, defaults.vm_image))\n default_present = False\n for image_id in grp_dict[repo_key]:\n img_dict = grp_dict[repo_key][image_id]\n if img_dict[\"name\"] == defaults.vm_image:\n logger.info(\"Image found, breaking\")\n default_present = True\n break\n if not default_present:\n # need to xfer image to this cloud\n logger.info(\"Found missing default image, attempting to 
transfer %s to %s\" % (defaults.vm_image, repo_key))\n img_details = __get_image_details(group, defaults.vm_image)\n disk_format = img_details[0]\n container_format = img_details[1]\n transaction = {\n 'user': \"default_transfer\",\n 'action': 'transfer',\n 'group_name': group,\n 'cloud_name': repo_key,\n 'image_name': defaults.vm_image,\n 'disk_format': disk_format,\n 'container_format': container_format\n }\n trans_key = group + \"_pending_transactions\"\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.rpush(trans_key, json.dumps(transaction))\n increment_transactions()\n except Exception as exc:\n logger.error(\"Error attempting to queue transfers for default image:\")\n logger.error(exc)\n return False\n\n return True\n\ndef check_defaults_changed():\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n changed_bool = red.get(\"defaults_changed\").decode(\"utf-8\") \n if changed_bool == \"True\":\n return True\n else:\n return False\n\ndef set_defaults_changed(changed_bool):\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n red.set(\"defaults_changed\", changed_bool) \n return True\n\n\ndef __get_image_ids(repo_dict):\n img_trans_dict = {}\n for image in repo_dict:\n img_trans_dict[repo_dict[image]['name']] = image\n\n return img_trans_dict\n\n#Searches through the image dict until it finds this image and returns the disk/container formats\ndef __get_image_details(group_name, image):\n\n red = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)\n proj_dict = json.loads(red.get(group_name))\n for repo in proj_dict:\n for img in proj_dict[repo]:\n if proj_dict[repo][img]['name'] == image:\n return (proj_dict[repo][img]['disk_format'], proj_dict[repo][img]['container_format'])\n\n\ndef _get_keystone_session(cloud):\n authsplit = cloud.authurl.split('/')\n version = int(float(authsplit[-1][1:])) if len(authsplit[-1]) > 0 else int(float(authsplit[-2][1:]))\n\n if version == 2:\n try:\n auth = v2.Password(\n auth_url=cloud.authurl,\n username=cloud.username,\n password=cloud.password,\n tenant_name=cloud.project)\n sess = session.Session(auth=auth, verify=config.cert_auth_bundle_path)\n except Exception as exc:\n print(\"Problem importing keystone modules, and getting session: %s\" % exc)\n return sess\n elif version == 3:\n #connect using keystone v3\n try:\n auth = v3.Password(\n auth_url=cloud.authurl,\n username=cloud.username,\n password=cloud.password,\n project_name=cloud.project,\n user_domain_name=cloud.user_domain_name,\n project_domain_name=cloud.project_domain_name)\n sess = session.Session(auth=auth, verify=config.cert_auth_bundle_path)\n except Exception as exc:\n print(\"Problem importing keystone modules, and getting session: %s\" % exc)\n return sess\n\ndef _get_nova_client(session):\n nova = novaclient.Client(\"2\", session=session)\n return nova\n \n","sub_path":"web_frontend/cloudscheduler/glintwebui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":42157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"257334537","text":"\r\nfrom queue import Queue\r\n# BFS:\r\n#_____\r\ndef get_all_neighbors(G, S):\r\n candidates = G[S][:]\r\n\r\n neighbors = []\r\n # Find ones\r\n for i in range(len(candidates)):\r\n if candidates[i] == 1 and i != S:\r\n neighbors.append(i)\r\n return neighbors\r\n\r\nQ = Queue()\r\nvisited = []\r\ndef 
traverseBFSRecursive(S, G):\r\n if not S in visited:\r\n #print S\r\n visited.append(S)\r\n neighbors = get_all_neighbors(G, S)\r\n\r\n\r\n # Queue all neighbors\r\n for neighbor in neighbors:\r\n Q.put(neighbor)\r\n if not Q.empty():\r\n next_node = Q.get(block=True)\r\n traverseBFSRecursive(next_node, G)\r\n else:\r\n return\r\n else:\r\n if not Q.empty():\r\n next_node = Q.get(block=True)\r\n traverseBFSRecursive(next_node, G)\r\n else:\r\n return\r\ndef traverseBFS(G):\r\n S = 0\r\n traverseBFSRecursive(S, G)\r\n\r\ndef getConnectedComponentBFS(G):\r\n N = len(G)\r\n\r\n for node in range(N):\r\n\r\n if not node in visited:\r\n print('Cluster starts at ', node)\r\n traverseBFSRecursive(node, G)\r\n\r\n\r\n# DFS\r\nSt = []\r\nvisited = []\r\ndef traverseDFSRecursive(S, G):\r\n if not S in visited:\r\n print(S)\r\n visited.append(S)\r\n neighbors = get_all_neighbors(G, S)\r\n for neighbor in neighbors:\r\n if not neighbor in visited:\r\n St.append(neighbor)\r\n\r\n if len(St) > 0:\r\n next_node = St.pop()\r\n traverseDFSRecursive(next_node, G)\r\n else:\r\n return\r\n else:\r\n return\r\n\r\ndef traverseDFS(G):\r\n S = 0\r\n traverseDFSRecursive(S, G)\r\n\r\ndef getConnectedComponentDFS(G):\r\n N = len(G)\r\n\r\n for node in range(N):\r\n\r\n if not node in visited:\r\n print('Cluster starts at ', node)\r\n traverseDFSRecursive(node, G)\r\n\r\n#G = [[1,0,1],[0,1,0],[1,0,1]]\r\n#traverseBFS(G)\r\n#getConnectedComponent(G)\r\n\r\nG = [[1,1,1,0,0],[1,1,0,1,0],[1,0,1,0,0],[0,1,0,1,0],[0,0,0,0,1]]\r\nprint(traverseDFS(G))\r\n#getConnectedComponentDFS(G)\r\n","sub_path":"GraphPractice.py","file_name":"GraphPractice.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"73729234","text":"import json\r\n\r\n\r\nclass Pokemon:\r\n def __init__(self):\r\n self.name = input(\"What is your Pokemon's name? \")\r\n self.type = input(\"What is your Pokemon's type? \")\r\n self.level = input(\"What is your Pokemon's level? 
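traverseBFSRecursive in the graph-practice record drives its queue through recursion and module-level state (Q, visited), recursing once per visited node. The same breadth-first traversal written iteratively over the adjacency matrix, self-contained:

from collections import deque

def bfs(G, start=0):
    visited = [start]            # a list keeps discovery order, like the original
    queue = deque([start])
    while queue:
        node = queue.popleft()
        # A row scan of the adjacency matrix yields the neighbours.
        for j, edge in enumerate(G[node]):
            if edge == 1 and j != node and j not in visited:
                visited.append(j)
                queue.append(j)
    return visited

G = [[1,1,1,0,0],[1,1,0,1,0],[1,0,1,0,0],[0,1,0,1,0],[0,0,0,0,1]]
print(bfs(G))  # [0, 1, 2, 3] -- node 4 is its own component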
\")\r\n pokemon_data = f\"Name: {self.name}\\n\" \\\r\n f\"Type: {self.type}\\n\" \\\r\n f\"Level: {self.level}\"\r\n print(pokemon_data)\r\n\r\n\r\nfilename = 'pokemon.json'\r\npokemon = Pokemon\r\n\r\nwith open(filename, 'a+') as f:\r\n json.dumps(f.__init__(self=Pokemon))\r\n\r\n","sub_path":"pre_pokemon.py","file_name":"pre_pokemon.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42651493","text":"import pyrealsense2 as rs\nimport usb\n\nimport numpy as np\nimport cv2\nimport time\n\nID_VENDOR_REALSENSE = 0x8086 # Intel\nMANUFACTURER_REALSENSE = \"Intel(R) RealSense(TM)\"\nPRODUCT_REALSENSE = \"Intel(R) RealSense(TM)\"\n\n# CAPTURE_WIDTH = 640\n# CAPTURE_HEIGHT = 480\n# CAPTURE_FPS = 60\n\nCAPTURE_WIDTH = 640\nCAPTURE_HEIGHT = 360\nCAPTURE_FPS = 15\n\n\ndef reset_realsense_devices():\n usb_devices = usb.core.find(find_all=True)\n\n def is_realsense_device(dev):\n is_same_idVendor = dev.idVendor == ID_VENDOR_REALSENSE\n if not is_same_idVendor:\n return False\n\n is_same_manufacturer = MANUFACTURER_REALSENSE in dev.manufacturer\n is_same_product = PRODUCT_REALSENSE in dev.product\n\n return is_same_manufacturer and is_same_product\n\n realsense_devices = filter(is_realsense_device, usb_devices)\n\n for dev in realsense_devices:\n dev.reset()\n\ndef get_realsense_serialnumbers(max_n=1):\n # pyrealsense2.context\n ctx = rs.context()\n\n # pyrealsense2.device_list\n devices = ctx.query_devices()\n serial_numbers = map(lambda device: device.get_info(rs.camera_info.serial_number), devices)\n\n serial_numbers_ = list(serial_numbers)[:max_n]\n\n return serial_numbers_\n\ndef setup_realsense_camera(device_serial_number, width, height, fps):\n # ストリーム(Color/Depth)の設定\n config = rs.config()\n config.enable_device(device_serial_number)\n #config.enable_stream(rs.stream.color, width, height, rs.format.bgr8, fps)\n config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, fps)\n config.enable_stream(rs.stream.depth, width, height, rs.format.z16, fps)\n\n # ストリーミング開始\n pipeline = rs.pipeline()\n profile = pipeline.start(config)\n\n return pipeline, profile\n\ndef get_images(pipeline):\n frames = pipeline.wait_for_frames()\n\n # RGB\n RGB_frame = frames.get_color_frame()\n RGB_image = np.asanyarray(RGB_frame.get_data())\n\n # depth\n depth_frame = frames.get_depth_frame()\n depth_image = np.asanyarray(depth_frame.get_data())\n\n return RGB_image, depth_image\n\n\ndef init_camera():\n # reset usb connection of realsense devices\n reset_realsense_devices()\n\n # setup pipeline\n serial_numbers = get_realsense_serialnumbers(max_n=3)\n if len(serial_numbers) == 0:\n print(\"Not found realsense devices\")\n return\n elif len(serial_numbers) >= 2:\n serial_number = serial_numbers[1]\n else:\n serial_number = serial_numbers[0]\n\n pipeline, _ = setup_realsense_camera(serial_number, CAPTURE_WIDTH, CAPTURE_HEIGHT, CAPTURE_FPS)\n return pipeline\n\ndef main():\n pipeline = init_camera()\n\n # get image and show\n i = 0\n while True:\n print(i)\n RGB_image, depth_image = get_images(pipeline)\n # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.08), cv2.COLORMAP_JET)\n\n cv2.imshow(\"realsense image\", RGB_image)\n key = cv2.waitKey(1)\n if key == 27: # ESC\n break\n\n i += 1\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"spot/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"114238434","text":"import gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport numpy as np\nimport math\n\n# the following is to give import access for membrane_base\n\nimport gymdrl\nimport sys\n\nsys.path.append(gymdrl.__file__[:-11] + 'envs') # hacky but necessary\n\nimport membrane_base\n\nfrom Box2D import (b2World, b2CircleShape, b2FixtureDef, b2LoopShape, b2PolygonShape,\n b2RevoluteJointDef, b2_pi)\n\n# MEMBRANE BOUNCE ENVIRONMENT\n#\n# Copyright (c) 2017 William Choi, Alex Kyriazis, Ivan Zinin; all rights reserved\n\nFPS = 50\n\n####################\n# Noise Parameters #\n####################\nOBJ_POS_STDDEV = membrane_base.BOX_WIDTH / 100.0\nOBJ_VEL_STDDEV = 0 # Nothing set currently\nACTUATOR_POS_STDDEV = membrane_base.BOX_WIDTH / 100.0\nACTUATOR_VEL_STDDEV = 0 # Nothing set currently\n\n#################################\n# Reward Calculation Parameters #\n#################################\n# Desired Object Position\n\nMAX_DIST_TO_TARGET = np.sqrt(np.square(membrane_base.BOX_WIDTH) + np.square(membrane_base.BOX_HEIGHT))\n# Maximum distance adjacent actuators can be apart veritically due to the membrane\nMAX_VERT_DIST_BETWEEN_ACTUATORS = membrane_base.BOX_WIDTH / 4\n# Maximum steps at the target before the episode is deemed to be successfully completed\nMAX_TARGET_COUNT = 100\n\n########################\n# Rendering Parameters #\n########################\nTEMPW = membrane_base.BOX_WIDTH\nTEMPH = membrane_base.BOX_HEIGHT_BELOW_ACTUATORS + membrane_base.BOX_HEIGHT\n\nVIEWPORT_W = int(1000 * TEMPW / (TEMPW + TEMPH))\nVIEWPORT_H = int(1000 * TEMPH / (TEMPW + TEMPH))\n\nPD_CONTROL_KP = 1.0\nPD_CONTROL_KD = 0.0\n\n\nclass MembraneMoveArbPoscontrol(gym.Env):\n\n class PDControl:\n\n def __init__(self, kp = 1.0, kd = 0.0):\n self.Kp = kp\n self.Kd = kd\n self.derivator = 0.0\n self.set_point = 0.0\n\n def update(self, current_value):\n self.error = self.set_point - current_value\n self.Pval = self.Kp * self.error\n self.Dval = self.Kd * (self.error - self.derivator)\n self.derivator = self.error\n\n return self.Pval + self.Dval\n\n def set_point(self, new_point):\n self.set_point = new_point\n self.derivator = 0.0\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': FPS\n }\n\n # Flag that indicates whether to run the env with or without linkages\n with_linkage = True\n\n def __init__(self):\n self._seed()\n self.viewer = None # to be used later for rendering\n self.target_pos = None\n membrane_base.init_helper(self)\n\n self.object = None\n\n # Drawlist for rendering\n self.drawlist = []\n\n self.prev_state = None\n\n # Observation Space\n # [object posx, object posy, actuator1 pos.y, ... , actuator5 pos.y, actuator1 speed.y, ... 
, actuator5 speed.y]\n high = np.array([np.inf] * 16)\n self.observation_space = spaces.Box(low=-high, high=high)\n\n # Continuous action space; one for each linear actuator (5 total)\n # action space represents velocity\n self.action_space = spaces.Box(-1, 1, (5,))\n self.prev_shaping = None # for reward calculation\n\n self.pd_control_list = []\n\n self._reset()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _check_actuator_pos(self):\n result = True\n for i in range(4):\n dist_diff = np.abs(self.actuator_list[i + 1].position.y - self.actuator_list[i].position.y)\n if dist_diff > MAX_VERT_DIST_BETWEEN_ACTUATORS:\n result = False\n break\n return result\n\n def _destroy(self):\n if not self.exterior_box: return # return if the exterior box hasn't been created\n membrane_base.destroy_helper(self)\n self.world.DestroyBody(self.object)\n self.object = None\n\n def _reset(self):\n self._destroy()\n\n membrane_base.reset_helper(self)\n\n # Creating the object to manipulate\n object_fixture = b2FixtureDef(\n shape=b2CircleShape(radius=membrane_base.OBJ_SIZE / 2),\n density=0.3,\n friction=0.6,\n restitution=0.5\n )\n object_position = (membrane_base.BOX_WIDTH / 2, membrane_base.OBJ_SIZE / 2 + membrane_base.LINK_HEIGHT)\n self.object = self.world.CreateDynamicBody(\n position=object_position,\n fixtures=object_fixture,\n linearDamping=0.6 # Control this parameter for surface friction\n )\n self.object.color1 = (1, 1, 0)\n self.object.color2 = (0, 0, 0)\n\n self.drawlist = self.drawlist + [self.object]\n\n self.target_pos = [\n np.random.rand() * (membrane_base.BOX_WIDTH - membrane_base.OBJ_SIZE) + membrane_base.OBJ_SIZE / 2,\n np.random.rand() + 5]\n\n for i in range(5):\n self.pd_control_list[i] = self.PDControl(PD_CONTROL_KP, PD_CONTROL_KD)\n\n\n return self._step(np.array([0, 0, 0, 0, 0]))[0] # action: zero motor speed\n\n def _step(self, action):\n\n # if self.prev_state is not None:\n # action = self.programmed_policy(self.prev_state)\n\n # Use pd control to actuate simulation\n for i, actuator in enumerate(self.actuator_list):\n actuator.joint.motorSpeed = float(membrane_base.MOTOR_SPEED * self.pd_control_list[i].update(actuator.position.y))\n self.pd_control_list[i].set_point(np.clip(action[i], -1, 1))\n\n # Move forward one frame\n self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)\n\n # Required values to be acquired from the platform\n object_pos = [\n np.random.normal(self.object.position.x, OBJ_POS_STDDEV),\n np.random.normal(self.object.position.y, OBJ_POS_STDDEV)\n ]\n object_vel = [\n self.object.linearVelocity.x,\n self.object.linearVelocity.y\n ]\n actuator_pos = [\n np.random.normal(self.actuator_list[0].position.y, ACTUATOR_POS_STDDEV),\n np.random.normal(self.actuator_list[1].position.y, ACTUATOR_POS_STDDEV),\n np.random.normal(self.actuator_list[2].position.y, ACTUATOR_POS_STDDEV),\n np.random.normal(self.actuator_list[3].position.y, ACTUATOR_POS_STDDEV),\n np.random.normal(self.actuator_list[4].position.y, ACTUATOR_POS_STDDEV)\n ]\n actuator_vel = [\n self.actuator_list[0].linearVelocity.y,\n self.actuator_list[1].linearVelocity.y,\n self.actuator_list[2].linearVelocity.y,\n self.actuator_list[3].linearVelocity.y,\n self.actuator_list[4].linearVelocity.y\n ]\n\n # Observation space (state)\n state = [\n (self.target_pos[0] - membrane_base.BOX_WIDTH / 2) / (membrane_base.BOX_WIDTH / 2),\n (self.target_pos[1] - membrane_base.BOX_HEIGHT / 2) / (membrane_base.BOX_HEIGHT / 2),\n (object_pos[0] - membrane_base.BOX_WIDTH / 2) / 
(membrane_base.BOX_WIDTH / 2),\n (object_pos[1] - membrane_base.BOX_HEIGHT / 2) / (membrane_base.BOX_HEIGHT / 2),\n 2 * object_vel[0] / membrane_base.MOTOR_SPEED,\n 2 * object_vel[1] / membrane_base.MOTOR_SPEED,\n (actuator_pos[0] - membrane_base.ACTUATOR_TRANSLATION_MEAN) / membrane_base.ACTUATOR_TRANSLATION_AMP,\n (actuator_pos[1] - membrane_base.ACTUATOR_TRANSLATION_MEAN) / membrane_base.ACTUATOR_TRANSLATION_AMP,\n (actuator_pos[2] - membrane_base.ACTUATOR_TRANSLATION_MEAN) / membrane_base.ACTUATOR_TRANSLATION_AMP,\n (actuator_pos[3] - membrane_base.ACTUATOR_TRANSLATION_MEAN) / membrane_base.ACTUATOR_TRANSLATION_AMP,\n (actuator_pos[4] - membrane_base.ACTUATOR_TRANSLATION_MEAN) / membrane_base.ACTUATOR_TRANSLATION_AMP,\n 2 * (actuator_vel[0]) / membrane_base.MOTOR_SPEED,\n 2 * (actuator_vel[1]) / membrane_base.MOTOR_SPEED,\n 2 * (actuator_vel[2]) / membrane_base.MOTOR_SPEED,\n 2 * (actuator_vel[3]) / membrane_base.MOTOR_SPEED,\n 2 * (actuator_vel[4]) / membrane_base.MOTOR_SPEED,\n ]\n self.prev_state = state\n\n assert len(state) == 16\n\n # Rewards\n reward = 0\n shaping = \\\n -200 * np.abs(self.target_pos[0] - object_pos[0]) / membrane_base.BOX_WIDTH \\\n - 200 * np.abs(self.target_pos[1] - object_pos[1]) / membrane_base.BOX_HEIGHT \\\n - 50 * np.abs(state[2]) \\\n - 50 * np.abs(state[3])\n\n if self.prev_shaping is not None:\n reward = shaping - self.prev_shaping\n self.prev_shaping = shaping\n\n if (np.abs(object_pos[0] - self.target_pos[0])) < 1:\n if (np.abs(object_pos[1] - self.target_pos[1])) < 1:\n reward += 50\n\n # Reduce reward for using the motor\n for a in action:\n reward -= 5 * np.clip(np.abs(a), 0, 1)\n\n done = False\n\n return np.array(state), reward, done, {}\n\n def _render(self, mode='human', close=False):\n from gym.envs.classic_control import rendering\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n if self.viewer is None:\n self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)\n self.viewer.set_bounds(0, membrane_base.BOX_WIDTH, -membrane_base.BOX_HEIGHT_BELOW_ACTUATORS,\n membrane_base.BOX_HEIGHT)\n\n # Target Position Visualized\n self.viewer.draw_polyline([(self.target_pos[0], 0), (self.target_pos[0], membrane_base.BOX_HEIGHT)],\n color=(1, 0, 0))\n self.viewer.draw_polyline([(0, self.target_pos[1]), (membrane_base.BOX_WIDTH, self.target_pos[1])],\n color=(1, 0, 0))\n\n membrane_base.render_helper(self)\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n def programmed_policy(self, state):\n\n FAST_SPEED = 1;\n MEDIUM_SPEED = 0.5;\n SLOW_SPEED = 0.1;\n VERY_SLOW_SPEED = 0.05;\n\n ACTUATOR_START = membrane_base.BOX_SIDE_OFFSET\n ACTUATOR_SPACING = membrane_base.GAP\n\n p = (self.object.position.x - ACTUATOR_START) / ACTUATOR_SPACING\n\n q = (self.target_pos[0] - ACTUATOR_START) / ACTUATOR_SPACING\n\n action = -SLOW_SPEED * np.ones(5)\n\n if (self.target_pos[0] - self.object.position.x) > 0:\n # move right\n action[int(np.floor(p))] = VERY_SLOW_SPEED\n if int(np.ceil(q) + 1) <= 4:\n action[int(np.ceil(q) + 1)] = VERY_SLOW_SPEED\n else:\n # move left\n if int(np.ceil(q) - 1) >= 0:\n action[int(np.ceil(q) - 1)] = VERY_SLOW_SPEED\n\n action[int(np.ceil(p))] = VERY_SLOW_SPEED\n\n return action\n\n\nif __name__ == \"__main__\":\n env = Membrane()\n s = 
env.reset()\n","sub_path":"gym-drl/gymdrl/envs/membrane_move_arb_poscontrol.py","file_name":"membrane_move_arb_poscontrol.py","file_ext":"py","file_size_in_byte":11029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517986285","text":"# -*- coding: utf-8 -*-\nimport csv\n\n\n\ncsvfile = open('/home/furkan/Masaüstü/karismis.csv', 'r').readlines()\nfilename = 1\nfor i in range(len(csvfile)):\n\tif i % 4101 == 0:\n\t\topen(str(filename) + '.csv', 'w+').writelines(csvfile[i:i+4101])\n\t\tfilename +=1\n","sub_path":"bol.py","file_name":"bol.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"342949169","text":"#!usr/bin/python\n\n#import pyodbc\nimport os\nimport datetime\nimport sys\nimport ptsutils3\n\n#Usage audio_test.py 2018-01-30 13\n\nstr_date = sys.argv[1]\nstr_channel = sys.argv[2]\n\n#run in windows\nserver = '10.13.210.33'\nnas1 = '10.13.200.22'\nAPPROVED_DIR = r'\\\\10.13.200.22\\mamnas1\\Approved'\n#server = '10.13.220.34'\ndatabase = 'MAM'\nusername = 'audiotest' #db owner for executing SPL procedure\npassword = 'audiotest'\n\nconnStr = 'DRIVER={{ODBC Driver 13 for SQL Server}};SERVER={s};PORT=1443;DATABASE={d};UID={u};PWD={p}'.format(s = server, d = database, u = username, p = password)\n\ndef get_playlist_all_test(pdate, channel):\n fvideo_id = \"videoid-\" + pdate + \"-\" + channel + \".txt\"\n playlist = []\n video_id_dict = {}\n count = 0 #count queue for all str_channel\n count_ch = 0 #count queue number for specified channel\n cnxn = pyodbc.connect(connStr)\n cursor = cnxn.cursor()\n #tsql = (\"exec SP_Q_GET_PLAY_VIDEOID_LIST\")\n tsql = (\"select * from VW_PLAYLIST_FROM_NOW order by FDDATE, FSCHANNEL_ID, QUEUE\")\n\n fh = open(fvideo_id, \"w\")\n\n with cursor.execute(tsql):\n row = cursor.fetchone()\n while row:\n record = \"\"\n queueno = int(row[0])\n name = str(row[1]).rstrip()\n episode = int(row[2])\n fddate = str(row[3])\n fsplaytime = str(row[4])\n fschannel = str(row[5])\n video_id = str(row[6]).rstrip()\n queue = [queueno, name, episode, fddate, fsplaytime, fschannel, video_id]\n playlist.append(queue)\n if (fddate == pdate and fschannel == channel):\n if (len(video_id) > 0 and video_id != 'None' and video_id not in video_id_dict): # use regular expression instead\n video_id_dict[video_id[2:]] = queue\n fh.writelines(video_id)\n count_ch = count_ch + 1\n\n \"\"\"\n for i in range(len(row)):\n record = record + str(row[i]).rstrip() + \"|\"\n print(record)\n \"\"\"\n count = count + 1\n row = cursor.fetchone()\n for q in playlist:\n if (q[3] == pdate and q[5] == channel):\n print(q)\n print(\"Total number of queue : \" + str(count))\n\n not_found = []\n for vid in video_id_dict.keys():\n print(vid + \":\" + str(video_id_dict[vid]))\n target_file = APPROVED_DIR + \"\\\\\" + vid + \".mxf\"\n if (os.path.isfile(target_file)):\n print (\"Found \" + target_file)\n else:\n print (\"Cannot find \" + target_file)\n not_found.append(vid)\n\n print(\"\\n\\nTotal number of queue (all channel): \" + str(count))\n print(\"Total number of queue (ch-\" + channel + \", \" + pdate + \"): \" + str(count_ch))\n print(\"Total number of file: \" + str(len(video_id_dict)))\n print(\"\\n\\n--------------------------- File not found ---------------------------------\")\n for vid in not_found:\n print(str(video_id_dict[vid]) + \" not found\")\n\n return video_id_dict\n\n\ndef comp_approved_atsin(video_id_dict, src_dir, target_dir):\n for 
id in video_id_dict.keys():\n print(\"Check \" + id + \" ------\")\n src_f = src_dir + \"\\\\\" + id + \".mxf\"\n target_f = target_dir + \"\\\\\" + id + \".mxf\"\n\n if (os.path.isfile(target_f)):\n #print(\"Found \" + target_f)\n if (os.path.isfile(src_f)):\n print(\"remove \" + src_f)\n else:\n print(\"Cannot find \" + target_f)\n print(video_id_dict[id])\n if (os.path.isfile(src_f)):\n print(\"move file to Approved \" + src_f)\n\ndef move_file_by_video_id_org(video_id_dict, src_dir, target_dir):\n for id in video_id_dict.keys():\n print(\"Check \" + id + \" ------\")\n src_f = src_dir + \"\\\\\" + id + \".mxf\"\n target_f = target_dir + \"\\\\\" + id + \".mxf\"\n\n if (os.path.isfile(target_f)):\n print(\"Found \" + target_f)\n else:\n if (os.path.isfile(src_f)):\n print(\"move file to \" + target_f)\n try:\n os.rename(src_f, target_f)\n except Exception:\n print(\"Permission denied\")\n continue\n\n\n\n\n\ndef get_playlist_prog():\n\n #cnxn = pyodbc.connect('DRIVER={ODBC Driver 13 for SQL Server};SERVER=' + server + ';PORT=1443;DATABASE=' + database + ';UID=' + username + ';PWD=' + password)\n #cnxn = pyodbc.connect(SERVER=' + server + ';PORT=1443;DATABASE=' + database + ';UID=' + username + ';PWD=' + password)\n cursor = cnxn.cursor()\n\n print ('Reading data from TBUSERS')\n tsql = \"select * from TBUSERS where FSUSER = 'hsing1';\"\n tsql = (\"select FSPROG_ID, FNEPISODE, FSPROG_NAME, FDDATE, FSPLAY_TIME from TBPGM_COMBINE_QUEUE \" +\n \"where FDDATE = '2018-01-11' and FSCHANNEL_ID = '12' order by FSPLAY_TIME;\")\n tsql = (\"select B.FSVIDEO_PROG, A.FSPROG_ID, A.FNEPISODE, A.FSPROG_NAME, A.FDDATE, A.FSPLAY_TIME from TBPGM_COMBINE_QUEUE A left join TBLOG_VIDEO B on A.FSVIDEO_ID = B.FSVIDEO_PROG \" +\n \"where FDDATE between '2018-01-25' and '2018-01-26' and A.FSCHANNEL_ID in ('12', '13', '14', '07') order by A.FSPLAY_TIME\")\n\n\n count = 0\n main_prog = []\n playlist = []\n queue = []\n video_id_dict = {}\n\n with cursor.execute(tsql):\n row = cursor.fetchone()\n print(len(row))\n while row:\n record = \"\"\n video_id = str(row[0]).rstrip()\n prog_id = str(row[1]).rstrip()\n episode = int(row[2])\n prog_name = str(row[3]).rstrip()\n play_date = str(row[4]).rstrip()\n play_time = str(row[5]).rstrip()\n queue = [video_id, prog_id, episode, prog_name, play_date, play_time]\n if (len(video_id) > 0 and video_id != 'None' and video_id not in video_id_dict): # use regular expression instead\n video_id_dict[video_id[2:]] = queue\n playlist.append(queue)\n \"\"\"\n for i in range(len(row)):\n record = record + str(row[i]).rstrip() + \"|\"\n print(record)\n \"\"\"\n count = count + 1\n row = cursor.fetchone()\n\n for queue in playlist:\n print(queue)\n print(\"Total count : \" + str(count))\n\n not_found = []\n for vid in video_id_dict.keys():\n print(vid + \":\" + str(video_id_dict[vid]))\n target_file = APPROVED_DIR + \"\\\\\" + vid + \".mxf\"\n if (os.path.isfile(target_file)):\n print (\"Found \" + target_file)\n else:\n print (\"Cannot find \" + target_file)\n not_found.append(vid)\n\n print(\"\\n\\n\\n\\n--------------------------- File not found ---------------------------------\")\n for vid in not_found:\n print(str(video_id_dict[vid]) + \" not found\")\n\n#ptsutils3.create_database(\"audio_test.db3\")\n\n#get_playlist_prog()\n#video_id_dict = ptsutils3.get_playlist_all(str_date, str_channel)\n\n#move testing files to ATS_IN\nSRC_DIR = r'\\\\10.13.200.22\\mamnas1\\Approved'\nTARGET_DIR = r'\\\\10.13.200.22\\mamnas1\\ATS_IN'\n#ptsutils3.copy_file_by_video_id(video_id_dict, SRC_DIR, 
TARGET_DIR)\n#ptsutils3.move_file_by_video_id(video_id_dict, SRC_DIR, TARGET_DIR)\n\nSRC_DIR = r'\\\\10.13.200.22\\mamnas1\\Approved'\nTARGET_DIR = r'\\\\10.13.200.22\\mamnas1\\Approved'\n#ptsutils3.rename_video_id(video_id_dict, SRC_DIR, TARGET_DIR)\n\nSRC_DIR = r'\\\\10.13.200.22\\mamnas1\\Approved\\ATS_IN'\nTARGET_DIR = r'\\\\10.13.200.22\\mamnas1\\Approved'\n#comp_approved_atsin(video_id_dict, SRC_DIR, TARGET_DIR)\n\n#convert louth lst file\n#ptsutils3.modify_louth_lst(\"1320180130(louth).lst\")\n#ptsutils3.modify_louth_lst(\"1320180201(louth).lst\")\nptsutils3.modify_louth_lst(\"1320180131(louth).lst\")\n\n\"\"\"\nfor vid in video_id_dict.keys():\n print(vid[3:])\n f = r'\"\\\\10.13.200.6\\MAMDownload\\AUDIO_TEST\\ATS_IN\\0046MA.mxf\"' #是目前用來啟動 CMD.EXE 的目錄路徑。不支援 UNC 路徑\n f = r'U:\\AUDIO_TEST\\ATS_IN\\' + vid + r'.mxf'\n cmd = \"powershell.exe -noexit '& new-item -verbose -itemtype file -path \" + f\n os.system(cmd)\n\"\"\"\n","sub_path":"Python/audio_test.py","file_name":"audio_test.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"486155940","text":"import cv2\nimport os\nfrom PIL import Image\nimport numpy as np\n\npath = '../Data/Image'\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\ndetector = cv2.CascadeClassifier(\"../Data/haarcascade_frontalface_default.xml\")\n\n# function to get the images and label data\ndef getImagesAndLabels(path):\n imagePaths = [os.path.join(path,f) for f in os.listdir(path)]\n faces_detect=[]\n ids = []\n for imagePath in imagePaths:\n PIL_img = Image.open(imagePath).convert('L')\n img_numpy = np.array(PIL_img,'uint8')\n ident = int(os.path.split(imagePath)[-1].split(\".\")[1])\n faces = detector.detectMultiScale(img_numpy)\n for (x,y,w,h) in faces:\n faces_detect.append(img_numpy[y:y+h,x:x+w])\n ids.append(ident)\n return faces_detect,ids\n\n\nprint (\"\\n [INFO] Training faces. It will take a few seconds. Wait ...\")\nfaces,ids = getImagesAndLabels(path)\nrecognizer.train(faces, np.array(ids))\n\n# Save the model into trainer/trainer.yml\nrecognizer.write('../Data/trainer.yml') \nprint(\"\\n [INFO] {0} faces trained. 
Exiting Program\".format(len(np.unique(ids))))","sub_path":"OldModel/Train/DataTrainning.py","file_name":"DataTrainning.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"559818506","text":"#!/usr/bin/env python3\r\n\r\nimport asyncio\r\nimport argparse\r\nimport datetime\r\nimport json\r\nimport logging\r\nimport os\r\nimport pprint\r\nimport sys\r\nimport shutil\r\nimport time\r\n\r\nfrom image_cache import ImageCache\r\nfrom image_cache import ImageHelper\r\n\r\nfrom PIL import Image\r\nfrom PIL.ExifTags import TAGS\r\n\r\nfrom typing import Dict\r\n\r\n# Setup a logger\r\nlogging.basicConfig(\r\n format=\"[%(asctime)s] %(message)s\",\r\n datefmt=\"[%Y-%m-%d %I:%M:%S]\",\r\n level=logging.INFO,\r\n)\r\nlogger = logging.getLogger(\"image_util\")\r\n\r\n\r\nasync def gen_database(path: str, fast: bool) -> None:\r\n \"\"\"\r\n Takes in a target directory and computes information about\r\n the images contained therin\r\n \"\"\"\r\n ic = ImageCache(fast=fast)\r\n await ic.gen_cache_from_directory(path)\r\n\r\n report = {}\r\n\r\n queries = {\r\n # \"all_data\": \"SELECT * FROM {};\",\r\n \"image_types\": \"SELECT COUNT(DISTINCT img_type) FROM {};\",\r\n \"total_images\": \"SELECT COUNT(*) FROM {};\",\r\n \"average_size\": \"SELECT AVG(size) FROM {};\",\r\n \"total_size\": \"SELECT SUM(size) FROM {};\",\r\n }\r\n\r\n for k, v in queries.items():\r\n rows = ic.query(v.format(ic.get_table()))\r\n report[k] = rows[0][0]\r\n\r\n # Get duplicate and ambiguous images\r\n report[\"duplicates\"] = ic.get_duplicates()\r\n report[\"ambiguous\"] = ic.get_ambiguous()\r\n report[\"process_time\"] = ic.processing_time\r\n\r\n pp = pprint.PrettyPrinter(indent=2, compact=False)\r\n pp.pprint(report)\r\n\r\n logger.info(\"Completed database generation.\")\r\n logger.info(f\"Processed {ic.get_count()} images in {ic.processing_time} seconds.\")\r\n logger.info(f\"Encountered {len(report['duplicates'])} duplicate images.\")\r\n\r\n tstamp = datetime.datetime.now().strftime(\"gen_database_%Y-%m-%d.json\")\r\n with open(tstamp, \"w\") as fout:\r\n fout.write(json.dumps(report))\r\n logger.info(f\"Report written to {tstamp}\")\r\n\r\n\r\nasync def find_dupes(\r\n source: str, target: str, skip: bool, fast: bool\r\n) -> Dict[str, any]:\r\n \"\"\"\r\n Use the Image Cache helper class to read in the source directory\r\n to an sqlite3 DB, compute hashes and any necessary pieces for checking\r\n if the two images are the same. Then given the target directory, check\r\n to see if the image already exists, if it does to a pprint report\r\n about all potential dupes\r\n \"\"\"\r\n ic = ImageCache(fast=fast)\r\n if not skip:\r\n await ic.gen_cache_from_directory(source)\r\n logger.info(f\"Processing took {ic.processing_time} seconds.\")\r\n\r\n logger.info(\r\n f\"Beginning processing of {target} for potential duplicates. \"\r\n + \"Report will be displayed with duplicates, ambiguous files, \"\r\n + \"and suggested files for copying when finished. 
This may take a \"\r\n + \"long time.\"\r\n )\r\n\r\n report = {\r\n \"duplicates\": [],\r\n \"ambiguous\": [],\r\n \"migrate\": [],\r\n }\r\n\r\n for root, _, filenames in os.walk(target):\r\n logger.info(f\"Processing {len(filenames)} files in {root}\")\r\n for f in filenames:\r\n full: str = os.path.join(root, f)\r\n image: ImageHelper = ImageHelper(full)\r\n image.check_image_type()\r\n if not image.is_image:\r\n continue\r\n\r\n image.read_image()\r\n image.compute_md5()\r\n\r\n # Check if the file/size exists in the db.\r\n row = ic.lookup(f\"WHERE md5 = '{image.md5}'\")\r\n if len(row) > 0:\r\n\r\n # If file and size are the same, grab the crc32 and md5 to verify dupe\r\n logger.warning(\r\n f\"Duplicate image verified: {full} already exists in \"\r\n + f\"at {row[2]}\"\r\n )\r\n report[\"duplicates\"].append(image.full_path)\r\n continue\r\n else:\r\n row = ic.lookup(\r\n f\"WHERE crc32 = '{image.crc32}' and size = '{image.size}'\"\r\n )\r\n if len(row) > 0:\r\n logger.warning(\r\n f\"Ambiguous files detected. {full} has same size and \"\r\n + f\"crc32 as source directory file {row[2]}, but md5 \"\r\n + \"does not match.\"\r\n )\r\n report[\"ambiguous\"].append(image.full_path)\r\n continue\r\n\r\n # Add the file to the list of potentials to migrate\r\n report[\"migrate\"].append(image.full_path)\r\n\r\n pp = pprint.PrettyPrinter(indent=2, compact=False)\r\n pp.pprint(report)\r\n\r\n logger.info(\"Completed duplicate scan.\")\r\n logger.info(f\"Processed {ic.get_count()} images in {ic.processing_time} seconds.\")\r\n logger.info(\r\n f\"Report:\\n\\tDuplicates:\\t{len(report['duplicates'])}\"\r\n + f\"\\n\\tAmbiguous:\\t{len(report['ambiguous'])}\"\r\n + f\"\\n\\tUnique:\\t{len(report['migrate'])}\"\r\n )\r\n tstamp = datetime.datetime.now().strftime(\"find_dupes_%Y-%m-%d.json\")\r\n with open(tstamp, \"w\") as fout:\r\n fout.write(json.dumps(report))\r\n logger.info(f\"Report written to {tstamp}\")\r\n\r\n\r\ndef get_exif(img_path: str) -> Dict[str, str]:\r\n image = Image.open(img_path)\r\n exif = {}\r\n img_exif = image._getexif()\r\n if img_exif is not None:\r\n for (k, v) in img_exif.items():\r\n exif[TAGS.get(k)] = v\r\n return exif\r\n\r\n\r\nasync def sort_images(source: str, dest: str) -> None:\r\n # Helper function to read in a directory of pictures and sort them all\r\n # based off of exif metadata. 
By default the sorting happens by /YYYY/MM\r\n for root, _, filenames in os.walk(source):\r\n logger.info(f\"Processing {len(filenames)} files in {root}\")\r\n for f in filenames:\r\n full = os.path.join(root, f)\r\n # Use an ImageHelper for convenience\r\n ic = ImageHelper(full)\r\n ic.check_image_type()\r\n if not ic.is_image:\r\n logger.warning(\r\n f\"Encountered file which is not an image: {ic.full_path}\"\r\n )\r\n continue\r\n exif = get_exif(full)\r\n if exif == {}:\r\n logger.warning(f\"Failed to find exif data for {full}\")\r\n continue\r\n\r\n dt = time.strptime(exif[\"DateTimeOriginal\"], \"%Y:%m:%d %H:%M:%S\")\r\n new_dest = os.path.join(dest, str(dt.tm_year), str(dt.tm_mon))\r\n if not os.path.exists(new_dest):\r\n os.makedirs(new_dest)\r\n\r\n # Copy the file, including metadata\r\n shutil.copy(full, os.path.join(new_dest, f))\r\n shutil.copystat(full, os.path.join(new_dest, f))\r\n\r\n\r\nasync def main(\r\n source: str, target: str, genstats: bool, should_sort: bool, skip: bool, fast: bool\r\n) -> None:\r\n\r\n if not os.path.exists(source):\r\n logger.error(f\"Directory does not exist: {source}\")\r\n sys.exit()\r\n\r\n # TODO: Might be able to immediate declare/make an ImageCache, as\r\n # everyone already takes the `source` dir...\r\n if should_sort:\r\n await sort_images(source, target)\r\n elif genstats:\r\n await gen_database(source, fast)\r\n return\r\n else:\r\n if not os.path.exists(target):\r\n logger.error(f\"Directory does not exist: {target}\")\r\n sys.exit()\r\n await find_dupes(source, target, skip, fast)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # TODO: Argument parser groups for the different features\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n \"-s\",\r\n \"--source\",\r\n action=\"store\",\r\n help=\"The 'source of truth' image directory. Should be ones total \"\r\n + \"image store. This is used for sorting, comparing against, and \"\r\n + \"generating image stats.\",\r\n )\r\n parser.add_argument(\r\n \"-t\",\r\n \"--target\",\r\n action=\"store\",\r\n help=\"The target folder containing potential duplicate images\",\r\n )\r\n parser.add_argument(\r\n \"--skip_cache_gen\",\r\n action=\"store_true\",\r\n default=False,\r\n help=\"Skips the generation of the source image cache. Use this if you\"\r\n + \" are sure that no image changes have taken place since the last\"\r\n + \" run of this program.\",\r\n )\r\n parser.add_argument(\"-g\", \"--genstats\", default=False, action=\"store_true\")\r\n parser.add_argument(\r\n \"--database\",\r\n action=\"store\",\r\n help=\"Optional path where the ImageCache database should be stored. 
\"\r\n + \"Defaults to the current working directory.\",\r\n )\r\n parser.add_argument(\r\n \"--sort_images\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"When set, sort the images specified with '-d' by year and \"\r\n + \"as extracted from exif metadata on the image.\",\r\n )\r\n parser.add_argument(\"-f\", \"--fast\", default=False, action=\"store_true\")\r\n args = parser.parse_args()\r\n\r\n # TODO: Use args/kwargs :P\r\n asyncio.run(\r\n main(\r\n args.source,\r\n args.target,\r\n args.genstats,\r\n args.sort_images,\r\n args.skip_cache_gen,\r\n args.fast,\r\n )\r\n )\r\n","sub_path":"image_utils/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":9126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"472627689","text":"\n\nimport feedback # quelques fonctions utiles plbank/pysrc/src/utils \nimport sys\nimport json\n\n\nfb = feedback.Feedback()\ntry:\n with open(\"student.py\",\"r\") as f:\n exec(f.read(),globals()) # chargement du code de l'élève ou X doit être défini \n if X == \"Toto\":\n fb.setsuccess(True)\n fb.addFeedback(\"Bravo c'est bon !!!\")\n else:\n fb.setsuccess(False)\n fb.addFeedback(\"La variable est mal initialisé !!!\")\n \nexcept NameError as e:\n fb.setsuccess(False)\n fb.addFeedback(\"Il faut déclarer une variable X \\n\"+str(e))\nexcept Exception as e :\n fb.setsuccess(False)\n fb.addFeedback(\"Proposez un code qui compile\"+str(e))\nprint(json.dumps(fb.outputdic()))\n# pour être sur que le grader se termine bien ;)\nsys.exit(0)\n\n","sub_path":"home/lib/pl/bas/deuzegrader.py","file_name":"deuzegrader.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"216359065","text":"from flask import Flask, render_template, request, flash, url_for, redirect, session\nfrom functools import wraps\n\napp = Flask(__name__)\n\nimport db_query\nimport aux_functs\n\napp.secret_key = 'supersecretkeythatnoonewillguess'\n\n# Requires_login function to check whether a user is logged in and\n# if not redirects to the index page.\ndef requires_login(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n status = session.get('logged_in', False)\n if not status:\n return redirect(url_for('root', next=request.path))\n return f(*args, **kwargs)\n return decorated\n\n\n@app.route(\"/\")\ndef root():\n return render_template('index.html')\n\n\n@app.route(\"/signup/\", methods=['POST', 'GET'])\ndef signup():\n # Sign up function to take user details and store them in the database.\n if request.method == 'GET':\n return render_template('signup.html')\n else:\n _username = request.form['username']\n _email = request.form['email']\n _password = request.form['password']\n if db_query.exists(_email) is 0:\n db_query.addNew(_username, _email, _password)\n flash('Account created! 
Please now login')\n return redirect(url_for('login'))\n else:\n flash('Account already exists!')\n return render_template('signup.html')\n\n\n@app.route(\"/signin/\", methods=['POST', 'GET'])\ndef login():\n if request.method == 'GET':\n return render_template('signin.html')\n else:\n _email = request.form['email']\n _password = request.form['password']\n if db_query.loginValidate(_email, _password) == 1:\n session['username'] = db_query.getUserName(_email)\n # Session variable 'logged_in' is used by @requires_login to confirm\n # a user is logged in.\n session['logged_in'] = True\n return redirect(url_for('home'))\n else:\n flash('Email or password incorrect!')\n return render_template('signin.html')\n\n@app.route(\"/logout/\")\ndef logout():\n session.pop('username')\n session['logged_in'] = False\n return redirect('/')\n\n@app.route(\"/userhome/\")\n@requires_login\ndef home():\n name = session['username']\n return render_template('userhome.html', username=name)\n\n@app.route(\"/userhome/genre/\")\n@requires_login\ndef genre():\n name = session['username']\n return render_template('genre.html', username=name)\n\n@app.route(\"/userhome/title/\")\n@requires_login\ndef title():\n name = session['username']\n return render_template('title.html', username=name)\n\n@app.route(\"/userhome/author/\")\n@requires_login\ndef author():\n name = session['username']\n return render_template('author.html', username=name)\n\n@app.route(\"/userhome/genre//\")\n@requires_login\ndef genre_list(type):\n name = session['username']\n header = aux_functs.getGenreHeader(type)\n select_genre = aux_functs.getGenre(type)\n # getGenre returns a list of Python dictionaries.\n Books = db_query.getGenre(select_genre)\n return render_template('booklist.html', Books=Books, header=header,\n username=name, type=type)\n\n@app.route(\"/userhome/genre//\")\n@requires_login\ndef book_genre(category, id):\n name = session['username']\n if aux_functs.checkCategory(category):\n this_book = db_query.getBook(id)\n # getBook returns a single Python dictionary.\n return render_template('bookpage.html', book=this_book, username=name)\n else:\n return redirect(\"/userhome/genre/\")\n\n@app.route(\"/userhome/author//\")\n@requires_login\ndef author_list(type):\n name = session['username']\n header = aux_functs.getAuthor(type)\n Books = db_query.getAuthor(header)\n return render_template('booklist.html', Books=Books, header=header, username=name, type=type)\n\n@app.route(\"/userhome/author//\")\n@requires_login\ndef book_author(category, id):\n name = session['username']\n if aux_functs.checkCategory(category):\n this_book = db_query.getBook(id)\n return render_template('bookpage.html', book=this_book, username=name)\n else:\n return redirect(\"/userhome/author\")\n\n\n@app.route(\"/userhome/title//\")\n@requires_login\ndef title_list(type):\n name = session['username']\n header = aux_functs.getTitle(type)\n Books = db_query.getTitle(header)\n return render_template('booklist.html', Books=Books, header=header, username=name, type=type)\n\n@app.route(\"/userhome/title//\")\n@requires_login\ndef book_title(category, id):\n name = session['username']\n if aux_functs.checkCategory(category):\n this_book = db_query.getBook(id)\n return render_template('bookpage.html', book=this_book, username=name)\n else:\n return redirect(\"/userhome/title\")\n\n@app.errorhandler(404)\ndef error_404(exception):\n return render_template('error.html')\n\n@app.errorhandler(500)\ndef error_500(exception):\n return render_template('error.html')\n\nif __name__ 
== \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n","sub_path":"src/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"296352778","text":"#!/usr/bin/env python\n\nfrom __future__ import division\nfrom math import pi as M_PI # used by many snippets\nimport os.path\nimport sys\n\nimport cairo\nimport gtk\nimport pango\n\nwidth, height = 400, 400\n\ndef expose_event(widget, event):\n ctx = widget.window.cairo_create()\n x, y = 0.1, 0.5\n x1, y1 = 0.4, 0.9\n x2, y2 = 0.6, 0.1\n x3, y3 = 0.9, 0.5\n\n snippet_normalize (ctx, width, height)\n\n ctx.move_to (x, y)\n ctx.curve_to (x1, y1, x2, y2, x3, y3)\n\n ctx.stroke ()\n\n ctx.set_source_rgba (1,0.2,0.2,0.6)\n ctx.set_line_width (0.03)\n ctx.move_to (x,y); ctx.line_to (x1,y1)\n ctx.move_to (x2,y2); ctx.line_to (x3,y3)\n ctx.stroke ()\n\ndef snippet_normalize (ctx, width, height):\n ctx.scale (width, height)\n ctx.set_line_width (0.04)\n\nwin = gtk.Window()\nwin.connect('destroy', gtk.main_quit)\n\ndrawingarea = gtk.DrawingArea()\nwin.add(drawingarea)\ndrawingarea.connect('expose_event', expose_event)\ndrawingarea.set_size_request(600,400)\n\nwin.show_all()\ngtk.main()\n","sub_path":"cairo/dibujando/curvas.py","file_name":"curvas.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"261055312","text":"#==========DATA-LOADER FOR MACHINE LEARNING FRAMEWORKS==========#\n#==========MADE WITH LOVE BY RUMEET SINGH==========# \n\nimport numpy as np\nimport os\nimport cv2\nfrom sklearn.model_selection import train_test_split\n\nclass DataLoader:\n\n def __init__(self,DIR,CATEGORIES):\n self.DIR = DIR\n self.CATEGORIES = CATEGORIES\n\n def create_data(self,cmap,test_size=0.25,random_state=None,normalize=False,size_x=50,size_y=50):\n training_data = []\n X = []\n y = []\n\n for category in self.CATEGORIES:\n path = os.path.join(self.DIR,category)\n class_num = self.CATEGORIES.index(category)\n\n for img in os.listdir(path):\n if cmap=='rgb':\n try:\n brg_img = cv2.imread(os.path.join(path,img))\n b,g,r = cv2.split(brg_img) # get b,g,r\n img_array = cv2.merge([r,g,b]) # switch it to rgb\n img_array = cv2.resize(img_array,(size_x,size_y))\n if normalize:\n img_array = img_array/255\n training_data.append([img_array,class_num])\n except Exception as e:\n pass \n elif cmap=='gray':\n try:\n img_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)\n if normalize:\n img_array = img_array/255\n training_data.append([img_array,class_num])\n except Exception as e:\n pass \n\n for images,labels in training_data:\n X.append(images)\n y.append(labels)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,random_state=random_state)\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n X_test = np.array(X_test)\n y_test = np.array(y_test) \n\n if cmap=='gray':\n X_train = np.expand_dims(X_train, -1)\n X_test = np.expand_dims(X_test, -1)\n\n return X_train, X_test, y_train, y_test\n","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"617352164","text":"# coding=utf-8\n'''\nAll text work utilities (formatting messages, layout etc).\n'''\nfrom __future__ import absolute_import, unicode_literals, print_function\nimport textwrap\n\n\ndef cli_info(data, 
title='Info'):\n '''\n Prints an info on CLI with the title.\n Useful for infos, general errors etc.\n\n :param data:\n :param title:\n :return:\n '''\n\n wrapper = textwrap.TextWrapper()\n wrapper.initial_indent = ' ' * 4\n wrapper.subsequent_indent = wrapper.initial_indent\n\n return '{title}:\\n\\n{text}'.format(title=title, text=wrapper.fill(data))\n","sub_path":"salt/utils/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"327053091","text":"class Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n res = []\n self.dfs(n, k, 1, [], res)\n return res\n\n def dfs(self, n, k, start, combination, res):\n if len(combination) == k:\n res.append(list(combination))\n return\n\n for i in range(start, n + 1):\n # if choices are less than what we need\n if n - i + 1 < k - len(combination):\n return\n combination.append(i)\n self.dfs(n, k, i + 1, combination, res)\n combination.pop()\n","sub_path":"Week_03/77. Combinations.py","file_name":"77. Combinations.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"458265193","text":"#!/usr/bin/env python3\n\n# TimeCryption decryption for OFB\nimport sys\n\nimport os\nimport hashlib\nimport binascii\nimport struct\nfrom Crypto.Cipher import AES\n\n\nif __name__=='__main__':\n\tfname = sys.argv[1]\n\twith open(fname, \"rb\") as f:\n\t\tlines = f.readlines()\n\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tl = line.split(b\": \")\n\t\tif l[1].startswith(b\"b'\") and l[1][-1] == 39:\n\t\t\tl[1] = l[1][2:-1]\n\t\tvars()[l[0].decode(\"utf-8\").lower()] = l[1].strip().decode(\"utf-8\")\n\n\tfor v in [\"key1\", \"key2\", \"iv\", \"ciphertext\"]:\n\t\tvars()[v] = binascii.unhexlify(vars()[v])\n\n\tassert not key1 == key2\n\tofb1 = AES.new(key1, AES.MODE_OFB, iv)\n\tofb2 = AES.new(key2, AES.MODE_OFB, iv)\n\tplaintxt1 = ofb1.decrypt(ciphertext)\n\tplaintxt2 = ofb2.decrypt(ciphertext)\n\tassert not plaintxt1 == plaintxt2\n\n\thash = hashlib.sha256(ciphertext).hexdigest()[:8].lower()\n\n\tfname = os. 
path.splitext(fname)[0] # remove file extension\n\n\texts = exts.split(\" \")[-2:]\n\twith open(\"%s.%s.%s\" % (fname, hash, exts[0]), \"wb\") as file1:\n\t\tfile1.write(plaintxt1)\n\n\twith open(\"%s.%s.%s\" % (fname, hash, exts[1]), \"wb\") as file2:\n\t\tfile2.write(plaintxt2)\n\n\tprint(\"key1:\", key1.rstrip(b\"\\0\"))\n\tprint(\"key1:\", key2.rstrip(b\"\\0\"))\n\tprint(\"iv:\", iv)\n\tprint(\"Success!\")\n\tprint()\n\tprint(\"plaintext1:\", binascii.hexlify(plaintxt1[:16]),\"...\")\n\tprint(\"plaintext2:\", binascii.hexlify(plaintxt2[:16]),\"...\")\n","sub_path":"utils/ofb/timecryption/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"513812962","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 19 14:07:08 2018\n\n@author: jerem\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport functools as ft\n\nNUM_SPACES = 40\nJAIL_SPACE = 10\nGOTO_JAIL_SPACE = 30\n\n#Random Variable that defines number of spaces \ndef N():\n return np.random.choice(range(4),1, p=[.2,.4,.3,.1])[0]\n\ndef N_Card():\n return np.random.choice(range(4),1, p=[.5,.2,.1,.2])[0]\n\n#Use that to generate game board movement\n#Take into account chance/community chest cards\n\n#Simulate dice rolls. Returns sum of dice and if it was a double\ndef rollDice():\n rs = np.random.choice(range(1,7) , 2, replace=True)\n return([sum(rs), rs[0] == rs[1]])\nrollDice()\n\n#1000 rolls Using List comprehhensions\nrolls = [rollDice()[0] for r in range(1000)]\ncounts = pd.Series(rolls).value_counts().sort_index(0).to_frame()\n\nplt.hist(counts)\ncounts.index\nplt.bar(counts.index, counts[0])\n\n#Runs 1 \"turn\". Returns rolls of spaces + number doubles\n#If 3 doubles are rolled in a row, they do not move the third time.\ndef diceSim():\n spaces_moved = np.array([])\n num_dubs = 0\n \n while True:\n roll = rollDice()\n if (roll[1]):\n num_dubs += 1\n if (num_dubs != 3):\n spaces_moved = np.append(spaces_moved, roll[0])\n else: break\n else:\n spaces_moved = np.append(spaces_moved, roll[0])\n break\n \n return(np.array([spaces_moved, num_dubs]))\n\ndiceSim()\ndef simTurns(n):\n return([diceSim() for x in range(n)])\n\nsims = simTurns(10000)\nnum_dubs = np.array([sims[x][1] for x in range(len(sims))])\n\n#Compare. Going to jail isn't very frequent from dice rolls alone\nplt.hist(num_dubs)\nplt.show()\nnp.mean(num_dubs == 3)\n1/6**3\n\n#Rolls for those that went to jail\n[sims[i][0] for i in np.where(num_dubs == 3)[0]]\n[sims[i][0] for i in np.where(num_dubs == 2)[0]][:20] #Those with 2 doubles move 3 times\n\n#Game Board Movement\n#Take Cumulative sum of each row\nall_rolls = np.concatenate([sims[i][0] for i in range(len(sims))])\ncumSum = all_rolls\nfor i in range(1, len(cumSum)):\n cumSum[i] += cumSum[i-1]\n\nplt.hist(cumSum % NUM_SPACES)\nspace_freqs = pd.Series(cumSum % NUM_SPACES).value_counts().sort_index(0)\nspace_freqs #Looks uniform\n\n#Fix: Going to jail on 3rd double or landing on jail space. Jail = 10 spot. 
\n#Assume we always move ahead\ndef goToJail(ind): #Returns overall space after moving to jail from current index\n return(np.ceil((ind - JAIL_SPACE) / NUM_SPACES)*NUM_SPACES + JAIL_SPACE)\n\n#Problem: Once they are in jail, they have to roll doubles to get out\n#For now, I'll just program it so they go to the jail space and assume they pay right away to get out\n#I'm going to have to cumulate spaces in this for loop\nnRuns = 50000\nturns = simTurns(nRuns)\n\ndef simPayOutOfJail(simst): #Assume you pay to get out of jail immeateately. \n rolls2 = np.array([0]) #Start at GO\n for turn in range(len(simst)):\n cumList = np.concatenate(([rolls2[-1]], simst[turn][0])) #Contains previous space # + new ones\n for r in range(1, len(cumList)): #Accumulates space # from rolls\n cumList[r] += cumList[r-1]\n if(simst[turn][1] == 3): #If three triples are thrown, add the jail space after last roll\n cumList = np.concatenate((cumList, [goToJail(cumList[-1])]))\n rolls2 = np.concatenate((rolls2, cumList[1:])) #Put everything together, but don't include previous space twice\n return(rolls2)\n\nsim1 = simPayOutOfJail(turns) % NUM_SPACES\npd.Series(sim1).value_counts().sort_index(0)\n\nplt.hist(simPayOutOfJail(sims) % NUM_SPACES)\nplt.show()\n\n#If every space were uniform, this is the expected # movements per game\n(5/6*1+5/36*2+1/36*3)*nRuns #119444\n#np.sum(pd.Series(sim1).value_counts().sort_index(0)) #119616\nnp.mean(pd.Series(sim1).value_counts().sort_index(0)) #2990\n\nl = [np.sum(pd.Series(simPayOutOfJail(simTurns(nRuns)) % NUM_SPACES).value_counts().sort_index(0))/nRuns for i in range(100)]\n\n#Idea: Make it so the number of turns to pay for jail has probabilities. \n#First program it to try once to get out of jail, and then pay\n\n#n is the maximum number of times you will roll to get out of jail\ndef RollOutOfJail(n):\n non_dubs = 0\n while non_dubs < n:\n roll = rollDice()\n if (roll[1]):\n roll[1] = False\n return(roll)\n non_dubs += 1\n return(rollDice())\n\nplt.hist([RollOutOfJail(2)[0] for i in range(10000)], bins=11)\n#Looks good\n\n# n = 2, dice frequencies\npd.Series([RollOutOfJail(2)[0] for i in range(10000)]).value_counts().sort_index(0)\n\n#Theoretical mean of doubles\n1-(5/6)*(5/6)*(5/6)\n\n#n is a random variabule. w/graph\nnp.random.choice(range(3),1, p=[.2,.4,.3,.1]) \nplt.hist([np.random.choice(range(4),1, p=[.2,.4,.3,.1])[0] for i in range(10000)])\n\n#Histogram of spaces moved after being jailed\nplt.hist([RollOutOfJail(np.random.choice(range(4),1, p=[.2,.4,.3,.1])[0])[0] for i in range(10000)], bins=11)\n\n#Proportion of rolls out of jail that are doubles. \nnp.mean([RollOutOfJail(np.random.choice(range(4),1, p=[.2,.4,.3,.1])[0])[1] for i in range(10000)])\n\n#Next: Spaces on board / Cards\n#Spaces on the board. 
Index corresponds to space value\nSPACES = [\"GO\", \"BRW-Medit\", \"CC1\", \"BRW-Baltic\", \"TAX-Income\", \"RR-Read\", \"WHT-Orient\",\n \"Chance1\", \"WHT-Verm\", \"WHT-Connec\", \"Jail\", \"PUR-StChar\", \"UTL-Elect\", \"PUR-States\",\n \"PUR-Virg\", \"RR-Penn\", \"ORG-StJames\", \"CC2\", \"ORG-Tenn\", \"ORG-NY\", \"Free-Parking\",\n \"RED-Kent\", \"Chance2\", \"RED-Indiana\", \"RED-Illi\", \"RR-B&O\", \"YEL-Atlantic\",\n \"YEL-Vent\", \"UTL-Water\", \"YEL-Marv\", \"GoToJail\", \"GRE-Pacific\", \"GRE-NC\", \"CC3\",\n \"GRE-Penn\", \"RR-SHLine\", \"Chance3\", \"BLU-ParkPl\", \"TAX-Luxury\", \"BLU-Boardwalk\"]\nCHANCE_SPACES = np.where([\"Chance\" in s for s in SPACES])[0]\nCC_SPACES = np.where([\"CC\" in s for s in SPACES])[0]\n\nfor i in range(NUM_SPACES): print(str(i) + \" \" + SPACES[i]) \n \nCHANCE_DECK = [0, 24, 11, \"Util\", \"RR\", \"NA\", \"JailFree\", \"B3\", \"Jail\", \"NA\", \"NA\", 5,\n 39, \"NA\", \"NA\", \"NA\"]\n \nCC_DECK = np.concatenate(([0, \"JailFree\", \"Jail\"] , np.repeat(\"NA\", 13)))\n\n\n# Find the nearest RR (round to nearest 5)\ndef nearestRR(space):\n return int(np.ceil((space+5)/10)*10-5)\n\n#Nearest Utility @ space 28 and 12. ***Maybe make this more robust with arrays\ndef nearestUtil(space):\n rounds = np.floor(space / NUM_SPACES)\n sp = space % NUM_SPACES\n if (sp > 12 and sp <= 28):\n return int(28 + rounds*NUM_SPACES)\n elif (sp >28):\n return int(12+(rounds+1)*NUM_SPACES)\n else:\n return int(12 + rounds*NUM_SPACES)\n\n# Returns final location after moving from current spot to a particular space\ndef goToSpace(curr, space):\n return int(np.ceil((curr - space) / NUM_SPACES)*NUM_SPACES + space)\n\n# Returns the resulting movement after drawing a chance card, \n# if they are jailed, if they got a jail-free card and the drawn deck\ndef drawDeck(typDeck, deck, curr):\n if len(deck) == 0: #Reset and Shuffle the Deck\n deck.extend(typDeck)\n result = [np.nan, False, 0, deck]\n \n from numbers import Number\n card = deck[np.random.randint(0,len(deck),1)[0]]\n # print(\"Card Drawn: {}\".format(card))\n if isinstance(card, Number):\n result[:2] = [goToSpace(curr, card), False]\n print(\"Card Drawn: Move to {}: space {}\".format(SPACES[result[0]], result[0]))\n elif card == \"Util\":\n print(\"Card drawn: Move to nearest Utility\")\n result[0] = nearestUtil(curr)\n elif card == \"RR\":\n print(\"Card drawn: Move to nearest Railroad\")\n result[0] = nearestRR(curr)\n elif card == \"B3\":\n print(\"Card drawn: Move backward 3 spaces\")\n result[0] = curr - 3\n elif card == \"JailFree\":\n result[2] = 1\n elif card == \"Jail\":\n print(\"Card drawn: Go to Jail!! 
\")\n result[1] = True\n elif card == np.nan:\n print(\"Card drawn: Payment (no movement)\")\n deck.remove(card)\n return result\n\n#Explicit functions to avoid mixing them up\ndef drawChance(deck, curr):\n return drawDeck(CHANCE_DECK, deck, curr)\n\ndef drawCC(deck, curr):\n return drawDeck(CC_DECK, deck, curr)\n\n\n\n#Now time to put everything together\nchdeck = CHANCE_DECK[:] \nccdeck = list(CC_DECK)\njail_free = 0\nspaces = np.array([0])\nnum_dubs = 0\nnon_dubs = 0\nn = N()\ncurr_space = 0\njailed = False\n\ndef turn(curr_space, jailed=False, jail_free=0, non_dubs=0, n=N()):\n if jailed:\n assert curr_space % NUM_SPACES == JAIL_SPACE, \"ERROR: You must be on the jail space to be jailed\"\n assert curr_space % NUM_SPACES != GOTO_JAIL_SPACE, \"ERROR: You can't start on the Go to Jail space\"\n\n spaces = np.array([curr_space])\n print(\"You have {} get out of jail free card(s)\".format(jail_free))\n print(\"Starting at space {} ({}): {} \".format(curr_space % NUM_SPACES, curr_space, SPACES[curr_space % NUM_SPACES]))\n \n if jail_free > 0 and jailed:\n print(\"You are Jailed!\")\n jail_free -= 1\n print(\"You used a get out of Jail Free card! {} jail-free cards left\".format(jail_free))\n print(\"You are out of Jail!\")\n jailed = False\n \n if jailed:\n # Roll out of jail\n print(\"You are Jailed!\")\n print(\"You've rolled {} out of a max of {} rolls before you'll pay.\".format(non_dubs, n))\n if non_dubs < n:\n roll = rollDice()\n if roll[1]:\n print(\"You got out of jail! You rolled a double! Move {} spaces\".format(roll[0]))\n jailed = False\n curr_space += roll[0]\n print(\"Moved to space {} ({}): {} \".format(curr_space % NUM_SPACES, curr_space, SPACES[curr_space % NUM_SPACES]))\n spaces = np.append(spaces, curr_space)\n # Check Space\n print(\"Checking Space...\")\n non_dubs = 0\n n = N()\n print(\"Next time you'll roll a max number of {} times to get out of jail\".format(n))\n return spaces # End Turn\n else:\n non_dubs += 1\n print(\"Still in Jail! Did not roll a double. So far, tried {} times\".format(non_dubs))\n \n else:\n print(\"You've hit your max number of turns. Pay to get out of Jail!\".format(non_dubs))\n jailed=False\n print(\"You are now a free citizen (out of jail)!\")\n \n \n if not jailed:\n num_dubs = 0\n roll = rollDice()\n print(\"Rolled a dice total of {}\".format(roll[0]))\n if roll[1]:\n num_dubs = 1\n print(\"Double! #{}\".format(num_dubs))\n curr_space += roll[0]\n print(\"Moved to space {} ({}): {} \".format(curr_space % NUM_SPACES, curr_space, SPACES[curr_space % NUM_SPACES]))\n spaces = np.append(spaces, curr_space)\n #Check Space\n print(\"Checking Space...\")\n while roll[1]: #If you rolled a double\n roll = rollDice()\n print(\"Rolled a dice total of {}\".format(roll[0]))\n if roll[1]:\n num_dubs += 1\n print(\"Double! #{}\".format(num_dubs))\n if (num_dubs < 3): \n curr_space += roll[0]\n print(\"Moved to space {} ({}): {} \".format(curr_space % NUM_SPACES, curr_space, SPACES[curr_space % NUM_SPACES]))\n spaces = np.append(spaces, curr_space)\n #Check Space\n print(\"Checking Space...\")\n else: #JAILED\n print(\"Crap! 
3 Doubles in a row!\")\n curr_space = goToSpace(curr_space, JAIL_SPACE)\n print(\"Moved to space {} ({}): {} \".format(curr_space % NUM_SPACES, curr_space, SPACES[curr_space % NUM_SPACES]))\n spaces = np.append(spaces, curr_space)\n jailed = True\n print(\"You are now Jailed!\")\n break\n return spaces\n\n# Checking resonableness of getting out of jail\nls = np.array([len(turn(50, True, 1,2)) for x in range(1000)])\nnp.mean(ls > 1)\n\n\n# CheckSpace function. x = current space. \n# Returns additional spaces moved, if they were jailed, and # of drawn get out of jail cards\ndef checkSpace(x):\n jail_free = 0\n spaces = np.array([])\n space = x % NUM_SPACES\n jailed = False\n chdeck = CHANCE_DECK[:]\n ccdeck = list(CC_DECK)\n\n things_to_check = [space in CHANCE_SPACES, space in CC_SPACES, space == GOTO_JAIL_SPACE]\n\n if things_to_check[0]:\n print(\"Chance! Draw chance card!\")\n res = drawChance(chdeck, x)\n if isinstance(res[0], int):\n # print(\"Card Drawn: Move to {}: space {}\".format(SPACES[res[0]], res[0]))\n x = goToSpace(x, res[0])\n spaces = np.append(spaces, x)\n print(\"Moved to space {} ({}): {} \".format(x % NUM_SPACES, x, SPACES[x % NUM_SPACES]))\n # elif res[0] == np.nan:\n # print(\"Card drawn: Payment (no movement)\")\n if res[1]: # JAILED\n # print(\"You drew: Go to Jail!\")\n curr_space = goToSpace(x, JAIL_SPACE)\n print(\"Moved to space {} ({}): {} \".format(curr_space % NUM_SPACES, curr_space, SPACES[curr_space % NUM_SPACES]))\n spaces = np.append(spaces, x)\n jailed = True\n print(\"You are now Jailed!\")\n if res[2] == 1:\n jail_free += res[2]\n print(\"You got a get out of jail free card! You now have {} total\".format(jail_free))\n chdeck = res[3]\n print(\"Deck: {} of {} cards left\".format(len(chdeck), len(CHANCE_DECK)))\n\n if things_to_check[1]:\n res = drawCC(ccdeck, x)\n if res[0] != np.nan: \n spaces = np.append(spaces, res[0])\n space = res[0]\n if res[1]:\n print(\"Jailed\")\n # GO TO JAIL\n jail_free += res[2]\n ccdeck = res[3]\n if things_to_check[2]:\n print(\"Jailed\")\n # GO TO JAIL\n if sum(things_to_check) == 0:\n print(\"Nothing to see here....\")\n return [spaces, jailed, jail_free]\n\n[str(r) + \" \" + str(isinstance(r, int)) for r in CHANCE_DECK]\n \n# roll = rollDice() \n# if (roll[1]): #If you roll a double\n# num_dubs += 1\n# if (num_dubs < 3):\n# curr_space += roll[0]\n# spaces = np.append(spaces, curr_space)\n# #Check space\n\n\n\n#Going to jail\n#Rolling three doubles\n#Landing on space 30\n#Drawing a chance/CC\n","sub_path":"Monopoly/Monopoly_draft.py","file_name":"Monopoly_draft.py","file_ext":"py","file_size_in_byte":14407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"622071950","text":"import multiprocessing as mp\nimport time\n\nimport floto\n\n\n@floto.activity(name='activity1', version='v5')\ndef activity1(context):\n print('activity1_v5 started' + 20 * '.')\n result = {'workflow': context['workflow'],\n 'status': 'finished'}\n time.sleep(1)\n print('activity1_v5 finished' + 20 * '.')\n return result\n\n\n@floto.activity(name='activity2', version='v4')\ndef activity2():\n print('activity2 started' + 20 * '.')\n result = {'status': 'finished'}\n time.sleep(1)\n print('activity2 finished' + 20 * '.')\n return result\n\n\n@floto.activity(name='activity3', version='v2')\ndef activity3(context):\n print('activity3 started' + 20 * '.')\n activity1_result = [v for k, v in context.items() if 'activity1' in k][0]\n result = {'status': 'finished',\n 'activity1': activity1_result}\n 
print('activity3 finished' + 20 * '.')\n return result\n\n\nFAILURE_COUNT_1 = 0\n\n\n@floto.activity(name='activity_fails_3', version='v2')\ndef activity_fails_3(context):\n print('activity_fails_3 started' + 20 * '.')\n global FAILURE_COUNT_1\n FAILURE_COUNT_1 += 1\n result = {'workflow_input': context['workflow'],\n 'status': 'finished'}\n if FAILURE_COUNT_1 <= 3:\n print('activity_fails_3 finished with error' + 20 * '.')\n raise Exception('Something went wrong')\n else:\n FAILURE_COUNT_1 = 0\n print('activity_fails_3 finished' + 20 * '.')\n return result\n\n\nFAILURE_COUNT_2 = 0\n\n\n@floto.activity(name='activity_fails_2', version='v2')\ndef activity_fails_2():\n print('activity_fails_2 started' + 20 * '.')\n global FAILURE_COUNT_2\n FAILURE_COUNT_2 += 1\n if FAILURE_COUNT_2 >= 2: FAILURE_COUNT_2 = 0\n print('activity_fails_2 finished with error' + 20 * '.')\n raise Exception('Something went wrong')\n\n\n@floto.activity(name='activity4', version='v2')\ndef activity_4(context):\n print('activity_4 started' + 20 * '.')\n print('activity_4 finished' + 20 * '.')\n return context\n\n\nTIMEOUT_COUNT_1 = 0\n\n\n@floto.activity(name='activity5', version='v1')\ndef activity_5():\n print('activity_5 started' + 20 * '.')\n global TIMEOUT_COUNT_1\n TIMEOUT_COUNT_1 += 1\n result = {'sleep_time': 0}\n if TIMEOUT_COUNT_1 <= 1:\n print('activity_5 sleeping for 2 seconds' + 20 * '.')\n time.sleep(2)\n result['sleep_time'] = 2\n else:\n TIMEOUT_COUNT_1 = 0\n print('activity_5 finished' + 20 * '.')\n return result\n\n\nclass ActivityWorkerProcess(object):\n def __init__(self, domain, task_list):\n self._process = None\n self.worker = floto.ActivityWorker(domain=domain, task_list=task_list)\n self.worker.heartbeat = 2\n\n def start(self):\n self._process = mp.Process(target=self.worker.run)\n self._process.start()\n\n def terminate(self):\n self._process.terminate()\n\n\nif __name__ == '__main__':\n worker = ActivityWorkerProcess(domain='floto_test', task_list='floto_activities')\n worker.start()\n","sub_path":"tests/integration/activity_worker.py","file_name":"activity_worker.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"357147384","text":"#!/usr/bin/env python\n\n# Week 3 Exercises: Functions\n\n# EXERCISE 1:\n\n# Implement FizzBuzz using function(s) with Clean Code standards\n\n# -----FIZZBUZZ CODE HERE-----\n\n\ndef fizzbuzz(num):\n\n if num % 15 == 0:\n return 'FIZZBUZZ'\n elif num % 5 == 0:\n return 'BUZZ'\n elif num % 3 == 0:\n return 'FIZZ'\n else:\n return num\n\n\nif __name__ == '__main__':\n\n list_1to100 = range(1, 100)\n list_FIZZBUZZ = []\n\n for i in list_1to100:\n list_FIZZBUZZ.append(fizzbuzz(i))\n\n print(list_FIZZBUZZ)\n\n# -----END FIZBUZZ CODE-----\n\n# EXERCISE 2:\n\n# Review the payroll.java code in Listing 3-4 in the book\n\n# Re-implement it as clean Python code. 
See Listing 3-5 for guidance.\n\n# -----PAYROLL CODE HERE-----\n\n\n# -----END PAYROLL CODE-----\n","sub_path":"week3/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"210874496","text":"T = int(input())\n\nwhile(T > 0):\n S = input() \n answerArr = {}\n for c in S:\n if c not in answerArr:\n answerArr[c] = 0\n else:\n answerArr[c] += 1 \n \n if len(answerArr) == 2:\n print('YES')\n else:\n print('NO')\n T -= 1\n\n","sub_path":"LTIME23/TICKETS5.py","file_name":"TICKETS5.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"510144449","text":"import logging\n\nfrom app.domain.models import CreateDatasetRequest, ArkivInfo, CreateDatasetResponse\nfrom app.settings import Settings\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_dataset_keys(client, settings: Settings) -> CreateDatasetResponse:\n return client.request_dataset_keys(_payload(settings))\n\n\ndef _payload(settings: Settings) -> CreateDatasetRequest:\n return CreateDatasetRequest(\n arkiv_info=ArkivInfo(\n **settings.aktor.dict(),\n arkivbeskrivelse=settings.arkivbeskrivelse,\n arkivskaper=settings.arkivskaper,\n arkiv_startdato=settings.arkiv_startdato,\n arkiv_sluttdato=settings.arkiv_sluttdato,\n avgiver=settings.avgiver,\n avtalenummer=settings.avtalenummer,\n merkelapp=settings.merkelapp,\n objekt_id=settings.objekt_id,\n raderett=settings.raderett,\n sjekksum_sha256=settings.sjekksum_sha256,\n storrelse=settings.storrelse,\n system=settings.system,\n uttrekksformat=settings.uttrekksformat,\n # TO BE DELETED\n arkiveier_organisasjon=settings.arkiveier_organisasjon,\n arkivskaper_organisasjon=settings.arkivskaper_organisasjon,\n slutt_dato=settings.slutt_dato,\n start_dato=settings.start_dato,\n tittel=settings.tittel,\n ),\n client_name=\"iam-open-dataset\",\n databehandler=settings.databehandler,\n # TO BE DELETED\n depot_institusjon=settings.depot_institusjon,\n )\n","sub_path":"iam-open-dataset/app/services/bevaring_service.py","file_name":"bevaring_service.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"311754928","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport requests\nimport io\nimport PyPDF2\n\nTARGET_HOST = 'http://www.hkexnews.hk'\nTARGET_FILE = ['中期報告']\nTARGET_STRING = ['普通股']\n\nif __name__ == '__main__':\n target_url = 'http://www.hkexnews.hk/listedco/listconews/advancedsearch/search_active_main_c.aspx'\n __VIEWSTATE = ''\n __VIEWSTATEGENERATOR = ''\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'referer': target_url\n }\n\n try:\n get_view_step_res = requests.post(target_url, data={}, headers=headers)\n web_content = get_view_step_res.content\n soup = BeautifulSoup(web_content, 'html.parser')\n \n __VIEWSTATE = soup.find('input', {'id': '__VIEWSTATE'}).get('value')\n __VIEWSTATEGENERATOR = soup.find('input', {'id': '__VIEWSTATEGENERATOR'}).get('value')\n except Exception as ex:\n print(ex)\n\n post_params = {\n 'ctl00$txt_today': '20181019',\n 'ctl00$txt_stock_code': '01828',\n #'ctl00$hfStatus': 'ACM',\n #'ctl00$hfAlert': '沒有此股份代號或股份名單資料'\n 'ctl00$txt_stock_name': '',\n ### 文件種類 ### -> 財務報表/環境、社會及管治資料\n 'ctl00$rdo_SelectDocType': 'rbAfter2006',\n 'ctl00$sel_tier_1': '4',\n ###############\n 'ctl00$sel_DocTypePrior2006': 
'-1',\n 'ctl00$sel_tier_2_group': '-2',\n 'ctl00$sel_tier_2': '-2',\n 'ctl00$ddlTierTwo': '59,1,7',\n 'ctl00$ddlTierTwoGroup': '26,5',\n 'ctl00$txtKeyWord': '',\n 'ctl00$rdo_SelectDateOfRelease': 'rbManualRange',\n 'ctl00$sel_DateOfReleaseFrom_d': '01',\n 'ctl00$sel_DateOfReleaseFrom_m': '01',\n 'ctl00$sel_DateOfReleaseFrom_y': '2018',\n 'ctl00$sel_DateOfReleaseTo_d': '18',\n 'ctl00$sel_DateOfReleaseTo_m': '10',\n 'ctl00$sel_DateOfReleaseTo_y': '2018',\n 'ctl00$sel_defaultDateRange': 'SevenDays',\n 'ctl00$rdo_SelectSortBy': 'rbDateTime',\n '__VIEWSTATEGENERATOR': __VIEWSTATEGENERATOR,\n '__VIEWSTATE': __VIEWSTATE,\n '__VIEWSTATEENCRYPTED': '',\n }\n\n company_file_res = requests.post(target_url, data=post_params, headers=headers)\n soup = BeautifulSoup(company_file_res.content, 'html.parser')\n\n for link in soup.find_all('a'):\n for string in TARGET_FILE:\n if string in link.text:\n target_pdf_url = '{}{}'.format(TARGET_HOST,link.get('href'))\n print(link.text)\n print(target_pdf_url)\n pdf_content = urlopen(target_pdf_url).read()\n #print(pdf_content)","sub_path":"pei_helper.py","file_name":"pei_helper.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"379498228","text":"# manually insert specific whitlised ip\nimport sqlite3\n\n\ndef add_whi(ip):\n conn = sqlite3.connect('whitelist.db')\n c = conn.cursor()\n with conn:\n c.execute(\"INSERT INTO whitelist VALUES (:ip)\",{'ip':ip})\n conn.commit()\n conn.close()\n\n# inp = input('what is the ip that you want to whitelisted: ')\n# add_whi(inp)\n# print('done')\n","sub_path":"version 3/addwhitelist.py","file_name":"addwhitelist.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"422277933","text":"import sys\nimport json\nimport netaddr\nimport re\nimport traceback\nimport urlparse\n\nclass UrlDeconstruction:\n\n def __init__(self):\n self._urlComponents = {}\n self._urlString = ''\n\n def findPattern(self, patternList, testString):\n try:\n for regExpKey in patternList.keys():\n regExpPattern = patternList[regExpKey]\n pattern = regExpPattern.match(testString)\n if pattern:\n return regExpKey\n else:\n return False\n except Exception:\n traceback.print_exc()\n\n def updateStates(self, data):\n try:\n (components, urlString) = data\n self._urlString = urlString\n self._urlComponents.update(components)\n except Exception:\n traceback.print_exc()\n\n def returnJson(self):\n return json.dumps(self._urlComponents, sort_keys=True, indent=4, separators=(',', ': '))\n\n def parseIpv4(self, urlString):\n try:\n # Standard Dotted Notation\n regDotNot = re.compile('^((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(:([\\d]{1,5})(/|$)|/|$)')\n # Dotted Hexadecimal\n regDotHex = re.compile('^(0x[A-F0-9]{2}\\.0x[A-F0-9]{2}\\.0x[A-F0-9]{2}\\.0x[A-F0-9]{2})(:([\\d]{1,5})(/|$)|/|$)', re.IGNORECASE)\n # Dotted Octal\n regDotOct = re.compile('^([\\d]{4}\\.[\\d]{4}\\.[\\d]{4}\\.[\\d]{4})(:([\\d]{1,5})(/|$)|/|$)')\n # Hexadecimal\n regHexDec = re.compile('^(0x[\\dA-F]{8})(:([\\d]{1,5})(/|$)|/|$)', re.IGNORECASE)\n # Octal\n regOct = re.compile('^([\\d]{12})(:([\\d]{1,5})(/|$)|/|$)')\n # Decimal\n regDec = re.compile('^([\\d]{10})(:([\\d]{1,5})(/|$)|/|$)')\n\n # Collection of patterns\n ipv4RegPatterns = {'DotNot': regDotNot, 'DotHex': regDotHex, 'DotOct': regDotOct, 'HexDec': regHexDec, 'Oct': regOct, 'Dec': regDec}\n\n # 
Create Dict & Vars for results\n results = {}\n results['ipv4'] = {}\n newUrlString = ''\n\n # Find Pattern to use\n regExpKey = self.findPattern(ipv4RegPatterns, urlString)\n\n # Parse urlString\n if regExpKey:\n regPattern = ipv4RegPatterns[regExpKey]\n out = [m for m in regPattern.findall(urlString)]\n ipv4Data = [(w, y, len(w + x)) for w, x, y, z in out][0]\n ipAddress = ipv4Data[0]\n ipPort = ipv4Data[1]\n splitPos = ipv4Data[2]\n if ipPort:\n results['ipv4']['port'] = ipPort\n if regExpKey != 'DotNot':\n results['ipv4']['notation'] = str(netaddr.IPAddress(ipAddress))\n results['ipv4']['address'] = ipAddress\n results['ipv4']['type'] = regExpKey.lower()\n\n newUrlString = urlString[splitPos:]\n else:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, newUrlString)\n\n def parseDomain(self, urlString):\n try:\n # Domain Regex\n regDom = re.compile('^([\\w\\-\\.]*\\.[\\w]*)(:([\\d]{1,5})(/|$)|/|$)', re.IGNORECASE)\n regHst = re.compile('^(localhost)(:([\\d]{1,5})(/|$)|/|$)', re.IGNORECASE)\n regLoc = re.compile('^([\\w\\-\\.]{1,}[a-z]{1})(:([\\d]{1,5})(/|$)|/|$)', re.IGNORECASE)\n\n # Collection of patterns\n domRegPatterns = {'Dom': regDom, 'Loc': regLoc, 'Hst': regHst}\n\n # Create Dict & Vars for results\n results = {}\n results['domain'] = {}\n newUrlString = ''\n\n # Find Pattern to use\n regExpKey = self.findPattern(domRegPatterns, urlString)\n\n # Parse urlString\n if regExpKey:\n regPattern = domRegPatterns[regExpKey]\n out = [m for m in regPattern.findall(urlString)]\n fqdnData = [(w, y, len(w + x)) for w, x, y, z in out][0]\n fqdn = fqdnData[0]\n port = fqdnData[1]\n splitPos = fqdnData[2]\n tldPos = fqdn.rfind('.') + 1 if fqdn.find('.') != -1 else None\n tld = fqdn[tldPos:]\n if port:\n results['domain']['port'] = port\n if fqdn:\n results['domain']['fqdn'] = fqdn\n if tldPos:\n results['domain']['tld'] = tld\n\n # Extract SLD Information\n subData = [(x.start(), x.end()) for x in re.finditer('\\.', fqdn)] # Get tuples of all '.' 
positions\n if len(subData) == 1: # Domain contains only SLD\n results['domain']['sld'] = fqdn[:subData[0][0]]\n elif len(subData) > 1: # Domain has more than one sub domain\n posSLD = (subData[len(subData) - 2][1], subData[len(subData) - 1][0])\n results['domain']['sld'] = fqdn[posSLD[0]:posSLD[1]]\n posHostSLD = posSLD[0] - 1\n results['domain']['host'] = fqdn[:posHostSLD]\n else:\n pass\n\n newUrlString = urlString[splitPos:]\n else:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, newUrlString)\n\n\n\n def parseScheme(self, urlString):\n try:\n # Scheme Regex\n regScheme = re.compile('^([\\w-]*)://')\n\n # Create Dict & vars for results\n results = {}\n results['scheme'] = {}\n newUrlString = ''\n\n # Parse urlString\n out = [m.end(0) for m in regScheme.finditer(urlString)]\n if out:\n results['scheme'] = urlString[:out[0]]\n newUrlString = urlString[out[0]:]\n else:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, newUrlString)\n \"\"\"\n def parseCredentials(self, urlString):\n try:\n # Credential Regex\n regCreds = re.compile('~([\\w]*):([\\w*])@|([a-z0-9]*)@')\n\n # Create Dict & Vars for results\n results = {}\n results['credential'] = {}\n newUrlString = ''\n\n # Parse urlString\n out = [m.end(0) for m in regCreds.finditer(urlString)]\n if out:\n credString = urlString[:(out[0] - 1)]\n # Separate User:Pass if present\n if credString.find(':') > 0:\n (credUser, credPass) = credString.split(':')\n results['credential']['username'] = credUser\n results['credential']['password'] = credPass\n else:\n results['credential']['username'] = credString\n\n newUrlString = urlString[out[0]:]\n else:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, newUrlString)\n \"\"\"\n\n def parseAnchor(self, urlString):\n try:\n # Create Dict & Vars for results\n results = {}\n newUrlString = ''\n\n # If urlString Contains an Anchor, extract that now\n if urlString.find('#') >= 0:\n anchorPos = urlString.find('#')\n results['#'] = urlString[anchorPos + 1: ]\n newUrlString = urlString[:anchorPos]\n else:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, newUrlString)\n\n def parsePath(self, urlString):\n try:\n # Path Regrex\n regPath = re.compile('^([\\w\\./\\(\\,)\\-]*)')\n\n # Create Dict & Vars for results\n results = {}\n results['path'] = {}\n newUrlString = ''\n\n # Parse urlString\n out = regPath.match(urlString)\n if out:\n if out.groups()[0] != '':\n results['path'] = out.groups()[0]\n newUrlString = urlString[out.end() + 1: ]\n else:\n results = None\n else:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, newUrlString)\n\n def parseCGI(self, urlString):\n try:\n # CGI Regrex\n regCGI = re.compile('([\\w\\-\\.]+)[:= ] ?\"?([\\w\\-\\.\\+\\(\\)\\s:\\/]+)\"?|^([\\w\\-\\.]*)|;([\\w\\-\\.]*)$')\n\n # Create Dict & Vars for results\n results = {}\n results['cgi'] = {}\n foundAnchor = False\n\n # Parse urlString\n out = regCGI.findall(urlString)\n if out != [('', '', '', '')]:\n for values in out:\n v0 = values[0]\n v1 = values[1]\n v2 = values[2]\n v3 = values[3]\n if v0:\n results['cgi'][v0] = v1\n if v2:\n results['cgi'][v2] = ''\n if v3:\n results['cgi'][v3] = ''\n else:\n if not foundAnchor:\n results = None\n except Exception:\n traceback.print_exc()\n finally:\n return (results, '')\n\n\n def urlParseEngine(self, urlInput):\n try:\n # 0. 
Clear vars\n self._urlComponents = {}\n self._urlString = ''\n\n # 1. Clean the url, as it may be quoted\n cleanUrl = urlparse.unquote(urlInput)\n # print cleanUrl\n\n # 2. Record the result\n self._urlComponents['input_url'] = urlInput\n # 2a. Make note if it was different, see urlInput to new cleaned input\n if cleanUrl != urlInput:\n self._urlComponents['clean_url'] = cleanUrl\n urlInput = cleanUrl\n\n # 3. Set the urlString variable\n self._urlString = urlInput\n\n # 4. Begin main parse logic\n # --Scheme\n outScheme = self.parseScheme(self._urlString)\n # print outScheme\n if outScheme != (None, ''): self.updateStates(outScheme)\n\n # --Credentials\n # outCreds = self.parseCredentials(self._urlString)\n # if outCreds != (None, ''): self.updateStates(outCreds)\n\n # 5. IPv4 and Domain Parsing, First match wins\n matchToggle = False\n outIpv4 = self.parseIpv4(self._urlString)\n if outIpv4 != (None, '') and matchToggle is False:\n self.updateStates(outIpv4)\n matchToggle = True\n\n outDomain = self.parseDomain(self._urlString)\n if outDomain != (None, '') and matchToggle is False:\n self.updateStates(outDomain)\n matchToggle = True\n\n if not matchToggle:\n warningMessage = ({'warning_message': 'unable to determine host, stopping parser'}, '')\n self.updateStates(warningMessage)\n return self._urlComponents\n\n # 6. Anchor Parsing\n outAnchor = self.parseAnchor(self._urlString)\n if outAnchor != (None, ''):\n self.updateStates(outAnchor)\n\n # 7. Path Parsing\n outPath = self.parsePath(self._urlString)\n if outPath != (None, ''):\n self.updateStates(outPath)\n\n # 8. CGI/Anchor Parsing\n outCGI = self.parseCGI(self._urlString)\n if outCGI != (None, ''):\n self.updateStates(outCGI)\n\n\n except Exception as err:\n traceback.print_exc()\n finally:\n return self._urlComponents\n\ndef run(urlString):\n urld = UrlDeconstruction()\n urld.urlParseEngine(urlString)\n print(urld.returnJson())\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n run(sys.argv[1])\n else:\n sys.exit(-1)\n","sub_path":"src/urlParser.py","file_name":"urlParser.py","file_ext":"py","file_size_in_byte":12205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"100247958","text":"import time\nimport glob\nimport common.gym_interface as gym_interface\nimport pybullet as p\nimport os\nimport pybullet_data\nimport shutil\nimport re\nimport numpy as np\nimport random\ndef set_torque(jointIndex, torque):\n p.setJointMotorControl2(bodyIndex=robot,\n jointIndex=jointIndex,\n controlMode=p.TORQUE_CONTROL,\n force=torque)\n\np.connect(p.GUI)\np.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)\n\nfor body in [800]:\n print(f\"body {body}\")\n p.resetSimulation()\n filename = os.path.join(pybullet_data.getDataPath(), \"plane_stadium.sdf\")\n _ = p.loadSDF(filename)\n filename = f\"../input_data/bodies/{body}.xml\"\n (robot,) = p.loadMJCF(filename)\n print(f\"\\n\\nbody {body}\\n\\n\")\n for joint_id in range(p.getNumJoints(robot)):\n info = p.getJointInfo(robot, joint_id)\n print(f\"joint {joint_id}\", end=\", \")\n print(info[1], end=\", \")\n d = p.getDynamicsInfo(robot, joint_id)\n print(d[0])\n \n for step in range(100000):\n set_torque(0,100)\n # break\n p.stepSimulation()\n time.sleep(0.01)\n","sub_path":"project/experiments/exp_027_randomly_align/src/show_body.py","file_name":"show_body.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"478723878","text":"# coding=utf-8\n# 
--------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass MediaJobError(Model):\n \"\"\"Details of JobOutput errors.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar code: Error code describing the error. Possible values include:\n 'ServiceError', 'ServiceTransientError', 'DownloadNotAccessible',\n 'DownloadTransientError', 'UploadNotAccessible', 'UploadTransientError',\n 'ConfigurationUnsupported', 'ContentMalformed', 'ContentUnsupported'\n :vartype code: str or ~azure.eventgrid.models.MediaJobErrorCode\n :ivar message: A human-readable language-dependent representation of the\n error.\n :vartype message: str\n :ivar category: Helps with categorization of errors. Possible values\n include: 'Service', 'Download', 'Upload', 'Configuration', 'Content'\n :vartype category: str or ~azure.eventgrid.models.MediaJobErrorCategory\n :ivar retry: Indicates that it may be possible to retry the Job. If retry\n is unsuccessful, please contact Azure support via Azure Portal. Possible\n values include: 'DoNotRetry', 'MayRetry'\n :vartype retry: str or ~azure.eventgrid.models.MediaJobRetry\n :ivar details: An array of details about specific errors that led to this\n reported error.\n :vartype details: list[~azure.eventgrid.models.MediaJobErrorDetail]\n \"\"\"\n\n _validation = {\n 'code': {'readonly': True},\n 'message': {'readonly': True},\n 'category': {'readonly': True},\n 'retry': {'readonly': True},\n 'details': {'readonly': True},\n }\n\n _attribute_map = {\n 'code': {'key': 'code', 'type': 'MediaJobErrorCode'},\n 'message': {'key': 'message', 'type': 'str'},\n 'category': {'key': 'category', 'type': 'MediaJobErrorCategory'},\n 'retry': {'key': 'retry', 'type': 'MediaJobRetry'},\n 'details': {'key': 'details', 'type': '[MediaJobErrorDetail]'},\n }\n\n def __init__(self, **kwargs):\n super(MediaJobError, self).__init__(**kwargs)\n self.code = None\n self.message = None\n self.category = None\n self.retry = None\n self.details = None\n","sub_path":"sdk/eventgrid/azure-eventgrid/azure/eventgrid/models/media_job_error.py","file_name":"media_job_error.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"15192922","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib import admin\nimport os\n\nadmin.autodiscover()\nsite_static = os.path.join(\n os.path.dirname(__file__), 'templates'\n)\n\n\nurlpatterns = patterns('',\n url(r'^$', 'bars.news.views.hello'),\n url(r'^hello/', 'bars.news.views.hello'),\n url(r'^news/', 'bars.news.views.news'),\n url(r'^getlog/$', 'bars.upgmaker.views.getlog'),\n url(r'^log/', 'bars.upgmaker.views.log'),\n url(r'^runbranch/$', 'bars.upgmaker.views.runbranch'),\n url(r'^getChlists/$', 'bars.upgmaker.views.getChlists'),\n url(r'^upgmaker/', 'bars.upgmaker.views.upgmaker'),\n url(r'^knowledge/', 'bars.knowledge.views.knowledge'),\n url(r'^about/', 'bars.about.views.about'),\n url(r'^faq/', 
'bars.faq.views.faq'),\n url( r'^accounts/login/$', 'django.contrib.auth.views.login', { \"template_name\": \"accounts/login.html\" } ),\n url( r'^accounts/logout/$','bars.accounts.views.doLogout'),\n url( r'^login/$', 'bars.accounts.views.doLogin'),\n url( r'^logout/$', 'bars.accounts.views.doLogout'),\n (r'^site_static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"313884207","text":"#!/usr/bin/env python3\n# __author__:\"hengda_fu\"\n\n'''\n基于socket实现简单的ftp功能,暂时实现从client 下载server文件功能。\n\n'''\n\nimport socket\nimport os\nimport hashlib\n\nserver = socket.socket()\nserver.bind(('127.0.0.1', 9999))\nserver.listen()\nprint('开始监听9999端口')\n\nwhile True:\n conn, addr = server.accept()\n print('receive addr:{0} '.format(addr))\n while True:\n file_name = conn.recv(1024).decode()\n print('接收文件参数:{0}'.format(file_name))\n if not file_name:\n break\n\n # 判断文件是否存在\n if os.path.isfile(file_name):\n # 获取文件大小并发送给客户端\n file_size = str(os.path.getsize(file_name))\n conn.send(file_size.encode('utf-8'))\n print('文件大小: {0}'.format(file_size))\n\n accept_info = conn.recv(1024).decode()\n if accept_info == 'ok':\n send_size = 0\n\n m5 = hashlib.md5()\n\n with open(file_name, 'rb') as f1:\n for lines in f1:\n send_size += len(lines)\n m5.update(lines)\n conn.send(lines)\n m5_res = m5.hexdigest()\n print('文件发送完成!,MD5: {0}'.format(m5_res))\n conn.send(m5_res.encode('utf-8'))\n else:\n print('文件不存在!')\n conn.send('0'.encode('utf-8'))\n\n\nserver.close()\n\n","sub_path":"oldboy/day08-socket网络编程/基于socket的FTP/01ftp_socket_server.py","file_name":"01ftp_socket_server.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"560688901","text":"from random import randint\na=int(input('Do ilu punktów chcesz grać? '))\npp=0\nhs=0\nep=0\ndef score(a,b):\n\tif a>b:\n\t\ths=a\n\telse:\n\t\ths=b\n\treturn(hs)\nwhile hs!=a:\n\tprint('1. Kamień')\n\tprint('2. Papier')\n\tprint('3. 
Nożyce')\n\tp=int(input())\n\te=randint(1,3)\n\tif p==e:\n\t\tprint('Remis!')\n\telif(p==1 and e==3) or (p==2 and e==1) or (p==3 and e==2):\n\t\tprint('Zdobywasz punkt!')\n\t\tpp+=1\n\t\ths=score(pp,ep)\n\telse:\n\t\tprint('Przeciwnik zdobywa punkt!')\n\t\tep+=1\n\t\ths=score(pp,ep)\nif pp>ep:\n\tprint('Wygrałeś!')\nelse:\n\tprint('Przegrałeś!')\n","sub_path":"06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"56809005","text":"# python\nimport requests\n\n# coogger-pyhton\nfrom coogger.user import SteemConnectUserApi, UserFilterApi\nfrom coogger.utils import CONTENT_API, CONTENT_FILTER_API\n\n\nclass ContentApi(SteemConnectUserApi):\n \"\"\"\n ContentApi class is used to view and update a specific content.\n \"\"\"\n def __init__(self, username, permlink, data):\n super().__init__(username, data)\n self.permlink = permlink\n self.api_url = CONTENT_API.format(self.username, self.permlink)\n\n\nclass ContentFilterApi(UserFilterApi):\n \"\"\"\n The ContentFilterApi class allows content to be filtered by below property.\n\n \"\"\"\n def __init__(self, data):\n super().__init__(data)\n self.api_url = CONTENT_FILTER_API\n","sub_path":"coogger/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"168982810","text":"import numpy as np\nimport itertools\nimport json\nimport time\nfrom math import sqrt\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\n# This function is from https://github.com/kuangliu/pytorch-ssd.\ndef calc_iou_tensor(box1, box2):\n \"\"\" Calculation of IoU based on two boxes tensor,\n Reference to https://github.com/kuangliu/pytorch-ssd\n input:\n box1 (N, 4) \n box2 (M, 4)\n output:\n IoU (N, M)\n \"\"\"\n N = box1.shape[0]\n M = box2.shape[0]\n\n be1 = np.expand_dims(box1, 1).repeat(M, axis=1)\n be2 = np.expand_dims(box2, 0).repeat(N, axis=0)\n lt = np.maximum(be1[:,:,:2], be2[:,:,:2])\n rb = np.minimum(be1[:,:,2:], be2[:,:,2:])\n\n delta = rb - lt\n delta[delta < 0] = 0\n intersect = delta[:,:,0]*delta[:,:,1]\n\n delta1 = be1[:,:,2:] - be1[:,:,:2]\n area1 = delta1[:,:,0]*delta1[:,:,1]\n delta2 = be2[:,:,2:] - be2[:,:,:2]\n area2 = delta2[:,:,0]*delta2[:,:,1]\n\n iou = intersect/(area1 + area2 - intersect)\n return iou\n\ndef softmax_cpu(x, dim=-1):\n x = np.exp(x)\n s = np.expand_dims(np.sum(x, axis=dim), dim)\n return x/s\n\ndef dboxes_R34_coco():\n figsize = [1200, 1200]\n strides = [3,3,2,2,2,2]\n feat_size = [[50, 50], [25, 25], [13, 13], [7, 7], [3, 3], [3, 3]]\n steps=[(int(figsize[0]/fs[0]),int(figsize[1]/fs[1])) for fs in feat_size]\n # use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py\n scales = [(int(s*figsize[0]/300),int(s*figsize[1]/300)) for s in [21, 45, 99, 153, 207, 261, 315]] \n aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]] \n dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)\n return dboxes\n\n# This function is from https://github.com/kuangliu/pytorch-ssd.\nclass Encoder(object):\n \"\"\"\n Inspired by https://github.com/kuangliu/pytorch-ssd\n Transform between (bboxes, lables) <-> SSD output\n \n dboxes: default boxes in size 8732 x 4, \n encoder: input ltrb format, output xywh format\n decoder: input xywh format, output ltrb format \n \n decode:\n input : bboxes_in (Tensor 8732 x 4), scores_in (Tensor 8732 x 
nitems)\n output : bboxes_out (Tensor nboxes x 4), labels_out (Tensor nboxes)\n criteria : IoU threshold of bboexes\n max_output : maximum number of output bboxes\n \"\"\"\n\n def __init__(self, dboxes):\n self.dboxes = dboxes(order=\"ltrb\")\n #self.dboxes_xywh = dboxes(order=\"xywh\").unsqueeze(dim=0)\n self.dboxes_xywh = np.expand_dims(dboxes(order=\"xywh\"),0)\n self.nboxes = self.dboxes.shape[0]\n #print(\"# Bounding boxes: {}\".format(self.nboxes))\n self.scale_xy = dboxes.scale_xy\n self.scale_wh = dboxes.scale_wh\n\n def scale_back_batch(self, bboxes_in, scores_in,device):\n \"\"\"\n Do scale and transform from xywh to ltrb\n suppose input Nx4xnum_bbox Nxlabel_numxnum_bbox\n \"\"\"\n \n bboxes_in = bboxes_in.transpose([0,2,1])\n scores_in = scores_in.transpose([0,2,1])\n\n bboxes_in[:, :, :2] = self.scale_xy*bboxes_in[:, :, :2]\n bboxes_in[:, :, 2:] = self.scale_wh*bboxes_in[:, :, 2:]\n\n bboxes_in[:, :, :2] = bboxes_in[:, :, :2]*self.dboxes_xywh[:, :, 2:] + self.dboxes_xywh[:, :, :2]\n bboxes_in[:, :, 2:] = np.exp(bboxes_in[:, :, 2:])*self.dboxes_xywh[:, :, 2:]\n\n # Transform format to ltrb \n l, t, r, b = bboxes_in[:, :, 0] - 0.5*bboxes_in[:, :, 2],\\\n bboxes_in[:, :, 1] - 0.5*bboxes_in[:, :, 3],\\\n bboxes_in[:, :, 0] + 0.5*bboxes_in[:, :, 2],\\\n bboxes_in[:, :, 1] + 0.5*bboxes_in[:, :, 3]\n\n bboxes_in[:, :, 0] = l\n bboxes_in[:, :, 1] = t\n bboxes_in[:, :, 2] = r\n bboxes_in[:, :, 3] = b\n\n return bboxes_in, softmax_cpu(scores_in, dim=-1)\n \n def decode_batch(self, bboxes_in, scores_in, criteria = 0.45, max_output=200,device=0):\n bboxes, probs = self.scale_back_batch(bboxes_in, scores_in,device)\n output = []\n for bbox, prob in zip(bboxes, probs):\n output.append(self.decode_single(bbox, prob, criteria, max_output))\n #print(output[-1])\n return output\n\n # perform non-maximum suppression\n def decode_single(self, bboxes_in, scores_in, criteria, max_output, max_num=200):\n # Reference to https://github.com/amdegroot/ssd.pytorch\n \n bboxes_out = [] \n scores_out = []\n labels_out = []\n\n for i, score in enumerate(np.split(scores_in, scores_in.shape[1], 1)):\n # skip background\n # print(score[score>0.90])\n if i == 0: continue\n score = score.squeeze(1)\n mask = score > 0.05\n\n bboxes, score = bboxes_in[mask, :], score[mask]\n if score.shape[0] == 0: continue\n\n score_sorted = np.sort(score, axis=0)\n score_idx_sorted = np.argsort(score, axis=0)\n\n # select max_output indices\n score_idx_sorted = score_idx_sorted[-max_num:]\n candidates = []\n \n while score_idx_sorted.size > 0:\n idx = score_idx_sorted[-1].item()\n bboxes_sorted = bboxes[score_idx_sorted, :]\n bboxes_idx = np.expand_dims(bboxes[idx, :],0)\n iou_sorted = calc_iou_tensor(bboxes_sorted, bboxes_idx).squeeze()\n # we only need iou < criteria \n score_idx_sorted = score_idx_sorted[iou_sorted < criteria]\n candidates.append(idx)\n\n bboxes_out.append(bboxes[candidates, :])\n scores_out.append(score[candidates])\n labels_out.extend([i]*len(candidates))\n\n bboxes_out = np.concatenate(bboxes_out, axis=0)\n labels_out = np.array(labels_out, dtype=np.long)\n scores_out = np.concatenate(scores_out, axis=0)\n\n max_ids = np.argsort(scores_out, axis=0)\n max_ids = max_ids[-max_output:]\n return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]\n\n\nclass DefaultBoxes(object):\n def __init__(self, fig_size, feat_size, steps, scales, aspect_ratios, \\\n scale_xy=0.1, scale_wh=0.2):\n\n self.feat_size = feat_size\n self.fig_size_w,self.fig_size_h = fig_size\n\n self.scale_xy_ = scale_xy\n 
self.scale_wh_ = scale_wh\n \n # According to https://github.com/weiliu89/caffe\n # Calculation method slightly different from paper\n self.steps_w = [st[0] for st in steps]\n self.steps_h = [st[1] for st in steps]\n self.scales = scales\n fkw = self.fig_size_w//np.array(self.steps_w)\n fkh = self.fig_size_h//np.array(self.steps_h)\n self.aspect_ratios = aspect_ratios\n\n self.default_boxes = []\n # size of feature and number of feature\n for idx, sfeat in enumerate(self.feat_size):\n sfeat_w,sfeat_h=sfeat\n sk1 = scales[idx][0]/self.fig_size_w\n sk2 = scales[idx+1][1]/self.fig_size_h\n sk3 = sqrt(sk1*sk2)\n all_sizes = [(sk1, sk1), (sk3, sk3)]\n for alpha in aspect_ratios[idx]:\n w, h = sk1*sqrt(alpha), sk1/sqrt(alpha)\n all_sizes.append((w, h))\n all_sizes.append((h, w))\n for w, h in all_sizes:\n for i, j in itertools.product(range(sfeat_w), range(sfeat_h)):\n cx, cy = (j+0.5)/fkh[idx], (i+0.5)/fkw[idx]\n self.default_boxes.append((cx, cy, w, h)) \n self.dboxes = np.array(self.default_boxes)\n self.dboxes.clip(min=0, max=1, out=self.dboxes)\n # For IoU calculation\n self.dboxes_ltrb = self.dboxes.copy()\n self.dboxes_ltrb[:, 0] = self.dboxes[:, 0] - 0.5*self.dboxes[:, 2]\n self.dboxes_ltrb[:, 1] = self.dboxes[:, 1] - 0.5*self.dboxes[:, 3]\n self.dboxes_ltrb[:, 2] = self.dboxes[:, 0] + 0.5*self.dboxes[:, 2]\n self.dboxes_ltrb[:, 3] = self.dboxes[:, 1] + 0.5*self.dboxes[:, 3]\n \n @property\n def scale_xy(self):\n return self.scale_xy_\n \n @property \n def scale_wh(self):\n return self.scale_wh_\n\n def __call__(self, order=\"ltrb\"):\n if order == \"ltrb\": return self.dboxes_ltrb\n if order == \"xywh\": return self.dboxes\n\n# This function is from https://github.com/zengarden/light_head_rcnn\ndef cocoval(detected_json, eval_json):\n eval_gt = COCO(eval_json)\n\n eval_dt = eval_gt.loadRes(detected_json)\n cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')\n\n # cocoEval.params.imgIds = eval_gt.getImgIds()\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,\n np.int16, np.int32, np.int64, np.uint8,\n np.uint16,np.uint32, np.uint64)):\n return int(obj)\n elif isinstance(obj, (np.float_, np.float16, np.float32, \n np.float64)):\n return float(obj)\n elif isinstance(obj, (np.ndarray,)): # add this line\n return obj.tolist() # add this line\n return json.JSONEncoder.default(self, obj)\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"others/cloud/single_stage_detector/tensorflow/utils_tf.py","file_name":"utils_tf.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"568038212","text":"import platform\nimport subprocess\nimport zenpacket.banner as banner\nimport zenpacket.process as process\n\n\n\nclass Interceptor(object):\n \"\"\"This is the class responsible for intercepting packages in real time,\n interpreting these packets, interpreting the preconditions, executions\n and post-conditions of the template and forwarding the modified package\n to the target machine.\"\"\"\n\n def __init__(self, tcp_ingress_rules=\"iptables -A INPUT -p tcp -m multiport --dports 80,443 -m conntrack --ctstate NEW,ESTABLISHED -j NFQUEUE --queue-num 2\",\n tcp_egress_rules=\"iptables -A OUTPUT -p tcp -m multiport --dports 80,443 -m conntrack --ctstate ESTABLISHED -j NFQUEUE --queue-num 2\"):\n\n \"\"\"Initialization method of the `Interceptor` class.\n\n Parameters\n 
----------\n template : :obj:`Template`\n A `Template` objet that will be parsed to obtain the conditions\n and other values.\n iptables_rule : :obj:`str`\n Iptables rule for intercepting packets.\n ip6tables_rule : :obj:`str`\n Iptables rule for intercepting packets for ipv6.\n\n \"\"\"\n self.tcp_ingress_rules = tcp_ingress_rules\n self.tcp_egress_rules = tcp_egress_rules\n\n self.packet = None\n self._functions = []\n\n def set_iptables_rules(self):\n subprocess.check_output(self.tcp_ingress_rules, shell=True, stderr=subprocess.STDOUT)\n subprocess.check_output(self.tcp_egress_rules, shell=True, stderr=subprocess.STDOUT)\n \n def clean_iptables(self):\n subprocess.check_output(\"iptables -F\", shell=True, stderr=subprocess.STDOUT)\n subprocess.check_output(\"ip6tables -F\", shell=True, stderr=subprocess.STDOUT)\n\n def linux_modify(self, packet):\n \"\"\"This is the callback method that will be called when a packet\n is intercepted. It is responsible of executing the preconditions,\n executions and postconditions of the `Template`.\n\n Parameters\n ----------\n packet : :obj:`Packet`\n Netfilterqueue packet object. The packet that is intercepted.\n\n \"\"\"\n # Initialization of the Packet with the new raw bytes\n self.packet = packet\n print(\"packet\",self.packet)\n pkt = process.process_packet(self.packet)\n if pkt:\n print(\"PASSING\")\n packet.set_payload(pkt)\n packet.accept()\n else:\n print(\"BLOCK\")\n packet.drop()\n\n def windows_modify(self, packet, w, pydivert):\n \"\"\"This is the callback method that will be called when a packet\n is intercepted. It is responsible of executing the preconditions,\n executions and postconditions of the `Template`.\n\n Parameters\n ----------\n packet : :obj:`Packet`\n Netfilterqueue packet object. The packet that is intercepted.\n w : pointer\n windiver pointer.\n\n \"\"\"\n # Initialization of the Packet with the new raw bytes\n self.packet = packet.get_payload()\n # Executing the preconditions, executions and postconditions\n for functions in self._functions:\n for condition in functions:\n pkt = condition(self.packet)\n # If the condition returns None, it is not held and the\n # packet must be forwarded\n if not pkt:\n w.send(packet)\n return\n # If the precondition returns the packet, we assign it to the\n # actual packet\n self.packet = pkt\n # If all the conditions are met, we assign the payload of the modified\n # packet to the nfqueue packet and forward it\n packet = pydivert.Packet(self.packet, packet.interface, packet.direction)\n w.send(packet)\n\n def intercept(self):\n \"\"\"This method intercepts the packets and send them to a callback\n function.\"\"\"\n # For Windows Platforms\n if platform.system() == \"Windows\":\n import pydivert\n w = pydivert.WinDivert()\n w.open()\n print(\"[*] Waiting for packets...\\n\\n(Press Ctrl-C to exit)\\n\")\n try:\n while True:\n self.windows_modify(w.recv(), w, pydivert)\n except KeyboardInterrupt:\n w.close()\n # For Linux platforms\n elif platform.system() == \"Linux\":\n from netfilterqueue import NetfilterQueue\n nfqueue = NetfilterQueue()\n # The iptables rule queue number by default is 1\n nfqueue.bind(2, self.linux_modify)\n try:\n self.set_iptables_rules()\n print(banner.get_banner())\n print(\"[*] Waiting for packets...\\n\\n(Press Ctrl-C to exit)\\n\")\n nfqueue.run()\n except KeyboardInterrupt:\n self.clean_iptables()\n elif platform.system() == \"Darwin\":\n print(\"MAC SNIFFER\")\n from scapy.all import conf, sniff\n conf.iface=\"lo0\"\n conf.use_pcap = True\n 
sniff(prn=process.process_sc_packet)\n else:\n print(\"Sorry. Platform not supported!\\n\")\n","sub_path":"zenpacket/interceptor.py","file_name":"interceptor.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"599134847","text":"from pymongo import MongoClient\nimport datetime\nimport random\n\n\nclient = MongoClient('localhost', 27017)\ndb = client['nex-monitor']\ncoll = db['monitor-data']\ncoll_log = db['monitor-log'] \n\ndef clear_all_data():\n coll.drop()\n coll_log.drop()\n\ndef generate_data():\n to_insert = [{\"ip\":\"10.0.1.2\", \"status\":\"offline\", \"network\":\"others\",\"monitor\":True},\n {\"ip\":\"10.0.1.3\", \"status\":\"online\", \"network\":\"others\",\"monitor\":True},\n {\"ip\":\"10.0.1.4\", \"status\":\"online\", \"network\":\"others\",\"monitor\":True},\n {\"ip\":\"10.0.1.5\", \"status\":\"offline\", \"network\":\"others\",\"monitor\":True},\n {\"ip\":\"10.0.1.6\", \"status\":\"offline\", \"network\":\"others\",\"monitor\":True},\n {\"ip\":\"10.0.1.20\", \"status\":\"online\", \"network\":\"others\",\"monitor\":True}]\n\n for node in to_insert:\n coll.update_one({\"ip\":node[\"ip\"]}, {\"$set\":{\"ip\":node[\"ip\"], \"status\":node[\"status\"],\"network\":node[\"network\"], \"monitor\":node[\"monitor\"]}}, upsert = True)\n\ndef generate_log():\n ip = \"10.20.0.3\"\n date = \"2016-01-18\"\n status = [\"online\", \"offline\"]\n to_insert = []\n\n for hour in range(24):\n for miniute in range(0, 60, 5):\n coll_log.insert_one(create_log(ip=ip,status=random.choice(status), date=date, time=str(datetime.time(hour, miniute))))\n\n\n\n\ndef create_log(ip, status, date, time):\n return {\"ip\":ip, \"status\":status, \"date\":date, \"time\":time}\n\nif __name__ == '__main__':\n clear_all_data()\n generate_data()\n \n\n\n \n","sub_path":"monitor/db_init.py","file_name":"db_init.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"196173858","text":"from django.contrib import admin\n\nfrom .models import *\n\nadmin.site.register(FeedbackForm)\nadmin.site.register(Question)\nadmin.site.register(ConsolidatedReport)\nadmin.site.register(StudentConsolidatedReport)\n\n@admin.register(Answer)\nclass AnswerAdmin(admin.ModelAdmin):\n\n\tfieldsets = (\n\t\t(None, {'fields': ('question', 'teacher', 'form', 'recipient')}),\n\t)\n\n@admin.register(StudentAnswer)\nclass StudentAnswerAdmin(admin.ModelAdmin):\n\n\tfieldsets = (\n\t\t(None, {'fields': ('question', 'teacher', 'form', 'recipient')}),\n\t)\n\tsearch_fields = ('teacher__teacher__first_name',)\n\tlist_display = ['teacher','get_code',]\n\n\tdef get_code(self, obj):\n\t\treturn obj.form.code\n","sub_path":"apps/feedback/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"140000838","text":"#!/usr/bin/env python\n#! -*- coding:utf-8 -*-\n#!@Author: faple\n#!@Time: 2019/3/29 11:26\n#!/usr/bin/env python\n# ! 
-*- coding:utf-8 -*-\n# !@Author: faple\n# !@Time: 2019/3/29 9:29\n\n# 人脸识别类:使用face_recognition\n\nimport cv2\nimport face_recognition\nimport os\nfrom multiprocessing import Pool, Value, Process\n\n# 人脸库目录\nface_datasets_path = \"img/input\"\n# 获取RTSP视频流\ncap = cv2.VideoCapture(\"rtsp://admin:briup2017@192.168.1.120\")\n# 所有的文件名\ntotal_img_names = []\n# 所有的人脸\ntotal_img_faces = []\n\n\n# 判断加载人脸库时是否是合适的照片\ndef isImage(path, filename, num):\n face_location = face_recognition.load_image_file(path + \"/\" + filename)\n if len(face_location) > 0:\n # 若合适取128特征点之后加入人脸库\n total_img_faces.append(\n face_recognition.face_encodings(\n face_location)[0])\n # 文件名加入所有人名库之中\n filename = filename.split('.')[0]\n total_img_names.append(filename)\n print('人脸库中加入第' + str(num) + '张人脸:', filename)\n\n\n# 加载人脸库\ndef loadImage():\n for filename, num in zip(os.listdir(path=face_datasets_path), range(1, len(face_datasets_path) + 1)):\n isImage(face_datasets_path, filename, num)\n\n\n# 关闭资源\ndef destroyCapAndCV2():\n cap.release()\n cv2.destroyAllWindows()\n\n\n# 获取每帧人脸信息并且进行匹配\ndef getFaceInformation(frame, count, total_img_faces, total_img_names):\n face_locations = face_recognition.face_locations(frame)\n face_encodings = face_recognition.face_encodings(frame, face_locations)\n # 在这个视频帧中循环遍历每个人脸\n j = 0\n for (top, right, bottom, left), face_encoding in zip(\n face_locations, face_encodings):\n j += 1\n print(\"===========第\" + str(count) + \"次人脸识别开始===============\")\n print(\"===========第\" + str(count) + \"次人脸识别中=====第\" + str(j) + \"张人脸识别开始==========\")\n # 看看面部是否与已知人脸相匹配。\n # print(\"===========第\" + str(c) + \"次人脸识别开始匹配===============\", len(total_face_encoding))\n for i, v in enumerate(total_img_faces):\n match = face_recognition.compare_faces(\n [v], face_encoding, tolerance=0.45)\n name = \"Unknown\"\n if match[0]:\n name = total_img_names[i]\n print(\"===========第\" + str(count) + \"次人脸识别中=====第\" + str(i) + \"次人脸库图片比较==========\", name)\n break\n print(\"===========第\" + str(count) + \"次人脸识别中=====第\" + str(i) + \"次人脸库图片比较==========\", name)\n print(\"===========第\" + str(count) + \"次人脸识别中=====第\" + str(j) + \"张人脸识别结束==========\")\n\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n # 画出一个带名字的标签,放在框下\n cv2.rectangle(frame, (left - 15, bottom + 35), (right + 15, bottom), (187, 255, 255),\n cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left - 10, bottom + 30), font, 1.0,\n (0, 0, 0), 1)\n cv2.imwrite(str(count).split('.')[0] + '.png', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])\n print(\"===========第\" + str(count) + \"次人脸识别结束===============\")\n\n\n# # 画出一个框,框住脸\n# cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n# # 画出一个带名字的标签,放在框下\n# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),\n# cv2.FILLED)\n# font = cv2.FONT_HERSHEY_DUPLEX\n# cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,\n# (255, 255, 255), 1)\n\n# 测试\ndef test1():\n # 用于多进程之间共享值\n value = Value(\"d\", 0)\n loadImage()\n # 开启三个进程\n pool = Pool(3)\n while True:\n value.value += 1\n ret, frame = cap.read()\n if frame is None:\n continue\n pool.apply_async(func=getFaceInformation, args=(frame, value.value, total_img_faces, total_img_names))\n cv2.imshow(\"rtsp\", frame)\n # 退出\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n destroyCapAndCV2()\n\n\n# test1()\nif __name__ == '__main__':\n 
test1()\n","sub_path":"rtspFace_recognition.py","file_name":"rtspFace_recognition.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"69237185","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom math import sqrt\nimport datetime\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as mtick\n\nmy_df = pd.read_csv('SPY, GLD, TLT Data for Risk Parity.csv')\n\n# making the date the index\nmy_df['Date'] = pd.to_datetime(my_df['Date'], dayfirst=True)\nmy_df = my_df.set_index('Date')\n#port_daily_return = port_daily_return.set_index('Date')\n\n# setting all prices to percentages\nmy_df = my_df.pct_change()\n\n# removes first row where you can't calculate percentage change\nmy_df.dropna(inplace=True)\n\n# Calculating rolling 1 year historical volatility for each asset class\nmy_df['TLT Vol'] = my_df['TLT'].rolling(252).std() * sqrt(252)\nmy_df['GLD Vol'] = my_df['GLD'].rolling(252).std() * sqrt(252)\nmy_df['SPY Vol'] = my_df['SPY'].rolling(252).std() * sqrt(252)\n\n# remove calculation time for historical volatility\nmy_df.dropna(inplace=True)\n\n# target volatility of the portfolio is 10% divided by 3 because there are 3 asset classes\ntarget_vol = 0.10/3\n\n# creates a DataFrame that calculates the allocation for various asset classes\nportfolio_df = pd.DataFrame({'TLT Alloc': target_vol/my_df['TLT Vol'],\n 'GLD Alloc': target_vol/my_df['GLD Vol'],\n 'SPY Alloc': target_vol/my_df['SPY Vol'], }, index=my_df.index)\n\n# print(portfolio_df.head())\n# print(portfolio_df.tail())\n\n# calculating the return on the asset by multiplying the assets total return by it's % weight in portfolio\nportfolio_df['TLT Return'] = my_df['TLT']*portfolio_df['TLT Alloc']\nportfolio_df['GLD Return'] = my_df['GLD']*portfolio_df['GLD Alloc']\nportfolio_df['SPY Return'] = my_df['SPY']*portfolio_df['SPY Alloc']\n\n# adding up the returns from SPY, TLT and GLD to create the portfolio's daily return\nportfolio_df['Portfolio Return'] = portfolio_df['TLT Return'] + portfolio_df['GLD Return'] + portfolio_df['SPY Return']\n\n# plots the allocation to each asset class over time\n\n\n \n#print(contribution)\n\n# Showing growth of hypothetical $1,000 investment into the strategy\nportfolio_df['Portfolio Value'] = ((portfolio_df['Portfolio Return'] + 1).cumprod())*100\n\n# creates a drawdown column for the strategy which we can plot\nportfolio_df['Drawdown'] = portfolio_df['Portfolio Value'].div(portfolio_df['Portfolio Value'].cummax()) - 1\n\n# plot performance of strategy\nportfolio_df['Portfolio Value'].plot()\nplt.title('Performance of $1,000 Investment')\nplt.show()\n\n# plot drawdowns of the strategy\nportfolio_df['Drawdown'].plot()\nplt.title('Strategy Drawdowns')\nplt.show()\n\n# plots the gross exposure of the portfolio over time\n(portfolio_df['TLT Alloc'] + portfolio_df['GLD Alloc'] + portfolio_df['SPY Alloc']).plot()\nplt.title('Gross Portfolio Exposure')\nplt.show()\n\n# calculates monthly returns for the strategy\nmonthly = portfolio_df['Portfolio Value'].resample('BM').apply(lambda x: x[-1])\nnew1 = monthly['2015':'2019'].pct_change()\nfig, ax = plt.subplots()\nnew1.plot(kind='bar', ax=ax)\nax.bar(new1.index, new1)\nax.autoscale_view()\nax.xaxis.set_major_locator(mdates.MonthLocator())\nax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))\nplt.title('Strategy Monthly Performance')\nplt.show()\n\n# Calculating calendar year returns for the strategy\nyearly = 
portfolio_df['Portfolio Value'].resample('Y').apply(lambda x: x[-1])\nnew2 = yearly.pct_change()\nfig, ax = plt.subplots()\nnew2.plot(kind='bar', ax=ax)\nax.bar(new2.index, new2)\nax.autoscale_view()\nax.xaxis.set_major_locator(mdates.MonthLocator())\nax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))\nplt.title('Strategy Annual Performance')\nplt.show()\n\n#Return contribution graph for daily, monthly, annual and since inception values\nTLT = []\nPortfolio = []\nGLD = []\nSPY = []\nYear = list(range(2004, 2021))\ncontribution = {'Year': Year}\ncontribution = pd.DataFrame(contribution)\ny = 2006\nt=g=s=p=0\nfor index, row in portfolio_df.iterrows():\n if index.year == y:\n t += row['TLT Return']\n g += row['GLD Return']\n s += row['SPY Return']\n p += row['Portfolio Return']\n else:\n TLT.append(t)\n Portfolio.append(p)\n GLD.append(g)\n SPY.append(s)\n t=p=g=s=0\n y=index.year\n t += row['TLT Return']\n g += row['GLD Return']\n s += row['SPY Return']\n p += row['Portfolio Return']\nTLT.append(t)\nPortfolio.append(p)\nGLD.append(g)\nSPY.append(s)\ncontribution.insert(1,'TLT', TLT)\ncontribution.insert(2,'GLD', GLD)\ncontribution.insert(3,'SPY', SPY)\ncontribution.insert(4,'Portfolio', Portfolio)\ncontribution = contribution.set_index('Year')\n\ncontribution.plot.bar()\nplt.title('Contribution of TLT, GLD, SPY')\nplt.show()\n\n# calculating the rolling 12 month volatility of the portfolio\nport_stdev = portfolio_df['Portfolio Return'].rolling(252).std() * sqrt(252)\nport_stdev.plot()\nplt.title('Strategy 1 Yr Rolling Stdev')\nplt.show()\n\n# calculating the rolling 12 month return of the portfolio\nrolling_return = (1 + portfolio_df['Portfolio Return']).rolling(window=252).apply(np.prod, raw=True) - 1\nrolling_return.plot()\nplt.title('Strategy 1 Yr Rolling Returns')\nplt.show()\n\n\n# calculating the rolling 12 month Sharpe Ratio of the portfolio (NOT DONE)\nrisk_free = pd.read_csv('Risk Free Rate.csv')\nrisk_free['Date'] = pd.to_datetime(risk_free['Date'], dayfirst=True)\n\ndaily_interest_rate = []\nfor index, row in my_df.iterrows():\n date = datetime.datetime(index.year, index.month, 1)\n for index, row in risk_free.iterrows():\n if date == row['Date']:\n daily_interest_rate.append(row['3 Month Treasury Rate']/3000)\nmy_df.insert(6, 'Daily Interest Rate', daily_interest_rate)\n\nmy_df['Rolling Sharp Ratio'] = (portfolio_df['Portfolio Return'] - my_df['Daily Interest Rate'])/(portfolio_df['Portfolio Return'].rolling(252).std())\n#print(my_df['Rolling Sharp Ratio'])\nmy_df['Rolling Sharp Ratio'].plot()\nplt.axhline(y=my_df['Rolling Sharp Ratio'].mean(), color=\"red\")\n\nplt.title('Rolling Sharpe Ratio')\nplt.show()\n\n#plots return, drawdown and standard deviation of the strategy in type of percent\nax = (portfolio_df['Drawdown']*100).plot()\nax.yaxis.set_major_formatter(mtick.PercentFormatter())\nplt.title('Drowdown Percentage')\nplt.show()\n\nax = (portfolio_df['Portfolio Return']*100).plot()\nplt.axhline(y=(portfolio_df['Portfolio Return']*100).mean(), color=\"red\")\nax.yaxis.set_major_formatter(mtick.PercentFormatter())\nplt.title('Portfolio Return Percentage')\nplt.show()\n\nax = (port_stdev*100).plot()\nplt.axhline(y=(port_stdev*100).mean(), color=\"red\")\nax.yaxis.set_major_formatter(mtick.PercentFormatter())\nplt.title('Standard Deviation Percentage')\nplt.show()\n\n\n\n\n\n\n","sub_path":"1 - origin.py","file_name":"1 - origin.py","file_ext":"py","file_size_in_byte":6637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"213237355","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport numpy as np\nimport cv2\nimport glob\n\n\n\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-16)\nnp.set_printoptions(precision=16)\n\ndef createObjectpoints(height,width,size):\n objp = np.zeros((height*width, 3), np.float32)\n objp[:,:2] = np.indices((width, height)).T.reshape(-1,2)\n objp *= size\n return np.around(objp, 3)\n\ndef draw(img, corners, imgpts):\n corner = tuple(corners[0].ravel())\n img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)\n img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)\n img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)\n return img\n\n\ndef transform_from_image(gray_image, show=False):\n objp = createObjectpoints(5,8,0.04)\n \n cam_mtx = np.array([[1.0535387124286060e+03, 0, 9.5122786283926860e+02],\n [0, 1.0538348361017167e+03, 5.3702175248086860e+02],\n [0, 0, 1]], np.float32)\n\n cam_dist = np.array([0.0055971983516318, 0.116410774791633, -0.0002495590397735, -0.0005281194057462, -0.192996279885084], np.float32)\n \n found, corners = cv2.findChessboardCorners(gray_image, (8,5))\n\n if found:\n cv2.cornerSubPix(gray_image, corners, (11,11), (-1, -1), criteria)\n\n #debug\n #img = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)\n #cv2.drawChessboardCorners(img, pattern, corners, found)\n #cv2.imshow('image', img)\n #cv2.waitKey(250)\n\n\n #solvePnP is used to get the position and orientation of the object\n found, rvecs, tvec = cv2.solvePnP(objp, corners, cam_mtx, cam_dist)\n rot3X3 = cv2.Rodrigues(rvecs)[0]\n \n if show:\n axis = np.float32([[0.08,0,0], [0,0.08,0], [0,0,-0.08]]).reshape(-1,3)\n imgpts, __ = cv2.projectPoints(axis, rvecs, tvec, cam_mtx, cam_dist)\n gray_image = draw(gray_image,corners,imgpts)\n \n #print(rotationMatrix_3X3)\n transformation = np.array([[rot3X3[0,0], rot3X3[0,1], rot3X3[0,2], tvec[0]],\n [rot3X3[1,0], rot3X3[1,1], rot3X3[1,2], tvec[1]],\n [rot3X3[2,0], rot3X3[2,1], rot3X3[2,2], tvec[2]],\n [0, 0, 0, 1]], np.float32)\n\n else:\n print(\"pattern not found\")\n raise ValueError(\"Could not find the pattern in the image\")\n\n return transformation\n","sub_path":"ros_workspace/src/hand_eye_calibration/src/poseEstimation.py","file_name":"poseEstimation.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"126033252","text":"# BenchExec is a framework for reliable benchmarking.\n# This file is part of BenchExec.\n#\n# Copyright (C) 2007-2016 Dirk Beyer\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for implementing a container using Linux namespaces\nand for appropriately configuring such a container.\"\"\"\n\n# prepare for Python 3\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# THIS MODULE HAS TO WORK WITH PYTHON 2.7!\n\nimport contextlib\nimport ctypes\nimport 
errno\nimport fcntl\nimport logging\nimport os\nimport signal\nimport socket\nimport struct\n\nfrom benchexec import libc\nfrom benchexec import util\n\n__all__ = [\n 'execute_in_namespace',\n 'setup_user_mapping',\n 'activate_network_interface',\n 'get_mount_points',\n 'remount_with_additional_flags',\n 'make_overlay_mount',\n 'mount_proc',\n 'make_bind_mount',\n 'get_my_pid_from_proc',\n 'drop_capabilities',\n 'forward_all_signals',\n 'setup_container_config',\n 'CONTAINER_UID',\n 'CONTAINER_GID',\n 'CONTAINER_HOME',\n ]\n\n\nDEFAULT_STACK_SIZE = 1024*1024\nGUARD_PAGE_SIZE = 4096 # size of guard page at end of stack\n\nCONTAINER_UID = 1000\nCONTAINER_GID = 1000\nCONTAINER_HOME = '/home/benchexec'\n\nCONTAINER_ETC_NSSWITCH_CONF = \"\"\"\npasswd: files\ngroup: files\nshadow: files\nhosts: files\nnetworks: files\n\nprotocols: db files\nservices: db files\nethers: db files\nrpc: db files\n\nnetgroup: files\nautomount: files\n\"\"\"\nCONTAINER_ETC_PASSWD = \"\"\"\nroot:x:0:0:root:/root:/bin/bash\nbenchexec:x:{uid}:{gid}:benchexec:{home}:/bin/bash\nnobody:x:65534:65534:nobody:/:/bin/false\n\"\"\".format(uid=CONTAINER_UID, gid=CONTAINER_GID, home=CONTAINER_HOME)\n\nCONTAINER_ETC_GROUP = \"\"\"\nroot:x:0:\nbenchexec:x:{gid}:\nnogroup:x:65534:\n\"\"\".format(uid=CONTAINER_UID, gid=CONTAINER_GID, home=CONTAINER_HOME)\n\nCONTAINER_ETC_HOSTS = \"\"\"\n127.0.0.1 localhost {host} {fqdn}\n# The following lines are desirable for IPv6 capable hosts\n::1 localhost ip6-localhost ip6-loopback\nff02::1 ip6-allnodes\nff02::2 ip6-allrouters\n\"\"\".format(host=socket.gethostname(), fqdn=socket.getfqdn())\n\nCONTAINER_ETC_FILE_OVERRIDE = {\n b'nsswitch.conf': CONTAINER_ETC_NSSWITCH_CONF,\n b'passwd': CONTAINER_ETC_PASSWD,\n b'group': CONTAINER_ETC_GROUP,\n b'hosts': CONTAINER_ETC_HOSTS,\n }\n\n\n@contextlib.contextmanager\ndef allocate_stack(size=DEFAULT_STACK_SIZE):\n \"\"\"Allocate some memory that can be used as a stack.\n @return: a ctypes void pointer to the *top* of the stack.\n \"\"\"\n # Allocate memory with appropriate flags for a stack as in https://blog.fefe.de/?ts=a85c8ba7\n base = libc.mmap(\n None,\n size + GUARD_PAGE_SIZE,\n libc.PROT_READ | libc.PROT_WRITE,\n libc.MAP_PRIVATE | libc.MAP_ANONYMOUS | libc.MAP_GROWSDOWN | libc.MAP_STACK,\n -1, 0)\n\n try:\n # create a guard page that crashes the application when it is written to (on stack overflow)\n libc.mprotect(base, GUARD_PAGE_SIZE, libc.PROT_NONE)\n\n yield ctypes.c_void_p(base + size + GUARD_PAGE_SIZE)\n finally:\n libc.munmap(base, size + GUARD_PAGE_SIZE)\n\ndef execute_in_namespace(func, use_network_ns=True):\n \"\"\"Execute a function in a child process in separate namespaces.\n @param func: a parameter-less function returning an int (which will be the process' exit value)\n @return: the PID of the created child process\n \"\"\"\n flags = (signal.SIGCHLD |\n libc.CLONE_NEWNS | libc.CLONE_NEWUTS | libc.CLONE_NEWIPC | libc.CLONE_NEWUSER |\n libc.CLONE_NEWPID)\n if use_network_ns:\n flags |= libc.CLONE_NEWNET\n\n # We use the syscall clone() here, which is similar to fork().\n # Calling it without letting Python know about it is dangerous (especially because\n # we want to execute Python code in the child, too), but so far it seems to work.\n # Basically we attempt to do (almost) the same that os.fork() does (cf. 
function os_fork_impl\n # in https://github.com/python/cpython/blob/master/Modules/posixmodule.c).\n # We currently do not take the import lock os.lock() does because it is only available\n # via an internal API, and because the child should never import anything anyway\n # (inside the container, modules might not be visible).\n # It is very important, however, that we have the GIL during clone(),\n # otherwise the child will often deadlock when trying to execute Python code.\n # Luckily, the ctypes module allows us to hold the GIL while executing the\n # function by using ctypes.PyDLL as library access instead of ctypes.CLL.\n\n def child_func():\n # This is necessary for correcting the Python interpreter state after a\n # fork-like operation. For example, it resets the GIL and fixes state of\n # several modules like threading and signal.\n ctypes.pythonapi.PyOS_AfterFork()\n\n return func()\n\n with allocate_stack() as stack:\n pid = libc.clone(ctypes.CFUNCTYPE(ctypes.c_int)(child_func), stack, flags, None)\n return pid\n\ndef setup_user_mapping(pid, uid=os.getuid(), gid=os.getgid()):\n \"\"\"Write uid_map and gid_map in /proc to create a user mapping\n that maps our user from outside the container to the same user inside the container\n (and no other users are mapped).\n @see: http://man7.org/linux/man-pages/man7/user_namespaces.7.html\n @param pid: The PID of the process in the container.\n \"\"\"\n proc_child = os.path.join(\"/proc\", str(pid))\n try:\n uid_map = \"{0} {1} 1\".format(uid, os.getuid()) # map uid internally to our uid externally\n util.write_file(uid_map, proc_child, \"uid_map\")\n except IOError as e:\n logging.warning(\"Creating UID mapping into container failed: %s\", e)\n\n try:\n util.write_file(\"deny\", proc_child, \"setgroups\")\n except IOError as e:\n # Not all systems have this file (depends on the kernel version),\n # but if it does not exist, we do not need to write to it.\n if e.errno != errno.ENOENT:\n logging.warning(\"Could not write to setgroups file in /proc: %s\", e)\n\n try:\n gid_map = \"{0} {1} 1\".format(gid, os.getgid()) # map gid internally to our gid externally\n util.write_file(gid_map, proc_child, \"gid_map\")\n except IOError as e:\n logging.warning(\"Creating GID mapping into container failed: %s\", e)\n\ndef activate_network_interface(iface):\n \"\"\"Bring up the given network interface.\n @raise OSError: if interface does not exist or permissions are missing\n \"\"\"\n iface = iface.encode()\n\n SIOCGIFFLAGS = 0x8913 # /usr/include/bits/ioctls.h\n SIOCSIFFLAGS = 0x8914 # /usr/include/bits/ioctls.h\n IFF_UP = 0x1 # /usr/include/net/if.h\n\n # We need to use instances of \"struct ifreq\" for communicating with the kernel.\n # This struct is complex with a big contained union, we define here only the few necessary\n # fields for the two cases we need.\n # The layout is given in the format used by the struct module:\n STRUCT_IFREQ_LAYOUT_IFADDR_SAFAMILY = b\"16sH14s\" # ifr_name, ifr_addr.sa_family, padding\n STRUCT_IFREQ_LAYOUT_IFFLAGS = b\"16sH14s\" # ifr_name, ifr_flags, padding\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP)\n try:\n # Get current interface flags from kernel\n ifreq = struct.pack(STRUCT_IFREQ_LAYOUT_IFADDR_SAFAMILY, iface, socket.AF_INET, b'0' * 14)\n ifreq = fcntl.ioctl(sock, SIOCGIFFLAGS, ifreq)\n if_flags = struct.unpack(STRUCT_IFREQ_LAYOUT_IFFLAGS, ifreq)[1]\n\n # Set new flags\n ifreq = struct.pack(STRUCT_IFREQ_LAYOUT_IFFLAGS, iface, if_flags | IFF_UP, b'0' * 14)\n fcntl.ioctl(sock, 
SIOCSIFFLAGS, ifreq)\n finally:\n sock.close()\n\ndef get_mount_points():\n \"\"\"Get all current mount points of the system.\n Changes to the mount points during iteration may be reflected in the result.\n @return a generator of (source, target, fstype, options),\n where options is a list of bytes instances, and the others are bytes instances\n (this avoids encoding problems with mount points with problematic characters).\n \"\"\"\n with open(\"/proc/self/mounts\", \"rb\") as mounts:\n for mount in mounts:\n source, target, fstype, options, unused1, unused2 = mount.split(b\" \")\n options = set(options.split(b\",\"))\n yield (source, target, fstype, options)\n\ndef remount_with_additional_flags(mountpoint, existing_options, mountflags):\n \"\"\"Remount an existing mount point with additional flags.\n @param mountpoint: the mount point as bytes\n @param existing_options: dict with current mount existing_options as bytes\n @param mountflags: int with additional mount existing_options (cf. libc.MS_* constants)\n \"\"\"\n mountflags |= libc.MS_REMOUNT|libc.MS_BIND\n for option, flag in libc.MOUNT_FLAGS.items():\n if option in existing_options:\n mountflags |= flag\n\n libc.mount(None, mountpoint, None, mountflags, None)\n\ndef make_overlay_mount(mount, lower, upper, work):\n logging.debug(\"Creating overlay mount: target=%s, lower=%s, upper=%s, work=%s\",\n mount, lower, upper, work)\n libc.mount(b\"none\", mount, b\"overlay\", 0,\n b\"lowerdir=\" + lower + b\",upperdir=\" + upper + b\",workdir=\" + work)\n\ndef mount_proc():\n \"\"\"Mount the /proc filesystem.\"\"\"\n # We keep a reference to the outer /proc somewhere else because we need it\n # to convert our PID between the namespaces.\n libc.mount(b\"proc\", b\"/proc\", b\"proc\", 0, None)\n\ndef make_bind_mount(source, target, recursive=False, private=False):\n \"\"\"Make a bind mount.\n @param source: the source directory as bytes\n @param target: the target directory as bytes\n @param recursive: whether to also recursively bind mount all mounts below source\n @param private: whether to mark the bind as private, i.e., changes to the existing mounts\n won't propagate and vice-versa (changes to files/dirs will still be visible).\n \"\"\"\n flags = libc.MS_BIND\n if recursive:\n flags |= libc.MS_REC\n if private:\n flags |= libc.MS_PRIVATE\n libc.mount(source, target, None, flags, None)\n\ndef get_my_pid_from_procfs():\n \"\"\"Get the PID of this process by reading from /proc (this is the PID of this process\n in the namespace in which that /proc instance has originally been mounted),\n which may be different from our PID according to os.getpid().\n \"\"\"\n return int(os.readlink(\"/proc/self\"))\n\ndef drop_capabilities():\n \"\"\"Drop all capabilities this process has.\"\"\"\n libc.capset(ctypes.byref(libc.CapHeader(version=libc.LINUX_CAPABILITY_VERSION_3, pid=0)),\n ctypes.byref((libc.CapData * 2)()))\n\n\n_ALL_SIGNALS = range(1, signal.NSIG)\n_FORWARDABLE_SIGNALS = set(range(1, 32)).difference([signal.SIGKILL, signal.SIGSTOP, signal.SIGCHLD])\n_HAS_SIGWAIT = hasattr(signal, 'sigwait')\n\ndef block_all_signals():\n \"\"\"Block asynchronous delivery of all signals to this process.\"\"\"\n if _HAS_SIGWAIT:\n signal.pthread_sigmask(signal.SIG_BLOCK, _ALL_SIGNALS)\n\ndef _forward_signal(signum, target_pid, process_name):\n logging.debug(\"Forwarding signal %d to process %s.\", signum, process_name)\n try:\n os.kill(target_pid, signum)\n except OSError as e:\n logging.debug(\"Could not forward signal %d to process %s: %s\", signum, 
process_name, e)\n\ndef forward_all_signals_async(target_pid, process_name):\n \"\"\"Install all signal handler that forwards all signals to the given process.\"\"\"\n def forwarding_signal_handler(signum):\n _forward_signal(signum, process_name, forwarding_signal_handler.target_pid)\n\n # Somehow we get a Python SystemError sometimes if we access target_pid directly from inside function.\n forwarding_signal_handler.target_pid = target_pid\n\n for signum in _FORWARDABLE_SIGNALS:\n # Need to directly access libc function,\n # the state of the signal module is incorrect due to the clone()\n # (it may think we are in a different thread than the main thread).\n libc.signal(signum, forwarding_signal_handler)\n\n # Reactivate delivery of signals such that our handler gets called.\n reset_signal_handling()\n\ndef wait_for_child_and_forward_all_signals(child_pid, process_name):\n \"\"\"Wait for a child to terminate and in the meantime forward all signals the current process\n receives to this child.\n @return a tuple of exit code and resource usage of the child as given by os.waitpid\n \"\"\"\n assert _HAS_SIGWAIT\n block_all_signals()\n\n while True:\n logging.debug(\"Waiting for signals\")\n signum = signal.sigwait(_ALL_SIGNALS)\n if signum == signal.SIGCHLD:\n pid, exitcode, ru_child = os.wait4(-1, os.WNOHANG)\n while pid != 0:\n if pid == child_pid:\n return exitcode, ru_child\n else:\n logging.debug(\"Received unexpected SIGCHLD for PID %s\", pid)\n pid, exitcode, ru_child = os.wait4(-1, os.WNOHANG)\n\n else:\n _forward_signal(signum, child_pid, process_name)\n\ndef reset_signal_handling():\n if _HAS_SIGWAIT:\n signal.pthread_sigmask(signal.SIG_SETMASK, {})\n\n\ndef close_open_fds(keep_files=[]):\n \"\"\"Close all open file descriptors except those in a given set.\n @param keep_files: an iterable of file descriptors or file-like objects.\n \"\"\"\n keep_fds = set()\n for file in keep_files:\n if isinstance(file, int):\n keep_fds.add(file)\n else:\n try:\n keep_fds.add(file.fileno())\n except Exception:\n pass\n\n for fd in os.listdir(\"/proc/self/fd\"):\n fd = int(fd)\n if fd not in keep_fds:\n try:\n os.close(fd)\n except OSError:\n # irrelevant and expected\n # (the fd that was used by os.listdir() of course always fails)\n pass\n\ndef setup_container_system_config(basedir):\n \"\"\"Create a minimal system configuration for use in a container.\n @param basedir: The root directory of the container as bytes.\n \"\"\"\n etc = os.path.join(basedir, b\"etc\")\n if not os.path.exists(etc):\n os.mkdir(etc)\n\n for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():\n util.write_file(content, etc, file)\n\n os.symlink(b\"/proc/self/mounts\", os.path.join(etc, b\"mtab\"))\n\ndef is_container_system_config_file(file):\n \"\"\"Determine whether a given file is one of the files created by setup_container_system_config().\n @param file: Absolute file path as string.\n \"\"\"\n if not file.startswith(\"/etc/\"):\n return False\n return file in [os.path.join(\"/etc\", f.decode()) for f in CONTAINER_ETC_FILE_OVERRIDE]\n","sub_path":"benchexec/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":15238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"51027832","text":"import redis\nimport json\n\nclass RecommendationStore(object):\n def __init__(self, host, port, db):\n self.host = host\n self.port = port\n self.db = db\n\n self.MODEL_PREFIX = 'br::uni7::recommendation::product::{0}'\n self.connection = 
redis.Redis(host=self.host, port=self.port, db=self.db)\n\n def save_recommendation(self, key, recommendations):\n self.connection.set(self.MODEL_PREFIX.format(key), recommendations) \n\n def get_recommendation(self, key):\n result = '[]'\n\n recommendations = self.connection.get(self.MODEL_PREFIX.format(key))\n if recommendations:\n result = recommendations\n\n result = json.loads(result)\n\n return result \n","sub_path":"11_movies-recommendations/recommendation_store.py","file_name":"recommendation_store.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"559751100","text":"from persona import Tripulacion\nfrom persona import Pasajeros\nfrom persona import Servicio\nfrom vuelos import Vuelos\nfrom modelo_avion import Modelo_Avion\n\nfrom datetime import datetime\n\n\nimport json\n\nclass Sistema (object):\n\n def __init__(self):\n self.listaVuelos = []\n self.listaPersonas = []\n self.listaAviones = []\n\n def cargar_archivos (self, archivos): #JSON\n with open(archivos, 'r') as archivo:\n diccionario = json.loads(archivo.read())\n\n for avion in diccionario['Aviones']:\n a = Modelo_Avion(avion['codigoUnico'],avion['cantidadDePasajerosMaxima'],avion['cantidadDeTripulaciónNecesaria'])\n self.listaAviones.append(a)\n\n for persona in diccionario['Personas']:\n if persona['tipo'] == 'Pasajero':\n\n fechaNacimiento = datetime.strptime(persona['fechaNacimiento'], '%Y-%m-%d').date()\n\n necesidades = \"\"\n\n try:\n necesidades = persona['solicitudesEspeciales']\n except:\n pass\n p = Pasajeros(persona['tipo'], persona['nombre'], persona['apellido'], fechaNacimiento, persona['dni'], persona['vip'], necesidades)\n\n self.listaPersonas.append(p)\n\n if persona['tipo'] == 'Piloto':\n fechaNacimiento = datetime.strptime(persona['fechaNacimiento'], '%Y-%m-%d').date()\n\n codigos = persona['avionesHabilitados']\n\n listaTemporal = []\n\n for item in self.listaAviones:\n for item2 in codigos:\n if item.codigo == item2:\n listaTemporal.append(item)\n\n t = Tripulacion(persona['tipo'], persona['nombre'], persona['apellido'], fechaNacimiento, persona['dni'])\n t.avionesHabilitados = listaTemporal\n\n self.listaPersonas.append(t)\n\n if persona['tipo'] == 'Servicio':\n fechaNacimiento = datetime.strptime(persona['fechaNacimiento'], '%Y-%m-%d').date()\n\n idiomas = persona['idiomas']\n codigos = persona['avionesHabilitados']\n\n listaTemporal = []\n\n for item in self.listaAviones:\n for item2 in codigos:\n if item.codigo == item2:\n listaTemporal.append(item)\n\n s = Servicio(persona['tipo'], persona['nombre'], persona['apellido'],fechaNacimiento , persona['dni'])\n s.listaIdiomas = idiomas\n s.avionesHabilitados = listaTemporal\n\n self.listaPersonas.append(s)\n\n for vuelos in diccionario['Vuelos']:\n\n lPasa = vuelos['pasajeros']\n lTrip = vuelos['tripulacion']\n\n PasajerosAux = []\n TripulantesAux = []\n\n for item in self.listaPersonas:\n if item.tipo == 'Pasajero':\n for item2 in lPasa:\n if item.DNI == item2:\n PasajerosAux.append(item)\n for item in self.listaPersonas:\n if item.tipo == 'Piloto'or item.tipo == 'Servicio':\n for item2 in lTrip:\n if item.DNI == item2:\n TripulantesAux.append(item)\n\n for item in self.listaAviones:\n fecha = datetime.strptime(vuelos['fecha'], '%Y-%m-%d').date()\n\n if item.codigo == vuelos['avion']:\n v = Vuelos(item, vuelos['hora'], fecha, vuelos['origen'], vuelos['destino'])\n v.listaPasajeros = PasajerosAux\n v.listaTripulacion = TripulantesAux\n\n 
self.listaVuelos.append(v)\n\n\n def VuelosQueNoAlcanzenLaTripMinima(self): #Punto 3\n listaVuelosQueNoAlcanzan = []\n\n for item in self.listaVuelos:\n if not item.tenesLaTripulacionNecesaria():\n listaVuelosQueNoAlcanzan.append(str(item.fecha) + ' ' + item.hora + ' ' + 'desde ' + item.origen + ' hasta ' + item.destino)\n\n return listaVuelosQueNoAlcanzan\n\n def VuelosConTripulantesNoAutorizados(self): #Punto 4\n listaVuelosConTripulantesNoAutorizados = []\n\n for item in self.listaVuelos:\n for item2 in item.listaTripulacion:\n if item.Avion not in item2.avionesHabilitados:\n listaVuelosConTripulantesNoAutorizados.append('Vuelo a ' + item.destino)\n break\n\n return listaVuelosConTripulantesNoAutorizados\n\n def TripulantesQueVolaronMasDeUnaVezAlDia(self): #Punto 5\n tripQueRompenLaRegla = []\n\n for item in self.listaVuelos:\n for item2 in self.listaVuelos:\n if item != item2:\n if item.fecha == item2.fecha:\n for item3 in item.listaTripulacion:\n if item3 in item2.listaTripulacion:\n tripQueRompenLaRegla.append(item3.nombre + ' ' + item3.apellido + ' ' + ' DNI: ' + item3.DNI)\n break\n\n return tripQueRompenLaRegla\n\n\n\n\n","sub_path":"sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"238205092","text":"import tkinter as tk\nfrom tkinter import ttk\n\nclass Application(ttk.Frame):\n def __init__(self, win):\n win.geometry(\"300x200\")\n ttk.Frame.__init__(self, win)\n self.pack()\n\nwin = tk.Tk()\napp = Application(win)\nwin.mainloop()","sub_path":"aaa.py","file_name":"aaa.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"377390426","text":"from twisted.internet.protocol import Protocol , ServerFactory , DatagramProtocol\nfrom twisted.internet import defer , task\nfrom twisted.protocols.basic import NetstringReceiver\nfrom twisted.python.failure import Failure\nimport sys\nimport os\nimport msvcrt\nimport time \nimport mysql.connector\nimport random\n\nclass Form_service(object):\n def __init__(self):\n self.db = mysql.connector.connect(\n host = 'localhost',\n user = 'tuanlinh',\n passwd = 'tuanlinh',\n database= 'game'\n )\n self.addr = []\n\n def analyze(self,data):\n if '.' 
in data:\n pass\n return data\n\n def transform(self,data,address):\n if ' ' in data:\n msg= data.split(' ',1)\n msg[0] = msg[0].rstrip(':')\n self.send_to_db(msg[1],msg[0])\n if 'finding' in msg[1]:\n self.addr.append(address)\n return data\n return data\n\n def send_to_db(self,text,time):\n cursor = self.db.cursor()\n sql = 'INSERT INTO `chat_text_server` (text,time) VALUES (%s,%s)'\n val = (text,time)\n \n cursor.execute(sql,val)\n self.db.commit() \n\nclass UdpProtocol(DatagramProtocol):\n def __init__(self,service):\n self.addr = []\n self.service = service\n\n def startProtocol(self):\n print('Listening for player')\n \n def datagramReceived(self,data,addr):\n self.check(addr)\n \n def check(self,addr):\n if addr not in self.addr:\n self.addr.append(addr)\n\n print(str(len(self.addr)) +' players')\n\n if len(self.addr) % 2 == 0:\n print('sending in4')\n print(self.addr)\n index = len(self.addr) - 1\n msg0 = 'server ' + str(self.addr[index-1][0])+' ' + str(self.addr[index-1][1])\n msg1 = 'server ' + str(self.addr[index][0])+' ' + str(self.addr[index][1])\n self.transport.write(msg1.encode('ascii'),(self.addr[index-1][0],self.addr[index-1][1]))\n self.transport.write(msg0.encode('ascii'),(self.addr[index][0],self.addr[index][1]))\n\n\n\ndef main():\n os.system('cls')\n service = Form_service()\n protocol = UdpProtocol(service)\n from twisted.internet import reactor\n reactor.listenUDP(11111,protocol) \n reactor.run()\n\nif __name__ == '__main__':\n main()\n","sub_path":"game_server.py","file_name":"game_server.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"502299385","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\n\n__version__ = '0.6.1'\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n requirements = f.readlines()\n\nwith open(os.path.join(here, 'requirements.tests.txt')) as f:\n test_requirements = f.readlines()\n\nsetup(\n name='cleo',\n license='MIT',\n version=__version__,\n description='Cleo allows you to create beautiful and testable command-line interfaces.',\n long_description=open('README.rst').read(),\n author='Sébastien Eustace',\n author_email='sebastien.eustace@gmail.com',\n url='https://github.com/sdispater/cleo',\n download_url='https://github.com/sdispater/cleo/archive/%s.tar.gz' % __version__,\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"387230811","text":"import socket\nimport os\nimport struct\nimport json\nfrom ex5.conf import settings\nimport hashlib\n\nclient = socket.socket()\n\n# 列出文件夹下的所有文件名\nfiles_list = []\nfor file in os.listdir(settings.TARGET_PATH):\n files_list.append(file)\n\n# 提示信息\nmsg1 = \"可选文件如下:\\n\"\nfor index, file in 
enumerate(files_list, 1):\n msg1 = msg1 + str(index) + '. ' + file + '\\n'\nmsg1 = (msg1 + '请输入数字选择想要下载的文件:\\n')\n\n# 选择信息\nreply = ''\nwhile len(reply) ==0:\n reply = input(msg1).strip()\nreply = int(reply)\nfile_chosen = files_list[reply-1]\n\n# 读取文件内容、 生成大文件md5\nm = hashlib.md5()\nwith open(os.path.join(settings.TARGET_PATH, file_chosen), 'rb') as f:\n while True:\n data = f.read(4096)\n if not data:\n break\n m.update(data)\n file_data = f.read()\nfile_md5 = m.hexdigest()\n\n\n# 生成文件头字典\nfile_header = json.dumps({'file_name': file_chosen, 'file_size': len(file_data), 'md5': file_md5})\nprint(file_header)\n\n# 链接服务器\nclient.connect(('127.0.0.1', 8080))\n\n# -------------发送指定文件-------------\n# 发送字典大小\ndict_header = struct.pack('i', len(file_header))\nclient.send(dict_header)\n\n# 发送字典\nclient.send(file_header.encode('utf-8'))\n\n# 发送文件\nclient.send(file_data)\n","sub_path":"Day8/homework/ex5/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"13364731","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nfrom matplotlib import image as im\nfrom scipy import misc\nascent = im.imread('Desktop/naga.jpg')\nscharr = im.imread('Desktop/naga.jpg')\nb = ascent[:,:, 0]\ng = ascent[:,:, 1]\nr = ascent[:,:, 2]\nac= 0.21 * r + 0.72 * g + 0.07 * b\na= scharr[:,:, 0]\nc = scharr[:,:, 1]\nd= scharr[:,:, 2]\nsc = 0.21 * a + 0.72 * c + 0.07 * d\ngrad = signal.convolve2d(ac, sc, boundary='symm', mode='same',fillvalue=0)\nplt.imshow(grad)\n\n\n# In[ ]:\n\n\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nfrom matplotlib import image as im\nimport numpy as np\nkh = np.array([[0, 8, 0], [0, -16, 0], [0, -8, 0]], dtype = np.float)\nkv = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype = np.float)\nimage= im.imread('Desktop/graps.jpg')\nb = image[:,:, 0]\ng = image[:,:, 1]\nr = image[:,:, 2]\ngr= 0.21 * r + 0.72 * g + 0.07 * b\nplt.gray()\nconv=signal.convolve2d(gr,kv,boundary='symm',mode='same',fillvalue=0)\nconh=signal.convolve2d(gr,kh,boundary='symm',mode='same',fillvalue=0)\ncon=abs(conv)\ncon1=abs(conh)\nplt.subplot(1,4,1)\nplt.imshow(image)\nplt.title('original')\nplt.subplot(1,4,2)\nplt.imshow(gr)\nplt.title('grayimage')\nplt.subplot(1,4,3)\nplt.imshow(con)\nplt.title('verticalsobel')\nplt.subplot(1,4,4)\nplt.imshow(conh)\nplt.title('kernal kh')\n\n\n# In[32]:\n\n\nplt.imshow(con)\n\n","sub_path":"Image Edge Detection/egde_detection_sobel_kernal2.py","file_name":"egde_detection_sobel_kernal2.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"555710468","text":"#!/usr/bin/env python\n\nimport os\nimport json\nimport argparse\nimport subprocess\nimport sys\nimport libvirt\n\ndef main():\n inventory = {'all': {'hosts': [],\n 'vars': {'ansible_user': os.environ['USER']}},\n 'cluster': {'hosts': [],\n 'vars': {'cluster_node_prefixlen': 24}}}\n inventory['host'] = {'hosts': ['localhost'],\n 'vars': {'ansible_connection': 'local'}}\n inventory['guest'] = guest()\n inventory['master'] = master()\n inventory['node'] = node()\n\n hostvars = {}\n for type in ['master', 'node']:\n for host in inventory[type]['hosts']:\n num = int(''.join(filter(str.isdigit, host)))\n inventory['all']['hosts'].append(host)\n inventory['cluster']['hosts'].append(host)\n hostvars[host] = {'name': host,\n # Pick the first master as the master.\n 
'master': inventory['master']['hosts'][0],\n 'cluster_node_ip': '10.0.0.%d' % num}\n\n # We'll conbine this to the above once hv also joins to the cluster.\n for type in ['guest']:\n for host in inventory[type]['hosts']:\n num = int(''.join(filter(str.isdigit, host)))\n inventory['all']['hosts'].append(host)\n hostvars[host] = {'name': host,\n 'hv_node_ip': '10.0.0.%d' % num}\n\n # noqa https://github.com/ansible/ansible/commit/bcaa983c2f3ab684dca6c2c2c8d1997742260761\n inventory['_meta'] = {'hostvars': hostvars}\n\n parser = argparse.ArgumentParser(description=\"KVM inventory\")\n parser.add_argument('--list', action='store_true',\n help=\"List KVM inventory\")\n parser.add_argument('--host', help='List details of a KVM inventory')\n args = parser.parse_args()\n\n if args.list:\n print(json.dumps(inventory))\n elif args.host:\n print(json.dumps(hostvars.get(args.host, {})))\n\n\ndef guest():\n guest = {'hosts': [],\n 'vars': {'ansible_python_interpreter': 'python2',\n 'hv_node_netmask': '255.255.0.0',\n 'hv_node_broadcast': '10.0.255.255'}}\n c = libvirt.openReadOnly(\"qemu:///system\")\n if c != None:\n for i in c.listDomainsID():\n dom = c.lookupByID(i)\n if dom.name().startswith('cam'):\n guest['hosts'].append(dom.name())\n\n return guest\n\n\ndef master():\n master = {'hosts': []}\n\n c = libvirt.openReadOnly(\"qemu:///system\")\n if c != None:\n for i in c.listDomainsID():\n dom = c.lookupByID(i)\n if dom.name().startswith('kube') == True:\n master['hosts'].append(dom.name())\n\n return master\n\n\ndef node():\n node = {'hosts': []}\n\n c = libvirt.openReadOnly(\"qemu:///system\")\n if c != None:\n for i in c.listDomainsID():\n dom = c.lookupByID(i)\n if dom.name().startswith('node'):\n node['hosts'].append(dom.name())\n\n return node\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"313066347","text":"# Append current path to path.\nimport sys, os\nsys.path.append(os.path.dirname(__file__))\nsys.path.append(os.path.dirname(__file__) + '/IOPi')\n\nimport bottle\nfrom bottle import route, request, abort\nimport shotclock\nimport testshotclock\nimport logging\nimport logging.config\nimport json\n\n\n# ... 
build or import your bottle application here ...\n# Do NOT use bottle.run() with mod_wsgi\n\n@route('/time')\ndef shotclocktime():\n\n\ttry:\n\t\tlogging.info('shotclockpath() start')\n\t\ttime = s.getJSONTime()\n\t\tlogging.info('shotclockpath() end')\n\t\treturn time\n\texcept:\n\t\tlogging.exception('Uncaught exception')\n\t\traise\n\n@route('/test/start', method='PUT')\ndef testshotclockStart():\n\n\tts.start()\n\treturn \"Started\"\n\n@route('/test/stop', method='PUT')\ndef testshotclockStop():\n\n\tts.stop()\n\treturn \"Stopped\"\n\n@route('/test/reset', method='PUT')\ndef testshotclockReset():\n\n\tts.reset()\n\treturn \"Stopped\"\n\n@route('/test/inError', method='PUT')\ndef testshotclockinError():\n\n\tdata = request.body.readline()\n\tif not data:\n \tabort(400, 'No data received')\n\tentity = json.loads(data)\n\tif not entity.has_key('inError'):\n\t\tabort(400, 'No inError specified')\n\terror = entity['inError']\n\tts.inError(error)\n\n@route('/test/time')\ndef testshotclocktime():\n\n\treturn ts.getJSONTime()\n\n\ns = shotclock.Shotclock()\nts = testshotclock.Testshotclock()\n\nlogging.config.fileConfig(os.path.dirname(__file__) + '/logger.cfg') #logfile config\nlogging.info('Started')\napplication = bottle.default_app()\n","sub_path":"shotclock.wsgi","file_name":"shotclock.wsgi","file_ext":"wsgi","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"66292385","text":"import logging\nimport os\nfrom logging.handlers import TimedRotatingFileHandler\n\nlogging.basicConfig(\n level=logging.INFO\n)\n\n\ndef do_init():\n from __init__ import app\n\n if not os.path.exists(\"logs\"):\n os.mkdir(\"logs\")\n formatter = logging.Formatter(\n \"[%(asctime)s][%(filename)s:%(lineno)d][%(levelname)s][%(thread)d] - %(message)s\")\n handler = TimedRotatingFileHandler(\n \"logs/flask.log\", when=\"D\", interval=1, backupCount=15,\n encoding=\"UTF-8\", delay=False, utc=True)\n app.logger.addHandler(handler)\n handler.setFormatter(formatter)\n\n\ndef info(msg):\n logging.info(msg)\n","sub_path":"component/logging_.py","file_name":"logging_.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"345374218","text":"import json\nimport cv2\nimport os\nimport numpy as np\nimport math\n\njsonfile=open('newaddbg.json','r')\nload_dict = json.load(jsonfile)\njsonfile.close()\npicdir='addnormal'\n\n\n\n\ndef rotate_image( src, angle, scale=1.):\n w = src.shape[1]\n h = src.shape[0]\n # convet angle into rad\n rangle = np.deg2rad(angle) # angle in radians\n # calculate new image width and height\n nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale\n nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale\n # ask OpenCV for the rotation matrix\n rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)\n # calculate the move from the old center to the new center combined\n # with the rotation\n rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5, 0]))\n # the move only affects the translation, so update the translation\n # part of the transform\n rot_mat[0, 2] += rot_move[0]\n rot_mat[1, 2] += rot_move[1]\n # map\n return cv2.warpAffine(\n src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))),\n flags=cv2.INTER_LANCZOS4)\n\ndef rotate_xml( src, xmin, ymin, xmax, ymax, angle, scale=1.):\n w = src.shape[1]\n h = src.shape[0]\n rangle = np.deg2rad(angle) # angle in radians\n # now calculate new image width and 
height\n # get width and heigh of changed image\n nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale\n nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale\n # ask OpenCV for the rotation matrix\n rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)\n # calculate the move from the old center to the new center combined\n # with the rotation\n rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5, 0]))\n # the move only affects the translation, so update the translation\n # part of the transform\n rot_mat[0, 2] += rot_move[0]\n rot_mat[1, 2] += rot_move[1]\n # rot_mat: the final rot matrix\n # get the four center of edges in the initial martix,and convert the coord\n point1 = np.dot(rot_mat, np.array([(xmin+xmax)/2, ymin, 1]))\n point2 = np.dot(rot_mat, np.array([xmax, (ymin+ymax)/2, 1]))\n point3 = np.dot(rot_mat, np.array([(xmin+xmax)/2, ymax, 1]))\n point4 = np.dot(rot_mat, np.array([xmin, (ymin+ymax)/2, 1]))\n #point1 = np.dot(rot_mat, np.array([xmin, ymin, 1]))\n #point2 = np.dot(rot_mat, np.array([xmax, ymin, 1]))\n #point3 = np.dot(rot_mat, np.array([xmin, ymax, 1]))\n #point4 = np.dot(rot_mat, np.array([xmax, ymax, 1]))\n # concat np.array\n concat = np.vstack((point1, point2, point3, point4))\n # change type\n concat = concat.astype(np.int32)\n #print(concat)\n rx, ry, rw, rh = cv2.boundingRect(concat)\n return rx, ry, rw, rh\n\n\n\n\nfor key in load_dict:\n print(key)\n #print(keyl2)\n#print(load_dict['annotations'])\n#{'license': 1, 'height': 347, 'flickr_url': '', 'file_name': '190127_151952_00178855.jpg', 'width': 465, 'data_captured': '', 'id': 1457, 'coco_url': ''}\npicid={}\nfor each in load_dict['images']:\n picid[each['id']]=each['file_name']\n\npicbboxs={}\nmaximgid=0\nmaxid=0\ncategorydict={1:0,2:0,3:0,4:0,5:0}\nfor bboxstr in load_dict['annotations']:\n bboximg_id=bboxstr['image_id']\n categorydict[bboxstr['category_id']]+=1\n #print(next(itbgimagenames))\n if(maximgid5500:\n cv2.rectangle(img,(lefttopx,lefttopy),(rightbottomx,rightbottomy),(255,255,0),3)\n cv2.imshow(\"img\",img)\n cv2.waitKey(0)\n \n'''\nfor key in picbboxs:\n img=cv2.imread(os.path.join(picdir,picid[key]))\n rotatea=90\n rotateimg=rotate_image(img,rotatea)\n for eachbox in picbboxs[key]:\n print(eachbox)\n lefttopx=int(eachbox[0][0])\n lefttopy=int(eachbox[0][1])\n rightbottomx=int(eachbox[0][0]+eachbox[0][2])\n rightbottomy=int(eachbox[0][1]+eachbox[0][3])\n cv2.rectangle(img,(lefttopx,lefttopy),(rightbottomx,rightbottomy),(255,255,0),3)\n rotatex,rotatey,rotatew,rotateh=rotate_xml(img,lefttopx,lefttopy,rightbottomx,rightbottomy,rotatea)\n cv2.rectangle(rotateimg,(rotatex,rotatey),(rotatex+rotatew,rotatey+rotateh),(255,255,0),3)\n cv2.imshow(\"rotateimg\",rotateimg)\n cv2.imshow(\"show\",img)\n cv2.waitKey(0)\n #print(type(bboxstr['bbox']))\n'''\n \n \n#{'bbox': [512.0, 426.0, 177.0, 110.0], 'area': [], 'iscrowd': 0, 'id': 5075, 'segmentation': [], 'category_id': 5, 'image_id': 1459} \nfor each in load_dict['annotations']:\n #print(each)\n pass","sub_path":"readpic.py","file_name":"readpic.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"607427358","text":"# This program explains the logic to check if a string is palindrome or not\n\nclass node:\n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass linked_list:\n def __init__(self):\n self.head = None\n\n # Module to insert an element in the linked list \n def insert(self, data):\n Node = 
node(data)\n if self.head == None:\n self.head = Node\n else:\n temp_node = self.head\n while temp_node.next != None: \n temp_node = temp_node.next\n temp_node.next = Node\n\n # Module to print the linked list\n def print_list(self):\n print('\\t[', end=' ')\n temp_node = self.head\n while temp_node != None:\n print(\"%s\" % temp_node.data, end=' ,')\n temp_node = temp_node.next\n print('\\b]')\n\n # Module to reverse a linked list from the specified node as head.\n def reverse(self, temp_head=None):\n\n current_pointer = temp_head\n previous_pointer = None\n next_pointer = current_pointer.next\n\n while current_pointer != None: \n next_pointer = current_pointer.next\n current_pointer.next = previous_pointer\n previous_pointer = current_pointer\n current_pointer = next_pointer \n \n # Update head\n temp_head = previous_pointer\n return temp_head\n \n \n # Module to delete kth element in linked list \n def check_palindrome(self):\n # find middle node\n fast_pointer = self.head\n slow_pointer = self.head\n while fast_pointer != None and slow_pointer != None:\n # To handle if the list is having odd number of elements\n if fast_pointer.next != None:\n fast_pointer = fast_pointer.next.next\n else:\n fast_pointer = None\n parent_slow_pointer = slow_pointer\n slow_pointer = slow_pointer.next\n\n # Note: Here, slow Pointer is the last element of first half\n\n # Reverse the list from Middle element\n new_mid_head = self.reverse(slow_pointer)\n parent_slow_pointer.next = new_mid_head\n \n # check both the list(start to mid and mid to list are same)\n pointer_1 = self.head\n pointer_2 = new_mid_head\n\n while pointer_2 != None and pointer_1 != new_mid_head:\n #print(\"\\t[%s,%s]\" %(pointer_1.data, pointer_2.data))\n if pointer_1.data != pointer_2.data:\n return False\n pointer_1 = pointer_1.next\n pointer_2 = pointer_2.next\n return True\n \n\nif __name__=='__main__':\n # Create a linked list\n print(\"1. Create a Linked List Object\")\n ll_obj = linked_list()\n \n\n # Update head of the linked list \n print(\"2. Insert Elements in Linked list\")\n \"\"\"For Demo Inserting Elements in order\"\"\"\n ll_obj.insert(1)\n ll_obj.insert(2)\n ll_obj.insert(3)\n ll_obj.insert(4)\n ll_obj.insert(5)\n ll_obj.insert(5)\n ll_obj.insert(4)\n ll_obj.insert(3)\n ll_obj.insert(2)\n ll_obj.insert(1)\n\n # Print the list\n print(\"3. Linked List\") \n ll_obj.print_list()\n\n print(\"4. Checking is the Linked List is Palindrome or not\")\n result = ll_obj.check_palindrome()\n\n print(\"5. Linked List is Palindrome %s\" % result)\n\n # For Simplicity and easy understandability I'm creating adding a new element, \n # However you can also create a new list \n # and check the same.\n\n # Create a linked list\n print(\"1. Create a Linked List Object\")\n ll_obj.insert(2)\n\n # Print the list\n print(\"3. Linked List\") \n ll_obj.print_list()\n\n print(\"4. Checking is the Linked List is Palindrome or not\")\n result = ll_obj.check_palindrome()\n\n print(\"5. 
Linked List is Palindrome %s\" % result)\n","sub_path":"Linked List/check_list_palindrome.py","file_name":"check_list_palindrome.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"479731722","text":"import sys\n\nTODO_FILE = 'todo.txt'\nARCHIVE_FILE = 'done.txt'\n\nRED = \"\\033[1;31m\" \nBLUE = \"\\033[1;34m\"\nCYAN = \"\\033[1;36m\"\nGREEN = \"\\033[0;32m\"\nRESET = \"\\033[0;0m\"\nBOLD = \"\\033[;1m\"\nREVERSE = \"\\033[;7m\"\nYELLOW = \"\\033[0;33m\"\n\nADICIONAR = 'a'\nREMOVER = 'r'\nFAZER = 'f'\nPRIORIZAR = 'p'\nLISTAR = 'l'\n\n# Imprime texto com cores. Por exemplo, para imprimir \"Oi mundo!\" em vermelho, basta usar\n#\n# printCores('Oi mundo!', RED)\n# printCores('Texto amarelo e negrito', YELLOW + BOLD)\n\ndef printCores(texto, cor) :\n print(cor + texto + RESET)\n \n\n# Adiciona um compromisso aa agenda. Um compromisso tem no minimo\n# uma descrição. Adicionalmente, pode ter, em caráter opcional, uma\n# data (formato DDMMAAAA), um horário (formato HHMM), uma prioridade de A a Z, \n# um contexto onde a atividade será realizada (precedido pelo caractere\n# '@') e um projeto do qual faz parte (precedido pelo caractere '+'). Esses\n# itens opcionais são os elementos da tupla \"extras\", o segundo parâmetro da\n# função.\n#\n# extras ~ (data, hora, prioridade, contexto, projeto)\n#\n# Qualquer elemento da tupla que contenha um string vazio ('') não\n# deve ser levado em consideração. \ndef adicionar(descricao, extras):\n # não é possível adicionar uma atividade que não possui descrição. \n if descricao == '' :\n return False\n #(desc, (data, hora, pri, contexto, projeto) \n atividadeOrdenada = [extras[0], extras[1], extras[2], descricao, extras[3], extras[4]] # DDMMAAAA HHMM (P) DESC @CONTEXT +PROJ\n novaAtividade = ' '.join(atividadeOrdenada) #transformando em string\n try: \n fp = open(TODO_FILE, 'a')\n fp.write(novaAtividade + '\\n')\n fp.close()\n except IOError as err:\n print(\"Não foi possível escrever para o arquivo \", TODO_FILE)\n print(err)\n return False\n\n return True\n\n\n# Valida a prioridade.\ndef prioridadeValida(pri): \n pri = pri.upper() #deixando maiusculo pra poder checar\n if len(pri) != 3: #nao pode ser diferente de 3\n return False\n elif pri[0] != '(' or pri[2] != ')': #tem que ter parenteses\n return False\n elif (pri[1] < 'A' or pri[1] > 'Z'): #de A a Z\n return False\n return True \n\n\n# Valida a hora. Consideramos que o dia tem 24 horas, como no Brasil, ao invés\n# de dois blocos de 12 (AM e PM), como nos EUA.\ndef horaValida(horaMin):\n if len(horaMin) != 4 or not soDigitos(horaMin): #nao pode maior que 4 e tem que ser digitos\n return False\n else:\n if horaMin[0:2] > '23' or horaMin[2:] > '59': #checando se a hora está maior que 23. e minutis 59\n return False \n return True\n\n# Valida datas. Verificar inclusive se não estamos tentando\n# colocar 31 dias em fevereiro. Não precisamos nos certificar, porém,\n# de que um ano é bissexto. 
\ndef dataValida(data) :\n if len(data) != 8 or not soDigitos(data): #checando se nao é menor ou maior que 8 carecteres e se é so numeros\n return False\n else:\n if data[:2] > '31' or data[2:4] > '12' or data[:2] < '01' or data[2:4] < '01': #checando se o dia e o mês serão inválidos(caso geral)\n return False\n \n if data[2:4] == '02' and data[:2] > '29': #Fevereiro \n return False\n\n if data[2:4] == '04' and data[:2] > '30': #Abril\n return False\n\n if data[2:4] == '06' and data[:2] > '30': #Junho\n return False\n\n if data[2:4] == '09' and data[:2] > '30': #Setembro\n return False\n\n if data[2:4] == '11' and data[:2] > '30': #Novembro\n return False\n\n return True\n\n \n# Valida que o string do projeto está no formato correto. \ndef projetoValido(proj):\n if len(proj) > 2 and proj[0] == '+': #validando se começa com + e tem mais de dois caracteres.\n return True\n return False\n\n\n# Valida que o string do contexto está no formato correto. \ndef contextoValido(cont):\n if len(cont) > 2 and cont[0] == '@': #validando se começa com @ e tem mais de dois caracteres.\n return True\n return False\n\n# Valida que a data ou a hora contém apenas dígitos, desprezando espaços\n# extras no início e no fim.\ndef soDigitos(numero) :\n if type(numero) != str :\n return False\n for x in numero :\n if x < '0' or x > '9' :\n return False\n return True\n\n\n# Dadas as linhas de texto obtidas a partir do arquivo texto todo.txt, devolve\n# uma lista de tuplas contendo os pedaços de cada linha, conforme o seguinte\n# formato:\n#\n# (descrição, prioridade, (data, hora, contexto, projeto))\n#\n# É importante lembrar que linhas do arquivo todo.txt devem estar organizadas de acordo com o\n# seguinte formato:\n#\n# DDMMAAAA HHMM (P) DESC @CONTEXT +PROJ\n#\n# Todos os itens menos DESC são opcionais. Se qualquer um deles estiver fora do formato, por exemplo,\n# data que não tem todos os componentes ou prioridade com mais de um caractere (além dos parênteses),\n# tudo que vier depois será considerado parte da descrição. \ndef organizar(linhas):\n itens = [] \n for l in linhas:\n \n data = '' \n hora = ''\n pri = ''\n desc = ''\n contexto = ''\n projeto = ''\n \n l = l.strip() # remove espaços em branco e quebras de linha do começo e do fim\n \n tokens = l.split() # quebra o string em palavras\n \n # Processa os tokens um a um, verificando se são as partes da atividade.\n # Por exemplo, se o primeiro token é uma data válida, deve ser guardado\n # na variável data e posteriormente removido a lista de tokens. Feito isso,\n # é só repetir o processo verificando se o primeiro token é uma hora. Depois,\n # faz-se o mesmo para prioridade. Neste ponto, verifica-se os últimos tokens\n # para saber se são contexto e/ou projeto. Quando isso terminar, o que sobrar\n # corresponde à descrição. É só transformar a lista de tokens em um string e\n # construir a tupla com as informações disponíveis. 
\n\n dataCheck = False #para nao dar bug com data\n horaCheck = False #para nao dar bug com hora\n i = 0\n while i < len(tokens): \n \n if tokens[i][:1] <= '9' and tokens[i][:1] >= '0': #checando se é um número pra testar hora e data\n \n if len(tokens[i]) == 8 and not(dataCheck): #vendo se é uma data\n \n if dataValida(tokens[i]): #se for válida adiciona a variável\n data = tokens[i]\n tokens.pop(i)\n dataCheck = True #nao deixa mais entrar nessa condição, pois invalidou a primeira data, a segunda poderá ser descrição.\n\n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i) #mesmo que seja válida você remove pra no final ficar fácil add variável em descrição\n\n\n elif len(tokens[i]) == 4 and not(horaCheck): # vendo se é uma hora\n\n if horaValida(tokens[i]): #mesmo processo da Data\n hora = tokens[i]\n tokens.pop(i)\n horaCheck = True #nao deixa mais entrar nessa condição, pois invalidou a primeira data, a segunda poderá ser descrição.\n \n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i)\n \n \n \n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i)\n\n \n \n else: #se não for número aí vamos as letras\n \n if tokens[i][:1] == '+' and projeto == '': #checando o projeto\n \n if projetoValido(tokens[i]):\n projeto = tokens[i]\n tokens.pop(i)\n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i)\n \n \n elif tokens[i][:1] == '(' and tokens[i][2:] == ')' and pri == '': #checando prioridade\n\n if prioridadeValida(tokens[i]):\n pri = tokens[i]\n pri = pri.upper()\n tokens.pop(i)\n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i)\n\n elif tokens[i][:1] == '@' and contexto == '': #checando contexto\n\n if contextoValido(tokens[i]):\n contexto = tokens[i]\n tokens.pop(i)\n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i)\n else:\n desc = desc +' '+tokens[i]\n tokens.pop(i)\n dataCheck = True #se já entrou em letras e nao numeros é pq não pode haver mais datas e horas\n horaCheck = True\n \n desc = desc +''+ ' '.join(tokens) #pegando o resto do que sobrou nos tokens\n if desc == '' or desc == ' '*len(desc):\n raise ValueError('Não há descrição.')\n \n itens.append((desc, (data, hora, pri, contexto, projeto))) #(DESC, (DATA, HORA, PRI, CONTEXTO, PROJETO)). \n\n return itens\n\n# Datas e horas são armazenadas nos formatos DDMMAAAA e HHMM, mas são exibidas\n# como se espera (com os separadores apropridados). \n#\n# Uma extensão possível é listar com base em diversos critérios: (i) atividades com certa prioridade;\n# (ii) atividades a ser realizadas em certo contexto; (iii) atividades associadas com\n# determinado projeto; (vi) atividades de determinado dia (data específica, hoje ou amanhã). Isso não\n# é uma das tarefas básicas do projeto, porém. \ndef listar():\n \n fp = open(TODO_FILE,'r')\n arquivo_lido = fp.read() #lendo arquivo\n fp.close()\n \n lista_arquivo = arquivo_lido.splitlines() #criando lista sem \\n :D\n \n tuplas_organizadas = organizar(lista_arquivo) #organizando em tuplas\n\n ordenar_tuplas = ordenarPorPrioridade(ordenarPorDataHora(tuplas_organizadas))\n\n enumerar_lista = {} #dicionário para enumerar\n i = 00 #indice para adicionar enumeração na chave do dicionário\n for x in ordenar_tuplas: #(DESC, (DATA, HORA, PRI, CONTEXTO, PROJETO)). 
\n \n if x[1][0] != '' and x[1][1] != '': #se tiver data e hr\n enumerar_lista[i] = '{} {}/{}/{} {}h{}m {} {} {} {}'.format(str(i), x[1][0][:2], x[1][0][2:4], x[1][0][4:], x[1][1][:2], x[1][1][2:], x[1][2], x[0], x[1][3], x[1][4])\n \n if x[1][2][1:2] == 'A':\n printCores(enumerar_lista[i], BLUE + BOLD)\n elif x[1][2][1:2] == 'B':\n printCores(enumerar_lista[i], GREEN)\n elif x[1][2][1:2] == 'C':\n printCores(enumerar_lista[i], YELLOW) \n elif x[1][2][1:2] == 'D':\n printCores(enumerar_lista[i], CYAN)\n else:\n print(enumerar_lista[i])\n \n elif x[1][0] != '' and x[1][1] == '':\n enumerar_lista[i] = '{} {}/{}/{} {} {} {} {}'.format(str(i), x[1][0][:2], x[1][0][2:4], x[1][0][4:], x[1][2], x[0], x[1][3], x[1][4])\n\n if x[1][2][1:2] == 'A':\n printCores(enumerar_lista[i], BLUE + BOLD)\n elif x[1][2][1:2] == 'B':\n printCores(enumerar_lista[i], GREEN)\n elif x[1][2][1:2] == 'C':\n printCores(enumerar_lista[i], YELLOW) \n elif x[1][2][1:2] == 'D':\n printCores(enumerar_lista[i], CYAN)\n else:\n print(enumerar_lista[i])\n \n elif x[1][0] == '' and x[1][1] != '':\n enumerar_lista[i] = '{} {}h{}m {} {} {} {}'.format(str(i), x[1][1][:2], x[1][1][2:], x[1][2], x[0], x[1][3], x[1][4])\n\n if x[1][2][1:2] == 'A':\n printCores(enumerar_lista[i], BLUE + BOLD)\n elif x[1][2][1:2] == 'B':\n printCores(enumerar_lista[i], GREEN)\n elif x[1][2][1:2] == 'C':\n printCores(enumerar_lista[i], YELLOW) \n elif x[1][2][1:2] == 'D':\n printCores(enumerar_lista[i], CYAN)\n else:\n print(enumerar_lista[i])\n\n elif x[1][0] == '' and x[1][1] == '':\n enumerar_lista[i] = '{} {} {} {} {}'.format(str(i), x[1][2], x[0], x[1][3], x[1][4])\n\n if x[1][2][1:2] == 'A':\n printCores(enumerar_lista[i], BLUE + BOLD)\n elif x[1][2][1:2] == 'B':\n printCores(enumerar_lista[i], GREEN)\n elif x[1][2][1:2] == 'C':\n printCores(enumerar_lista[i], YELLOW) \n elif x[1][2][1:2] == 'D':\n printCores(enumerar_lista[i], CYAN)\n else:\n print(enumerar_lista[i])\n i = i + 1\n \ndef remover(num): #farei o mesmo que a função listar.\n \n if type(num) != int:\n raise ValueError('é necessário receber uma numeração para que esta função execute')\n \n fp = open(TODO_FILE,'r')\n arquivo_lido = fp.read() \n fp.close()\n \n lista_arquivo = arquivo_lido.splitlines() \n\n tuplas_organizadas = organizar(lista_arquivo) \n\n ordenar_tuplas = ordenarPorPrioridade(ordenarPorDataHora(tuplas_organizadas)) #\n\n enumerar_lista = {}\n i = 0\n for x in ordenar_tuplas: #aqui só crio o dicionário para que ele tenha o mesmo modo que a função listar.\n enumerar_lista[i] = x\n i = i + 1\n\n atualizar = False #variavel para ajudar a atualizar o arquivo todo.txt\n if not(num in enumerar_lista): #se o numero nao tiver no dicionario: \n raise ValueError('Item nã existe')\n else: #caso ele exista, você retira da lista ordenar_tuplas\n ordenar_tuplas.remove(enumerar_lista[num])\n atualizar = True #olha a variavel aqui mudada, caso a lista tenha um item removido\n\n if atualizar:\n abrir = open(TODO_FILE,'w')\n for x in ordenar_tuplas:\n abrir.write('%s %s %s %s %s %s\\n' %(x[1][0], x[1][1], x[1][2], x[0], x[1][3], x[1][4]))\n abrir.close()\n \n# prioridade é uma letra entre A a Z, onde A é a mais alta e Z a mais baixa.\n# num é o número da atividade cuja prioridade se planeja modificar, conforme\n# exibido pelo comando 'l'.\n\ndef priorizar(num, prioridade):\n \n if type(num) != int:\n raise ValueError('é necessário receber uma numeração para que esta função execute')\n \n if (prioridade < 'A' or prioridade > 'Z') and (prioridade < 'a' or prioridade > 'z'):\n raise 
ValueError('a prioridade precisa ser um caractere alfabético')\n prioridade = prioridade.upper()\n prioridade = '(%s)' %(prioridade)\n fp = open(TODO_FILE,'r')\n arquivo_lido = fp.read() \n fp.close()\n \n lista_arquivo = arquivo_lido.splitlines() \n\n tuplas_organizadas = organizar(lista_arquivo) \n\n ordenar_tuplas = ordenarPorPrioridade(ordenarPorDataHora(tuplas_organizadas)) #\n\n enumerar_lista = {}\n i = 0\n for x in ordenar_tuplas: #aqui só crio o dicionário para que ele tenha o mesmo modo que a função listar.\n enumerar_lista[i] = x\n i = i + 1\n \n atualizar = False\n if not(num in enumerar_lista):\n raise ValueError('Item não existe')\n \n if prioridadeValida(enumerar_lista[num][1][2]):\n raise ValueError('Já existe uma prioridade para este item')\n \n \n else: #(DESC, (DATA, HORA, PRI, CONTEXTO, PROJETO))\n salve_tupla = (enumerar_lista[num][0],(enumerar_lista[num][1][0], enumerar_lista[num][1][1], prioridade, enumerar_lista[num][1][3], enumerar_lista[num][1][4]))\n ordenar_tuplas.remove(enumerar_lista[num])\n enumerar_lista[num] = salve_tupla\n \n ordenar_tuplas.append(enumerar_lista[num])\n \n atualizar = True\n\n if atualizar:\n abrir = open(TODO_FILE,'w')\n for x in ordenar_tuplas:\n abrir.write('%s %s %s %s %s %s\\n' %(x[1][0], x[1][1], x[1][2], x[0], x[1][3], x[1][4]))\n abrir.close()\n\n \ndef fazer(num):\n\n if type(num) != int:\n raise ValueError('é necessário receber uma numeração para que esta função execute')\n\n fp = open(TODO_FILE,'r')\n arquivo_lido = fp.read() \n fp.close()\n \n lista_arquivo = arquivo_lido.splitlines() \n\n tuplas_organizadas = organizar(lista_arquivo) \n\n ordenar_tuplas = ordenarPorPrioridade(ordenarPorDataHora(tuplas_organizadas)) #\n\n enumerar_lista = {}\n i = 0\n for x in ordenar_tuplas: \n enumerar_lista[i] = x\n i = i + 1\n \n atualizar = False\n if not(num in enumerar_lista):\n raise ValueError('Item não existe')\n\n else:\n para_todo = enumerar_lista[num] #para_todo vai guardar a tupla correspondente para ser escrita no arquivo todo.txt\n ordenar_tuplas.remove(enumerar_lista[num])\n atualizar = True\n\n if atualizar:\n abrir = open(TODO_FILE,'w')\n for x in ordenar_tuplas:\n \n abrir.write('%s %s %s %s %s %s\\n' %(x[1][0], x[1][1], x[1][2], x[0], x[1][3], x[1][4]))\n abrir.close()\n\n if atualizar:\n \n escrever = open(ARCHIVE_FILE,'a')\n \n escrever.write('%s %s %s %s %s %s\\n' %(para_todo[1][0], para_todo[1][1], para_todo[1][2], para_todo[0], para_todo[1][3], para_todo[1][4]))\n escrever.close\n\n\n\ndef ordenarPorDataHora(itens): #(DESC, (DATA, HORA, PRI, CONTEXTO, PROJETO)).\n\n i = 0\n while i < len(itens):\n j = 0\n while j < len(itens) -1:\n \n if itens[j][1][0][4:] > itens[j+1][1][0][4:]: #organizando por ano\n itens[j], itens[j+1] = itens[j+1], itens[j]\n \n elif itens[j][1][0][4:] == itens[j+1][1][0][4:]:\n if itens[j][1][0][2:4] > itens[j+1][1][0][2:4]: #organizando por mês\n itens[j], itens[j+1] = itens[j+1], itens[j]\n \n elif itens[j][1][0][2:4] == itens[j+1][1][0][2:4]:\n if itens[j][1][0][:2] > itens[j+1][1][0][:2]: #organizando por dia\n itens[j], itens[j+1] = itens[j+1], itens[j]\n\n j = j + 1 #explicação: Se não identar os ifs, tudo fica se desordenando.\n i = i + 1\n \n i = 0\n while i < len(itens):\n j = 0\n while j < len(itens)-1:\n\n if itens[j][1][0] == itens[j+1][1][0]:\n if itens[j][1][1] > itens[j+1][1][1]: #ordenando por hora\n itens[j], itens[j+1] = itens[j+1], itens[j]\n j = j + 1\n i = i + 1\n \n semDataHr = [] #lista para todos os itens sem data ou hora\n i = 0\n while i < len(itens):\n if itens[i][1][0] 
== '' or itens[i][1][1] == '':\n semDataHr.append(itens[i])\n itens.pop(i)\n i = i - 1\n i = i + 1\n \n for item in semDataHr: #colocando de volta na lista principal sem ordem definida\n itens.append(item)\n \n return itens\n\n\n \ndef ordenarPorPrioridade(itens): #(DESC, (DATA, HORA, PRI, CONTEXTO, PROJETO)).\n\n i = 0\n while i < len(itens):\n j = 0\n while j < len(itens) -1:\n if itens[j][1][2] > itens[j+1][1][2]: #sem igual pq se não vai deslocar o que já tá organizado pela data.\n itens[j], itens[j+1] = itens[j+1], itens[j]\n j = j + 1\n i = i + 1\n \n semPrioridade = [] #lista para todos os itens sem prioridade\n i = 0\n while i < len(itens):\n if itens[i][1][2] == '': \n semPrioridade.append(itens[i])\n itens.pop(i)\n i = i - 1\n i = i + 1\n \n for item in semPrioridade: #colocando de volta na lista principal sem ordem definida\n itens.append(item)\n \n\n return itens\n\n\n\n# Esta função processa os comandos e informações passados através da linha de comando e identifica\n# que função do programa deve ser invocada. Por exemplo, se o comando 'adicionar' foi usado,\n# isso significa que a função adicionar() deve ser invocada para registrar a nova atividade.\n# O bloco principal fica responsável também por tirar espaços em branco no início e fim dos strings\n# usando o método strip(). Além disso, realiza a validação de horas, datas, prioridades, contextos e\n# projetos. \ndef processarComandos(comandos) :\n foiListado = False #criei essa variável como prevenção de remoção de itens aleatórios\n \n if comandos[1] == 'a':\n comandos.pop(0) # remove 'agenda.py'\n comandos.pop(0) # remove 'adicionar'\n itemParaAdicionar = organizar([' '.join(comandos)])[0]\n if itemParaAdicionar[0] != '':\n # itemParaAdicionar = (descricao, (prioridade, data, hora, contexto, projeto))\n adicionar(itemParaAdicionar[0], itemParaAdicionar[1]) # novos itens não têm prioridade\n \n \n elif comandos[1] == 'l':\n foiListado = True #caso seja listado a variavel recebe True\n listar()\n \n elif comandos[1] == 'r':\n remover(int(comandos[2]))\n\n elif comandos[1] == 'f':\n fazer(int(comandos[2]))\n \n \n\n elif comandos[1] == 'p':\n priorizar(int(comandos[2]), comandos[3])\n \n \n\n\n else :\n print(\"Comando inválido.\")\n \n \n# sys.argv é uma lista de strings onde o primeiro elemento é o nome do programa\n# invocado a partir da linha de comando e os elementos restantes são tudo que\n# foi fornecido em sequência. Por exemplo, se o programa foi invocado como\n#\n# python3 agenda.py a Mudar de nome.\n#\n# sys.argv terá como conteúdo\n#\n# ['agenda.py', 'a', 'Mudar', 'de', 'nome']\nprocessarComandos(sys.argv)\n","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":19901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"149119422","text":"# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport image_util\nfrom paddle.utils.image_util import *\nimport random\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport numpy as np\nimport xml.etree.ElementTree\nimport os\nimport time\nimport copy\n\n\nclass Settings(object):\n def __init__(self,\n dataset=None,\n data_dir=None,\n label_file=None,\n resize_h=None,\n resize_w=None,\n mean_value=[104., 117., 123.],\n apply_distort=True,\n apply_expand=True,\n ap_version='11point',\n toy=0):\n self._dataset = dataset\n self._ap_version = ap_version\n self._toy = toy\n self._data_dir = data_dir\n self._apply_distort = apply_distort\n self._apply_expand = apply_expand\n self._resize_height = resize_h\n self._resize_width = resize_w\n self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(\n 'float32')\n self._expand_prob = 0.5\n self._expand_max_ratio = 4\n self._hue_prob = 0.5\n self._hue_delta = 18\n self._contrast_prob = 0.5\n self._contrast_delta = 0.5\n self._saturation_prob = 0.5\n self._saturation_delta = 0.5\n self._brightness_prob = 0.5\n # _brightness_delta is the normalized value by 256\n # self._brightness_delta = 32\n self._brightness_delta = 0.125\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def ap_version(self):\n return self._ap_version\n\n @property\n def toy(self):\n return self._toy\n\n @property\n def apply_expand(self):\n return self._apply_expand\n\n @property\n def apply_distort(self):\n return self._apply_distort\n\n @property\n def data_dir(self):\n return self._data_dir\n\n @data_dir.setter\n def data_dir(self, data_dir):\n self._data_dir = data_dir\n\n @property\n def label_list(self):\n return self._label_list\n\n @property\n def resize_h(self):\n return self._resize_height\n\n @property\n def resize_w(self):\n return self._resize_width\n\n @property\n def img_mean(self):\n return self._img_mean\n\n\ndef preprocess(img, bbox_labels, mode, settings):\n img_width, img_height = img.size\n sampled_labels = bbox_labels\n if mode == 'train':\n if settings._apply_distort:\n img = image_util.distort_image(img, settings)\n if settings._apply_expand:\n img, bbox_labels, img_width, img_height = image_util.expand_image(\n img, bbox_labels, img_width, img_height, settings)\n # sampling\n batch_sampler = []\n # hard-code here\n batch_sampler.append(\n image_util.sampler(1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0,\n True))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0,\n True))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0,\n True))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0,\n True))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0,\n True))\n sampled_bbox = image_util.generate_batch_samples(\n batch_sampler, bbox_labels, img_width, img_height)\n\n img = np.array(img)\n if len(sampled_bbox) > 0:\n idx = int(random.uniform(0, 
len(sampled_bbox)))\n img, sampled_labels = image_util.crop_image(\n img, bbox_labels, sampled_bbox[idx], img_width, img_height)\n\n img = Image.fromarray(img)\n img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)\n img = np.array(img)\n\n if mode == 'train':\n mirror = int(random.uniform(0, 2))\n if mirror == 1:\n img = img[:, ::-1, :]\n for i in xrange(len(sampled_labels)):\n tmp = sampled_labels[i][1]\n sampled_labels[i][1] = 1 - sampled_labels[i][3]\n sampled_labels[i][3] = 1 - tmp\n # HWC to CHW\n if len(img.shape) == 3:\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 1, 0)\n # RBG to BGR\n img = img[[2, 1, 0], :, :]\n img = img.astype('float32')\n img -= settings.img_mean\n img = img * 0.007843\n return img, sampled_labels\n\n\ndef put_txt_in_dict(input_txt):\n with open(input_txt, 'r') as f_dir:\n lines_input_txt = f_dir.readlines()\n\n dict_input_txt = {}\n num_class = 0\n for i in range(len(lines_input_txt)):\n tmp_line_txt = lines_input_txt[i].strip('\\n\\t\\r')\n if '--' in tmp_line_txt:\n if i != 0:\n num_class += 1\n dict_input_txt[num_class] = []\n dict_name = tmp_line_txt\n dict_input_txt[num_class].append(tmp_line_txt)\n if '--' not in tmp_line_txt:\n if len(tmp_line_txt) > 6:\n # tmp_line_txt = tmp_line_txt[:-2]\n split_str = tmp_line_txt.split(' ')\n x1_min = float(split_str[0])\n y1_min = float(split_str[1])\n x2_max = float(split_str[2])\n y2_max = float(split_str[3])\n tmp_line_txt = str(x1_min) + ' ' + str(y1_min) + ' ' + str(\n x2_max) + ' ' + str(y2_max)\n dict_input_txt[num_class].append(tmp_line_txt)\n else:\n dict_input_txt[num_class].append(tmp_line_txt)\n\n return dict_input_txt\n\n\ndef expand_bboxes(bboxes,\n expand_left=2.,\n expand_up=2.,\n expand_right=2.,\n expand_down=2.):\n \"\"\"\n Expand bboxes, expand 2 times by defalut.\n \"\"\"\n expand_boxes = []\n for bbox in bboxes:\n xmin = bbox[0]\n ymin = bbox[1]\n xmax = bbox[2]\n ymax = bbox[3]\n w = xmax - xmin\n h = ymax - ymin\n ex_xmin = max(xmin - w / expand_left, 0.)\n ex_ymin = max(ymin - h / expand_up, 0.)\n ex_xmax = min(xmax + w / expand_right, 1.)\n ex_ymax = min(ymax + h / expand_down, 1.)\n expand_boxes.append([ex_xmin, ex_ymin, ex_xmax, ex_ymax])\n return expand_boxes\n\n\ndef pyramidbox(settings, file_list, mode, shuffle):\n\n dict_input_txt = {}\n dict_input_txt = put_txt_in_dict(file_list)\n\n def reader():\n if mode == 'train' and shuffle:\n random.shuffle(dict_input_txt)\n for index_image in range(len(dict_input_txt)):\n\n image_name = dict_input_txt[index_image][0] + '.jpg'\n image_path = os.path.join(settings.data_dir, image_name)\n\n im = Image.open(image_path)\n if im.mode == 'L':\n im = im.convert('RGB')\n im_width, im_height = im.size\n\n # layout: label | xmin | ymin | xmax | ymax\n bbox_labels = []\n for index_box in range(len(dict_input_txt[index_image])):\n if index_box >= 2:\n bbox_sample = []\n temp_info_box = dict_input_txt[index_image][\n index_box].split(' ')\n xmin = float(temp_info_box[0])\n ymin = float(temp_info_box[1])\n w = float(temp_info_box[2])\n h = float(temp_info_box[3])\n xmax = xmin + w\n ymax = ymin + h\n\n bbox_sample.append(1)\n bbox_sample.append(float(xmin) / im_width)\n bbox_sample.append(float(ymin) / im_height)\n bbox_sample.append(float(xmax) / im_width)\n bbox_sample.append(float(ymax) / im_height)\n bbox_labels.append(bbox_sample)\n\n im, sample_labels = preprocess(im, bbox_labels, mode, settings)\n sample_labels = np.array(sample_labels)\n if len(sample_labels) == 0: continue\n im = im.astype('float32')\n boxes = 
sample_labels[:, 1:5]\n lbls = [1] * len(boxes)\n difficults = [1] * len(boxes)\n yield im, boxes, expand_bboxes(boxes), lbls, difficults\n\n return reader\n\n\ndef train(settings, file_list, shuffle=True):\n return pyramidbox(settings, file_list, 'train', shuffle)\n\n\ndef infer(settings, image_path):\n def batch_reader():\n img = Image.open(image_path)\n if img.mode == 'L':\n img = im.convert('RGB')\n im_width, im_height = img.size\n if settings.resize_w and settings.resize_h:\n img = img.resize((settings.resize_w, settings.resize_h),\n Image.ANTIALIAS)\n img = np.array(img)\n # HWC to CHW\n if len(img.shape) == 3:\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 1, 0)\n # RBG to BGR\n img = img[[2, 1, 0], :, :]\n img = img.astype('float32')\n img -= settings.img_mean\n img = img * 0.007843\n img = [img]\n img = np.array(img)\n return img\n\n return batch_reader\n","sub_path":"fluid/face_detection/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":9871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"492165572","text":"#This script takes a nc file with tangential/radial winds as well as the tc center locations for all vertical coordinates\n\n#INPUTS to the script are as follows:\n# twind_filep = the path to tangential/radial wind .nc file\n# inbound_outbound_dist = the distance in km to make cross section from the center of the TC. So if it 100km the cross section will be 200km wide\n# angle = the angle relative to a meridian to cut throught the storm. the code automatically finds the center of the vortex and the bottom most level and makes sure to cut through it\n# hor_contour_lev = the vertical level to make an inset plot to show where the cross section is through\n# date = string of the date uses for saving the figure\n# plot_recon = whether or not to plot recon obs on the plot inset\n# recon_file = path to the file with recon obs, if plot_recon is False this won't matter\n# vert_coord = name of vertical coordinate in original grib file\n\nimport sys\nsys.path.insert(0, '../colors')\nsys.path.insert(0, '..')\nsys.path.insert(0, '../constants')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport constants\nfrom geopy.distance import great_circle\nfrom recenter_utils import haversine,calc_distance_from_point\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.basemap import Basemap\nimport color_bars\ncmap,norm,ticks = color_bars.Tyler_colors('wind_speed')\nfrom recon_utils import read_file\nfrom read_trwind import read_trwind_file\nfrom create_cross_points import create_cross_latlon_points\nfrom cross_section import cross_section\nfrom create_output_dir import create_output_directory\nfrom uv_to_rt_winds import uv_to_rt\nfrom pygrib_util import *\nfrom recon_utils import *\n####################################################\n# INPUTS\n####################################################\ntwind_file=\"/mnt/lfs1/HFIP/hybda/greent/realdeal/results/gbradar/back_anal/201610062200/trwind_anal/matthew_201610062200_anal_trwind.nc\"\ngrib_file=\"/mnt/lfs1/HFIP/hybda/greent/realdeal/results/gbradar/back_anal/201610062200/wrfout_201610062200_gbradar_anl.grib\"\ninbound_outbound_dist=100.\nangle=45.\nhor_contour_lev = 1.0 #km\ndate=\"201610062040\"\nplot_recon=True\nsave_recon=True\nrecon_file=\"/mnt/lfs1/HFIP/hybda/greent/verify_recon/gbradar_201610062200/results_conv_ges.2016100622\"\nvert_coord = \"heightAboveSea\"\nlatcoord = \"latitude\"\nloncoord = 
\"longitude\"\nsecondary_circ=False\nrecon_var=\"uv\"\nrecon_ob_diff=False\ncenter_recon=True\nrecon_center_fix=[26.347,-78.796]\n\n####################################################\n# READ IN DATA FROM FILES\n####################################################\n\n#Read trwind file\nlons2d,lats2d,heights,twind,rwind,tc_lons,tc_lats = read_trwind_file(twind_file,latcoord,loncoord,vert_coord)\n\n#Check heights (they might come out in km)\nif np.max(heights) < 1000.:\n heights=heights*1000.\n\n####################################################\n#CREATE CROSS SECTION\n####################################################\n\n#For lat-lon grid-------\n#Get the 1d version of lats and lons for interpolating function\nlons1d = lons2d[0,:]\nlats1d = lats2d[:,0]\n#------------------------------\n\n#Get points for the cross section and their distance from the center of cross section\nlonpoints,latpoints,distance_from_center = create_cross_latlon_points(lons2d,lats2d,tc_lons[0],tc_lats[0],inbound_outbound_dist,angle)\n\n#Calculate cross section\n\n#LCC grid\n#myslice = cross_section(lons2d,lats2d,heights,twind*constants.msToKnots,lonpoints,latpoints)\n\n#Lat-lon grid\nmyslice = cross_section(lons1d,lats1d,heights,twind*constants.msToKnots,lonpoints,latpoints)\n\n####################################################\n#RECON OBSERVATIONS\n####################################################\nif plot_recon:\n print(\"Doing some calculations for recon obs.. one second\")\n\n #Get heights on pressure surfaces, cant use xarray to get this variable\n lats_grib,lons_grib,plevs,heightsOnP = pygrib_get_3d_var(grib_file,\"Geopotential Height\",\"isobaricInhPa\")\n\n #Read data out of recon file\n vartype,rtimes,rlats,rlons,rp,rusag,val1,inc1,val2,inc2=read_file(recon_file)\n\n #Recenter the recon observations based on the difference between model center and recon center\n if center_recon:\n dlat = tc_lats[0] - recon_center_fix[0]\n dlon = tc_lons[0] - recon_center_fix[1]\n rlats = rlats + dlat\n rlons = rlons + dlon\n recon_center_fix[0] = recon_center_fix[0] + dlat\n recon_center_fix[1] = recon_center_fix[1] + dlon\n\n #Only keep observations within 10.0 km of cross section and convert them into polar coords\n rlons,rlats,rheights,rradius,rindx=recon_trim_convert(lonpoints,latpoints,lons2d,lats2d,plevs,heightsOnP,rlons,rlats,rp,10.0,tc_lons[0],tc_lats[0],angle)\n\n #lons,lats were trimmed when they came out of above function, trim other variables\n vartype=vartype[rindx]\n rtimes=rtimes[rindx]\n rusag=rusag[rindx]\n val1=val1[rindx]\n inc1=inc1[rindx]\n val2=val2[rindx]\n inc2=inc2[rindx]\n\n #Only keep recon obs that match the wanted recon variable\n where_vartype = vartype == recon_var\n\n #Trim again\n rlons=rlons[where_vartype]\n rlats=rlats[where_vartype]\n vartype=vartype[where_vartype]\n rtimes=rtimes[where_vartype]\n rusag=rusag[where_vartype]\n val1=val1[where_vartype]\n inc1=inc1[where_vartype]\n val2=val2[where_vartype]\n inc2=inc2[where_vartype]\n rradius=rradius[where_vartype]\n rheights=rheights[where_vartype]\n\n #Calculate tangential winds from recon uv winds\n lat0=recon_center_fix[0]\n lon0=recon_center_fix[1]\n urecon = val1\n vrecon = val2\n rwind_recon,twind_recon = uv_to_rt(rlons,rlats,urecon,vrecon,lon0,lat0)\n twind_recon = twind_recon*constants.msToKnots\n\n #If difference between model and recon obs is wanted\n if recon_ob_diff:\n where_inrange = np.abs(rradius)= self.lifespanTicks:\n self.die('old age!') # die of old age\n\n self.__updateMaturityLevel()\n 
self.__lookForPreyAndMates()\n self.__handleAndUpdateHunger()\n self.__chooseDirectionAndMove()\n self.ticksAlive += 1\n\n def printStatus(self):\n return\n\n def __initializeSex(self):\n if random.randint(0, 1) == 0:\n self.sex = \"M\"\n else:\n self.sex = \"F\"\n\n def __initializeAgeAndMaturity(self, isNewborn):\n if isNewborn:\n self.ticksAlive = 0\n else:\n self.ticksAlive = random.randint(0, self.lifespanTicks - 1)\n\n self.isMature = self.ticksAlive >= self.maturityTicks\n\n # Checks if Fish has matured and updates self.isMature accordingly\n def __updateMaturityLevel(self):\n if self.isMature == False and self.ticksAlive >= self.maturityTicks:\n self.isMature = True\n\n def __lookForPreyAndMates(self):\n neighborOrgs = self.ecosystem.getNeighbors(self)\n for org in neighborOrgs:\n if type(org) == type(self): # found a fellow fish!\n if self.isMature and org.isMature:\n if self.sex == \"F\" and org.sex == \"M\":\n self.reproduce()\n break\n elif self.hunger > 0 and self.ecosystem.isEdible(self, org):\n ate = org.beEaten()\n if ate: # if the prey didn't manage to get away\n self.hunger -= 1\n break\n\n # Checks if Fish has starved; if yes dies, else increments hunger.\n def __handleAndUpdateHunger(self):\n if self.hunger > self.starvationLevel:\n self.die('starvation!')\n\n self.hunger += 1 # every tick get 1 more hunger unit\n\n def __chooseDirectionAndMove(self):\n self.randomDirection()\n self.move()\n\n\n\n \n\n\n","sub_path":"ecosym/code/fish.py","file_name":"fish.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"123931797","text":"#!/usr/bin/env python\r\n\r\nimport synergia_workflow\r\n\r\nopts = synergia_workflow.Options(\"iota\")\r\n\r\n\r\nopts.add(\"map_order\", 1, \"Map order\", int)\r\n#default output directory\r\nopts.add(\"output_dir\",\"SC_test\",\"Directory for output files\", str)\r\n#opts.add(\"map_order\", 1, \"Map order\", int)\r\n#opts.add(\"steps\", steps, \"Number of steps per turn\", int)\r\nopts.add(\"steps_per_element\",2,\"Number of steps per element\", int)\r\n\r\n\r\nopts.add(\"verbosity\", 1, \"Verbosity of propagation\", int)\r\nopts.add(\"turns\", 200, \"Number of turns\", int)\r\nopts.add(\"maxturns\", 2000, \"Maximum number of turns to run before checkpointing and quitting\", int)\r\nopts.add(\"checkpointperiod\", 3000, \"Number of turns to run between checkpoints\", int)\r\n\r\n\r\n#opts.add(\"emitx\", 2.5e-6, \"real sigma Horizontal emittance [m rad]\", float)\r\n#opts.add(\"emity\", 2.5e-6, \"real sigma Vertical emittance [m rad]\", float)\r\n#opts.add(\"emit_transverse\", 7.0e-6, \"transverse emittance for elliptical beam [m rad]\", float)\r\nopts.add(\"radius\", 0.5, \"aperture radius [m]\", float)\r\nopts.add(\"emit\",9.74e-6, \"H0 value corresponding to real sigma horizontal emittance of 0.3 mm-mrad\", float)\r\nopts.add(\"stdz\", 0.05, \"sigma read z [m]\", float) #5 cm bunch length for IOTA\r\nopts.add(\"dpop\", 0.0, \"Delta-p/p spread\", float)\r\n\r\nopts.add(\"macro_particles\", 10000, \"Number of macro particles\", int)\r\nopts.add(\"real_particles\", 1.0e11, \"Number of real particles\", float)\r\nopts.add(\"tracked_particles\", 10000, \"Number of tracked particles\", int)\r\nopts.add(\"seed\", 349250524, \"Pseudorandom number generator seed\", int)\r\n\r\nopts.add(\"bunch_file\",\"myBunch.txt\",\"txt file for bunch particles\", str)\r\n\r\n#----------Space Charge Stuff---------------------\r\nopts.add(\"gridx\", 64, \"grid points in x 
for solver\", int)\r\nopts.add(\"gridy\", 64, \"grid points in y for solver\", int)\r\nopts.add(\"gridz\", 1, \"grid points in z for solver\", int) #1 for explicit 2D solver\r\nopts.add(\"spacecharge\", True, \"whether space charge is on\", bool)\r\nopts.add(\"solver\", \"2dopen-hockney\", \"solver to use, '2dopen-hockney','3dopen-hockney', '2dbassetti-erskine'\", str)\r\n\r\n#options for controlling chef propagation vs. chef mapping!\r\nopts.add(\"use_maps\", \"none\", \"use maps for propagation either all, none, onlyrf, nonrf\") #none means chef propagate\r\n#opts.add(\"use_maps\", \"all\", \"use maps for propagation either all, none, onlyrf, nonrf\")\r\n#opts.add(\"allmaps\", False, \"Use all maps for propagation\", bool)\r\nopts.add(\"requested_stepper\", \"splitoperator\", \"Simulation stepper, either 'independent','elements','splitoperator','soelements'\", str)\r\n\r\n#----------MPI STUFF---------------------\r\nopts.add(\"comm_divide\", 8, \"size of communicator\")\r\n#opts.add(\"matching\", \"4dmoments\", \"matching procedure (4dmoments)\")\r\n#opts.add(\"checkpointperiod\", 2000, \"Number of turns to run between checkpoints\", int)\r\nopts.add(\"concurrent_io\", 8, \"number of concurrent io threads for checkpointing\", int)\r\n#opts.add(\"nsigma\", 8.0, \"nsigma for solver\", float)\r\n#opts.add(\"long_kicks\", True, \"use longitudinal kicks\", bool)\r\n#opts.add(\"cutoffnsigma\", 2.6, \"cut off bunch at cutoffnsigma standard deviations\")\r\n\r\n#job_mgr = synergia_workflow.Job_manager(\"sc_test_14mA.py\", opts)\r\n\r\n#job_mgr = synergia_workflow.Job_manager(\"iota_66_1IO_nll_space_charge.py\", opts)","sub_path":"synergia/SC_test_options.py","file_name":"SC_test_options.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"445945651","text":"# pylint: disable=E0401,E0602\nfrom robot.libraries.BuiltIn import BuiltIn\nfrom dto import *\n\n# Credentials\nadmin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}\noa_credentials = {'username': 'Administrator', 'password': 'hpvse14'}\nilo_credentials = {'username': 'Administrator', 'password': 'hpvse1-ilo'}\ncliq_credentials = {'mgmt_ip': '16.71.149.173', 'username': 'admin', 'password': 'admin'}\n\n\"\"\"\"# Resource types for X-API-Version=800\nENCLOSURE_TYPE = 'EnclosureV7'\nSERVER_PROFILE_TYPE = 'ServerProfileV9'\"\"\"\n\n# Enclosures, Interconnects, Server Hardware, Networks, ULS, LIG, and EG\n# Enclosures\nENC1 = 'wpst22'\nENC1_OA1 = \"16.125.77.71\"\nENC2 = 'wpst23'\nENC2_OA1 = \"16.125.77.80\"\nENC3 = 'wpst26'\nENC3_OA1 = \"16.125.79.45\"\n# Interconnects\nENC1ICBAY1 = '%s, interconnect 1' % ENC1\nENC1ICBAY2 = '%s, interconnect 2' % ENC1\nENC1ICBAY3 = '%s, interconnect 3' % ENC1\nENC1ICBAY4 = '%s, interconnect 4' % ENC1\nENC1ICBAY5 = '%s, interconnect 5' % ENC1\nENC1ICBAY6 = '%s, interconnect 6' % ENC1\nENC2ICBAY1 = '%s, interconnect 1' % ENC2\nENC2ICBAY2 = '%s, interconnect 2' % ENC2\nENC2ICBAY3 = '%s, interconnect 3' % ENC2\nENC2ICBAY4 = '%s, interconnect 4' % ENC2\nENC2ICBAY5 = '%s, interconnect 5' % ENC2\nENC2ICBAY6 = '%s, interconnect 6' % ENC2\nENC3ICBAY1 = '%s, interconnect 1' % ENC3\nENC3ICBAY2 = '%s, interconnect 2' % ENC3\nENC3ICBAY3 = '%s, interconnect 3' % ENC3\nENC3ICBAY4 = '%s, interconnect 4' % ENC3\nENC3ICBAY5 = '%s, interconnect 5' % ENC3\nENC3ICBAY6 = '%s, interconnect 6' % ENC3\n# Server Hardware\nENC1SHBAY1 = '%s, bay 1' % ENC1 # BL465c Gen8\nENC1SHBAY2 = '%s, bay 2' % ENC1 # BL465c 
Gen8\nENC1SHBAY3 = '%s, bay 3' % ENC1 # BL465c Gen8\nENC1SHBAY4 = '%s, bay 4' % ENC1 # BL420c Gen8\nENC1SHBAY5 = '%s, bay 5' % ENC1 # BL460c Gen9\nENC1SHBAY6 = '%s, bay 6' % ENC1 # BL460c G6\nENC1SHBAY8 = '%s, bay 7' % ENC1 # BL495c G5\nENC1SHBAY14 = '%s, bay 14' % ENC1 # BL460c Gen10\nENC1SHBAY16 = '%s, bay 16' % ENC1 # BL460c Gen10\nENC2SHBAY1 = '%s, bay 1' % ENC2 # BL465c Gen8\nENC2SHBAY2 = '%s, bay 2' % ENC2 # BL465c Gen8\nENC2SHBAY3 = '%s, bay 3' % ENC2 # BL465c Gen8\nENC2SHBAY4 = '%s, bay 4' % ENC2 # BL420c Gen8\nENC2SHBAY5 = '%s, bay 5' % ENC2 # BL460c Gen9\nENC2SHBAY6 = '%s, bay 6' % ENC2 # BL460c G6\nENC2SHBAY7 = '%s, bay 7' % ENC2 # BL2x220c G5\nENC2SHBAY10 = '%s, bay 10' % ENC2 # BL460c Gen10\nENC2SHBAY16 = '%s, bay 16' % ENC2 # BL460c Gen10\nENC3SHBAY1 = '%s, bay 1' % ENC3 # BL465c Gen8\nENC3SHBAY2 = '%s, bay 2' % ENC3 # BL465c Gen8\nENC3SHBAY3 = '%s, bay 3' % ENC3 # BL465c Gen8\nENC3SHBAY4 = '%s, bay 4' % ENC3 # BL420c Gen8\nENC3SHBAY5 = '%s, bay 5' % ENC3 # BL460c Gen9\nENC3SHBAY7 = '%s, bay 7' % ENC3 # BL660c Gen9\nENC3SHBAY8 = '%s, bay 8' % ENC3 # BL660c Gen8\nENC3SHBAY9 = '%s, bay 9' % ENC3 # BL460c G7\nENC3SHBAY10 = '%s, bay 10' % ENC3 # BL465c G7\n# LIGs and EGs\nLIG1_NAME = 'LIG22'\nEG1_NAME = 'EG22'\nLIG2_NAME = 'LIG23'\nEG2_NAME = 'EG23'\nLIG3_NAME = 'LIG26'\nEG3_NAME = 'EG26'\n\nenclosures_expected = [\n {\"type\": ENCLOSURE_TYPE, \"name\": \"wpst22\", \"state\": \"Configured\"},\n {\"type\": ENCLOSURE_TYPE, \"name\": \"wpst23\", \"state\": \"Configured\"},\n {\"type\": ENCLOSURE_TYPE, \"name\": \"wpst23\", \"state\": \"Configured\"},\n]\n\n# OVF1061\nOVF1061_SERVER1 = ENC1SHBAY14\nOVF1061_SERVER1_SERVER_NAME_1 = \"wpst22bay14-1\"\nOVF1061_SERVER1_SERVER_NAME_2 = \"wpst22bay14-2\"\nOVF1061_SERVER1_SERVER_NAME_3 = \"WPST22BAY14-OS\"\nOVF1061_SERVER2 = ENC2SHBAY2\nOVF1061_SERVER2_SERVER_NAME_1 = \"wpst23bay2-1\"\nOVF1061_SERVER2_SERVER_NAME_2 = \"wpst23bay2-2\"\nOVF1061_SERVER2_SERVER_NAME_3 = \"wpst23bay2-os\"\n\nserver_settings_1 = [\n {'name': OVF1061_SERVER1, 'ilo': '16.125.73.236', 'server_name': OVF1061_SERVER1_SERVER_NAME_1},\n {'name': OVF1061_SERVER2, 'ilo': '16.125.77.205', 'server_name': OVF1061_SERVER2_SERVER_NAME_1},\n]\n\nserver_settings_2 = [\n {'name': OVF1061_SERVER1, 'server_name': OVF1061_SERVER1_SERVER_NAME_2},\n {'name': OVF1061_SERVER2, 'server_name': OVF1061_SERVER2_SERVER_NAME_2},\n]\n\nserver_settings_3 = [\n {'name': OVF1061_SERVER1, 'server_name': OVF1061_SERVER1_SERVER_NAME_3},\n {'name': OVF1061_SERVER2, 'server_name': OVF1061_SERVER2_SERVER_NAME_3},\n]\n","sub_path":"robo4.2/fusion/tests/RIST/API/OVF1061_C7000/Data50.py","file_name":"Data50.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"161685672","text":"from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Listing\nfrom .constants import prices,states,bedrooms\n\ndef listings(request):\n\n listdata = Listing.objects.order_by('-list_date').filter(is_published=True)\n paginator = Paginator(listdata,1)\n page = request.GET.get('page')\n listdata = paginator.get_page(page)\n context = {\n \"listings\":listdata\n }\n return render(request,'listings/listings.html',context)\n\ndef listing(request,listing_id):\n\n listing = get_object_or_404(Listing,pk=listing_id)\n context = {\n 'listing' : listing\n }\n return render(request,'listings/listing.html',context)\n\ndef search(request):\n\n listings = Listing.objects.all()\n\n # 
Keyword Filter on Form\n if 'keywords' in request.GET:\n keywords = request.GET['keywords']\n if keywords:\n listings = listings.filter(description__icontains=keywords)\n\n context = {\n 'listings' : listings,\n 'states': states,\n 'bedrooms':bedrooms,\n 'prices':prices,\n 'values':request.GET\n }\n return render(request,'listings/search.html',context)\n","sub_path":"src/listings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"255213660","text":"#---------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n#---------------------------------------------------------------------------------------------\n\nfrom azure.cli.core._profile import (\n Profile,\n CLOUD\n)\nfrom azure.cli.core._config import az_config\nfrom azure.mgmt.resource.resources import ResourceManagementClient\nfrom azure.mgmt.storage import StorageManagementClient\n\nfrom azure.cli.core.commands.client_factory import (\n configure_common_settings,\n get_mgmt_service_client\n)\n\nfrom azure.cli.command_modules.acr.mgmt_acr import (\n ContainerRegistryManagementClient,\n ContainerRegistryManagementClientConfiguration,\n VERSION\n)\n\nimport azure.cli.core._logging as _logging\nlogger = _logging.get_az_logger(__name__)\n\ndef get_arm_service_client():\n '''Returns the client for managing ARM resources.\n '''\n return get_mgmt_service_client(ResourceManagementClient)\n\ndef get_storage_service_client():\n '''Returns the client for managing storage accounts.\n '''\n return get_mgmt_service_client(StorageManagementClient)\n\ndef get_acr_service_client():\n '''Returns the client for managing container registries.\n '''\n profile = Profile()\n credentials, subscription_id, _ = profile.get_login_credentials()\n\n config = ContainerRegistryManagementClientConfiguration(\n credentials,\n subscription_id,\n api_version=get_acr_api_version(),\n base_url=CLOUD.endpoints.resource_manager)\n client = ContainerRegistryManagementClient(config)\n\n configure_common_settings(client)\n\n return client\n\ndef get_acr_api_version():\n '''Returns the api version for container registry\n '''\n customized_api_version = az_config.get('acr', 'apiversion', None)\n if customized_api_version:\n logger.warning('Customized api-version is used: %s', customized_api_version)\n return customized_api_version or VERSION\n","sub_path":"src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_factory.py","file_name":"_factory.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"279925437","text":"# Sock_Merchant_EASY.py\n\n# https://www.hackerrank.com/challenges/sock-merchant/problem\n\n# https://www.hackerrank.com/challenges/sock-merchant/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=warmup\n\n# John works at a clothing store. He has a large pile of socks that he must pair by color for sale. Given an array of integers representing the color of each sock, determine how many pairs of socks with matching colors there are.\n\n# For example, there are socks with colors . There is one pair of color and one of color . There are three odd socks left, one of each color. 
The number of pairs is .\n\n# Function Description\n\n# Complete the sockMerchant function in the editor below. It must return an integer representing the number of matching pairs of socks that are available.\n\n# sockMerchant has the following parameter(s):\n\n# n: the number of socks in the pile\n# ar: the colors of each sock\n# Input Format\n\n# The first line contains an integer , the number of socks represented in .\n# The second line contains space-separated integers describing the colors of the socks in the pile.\n\n# Constraints\n\n# where\n# Output Format\n\n# Return the total number of matching pairs of socks that John can sell.\n\n# Sample Input\n\n# 9\n# 10 20 20 10 10 30 50 10 20\n# Sample Output\n\n# 3\n# Explanation\n\n# sock.png\n\n# John can match three pairs of socks.\n\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the sockMerchant function below.\ndef sockMerchant(n, ar):\n\n # Assumptions\n # n and ar are integers\n # both fit on a single machine\n\n # Approach\n # Since we only want num of pairs (not configuration) we can use a set\n # instead of a hashmap to count. We iterate through ar and check for\n # existence in the set. If it doesn't exist, add it. If it does exist\n # then remove it and add 1 to the counter of num pairs.\n\n # Complexity\n # Time O(N) since we check each item. Since set lookup, insertion & removal\n # is O(1) overall we are still O(N)\n # Space: O(N) since we store the items in a set. Less than n items but\n # still of order N. in the worst case where all socks are different we\n # will eventually store N items in the set.\n\n # Potential Improvements\n # we could do a two pointer approach in place and reduce the space\n # complexity to O(1) at the expense of time complexity. E.g. for each\n # sock, search the rest of the array for it's pair and if found pop both\n # out of the array and add 1 pair to counter. If no pair found pop the\n # unpaired sock. Continue until the array is empty.\n\n # Edge Cases\n # single item\n # no items (not given, see problem statement)\n\n # IMPLEMENTATION -----------------------------------------------------------\n unpaired_socks = set()\n num_pairs = 0\n for sock in ar:\n if sock not in unpaired_socks:\n unpaired_socks.add(sock)\n else:\n num_pairs += 1\n unpaired_socks.remove(sock)\n\n return num_pairs\n\n\nimport unittest\n\nclass TestSockMerchant(unittest.TestCase):\n\n def test_example_1(self):\n n = 9\n socks = [10, 20, 20, 10, 10, 30, 50, 10, 20]\n output = 3\n self.assertEqual(sockMerchant(n, socks), output)\n\n def test_example_2(self):\n n = 7\n socks = [1,2,1,2,1,3,2]\n output = 2\n self.assertEqual(sockMerchant(n, socks), output)\n\n def test_edge_cases(self):\n n = 1\n socks = [1]\n output = 0\n self.assertEqual(sockMerchant(n, socks), output)\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n # n = int(input())\n\n # ar = list(map(int, input().rstrip().split()))\n\n # result = sockMerchant(n, ar)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n unittest.main()\n","sub_path":"hackerrank/Sock_Merchant_EASY.py","file_name":"Sock_Merchant_EASY.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"258245219","text":"'''\r\nFixed XOR\r\n\r\nWrite a function that takes two equal-length buffers and produces their XOR\r\n combination. 
If your function works properly, then when you feed it the\r\n string:\r\n\r\n 1c0111001f010100061a024b53535009181c\r\n\r\n ... after hex decoding, and when XOR'd against:\r\n\r\n 686974207468652062756c6c277320657965\r\n\r\n ... should produce:\r\n\r\n 746865206b696420646f6e277420706c6179\r\n'''\r\n\r\nfrom binascii import hexlify, unhexlify\r\nfrom logbook import Logger, FileHandler\r\n\r\nFileHandler('set1.log').push_application()\r\nLOG = Logger('challenge2')\r\n\r\n\r\ndef fixed_xor(s1, s2, debug=False):\r\n if debug:\r\n us1 = unhexlify(s1)\r\n us2 = unhexlify(s2)\r\n LOG.debug('S1 Un-Hexed: {0}'.format(us1))\r\n LOG.debug('S2 Un-Hexed: {0}'.format(us2))\r\n\r\n new_chars = []\r\n for c1, c2 in zip(us1, us2):\r\n LOG.debug('c1: {0}'.format(c1))\r\n LOG.debug('c2: {0}'.format(c2))\r\n result = chr(c1 ^ c2)\r\n LOG.debug(result)\r\n new_chars.append(bytearray(result, 'ascii'))\r\n\r\n result = b''.join(new_chars)\r\n LOG.debug('Un-Hexed XOR: {0}'.format(result))\r\n final_result = hexlify(result)\r\n LOG.debug('Hexed Result: {0}'.format(final_result))\r\n return final_result\r\n\r\n return hexlify(b''.join(bytearray(chr(c1 ^ c2), 'ascii') for c1, c2 in zip(unhexlify(s1), unhexlify(s2))))\r\n\r\n\r\nif __name__ == '__main__':\r\n LOG.debug('====== CHALLENGE #2 | START ======')\r\n string_1 = '1c0111001f010100061a024b53535009181c'\r\n string_2 = '686974207468652062756c6c277320657965'\r\n LOG.debug('Hex-String 1: {0}'.format(string_1))\r\n LOG.debug('Hex-String 2: {0}'.format(string_2))\r\n result = fixed_xor(string_1, string_2)\r\n\r\n LOG.debug('Output: {0}'.format(result))\r\n assert (result == b'746865206b696420646f6e277420706c6179')\r\n LOG.notice('SUCCESS!')\r\n LOG.debug('======= CHALLENGE #2 | END =======\\n')\r\n","sub_path":"set1/challenge2.py","file_name":"challenge2.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"91873957","text":"#!/usr/bin/env python3\n\n'''\nRequirements:\n\n$ pip3 install pygithub\n\nGo to https://github.com/settings/tokens and make a personal access\ntoken with full permissions to the repo and org.\n\n'''\n\nimport csv\n\nfrom github import Github\nfrom pprint import pprint\n\n#--------------------------------------------------------------------\n\n# Read the oAuth token file.\n# (you will need to supply this file yourself -- see the comment at\n# the top of this file)\ntoken_filename = 'oauth-token.txt'\nwith open(token_filename, 'r') as f:\n token = f.read().strip()\ng = Github(token)\n\n#--------------------------------------------------------------------\n\nprint(\"Getting open-mpi organization...\")\norg = 'open-mpi'\nompi_org = g.get_organization(org)\n\n#--------------------------------------------------------------------\n\nprint(\"Loading organization repos...\")\nall_members = dict()\nrepos = dict()\nfor repo in ompi_org.get_repos():\n print(f\"Found Org Repo: {repo.name}\")\n\n if repo.archived:\n print(\"--> NOTE: This repo is archived\")\n\n # For each repo, get the teams on that repo\n repo_teams = dict()\n for team in repo.get_teams():\n out = f\" Found team on repo {ompi_org.name}/{repo.name}: {team.name} ({team.permission}) \"\n # We only care about teams with push permissions\n if team.permission == \"pull\":\n print(f\"{out} -- SKIPPED\")\n continue\n\n print(out)\n\n # Find all the members of this team\n team_members = dict()\n member_teams = dict()\n for member in team.get_members():\n print(f\" Found member: {member.login}\")\n 
team_members[member.id] = member\n\n if member.id not in all_members:\n all_members[member.id] = {\n 'member' : member,\n 'member_teams' : dict(),\n }\n\n # Find the member in the org and add this team to them\n all_members[member.id]['member_teams'][team.id] = team\n\n # Same the results\n repo_teams[team.id] = {\n 'team' : team,\n 'team_members' : team_members,\n }\n\n # Save the results\n repos[repo.id] = {\n 'repo' : repo,\n 'repo_teams' : repo_teams,\n }\n\nprint(\"All the repos:\")\npprint(repos)\npprint(all_members)\n\n#--------------------------------------------------------------------\n\n# Pre-load the field names with info about the user and repo\nfieldnames = ['login', 'name', 'email', 'company']\n\n# Add all the repo names\n#\n# Skip archived repos -- they're read-only, and thereare are\n# effectively just noise in the annual review process.\nrepo_names = list()\nfor rentry in repos.values():\n repo = rentry['repo']\n if not repo.archived:\n # Used to include the org name in here, but it was always\n # \"open-mpi\", and it just made the colun need to be wider.\n repo_names.append(repo.name)\n\nfieldnames.extend(sorted(repo_names))\n\n#--------------------------------------------------------------------\n\n# Now write out the CSV\noutfile = 'permissions.csv'\nprint(f\"Writing: {outfile}\")\nwith open(outfile, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames,\n quoting=csv.QUOTE_ALL)\n writer.writeheader()\n for mid, mentry in all_members.items():\n member = mentry['member']\n print(f\" Writing member: {member.login}\")\n\n # Initial entries about the user\n row = {\n 'login' : member.login,\n 'name' : member.name,\n 'email' : member.email,\n 'company' : member.company,\n }\n\n # Fill in values for each repo\n for _, rentry in repos.items():\n repo = rentry['repo']\n\n # Per above, skip archived repos\n if repo.archived:\n continue\n\n found = list()\n for tid, tentry in rentry['repo_teams'].items():\n if tid in mentry['member_teams']:\n team = tentry['team']\n found.append(team.name)\n\n row[repo.name] = ', '.join(found)\n\n writer.writerow(row)\n","sub_path":"administrative/annual-ompi-github-committer-review.py","file_name":"annual-ompi-github-committer-review.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"600615130","text":"import numpy as np\r\nimport numpy.linalg as la\r\nfrom matplotlib import pyplot as plt\r\n#import scipy.special as sp\r\n\r\nN = 30 #number of grid points\r\na = 0\r\nb = 2\r\nx,h = np.linspace(a,b,N,retstep = True)\r\n\r\nA=np.zeros((N,N))\r\nA[0,0] = 1\r\nA[-1,-1] = 1\r\nA[-1,-2] = -1\r\n\r\nB=np.zeros((N))\r\nB[0] = 0\r\nB[-1] = 0\r\n\r\n\r\nfor n in range(1,N-1):\r\n A[n,n-1] = h**(-2)\r\n A[n,n] = 9 - 2*h**(-2)\r\n A[n,n+1] = h**(-2)\r\n \r\n B[n] = x[n]\r\n \r\nyn = la.solve(A,B)\r\nyexact = (x/9)-(np.sin(3*x)/(27*np.cos(6)))\r\n\r\nplt.figure()\r\nplt.plot(x,yexact,x,yn,'r.')\r\nplt.show()\r\n\r\nprint(np.sqrt(np.mean((yn-yexact)**2)))","sub_path":"Lab 02 -- Differential Equations with Boundary Conditions/src/Problem 4a.py","file_name":"Problem 4a.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"329457769","text":"import random\n\nstr0 = \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\"\n\ndef func(st0):\n st1 = ''\n str_list = st0.split()\n for 
st in str_list:\n if len(st)>4:\n st1 += ran(st)\n else:\n st1 += st\n st1 += ' '\n return st1\n\ndef ran(st):\n list0 = []\n s_begin = st[0]\n s_end = st[-1]\n for s in st[1:-1]:\n list0.append(s)\n random.shuffle(list0)\n st = s_begin\n for s in list0:\n st += s\n return st+s_end\n\nprint(func(str0))\n","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"53837221","text":"# -*- coding: utf-8 -*-\n\n\nfrom __future__ import (absolute_import, unicode_literals)\n\n\n\"\"\"\nA Python Singleton mixin class that makes use of some of the ideas\nfound at http://c2.com/cgi/wiki?PythonSingleton. Just inherit\nfrom it and you have a singleton. No code is required in\nsubclasses to create singleton behavior -- inheritance from\nSingleton is all that is needed.\n\nSingleton creation is threadsafe.\n\nUSAGE:\n\nJust inherit from Singleton. If you need a constructor, include\nan __init__() method in your class as you usually would. However,\nif your class is S, you instantiate the singleton using S.get_instance()\ninstead of S(). Repeated calls to S.get_instance() return the\noriginally-created instance.\n\nFor example:\n\n\nclass S(Singleton):\n\n def __init__(self, a, b=1):\n pass\n\nS1 = S.get_instance(1, b=3)\n\n\nMost of the time, that\"s all you need to know. However, there are some\nother useful behaviors. Read on for a full description:\n\n1) Getting the singleton:\n\n S.get_instance()\n\nreturns the instance of S. If none exists, it is created.\n\n3) Use __S.__init__() for instantiation processing,\nsince S.get_instance() runs S.__init__(), passing it the args it has received.\n\nIf no data needs to be passed in at instantiation time, you don\"t need S.__init__().\n\n4) If S.__init__(.) requires parameters, include them ONLY in the\nfirst call to S.get_instance(). If subsequent calls have arguments,\na SingletonException is raised by default.\n\nIf you find it more convenient for subsequent calls to be allowed to\nhave arguments, but for those argumentsto be ignored, just include\n\"ignore_subsequent = True\" in your class definition, i.e.:\n\n class S(Singleton):\n\n ignore_subsequent = True\n\n def __init__(self, a, b=1):\n pass\n\n5) For testing, it is sometimes convenient for all existing singleton\ninstances to be forgotten, so that new instantiations can occur. For that\nreason, a forget_all_singletons() function is included. Just call\n\n forget_all_singletons()\n\nand it is as if no earlier instantiations have occurred.\n\n6) As an implementation detail, classes that inherit\nfrom Singleton may not have their own __new__\nmethods. To make sure this requirement is followed,\nan exception is raised if a Singleton subclass includ\nes __new__. This happens at subclass instantiation\ntime (by means of the MetaSingleton metaclass.\n\n\nBy Gary Robinson, grobinson@flyfi.com. No rights reserved --\nplaced in the public domain -- which is only reasonable considering\nhow much it owes to other people\"s code and ideas which are in the\npublic domain. The idea of using a metaclass came from\na comment on Gary\"s blog (see\nhttp://www.garyrobinson.net/2004/03/python_singleto.html#comments).\nOther improvements came from comments and email from other\npeople who saw it online. (See the blog post and comments\nfor further credits.)\n\nNot guaranteed to be fit for any particular purpose. 
Use at your\nown risk.\n\"\"\"\n\nimport threading\n\nclass SingletonException(Exception):\n pass\n\n_st_singletons = set()\n_lock_for_singletons = threading.RLock()\n_lock_for_singleton_creation = threading.RLock() # Ensure only one instance of each Singleton\n # class is created. This is not bound to the \n # individual Singleton class since we need to\n # ensure that there is only one mutex for each\n # Singleton class, which would require having\n # a lock when setting up the Singleton class,\n # which is what this is anyway. So, when any\n # Singleton is created, we lock this lock and\n # then we don\"t need to lock it again for that\n # class.\n\ndef _create_singleton_instance(cls, args, kw_args):\n _lock_for_singleton_creation.acquire()\n try:\n if cls._is_instantiated(): # some other thread got here first\n return\n\n instance = cls.__new__(cls)\n try:\n instance.__init__(*args, **kw_args)\n except TypeError as e:\n if e.message.find(\"__init__() takes\") != -1:\n raise SingletonException(\"If the singleton requires __init__ \"\\\n \"args, supply them on only on the first call to \"\\\n \"get_instance().\")\n else:\n raise\n cls.c_instance = instance\n _add_singleton(cls)\n finally:\n _lock_for_singleton_creation.release()\n\ndef _add_singleton(cls):\n _lock_for_singletons.acquire()\n try:\n assert cls not in _st_singletons\n _st_singletons.add(cls)\n finally:\n _lock_for_singletons.release()\n\ndef _remove_singleton(cls):\n _lock_for_singletons.acquire()\n try:\n if cls in _st_singletons:\n _st_singletons.remove(cls)\n finally:\n _lock_for_singletons.release()\n\ndef forget_all_singletons():\n \"\"\"This is useful in tests, since it is hard to know which singletons need\n to be cleared to make a test work.\"\"\"\n _lock_for_singletons.acquire()\n try:\n for cls in _st_singletons.copy():\n cls._forget_class_instance_reference_for_testing()\n\n # Might have created some Singletons in the process of tearing down.\n # Try one more time - there should be a limit to this.\n i_num_singletons = len(_st_singletons)\n if len(_st_singletons) > 0:\n for cls in _st_singletons.copy():\n cls._forget_class_instance_reference_for_testing()\n i_num_singletons -= 1\n assert i_num_singletons == len(_st_singletons),\\\n \"Added a singleton while destroying \" + str(cls)\n assert len(_st_singletons) == 0, _st_singletons\n finally:\n _lock_for_singletons.release()\n\nclass MetaSingleton(type):\n def __new__(metaclass, name, bases, dct):\n if \"__new__\" in dct:\n raise SingletonException(\"Can not override __new__ in a Singleton\")\n return super(MetaSingleton, metaclass).__new__(metaclass, name, bases, dct)\n\n def __call__(cls, *args, **kw_args):\n raise SingletonException(\"Singletons may only be instantiated through\"\\\n \" get_instance()\")\n\nclass Singleton(object):\n __metaclass__ = MetaSingleton\n\n def get_instance(cls, *args, **kw_args):\n \"\"\"\n Call this to instantiate an instance or retrieve the existing instance.\n If the singleton requires args to be instantiated, include them the first\n time you call get_instance.\n \"\"\"\n if cls._is_instantiated():\n if (args or kw_args) and not hasattr(cls, \"ignore_subsequent\"):\n raise SingletonException(\"Singleton already instantiated, but\"\\\n \" get_instance() called with args.\")\n else:\n _create_singleton_instance(cls, args, kw_args)\n return cls.c_instance\n get_instance = classmethod(get_instance)\n\n def _is_instantiated(cls):\n # Don\"t use hasattr(cls, \"c_instance\"), because that screws things up if there is a singleton that\n 
# extends another singleton. hasattr looks in the base class if it doesn\"t find in subclass.\n return \"c_instance\" in cls.__dict__\n _is_instantiated = classmethod(_is_instantiated)\n\n # This can be handy for public use also\n is_instantiated = _is_instantiated\n\n def _forget_class_instance_reference_for_testing(cls):\n \"\"\"\n This is designed for convenience in testing -- sometimes you\n want to get rid of a singleton during test code to see what\n happens when you call get_instance() under a new situation.\n\n To really delete the object, all external references to it\n also need to be deleted.\n \"\"\"\n try:\n if hasattr(cls.c_instance, \"_prepare_to_forget_singleton\"):\n # tell instance to release anything it might be holding onto.\n cls.c_instance._prepare_to_forget_singleton()\n del cls.c_instance\n _remove_singleton(cls)\n except AttributeError:\n # run up the chain of base classes until we find the one that has the instance\n # and then delete it there\n for base_cls in cls.__bases__: \n if issubclass(base_cls, Singleton):\n base_cls._forget_class_instance_reference_for_testing()\n _forget_class_instance_reference_for_testing = classmethod(_forget_class_instance_reference_for_testing)\n\n","sub_path":"pyorganism/singletonmixin.py","file_name":"singletonmixin.py","file_ext":"py","file_size_in_byte":8714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"199732214","text":"from duckql.properties import Property\nfrom duckql.structures.cast_operator import CastOperator\n\n\ndef test_simple():\n my_structure = CastOperator(\n property=Property(name='users.age'),\n to=CastOperator.DataType.VARCHAR,\n alias='age_as_string'\n )\n\n assert str(my_structure) == 'users.age::varchar AS age_as_string'\n","sub_path":"duckql/structures/tests/test_cast_operator.py","file_name":"test_cast_operator.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"243257348","text":"from flask_restful import Resource,reqparse\r\nfrom .model import Attendants_data\r\n\r\nclass Attendants(Resource):\r\n\r\n\tdef __init__(self):\r\n\t\tself.__reqparser = reqparse.RequestParser()\r\n\t\tself.__args = dict()\r\n\r\n\tdef get(self):\r\n\t\tretval = dict()\r\n\t\tstatus = 200\r\n\r\n\t\tservices = Attendants_data()\r\n\t\tservices.get_attendants()\r\n\t\tresult = services.get_data()\r\n\t\tretval = result\r\n\r\n\t\treturn retval,status\r\n\r\n\tdef post(self):\r\n\t\tstatus = 201\r\n\r\n\t\targs_list = [('hiredate',str,'json',None,False),\r\n\t\t\t\t\t ('attendant_name',str,'json',None,False),\r\n\t\t\t\t\t ('allowance',int,'json',None,False),\r\n\t\t\t\t\t ('mobilenumber',str,'json',None,False),\r\n\t\t\t\t\t ('position',str,'json',None,False),\r\n\t\t\t\t\t ('address',str,'json',None,False)]\r\n\r\n\t\tfor args in args_list:\r\n\t\t\tself.__reqparser.add_argument(args[0],type=args[1],location=args[2],default=args[3],required=args[4])\r\n\r\n\t\tself.__args = self.__reqparser.parse_args()\r\n\r\n\t\tservices = Attendants_data()\r\n\r\n\t\tservices.insert_Attendant(**self.__args)\r\n\r\n\t\treturn status\r\n\r\n\tdef delete(self):\r\n\t\tstatus = 204\r\n\r\n\t\targs_list = [('attendantid', int, 'args', 'None', True)]\r\n\r\n\t\tfor args in args_list:\r\n\t\t\tself.__reqparser.add_argument(args[0],type=args[1],location=args[2],default=args[3],required=args[4])\r\n\r\n\t\tself.__args = self.__reqparser.parse_args()\r\n\r\n\r\n\t\tservices = 
Attendants_data(**self.__args)\r\n\t\tservices.get_attendants()\r\n\t\tresult = services.del_attendants()\r\n\r\n\t\treturn status\r\n\r\n\tdef put(self):\r\n\t\tretval = dict()\r\n\t\tstatus = 200\r\n\r\n\t\tupdate_args = dict()\r\n\r\n\t\targs_update_list = [('attendantid', int, 'json', 'None', True),\r\n\t\t\t\t\t\t\t('position', str, 'json', 'None', True),\r\n\t\t\t\t\t\t\t('allowance', int, 'json', 'None', True)]\r\n\r\n\t\tfor args in args_update_list:\r\n\t\t\tself.__reqparser.add_argument(args[0],type=args[1],location=args[2],default=args[3],required=args[4])\r\n\r\n\t\tupdate_args = self.__reqparser.parse_args()\r\n\r\n\t\tservices = Attendants_data()\r\n\t\tresult = services.edit_attendant(**update_args)\r\n\r\n\t\tif result:\r\n\t\t\tretval['data'] = [{'Message':'Update Complete'}]\r\n\t\telse:\r\n\t\t\tretval['data'] = [{'Message':'Update Failed'}]\r\n\r\n\t\treturn retval,status\r\n","sub_path":"Banahaw_app/BanahawApp/Attendants/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"585236837","text":"from girder_worker.plugins.vtk import vtkrow_to_dict\nimport vtk\n\n\ndef process_node(vtknode, node):\n num_children = input.GetNumberOfChildren(vtknode)\n if num_children > 0:\n node['children'] = []\n for c in range(num_children):\n vtkchild = input.GetChild(vtknode, c)\n v = vtkrow_to_dict(input.GetVertexData(), vtkchild)\n edge = vtk.vtkGraphEdge()\n input.GetInEdge(vtkchild, 0, edge)\n vtkparentedge = edge.GetId()\n e = vtkrow_to_dict(input.GetEdgeData(), vtkparentedge)\n n = {'edge_data': e, 'node_data': v}\n process_node(vtkchild, n)\n node['children'].append(n)\n\n\nnode_fields = []\nfor c in range(input.GetVertexData().GetNumberOfArrays()):\n node_fields.append(input.GetVertexData().GetAbstractArray(c).GetName())\n\nedge_fields = []\nfor c in range(input.GetEdgeData().GetNumberOfArrays()):\n edge_fields.append(input.GetEdgeData().GetAbstractArray(c).GetName())\n\nvtkroot = input.GetRoot()\n\noutput = {\n 'node_fields': node_fields,\n 'edge_fields': edge_fields,\n 'node_data': vtkrow_to_dict(input.GetVertexData(), vtkroot)\n}\n\nprocess_node(vtkroot, output)\n","sub_path":"girder_worker/plugins/vtk/converters/tree/vtktree_to_nested.py","file_name":"vtktree_to_nested.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"154781544","text":"import cv2\nfrom collections import Counter\nimport numpy as np\n\nimg = cv2.imread(\"apple.jpg\", 1)\n\ncount = Counter()\nfor i in img[:,::5,:]:\n for pix in i:\n count[tuple(pix)] += 1\n\ndomin = count.most_common(1)[0]\n\nprint(\"Dominant RGB color: {}\".format(domin[0][::-1]))\n\nc = []\nfor i in range(300):\n c.append([domin[0] for j in range(300)])\nsquare = np.array(c)\n\ncv2.imshow(\"Dominant color\", square)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Dominant-color/dominant_color.py","file_name":"dominant_color.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"319734953","text":"from torchvision.transforms import transforms\nfrom torch.utils.data import DataLoader\nfrom floqlind.routines.routines import get_device, train_val_dataset\nfrom floqlind.routines.model import build_model_base, params_to_learn\nfrom floqlind.routines.dataset import FloqLindDataset\nimport torch.optim as optim\nimport 
torch\nimport torch.nn as nn\nfrom floqlind.routines.train import train_regression_model\nfrom floqlind.routines.infrastructure import get_path\nimport numpy as np\nimport os\nimport re\n\n\nif __name__ == '__main__':\n\n device = get_device()\n\n system_train = 'two_spins'\n size = 16\n\n path_train = get_path() + f'/dl/datasets/floquet_lindbladian/{system_train}'\n suffix_train = 'ampl(0.5000_2.0000_50)_freq(0.0500_0.2000_50)_phase(0.0000_0.0000_0)'\n\n feature_type = 'prop'\n transforms_type = 'regular'\n label_type = 'log'\n\n # Models to choose from [resnet, vgg, densenet, inception, resnet50_2D]\n model_name = \"resnet\"\n model_dir = f'{path_train}/regression/{model_name}_{feature_type}_{transforms_type}_{label_type}_{suffix_train}'\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n num_classes = 1\n feature_extract = False\n\n batch_size = 32\n num_epochs = 200\n\n is_continue = True\n\n model, input_size = build_model_base(model_name, num_classes, feature_extract, use_pretrained=True)\n # Send the model to GPU\n model = model.to(device)\n # optimizer= optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n optimizer = optim.Adam(model.parameters(), lr=0.005)\n\n train_loss_history = []\n val_loss_history = []\n epoch_last = 0\n best_loss = np.inf\n if is_continue:\n epoches = []\n for file in os.listdir(model_dir):\n if re.match('checkpoint_[-+]?[0-9]+.pth', file):\n epoches = epoches + re.findall(r'\\d+', file)\n if len(epoches) > 0:\n epoches = list(map(int, epoches))\n last_epoch = max(epoches)\n checkpoint = torch.load(f'{model_dir}/checkpoint_{last_epoch}.pth')\n epoch_last = checkpoint['epoch']\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n train_loss_history = checkpoint['train_loss']\n val_loss_history = checkpoint['val_loss']\n best_loss = checkpoint['best_loss']\n print(model)\n\n # define transforms\n if transforms_type == 'regular':\n transforms_regular = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((input_size, input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n elif transforms_type == 'noNorm':\n transforms_regular = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((input_size, input_size)),\n transforms.ToTensor(),\n ])\n else:\n raise ValueError(f'Unsupported transforms_type: {transforms_type}')\n\n dataset = FloqLindDataset(path_train, size, suffix_train, feature_type, label_type, transforms_regular)\n\n datasets_dict = train_val_dataset(dataset, 0.20, seed=1337)\n\n dataloaders_dict = {\n x: torch.utils.data.DataLoader(datasets_dict[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']\n }\n\n params_to_learn(model, feature_extract)\n\n criterion = nn.MSELoss()\n\n model, best_loss_curr, train_loss_history_curr, val_loss_history_curr = train_regression_model(device,\n model,\n dataloaders_dict,\n criterion,\n optimizer,\n best_loss,\n num_epochs=num_epochs,\n is_inception=(model_name == \"inception\")\n )\n train_loss_history += train_loss_history_curr\n val_loss_history += val_loss_history_curr\n\n torch.save({\n 'epoch': num_epochs + epoch_last,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'train_loss': train_loss_history,\n 'val_loss': val_loss_history,\n 'best_loss': best_loss_curr\n }, f'{model_dir}/checkpoint_{num_epochs + epoch_last}.pth')\n\n np.savetxt(f'{model_dir}/train_loss_{num_epochs 
+ epoch_last}.txt', np.asarray(train_loss_history), fmt='%0.16e')\n np.savetxt(f'{model_dir}/val_loss_{num_epochs + epoch_last}.txt', np.asarray(val_loss_history), fmt='%0.16e')\n","sub_path":"floqlind/images/regression_train_val.py","file_name":"regression_train_val.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"242429287","text":"\"\"\"\nIn LOL world, there is a hero called Teemo and his attacking can make his \nenemy Ashe be in poisoned condition. Now, given the Teemo's attacking ascending\ntime series towards Ashe and the poisoning time duration per Teemo's attacking,\nyou need to output the total time that Ashe is in poisoned condition.\n\nYou may assume that Teemo attacks at the very beginning of a specific time \npoint, and makes Ashe be in poisoned condition immediately.\n\nExample 1:\nInput: [1,4], 2\nOutput: 4\nExplanation: At time point 1, Teemo starts attacking Ashe and makes Ashe \nbe poisoned immediately. This poisoned status will last 2 seconds until \nthe end of time point 2. \nAnd at time point 4, Teemo attacks Ashe again, and causes Ashe to be \nin poisoned status for another 2 seconds. \nSo you finally need to output 4.\n\"\"\"\n\n\nclass Solution(object):\n def findPoisonedDuration(self, timeSeries, duration):\n \"\"\"\n :type timeSeries: List[int]\n :type duration: int\n :rtype: int\n \"\"\"\n length = 0\n for index,time in enumerate(timeSeries):\n if index + 1 < len(timeSeries):\n if timeSeries[index+1] >= time + duration:\n length += duration\n continue\n else:\n length += timeSeries[index+1] - time\n continue\n else:\n length += duration\n break\n return length\n","sub_path":"495TeemoAttacking.py","file_name":"495TeemoAttacking.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"307516997","text":"import numpy as np\nimport cv2\n\nfrom dataset_utils import load_folder_data, load_test_vali_data\n\n\ndef canny(image):\n image = image.astype(np.uint8)\n return cv2.Canny(image, 50, 200)\n\n\n\ndef enhance_data(mask):\n list = []\n for i in range(mask.shape[0]):\n list.append(np.expand_dims(mask[i], 2).repeat(3, axis=2))\n\n return np.array(list,dtype=np.float32)/255.\n\n\ndef generate_data():\n\n train_damage, place_holder1 = load_folder_data('archive/train_another/damage/', damaged=True) # list\n train_no_damage,place_holder2=load_folder_data('archive/train_another/no_damage/',damaged=False) #list\n train_images=np.array(train_damage+train_no_damage,dtype=np.float32)\n train_labels=np.array(place_holder1+place_holder2,dtype=np.float32)\n\n test_images, test_labels, test_another_images, test_another_labels, vali_images, vali_labels = load_test_vali_data()\n\n train_mask=np.array(list(map(lambda x: canny(x), train_images)),dtype=np.float32)\n vali_mask=np.array(list(map(lambda x: canny(x),vali_images)),dtype=np.float32)\n test_mask = np.array(list(map(lambda x: canny(x), test_images)), dtype=np.float32)\n test_anther_mask = np.array(list(map(lambda x: canny(x), test_another_images)), dtype=np.float32)\n\n return enhance_data(train_mask),train_labels,enhance_data(vali_mask),\\\n vali_labels,enhance_data(test_mask),test_labels,enhance_data(test_anther_mask),\\\n test_another_labels\n\nif __name__ == '__main__':\n train_mask,train_labels,vali_mask,vali_labels,test_mask,test_labels,test_another_mask,test_another_labels=generate_data()\n from matplotlib import pyplot as plt\n 
plt.imshow(train_mask[1000])\n plt.show()\n","sub_path":"EdgeDetection.py","file_name":"EdgeDetection.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"240636845","text":"\"\"\"\nEnum of all available ONS list_types and default type filters associated with them\n\"\"\"\nfrom enum import Enum\nfrom typing import List, Iterable\n\nfrom dp_conceptual_search.ons.search.type_filter import AvailableTypeFilters, TypeFilter\n\n\nclass ListType(Enum):\n # Default list_type - default to all available type filters\n ONS: List[AvailableTypeFilters] = list(AvailableTypeFilters)\n\n # Returns datasets only\n ONSDATA: List[AvailableTypeFilters] = [\n AvailableTypeFilters.DATASETS,\n AvailableTypeFilters.TIME_SERIES,\n AvailableTypeFilters.USER_REQUESTED_DATA\n ]\n\n # Returns publications only\n ONSPUBLICATIONS: List[AvailableTypeFilters] = [\n AvailableTypeFilters.BULLETIN,\n AvailableTypeFilters.COMPENDIA,\n AvailableTypeFilters.ARTICLE\n ]\n\n @property\n def value(self) -> Iterable['AvailableTypeFilters']:\n \"\"\"\n Implements 'value' from super with additional type information\n :return:\n \"\"\"\n return super(ListType, self).value\n\n @staticmethod\n def available_list_types() -> List[str]:\n \"\"\"\n Returns a list of all available list types\n :return:\n \"\"\"\n return [f.name for f in ListType]\n\n @staticmethod\n def is_list_type(label: str) -> bool:\n \"\"\"\n Returns True is string is a valid ListType, else False. Makes case insensitive comparison.\n :param label:\n :return:\n \"\"\"\n return label.upper() in ListType.available_list_types()\n\n @staticmethod\n def from_str(label: str) -> 'ListType':\n \"\"\"\n Returns the enum type corresponding to the input string\n :param label:\n :return:\n \"\"\"\n\n if ListType.is_list_type(label):\n return ListType[label.upper()]\n else:\n raise NotImplementedError(\"No such ListType for string: '{0}'\".format(label))\n\n def to_type_filters(self) -> List[TypeFilter]:\n \"\"\"\n Helper function for converting from a list of AvailableTypeFilters to a list of TypeFilter\n :return:\n \"\"\"\n type_filters: List[TypeFilter] = []\n\n type_filter: AvailableTypeFilters\n for type_filter in self.value:\n type_filters.append(type_filter.value)\n\n return type_filters\n","sub_path":"dp_conceptual_search/api/search/list_type.py","file_name":"list_type.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"252916389","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0019_verbose_names_cleanup'),\n ('home', '0015_auto_20150918_1045'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BrewingPage',\n fields=[\n ('page_ptr', models.OneToOneField(auto_created=True, parent_link=True, primary_key=True, to='wagtailcore.Page', serialize=False)),\n ],\n options={\n 'abstract': False,\n },\n bases=('wagtailcore.page',),\n ),\n migrations.CreateModel(\n name='CoffeePage',\n fields=[\n ('page_ptr', models.OneToOneField(auto_created=True, parent_link=True, primary_key=True, to='wagtailcore.Page', serialize=False)),\n ],\n options={\n 'abstract': False,\n },\n bases=('wagtailcore.page',),\n ),\n migrations.CreateModel(\n name='MerchandisePage',\n fields=[\n ('page_ptr', 
models.OneToOneField(auto_created=True, parent_link=True, primary_key=True, to='wagtailcore.Page', serialize=False)),\n ],\n options={\n 'abstract': False,\n },\n bases=('wagtailcore.page',),\n ),\n migrations.AlterField(\n model_name='blogpage',\n name='date',\n field=models.DateField(default=datetime.datetime(2015, 9, 18, 23, 49, 43, 488430, tzinfo=utc), verbose_name='Post Date'),\n ),\n ]\n","sub_path":"home/migrations/0016_auto_20150918_1649.py","file_name":"0016_auto_20150918_1649.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"545024386","text":"def mergeSort(A, result, start, end):\n print('Processing ' + str(start) + ' to ' + str(end - 1))\n if end - start < 2:\n return\n if end - start == 2:\n if result[start] > result[start + 1]:\n print('Swapping.. ' + str(start) + ' nd ' + str(start + 1))\n result[start], result[start + 1] = result[start + 1], result[start]\n print(str(id(result)) + ':' + str(result))\n return\n mid = (start + end) // 2\n mergeSort(result, A, start, mid)\n mergeSort(result, A, mid, end)\n idx = start\n i = start\n j = mid\n print(str(id(A)) + ':' + str(A))\n print(str(id(result)) + ':' + str(result))\n print('Merging... ' + str(start) + ' to ' + str(end - 1))\n while idx < end:\n if j >= end or (i < mid and A[i] < A[j]):\n result[idx] = A[i]\n i = i + 1\n else:\n result[idx] = A[j]\n j = j + 1\n idx = idx + 1\n print(str(id(result)) + ':' + str(result))\n print('Merge done.')\n\n\narr = [8, 2, 6, 4, 9, 3, 7, 5, 1, 12, 10, 19, 14]\ncopy = list(arr)\nprint(str(id(arr)) + ':' + str(arr))\nprint(str(id(copy)) + ':' + str(copy))\nmergeSort(copy, arr, 0, len(arr))\nprint(str(id(arr)) + ':' + str(arr))\nprint(str(id(copy)) + ':' + str(copy))\n","sub_path":"Sorting/Merge.py","file_name":"Merge.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"388925660","text":"from socket import * \nimport numpy as np\nimport RPi.GPIO as GPIO\nimport time\nimport serial\nimport subprocess\nimport webbrowser, os, sys\nfrom imutils.video import VideoStream\nimport requests\nimport json\n# import out_home\nfrom image import popup\nfrom motorapp import *\nfrom BG_Estimation import *\n# new_out=out_home.out\n# patient_id=new_out[0][0]\npatient_id=5\n##initial dictionary\nx = {'patientid':0 , 'heartrate':0, 'temprature':0 ,'spo':0, 'bloodglucose':0}\nser = serial.Serial(\n port='/dev/ttyS0',\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\nMAX30100_StartFlag = '1'\nSPO2_Flag = '2'\nHR_Flag = '3'\nTempTip_StartFlag = '4'\nTempTip_BodyFlag = '5'\nTempTip_AmbFlag = '6'\nTempRest_StartFlag = '7'\nTempRest_BodyFlag = '8'\nTempRest_AmbFlag = '9'\n\nMAX30100_StartFlag_CHECK = 49\nSPO2_Flag_CHECK = 50\nHR_Flag_CHECK = 51\nTempTip_StartFlag_CHECK = 52\nTempTip_BodyFlag_CHECK = 53\nTempTip_AmbFlag_CHECK = 54\nTempRest_StartFlag_CHECK = 55\nTempRest_BodyFlag_CHECK = 56\nTempRest_AmbFlag_CHECK = 57\n\nHR_Data = 0\nSPO2_Data = 0\nBodyTemp_Data = 0\nAmbTemp_Data = 0\nBodyRestTemp_Data = 0\nAmbRestTemp_Data = 0\nBG=0\nd2=0\n# mode=GPIO.getmode()\n# \n# \n# Forward1=8\n# Backward1=11\n# Forward2=25\n# Backward2=9\n# sleeptime=1\n# \n# Forward3=17\n# Backward3=18\n# Forward4=27\n# Backward4=22\n# \n# GPIO.setmode(GPIO.BCM)\n# GPIO.setwarnings(False)\n# GPIO.setup(Forward1,GPIO.OUT)\n# GPIO.setup(Backward1,GPIO.OUT)\n# 
GPIO.setup(Forward2,GPIO.OUT)\n# GPIO.setup(Backward2,GPIO.OUT)\n# GPIO.setup(Forward3,GPIO.OUT)\n# GPIO.setup(Backward3,GPIO.OUT)\n# GPIO.setup(Forward4,GPIO.OUT)\n# GPIO.setup(Backward4,GPIO.OUT)\n# GPIO.output(Forward1,GPIO.LOW)\n# GPIO.output(Backward1,GPIO.LOW)\n# GPIO.output(Forward2,GPIO.LOW)\n# GPIO.output(Backward2,GPIO.LOW)\n# GPIO.output(Forward3,GPIO.LOW)\n# GPIO.output(Backward3,GPIO.LOW)\n# GPIO.output(Forward4,GPIO.LOW)\n# GPIO.output(Backward4,GPIO.LOW)\n# \n# def backward():\n# \n# GPIO.output(Backward1,GPIO.HIGH)\n# GPIO.output(Forward1,GPIO.LOW)\n# GPIO.output(Backward2,GPIO.LOW)\n# GPIO.output(Forward2,GPIO.HIGH)\n# \n# GPIO.output(Backward3,GPIO.HIGH)\n# GPIO.output(Forward3,GPIO.LOW)\n# GPIO.output(Backward4,GPIO.LOW)\n# GPIO.output(Forward4,GPIO.HIGH) \n# def forward():\n# GPIO.output(Backward1,GPIO.LOW)\n# GPIO.output(Forward1,GPIO.HIGH)\n# GPIO.output(Backward2,GPIO.HIGH)\n# GPIO.output(Forward2,GPIO.LOW)\n# \n# GPIO.output(Backward3,GPIO.LOW)\n# GPIO.output(Forward3,GPIO.HIGH)\n# GPIO.output(Backward4,GPIO.HIGH)\n# GPIO.output(Forward4,GPIO.LOW)\n# \n# def Rightf():\n# GPIO.output(Backward4,GPIO.HIGH)\n# GPIO.output(Forward4,GPIO.LOW)\n# GPIO.output(Backward1,GPIO.LOW)\n# GPIO.output(Forward1,GPIO.HIGH)\n# \n# def Rightb():\n# GPIO.output(Backward4,GPIO.LOW)\n# GPIO.output(Forward4,GPIO.HIGH)\n# GPIO.output(Backward1,GPIO.HIGH)\n# GPIO.output(Forward1,GPIO.LOW)\n# \n# \n# \n# def Leftf():\n# \n# GPIO.output(Backward3,GPIO.LOW)\n# GPIO.output(Forward3,GPIO.HIGH)\n# GPIO.output(Backward2,GPIO.HIGH)\n# GPIO.output(Forward2,GPIO.LOW)\n# \n# def Leftb():\n# GPIO.output(Backward3,GPIO.HIGH)\n# GPIO.output(Forward3,GPIO.LOW)\n# GPIO.output(Backward2,GPIO.LOW)\n# GPIO.output(Forward2,GPIO.HIGH)\n# def stop():\n# \n# GPIO.output(Backward1,GPIO.LOW)\n# GPIO.output(Forward1,GPIO.LOW)\n# \n# GPIO.output(Backward4,GPIO.LOW)\n# GPIO.output(Forward4,GPIO.LOW)\n# \n# GPIO.output(Backward2,GPIO.LOW)\n# GPIO.output(Forward2,GPIO.LOW)\n# \n# GPIO.output(Backward3,GPIO.LOW)\n# GPIO.output(Forward3,GPIO.LOW)\ndef MAX30100_CheckFlag(StartFlagMAX30,HR_DataFlagMAX30,SPO2_DataFlagMAX30,\n StartFlagCheckMAX30,HR_DataFlagCheckMAX30,SPO2_DataFlagCheckMAX30):\n# window_1=popup().alert_popup(\"Follow instructions\",\"hr-spo2.jpeg\")\n global HR_Data\n global SPO2_Data\n ser.write(StartFlagMAX30.encode())\n# f=f+1\n time.sleep(1)\n Recive_StartFlagMAX30=ser.read()\n Recive_StartFlagMAX30=int.from_bytes(Recive_StartFlagMAX30,byteorder='big')\n\n print('startflag=',Recive_StartFlagMAX30)\n if(Recive_StartFlagMAX30 == StartFlagCheckMAX30):\n print(\"ok\")\n ser.write(HR_DataFlagMAX30.encode())\n time.sleep(1)\n Recive_HR_DataFlagMAX30 = ser.read()\n Recive_HR_DataFlagMAX30 = int.from_bytes(Recive_HR_DataFlagMAX30,byteorder='big')\n print('HRflag=',Recive_HR_DataFlagMAX30)\n time.sleep(1)\n HR_Data = ser.read()\n HR_Data = int.from_bytes(HR_Data,byteorder='big')\n print('HRdata=',HR_Data)\n if(Recive_HR_DataFlagMAX30 == HR_DataFlagCheckMAX30):\n print(\"yes\")\n s=ser.write(SPO2_DataFlagMAX30.encode())\n time.sleep(1)\n Recive_SPO2_DataFlagMAX30 = ser.read()\n Recive_SPO2_DataFlagMAX30 = int.from_bytes(Recive_SPO2_DataFlagMAX30,byteorder='big')\n print('spo2flag=',Recive_SPO2_DataFlagMAX30)\n time.sleep(1)\n Recive_SPO2_DataFlagMAX30 = ser.read()\n Recive_SPO2_DataFlagMAX30 = int.from_bytes(Recive_SPO2_DataFlagMAX30,byteorder='big')\n print('spo2flaggg=',Recive_SPO2_DataFlagMAX30)\n time.sleep(1)\n SPO2_Data = ser.read()\n SPO2_Data = int.from_bytes(SPO2_Data,byteorder='big')\n 
print('spo2data=',SPO2_Data)\n if(Recive_SPO2_DataFlagMAX30 == SPO2_DataFlagCheckMAX30):\n print(\"done\")\n \ndef TempTip_CheckFlag(StartFlagMAX90,BodyTemp_DataFlagMAX90,AmbTemp_DataFlagMAX90,\n StartFlagCheckMAX90,BodyTemp_DataFlagCheckMAX90,AmbTemp_DataFlagCheckMAX90):\n# window_3=popup().alert_popup(\"Follow instructions\",\"temp.jpeg\")\n global BodyTemp_Data\n global AmbTemp_Data\n ser.write(StartFlagMAX90.encode())\n# f=f+1\n time.sleep(1)\n Recive_StartFlagMAX90=ser.read()\n Recive_StartFlagMAX90=int.from_bytes(Recive_StartFlagMAX90,byteorder='big')\n#\n# f1=ord (recive)\n# print(recive)\n print('startflagtemp=',Recive_StartFlagMAX90)\n if(Recive_StartFlagMAX90 == StartFlagCheckMAX90):\n print(\"ok\")\n ser.write(BodyTemp_DataFlagMAX90.encode())\n time.sleep(1)\n Recive_BodyTemp_DataFlagMAX90 = ser.read()\n Recive_BodyTemp_DataFlagMAX90 = int.from_bytes(Recive_BodyTemp_DataFlagMAX90,byteorder='big')\n print('bodytempflag=',Recive_BodyTemp_DataFlagMAX90)\n time.sleep(1)\n BodyTemp_Data = ser.read()\n BodyTemp_Data = int.from_bytes(BodyTemp_Data,byteorder='big')\n print('BODYTEMPdata=',BodyTemp_Data)\n if(Recive_BodyTemp_DataFlagMAX90 == BodyTemp_DataFlagCheckMAX90):\n print(\"yes\")\n s=ser.write(AmbTemp_DataFlagMAX90.encode())\n time.sleep(1)\n Recive_AmbTemp_DataFlagMAX90 = ser.read()\n Recive_AmbTemp_DataFlagMAX90 = int.from_bytes(Recive_AmbTemp_DataFlagMAX90,byteorder='big')\n print('AMBTEMPflag=',Recive_AmbTemp_DataFlagMAX90)\n time.sleep(1)\n Recive_AmbTemp_DataFlagMAX90 = ser.read()\n Recive_AmbTemp_DataFlagMAX90 = int.from_bytes(Recive_AmbTemp_DataFlagMAX90,byteorder='big')\n print('AMBTEMPflag=',Recive_AmbTemp_DataFlagMAX90)\n time.sleep(1)\n AmbTemp_Data = ser.read()\n AmbTemp_Data = int.from_bytes(AmbTemp_Data,byteorder='big')\n print('AMBTEMPdata=',AmbTemp_Data)\n if(Recive_AmbTemp_DataFlagMAX90 == AmbTemp_DataFlagCheckMAX90):\n print(\"done\")\n\ndef TempRest_CheckFlag(StartFlagMAX90,BodyTemp_DataFlagMAX90,AmbTemp_DataFlagMAX90,\n StartFlagCheckMAX90,BodyTemp_DataFlagCheckMAX90,AmbTemp_DataFlagCheckMAX90):\n global BodyRestTemp_Data\n global AmbRestTemp_Data\n ser.write(StartFlagMAX90.encode())\n# f=f+1\n time.sleep(1)\n Recive_StartFlagMAX90=ser.read()\n Recive_StartFlagMAX90=int.from_bytes(Recive_StartFlagMAX90,byteorder='big')\n#\n# f1=ord (recive)\n# print(recive)\n print('startflagtemprest=',Recive_StartFlagMAX90)\n if(Recive_StartFlagMAX90 == StartFlagCheckMAX90):\n print(\"ok\")\n ser.write(BodyTemp_DataFlagMAX90.encode())\n time.sleep(1)\n Recive_BodyTemp_DataFlagMAX90 = ser.read()\n Recive_BodyTemp_DataFlagMAX90 = int.from_bytes(Recive_BodyTemp_DataFlagMAX90,byteorder='big')\n print('bodytempflagrest=',Recive_BodyTemp_DataFlagMAX90)\n time.sleep(1)\n BodyRestTemp_Data = ser.read()\n BodyRestTemp_Data = int.from_bytes(BodyRestTemp_Data,byteorder='big')\n print('BODYTEMPdatarest=',BodyRestTemp_Data)\n if(Recive_BodyTemp_DataFlagMAX90 == BodyTemp_DataFlagCheckMAX90):\n print(\"yes\")\n s=ser.write(AmbTemp_DataFlagMAX90.encode())\n time.sleep(1)\n Recive_AmbTemp_DataFlagMAX90 = ser.read()\n Recive_AmbTemp_DataFlagMAX90 = int.from_bytes(Recive_AmbTemp_DataFlagMAX90,byteorder='big')\n print('AMBTEMPflagrest=',Recive_AmbTemp_DataFlagMAX90)\n time.sleep(1)\n Recive_AmbTemp_DataFlagMAX90 = ser.read()\n Recive_AmbTemp_DataFlagMAX90 = int.from_bytes(Recive_AmbTemp_DataFlagMAX90,byteorder='big')\n print('AMBTEMPflagrest=',Recive_AmbTemp_DataFlagMAX90)\n time.sleep(1)\n AmbRestTemp_Data = ser.read()\n AmbRestTemp_Data = int.from_bytes(AmbRestTemp_Data,byteorder='big')\n 
print('AMBTEMPdatarest=',AmbRestTemp_Data)\n if(Recive_AmbTemp_DataFlagMAX90 == AmbTemp_DataFlagCheckMAX90):\n print(\"done\")\ndef blood_glucose(BodyTemp_Data,BodyRestTemp_Data,AmbRestTemp_Data):\n global BG\n BG=Estimate_BloodGlucose(patient_id,BodyTemp_Data,AmbTemp_Data,BodyTemp_Data,BodyRestTemp_Data)\n\ndef update(n,temp_tip,hr,spo2,bg):\n x.update({\"patientid\": n}) #patient id based on what visit you are in \n #read sensor data then for example\n \n temperature=temp_tip\n heartrate=hr\n spo=spo2\n x.update({\"bloodglucose\": bg})\n if(temperature>=27 & temperature<= 55):\n x.update({\"temprature\": temperature-13})\n if(heartrate>=40 & heartrate<=200): \n x.update({\"heartrate\": heartrate})\n if(spo>=80 & spo<=100):\n x.update({\"spo\": spo})\n ## covert dictionary to json object \n sorted_string = json.dumps(x, indent=4, sort_keys=True)\n #requesting API\n url=\"http://192.168.43.198/LoginRegister/update_VS.php\"\n r = requests.post(url, json=x)\n #only print the response\n print(r.text)\n# ctrCmd = [b'forward',b'backward',b'right',b'left',b'start',b'stop']\n# \n# HOST = '10.10.10.40'\n# PORT = 8000\n# BUFSIZE = 1024\n# ADDR = (HOST,PORT)\n# tcpSerSock = socket(AF_INET, SOCK_STREAM)\n# tcpSerSock.bind(ADDR)\n# tcpSerSock.listen(5)\ny=0\nwhile True:\n if y==1:\n break\n print ('Waiting for connection')\n tcpCliSock,addr = tcpSerSock.accept()\n print ('...connected from :', addr)\n \n while True:\n \n data = ''\n data = tcpCliSock.recv(BUFSIZE)\n print(data)\n if not data:\n break\n elif data == ctrCmd[4]:\n picam = VideoStream(usePiCamera=False).stop()\n subprocess.run(['sudo service motion start'], shell=True)\n\n elif data == ctrCmd[0]:\n forward()\n time.sleep(1)\n stop()\n time.sleep(2)\n print ('forward')\n\n elif data == ctrCmd[2]:\n Rightf()\n time.sleep(1)\n stop()\n time.sleep(2)\n data = ''\n print ('right')\n elif data == ctrCmd[3]:\n Leftf()\n time.sleep(1)\n stop()\n time.sleep(2)\n data = ''\n print ('left')\n elif data == ctrCmd[5]:\n stop()\n subprocess.run(['sudo service motion stop'], shell=True)\n window_0=popup().alert_popup(\"welcome\",\"welcome.jpeg\")\n window_1=popup().alert_popup(\"Follow instructions\",\"hr-spo2.jpeg\")\n\n for i in range(5):\n MAX30100_CheckFlag(MAX30100_StartFlag,SPO2_Flag,HR_Flag,\n MAX30100_StartFlag_CHECK,SPO2_Flag_CHECK,HR_Flag_CHECK)\n print(BodyTemp_Data-13,HR_Data,SPO2_Data,BG)\n update(patient_id,BodyTemp_Data,HR_Data,SPO2_Data,BG)\n \n window_3=popup().alert_popup(\"Follow instructions\",\"temp.jpeg\")\n for i in range(5):\n TempTip_CheckFlag(TempTip_StartFlag,TempTip_BodyFlag,TempTip_AmbFlag,\n TempTip_StartFlag_CHECK,TempTip_BodyFlag_CHECK,TempTip_AmbFlag_CHECK)\n \n \n print(BodyTemp_Data,HR_Data,SPO2_Data,BG)\n update(patient_id,BodyTemp_Data,HR_Data,SPO2_Data,BG)\n \n window_3=popup().alert_popup(\"Follow instructions\",\"temp.jpeg\")\n for i in range(5):\n TempRest_CheckFlag(TempRest_StartFlag,TempRest_BodyFlag,TempRest_AmbFlag,TempRest_StartFlag_CHECK,TempRest_BodyFlag_CHECK,TempRest_AmbFlag_CHECK)\n blood_glucose(BodyTemp_Data-15,BodyRestTemp_Data,AmbRestTemp_Data)\n print(BodyTemp_Data,HR_Data,SPO2_Data,BG)\n update(patient_id,BodyTemp_Data,HR_Data,SPO2_Data,BG)\n window_4=popup().alert_popup(\"Follow instructions\",\"visit.jpeg\")\n url = \"https://meet.jit.si/vidoeconfforgrandma#config.prejoinPageEnabled=false\"\n #webcam = VideoStream(src=0).start()\n picam = VideoStream(usePiCamera=False).stop()\n chrome_path = '/usr/lib/chromium-browser/chromium-browser'\n webbrowser.get(chrome_path).open(url)\n # if():\n x=0\n 
for i in range (4):\n\n time.sleep(30)\n if(x==1):\n subprocess.run(['pkill chromium'], shell=True)\n print(\"mm\")\n if(i==2):\n x=1\n y=1\n\n \n\n","sub_path":"Raspberry pi/combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":14117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"421152098","text":"'''\ncreate a logistic regression from scratch\nincluding feature transform, PCA\napply Breast cancer data set \n'''\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\nclass log_reg():\n def __init__(self, lr=0.25, epoch=100000, batch_size=4, fold=5, init='random') -> None:\n self.init = init\n self.epoch = epoch\n self.lr = lr\n self.batch_size = batch_size\n self.folding = fold\n self.loss_in = []\n self.loss_out = []\n \n def fold(self):\n self.X_val, self.Y_val= self.X[400:], self.Y[400:]\n self.X, self.Y = self.X[:400], self.Y[:400]\n # self.X_val, self.Y_val= self.X[50:], self.Y[50:]\n # self.X, self.Y = self.X[:50], self.Y[:50]\n \n def init_w(self):\n if self.init == 'zero':\n self.W = np.zeros((self.n, ))\n elif self.init == 'random':\n self.W = np.random.normal(0, 0.01, (self.n, ))\n else:\n print('The initilizer only accept \"zero\" or \"random\"!')\n return \n \n def sigmoid(self, z):\n e = np.exp(z)\n return e/(1+e)\n \n def z(self, x):\n # zz = np.dot(x, self.W)\n # print(\"z:\", x.shape, self.W.shape, zz.shape)\n return np.dot(x, self.W)\n \n def gradient(self, X, Y):\n # numerator = np.dot(Y, X)\n # print('W: ', self.W.shape)\n # print('X.T: ', X.T.shape)\n # print('Y: ', Y.shape)\n #tmp = np.dot(X.T, self.W)\n # e_exp = np.dot(Y, np.dot(X, self.W))\n # return -np.sum(numerator/(1+np.exp(e_exp)))/X.shape[0]\n # prev_y = self.sigmoid(self.z(X))\n # print('prev:', prev_y.shape, 'Y:', Y.shape, 'X.T:', X.T.shape)\n # return (1/self.m)*np.dot(X.T, self.sigmoid(self.z(X))-Y)+2*self.wd*self.W\n # return -(1/self.m)*np.dot(X.T, prev_y*(1-prev_y)*(Y-prev_y))\n # return (-1/X.shape[0])*np.sum(((Y.reshape(-1, 1)*X)/(1 + np.exp(Y*self.z(X))).reshape((-1,1))) , axis=0) +2*self.wd*self.W\n # print((Y.reshape(-1, 1)*X).shape)\n # print((1 + np.exp(Y*self.z(X))).shape)\n g = np.zeros((X.shape[1], ))\n for i in range(len(X)):\n a = 0 \n for j in range(len(X[i])):\n a = a + self.W[j]*X[i][j]\n g = g + ((Y[i]*X[i])/(1+np.exp(Y[i]*a)))\n return -g/(X.shape[0])\n \n \n def update(self, X, Y):\n # delata = self.gradient(X, Y)\n # print('W:', self.W.shape, 'delata:', delata.shape, delata)\n self.W = self.W - self.lr*self.gradient(X, Y)\n\n \n def loss(self, x, y):\n # print(self.W.T.shape)\n # print(x.shape)\n # l = np.sum(np.log(1+np.exp(-y.dot(np.dot(x, self.W)))))/x.shape[0]\n # print(x.shape, y.shape)\n # z = self.z(x)\n # print(z[:3])\n # prev_y = self.sigmoid(self.z(x))\n # print(prev_y[:5])\n # print(y[:5])\n # l = -(1/x.shape[0])*np.sum(y*np.log(self.sigmoid(self.z(x)))+\\\n # (1-y)*np.log(1-self.sigmoid(self.z(x)))) + self.wd*np.dot(self.W, self.W.T)\n # print(l)\n # return (1/x.shape[0])*np.sum(np.log(1 + np.exp(-y*self.z(x))) , axis=0) + self.wd*np.dot(self.W, self.W.T)\n loss = 0\n for i in range(len(x)):\n a = 0\n for j in range(len(x[i])):\n a = a + self.W[j]*x[i][j]\n loss = loss + np.log(1+np.exp(-y[i]*a))\n return loss/x.shape[0]\n \n def fit(self, X, Y, wd=0.0): \n \"\"\"Train the model\n\n Args:\n X (np.array): training data set\n Y (np.array): label\n wd (float): the weight decay supermeter - lambda. 
Defaults to 0.\n\n Returns:\n weights(np.array): the final weights\n loss_in(list): the list of history loss in sample\n loss_out(list): the list of history loss out of sample\n \"\"\"\n self.wd = wd\n self.X = np.concatenate((np.ones((X.shape[0], 1)), X), 1)\n self.Y = Y\n self.fold()\n self.m, self.n = self.X.shape\n self.init_w()\n for _ in range(self.epoch):\n if _ == 10000:\n self.lr = self.lr/10\n elif _== 50000:\n self.lr = self.lr/10\n for i in range((self.m-1)//self.batch_size+1):\n xb = self.X[i*self.batch_size:(i+1)*self.batch_size]\n yb = self.Y[i*self.batch_size:(i+1)*self.batch_size]\n self.update(xb, yb)\n # print('in: ', end=' ')\n l_in = self.loss(self.X, self.Y)\n # print('out: ', end=' ')\n l_out = self.loss(self.X_val, self.Y_val)\n self.loss_in.append(l_in)\n self.loss_out.append(l_out)\n print('In: {}, Out: {}...'.format(l_in, l_out))\n return self.W, self.loss_in, self.loss_out\n \n\ndef load_bc():\n import sklearn.datasets as ds \n bc = ds.load_breast_cancer()\n X_bc = bc.data\n y_bc = bc.target\n # y_bc[y_bc==0] = -1 # convert 0 to -1 in target\n return X_bc, y_bc\n\nimport pandas as pd\n# df = pd.read_csv(\"G://temp/marks.txt\", header=None)\n# X_bc = np.array(df.iloc[:, :-1])\n# y_bc = np.array(df.iloc[:, -1])\nX_bc, y_bc = load_bc() # load data\n\ndef pca_tran(X, n):\n pca = PCA(n_components=n)\n pcaComponets = pca.fit_transform(X)\n pca_arr = np.array(pcaComponets)\n return pca_arr\n \n \n\n\n\n\n\nlr = log_reg()\n# X_bc = pca_tran(X_bc, 30)\nscaler = StandardScaler() \nX_bc = scaler.fit_transform(X_bc)\nW, loss_in, loss_out = lr.fit(X_bc, y_bc, 0.002)\n\n\n\n# import matplotlib.pyplot as plt\n# x = range(0, 1001, 1)\n# plt.plot(x, loss_in, label='loss_in')\n# plt.plot(x, loss_out, label='loss_out')\n# plt.legend(['train', 'val'])\n# plt.show()\n\n \n\n \n\n \n \n \n \n\n","sub_path":"CUNY_ML_Course_CSC74020/Logistic Regression & Regularization/log_reg_book.py","file_name":"log_reg_book.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"549127825","text":"import pandas as pd\nimport numpy as np\nimport cPickle\n\nfrom sklearn.decomposition import PCA\n\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\nX_test = test.values\n\nX = train.drop('label', 1).values\ny = train.label.values\n\npca = PCA(n_components=100)\npca.fit(X)\npca.fit(X_test)\nX = pca.transform(X)\nX_test = pca.transform(X_test)\n\n\ndef format_training_data(X, y):\n X = X/255. \n return (np.array(X, dtype=np.float32), y)\n\ndef format_test_data(df):\n return (np.array(df, dtype=np.float32)/255.)\n\n\ntrain, test, validation = format_training_data(train[0:28000], y), \\\n format_training_data(train[28000:35000], y), \\\n format_training_data(train[35000:], y) \n\nout_file = open('pca_digits.pkl', \"wb\")\ncPickle.dump( (train,test,validation), out_file, -1)\nout_file.close()\n\nX_test = format_test_data(X_test)\ntest_out_file = open('pca_predict.pkl', \"wb\")\ncPickle.dump( X_test, test_out_file, -1)\ntest_out_file.close()","sub_path":"pca_theano_format.py","file_name":"pca_theano_format.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"588310060","text":"#! 
/usr/local/python_anaconda/bin/python3.4\n\nfrom optparse import OptionParser\nimport os\nimport itertools\nfrom selecton_utilities import extract_selecton_final_params_single_file\nimport glob\n\n\nSELECTON_WEB_PATH = \"/sternadi/home/volume1/taliakustin/selecton-web/selecton/programs/selecton/selecton\"\nSELECTON_WEB_WRT_PATH = \"/sternadi/home/volume1/taliakustin/selecton-web/selecton-wrt/programs/selecton-wrt/selecton\"\nSELECTON_WEB_WRTM_PATH = \"/sternadi/home/volume1/taliakustin/selecton-web/selecton-wrtm/programs/selecton-wrtm/selecton\"\n\n\ndef main():\n parser = OptionParser(\"usage: %prog [options]\")\n parser.add_option(\"--idx\", dest=\"idx\", help=\"index number\")\n parser.add_option(\"-a\", \"--aln\", dest=\"aln\", help=\"alignment file\")\n parser.add_option(\"-b\", \"--best_model\", dest=\"best_model\", help=\"best previous model file\")\n parser.add_option(\"-o\", \"--output\", dest=\"output\", help=\"output directory\")\n parser.add_option(\"-p\", \"--protein\", dest=\"protein\", help=\"protein name\")\n (options, args) = parser.parse_args()\n\n\n idx = int(options.idx)\n aln = options.aln\n best_model_file = options.best_model\n output = options.output\n protein = options.protein\n\n\n\n parameters = extract_selecton_final_params_single_file(best_model_file)\n\n parameter = [0.1, 0.3, 0.5, 0.7, 0.9, 1, 1.1, 1.3, 1.5, 2, 3, 4]\n\n\n tree = glob.glob(os.path.dirname(best_model_file) + \"/%s*tree*\" % protein)[0]\n\n a = float(parameters[\"alpha\"])\n x = float(parameters[\"beta\"])\n k = float(parameters[\"kappa\"])\n w = float(parameters[\"additional_omega_category\"])\n p = float(parameters[\"prob(additional_omega_category)\"])\n\n if idx <= 12:\n print(\"parameter checked is theta\")\n print(\"mu is fixed to 1\")\n t = parameter[idx - 1]\n output_GA = output + \"wrtmModel_fixed1_mr_theta%s/\" % str(t)\n if not os.path.exists(output_GA):\n os.system(\"mkdir %s\" % output_GA)\n output_GA_protein = output_GA + protein\n output_path = \"%s_output.txt\" % output_GA_protein\n print(output_GA, output_path)\n command = \"%s -i %s -u %s -l %s_log.txt -r %s_result.txt -o %s_output.txt -s %s_rasmul.txt \" \\\n \"-c %s_color.txt -t %s_tree.txt -v %s -y 1 -w %f -p %f -a %f -x %f -k %f -j 40 -z 3 -g 1 -fm -fr\" % \\\n (SELECTON_WEB_WRTM_PATH, aln, tree, output_GA_protein, output_GA_protein, output_GA_protein\n , output_GA_protein, output_GA_protein, output_GA_protein, t, w, 1 - p, a, x, k)\n\n\n elif idx > 12:\n idx = idx - 12\n print(\"parameter checked is mu\")\n print(\"theta is fixed to 1\")\n g = parameter[idx - 1]\n output_GA = output + \"wrtmModel_fixed1_tr_mu%s/\" % str(g)\n if not os.path.exists(output_GA):\n os.system(\"mkdir %s\" % output_GA)\n output_GA_protein = output_GA + protein\n output_path = \"%s_output.txt\" % output_GA_protein\n command = \"%s -i %s -u %s -l %s_log.txt -r %s_result.txt -o %s_output.txt -s %s_rasmul.txt \" \\\n \"-c %s_color.txt -t %s_tree.txt -v 1 -y 1 -w %f -p %f -a %f -x %f -k %f -j 40 -z 3 -g %f -ft -fr\" % \\\n (SELECTON_WEB_WRTM_PATH, aln, tree, output_GA_protein, output_GA_protein, output_GA_protein\n , output_GA_protein, output_GA_protein, output_GA_protein, w, 1 - p, a, x, k, g)\n\n\n if (os.path.exists(output_path) and os.stat(output_path).st_size == 0) or not os.path.exists(output_path):\n print(command)\n os.system(command)\n\n else:\n print(\"output file already exists - didn't run selecton\")\n\n\n\nif __name__ == \"__main__\":\n 
main()\n\n","sub_path":"dataset/selecton_multi_options_runner_mu_theta.py","file_name":"selecton_multi_options_runner_mu_theta.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"152078912","text":"import sys,os,string\nimport spacy\nimport nltk\nfrom spacy.lang.en import English\nfrom difflib import SequenceMatcher\n\nimport tweepy\n\nfrom config import CONSUMER_KEY, CONSUMER_SECRET\nfrom config import ACCESS_TOKEN, ACCESS_SECRET\n\nNUM_TWEETS = 100\nUSR1_KEYWORDS = []\nUSR2_KEYWORDS = []\n\ndef tokenize1(mylist):\n parser = English()\n tokens = parser(mylist)\n\n for token in tokens:\n if token.orth_.isspace():\n continue\n # elif token.like_url:\n # USR1_KEYWORDS.append('URL')\n # elif token.orth_.startswith('@'):\n # USR1_KEYWORDS.append('SCREEN_NAME')\n else:\n USR1_KEYWORDS.append(token.lower_)\n\n\ndef tokenize2(mylist):\n parser = English()\n tokens = parser(mylist)\n\n for token in tokens:\n if token.orth_.isspace():\n continue\n # elif token.like_url:\n # USR2_KEYWORDS.append('URL')\n # elif token.orth_.startswith('@'):\n # USR2_KEYWORDS.append('SCREEN_NAME')\n else:\n USR2_KEYWORDS.append(token.lower_)\n\n\ndef get_lemma(word):\n #nltk.download('wordnet')\n from nltk.corpus import wordnet as wn\n\n lemma = wn.morphy(word)\n if lemma is None:\n return word\n else:\n return lemma\n\n\ndef get_lastN_tweets(user1, user2):\n ##populate usr1 and usr2 globle list with tweets word in small case\n\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN,ACCESS_SECRET)\n api = tweepy.API(auth)\n\n usr1tweets = api.user_timeline(screen_name=user1,count=NUM_TWEETS)\n usr2tweets = api.user_timeline(screen_name=user2, count=NUM_TWEETS)\n\n usr1lst = [t.text.split(\" \") for t in usr1tweets]\n usr2lst = [t.text.split(\" \") for t in usr2tweets]\n\n makeflat = lambda abc: [ item for sublist in abc for item in sublist]\n\n flatlist1 = makeflat(usr1lst)\n flatlist2 = makeflat(usr2lst)\n\n flatstr1 = \" \".join(str(x) for x in flatlist1)\n flatstr2 = \" \".join(str(x) for x in flatlist2)\n tokenize1(flatstr1)\n tokenize2(flatstr2)\n\n #nltk.download('stopwords')\n stop_wrd = set(nltk.corpus.stopwords.words('english'))\n\n #print(stop_wrd)\n\n #print(type(USR1_KEYWORDS))\n\n tokens = [ token for token in USR1_KEYWORDS if len(token) > 4]\n tokens = [ token for token in tokens if token not in stop_wrd ]\n usr1_final_tokens = \"\".join(str(e) for e in [get_lemma(token) for token in tokens])\n\n\n tokens = [ token for token in USR2_KEYWORDS if len(token) > 4]\n tokens = [ token for token in tokens if token not in stop_wrd ]\n usr2_final_tokens = \"\".join( str(e) for e in [ get_lemma(token) for token in tokens])\n\n #print(usr1_final_tokens)\n #print(usr2_final_tokens)\n\n cmp = SequenceMatcher(None,sorted(usr1_final_tokens),sorted(usr2_final_tokens)).ratio()\n\n print(user1,\"and\",user2, \"matching percentage is\",cmp*100)\n\n\n\n\n #usr1raw_str.append(t.text)\n\n pass\n\n #print(\"user1 string is:{}\".format(usr1raw_str))\n\n #print(string.punctuation)\n\n\n\ndef similar_tweeters(user1, user2):\n\n ##get last N tweets of usr1 and usr2\n get_lastN_tweets(user1, user2)\n\n\n\n pass\n\nif __name__ == \"__main__\":\n # if len(sys.argv) < 3:\n # print('Usage: {} '.format(sys.argv[0]))\n # sys.exit(1)\n #\n # user1, user2 = sys.argv[1:3]\n\n # user1 = 'bbelderbos'\n user1 = 'pybites'\n user2 = 'importpython'\n\n\n similar_tweeters(user1, 
user2)\n","sub_path":"05/similar_tweeters_ani.py","file_name":"similar_tweeters_ani.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"272340799","text":"import numpy as np\r\nfrom scipy import stats\r\nimport scipy.linalg as sl\r\nimport pandas as pd\r\nimport time\r\nfrom numba import jit\r\nimport numexpr as ne\r\nimport numpy_indexed as npi\r\n\r\n\r\n\r\ndef expand(data):\r\n [m, n] = data.shape\r\n ret = np.zeros((m, n+n-1))\r\n ret[:, 0:n] = data\r\n ret[:, n:n+n-1] = data[:, 1:]\r\n return ret\r\ndef relabel(data):\r\n\tunique_class = np.unique(data[:,0])\r\n\tclass_number = len(unique_class)\r\n\tfor i in range(class_number):\r\n\t\tdata[data[:,0] == unique_class[i], 0] = -47 - (i+1)\r\n\tfor i in range(class_number):\r\n\t\tdata[data[:,0] == -47 - (i+1), 0] = i + 1\r\n\treturn data\r\n\r\ndef embed(data,dim,tau):\r\n\tlabel = data[:,0]\r\n\tdata = np.delete(data,0,axis=1)\r\n\t[m,n] = data.shape\r\n\tembed_num = n-(dim-1)*tau\r\n\tx = np.zeros((embed_num*m, dim))\r\n\ty = np.zeros((embed_num*m, 1))\r\n\r\n\tfor i in range(m):\r\n\t\tx[i*embed_num:(i+1)*embed_num,:]=embed_single(data[i,:],dim,tau)\r\n\t\ty[i*embed_num:(i+1)*embed_num]=label[i]\r\n\r\n\treturn x, y\r\n\r\ndef embed_single(series, dim, tau):\r\n\tn = len(series)\r\n\tmanifold = np.zeros((n-(dim-1)*tau,dim))\r\n\t\r\n\tfor i in range(manifold.shape[0]):\r\n\t\ttemp = []\r\n\t\tfor j in range(dim):\r\n\t\t\ttemp.append(series[i+j*tau])\r\n\t\tmanifold[i,:]=np.asarray(temp)\r\n\r\n\treturn manifold\r\n\r\ndef zscore(series_matrix):\r\n return stats.zscore(series_matrix, axis=1, ddof=1)","sub_path":"BayesCharacteristicSubspace/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"298712750","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\n\"\"\"\n@version: v1.0\n@author: duanwei\n@license: Apache Licence \n@contact: 4064865@qq.com\n@site: http://blog.csdn.net/dwshmilyss\n@software: PyCharm\n@file: 爬取教程生成pdf.py\n@time: 2018/6/19 17:56\n\"\"\"\nimport sys\n\ndefault_encoding = \"utf-8\"\nif (default_encoding != sys.getdefaultencoding()):\n reload(sys)\n sys.setdefaultencoding(default_encoding)\n\n\n# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport requests\nfrom requests.exceptions import RequestException\nfrom bs4 import BeautifulSoup\nimport pdfkit\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\nhtml_template = \"\"\"\n\n\n\n \n\n\n{content}\n\n\n\"\"\"\n\nbase_url = 'http://python3-cookbook.readthedocs.io/zh_CN/latest/'\nbook_name = ''\nchapter_info = []\n\n\ndef get_one_page(url):\n \"\"\"\n 获取网页html内容并返回\n :param url: 目标网址\n :return html\n \"\"\"\n headers = {\n 'User-Agent': \"\"\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"\"\"\n }\n\n try:\n # 获取网页内容,返回html格式数据\n response = requests.get(url, headers=headers)\n # 通过状态码判断是否获取成功\n if response.status_code == 200:\n # 指定编码,否则中文出现乱码\n response.encoding = 'utf-8'\n return response.text\n return None\n except RequestException as e:\n return None\n\n\ndef parse_title_and_url(html):\n \"\"\"\n 解析全部章节的标题和url\n :param html: 需要解析的网页内���\n :return None\n \"\"\"\n soup = BeautifulSoup(html, 'html.parser')\n\n # 获取书名\n global book_name\n book_name = soup.find('div', class_='wy-side-nav-search').a.text.strip()\n menu = soup.find_all('div', class_='section')\n chapters = 
menu[0].div.ul.find_all('li', class_='toctree-l1')\n for chapter in chapters:\n info = {}\n # 获取一级标题和url\n # 标题中含有'/'和'*'会保存失败\n info['title'] = chapter.a.text.replace('/', '').replace('*', '')\n info['url'] = base_url + chapter.a.get('href')\n info['child_chapters'] = []\n\n # 获取二级标题和url\n if chapter.ul is not None:\n child_chapters = chapter.ul.find_all('li')\n for child in child_chapters:\n url = child.a.get('href')\n # 如果在url中存在'#',则此url为页面内链接,不会跳转到其他页面\n # 所以不需要保存\n if '#' not in url:\n info['child_chapters'].append({\n 'title': child.a.text.replace('/', '').replace('*', ''),\n 'url': base_url + child.a.get('href'),\n })\n\n chapter_info.append(info)\n\n\ndef save_pdf(html, filename):\n \"\"\"\n 把所有html文件保存到pdf文件\n :param html: html内容\n :param file_name: pdf文件名\n :return:\n \"\"\"\n options = {\n 'page-size': 'Letter',\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n 'encoding': \"UTF-8\",\n 'custom-header': [\n ('Accept-Encoding', 'gzip')\n ],\n 'cookie': [\n ('cookie-name1', 'cookie-value1'),\n ('cookie-name2', 'cookie-value2'),\n ],\n 'outline-depth': 10,\n }\n\n path_wk = r'D:\\Program Files\\wkhtmltox-0.12.5-1.mxe-cross-win64\\wkhtmltox\\bin\\wkhtmltopdf.exe' # 安装位置\n config = pdfkit.configuration(wkhtmltopdf=path_wk)\n # pdfkit.from_url(url, r'D:\\are you coding\\pdf\\taobao.pdf', configuration=config)\n pdfkit.from_string(html, filename, options=options,configuration=config)\n\n\ndef get_content(url):\n \"\"\"\n 解析URL,获取需要的html内容\n :param url: 目标网址\n :return: html\n \"\"\"\n html = get_one_page(url)\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find('div', attrs={'itemprop': 'articleBody'})\n html = html_template.format(content=content)\n return html\n\n\ndef parse_html_to_pdf():\n \"\"\"\n 解析URL,获取html,保存成pdf文件\n :return: None\n \"\"\"\n try:\n for chapter in chapter_info:\n ctitle = chapter['title']\n url = chapter['url']\n # 文件夹不存在则创建(多级目录)\n dir_name = os.path.join(os.path.dirname(__file__), 'gen', ctitle)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n html = get_content(url)\n\n print('保存章节:', ctitle)\n save_pdf(html, os.path.join(dir_name, ctitle + '.pdf'))\n\n children = chapter['child_chapters']\n if children:\n for child in children:\n html = get_content(child['url'])\n pdf_path = os.path.join(dir_name, child['title'] + '.pdf')\n save_pdf(html, pdf_path)\n print('====== 本章保存完毕')\n except Exception as e:\n print(e)\n\n\ndef merge_pdf(infnList, outfn):\n \"\"\"\n 合并pdf\n :param infnList: 要合并的PDF文件路径列表\n :param outfn: 保存的PDF文件名\n :return: None\n \"\"\"\n pagenum = 0\n pdf_output = PdfFileWriter()\n\n for pdf in infnList:\n # 先合并一级目录的内容\n first_level_title = pdf['title']\n dir_name = os.path.join(os.path.dirname(\n __file__), 'gen', first_level_title)\n padf_path = os.path.join(dir_name, first_level_title + '.pdf')\n\n pdf_input = PdfFileReader(open(padf_path, 'rb'))\n # 获取 pdf 共用多少页\n page_count = pdf_input.getNumPages()\n for i in range(page_count):\n pdf_output.addPage(pdf_input.getPage(i))\n\n # 添加书签\n parent_bookmark = pdf_output.addBookmark(\n first_level_title, pagenum=pagenum)\n\n # 页数增加\n pagenum += page_count\n\n # 存在子章节\n if pdf['child_chapters']:\n for child in pdf['child_chapters']:\n second_level_title = child['title']\n padf_path = os.path.join(dir_name, second_level_title + '.pdf')\n\n pdf_input = PdfFileReader(open(padf_path, 'rb'))\n # 获取 pdf 共用多少页\n page_count = pdf_input.getNumPages()\n for i in range(page_count):\n pdf_output.addPage(pdf_input.getPage(i))\n\n 
# 添加书签\n pdf_output.addBookmark(\n second_level_title, pagenum=pagenum, parent=parent_bookmark)\n # 增加页数\n pagenum += page_count\n\n # 合并\n pdf_output.write(open(outfn, 'wb'))\n # 删除所有章节文件\n shutil.rmtree(os.path.join(os.path.dirname(__file__), 'gen'))\n\ndef main():\n html = get_one_page(base_url)\n parse_title_and_url(html)\n parse_html_to_pdf()\n merge_pdf(chapter_info, os.path.join(\n os.path.dirname(__file__), book_name + '.pdf'))\n\nif __name__ == '__main__':\n main()","sub_path":"crawler/htmlTopdf.py","file_name":"htmlTopdf.py","file_ext":"py","file_size_in_byte":7184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"567482160","text":"\"\"\"\nDjango settings for androidgreetings project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\nfrom django.conf.global_settings import STATICFILES_FINDERS as SFF\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nTEMPLATE_CONTEXT_PROCESSORS = TCP + (\n 'django.core.context_processors.request',\n)\n\nSTATICFILES_FINDERS = SFF + (\n 'compressor.finders.CompressorFinder',\n)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '!f+qo!(bg#qp1bylus=f#(^u!#htc&%a_(lrd^7x7v^gg$83q6'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = ['androidgreetings.ru', 'localhost', '127.0.0.1']\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'suit',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.sitemaps',\n 'django.contrib.staticfiles',\n 'django.contrib.comments',\n 'compressor', \n 'static_sitemaps',\n 'sorl.thumbnail',\n 'greetings',\n 'blog',\n 'banners',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'htmlmin.middleware.HtmlMinifyMiddleware',\n 'htmlmin.middleware.MarkRequestMiddleware',\n)\n\nROOT_URLCONF = 'androidgreetings.urls'\n\nWSGI_APPLICATION = 'androidgreetings.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'ru-RU'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'androidgreetings', 'public', 'static')\n\nMEDIA_URL = 
'/public/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'androidgreetings', 'public', 'media')\n\nCOMPRESS_CSS_FILTERS = ['compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']\n\nSTATICSITEMAPS_ROOT_SITEMAP = 'androidgreetings.sitemaps.sitemaps'\n\nSITE_ID = 1\n\ntry:\n from production_settings import *\nexcept ImportError:\n pass\n","sub_path":"androidgreetings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"274734515","text":"'''\nCreated on 25/07/2014\n\n:author: alfred\n'''\nimport logging\nfrom functools import reduce\nfrom mc_be.commons.utils import get_attr_by_path\nfrom mc_be.commons.extractors import BaseExtractor\nfrom datetime import datetime, timezone\n\nlogger = logging.getLogger('extractor')\n\n\nclass FEBaseExtractor(BaseExtractor):\n\n def __init__(self, field=None, model_filter_obj=None, *args, **kwargs):\n self.field = field\n self.model_filter_obj = model_filter_obj\n self.convert_data = self.field and self.field.convertData\n self.data_field = self.convert_data and get_attr_by_path(self.convert_data, 'fields.0')\n if self.model_filter_obj:\n self.filter_obj = get_attr_by_path(self.model_filter_obj, self.convert_data.filter) \\\n if self.convert_data and self.convert_data.filter else get_attr_by_path(self.model_filter_obj,\n 'default')\n\n def extract(self, value, *args, **kwargs):\n try:\n return get_attr_by_path(value, self.field.mapping)\n except AttributeError:\n return value\n\n\nclass DefaultExtractor(FEBaseExtractor):\n\n def extract(self, value, *args, **kwargs):\n\n def _lookup(memo, item_value):\n matches_filter = True\n\n if self.filter_obj and self.filter_obj.filter_params:\n for key, data in self.filter_obj.filter_params:\n check_value = get_attr_by_path(item_value, key)\n matches_filter = matches_filter and check_value and check_value.upper() == data.upper()\n if not matches_filter:\n return memo\n if matches_filter and (not memo or\n (self.filter_obj and\n self.filter_obj.timestamp_field and\n is_sorted_dates(get_attr_by_path(memo,\n self.filter_obj.timestamp_field,\n datetime.fromtimestamp(0, tz=timezone.utc)),\n get_attr_by_path(item_value,\n self.filter_obj.timestamp_field,\n datetime.fromtimestamp(0, tz=timezone.utc))))):\n memo = item_value\n return memo\n\n value = super(DefaultExtractor, self).extract(value, *args, **kwargs)\n\n data_value = reduce(_lookup, value or [], '')\n\n if self.data_field:\n data_value = get_attr_by_path(data_value, self.data_field)\n\n return data_value\n\n\nclass RelationExtractor(FEBaseExtractor):\n\n def __init__(self, *args, **kwargs):\n super(RelationExtractor, self).__init__(*args, **kwargs)\n if self.convert_data:\n self.relation_filter = get_attr_by_path(self.model_filter_obj, self.convert_data.relation_filter) \\\n if self.convert_data.relation_filter and self.model_filter_obj else None\n\n def extract(self, value, *args, **kwargs):\n value = super(RelationExtractor, self).extract(value, *args, **kwargs)\n data = value\n if self.convert_data:\n if self.convert_data.entity and self.convert_data.submapping:\n relation = value and get_attr_by_path(value, \".\".join([\"0\", \"data\", self.convert_data.entity]))\n if self.relation_filter:\n extractor = DefaultExtractor()\n extractor.data_field = self.convert_data.submapping\n extractor.filter_obj = self.relation_filter\n data = extractor.extract(relation)\n elif relation:\n data = 
get_attr_by_path(relation[0], self.convert_data.submapping)\n else:\n data = None\n\n if data:\n extractor = DefaultExtractor()\n extractor.data_field = self.data_field\n extractor.filter_obj = self.filter_obj\n return extractor.extract(data)\n\n return ''\n\n\nclass Concat(FEBaseExtractor):\n is_dependent = True\n\n def extract(self, value, *args, **kwargs):\n result = ''\n\n if self.convert_data and self.convert_data.fields:\n record_values = [get_attr_by_path(value, record_field, '')\n for record_field in self.convert_data.fields]\n\n if self.convert_data.concat_separator:\n result = (self.convert_data.concat_separator or ' ').join(record_values)\n\n return result.strip()\n\n\ndef is_sorted_dates(*args):\n from dirty_models.fields import DateTimeField\n field = DateTimeField(parse_format='iso8601')\n previous_date = None\n for date in args:\n date_obj = field.convert_value(date) if not field.check_value(date) else date\n try:\n if previous_date and previous_date > date_obj:\n return False\n except TypeError as ex:\n logger.error(\"Impossible compare '{0}' with '{1}'. Original date: '{2}'\".format(str(previous_date),\n str(date_obj),\n str(date)))\n logger.error(ex)\n return False\n\n previous_date = date_obj\n\n return True\n","sub_path":"mc-pybe-release-smip-R4/mc_be/blueprints/dmm/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"465165884","text":"from kt_poly_p import *\n\ndef kt_poly_n(n):\n return str(kt_poly_n_f(n))[1:-1]\n\ndef kt_poly_n_f(n):\n if n % 2 == 0: return poly(3*n)\n return poly(3*n+1)\n\n@memoized\ndef poly(n):\n c1 = []\n for i in range(1, n+1+1):\n c1.append(p(i))\n c2 = []\n for i in range(1, n+1+1):\n c2.append(Not(p(2*i)))\n return mbox(n+1, list2conj(c1)) |OR| f(n, n) |OR| mbox(n+1, list2conj(c2))\n \n@memoized\ndef f(i, n):\n if i == 0: return FALSE\n if i == n: return Dia(f(n-1, n) |OR| mdia(n+2, p(n) |IFF| p(1))) |OR| Box(p(n+2))\n return Dia(f(i-1, n) |OR| mdia(i+2, p(i) |IFF| p(i+1))) |OR| Box(p(i+2))\n","sub_path":"kt_poly_n.py","file_name":"kt_poly_n.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"392176315","text":"# -*- coding: utf-8 -*-\n# Date: 2018/12/14\nimport requests, re, time\n\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\"}\ndef crawl_html(post_url, post_data):\n r = requests.post(post_url, data=post_data, headers=headers)\n pars_html(r.text)\n\ndef pars_html(html):\n print(html)\n\ndef main():\n url = 'http://www.cy365.com/shop/category-20-sortID-desc-image-{}-0.html'\n post_rul = 'http://www.cy365.com/shop/category.php?uid=20&sortby=sortID&sort=desc&display=image&pageindex={}&extend=0'\n for i in range(1,200):\n r = requests.get(url.format(i), headers=headers)\n xjxargs = rc.search(r.text)\n if not xjxargs:\n break\n rt = str(int(time.time() * 1000))\n post_data = {\n 'xjxfun': 'do_req_category',\n 'xjxr': rt,\n 'xjxargs[]': ('S', 'N1')}\n crawl_html(post_rul.format(i), post_data)\n\nif __name__ == '__main__':\n rc = re.compile(r\"xajax_do_req_category\\('([^']+)'\")\n main()\n\n","sub_path":"chapter13/post请求.py","file_name":"post请求.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"512658320","text":"#coding:utf8\nimport sys\nimport check,clearIDCache\nfrom Lib import auto360\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\ntry:\n _fromUtf8 = QString.fromUtf8\nexcept AttributeError:\n _fromUtf8 = lambda s: s\n\n#QTextCodec.setCodecForTr(QTextCodec.codecForName(\"utf8\"))\n\nclass SwitchNet(QThread):\n def __init__(self, parent = None):\n QThread.__init__(self, parent)\n self.exiting = False\n\n def __del__(self):\n self.exiting = True\n self.wait()\n\n def render(self, net):\n self.net = net\n self.start()\n\n def outLogComment(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogError(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogPass(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def run(self):\n if self.net == 0:\n s = '内网'\n else:\n s = '外网'\n self.outLogComment('开始切换到%s后台...'%s)\n ret = check.switchNet(self.net, self)\n if ret:\n self.outLogPass('%s后台切换成功'%s)\n self.outLogComment('----------------------------------------------\\n')\n else:\n self.outLogError('%s后台切换失败'%s)\n self.outLogComment('----------------------------------------------\\n')\n\nclass CheckJsonData(QThread):\n def __init__(self, parent = None):\n QThread.__init__(self, parent)\n self.exiting = False\n\n def __del__(self):\n self.exiting = True\n self.wait()\n\n def render(self, net,tasktype):\n self.net = net\n self.tasktype = tasktype\n self.start()\n\n def outLogComment(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogError(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogPass(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def run(self):\n if self.net == 0:\n s = '内网'\n else:\n s = '外网'\n \n if self.tasktype == 0:\n \n s1 = '任务'\n else:\n s1 = '特权'\n \n self.outLogComment('开始校验%s后台%s数据...'%(s,s1))\n \n ret = check.checkJsonFiles(self.net, self, self.tasktype)\n \n if ret:\n self.outLogPass('校验%s后台%s���据成功'%(s,s1))\n self.outLogComment('----------------------------------------------\\n')\n else:\n self.outLogError('校验%s后台%s数据失败'%(s,s1))\n self.outLogComment('----------------------------------------------\\n')\n\n#class CheckMD5(QThread):\n #def __init__(self, parent = None):\n #QThread.__init__(self, parent)\n #self.exiting = False\n\n #def __del__(self):\n #self.exiting = True\n #self.wait()\n\n #def render(self):\n #self.start()\n\n #def outLogComment(self, qs):\n #self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n #def outLogError(self, qs):\n #self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n #def outLogPass(self, qs):\n #self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n #def run(self):\n #self.outLogComment('开始对比内外网后台MD5值...')\n #check.checkMD5(self)\n #self.outLogComment('对比内外网后台MD5值完成')\n #self.outLogComment('----------------------------------------------\\n')\n \nclass ClearIDCache(QThread):\n def __init__(self, parent = None):\n QThread.__init__(self, parent)\n self.exiting = False\n\n def __del__(self):\n self.exiting = True\n self.wait()\n\n def render(self):\n self.start()\n\n def outLogComment(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogError(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogPass(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def run(self):\n self.outLogComment('开始清除ID化缓存...')\n clearIDCache.main(self)\n self.outLogComment('清除ID化缓存完成')\n self.outLogComment('----------------------------------------------\\n')\n \n \n 
\nclass checkLoginWind(QThread):\n def __init__(self, parent = None):\n QThread.__init__(self, parent)\n self.exiting = False\n\n def __del__(self):\n self.exiting = True\n self.wait()\n\n def render(self):\n self.start()\n\n def outLogComment(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogError(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def outLogPass(self, qs):\n self.emit(SIGNAL(\"output(QString)\"), ' %s'%qs)\n\n def run(self):\n self.outLogComment('请点击切换网络的按钮,切换到需要的网络')\n self.outLogComment('切换网络成功,登录符合条件的账号')\n \n check.getLoginWind(self)\n \n\n\n \nclass IDCheck(QWidget):\n\n def __init__(self, parent=None):\n super(IDCheck, self).__init__(parent)\n self.setWindowTitle(_fromUtf8(\"ID化特权后台集成校验工具\"))\n self.setFixedSize(590, 600)\n\n self.bt_Switch0 = QCommandLinkButton(_fromUtf8('切换到内网'))\n #self.bt_Switch0.setGeometry(QRect(10, 40, 151, 41))\n self.bt_Switch0.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_Switch0.setObjectName(_fromUtf8(\"bt_Switch0\"))\n self.bt_Switch0.setFixedSize(151, 41)\n self.bt_Switch0.clearFocus()\n\n self.bt_Switch1 = QCommandLinkButton(_fromUtf8('切换到外网'))\n #self.bt_Switch1.setGeometry(QRect(10, 100, 151, 41))\n self.bt_Switch1.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_Switch1.setObjectName(_fromUtf8(\"bt_Switch1\"))\n self.bt_Switch1.setFixedSize(151, 41)\n self.bt_Switch0.clearFocus()\n\n self.bt_Json00 = QCommandLinkButton(_fromUtf8('内网后台任务校验'))\n #self.bt_Json0.setGeometry(QRect(10, 160, 181, 41))\n self.bt_Json00.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_Json00.setObjectName(_fromUtf8(\"bt_Json00\"))\n self.bt_Json00.setFixedSize(181, 41)\n \n self.bt_Json01 = QCommandLinkButton(_fromUtf8('内网后台特权校验'))\n #self.bt_Json0.setGeometry(QRect(10, 160, 181, 41))\n self.bt_Json01.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_Json01.setObjectName(_fromUtf8(\"bt_Json01\"))\n self.bt_Json01.setFixedSize(181, 41) \n\n self.bt_Json10 = QCommandLinkButton(_fromUtf8('外网后台任务校验'))\n #self.bt_Json1.setGeometry(QRect(10, 220, 181, 41))\n self.bt_Json10.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_Json10.setObjectName(_fromUtf8(\"bt_Json10\"))\n self.bt_Json10.setFixedSize(181, 41)\n\n self.bt_Json11 = QCommandLinkButton(_fromUtf8('外网后台特权校验'))\n #self.bt_Json1.setGeometry(QRect(10, 220, 181, 41))\n self.bt_Json11.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_Json11.setObjectName(_fromUtf8(\"bt_Json11\"))\n self.bt_Json11.setFixedSize(181, 41)\n\n self.bt_MD5 = QCommandLinkButton(_fromUtf8('推广登录弹窗测试'))\n #self.bt_MD5.setGeometry(QRect(10, 280, 181, 41))\n self.bt_MD5.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_MD5.setObjectName(_fromUtf8(\"bt_MD5\"))\n self.bt_MD5.setFixedSize(181, 41)\n\n self.bt_ClearCache = QCommandLinkButton(_fromUtf8('清除ID化缓存'))\n #self.bt_ClearCache.setGeometry(QRect(10, 280, 181, 41))\n self.bt_ClearCache.setStyleSheet(_fromUtf8(\"font: 10pt \\\"浪漫雅圆\\\";\"))\n self.bt_ClearCache.setObjectName(_fromUtf8(\"bt_ClearCache\"))\n self.bt_ClearCache.setFixedSize(181, 41)\n\n self.browser = QTextBrowser()\n #self.browser.setGeometry(QRect(210, 40, 321, 441))\n self.browser.setObjectName(_fromUtf8(\"runLog\"))\n self.browser.setFixedSize(350, 450)\n\n self.label = QLabel(_fromUtf8('运行日志:'))\n #self.label.setGeometry(QRect(210, 0, 101, 41))\n self.label.setStyleSheet(_fromUtf8(\"font: italic 12pt \\\"微软雅黑\\\";\"))\n self.label.setObjectName(_fromUtf8(\"label\"))\n 
self.label.setFixedSize(101, 20)\n\n self.bt_Clear = QPushButton(_fromUtf8(\"清除日志\"))\n #self.bt_Clear.setGeometry(QRect(230, 510, 101, 41))\n self.bt_Clear.setStyleSheet(_fromUtf8(\"font: 10pt \\\"微软雅黑\\\";\"))\n self.bt_Clear.setObjectName(_fromUtf8(\"BtClearLog\"))\n self.bt_Clear.setFixedSize(101, 41)\n\n self.bt_Close = QPushButton(_fromUtf8(\"关闭\"))\n #self.bt_Close.setGeometry(QRect(390, 510, 101, 41))\n self.bt_Close.setStyleSheet(_fromUtf8(\"font: 10pt \\\"微软雅黑\\\";\"))\n self.bt_Close.setObjectName(_fromUtf8(\"BtExit\"))\n self.bt_Close.setFixedSize(101, 41)\n self.bt_Close.setFocus()\n\n self.thSwitchNet = SwitchNet()\n self.thJson = CheckJsonData()\n self.thMD5 = checkLoginWind()\n self.thClearCache = ClearIDCache()\n\n mainLayout = QHBoxLayout()\n mainLayout.addWidget(self.bt_Switch0)\n mainLayout.addWidget(self.bt_Switch1)\n mainLayout.addWidget(self.bt_Json00)\n mainLayout.addWidget(self.bt_Json01)\n mainLayout.addWidget(self.bt_Json10)\n mainLayout.addWidget(self.bt_Json11)\n mainLayout.addWidget(self.bt_MD5)\n mainLayout.addWidget(self.bt_ClearCache)\n mainLayout.addWidget(self.browser)\n mainLayout.addWidget(self.label)\n mainLayout.addWidget(self.bt_Clear)\n mainLayout.addWidget(self.bt_Close)\n mainLayout.setAlignment(Qt.AlignCenter)\n\n mainSplitter = QSplitter(Qt.Vertical)\n mainSplitter.setOpaqueResize(True)\n\n frame = QFrame(mainSplitter)\n\n buttonLayout = QVBoxLayout()\n buttonLayout.addStretch(1)\n buttonLayout.setAlignment(Qt.AlignCenter)\n buttonLayout.addWidget(self.bt_Switch0)\n buttonLayout.addWidget(self.bt_Switch1)\n buttonLayout.addWidget(self.bt_Json00)\n buttonLayout.addWidget(self.bt_Json01)\n \n buttonLayout.addWidget(self.bt_Json10)\n buttonLayout.addWidget(self.bt_Json11)\n buttonLayout.addWidget(self.bt_MD5)\n buttonLayout.addWidget(self.bt_ClearCache)\n\n buttonLayout1 = QVBoxLayout()\n buttonLayout.addStretch(1)\n buttonLayout1.addWidget(self.label)\n buttonLayout1.addWidget(self.browser)\n\n closeLayout = QHBoxLayout()\n closeLayout.addWidget(self.bt_Clear)\n closeLayout.addWidget(self.bt_Close)\n\n buttonLayout1.addLayout(closeLayout)\n\n mainLayout = QHBoxLayout(frame)\n mainLayout.setMargin(2)#设置空白\n mainLayout.setSpacing(10)\n mainLayout.addLayout(buttonLayout)\n mainLayout.addLayout(buttonLayout1)\n self.connect(self.bt_Switch0, SIGNAL(\"clicked()\"), self.SwitchNet0)\n self.connect(self.bt_Switch1, SIGNAL(\"clicked()\"), self.SwitchNet1)\n self.connect(self.bt_Json00, SIGNAL(\"clicked()\"), self.checkJson00)\n self.connect(self.bt_Json01, SIGNAL(\"clicked()\"), self.checkJson01)\n \n self.connect(self.bt_Json10, SIGNAL(\"clicked()\"), self.checkJson10)\n self.connect(self.bt_Json11, SIGNAL(\"clicked()\"), self.checkJson11)\n self.connect(self.bt_MD5, SIGNAL(\"clicked()\"), self.checkLoginWind)\n self.connect(self.bt_ClearCache, SIGNAL(\"clicked()\"), self.clearCache)\n self.connect(self.bt_Close, SIGNAL(\"clicked()\"), self, SLOT(\"close()\"))\n self.connect(self.bt_Clear, SIGNAL(\"clicked()\"), self.clearText)\n\n self.connect(self.thSwitchNet, SIGNAL(\"finished()\"), self.updateUi)\n self.connect(self.thSwitchNet, SIGNAL(\"terminated()\"), self.updateUi)\n self.connect(self.thSwitchNet, SIGNAL(\"output(QString)\"), self.addLog)\n\n self.connect(self.thJson, SIGNAL(\"finished()\"), self.updateUi)\n self.connect(self.thJson, SIGNAL(\"terminated()\"), self.updateUi)\n self.connect(self.thJson, SIGNAL(\"output(QString)\"), self.addLog)\n\n self.connect(self.thMD5, SIGNAL(\"finished()\"), self.updateUi)\n self.connect(self.thMD5, 
SIGNAL(\"terminated()\"), self.updateUi)\n self.connect(self.thMD5, SIGNAL(\"output(QString)\"), self.addLog)\n\n self.connect(self.thClearCache, SIGNAL(\"finished()\"), self.updateUi)\n self.connect(self.thClearCache, SIGNAL(\"terminated()\"), self.updateUi)\n self.connect(self.thClearCache, SIGNAL(\"output(QString)\"), self.addLog)\n\n layout = QHBoxLayout(self)\n layout.addWidget(mainSplitter)\n self.setLayout(layout)\n\n def clearText(self):\n self.browser.clear()\n\n def updateUi(self):\n self.bt_Close.setEnabled(True)\n self.bt_Clear.setEnabled(True)\n self.bt_Switch0.setEnabled(True)\n self.bt_Switch1.setEnabled(True)\n self.bt_Json00.setEnabled(True)\n self.bt_Json01.setEnabled(True)\n \n self.bt_Json10.setEnabled(True)\n self.bt_Json11.setEnabled(True)\n self.bt_MD5.setEnabled(True)\n self.bt_ClearCache.setEnabled(True)\n\n def disableButtons(self):\n self.bt_Close.setEnabled(False)\n self.bt_Clear.setEnabled(False)\n self.bt_Switch0.setEnabled(False)\n self.bt_Switch1.setEnabled(False)\n self.bt_Json00.setEnabled(False)\n self.bt_Json01.setEnabled(False)\n\n self.bt_Json10.setEnabled(False)\n self.bt_Json11.setEnabled(False)\n \n self.bt_Json10.setEnabled(False)\n self.bt_Json11.setEnabled(False)\n self.bt_MD5.setEnabled(False)\n self.bt_ClearCache.setEnabled(False)\n\n def addLog(self, text):\n self.browser.append(_fromUtf8(text))\n\n def SwitchNet0(self):\n self.disableButtons()\n self.thSwitchNet.render(0)\n\n def SwitchNet1(self):\n self.disableButtons()\n self.thSwitchNet.render(1)\n\n def checkJson00(self):\n self.disableButtons()\n self.thJson.render(0,0)\n \n def checkJson01(self):\n self.disableButtons()\n self.thJson.render(0,1) \n\n def checkJson10(self):\n self.disableButtons()\n self.thJson.render(1,0)\n\n def checkJson11(self):\n self.disableButtons()\n self.thJson.render(1,1)\n\n def checkLoginWind(self):\n self.disableButtons()\n self.thMD5.render()\n\n def clearCache(self):\n auto360.closeProtect()\n self.disableButtons()\n self.thClearCache.render()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n form = IDCheck()\n form.show()\n app.exec_()\n","sub_path":"IDCheck_UI.py","file_name":"IDCheck_UI.py","file_ext":"py","file_size_in_byte":15508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"133418871","text":"## Represents a single node in the Trie\nclass TrieNode:\n def __init__(self):\n ## Initialize this node in the Trie\n self.children = {}\n self.is_word = False\n\n def insert(self, char):\n ## Add a child node in this Trie\n node = TrieNode()\n self.children[char] = node\n \n def suffixes(self, suffix=''):\n ## Recursive function that collects the suffix for\n ## all complete words below this point\n node = self.children\n\n if self.is_word:\n yield suffix\n\n for i, k in node.items():\n for x in k.suffixes(suffix + i):\n yield x\n\n \n## The Trie itself containing the root node and insert/find functions\nclass Trie:\n def __init__(self):\n ## Initialize this Trie (add a root node)\n self.root = TrieNode()\n\n def insert(self, word):\n ## Add a word to the Trie\n if word is None:\n return False\n node = self.root\n for i in word:\n if i not in node.children:\n node.children[i] = TrieNode()\n node = node.children[i]\n node.is_word = True\n \n\n def find(self, prefix):\n ## Find the Trie node that represents this prefix\n node = self.root\n for i in prefix:\n if i not in node.children:\n return False\n node = node.children[i]\n return node\n \n'''\nTEST\n'''\n\nMyTrie = Trie()\nwordList = [\n 
\"ant\", \"anthology\", \"antagonist\", \"antonym\", \n \"fun\", \"function\", \"factory\", \n \"trie\", \"trigger\", \"trigonometry\", \"tripod\"\n]\nfor word in wordList:\n MyTrie.insert(word)\n\nprefixNode = MyTrie.find('f')\nif prefixNode:\n prefixNode.suffixes()\n print('\\n'.join(prefixNode.suffixes()))\nelse:\n print(prefix + \" not found\")\n\n\ndef test(wordList):\n\n for word in wordList:\n MyTrie.insert(word)\n\n prefixNode = MyTrie.find('f')\n return prefixNode == False\n\n\nwordList1 = ['hello']\nprint(test(wordList1))\n# True\n\nwordList1 = ['']\nprint(test(wordList1))\n# True\n\nwordList1 = [None]\nprint(test(wordList1))\n# True\n","sub_path":"Algorithm/problem_5-autocomplete.py","file_name":"problem_5-autocomplete.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"167924314","text":"import json, config, random\nfrom abc import abstractmethod\nfrom logs import Logger\n\nlogger = Logger('Generator').getLogger()\n\n\nclass DataGenerator():\n\t\n\tdef __init__(self, schema, seed=''):\n\t\tself.rand = random.Random()\n\t\tself.data_fields = []\n\t\tself.field_names = []\n\t\tself.schema = schema\n\t\tif not seed == '':\n\t\t\tself.rand.seed(seed)\n\t\t\tlogger.info('Using seed: ' + seed)\n\t\tself.check_schema(schema)\n\t\tlogger.info('Generator using Schema at: ' + str(schema))\n\t\t\t\t\n\t# Returns true/false whether or not the schema is valid\n\t# Raises an exception?\n\tdef check_schema(self, schema):\n\t\tpath = config.get_conf_dir() + schema\n\n\t\twith open(path) as data_file:\n\t\t\tconf = json.load(data_file)\n\t\t\t\n\t\t\tif not type(conf) == list:\n\t\t\t\tlogger.error('JSON Schema not formatted properly')\n\t\t\t\traise TypeError('Root of JSON Schema is not a list')\n\t\t\t\n\t\t\tfor field in conf:\n\t\t\t\tif not 'fieldName' in field:\n\t\t\t\t\tlogger.error('fieldName not found in schema at ' + path)\n\t\t\t\t\traise KeyError('Could not find \\'fieldName\\' in field of schema: ' + schema )\n\t\t\t\t\t\n\t\t\t\tif not 'type' in field:\n\t\t\t\t\tlogger.error('type not found in schema at ' + path)\n\t\t\t\t\traise KeyError('Could not find \\'type\\' in field of schema: ' + schema)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tfield_type = field['type']\n\t\t\t\tlogger.debug('Attempting to register datum with type: ' + str(field_type))\n\t\t\t\tdatum = AbstractDatum(field)\n\t\t\t\tif not datum.field_name in self.field_names:\n\t\t\t\t\tself.field_names.append(datum.field_name)\n\t\t\t\t\tlogger.debug('Added datum to field set with type: ' + str(field_type))\n\t\t\t\telse:\n\t\t\t\t\traise ValueError('Cannot have duplicate field names')\n\t\t\t\tif 'string' == field_type:\n\t\t\t\t\tdatum = StringDatum(field)\n\t\t\t\telif 'int' == field_type:\n\t\t\t\t\tdatum = IntDatum(field)\n\t\t\t\telif 'decimal' == field_type:\n\t\t\t\t\tdatum = DecimalDatum(field)\n\t\t\t\telif 'map' == field_type:\n\t\t\t\t\tdatum = MapDatum(field)\n\t\t\t\telif 'boolean' == field_type:\n\t\t\t\t\tdatum = BooleanDatum(field)\n\t\t\t\telse:\n\t\t\t\t\traise RuntimeError('Field type was not found. 
Please change the field type or implement a new datum')\n\t\t\t\t\n\t\t\t\tdatum.check() # Check to make sure the field has necessary attributes\t\n\t\t\t\tlogger.info('Datum passed check successfully')\n\t\t\t\tself.data_fields.append(datum)\n\t\t\n\tdef generate(self):\n\t\tdata = {}\n\t\tmaps = []\n\t\tfor datum in self.data_fields:\n\t\t\tif datum.type == 'map':\n\t\t\t\tmaps.append(datum)\n\t\t\t\tcontinue # put off mappers until end\n\t\t\t\n\t\t\tval = datum.generate(self.rand)\n\t\t\t\n\t\t\tdata[datum.field_name] = val\n\t\t\t\n\t\tfor mapper in maps:\n\t\t\tval = mapper.generate(self.rand, data)\n\t\t\tdata[mapper.field_name] = val\n\t\t\n\t\treturn data\n\n\nclass AbstractDatum(object):\n\t\t\n\tdef __init__(self, field):\n\t\tself.field = field\n\t\tself.check_for_key('fieldName')\n\t\tself.check_for_key('type')\n\t\tself.field_name = field['fieldName']\n\t\tself.type = field['type']\n\t\t\n\t\n\tdef check_for_key(self, key_name):\n\t\tif not key_name in self.field:\n\t\t\traise KeyError('Missing key: ' + key_name + ' in ' + self.field_name)\n\t\telse:\n\t\t\treturn True\n\t\n\t# A method to determine whether or not the schema object has the necessary fields.\n\t@abstractmethod\n\tdef check(self):\n\t\traise NotImplementedError('AbstractDatum: This method should have been implemented by a sublcass')\n\t\n\t@abstractmethod\n\tdef generate(self, rand):\n\t\traise NotImplementedError('AbstractDatum: This method should have been implemented by a sublcass')\n\nclass StringDatum(AbstractDatum):\n\tdef __init__(self, field):\n\t\tself.values = []\n\t\tAbstractDatum.__init__(self, field)\n\t\tself.check()\n\t\t#calculate CDF if necessary\n\t\tself.values = [] # list will be sorted by cumulative probability\n\t\tif type(self.field['values']) == dict:\n\t\t\tcsum = 0\n\t\t\tfor key in self.field['values']:\n\t\t\t\tprob = self.field['values'][key]\n\t\t\t\tcsum += prob\n\t\t\t\tentry = {}\n\t\t\t\tentry['key'] = key\n\t\t\t\tentry['prob'] = csum\n\t\t\t\tself.values.append(entry)\t\t\t\n\t\t\n\tdef check(self):\n\t\tself.check_for_key('type')\n\t\tself.check_for_key('values')\n\t\tassert (self.field['type'] == 'string')\n\t\tval_type = type(self.field['values']) \n\t\tassert (val_type == list or val_type == dict)\n\t\t\n\tdef generate(self, rand):\n\t\tif type(self.field['values']) == list:\n\t\t\tnum_items = len(self.field['values'])\n\t\t\tindex = rand.randint(0, num_items - 1)\n\t\t\treturn self.field['values'][index]\n\t\telif type(self.field['values']) == dict:\n\t\t\tval = random.random()\n\t\t\tfor i in range(len(self.values)):\n\t\t\t\tif val < self.values[i]['prob']:\n\t\t\t\t\treturn self.values[i]['key']\t\t\t\n\nclass NumberDatum(AbstractDatum):\n\tdef __init__(self, field):\n\t\tAbstractDatum.__init__(self, field)\n\t\tself.check()\t\t\t\n\t\t\n\tdef check(self):\n\t\tself.check_for_key('type')\n\t\tself.check_for_key('distribution')\n\t\tassert (self.field['type'] == 'int' or self.field['type'] == 'decimal')\n\t\tval_type = type(self.field['distribution']) \n\t\tassert val_type == str or val_type == unicode\n\t\td_type = self.field['distribution']\n\t\tif not (d_type == 'uniform' or d_type == 'exponential' or d_type == 'gaussian' or d_type == 'gamma'):\n\t\t\traise ValueError('Distribution can only be one of: uniform, exponential, gaussian, or gamma')\n\t\t\n\t\tself.a = 0\n\t\tself.b = 1\n\t\tself.lambd = 1\n\t\tself.mu = 0\n\t\tself.sigma = 1\n\t\tself.alpha = 1\n\t\tself.beta = 1\n\t\t\n\t\t\n\t\tif 'a' in self.field:\n\t\t\t\tself.a = self.field['a']\n\t\tif 'b' in 
self.field:\n\t\t\t\tself.b = self.field['b']\n\t\tif 'lambda' in self.field:\n\t\t\t\tself.lambd = self.field['lambda']\n\t\tif 'mu' in self.field:\n\t\t\t\tself.mu = self.field['mu']\n\t\tif 'sigma' in self.field:\n\t\t\t\tself.sigma = self.field['sigma']\n\t\tif 'alpha' in self.field:\n\t\t\t\tself.alpha = self.field['alpha']\n\t\tif 'beta' in self.field:\n\t\t\t\tself.beta = self.field['beta']\n\t\t\n\t\t\n\t\t\n\tdef generate(self, rand):\n\t\tdistribution = self.field['distribution']\n\t\tnum = 0\n\t\tif distribution == 'uniform':\n\t\t\tnum = rand.uniform(self.a, self.b)\n\t\telif distribution == 'exponential':\n\t\t\tnum = rand.expovariate(self.lambd)\n\t\telif distribution == 'gaussian':\n\t\t\tnum = rand.gauss(self.mu, self.sigma)\n\t\telif distribution == 'gamma':\n\t\t\tnum = rand.gammavariate(self.alpha, self.beta)\n\t\t\t\n\t\treturn num\n\nclass DecimalDatum(NumberDatum):\n\tdef __init__(self, field):\n\t\tNumberDatum.__init__(self, field)\n\t\t\nclass IntDatum(NumberDatum):\n\t\n\tdef __init__(self, field):\n\t\tNumberDatum.__init__(self, field)\n\t\t\n\tdef generate(self, rand):\n\t\treturn int(round(NumberDatum.generate(self, rand)))\n\nclass MapDatum(AbstractDatum):\n\t\n\tdef __init__(self, field):\n\t\tAbstractDatum.__init__(self, field)\n\t\tself.field = field\n\t\tself.check()\n\t\n\tdef check(self):\n\t\tself.check_for_key('map')\n\t\tself.check_for_key('mapFromField')\n\t\tif not type(self.field['map']) == dict:\n\t\t\traise ValueError('Expected map key to be a dict object')\n\t\tif not (type(self.field['mapFromField']) == str or type(self.field['mapFromField']) == unicode):\n\t\t\traise ValueError('Expected mapFromField key to be a dict object')\n\t\t\t\n\t\tself.maps = self.field['map']\n\t\tself.map_from = str(self.field['mapFromField'])\n\t\t\n\tdef generate(self, rand, data):\n\t\t\n\t\tif not self.map_from in data:\n\t\t\traise ValueError('Could not get key: ' + self.map_from + ' in data')\n\t\t\t\n\t\tkey = data[self.map_from] # Get data from the map_from field\n\t\t\n\t\ttry:\n\t\t\treturn self.maps[key] # Get the mapped value from the given key\n\t\texcept KeyError as e:\n\t\t\treturn ''\nclass BooleanDatum(AbstractDatum):\n\t\n\tdef __init__(self, field):\n\t\tAbstractDatum.__init__(self, field)\n\t\tself.field = field\n\t\tself.check()\n\t\t# Create CDF\n\t\tif 'values' in self.field:\n\t\t\tself.cdf_cutoff = self.field['values']['True']\n\t\telse:\n\t\t\tself.cdf_cutoff = 0.5\n\t\n\tdef check(self):\n\t\tif 'values' in self.field:\n\t\t\tassert type(self.field['values']) == dict\n\t\t\tvals = self.field['values']\n\t\t\tassert 'True' in vals, 'True must in values'\n\t\t\tassert 'False' in vals, 'False must in values'\n\t\t\tassert vals['True'] + vals['False'] == 1.0, 'Probabilities must equal 1.0'\n\t\t\t\n\t\t\n\tdef generate(self, rand):\n\t\tval = rand.random()\n\t\tif val < self.cdf_cutoff:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t","sub_path":"demo_utils/demo_utils/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"470818509","text":"import requests\nwith open ('dataset_3378_2.txt', 'r') as inf:\n url = inf.readline().strip()\ni = 0\nfile=requests.get(url)\nfor line in file.text.splitlines():\n i += 1\nprint(i)\nwith open ('fin.txt', 'w') as ouf:\n 
ouf.write(str(i))","sub_path":"block_03/3.6/3.6_1.py","file_name":"3.6_1.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"576847677","text":"import os\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n checkpoint_base = '/home/victorhuang/Desktop/MLDS2018SPRING/hw1/models/saved/1-2-2/'\n checkpoint_filenames = sorted(os.listdir(checkpoint_base))\n color_list = ['r', 'b']\n plt.figure(figsize=(12, 9))\n for i, (filename, color) in enumerate(zip(checkpoint_filenames, color_list)):\n checkpoint = torch.load(os.path.join(checkpoint_base, filename))\n logger = checkpoint['logger']\n x = [entry['epoch'] for _, entry in logger.entries.items()]\n y = [entry['loss'] for _, entry in logger.entries.items()]\n x = x[2:int(checkpoint['epoch'] * 0.6)]\n y = y[2:int(checkpoint['epoch'] * 0.6)]\n plt.subplot(220 + i + 1)\n plt.title('' + ' loss')\n plt.plot(x, y, color, label='loss')\n plt.legend(loc=\"best\")\n\n x = [entry['epoch'] for _, entry in logger.entries.items()]\n y = [entry['grad_norm'] for _, entry in logger.entries.items()]\n x = x[2:int(checkpoint['epoch'] * 0.6)]\n y = y[2:int(checkpoint['epoch'] * 0.6)]\n plt.subplot(220 + i + 3)\n plt.title('' + ' gradient norm')\n plt.plot(x, y, color, label='grad_norm')\n plt.legend(loc=\"best\")\n\n plt.tight_layout()\n plt.show()\n","sub_path":"hw1/vis/hw1_2/plot_grad_norm.py","file_name":"plot_grad_norm.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"577789506","text":"from django.contrib import admin\n\nfrom Gym_app.models import Exercise\nfrom Gym_app.models import ExerciseInPlan\nfrom Gym_app.models import Goal\nfrom Gym_app.models import GoalRecord\nfrom Gym_app.models import GroupClass\nfrom Gym_app.models import ClassInSchedule\nfrom Gym_app.models import ClassInformation\n\n# Register your models here.\nfrom Gym_app.models import PartialGoal\nfrom Gym_app.models import TrainingPlan\nfrom Gym_app.models.classes_models import PersonalTraining\n\n\nclass ClassModelAdmin(admin.ModelAdmin):\n list_display = ('date', 'class_in_schedule')\n\n\nadmin.site.register(GroupClass, ClassModelAdmin)\n\n\nclass ClassInScheduleModelAdmin(admin.ModelAdmin):\n list_display = ('day', 'start_time', 'end_time', 'class_type')\n\n\nadmin.site.register(ClassInSchedule, ClassInScheduleModelAdmin)\n\n\nclass ClassInformationModelAdmin(admin.ModelAdmin):\n list_display = ('name', 'description')\n\n\nadmin.site.register(ClassInformation, ClassInformationModelAdmin)\n\n\nclass PersonalTrainingModelAdmin(admin.ModelAdmin):\n list_display = ('date', 'class_in_schedule')\n\n\nadmin.site.register(PersonalTraining, PersonalTrainingModelAdmin)\n\n\nclass ExerciseInPlanModelAdmin(admin.ModelAdmin):\n list_display = ('plan', 'exercise','day','order','repetitions')\n\n\nadmin.site.register(ExerciseInPlan, ExerciseInPlanModelAdmin)\n\nclass TrainingPlanModelAdmin(admin.ModelAdmin):\n list_display = ('trainer', 'description')\n\nadmin.site.register(TrainingPlan, TrainingPlanModelAdmin)\n\n\nclass ExerciseModelAdmin(admin.ModelAdmin):\n list_display = ('name', 'description', 'url')\n\nadmin.site.register(Exercise, ExerciseModelAdmin)\n\n\nclass GoalModelAdmin(admin.ModelAdmin):\n list_display = ('member', 'name')\n\nadmin.site.register(Goal, GoalModelAdmin)\n\n\nclass PartialGoalModelAdmin(admin.ModelAdmin):\n list_display = ('goal', 
'value')\n\nadmin.site.register(PartialGoal, PartialGoalModelAdmin)\n\n\nclass GoalRecordModelAdmin(admin.ModelAdmin):\n list_display = ('goal', 'date', 'value')\n\nadmin.site.register(GoalRecord, GoalRecordModelAdmin)\n","sub_path":"Gym_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"281299214","text":"import shutil\nimport os\n\nfrom operator import itemgetter\n# import predict_image\nfrom PIL import Image\nfrom yolo import YOLO\n\n# helper function to calculate IoU\ndef iou(box1, box2):\n x11, y11, x12, y12 = box1\n x21, y21, x22, y22 = box2\n w1, h1 = x12 - x11, y12 - y11\n w2, h2 = x22 - x21, y22 - y21\n\n area1, area2 = w1 * h1, w2 * h2\n xi1, yi1, xi2, yi2 = max([x11, x21]), max([y11, y21]), min([x12, x22]), min([y12, y22])\n\n if xi2 <= xi1 or yi2 <= yi1:\n return 0\n else:\n intersect = (xi2 - xi1) * (yi2 - yi1)\n union = area1 + area2 - intersect\n return intersect / union\n\ndef non_max_suppression(boxes, overlap_th=0.6):\n # left, top, right, bottom, predicted_class, score\n # xmin, ymin, xmax, ymax, score, class\n boxes = sorted(boxes, key=itemgetter(5), reverse=True)\n idx = 0\n skip_idx = []\n\n while(idx < len(boxes)-1):\n if idx in skip_idx:\n idx += 1\n continue\n for i in range(idx+1, len(boxes)):\n if boxes[idx][4] == boxes[i][4] and iou(boxes[idx][:4], boxes[i][:4]) > overlap_th:\n skip_idx.append(i)\n idx += 1\n\n boxes_n = []\n for i, box in enumerate(boxes):\n if i not in skip_idx:\n boxes_n.append(boxes[i])\n\n return boxes_n\n\ndef predict_yolo(file_list, model_path, anchor_path, classes, gpu_num, sav_dir):\n yolo = YOLO(model_path=model_path, anchors_path=anchor_path, classes_path=classes, gpu_num=gpu_num, image=True, model_image_size=(320, 320), score=0.0)\n\n for file_path in file_list:\n img_path = file_path\n image = Image.open(img_path)\n bbox_list = yolo.detect_bbox(image)\n # left, top, right, bottom, predicted_class, score\n bbox_list = non_max_suppression(bbox_list, overlap_th=0.6)\n\n img_name = os.path.basename(img_path)\n name, ext = os.path.splitext(img_name)\n\n with open(os.path.join(sav_dir, name+'.txt'), 'w') as f:\n for bbox in bbox_list:\n left, top, right, bottom, predicted_class, score = bbox\n print(bbox)\n f.write(predicted_class + ' ' + str(score) + ' ' + str(left) + ' ' + str(top) + ' ' + str(right) + ' ' + str(bottom) + '\\n')\n\n yolo.close_session()\n\ndef predict_retinanet(file_list, model_path, sav_dir):\n import predict_image\n all_bbox_list = predict_image.predict(file_list, model_path, score=0.0)\n for i, bbox_list in enumerate(all_bbox_list):\n bbox_list = non_max_suppression(bbox_list, overlap_th=0.6)\n img_name = os.path.basename(file_list[i])\n name, ext = os.path.splitext(img_name)\n\n with open(os.path.join(sav_dir, name+'.txt'), 'w') as f:\n for bbox in bbox_list:\n left, top, right, bottom, predicted_class, score = bbox\n print(bbox)\n f.write(predicted_class + ' ' + str(score) + ' ' + str(left) + ' ' + str(top) + ' ' + str(right) + ' ' + str(bottom) + '\\n')\n\n\ndef copy_val_images(annt_path, img_dir, sav_dir):\n max_no = 1000\n curr_no = 1\n annotation_datas = {}\n\n with open(annt_path, 'r') as f:\n lines = f.readlines()\n img_name = ''\n\n for line in lines:\n if curr_no > max_no:\n break\n\n if line.find('--') > -1:\n img_path = os.path.join(img_dir, line.replace('\\n', ''))\n img_name = os.path.basename(img_path)\n\n annotation_datas[img_name] = {'img_name': img_name, 'object': [], 
'img_path': img_path}\n curr_no += 1\n else:\n if len(line.split(' ')) > 1:\n x1 = int(line.split(' ')[0])\n y1 = int(line.split(' ')[1])\n x2 = int(line.split(' ')[2]) + x1\n y2 = int(line.split(' ')[3]) + y1\n if y2 > y1 and x2 > x1:\n annotation_datas[img_name]['object'].append(\n {'dtt_box_xmin': x1, 'dtt_box_xmax': x2, 'dtt_box_ymin': y1, 'dtt_box_ymax': y2,\n 'dtt_label_name': 'face'})\n\n for img_name in annotation_datas:\n annt = annotation_datas[img_name]\n img_path = annt['img_path']\n shutil.copy(img_path, os.path.join(sav_dir, img_name))\n\nif __name__ == '__main__':\n img_dir = '/woo/dev3/face/images'\n # 1. copy 1000 images\n # copy_val_images('dataset/wider_face_train_bbx_gt.txt', '/woo/dev3/large_data/WIDER/WIDER_train/images',\n # img_dir)\n\n file_list = os.listdir(img_dir)\n for i, file_nm in enumerate(file_list):\n file_list[i] = os.path.join(img_dir, file_nm)\n\n # 2-1. Predict YOLOv3 model#1\n #\n # model_path = '/woo/dev3/new_model/yolo_v3/logs/000/trained_weights_final.h5'\n # anchor_path = '/woo/dev3/new_model/yolo_v3/model_data/yolo_anchors.txt'\n # classes = '/woo/dev3/new_model/yolo_v3/model_data/face_classes.txt'\n # sav_dir = '/woo/dev3/face/output/yolov3-1'\n #\n # predict_yolo(file_list, model_path, anchor_path, classes, 0, sav_dir)\n\n # 2-2. Predict YOLOv3 model#2\n #\n model_path = '/woo/dev3/new_model/yolo_v3/logs/anchor/trained_weights_final.h5'\n anchor_path = '/woo/dev3/new_model/yolo_v3/model_data/face_anchors.txt'\n classes = '/woo/dev3/new_model/yolo_v3/model_data/face_classes.txt'\n sav_dir = '/woo/dev3/face/output/yolov3-2'\n\n predict_yolo(file_list, model_path, anchor_path, classes, 0, sav_dir)\n\n # 3-1. Predict Retinanet model#1\n # model_path = 'snapshot_basic/se_resnext101_csv_12.h5'\n # sav_dir = '/woo/dev3/face/output/retinanet-1'\n # predict_retinanet(file_list, model_path, sav_dir)","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"410051881","text":"def main():\n appName = 'Shares Application'\n print(appName)\n\n deal = Deal()\n deal.shareTicker = getTicker()\n deal.sharePrice = getTickerPrice(deal.shareTicker);\n deal.quantity = getQuantity()\n\n print('Ticker ',deal.shareTicker)\n print('Quantity Pay ',deal.quantity)\n print('Share Price',deal.sharePrice)\n print('Deal Cost ',deal.quantity*deal.sharePrice/100.0)\n print('Commission ',getCommission(deal))\n\ndef getCommission(d):\n if (d.quantity*d.sharePrice/100*0.01 < 10):\n return 10\n else:\n return 10 + (d.quantity*d.sharePrice/100-1000)*0.005\n \n\ndef getTickerPrice(ticker):\n tickers = ['BAR', 'VOD', 'SHE','ORG','APP','ARM','INT','ZZZ']\n livePrices = [180.92, 172.45,451.78,328.11,4861.1,609.5,3291.0,100.0]\n tickerIndex = 0;\n \n for t in tickers:\n if t == ticker:\n return livePrices[tickerIndex]\n \n tickerIndex = tickerIndex +1\n \n \ndef getQuantity():\n qt = 0\n while (qt < 1 or qt > 2000):\n qt = int(input(\"enter quantity? \"))\n if (qt < 1 or qt > 2000):\n print ('Enter Quanity BETWEEN 1 and 2000')\n \n return qt\n\ndef getTicker():\n ticker = input(\"enter ticker? 
\")\n return ticker\n\nclass Deal:\n def __init__(self):\n self.quantity = 0\n self.shareTicker = \"NULL\"\n self.sharePrice = 0\n \nif __name__ == '__main__':\n main()\n\n","sub_path":"plotting/Exchange-V3.py","file_name":"Exchange-V3.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"310677531","text":"# P R E - P R O C E S S I N G\n\nimport cv2\n \nmser = cv2.MSER_create()\n#img = cv2.imread('signboard.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nvis = img.copy()\nregions, _ = mser.detectRegions(gray)\nhulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]\ncv2.polylines(vis, hulls, 1, (0, 255, 0))\ncv2.imshow('img', vis)\nif cv2.waitKey(0) == 9:\n cv2.destroyAllWindows()\n \nmask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)\nfor contour in hulls:\n cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)\n \ntext_only = cv2.bitwise_and(img, img, mask=mask)\n ","sub_path":"PreProcessing.py","file_name":"PreProcessing.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619630835","text":"##\n## Programación en Python\n## ===========================================================================\n##\n## Para el archivo `data.csv, imprima una tabla en formato CSV que contenga \n## la cantidad de registros en que aparece cada clave de la columna 5.\n##\n## Rta/\n## aaa,13\n## bbb,16\n## ccc,23\n## ddd,23\n## eee,15\n## fff,20\n## ggg,13\n## hhh,16\n## iii,18\n## jjj,18\n##\n## >>> Escriba su codigo a partir de este punto <<<\n##\n\n\nimport pandas as pd\n\ndf = pd.read_csv('vagrant4docker/laboratorios-de-programacion-acadavidvunal/03-python=1/q09=1/data.csv',sep='\\t',header=None)\nd = []\nprint(df[4])\nfor row in df[4]:\n for val in row.split(','):\n d.append(val.split(':'))\ndf_v = pd.DataFrame(d, columns=['col1', 'col2'])\ncount_val = df_v['col1'].value_counts().sort_index()\nfor index, row in count_val.items():\n print(str(index) + ',' + str(row))\n\n\n","sub_path":"03-python=1/q09=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"222957233","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 14:12:27 2021\n\n@author: 10695421999\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 14:03:49 2021\n\n@author: 10695421999\n\"\"\"\n\nimport numpy as np\nimport time\nfrom math import sqrt\nfrom math import pi\nfrom math import sin\nfrom math import cos\nimport matplotlib.pyplot as plt\n# from mpmath import *\nfrom numpy import random\n# mp.dps = 25; mp.pretty = True\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 10 16:05:14 2021\n\n@author: 10695421999\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 10 15:12:28 2021\n\n@author: 10695421999\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 9 14:29:01 2021\n\n@author: 10695421999\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 21 08:11:39 2021\n\n@author: 10695421999\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 20 10:18:10 2021\n\n@author: 10695421999\n\"\"\"\n\nnp.random.seed(1)\n\nprint(\"começou\\n\")\ntempo_inicial = time.time()\n#####################################\n# PRIMEIRA PARTE DO CÓGIGO:\n# INICIALIZAÇÃO\n#####################################\n# PARAMETROS DA SIMULAÇÃO\ndt = 1e-5\nts = 
0.0001\nrazao_ts_dt = round(ts/dt)\ntmax = 12-dt\nnpontos = round(tmax/dt)\ni = 0\n\n# # #PARAMETROS DA TESE DO CAVALCA\n\n# Rs = 2.50\n# Rr = 2.24\n# Ls = 0.288\n# Lr = 0.288\n# Lm = 0.27\n# jm = 0.0135\n# bm = 0.0027\n# vrms = 310\n\n# parametros vanessa\nRs = 20.6\nRr = 20.6\nLs = 0.82425\nLr = 0.82425\nLm = 0.78\njm = 0.012\nbm = 0.00881\nvrms = 192\n\nP = 4 # POLOS OU PAR DE POLOS?\nPd2 = P/2\n# Ls = Lm+lls\n# Lr = Lm+llr\nsig = Lr*Ls-Lm**2\n\n# VARIÁVEIS\nvas = 0\nvbs = 0\nvcs = 0\nvalphas = 0\nvbetas = 0\n\nvar = 0\nvbr = 0\nvcr = 0\nvalphar = 0\nvbetar = 0\n\nfas = 0\nfbs = 0\nfcs = 0\nfalphas = 0\nfbetas = 0\n\nfar = 0\nfbr = 0\nfcr = 0\nfalphar = 0\nfbetar = 0\n\nias = 0\nibs = 0\nics = 0\nialphas = 0\nibetas = 0\nids = 0\niqs = 0\nizs = 0\n\niar = 0\nibr = 0\nicr = 0\nialphar = 0\nibetar = 0\nidr = 0\niqr = 0\n\nwe = 0\nwr = 0\nwm = 0\nte = 0\ntl = 0\n# tl = tb\noe = 0\nom = 0\n#####################\n# CÁLCULO DAS TENSÕES DO INVERSOR (IGUAL AO CÓDIGO DA SABRINA)\ndef inversor_ideal(s1, s2, s3, vdc):\n vas = (2.0/3.0*s1-1.0/3.0*s2-1.0/3.0*s3)*vdc\n vbs = (2.0/3.0*s2-1.0/3.0*s1-1.0/3.0*s3)*vdc\n vcs = (2.0/3.0*s3-1.0/3.0*s2-1.0/3.0*s1)*vdc\n return vas, vbs, vcs\n\n# ROTINA PARA O CHAVEAMENTO (IGUAL AO CÓDIGO DA SABRINA)\ndef chaveamento(indice_min):\n if indice_min == 1:\n sw1 = 1\n sw2 = 0\n sw3 = 0\n elif indice_min == 2:\n sw1 = 0\n sw2 = 1\n sw3 = 0\n elif indice_min == 3:\n sw1 = 1\n sw2 = 1\n sw3 = 0\n elif indice_min == 4:\n sw1 = 0\n sw2 = 0\n sw3 = 1\n elif indice_min == 5:\n sw1 = 1\n sw2 = 0\n sw3 = 1\n elif indice_min == 6:\n sw1 = 0\n sw2 = 1\n sw3 = 1\n else:\n sw1 = 0\n sw2 = 0\n sw3 = 0\n return sw1, sw2, sw3\n\n\ndef abg_transform(a, b, c):\n alpha = (2*a-b-c)/3\n beta = sqrt(3)*(b-c)/3\n return alpha, beta\n\n\ndef inv_ab_transf(alpha, beta):\n a = alpha\n b = -alpha/2+beta*sqrt(3)/2\n c = -alpha/2-beta*sqrt(3)/2\n return a, b, c\n\n\ndef dq_transf(a, b, c, oe):\n d = cos(oe)*a+cos(oe-2*pi/3)*b+cos(oe+2*pi/3)*c\n q = sin(oe)*a+sin(oe-2*pi/3)*b+sin(oe+2*pi/3)*c\n # z=(a+b+c)/3\n return 2/3*d, 2/3*q\n\n\ndef invdq_transf(q, d, oe):\n a = sin(oe)*d + cos(oe)*q\n b = sin(oe-2*pi/3)*d + cos(oe-2*pi/3)*q\n c = sin(oe+2*pi/3)*d + cos(oe+2*pi/3)*q\n return a, b, c\n\n# INICIALIZAÇÃO DOS VETORES PARA O ARMEZENAMENTO DA INFORMAÇÃO\n\n\nVas = np.zeros(npontos)\nVbs = np.zeros(npontos)\nVcs = np.zeros(npontos)\nValphas = np.zeros(npontos)\nVbetas = np.zeros(npontos)\n\nVar = np.zeros(npontos)\nVbr = np.zeros(npontos)\nVcr = np.zeros(npontos)\nValphar = np.zeros(npontos)\nVbetar = np.zeros(npontos)\n\nFas = np.zeros(npontos)\nFbs = np.zeros(npontos)\nFcs = np.zeros(npontos)\nFalphas = np.zeros(npontos)\nFbetas = np.zeros(npontos)\n\nFar = np.zeros(npontos)\nFbr = np.zeros(npontos)\nFcr = np.zeros(npontos)\nFalphar = np.zeros(npontos)\nFbetar = np.zeros(npontos)\n\nIas = np.zeros(npontos)\nIbs = np.zeros(npontos)\nIcs = np.zeros(npontos)\nIalphas = np.zeros(npontos)\nIbetas = np.zeros(npontos)\nIds = np.zeros(npontos)\nIqs = np.zeros(npontos)\nIzs = np.zeros(npontos)\n\nIar = np.zeros(npontos)\nIbr = np.zeros(npontos)\nIcr = np.zeros(npontos)\nIalphar = np.zeros(npontos)\nIbetar = np.zeros(npontos)\n\nWe = np.zeros(npontos)\nWr = np.zeros(npontos)\nWm = np.zeros(npontos)\nTe = np.zeros(npontos)\nTl = np.zeros(npontos)\nOe = np.zeros(npontos)\nOm = np.zeros(npontos)\nOm_ref = np.zeros(npontos)\n\nT = np.zeros(npontos)\nSa = np.zeros(npontos)\nSb = np.zeros(npontos)\nSc = np.zeros(npontos)\nVrms = np.zeros(npontos)\nWb = np.zeros(npontos)\nWref = 
np.zeros(npontos)\n\nIalphas_ref = np.zeros(npontos)\nIbetas_ref = np.zeros(npontos)\n\n# MAIS ALGUMAS VARIAVEIS ALTERADAS COM MAIS FREQUÊNCIA\nwrefr = 90 # 1500*pi/30\nwref = 0\n\nfor j in range(1, npontos):\n\n t = j*dt\n wref = wrefr*t/(6)\n # if t>=3.0:\n # wref=0.333*wrefr\n # elif t>=2.8:\n # wref=0\n # elif t>=1.6:\n # wref=2*wrefr\n # elif t>=1.4:\n # wref=0\n # elif t>=1.0:\n # wref=wrefr\n # elif t>=0.9:\n # wref=0\n # elif t>=0.6:\n # wref=wrefr-wrefr*(t-0.6)/(0.3)\n # el\n if (t >= 6):\n wref = wrefr\n Wref[j] = wref\n\nj = 0\ntl = 0\nsa = 0\nsb = 0\nsc = 0\n\nvdc = 160 # 220*sqrt(2)*sqrt(3)\nflag = 5\nJmin = 1e20\nindice_min = (0,0,0)\nflag2 = 0\nn = 1\ntl_est=0\nialphar_est = 0\nibetar_est = 0 \neas_a = 0\nebs_a = 0\n# ki=0.0001\n# kp=0.005\nki=0.05\nkp=0.4\nfalphas_c0 = 0\nfbetas_c0 = 0\nfalpha_ref = 0.75 \nea = 0\n\n# Kp_vel=0.0436\n# Ki_vel=0.19\nKp_vel=0.05\nKi_vel=0.4\nFlux_r_ref=0.5\nTe_max=8\nInt_er_vel = 0\ntetar = 0\nias_ref = 0\nibs_ref = 0\nprint(\"inicialização concluída\\n\")\n##############################################################################\n#####################################\n# SEGUNDA PARTE DO CÓGIGO:\n# SIMULAÇÃO\n#####################################\nfor t_int in range(npontos):\n t = t_int*dt\n ##########################################################\n ############################\n if t >= 0.7*tmax:\n tl = 0.5 # 0.5\n ##\n # ALIMENTAÇÃO DO SISTEMA\n if flag >= razao_ts_dt: # TESTE DO TEMPO DE CONTROLE\n Jmin = 10000000000000000000000\n wm_medido = wm + 2*(-0.5+random.random())\n ialphas_medido = ialphas + 0.2*(random.random()-0.5)\n ibetas_medido = ibetas + 0.2*(random.random()-0.5)\n ialphar_medido = ialphar_est #+ 0.05*(random.random()-0.5)\n ibetar_medido = ibetar_est #+ 0.05*(random.random()-0.5)\n # tl_est=tl_est+1*ts*(Wref[i]-wm_medido)\n\n er_vel = Wref[i]-wm_medido\n Int_er_vel=Int_er_vel + ts*er_vel\n Te_ref=Kp_vel*er_vel + Ki_vel*Int_er_vel\n abs_Te_ref = abs(Te_ref)\n if (abs_Te_ref > Te_max):\n Te_ref=Te_max*Te_ref/abs_Te_ref\n Int_er_vel=Int_er_vel - ts*er_vel\n Isd_ref = (1.0/Lm)*Flux_r_ref\n Isq_ref = Te_ref/((3.0/2.0)*(P/2.0)*(Lm/Lr)*Flux_r_ref)\n tetar = tetar + ts*(wm_medido + (Isq_ref/Isd_ref)*(Rr/Lr))\n while (tetar > pi):\n tetar = tetar-2*pi\n while (tetar < -pi):\n tetar = tetar+2*pi\n\n ias_ref = Isd_ref*cos(tetar) - Isq_ref*sin(tetar)\n ibs_ref = Isd_ref*sin(tetar) + Isq_ref*cos(tetar)\n\n falphas_c0 = Ls*ialphas_medido + Lm*ialphar_medido\n fbetas_c0 = Ls*ibetas_medido + Lm*ibetar_medido\n falphar_c0 = Lr*ialphar_medido + Lm*ialphas_medido\n fbetar_c0 = Lr*ibetar_medido + Lm*ibetas_medido\n falphar_c1 = falphar_c0 - Rr*ts*ialphar_medido - ts*wm_medido*fbetar_c0\n fbetar_c1 = fbetar_c0 - Rr*ts*ibetar_medido + ts*wm_medido*falphar_c0\n\n Dict = {1: (sa, sb, sc), 2: (abs(sa-1), sb, sc), 3: (sa, abs(sb-1), sc), 4: (sa, sb, abs(sc-1))}\n for p in Dict:\n sa_c1,sb_c1,sc_c1 = Dict[p]\n valphas_c = 1/3*(-sc_c1-sb_c1+2*sa_c1)*vdc\n vbetas_c = (sb_c1-sc_c1)*vdc/sqrt(3)\n \n falphas_c1 = falphas_c0 - Rs*ts*ialphas_medido + ts*valphas_c\n fbetas_c1 = fbetas_c0 - Rs*ts*ibetas_medido + ts*vbetas_c\n ialphas_c1 = (Lr*falphas_c1- Lm*falphar_c1)/sig\n ibetas_c1 = (Lr*fbetas_c1 - Lm*fbetar_c1 )/sig\n \n Jc = (ias_ref-ialphas_c1)**2 + (ibs_ref-ibetas_c1)**2\n if Jc <= Jmin:\n Jmin = Jc\n indice_min = sa_c1, sb_c1, sc_c1\n ialphar_est = (Ls*falphar_c1- Lm*falphas_c1)/sig\n ibetar_est = (Ls*fbetar_c1 - Lm*fbetas_c1 )/sig\n\n \n # ENTRADA DA ALIMENTAÇÃO DO SISTEMA, QUE VEM DO CONTROLE\n sa, sb, sc = indice_min\n vas, vbs, vcs = 
inversor_ideal(sa, sb, sc, vdc)\n # vas, vbs, vcs = 311*cos(377*t),311*cos(377*t-2*pi/3),311*cos(377*t+2*pi/3)\n valphas = 2/3*(vas-1/2*vbs-1/2*vcs)\n vbetas = 2/3*(sqrt(3)/2*vbs-sqrt(3)/2*vcs)\n valphar = 0\n vbetar = 0\n\n flag = 0\n if flag2 >= npontos*0.05*n/razao_ts_dt:\n print(n*5, \"% concluído\\n\")\n n += 1\n flag2 += 1\n \n falphas = falphas -Rs*dt*ialphas + dt*valphas\n fbetas = fbetas -Rs*dt*ibetas + dt*vbetas\n temp = falphar\n falphar = falphar -Rr*dt*ialphar - dt*wm*fbetar\n fbetar = fbetar -Rr*dt*ibetar + dt*wm*temp\n\n ialphas = (Lr*falphas- Lm*falphar)/sig\n ibetas = (Lr*fbetas - Lm*fbetar )/sig\n ialphar = (Ls*falphar- Lm*falphas)/sig\n ibetar = (Ls*fbetar - Lm*fbetas )/sig\n\n \n wm = (1-dt*bm/jm)*wm + (dt/jm)*(te-tl)\n \n te = 3/2*P*Lm*(ialphar*ibetas-ibetar*ialphas)\n \n fas,fbs,fcs = inv_ab_transf(falphas, fbetas)\n far,fbr,fcr = inv_ab_transf(falphar, fbetar)\n\n ias, ibs, ics = inv_ab_transf(ialphas, ibetas)\n iar, ibr, icr = inv_ab_transf(ialphar, ibetar)\n # FIM DA SIMULAÇÃO\n ##############################################\n # AS EXPRESSÕES A SEGUIR APENAS TRATAM AS VARIAVEIS PARA POSTERIOR LEITURA\n \n # fas = falphas\n # fbs = -1/2*falphas+sqrt(3)/2*fbetas\n # fcs = -1/2*falphas-sqrt(3)/2*fbetas\n\n # far = falphar*sin(-oe)+fbetar*cos(-oe)\n # fbr = falphar*sin(-oe-2*pi/3)+fbetar*cos(-oe-2*pi/3)\n # fcr = falphar*sin(-oe-4*pi/3)+fbetar*cos(-oe-4*pi/3)\n\n om = om+dt*wm\n if om > pi:\n om = om-2*pi\n if om < -pi:\n om = om+2*pi\n \n # if(wm>10000):\n # break\n ##########################################################################\n # LEITURA DAS VARIÁVEIS\n Ialphas_ref[i] = ias_ref\n Ibetas_ref [i] = ibs_ref\n Sa[i] = sa\n Sb[i] = sb\n Sc[i] = sc\n\n Vas[i] = vas\n Vbs[i] = vbs\n Vcs[i] = vcs\n Valphas[i] = valphas\n Vbetas[i] = vbetas\n\n Var[i] = var\n Vbr[i] = vbr\n Vcr[i] = vcr\n Valphar[i] = valphar\n Vbetar[i] = vbetar\n\n Fas[i] = fas\n Fbs[i] = fbs\n Fcs[i] = fcs\n Falphas[i] = falphas\n Fbetas[i] = fbetas\n\n Far[i] = far\n Fbr[i] = fbr\n Fcr[i] = fcr\n Falphar[i] = falphar\n Fbetar[i] = fbetar\n\n Ias[i] = ias\n Ibs[i] = ibs\n Ics[i] = ics\n Ialphas[i] = ialphas\n Ibetas[i] = ibetas\n Ids[i] = ids\n Iqs[i] = iqs\n Izs[i] = izs\n\n Iar[i] = iar\n Ibr[i] = ibr\n Icr[i] = icr\n Ialphar[i] = ialphar\n Ibetar[i] = ibetar\n\n We[i] = we\n Wr[i] = wr\n Wm[i] = wm\n Te[i] = te\n Tl[i] = tl\n Om[i] = om\n Om_ref[i] = tetar\n\n T[i] = t\n\n i = i+1\n flag += 1\n\nprint('demorou ', time.time()-tempo_inicial, ' s\\n\\n\\n')\n#####################################\n# TERCEIRA PARTE DO CÓGIGO:\n# PLOT E/OU SALVAR OS DADOS\n#####################################\n# np.savetxt('input4.dat',inputs)\n# np.savetxt('output4.dat',output)\n\n\nplt.rc('text', usetex=False)\n# plt.rc('font', family='serif')\nplt.close('all')\n\nplt.figure(1)\nplt.plot(T, Wm, color='C0', label=r\"$\\omega_m$\")\nplt.plot(T, Wref, color='k', label=r\"$\\omega_m^{*}$\")\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$\\omega_m (t)$ [rad/s]\")\nplt.title(r\"Velocidade Angular\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_velocidade.pdf\", format='pdf')\n\nplt.figure(2)\nplt.plot(T, Ias, color='C0', label=r'$i_{as}$')\nplt.plot(T, Ibs, color='C1', label=r'$i_{bs}$')\nplt.plot(T, Ics, color='C2', label=r'$i_{cs}$')\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$i(t)$[A]\")\nplt.title(r\"Corrente Estator\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_corrente.pdf\", format='pdf')\nplt.figure(3)\nplt.plot(T, Ialphas , color='C0', label=r'$i_{\\alpha s}$')\nplt.plot(T, Ialphas_ref, 
color='k',linestyle='--', label=r'$i_{\\alpha s}^{*}$')\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$i(t)$[A]\")\nplt.title(r\"Correntes Estator\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_alphascorrente.pdf\", format='pdf')\nplt.figure(4)\nplt.plot(T, Ibetas , color='C0', label=r'$i_{\\beta s}$')\nplt.plot(T, Ibetas_ref , color='k' ,linestyle='--', label=r'$i_{\\beta s}^{*}$')\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$i(t)$[A]\")\nplt.title(r\"Correntes Estator\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_betascorrente.pdf\", format='pdf')\nplt.figure(5)\nplt.plot(T, Ialphar, color='C0', label=r'$i_{\\alpha r}$')\nplt.plot(T, Ibetar, color='C1', label=r'$i_{\\beta r}$')\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$i(t)$[A]\")\nplt.title(r\"Correntes rotor\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_alpharcorrente.pdf\", format='pdf')\nplt.figure(6)\nplt.plot(T, Om, color='C0', label=r'$\\theta_{m}$')\nplt.plot(T, Om_ref, color='k',linestyle='--', label=r'$\\theta_{m}^{*}$')\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$\\theta_{m} (t)$[rad]\")\nplt.title(r\"Posição Angular\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_posicao.pdf\", format='pdf')\n\nplt.figure(7)\nplt.plot(T, Vas, color='C0', label=r'$v_{as}$')\nplt.plot(T, Vbs, color='C1', label=r'$v_{bs}$')\nplt.plot(T, Vcs, color='C2', label=r'$v_{cs}$')\nplt.xlabel(r\"$t$[s]\")\nplt.ylabel(r\"$v(t)$[V]\")\nplt.title(r\"Ação de Controle\")\nplt.grid()\nplt.legend()\n# plt.savefig(\"sim_mit_tensao.pdf\", format='pdf')\n\nplt.show()","sub_path":"simulacao_mit_fcs.py","file_name":"simulacao_mit_fcs.py","file_ext":"py","file_size_in_byte":13844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"641941294","text":"\"\"\"\nBaltics Crawler on top of base Spider.\n\nscrapy crawl baltics\n\"\"\"\nfrom datetime import datetime\nfrom urlparse import urljoin\n\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.spiders import Spider\nfrom scrapy.selector import Selector\nfrom scrapy.loader.processors import TakeFirst, Join, MapCompose\nfrom scrapy.loader import ItemLoader\n\nfrom companies.items import *\n\n\nclass Baltics(Spider):\n\n name = 'baltics'\n allowed_domains = ['business-baltics.com']\n start_urls = ['http://www.business-baltics.com/']\n\n def parse(self, response):\n sel = Selector(response)\n\n for url in sel.xpath('//h2/a/@href').extract():\n yield Request(url=urljoin(response.url, url))\n\n for url in sel.xpath('//a[@class=\"blue_list\"]/@href').extract():\n yield Request(url=urljoin(self.start_urls[0], url))\n\n for url in sel.xpath('//a[h2]/@href').extract():\n yield Request(\n url=urljoin(self.start_urls[0], url),\n callback=self.parse_company\n )\n\n next = sel.xpath('//a[text()=\">\"]/@href').extract()\n if next:\n yield Request(url=urljoin(self.start_urls[0], next[0]))\n\n def parse_company(self, response):\n \"\"\"\n Parses the company page by checking the presence of key words in the\n text and extracting them.\n \"\"\"\n sel = Selector(response)\n ci_loader = CompanyItemLoader(selector=sel)\n\n ci_loader.add_value(\"reference_id\", response.url,\n lambda x: \"%s-%s\" % (self.name, x[0]),\n re=r'/.*-(\\d+)')\n ci_loader.add_value(\"source_code\", self.name)\n ci_loader.add_xpath(\"name\", '//h1[@itemprop=\"name\"]/text()')\n ci_loader.add_value(\"created_ts\", str(datetime.now()))\n ci_loader.add_xpath('logo', '//div[@class=\"logo_wrap\"]/img/@src')\n ci_loader.add_xpath(\"phone\", '//div[@itemprop=\"telephone\"]/text()', MapCompose(lambda 
x: x.strip()))\n ci_loader.add_xpath(\"fax\", '//div[@itemprop=\"faxNumber\"]/text()', MapCompose(lambda x: x.strip()))\n ci_loader.add_xpath(\"url\", '//div[contains(div/img/@src, \"address_16\")]/div[2]/a/text()')\n ci_loader.add_value(\"address\", self.get_address(sel))\n ci_loader.add_value(\"firmographic\", self.get_firmographic(sel))\n ci_loader.add_value(\"misc\", {'source_url': response.url})\n ci_loader.add_xpath('description', '//h2[@itemprop=\"description\"]/text() | //div[@class=\"block_normal\" and span/text()=\"Presentation\"]/div/p/text()', Join())\n\n return ci_loader.load_item()\n\n def get_address(self, sel):\n al = AddressItemLoader(selector=sel)\n\n al.add_xpath('raw_address', '//div[contains(div/img/@src, \"address_16\")]/div[2]/span/text()')\n al.add_value('country', al.get_collected_values('raw_address'), re=r'.*,\\s(.*)$')\n\n yield al.load_item()\n\n def get_firmographic(self, sel):\n fl = FirmographicItemLoader(selector=sel)\n\n fl.add_xpath('industry', '//div[@class=\"half\"]/div/a/b/text()')\n\n return fl.load_item()\n","sub_path":"companies/companies/spiders/baltics.py","file_name":"baltics.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"172107820","text":"from django.urls import path, include\nfrom . import views\n\ninverted_index = 'inverted_index'\n\nurlpatterns = [\n path('test/', views.Test.as_view(), name='test'),\n path('indexer/', views.Indexer.as_view(), name='indexer'),\n path('query/', views.QueryEngine.as_view(), name='query_engine'),\n path('docs/', views.DocumentRetreival.as_view(), name='document_reteival'),\n]","sub_path":"DjangoApp/IRA1/inverted_index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"245949821","text":"# Time Complexity : O(n)[n = length of nums list]\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\nclass Solution: \n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n n = len(nums)\n if not nums or len(nums) == 1 or k % n == 0:return\n k = k % n\n \n def reverse(start, end):\n while start [7,6,5,4,3,2,1]\n reverse(0, n-1)\n \n # reverse the first k elemenst\n # [7,6,5,4,3,2,1] -> [5,6,7,4,3,2,1]\n reverse(0, k-1)\n \n # reverse the last n-k elements\n # [5,6,7,4,3,2,1] -> [5,6,7,1,2,3,4]\n reverse(k, n-1)","sub_path":"189_Rotate_Array.py","file_name":"189_Rotate_Array.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"561398800","text":"\n# PyQt\nfrom PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtCore as qtc\nfrom PyQt5 import QtGui as qtg\n\n# Built-in Modules\nimport sys\nimport time\n\n# External resources\nimport resources\n\n# Opening window\nclass WelcomeWindow(qtw.QDialog):\n\n new_book = qtc.pyqtSignal()\n open_existing = qtc.pyqtSignal(str)\n open_sample = qtc.pyqtSignal()\n closed = qtc.pyqtSignal()\n\n INIT_WIDTH = 960\n INIT_HEIGHT = 640\n\n def __init__(self, width, height, parent=None):\n super(WelcomeWindow, self).__init__(parent)\n \n self.setSizePolicy(\n qtw.QSizePolicy.Preferred,\n qtw.QSizePolicy.Preferred\n )\n # self.setModal(True)\n self.setWindowTitle('Welcome!')\n self.setFixedSize(WelcomeWindow.INIT_WIDTH, 
WelcomeWindow.INIT_HEIGHT)\n \n # self.background = qtg.QPixmap(':/background-images/welcome_background.png')\n self.background_movie = qtg.QMovie(':background-images/welcome_screen.gif')\n self.background_movie.setCacheMode(qtg.QMovie.CacheAll)\n self.background_movie.jumpToFrame(0)\n self.background_movie.setScaledSize(qtc.QSize(WelcomeWindow.INIT_WIDTH, WelcomeWindow.INIT_HEIGHT))\n # background_size = background_movie.currentImage().size()\n self.background_aspect = WelcomeWindow.INIT_WIDTH / WelcomeWindow.INIT_HEIGHT\n\n # self.background_label = qtw.QLabel()\n # self.background_label.setAlignment(qtc.Qt.AlignCenter)\n # self.resizeEvent()\n\n # self.background_label.setMovie(background_movie)\n self.background_movie.frameChanged.connect(self.paintNewFrame)\n self.background_movie.stateChanged.connect(self.loopMovie)\n self.background_movie.start()\n\n # self.background = self.background_label.grab()\n\n\n # Set up layout\n layout = qtw.QGridLayout()\n\n heading = qtw.QLabel('Fantasy Creator')\n heading.setAttribute(qtc.Qt.WA_TranslucentBackground)\n heading_font = qtg.QFont('Apple Chancery', 100, qtg.QFont.ExtraBold)\n heading.setFont(heading_font)\n heading.setAlignment(qtc.Qt.AlignCenter)\n heading.setStyleSheet(\"QLabel {color : #ebbc00}\")\n layout.addWidget(heading, 1, 1, 2, 6)\n\n options_font = qtg.QFont('Baskerville', 25)\n\n self.new_book_btn = qtw.QPushButton('New Book')\n self.new_book_btn.setFont(options_font)\n self.new_book_btn.clicked.connect(self.handleNewBook)\n layout.addWidget(self.new_book_btn, 3, 3, 1, 2)\n\n self.open_book_btn = qtw.QPushButton('Open Existing')\n self.open_book_btn.setFont(options_font)\n self.open_book_btn.clicked.connect(self.handleOpenBook)\n layout.addWidget(self.open_book_btn, 4, 3, 1, 2)\n\n self.open_sample_btn = qtw.QPushButton('Sample')\n self.open_sample_btn.setFont(options_font)\n self.open_sample_btn.clicked.connect(self.handleOpenSample)\n layout.addWidget(self.open_sample_btn, 5, 3, 1, 2)\n\n spacer = qtw.QSpacerItem(0, 0)\n layout.addItem(spacer, 7, 0, 1, 1)\n\n self.progress_bar = qtw.QProgressBar(self)\n self.progress_bar.setOrientation(qtc.Qt.Horizontal)\n self.progress_bar.setMinimum(0)\n self.progress_bar.setMaximum(8)\n self.current_progress = 0\n # self.progress_bar.setVisible(False)\n layout.addWidget(self.progress_bar, 2, 2, 1, 4)\n\n\n self.cancel = qtw.QPushButton(\n 'Exit',\n clicked=sys.exit\n )\n self.cancel.setFont(qtg.QFont('Baskerville', 18))\n layout.addWidget(self.cancel, 7, 7, 1, 1)\n\n for col in range(8):\n layout.setColumnStretch(col, 1)\n\n # layout.addWidget(self.background_label, 0, 0, 7, 7)\n self.setLayout(layout)\n self.progress_bar.setVisible(False)\n \n\n def launchApp(self, signal, args=None):\n self.progress_bar.setVisible(True)\n self.new_book_btn.setVisible(False)\n self.open_book_btn.setVisible(False)\n self.open_sample_btn.setVisible(False)\n self.cancel.setVisible(False)\n app = qtw.QApplication.instance()\n app.processEvents()\n if args:\n signal.emit(args)\n else:\n signal.emit()\n\n def incrementProgressBar(self):\n self.current_progress += 1\n self.progress_bar.setValue(self.current_progress)\n app = qtw.QApplication.instance()\n app.processEvents()\n \n\n def closeEvent(self, event):\n self.closed.emit()\n super(WelcomeWindow, self).closeEvent(event) \n \n def handleOpenBook(self):\n filename, _ = qtw.QFileDialog.getOpenFileName(\n self,\n \"Select a file to open...\",\n qtc.QDir.currentPath(), # static method returning user's home path\n 'JSON Files (*.json) ;;Text Files (*.txt) ;;All 
Files (*)',\n 'JSON Files (*.json)'\n )\n if filename:\n self.launchApp(self.open_existing, filename)\n\n def handleNewBook(self):\n self.launchApp(self.new_book)\n\n def handleOpenSample(self):\n self.launchApp(self.open_sample)\n \n # def resizeEvent(self, event):\n # bkgnd_img = self.background.scaled(self.size(), \n # qtc.Qt.IgnoreAspectRatio, qtc.Qt.SmoothTransformation)\n # palette = qtg.QPalette()\n # palette.setBrush(qtg.QPalette.Window, qtg.QBrush(bkgnd_img))\n # self.setPalette(palette)\n\n # super(WelcomeWindow, self).resizeEvent(event)\n\n # def resizeEvent(self, event=None):\n # rect = self.geometry()\n\n # background_movie = self.background_label.movie()\n # if background_movie:\n # width = rect.height() * self.background_aspect\n # if width <= rect.width():\n # size = qtc.QSize(width, rect.height())\n # else:\n # height = rect.width() / self.background_aspect\n # size = qtc.QSize(rect.width(), height)\n\n # background_movie.setScaledSize(size)\n\n # palette = qtg.QPalette()\n # palette.setBrush(qtg.QPalette.Window, qtg.QBrush(self.background))\n # self.setPalette(palette)\n\n # super(WelcomeWindow, self).resizeEvent(event)\n \n def paintEvent(self, event):\n current_frame = self.background_movie.currentPixmap()\n frame_rect = current_frame.rect()\n\n frame_rect.moveCenter(self.rect().center())\n if frame_rect.intersects(event.rect()):\n painter = qtg.QPainter(self)\n painter.drawPixmap(\n frame_rect.left(),\n frame_rect.top(),\n current_frame)\n \n def paintNewFrame(self, frame_num):\n # print(frame_num, self.background_movie.state())\n # # if self.background_movie.state() == qtg.QMovie.NotRunning:\n # # self.background_movie.start()\n self.repaint()\n \n def loopMovie(self, state):\n if state == qtg.QMovie.NotRunning:\n self.background_movie.start()\n","sub_path":"fantasycreator/welcomeWindow.py","file_name":"welcomeWindow.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"332663666","text":"\ndef predict_sales(item_id,outlet_id):\n import numpy as np\n import matplotlib.pyplot as plt\n import pandas as pd\n from sklearn.externals import joblib\n\n\n test = pd.read_csv('Test.csv')\n train=pd.read_csv('Train.csv')\n train['source']='train'\n test['source']='test'\n data = pd.concat([train, test],ignore_index=True)\n #check the missing values\n data.apply(lambda x: sum(x.isnull()))\n\n\n #handle the missing values\n data['Item_Weight'].fillna(data['Item_Weight'].mean(),inplace=True)\n data['Outlet_Size'].fillna(data['Outlet_Size'].mode()[0],inplace=True)\n\n\n\n\n data.apply(lambda x: len(x.unique()))\n\n\n #Years:\n data['Outlet_Years'] = 2013 - data['Outlet_Establishment_Year']\n data['Outlet_Years'].describe()\n\n\n\n #visibility\n data['Item_Visibility']=data['Item_Visibility'].replace(0,data['Item_Visibility'].mean())\n\n #combine into categories\n data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])\n\n data['Item_Type_Combined'] = data['Item_Type_Combined'].map({'FD':'Food',\n 'NC':'Non-Consumable',\n 'DR':'Drinks'})\n data['Item_Type_Combined'].value_counts()\n\n data['Item_Fat_Content']=data['Item_Fat_Content'].replace('LF',\"Low Fat\")\n data['Item_Fat_Content']=data['Item_Fat_Content'].replace('low fat',\"Low Fat\")\n data['Item_Fat_Content']=data['Item_Fat_Content'].replace('reg',\"Regular\")\n data['Item_Fat_Content'].value_counts()\n\n #Mark non-consumables as separate category in low_fat:\n 
data.loc[data['Item_Type_Combined']==\"Non-Consumable\",'Item_Fat_Content'] = \"Non-Edible\"\n data['Item_Fat_Content'].value_counts()\n\n\n from sklearn.preprocessing import LabelEncoder\n le = LabelEncoder()\n #New variable for outlet\n data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])\n var_mod = ['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Item_Type_Combined','Outlet_Type','Outlet']\n le = LabelEncoder()\n for i in var_mod:\n data[i] = le.fit_transform(data[i])\n\n #One Hot Coding:\n data = pd.get_dummies(data, columns=['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Outlet_Type',\n 'Item_Type_Combined','Outlet'])\n data.dtypes\n\n data[['Item_Fat_Content_0','Item_Fat_Content_1','Item_Fat_Content_2']].head(10)\n\n #Drop the columns which have been converted to different types:\n data.drop(['Item_Type','Outlet_Establishment_Year'],axis=1,inplace=True)\n\n #Divide into test and train:\n train = data.loc[data['source']==\"train\"]\n test = data.loc[data['source']==\"test\"]\n\n #Drop unnecessary columns:\n test.drop(['Item_Outlet_Sales','source'],axis=1,inplace=True)\n train.drop(['source'],axis=1,inplace=True)\n\n #Export files as modified versions:\n train.to_csv(\"train_modified.csv\",index=False)\n test.to_csv(\"test_modified.csv\",index=False)\n\n #Mean based:\n mean_sales = train['Item_Outlet_Sales'].mean()\n\n #Define a dataframe with IDs for submission:\n base1 = test[['Item_Identifier','Outlet_Identifier']]\n base1['Item_Outlet_Sales'] = mean_sales\n\n #Export submission file\n base1.to_csv(\"alg0.csv\",index=False)\n\n #Define target and ID columns:\n target = 'Item_Outlet_Sales'\n IDcol = ['Item_Identifier','Outlet_Identifier']\n from sklearn.model_selection import cross_val_score\n from sklearn.metrics import mean_squared_error\n #def modelfit(alg, dtrain, dtest, predictors, target, IDcol, filename):\n # #Fit the algorithm on the data\n # alg.fit(dtrain[predictors], dtrain[target])\n #\n # #Predict training set:\n # dtrain_predictions = alg.predict(dtrain[predictors])\n #\n # #Perform cross-validation:\n # cv_score = cross_val_score(alg, dtrain[predictors], dtrain[target], cv=20, scoring='neg_mean_squared_error')\n # cv_score = np.sqrt(np.abs(cv_score))\n #\n # #Print model report:\n # print (\"\\nModel Report\")\n # print (\"RMSE : %.4g\" % np.sqrt(mean_squared_error(dtrain[target].values, dtrain_predictions)))\n # print (\"CV Score : Mean - %.4g | Std - %.4g |\" % (np.mean(cv_score),np.std(cv_score)))\n #\n # #Predict on testing data:\n # dtest[target] = alg.predict(dtest[predictors])\n #\n # #Export submission file:\n # IDcol.append(target)\n # submission = pd.DataFrame({ x: dtest[x] for x in IDcol})\n # submission.to_csv(filename, index=False)\n\n\n #decision tree model\n from sklearn.tree import DecisionTreeRegressor\n predictors = [x for x in train.columns if x not in [target]+IDcol]\n alg0 = DecisionTreeRegressor(max_depth=15, min_samples_leaf=100)\n alg0.fit(train[predictors], train[target])\n train_predictions0 = alg0.predict(train[predictors])\n cv_score0 = np.mean(cross_val_score(alg0,train[predictors], train[target], cv=20))\n\n test[target] = alg0.predict(test[predictors])\n\n IDcol.append(target)\n submission = pd.DataFrame({x : test[x] for x in IDcol})\n submission.to_csv(\"predictions.csv\", index = False)\n\n rmse0 = np.sqrt(mean_squared_error(train[target].values, train_predictions0))\n #modelfit(alg3, train, test, predictors, target, IDcol, 'alg3.csv')\n #coef3 = pd.Series(alg3.feature_importances_, 
predictors).sort_values(ascending=False)\n #coef3.plot(kind='bar', title='Feature Importances')\n joblib.dump(alg0 , 'model.pkl')\n model = joblib.load('model.pkl')\n\n\n #test2 = test.sort_values('Item_Outlet_Sales' , ascending = False)\n #top5 = test2.head(5)\n #top5.plot.bar(x ='Item_Identifier' , y = 'Item_Outlet_Sales', rot =0 )\n\n \n\n # 1- item-id 2- out-id\n df = pd.read_csv(\"predictions.csv\")\n new_df =df.loc[(df['Item_Identifier'] == item_id)& (df['Outlet_Identifier'] == outlet_id)]\n pred = new_df['Item_Outlet_Sales']\n if(pred.values[0]==0):\n return \"invalid\"\n else:\n pred.values[0]\n \n \n #from sklearn.ensemble import RandomForestRegressor\n #alg1 = RandomForestRegressor(max_depth=2,random_state=0, n_estimators=100)\n #alg1.fit(train[predictors], train[target])\n #train_predictions1 = alg1.predict(train[predictors])\n #cv_score1 = np.mean(cross_val_score(alg1,train[predictors], train[target], cv=20))\n #rmse1 = np.sqrt(mean_squared_error(train[target].values, train_predictions1))\n #\n #\n #\n #\n #from sklearn import linear_model\n #alg2 = linear_model.Lasso(alpha=0.1)\n #alg2.fit(train[predictors], train[target])\n #train_predictions2 = alg2.predict(train[predictors])\n #cv_score2 = np.mean(cross_val_score(alg2,train[predictors], train[target], cv=20))\n #rmse2 = np.sqrt(mean_squared_error(train[target].values, train_predictions2))\n","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"384291113","text":"import time\nimport math\nimport matplotlib.pyplot as plt\nimport csv\nimport pandas\nimport data_export\nimport numpy\nfrom filterpy.kalman import KalmanFilter\nfrom filterpy.common import Q_discrete_white_noise\nfrom random import randint\n\ndef ComputeAcceleration(v1,v2,t1,t2):\n\tif (t2 == t1):\n\t\treturn 0\n\n\treturn ((v2-v1)/(t2-t1))\n\n\ndef ComputeVelocity(r1,r2,t1,t2):\n\tif (t2 == t1):\n\t\treturn 0\n\n\treturn ((r2-r1)/(t2-t1))\n\ndef ComputeTau(r,v):\n\tif(v==0.0):\n\t\tv = -0.001\n\treturn r/v\n\ndef efTest(v_error, r_error, buf_size):\n\t#initialization\n\tr = []\n\tt = []\n\tr_meas = []\n\tv = []\n\ttau = []\n\ta_need = []\n\tv_need = []\n\ta = []\n\theader = []\n\tfile_return = []\n\tstage = \"ef\"\n\ttimer = \"unset\"\n\n\t# //Parameters//\n\n\tfilename_readable = \"recentKFTestReadable.txt\"\n\tfilename = \"recentKFTestBuff.csv\"\n\tr0 = 2.0\n\tv0 = -0.4\n\ttau_dot = 0.5\n\n\n\n\n\n\t# kf = KalmanFilter(dim_x=2, dim_z=1)\n\tdt = 1.0/15.0\n\t# kf.x = numpy.array([[2.6],\n\t# \t\t\t\t\t[-0.5]])\n\t# kf.F = numpy.array([[1.,dt],\n\t# \t\t\t\t[0.,1.]])\n\n\t# kf.H = numpy.array([[1.,0.]])\n\n\t# kf.P *= 10\n\n\t# kf.R = .0005\n\n\t# kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=0.13)\n\t# // Velocity Equation //\n\n\t# // C = 2.602*(Sqrt(0.712-V)-0.846)\n\n\t# // Marker notation //\n\t# //0 -> no EF, trying to reach constant velocity\n\t# //1 -> starting EF from filtered data\n\n\t# //Loop\n\n\n\t# //Functions\n\n\n\n\n\t\t\t\n\n\n\n\tf = open(\"ContinuousDataKFTest.txt\", \"w\")\n\tf.write(\"stage\\tr\\tt\\tr_filt\\tv\\ttau\\tv_need\\ta_need\\tcmnd\\tmarker\\n\")\n\tcount = 0\n\n\tloop = True\n\n\tv.append(v0)\n\tr_meas.append(r0 + numpy.random.normal(0,r_error))\n\tt.append(0)\n\tr.append(r0)\n\ttau.append(0)\n\ta_need.append(0)\n\tv_need.append(0)\n\n\tv_meas = []\n\tcurrent_accel = 0.0\n\ti = 1\n\twhile i < buf_size + 1:\n\n\t\tv.append(v[i-1])\n\t\tt.append(t[i-1] + 
dt)\n\t\ta.append(ComputeAcceleration(v[i-1],v[i],t[i-1],t[i]))\n\n\t\t\n\n\t\tr.append(r[i-1] + v[-1]*dt)\n\n\t\tr_meas.append(r[i] + numpy.random.normal(0,r_error))\n\n\t\ti = i + 1\n\n\n\t\t#//what is the current sample?\n\n\ti = buf_size+1\n\tduration = -r[buf_size]/(tau_dot * v[buf_size])\n\tsamples = numpy.ceil(duration*15 + buf_size +1)\n\twhile i < samples:\n\n\t\t# current_filt = kf.x[0][0]\n\t\t# r_filt.append(current_filt)\n\n\t\tv_meas.append(ComputeVelocity(r_meas[i-2], r_meas[i-1], t[i-2], t[i-1]))\n\t\ttau.append(ComputeTau(r_meas[i-1], v_meas[-1]))\n\t\ta_need.append(v_meas[-1]*(1-tau_dot)/tau[-1])\n\t\tv_need.append(v_meas[-1] + a_need[-1]*dt)\n\t\t#//compute current velocity\n\t\t# v.append(kf.x[1][0])\n\t\tv.append(v_need[-1] + numpy.random.normal(0,v_error))\n\t\tt.append(t[i-1] + dt)\n\t\ta.append(ComputeAcceleration(v[i-1],v[i],t[i-1],t[i]))\n\t\t\n\t\t#//compute current tau\n\n\n\t\t#//compute needed acceleration\n\t\tr.append(r[i-1] + v[-1]*dt)\n\n\t\tr_meas.append(r[i] + numpy.random.normal(0,r_error))\n\t\t# kf.predict()\n\t\t# kf.update(r_meas[i])\n\t\t# r_filt.append(kf.x[0][0])\n\t\ti = i + 1\n\t\t#//check if desitnation is reached\n\n\n\n\n\t\t\n\tf.close()\n\n\n\n\tjust = open(\"JustinEFTest.txt\", \"w\")\n\tjust.write(\"{},\".format(len(r)))\n\tfor x in r:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tfor x in t:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tfor x in r_meas:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tfor x in v:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tfor x in tau:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tfor x in v_need:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tfor x in a_need:\n\t\tline = \"{},\".format(x)\n\t\tjust.write(line)\n\tjust.write(\"0\")\n\tjust.close()\n\n\treturn data_export.printRecentGraph(\"JustinEFTest.txt\",buf_size)","sub_path":"echoic_flow_test.py","file_name":"echoic_flow_test.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"366460818","text":"import torch\r\nfrom torchvision import transforms, datasets\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport numpy as np\r\nfrom PIL import Image, ImageStat\r\nimport os\r\nimport h5py\r\nfrom PIL import Image\r\nimport random\r\nfrom torchvision import transforms\r\n\r\nAudio_path = \"./AVE/AVE_Dataset/audio_features/\"\r\nForg_path = './AVE/AVE_Dataset/audiotrue/' # audio feature is true\r\ndef make_dataset(ori_path):\r\n path_listz = []\r\n path_listo = []\r\n count = 0\r\n ori_name = os.listdir(ori_path)\r\n for file in range(0, len(ori_name)):\r\n print(file)\r\n ficpath = os.path.join(ori_path, ori_name[file])\r\n ficname = os.listdir(ficpath)\r\n for fs in range(0, len(ficname)):\r\n picpath = os.path.join(ficpath, ficname[fs])\r\n picname = os.listdir(picpath)\r\n for picp in range(0, len(picname)):\r\n if os.path.exists(os.path.join(Forg_path, ori_name[file], ficname[fs], picname[picp][:-4]+'.jpg')):\r\n onoroff = '1'\r\n pa = os.path.join(Audio_path, ori_name[file], ficname[fs], picname[picp][:-4]+'_asp.h5')\r\n path_listo.append(onoroff+'+'+pa+'+'+str(file)+'+'+ori_name[file]+'+'+ficname[fs]+'+'+picname[picp][:-4]+'.jpg')\r\n else:\r\n onoroff = '0'\r\n pa = os.path.join(Audio_path, ori_name[file], ficname[fs], picname[picp][:-4]+'_asp.h5')\r\n path_listz.append(onoroff+'+'+pa+'+'+str(file)+'+'+ ori_name[file]+'+'+ficname[fs]+'+'+picname[picp][:-4]+'.jpg')\r\n 
random.shuffle(path_listz)\r\n slice = random.sample(path_listz, len(path_listo))\r\n path_listo = path_listo+ slice\r\n return path_listo\r\n\r\n\r\nclass ImageFolder(Dataset):\r\n def __init__(self, root):\r\n self.root = root\r\n self.imgs = make_dataset(root)\r\n\r\n def __getitem__(self, index):\r\n pathimla = self.imgs[index]\r\n img_la = pathimla.split('+')\r\n onoroff = int(img_la[0])\r\n audio_path = img_la[1]\r\n with h5py.File(audio_path, 'r') as hf:\r\n audio_features = np.float32(hf['dataset'][:]) # 5,128\r\n audio_features_batch = torch.from_numpy(audio_features).float()\r\n inda = int(img_la[-4])\r\n file = img_la[-3]\r\n subfile = img_la[-2]\r\n ssubfile = img_la[-1]\r\n\r\n return audio_features_batch, onoroff, inda, file, subfile, ssubfile\r\n\r\n def __len__(self):\r\n return len(self.imgs)\r\n","sub_path":"audio/audiodata.py","file_name":"audiodata.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"519169279","text":"from django import forms\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom .models import Tuitodo\nimport datetime\n\nclass TodoForm(forms.ModelForm):\n class Meta:\n model = Tuitodo\n fields = ('title', 'death_date', 'deadline')\n\n\n@login_required(login_url=\"/login/\")\ndef index(request):\n ctx = {}\n\n n_day = datetime.date.today() + datetime.timedelta(days=30)\n todo_list = Tuitodo.objects.filter(Q(finished=0), Q(deadline__lt=n_day),).order_by('deadline')\n paginator = Paginator(todo_list, 15) # show 15 contacts per page\n page = request.GET.get('page')\n try:\n contacts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, show the first page.\n contacts = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), show the last page of results.\n contacts = paginator.page(paginator.num_pages)\n ctx['todos'] = contacts\n\n ctx['form'] = TodoForm()\n return render(request, 'tui_todo/todo_index.html', ctx)\n\n\n@login_required(login_url=\"/login/\")\ndef new(request):\n form = TodoForm()\n if request.method == \"POST\":\n form = TodoForm(request.POST)\n if form.is_valid():\n form.save()\n messages.info(request, u'创建成功')\n return HttpResponseRedirect(reverse(\"tui_todo:todo_idx\"))\n return render(request, 'tui_todo/form.html', {'form': form})\n\n\n@login_required(login_url=\"/login/\")\ndef edit(request, id):\n edit_todo = get_object_or_404(Tuitodo, id=id)\n form = TodoForm(instance=edit_todo)\n if request.method == \"POST\":\n form = TodoForm(request.POST, instance=edit_todo)\n if form.is_valid():\n form.save()\n messages.info(request, u'编辑成功')\n return HttpResponseRedirect(reverse(\"tui_todo:todo_idx\"))\n return render(request, 'tui_todo/form.html', {'form': form})\n\n\n@login_required(login_url=\"/login/\")\ndef delete(request, id):\n todo = get_object_or_404(Tuitodo, id=id)\n todo.delete()\n messages.info(request, u'成功删除')\n return HttpResponseRedirect(reverse(\"tui_todo:todo_idx\"))\n\n\n@login_required(login_url=\"/login/\")\ndef finish(request, id):\n todo = get_object_or_404(Tuitodo, id=id)\n status = request.GET.get('status', '')\n if status == 'yes':\n finished = 1\n todo.finished = finished\n elif status == 'no':\n finished = 0\n else:\n messages.info(request, u'非法请求')\n return 
HttpResponseRedirect(reverse(\"tui_todo:todo_idx\"))\n todo.finished = finished\n todo.save()\n messages.info(request, u'修改成功')\n return HttpResponseRedirect(reverse(\"tui_todo:todo_idx\"))\n","sub_path":"tui_ban_todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"75358866","text":"import serial\nimport time\n\n# Set up serial port.\nser = serial.Serial('COM4', 9600)\n\nprint(\"This script calibrates each sensor at 30 degree intervals.\")\nprint(\"Reset receiver board then plug in RX wire before starting.\")\ninput(\"Press Enter to start.\")\n\n# Open the file to be written to.\n# File is a .csv, with row n = res(0), res(30), res(60), res(90) for sensor n. \nf = open(\"PS_Calibration.csv\",\"w\")\n\nn = 8 # Total amount of sensors hooked up.\n\n# Function to get the current (realtime) reading from the sensors.\ndef getSerial():\n global data\n global ser\n \n # Clear read buffer so next read is realtime.\n ser.reset_input_buffer() \n time.sleep(0.1) # Give time for a new serial value to come in.\n \n # Get raw serial input.\n # Comes in as: b'###,###,###,###,\\r\\n'\n rawline = str(ser.readline()) # Throw away the first line, usually garbage.\n rawline = str(ser.readline())\n \n # Convert raw serial to a list of numbers.\n rawline = rawline[2:len(rawline)] # Cut off the first b' characters.\n \n # Set initial value for break.\n endraw = 0\n \n # Extract all numbers as strings. Extracted in the order defined in SensorMapping.png.\n while endraw == 0:\n data.append(int(rawline[:rawline.find(\",\")])) # Get next number in string format.\n rawline = rawline[rawline.find(\",\")+1:len(rawline)] # Seek forward past the next comma.\n \n # Check if at end of rawline data. \n if rawline[0] == '\\\\':\n endraw = 1\n \n# Read each sensor at 30 degree intervals. 0 to 90 degrees.\nfor i in range(n):\n # Get reading at 0 degrees.\n print(\"Position sensor \",i+1,\" at 0 degrees.\",sep='')\n input(\"Press Enter when sensor is positioned.\")\n data = [] # Zero the data array in each loop.\n getSerial() # Fill data with readings for each sensor.\n print(data[i],\",\",end='',file=f)\n \n # Get reading at 30 degrees.\n #print(\"Position sensor \",i+1,\" at 30 degrees.\",sep='')\n #input(\"Press Enter when sensor is positioned.\")\n #data = [] # Zero the data array in each loop.\n #getSerial() # Fill data with readings for each sensor.\n #print(data[i],\",\",end='',file=f)\n \n # Get reading at 60 degrees.\n #print(\"Position sensor \",i+1,\" at 60 degrees.\",sep='')\n #input(\"Press Enter when sensor is positioned.\")\n #data = [] # Zero the data array in each loop.\n #getSerial() # Fill data with readings for each sensor.\n #print(data[i],\",\",end='',file=f)\n \n # Get reading at 90 degrees.\n print(\"Position sensor \",i+1,\" at 90 degrees.\",sep='')\n input(\"Press Enter when sensor is positioned.\")\n data = [] # Zero the data array in each loop.\n getSerial() # Fill data with readings for each sensor.\n print(data[i],file=f)\n \n# Close file and end script.\nf.close()\ninput(\"Calibration complete. 
Press Enter to exit.\")","sub_path":"PS_Calibration.py","file_name":"PS_Calibration.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"160869530","text":"def cifs(s):\n\treturn sum(int(x) for x in s)\n\nbest = 0\nX, Y = None, None\n\nfor i in range(100,1000):\n\tfor j in range(i+1, 1000):\n\t\tif cifs(str(i)) == cifs(str(j)):\n\t\t\tif j-i > best:\n\t\t\t\tbest = j-i\n\t\t\t\tX = i\n\t\t\t\tY = j\n\nprint(best)\nprint('maru', X)\nprint('usama', Y)","sub_path":"rychlostne/maxdiff_slow.py","file_name":"maxdiff_slow.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"293999062","text":"import subprocess\nimport os\nimport signal\nimport time\nfrom datetime import datetime\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\ncount = 0\nid = 0\nrecorder = cv2.VideoWriter(\"%d.avi\"%id, cv2.VideoWriter_fourcc(*\"MJPG\"), 25,(640,480))\n\n\nwhile True:\n    _, img = cap.read()\n    recorder.write(img)\n    if count > 200:\n\n        recorder.release()\n        id = id + 1\n        recorder = cv2.VideoWriter(\"%d.avi\"%id, cv2.VideoWriter_fourcc(*\"MJPG\"), 25,(640,480))\n        print(\"writing file %d.avi\"%id)\n        count = 0\n\n    count = count + 1\n","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"287235369","text":"#-*- coding:utf-8 -*-\nimport sqlite3\nimport os\n\nstorename = input()\nids = input()\n\nconn = sqlite3.connect(\"../store/\"+storename+\"/imformation.db\")\n\ncur = conn.cursor()\n\n#img name\nsql = \"delete from customs where id= '\"+ids+\"';\"\n#sql = 'insert into items values(\"'+name+'\",\"'+text+'\")'\n#sql =\"insert into member values('name','ids','pwd','storelocation','phonenum','email')\"\ncur.execute(sql)\nrows = cur.fetchall()\nconn.commit()\nconn.close()\n\nos.system(\"sudo rm -r ../store/\"+storename+\"/custom/\"+ids+\"\")\n","sub_path":"python/storecusdel.py","file_name":"storecusdel.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"557938902","text":"import sys \nimport json \nimport datetime \nimport time \nimport MySQLdb \nimport pandas as pd \nfrom sqlalchemy import create_engine \n\ndef load_data(years): \n\n    # Connect to mysql server \n    db = MySQLdb.connect(\"localhost\",\"liululu\",\"\",\"beijingdb\")\n    cursor = db.cursor()\n\n    sql = \"\"\"SET NAMES 'utf8';\"\"\" # set the character set to be 'utf-8'\n    cursor.execute(sql)\n\n    sql = \"DROP TABLE IF EXISTS beijingair;\"\n    cursor.execute(sql)\n\n    sql = \"\"\"CREATE TABLE IF NOT EXISTS beijingair (\n            Date CHAR(10) NOT NULL, \n            Year SMALLINT NOT NULL, \n            Month TINYINT NOT NULL, \n            Day TINYINT NOT NULL, \n            Hour TINYINT NOT NULL,\n            Weekday TINYINT NOT NULL,\n            Value SMALLINT NOT NULL); \"\"\"\n    cursor.execute(sql)\n\n    # for to_sql()\n    engine = create_engine('mysql://liululu@localhost/beijingdb')\n\n    for year in years:\n        inputfile=\"Beijing_Hourly_PM2.5_\"+str(year)+\".csv\"\n        df = pd.read_csv(inputfile)\n        df['Date']=df.apply(lambda row: str(row['Year'])+\"-\"+str(row['Month'])+\"-\"+str(row['Day']), axis=1)\n        df['Weekday']=df.apply(lambda row: datetime.date(row['Year'], row['Month'], row['Day']).weekday(), axis=1)\n        df=df.drop(df.columns[[0, 1, 2, 8, 9, 10]], axis=1)\n        #print df\n\n        ## use to_sql(), batch load, very fast \n 
df.to_sql(con=engine, name='beijingair', if_exists='append',flavor='mysql',index=False)\n\n    db.close()\n\nif __name__ == '__main__':\n\n    load_data([2008,2009,2010,2011,2012,2013,2014,2015,2016])\n    #load_data([2008])\n","sub_path":"data/load_pm2.5_to_DB.py","file_name":"load_pm2.5_to_DB.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"375755646","text":"#!/usr/bin/env python\n# encoding: utf-8\nfrom django.conf import settings\nfrom django.http import HttpResponse\nimport simplejson, mimetypes\n\n\n# INDENTATION \nif hasattr(settings, 'INDENT_JSON') and settings.INDENT_JSON:\n    INDENT = 2\nelse:\n    INDENT = 2 if settings.DEBUG else None\n\n# DEBUG TOOLBAR \nif hasattr(settings, 'DEBUG_TOOLBAR_CONFIG'):\n    TAG = settings.DEBUG_TOOLBAR_CONFIG.get('TAG', 'body') \nelse:\n    TAG = 'body' \n\n\n\nclass HttpResponseUnauthorized(HttpResponse):\n    '''\n    Http Response for status 401\n    '''\n    status_code = 401\n\n\nclass FileResponse(HttpResponse):\n    '''\n    Http Response with file content\n    '''\n    def __init__(self, content='', status=None, content_type=None):\n        HttpResponse.__init__(self, content = content, \n                        mimetype = mimetypes.guess_type(content.name)[0], \n                        status = status, \n                        content_type = content_type, )\n        self['Content-Disposition'] = 'attachment; filename=' + content.name\n\n\nclass JsonResponse(HttpResponse):\n    '''\n    Http Response with Json content type\n    '''\n    def __init__(self, content='', mimetype=None, status=None):\n        content = simplejson.dumps( content or [], indent=INDENT, ensure_ascii=False )\n        HttpResponse.__init__(self, content = content, \n                        mimetype = mimetype, \n                        status = status, \n                        content_type = 'application/json; charset=utf-8', )\n\n\nclass JsonpResponse(HttpResponse):\n    '''\n    Http Response with Jsonp content type\n    '''\n    def __init__(self, content='', mimetype=None, status=None, param='callback'):\n        content = simplejson.dumps( content or [], indent=INDENT, ensure_ascii=False )\n        HttpResponse.__init__(self, content = \"%s(%s)\" %(param, content), \n                        mimetype = mimetype,\n                        status = status, \n                        content_type = 'application/javascript; charset=utf-8', )\n\n\nclass ProperJsonResponse:\n    '''\n    Json or Jsonp Response according to request\n    ''' \n    def __init__(self, request):\n        self.__jsonp_param = None\n        for param in ['callback', 'jsonp']:\n            if param in request.GET:\n                self.__jsonp_param = request.GET.get(param)\n        \n    def __call__(self, *args, **kwargs):\n        if self.__jsonp_param:\n            return JsonpResponse(*args, param=self.__jsonp_param, **kwargs) \n        return JsonResponse(*args, **kwargs)\n\n\nclass JsonDebugResponse(HttpResponse):\n    '''\n    HTTP Response for debug purposes (django-debug-toolbar)\n    '''\n    def __init__(self, content='', mimetype=None, content_type=None, status=None):\n        content = simplejson.dumps( content or [], indent=INDENT, ensure_ascii=False )\n        HttpResponse.__init__(self, content = self.__json_to_html(content),\n                        mimetype = mimetype,\n                        status = status, \n                        content_type = content_type,)\n    \n    def __json_to_html(self, content):\n        content = content.replace('\n', '
 ')\n        content = content.replace(' ','    ')\n        if TAG != 'body':\n            return \"<%s>%s</%s>\" %(TAG, content, TAG)\n        return \"<%s>%s</%s>\" %(TAG, content, TAG)\n\n\n# JSON RESPONSE\nif hasattr(settings, 'YARD_DEBUG_TOOLBAR'):\n    JSONResponse = JsonDebugResponse if settings.YARD_DEBUG_TOOLBAR else ProperJsonResponse\nelse:\n    if 'debug_toolbar' in settings.INSTALLED_APPS and settings.DEBUG==True:\n        JSONResponse = JsonDebugResponse\n    else:\n        JSONResponse = ProperJsonResponse\n\n","sub_path":"src/yard/utils/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"257536633","text":"# -*- coding: utf-8 -*-\n\nfrom route4me import Route4Me\n\nAPI_KEY = \"11111111111111111111111111111111\"\n\n\ndef main():\n    route4me = Route4Me(API_KEY)\n    members = route4me.members\n    response = members.get_users(limit=5, offset=0)\n    if isinstance(response, dict) and 'errors' in response.keys():\n        print('. '.join(response['errors']))\n    else:\n        for i, member in enumerate(response):\n            print('Member #{}'.format(i + 1))\n            print('\\tName: {0}, {1}'.format(\n                member.get('member_first_name'),\n                member.get('member_last_name')\n            ))\n            print('\\tEmail: {}'.format(member.get('member_email')))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"examples/members/get_users.py","file_name":"get_users.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"116531848","text":"# -*- coding: utf-8 -*-\n# Author: vkaff\n# E-mail: vkaffes@imis.athena-innovation.gr\n\nimport os\nimport re\nfrom text_unidecode import unidecode\nimport __main__\n\nfrom sim_measures import strip_accents, algnms_to_func\nimport config\n\n\npunctuation_regex = re.compile(u'[‘’“”\\'\"!?;/⧸⁄‹›«»`ʿ,.-]')\n\n\ndef ascii_transliteration_and_punctuation_strip(s):\n    # NFKD: first applies a canonical decomposition, i.e., translates each character into its decomposed form.\n    # and afterwards apply the compatibility decomposition, i.e. 
replace all compatibility characters with their\n    # equivalents.\n\n    s = unidecode(strip_accents(s.lower()))\n    s = punctuation_regex.sub('', s)\n    return s\n\n\ndef transform(strA, strB, sorting=False, canonical=False, delimiter=' ', thres=config.sort_thres, only_sorting=False):\n    a = strA.decode('utf8') #.lower()\n    b = strB.decode('utf8') #.lower()\n\n    if canonical:\n        a = ascii_transliteration_and_punctuation_strip(a)\n        b = ascii_transliteration_and_punctuation_strip(b)\n\n    if sorting:\n        tmp_a = a.replace(' ', '')\n        tmp_b = b.replace(' ', '')\n\n        if algnms_to_func['damerau_levenshtein'](tmp_a, tmp_b) < thres:\n            a = \" \".join(sorted_nicely(a.split(delimiter)))\n            b = \" \".join(sorted_nicely(b.split(delimiter)))\n        elif algnms_to_func['damerau_levenshtein'](tmp_a, tmp_b) > algnms_to_func['damerau_levenshtein'](a, b):\n            a = tmp_a\n            b = tmp_b\n    elif only_sorting:\n        a = \" \".join(sorted_nicely(a.split(delimiter)))\n        b = \" \".join(sorted_nicely(b.split(delimiter)))\n\n    return a, b\n\n\ndef sorted_nicely(l):\n    \"\"\" Sorts the given iterable in the way that is expected.\n\n    Required arguments:\n    l -- The iterable to be sorted.\n\n    \"\"\"\n    convert = lambda text: int(text) if text.isdigit() else text\n    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n    return sorted(l, key=alphanum_key)\n\n\ndef getBasePath():\n    return os.path.abspath(os.path.dirname(__main__.__file__))\n\n\ndef getRelativePathtoWorking(ds):\n    return os.path.join(getBasePath(), ds)\n\n\nclass StaticValues:\n    featureColumns = [\n        \"Damerau-Levenshtein\",\n        \"Jaro\",\n        \"Jaro-Winkler\",\n        \"Jaro-Winkler reversed\",\n        \"Sorted Jaro-Winkler\",\n        # \"Permuted Jaro-Winkler\",\n        \"Cosine N-grams\",\n        \"Jaccard N-grams\",\n        \"Dice bigrams\",\n        \"Jaccard skipgrams\",\n        \"Monge-Elkan\",\n        \"Soft-Jaccard\",\n        \"Davis and De Salles\",\n        \"Damerau-Levenshtein Sorted\",\n        \"Jaro Sorted\",\n        \"Jaro-Winkler Sorted\",\n        \"Jaro-Winkler reversed Sorted\",\n        # \"Sorted Jaro-Winkler Sorted\",\n        # \"Permuted Jaro-Winkler Sorted\",\n        \"Cosine N-grams Sorted\",\n        \"Jaccard N-grams Sorted\",\n        \"Dice bigrams Sorted\",\n        \"Jaccard skipgrams Sorted\",\n        \"Monge-Elkan Sorted\",\n        \"Soft-Jaccard Sorted\",\n        \"Davis and De Salles Sorted\",\n        \"LinkGeoML Jaro-Winkler\",\n        \"LinkGeoML Jaro-Winkler reversed\",\n        # \"LSimilarity\",\n        \"LSimilarity_wavg\",\n        # \"LSimilarity_davies\",\n        # \"LSimilarity_skipgram\",\n        # \"LSimilarity_soft_jaccard\",\n        # \"LSimilarity_strike_a_match\",\n        # \"LSimilarity_cosine\",\n        # \"LSimilarity_monge_elkan\",\n        # \"LSimilarity_jaro_winkler\",\n        # \"LSimilarity_jaro\",\n        # \"LSimilarity_jaro_winkler_reversed\",\n        \"LSimilarity_davies_wavg\",\n        \"LSimilarity_skipgram_wavg\",\n        \"LSimilarity_soft_jaccard_wavg\",\n        \"LSimilarity_strike_a_match_wavg\",\n        \"LSimilarity_cosine_wavg\",\n        \"LSimilarity_jaccard_wavg\",\n        \"LSimilarity_monge_elkan_wavg\",\n        \"LSimilarity_jaro_winkler_wavg\",\n        \"LSimilarity_jaro_wavg\",\n        \"LSimilarity_jaro_winkler_reversed_wavg\",\n        \"LSimilarity_l_jaro_winkler_wavg\",\n        \"LSimilarity_l_jaro_winkler_reversed_wavg\",\n        # \"LSimilarity_baseScore\",\n        # \"LSimilarity_mismatchScore\",\n        # \"LSimilarity_specialScore\",\n        \"Avg LSimilarity_baseScore\",\n        \"Avg LSimilarity_mismatchScore\",\n        \"Avg LSimilarity_specialScore\",\n        # non metric features\n        # \"contains_str1\",\n        # \"contains_str2\",\n        # \"WordsNo_str1\",\n        # \"WordsNo_str2\",\n        # \"dashed_str1\",\n        # \"dashed_str2\",\n        # \"hasFreqTerm_str1\",\n        # \"hasFreqTerm_str2\",\n        # \"posOfHigherSim_str1_start\",\n        # 
\"posOfHigherSim_str1_middle\",\n # \"posOfHigherSim_str1_end\",\n # \"posOfHigherSim_str2_start\",\n # \"posOfHigherSim_str2_middle\",\n # \"posOfHigherSim_str2_end\",\n ]\n\n opt_values = {\n 'latin': {\n # Only latin dataset 100k lines\n 'damerau_levenshtein': {'simple': [0.6, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.5, 0.1, 0.4]]},\n 'jaro': {'simple': [0.6, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.7, 0.1, 0.2]]},\n 'jaro_winkler': {'simple': [0.8, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.6, 0.1, 0.3]]},\n 'jaro_winkler_r': {'simple': [0.6, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.7, 0.1, 0.2]]},\n # 'permuted_winkler': [],\n # 'sorted_winkler': [],\n 'cosine': {'simple': [0.6, [0.6, 0.2, 0.2]], 'avg': [0.8, [0.4, 0.2, 0.4]]},\n 'jaccard': {'simple': [0.6, [0.6, 0.1, 0.3]], 'avg': [0.8, [0.334, 0.333, 0.333]]},\n 'strike_a_match': {'simple': [0.6, [0.6, 0.1, 0.3]], 'avg': [0.8, [0.4, 0.2, 0.4]]},\n 'skipgram': {'simple': [0.6, [0.6, 0.2, 0.2]], 'avg': [0.8, [0.334, 0.333, 0.333]]},\n 'monge_elkan': {'simple': [0.6, [0.7, 0.2, 0.1]], 'avg': [0.8, [0.6, 0.1, 0.3]]},\n 'soft_jaccard': {'simple': [0.8, [0.6, 0.1, 0.3]], 'avg': [0.8, [0.5, 0.1, 0.4]]},\n 'davies': {'simple': [0.8, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.6, 0.1, 0.3]]},\n 'lgm_jaro_winkler': {'simple': [0.8, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.6, 0.1, 0.3]]},\n 'lgm_jaro_winkler_r': {'simple': [0.6, [0.7, 0.1, 0.2]], 'avg': [0.8, [0.7, 0.1, 0.2]]},\n },\n 'global': {\n 'damerau_levenshtein': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.8, [0.4, 0.5, 0.1]]},\n 'jaro': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.8, [0.4, 0.5, 0.1]]},\n 'jaro_winkler': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.6, [0.4, 0.5, 0.1]]},\n 'jaro_winkler_r': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.8, [0.4, 0.5, 0.1]]},\n # 'permuted_winkler': [],\n # 'sorted_winkler': [],\n 'cosine': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.6, [0.4, 0.5, 0.1]]},\n 'jaccard': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.6, [0.4, 0.5, 0.1]]},\n 'strike_a_match': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.65, [0.4, 0.5, 0.1]]},\n 'skipgram': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.6, [0.4, 0.5, 0.1]]},\n 'monge_elkan': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.6, [0.4, 0.5, 0.1]]},\n 'soft_jaccard': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.7, [0.4, 0.5, 0.1]]},\n 'davies': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.7, [0.4, 0.5, 0.1]]},\n 'lgm_jaro_winkler': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.6, [0.4, 0.5, 0.1]]},\n 'lgm_jaro_winkler_r': {'simple': [0.6, [0.4, 0.5, 0.1]], 'avg': [0.8, [0.4, 0.5, 0.1]]},\n }\n }\n","sub_path":"src/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"595618673","text":"__author__ = \"franco_loyola\"\n\n\"\"\"\nContains the text for each label in the form, and it's equivalent numbric value\n\"\"\"\ndef generic_form_titles():\n \"\"\"\n Generic form titles equivalencies\n \"\"\"\n generic_form_titles = {\n \"day\" : \"Day\",\n \"month\" : \"Month\",\n \"year\" : \"Year\",\n }\n return generic_form_titles\n\ndef personal_form_fields_text():\n \"\"\"\n Personal data form equivalencies\n \"\"\"\n personal_form = {\n \"1.0.1\" : \"Are you already customer for this bank?\",\n \"1.0.2\" : \"Your title\",\n \"1.0.3\" : \"Last Name\",\n \"1.0.4\" : \"First Name\",\n \"1.0.5\" : \"Any other name you use/have used\",\n \"1.0.6\" : \"Date of Birth\",\n \"1.0.7\" : \"Gender\",\n \"1.0.7.1\" : \"Your relationship with the 
first customer\",\n \"1.0.8\" : \"Work status\",\n \"1.0.9\" : \"Other, please explain\",\n \"1.0.10\" : \"If unemployed, since when?\",\n \"1.0.11\" : \"Country of birth\",\n \"1.0.12\" : \"Town/City of birth\",\n \"1.0.13\" : \"Nationality\",\n \"1.0.14\" : \"Other Nationalities\",\n \"1.0.15\" : \"Country of residence\",\n \"1.0.16\" : \"Which countries are you tax resident in?\",\n \"1.1.1\" : \"Phone Information\",\n \"1.1.1.1\" : \"Home\",\n \"1.1.1.2\" : \"Mobile\",\n \"1.1.1.3\" : \"Work\",\n \"1.2.1\" : \"Home address\",\n \"1.2.2\" : \"Postcode\",\n \"1.2.3\" : \"When did you start living at this address?\",\n }\n return personal_form\n\ndef bank_form_fields_text():\n \"\"\"\n Bank data form equivalencies\n \"\"\"\n bank_form = {\n \"2.1\" : \"Would you like to apply for a Debit Card?\",\n \"3.1\" : \"Do you want to receive commercial news?\",\n \"3.2\" : \"Through which channels?\",\n \"3.3\" : \"Do you consent us to share information in order to determine the ‘best rate’?\",\n \"4.1\" : \"Are you working?\",\n \"4.2\" : \"Occupation\",\n \"4.3\" : \"Employer's name\",\n \"4.4\" : \"Employer's address\",\n \"4.5\" : \"Working there since\",\n \"4.6\" : \"How long did you work for the previuos employer?\",\n \"5.1.1\" : \"Salary\",\n \"5.1.2\" : \"Benefits\",\n \"5.1.3\" : \"Pension\",\n \"5.1.4\" : \"Investments\",\n \"5.1.5\" : \"Other\",\n \"5.1.6\" : \"Total monthly income\",\n \"5.2.1\" : \"Mortgage / Rent\",\n \"5.2.2\" : \"Loans\",\n \"5.2.3\" : \"Credit card debt\",\n \"5.2.4\" : \"Total expenditure\",\n \"5.2.5\" : \"Do you have a mortgage?\",\n \"5.2.6\" : \"Mortgage left to pay\",\n \"5.2.7\" : \"House value\",\n \"6.1\" : \"Savings type\",\n \"6.2\" : \"Total savings\",\n \"7.1\" : \"Account type\",\n \"7.2\" : \"Do you have Credit Cards?\",\n \"7.3\" : \"How many?\",\n }\n return bank_form\n\ndef personal_form_titles():\n \"\"\"\n Personal form titles equivalencies\n \"\"\"\n personal_form_titles = {\n \"1.0\" : \"Your personal details\",\n \"1.1\" : \"Phone Details\",\n \"1.2\" : \"Residential details\",\n }\n return personal_form_titles\n\ndef bank_form_titles():\n \"\"\"\n Bank form titles equivalencies\n \"\"\"\n bank_form_titles = {\n \"2.0\" : \"Debit Card\",\n \"3.0\" : \"Personal Information Agreement\", # Section 3 and 4 in the form\n \"4.0\" : \"Employment Details\",\n \"5.0\" : \"Economic details\",# Mortage from 8.3 here\n \"6.0\" : \"Savings\",\n \"7.0\" : \"Bank details\"\n }\n return bank_form_titles\n\ndef get_default_values_personal_data():\n \"\"\"\n Returns a dict with the default values to pre-populate the form\n \"\"\"\n data = {\n \"customer_number\" : \"0\",\n \"already_customer\" : \"no\",\n \"person_title\" : \"mr\",\n \"person_last_name\" : \"\",\n \"person_first_name\" : \"\",\n \"person_extra_name\" : \"-\",\n \"person_birthday_day\" : \"\",\n \"person_birthday_month\" : \"\",\n \"person_birthday_year\" : \"\",\n \"person_gender\" : \"male\",\n \"person_work_status\" : \"employed\",\n \"person_unemployed_month\" : \"\",\n \"person_unemployed_year\" : \"\",\n \"person_birth_country\" : \"\",\n \"person_birth_city\" : \"\",\n \"person_main_nationality\" : \"\",\n \"person_second_nationality\" : \"-\",\n \"person_country_residence\" : \"\",\n \"person_country_tax_residence\" : \"\",\n \"person_phone_home\" : \"-\",\n \"person_phone_mobile\" : \"\",\n \"person_phone_work\" : \"-\",\n \"person_residence_address\" : \"\",\n \"person_residence_zip\" : \"\",\n \"person_residence_month\" : \"\",\n \"person_residence_year\": \"\",\n }\n return 
data\n\ndef get_default_values_bank_data():\n    \"\"\"\n    Returns a dict with the default values to pre-populate the form\n    \"\"\"\n    data = {\n        \"bank_number\" : \"0\",\n        \"customer1_id\" : \"0\",\n        \"customer1_new_debit_card\" : \"no\",\n        \"customer1_commercial_news\" : \"no\",\n        \"customer1_commercial_news_email\" : \"no\",\n        \"customer1_commercial_news_post\" : \"no\",\n        \"customer1_commercial_news_sms\" : \"no\",\n        \"customer1_commercial_news_phone\" : \"no\",\n        \"customer1_commercial_news_netbank\" : \"no\",\n        \"customer1_data_share_consent\" : \"no\",\n        \"customer1_working\" : \"yes\",\n        \"customer1_occupation\" : \"\",\n        \"customer1_employer_name\" : \"\",\n        \"customer1_employer_address\" : \"\",\n        \"customer1_employer_month\" : \"\",\n        \"customer1_employer_year\" : \"\",\n        \"customer1_prev_employer_month\" : \"\",\n        \"customer1_prev_employer_year\" : \"\",\n        \"customer1_income_salary\" : \"0\",\n        \"customer1_income_benefits\" : \"0\",\n        \"customer1_income_pension\" : \"0\",\n        \"customer1_income_investments\" : \"0\",\n        \"customer1_income_other\" : \"0\",\n        \"customer1_income_total\" : \"Calculated on save\",\n        \"customer1_expense_rent\" : \"0\",\n        \"customer1_expense_loan\" : \"0\",\n        \"customer1_expense_credit_card\" : \"0\",\n        \"customer1_expense_total\" : \"Calculated on save\",\n        \"customer1_mortgage\" : \"no\",\n        \"customer1_mortgage_balance\" : \"\",\n        \"customer1_mortgage_house_value\" : \"\",\n        \"customer1_savings_type\" : \"no savings\",\n        \"customer1_savings_total\" : \"\",\n        \"customer1_account_type\" : \"private\",\n        \"customer1_credit_card\" : \"no\",\n        \"customer1_credit_card_amount\" : \"\",\n        \"customer2_id\" : \"0\",\n        \"customer2_new_debit_card\" : \"no\",\n        \"customer2_commercial_news\" : \"no\",\n        \"customer2_commercial_news_email\" : \"no\",\n        \"customer2_commercial_news_post\" : \"no\",\n        \"customer2_commercial_news_sms\" : \"no\",\n        \"customer2_commercial_news_phone\" : \"no\",\n        \"customer2_commercial_news_netbank\" : \"no\",\n        \"customer2_data_share_consent\" : \"no\",\n        \"customer2_working\" : \"yes\",\n        \"customer2_occupation\" : \"\",\n        \"customer2_employer_name\" : \"\",\n        \"customer2_employer_address\" : \"\",\n        \"customer2_employer_month\" : \"\",\n        \"customer2_employer_year\" : \"\",\n        \"customer2_prev_employer_month\" : \"\",\n        \"customer2_prev_employer_year\" : \"\",\n        \"customer2_income_salary\" : \"0\",\n        \"customer2_income_benefits\" : \"0\",\n        \"customer2_income_pension\" : \"0\",\n        \"customer2_income_investments\" : \"0\",\n        \"customer2_income_other\" : \"0\",\n        \"customer2_income_total\" : \"Calculated on save\",\n        \"customer2_expense_rent\" : \"0\",\n        \"customer2_expense_loan\" : \"0\",\n        \"customer2_expense_credit_card\" : \"0\",\n        \"customer2_expense_total\" : \"Calculated on save\",\n        \"customer2_mortgage\" : \"no\",\n        \"customer2_mortgage_balance\" : \"\",\n        \"customer2_mortgage_house_value\" : \"\",\n        \"customer2_savings_type\" : \"no savings\",\n        \"customer2_savings_total\" : \"\",\n        \"customer2_account_type\" : \"private\",\n        \"customer2_credit_card\" : \"no\",\n        \"customer2_credit_card_amount\" : \"\",\n    }\n    return data\n\ndef personal_information_wall_of_text():\n    \"\"\"\n    Return the wall of text for personal information\n    \"\"\"\n    text = (\n        \"---- Who looks after your personal information ----\\n\"\n        \"Your personal information will be held by Lloyds Bank Plc which is part of the Lloyds Banking Group. 
More information on the Group can be found at www.lloydsbankinggroup.com\\n\"\n        \"---- How we use your personal information ----\\n\"\n        \"We will use your personal information:\\n\"\n        \" - to provide products and services, manage your relationship with us and comply with any laws or regulations we are subject to (for example the laws that prevent financial crime or the regulatory requirements governing the products we offer).\\n\"\n        \" - for other purposes including improving our services, exercising our rights in relation to agreements and contracts and identifying products and services that may be of interest.\\n\"\n        \"To support us with the above we analyse information we know about you and how you use our products and services, including some automated decision making. \\nYou can find out more about how we do this, and in what circumstances you can ask us to stop, in our full privacy notice.\\n\"\n        \"---- Who we share your personal information with ----\\n\"\n        \"Your personal information will be shared within Lloyds Banking Group and other companies that provide services to you or us, so that we and any other companies in our Group can look after your relationship with us. \\nBy sharing this information it enables us to better understand our customers’ needs, run accounts and policies, and provide products and services efficiently. \\nThis processing may include activities which take place outside of the European Economic Area. \\nIf this is the case we will ensure appropriate safeguards are in place to protect your personal information. \\nYou can find out more about how we share your personal information with credit reference agencies below and can access more information about how else we share your information in our full privacy notice.\\n\"\n        \"....\\n\"\n    ).format()\n    return text","sub_path":"form_models.py","file_name":"form_models.py","file_ext":"py","file_size_in_byte":10212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"486466697","text":"import os\n\nimport torch\nfrom torch.nn import functional as F\nimport numpy as np\n\nfrom vehicle_reid_pytorch.metrics import eval_func\nfrom vehicle_reid_pytorch.loss.triplet_loss import normalize, euclidean_dist\nfrom functools import reduce\n\nfrom vehicle_reid_pytorch.metrics.rerank import re_ranking\n\n\ndef clck_dist(feat1, feat2, vis_score1, vis_score2):\n    \"\"\"\n    Compute the clck distance from the VPM paper\n\n    :param torch.Tensor feat1: [B1, C, 3]\n    :param torch.Tensor feat2: [B2, C, 3]\n    :param torch.Tensor vis_score: [B, 3]\n    :rtype torch.Tensor\n    :return: clck distance. 
[B1, B2]\n    \"\"\"\n\n    B, C, N = feat1.shape\n    dist_mats = []\n    ckcls = []\n\n    for i in range(N):\n        parse_feat1 = feat1[:, :, i]\n        parse_feat2 = feat2[:, :, i]\n        dist_mat = euclidean_dist(parse_feat1, parse_feat2)\n        ckcl = torch.mm(vis_score1[:, i].view(-1, 1), vis_score2[:, i].view(1, -1))  # [N, N]\n        dist_mats.append(dist_mat)\n        ckcls.append(ckcl)\n\n    dist_mat = reduce(torch.add, [ckcls[i] * dist_mats[i] for i in range(N)]) / reduce(torch.add, ckcls)\n\n    return dist_mat\n\n\nclass Clck_R1_mAP:\n    def __init__(self, num_query, *, max_rank=50, feat_norm=True, output_path='', rerank=False, remove_junk=True,\n                 lambda_=0.5):\n        \"\"\"\n        Compute the visibility-aware distance from VPM and evaluate performance\n\n        :param num_query:\n        :param max_rank:\n        :param feat_norm:\n        :param output_path:\n        :param rerank:\n        :param remove_junk:\n        :param lambda_: distmat = global_dist + lambda_ * local_dist, default 0.5\n        \"\"\"\n        super(Clck_R1_mAP, self).__init__()\n        self.num_query = num_query\n        self.max_rank = max_rank\n        self.feat_norm = feat_norm\n        self.output_path = output_path\n        self.rerank = rerank\n        self.remove_junk = remove_junk\n        self.lambda_ = lambda_\n        self.reset()\n\n    def reset(self):\n        self.global_feats = []\n        self.local_feats = []\n        self.vis_scores = []\n        self.pids = []\n        self.camids = []\n        self.paths = []\n\n    def update(self, output):\n        global_feat, local_feat, vis_score, pid, camid, paths = output\n        self.global_feats.append(global_feat)\n        self.local_feats.append(local_feat)\n        self.vis_scores.append(vis_score)\n        self.pids.extend(np.asarray(pid))\n        self.camids.extend(np.asarray(camid))\n        self.paths += paths\n\n    def compute(self):\n        global_feats = torch.cat(self.global_feats, dim=0)\n        local_feats = torch.cat(self.local_feats, dim=0)\n        vis_scores = torch.cat(self.vis_scores)\n        if self.feat_norm:\n            print(\"The test feature is normalized\")\n            global_feats = F.normalize(global_feats, dim=1, p=2)\n            local_feats = F.normalize(local_feats, dim=1, p=2)\n\n        # global distance\n        # query\n        qf = global_feats[:self.num_query]\n        q_pids = np.asarray(self.pids[:self.num_query])\n        q_camids = np.asarray(self.camids[:self.num_query])\n        # gallery\n        gf = global_feats[self.num_query:]\n        g_pids = np.asarray(self.pids[self.num_query:])\n        g_camids = np.asarray(self.camids[self.num_query:])\n        m, n = qf.shape[0], gf.shape[0]\n\n        if self.rerank:\n            distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)\n\n        else:\n            distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n            distmat.addmm_(1, -2, qf, gf.t())\n            distmat = distmat.cpu().numpy()\n\n        # local distance\n        local_distmat = clck_dist(local_feats[:self.num_query], local_feats[self.num_query:],\n                                  vis_scores[:self.num_query], vis_scores[self.num_query:])\n        local_distmat = local_distmat.cpu().numpy()\n\n        # save results\n        query_paths = self.paths[:self.num_query]\n        gallery_paths = self.paths[self.num_query:]\n        if self.output_path != '':\n            with open(os.path.join(self.output_path, 'test_output.pkl'), 'wb') as f:\n                torch.save({\n                    'gallery_paths': gallery_paths,\n                    'query_paths': query_paths,\n                    'gallery_ids': g_pids,\n                    'query_ids': q_pids,\n                    'query_features': qf,\n                    'gallery_features': gf,\n                    'query_cams': q_camids,\n                    'gallery_cams': g_camids,\n                    'distmat': distmat,\n                    'localdistmat': local_distmat\n                }, f)\n\n        cmc, mAP = eval_func(distmat + self.lambda_ * local_distmat, q_pids, g_pids, q_camids, g_camids,\n                             remove_junk=self.remove_junk)\n\n        return cmc, 
mAP\n","sub_path":"examples/parsing_reid/math_tools.py","file_name":"math_tools.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"349972416","text":"'''\r\nCreated on 19/10/2013\r\n\r\n@author: Alejandro\r\n'''\r\nimport random\r\n\r\nclass PriorityMap: \r\n\r\n def __init__(self, priorities_quant, aging_quant):\r\n self.priorities = {}\r\n self.max_priority = priorities_quant\r\n self.max_aging = aging_quant\r\n \r\n self._build_priorities_dict()\r\n\r\n def _build_priorities_dict(self):\r\n self.priorities[0] = {0:[]}\r\n for p in range(1, self.max_priority):\r\n new_dict = {}\r\n for a in range(self.max_aging):\r\n new_dict[a] = []\r\n \r\n self.priorities[p] = new_dict\r\n \r\n def add(self, process):\r\n if process.priority is None:\r\n process.set_priority(random.randint(0, self.max_priority - 1))\r\n self.priorities[process.priority][0].append(process)\r\n \r\n def pop_next_to_execute(self):\r\n process = None\r\n # Si la prioridad 0 con envejecimiento 0 esta vacia\r\n if not self.priorities[0][0]:\r\n # Busca en orden ascendente por el diccionario el proximo que no es vacio\r\n for p in range(1, self.max_priority):\r\n for a in range(self.max_aging - 1, -1, -1):\r\n if self.priorities[p][a]:\r\n process = self.priorities[p][a].pop(0)\r\n process.set_priority(p)\r\n return process\r\n else:\r\n process = self.priorities[0][0].pop(0)\r\n process.set_priority(0)\r\n return process\r\n # Si no encuentra nada devuelve nulo\r\n return None\r\n \r\n def age_all(self):\r\n for p in range(1, self.max_priority):\r\n # Si estoy parado en la prioridad 1 con el maximo envejecimiento\r\n if p == 1:\r\n self.priorities[p-1][0].extend(self.priorities[p][self.max_aging - 1])\r\n # Entonces estoy parado en cualquier otra prioridad con el maximo envejecimiento\r\n else:\r\n self.priorities[p-1][0] = self.priorities[p][self.max_aging - 1]\r\n # Actualiza los otros envejecimientos de cada prioridad\r\n for a in range(self.max_aging - 2, -1, -1):\r\n self.priorities[p][a+1] = self.priorities[p][a]\r\n # Asigna lista vacia a la maxima prioridad con envejecimiento 0 (Final del diccionario)\r\n self.priorities[self.max_priority-1][0] = []","sub_path":"PythonSO/scheduler/priority_map.py","file_name":"priority_map.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"319951549","text":"# -*- coding: utf-8 -*-\nfrom urlparse import parse_qs\nfrom datetime import date\n#\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.admin.views.main import ChangeList\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import ugettext as _\nfrom django.http import HttpResponseRedirect\nfrom django.utils.encoding import force_unicode\nfrom django.core.urlresolvers import reverse\n#\nfrom starmato.admin.options import StarmatoModelAdmin\nfrom starmato.savedlist.models import StarmatoList\n\nSTARMATO_SAVEDLIST_LOOKUP = \"s_sl_id\"\n\n################################################################################\n# Add actions to the ModelAdmin. 
To be used for MODELS TO EXPORT AS SAVED LISTS\n# (savedlists)\n# This class is the key to the working of the list feature\n# When used as the ModelAdmin, StarmatoListActions handles a \"savedlist_id\" GET\n# parameter to retrieve the list\n# This class also set a number of actions:\n# - create a savedlist with selected objects\n# - add selected objects to a savedlist\n# - remove selected objects from current savedlist\n################################################################################\nclass StarmatoChangeList(ChangeList):\n    def get_filters(self, request):\n        (self.filter_specs, self.has_filters, remaining_lookup_params,\n            filters_use_distinct) = super(StarmatoChangeList, self).get_filters(request)\n        if remaining_lookup_params.has_key(STARMATO_SAVEDLIST_LOOKUP):\n            remaining_lookup_params.pop(STARMATO_SAVEDLIST_LOOKUP)\n        return self.filter_specs, self.has_filters, remaining_lookup_params, filters_use_distinct\n\nclass StarmatoListActions(StarmatoModelAdmin):\n    def lookup_allowed(self, lookup, value):\n        if lookup == STARMATO_SAVEDLIST_LOOKUP:\n            return True\n        return super(StarmatoListActions, self).lookup_allowed(lookup, value)\n\n    def get_changelist(self, request):\n        return StarmatoChangeList\n\n\n    def queryset(self, request):\n        if request.GET.has_key(STARMATO_SAVEDLIST_LOOKUP):\n            new_GET = request.GET.copy()\n            list_id = new_GET.pop(STARMATO_SAVEDLIST_LOOKUP)\n            self.savedlist_id = list_id[0]\n        qs = super(StarmatoListActions, self).queryset(request)\n\n        try:\n            # get the elements\n            elements = StarmatoList.objects.get(id=self.savedlist_id).listelement_set.values('model_id') \n            # filter the queryset\n            qs = qs.filter(id__in=elements)\n        except:\n            pass\n        return qs\n\n    # Add view to save list\n    def _save_list(self, request, queryset):\n        ct = ContentType.objects.get_for_model(queryset.model)\n        model = ct.model_class()\n        al = StarmatoList(title=date.today().strftime(\"%d/%m/%Y\"), model_ref=model, user=request.user)\n        al.save()\n        al.set_elements(queryset)\n        self.message_user(request, _(u\"The list was successfully saved. 
Please type in a useful name.\"))\n        return HttpResponseRedirect(\"%s?id=%d\" % (reverse('admin:savedlist_starmatolist_changelist'), al.id))\n    _save_list.short_description = _(u'LIST: Create a new list from selected %(verbose_name_plural)s')\n\n    # Add view to remove elements from list\n    def _remove_from_list(self, request, queryset):\n        if hasattr(self, 'savedlist_id'):\n            # Check that the user has delete permission for the actual model\n            al = StarmatoList.objects.get(id=self.savedlist_id)\n            if al.user != None and request.user != al.user:\n                raise PermissionDenied\n            al.remove_elements(queryset)\n            objects_name = force_unicode(self.model._meta.verbose_name_plural)\n            self.message_user(request, _(u\"The selected %(objects_name)s were successfully deleted from the list %(listname)s.\" % {\n                \"objects_name\": objects_name,\n                \"listname\": al.title,\n            }))\n        else:\n            self.message_user(request, _(u\"You are not editing a saved list.\"))\n        \n        return None\n\n    _remove_from_list.short_description = _(u'LIST: Delete selected %(verbose_name_plural)s from current list')\n\n    # Add view to add elements to list\n    def _add_to_list(self, request, queryset):\n        ct = ContentType.objects.get_for_model(queryset.model)\n        model = ct.model_class()\n        opts = model._meta\n        app_label = opts.app_label\n\n        # The user has already confirmed the addition.\n        # Do the addition and return a None to display the change list view again.\n        if request.POST.get('post'):\n            try:\n                savedlist = StarmatoList.objects.get(id=request.POST.get(STARMATO_SAVEDLIST_LOOKUP))\n                if savedlist.user != None and request.user != savedlist.user:\n                    raise PermissionDenied\n                n = savedlist.set_elements(queryset)\n                m = queryset.count() - n\n                \n                self.message_user(request, _(\"Successfully added %(count)d %(items)s to list %(listname)s. %(duplicate)d %(items)s were already in the list.\") % {\n                    \"count\": n, \"duplicate\": m, \"items\": opts.verbose_name_plural, \"listname\": savedlist.title,\n                })\n            except:\n                self.message_user(request, _(\"Cannot add %(items)s to list. 
%(listname)s\") % {\n \"items\": opts.verbose_name_plural,\n \"listname\": savedlist.title,\n })\n \n # Go the saved list.\n info = opts.app_label, opts.module_name\n return HttpResponseRedirect(\"%s?%s=%d\" % (reverse('admin:%s_%s_changelist' % info), STARMATO_SAVEDLIST_LOOKUP, savedlist.id))\n\n if len(queryset) == 1:\n objects_name = force_unicode(opts.verbose_name)\n else:\n objects_name = force_unicode(opts.verbose_name_plural)\n\n lists = StarmatoList.objects.filter(model=opts.object_name,user=request.user)\n\n context = {\n \"title\": _(u'Add to an existing saved list'),\n \"objects_name\": objects_name,\n \"model\": model,\n \"lists\": lists,\n 'queryset': queryset,\n \"opts\": opts,\n \"app_label\": app_label,\n \"lookup\": STARMATO_SAVEDLIST_LOOKUP,\n }\n\n # Display the confirmation page\n return TemplateResponse(request, self.delete_selected_confirmation_template or [\n \"admin/%s/%s/add_to_list_choice.html\" % (app_label, opts.object_name.lower()),\n \"admin/%s/add_to_list_choice.html\" % app_label,\n \"admin/add_to_list_choice.html\"\n ], context, current_app=self.admin_site.name)\n\n _add_to_list.short_description = _(u\"LIST: Add selected %(verbose_name_plural)s to an existing list\")\n\n actions = ['_save_list', '_remove_from_list', '_add_to_list']\n\n\n\n\n","sub_path":"starmato/savedlist/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"83324134","text":"import asyncio\nfrom sys import executable as sys_executable\nimport pickle\nfrom .ipc import UnixSocketServer\nfrom itertools import count\nfrom random import sample\nfrom transitions import Machine\n\n\nasync def std_reader(self, stream, output, proc_ident, line_id):\n while True:\n line = await stream.readline()\n if not line:\n break\n await output.put((\n proc_ident,\n line_id,\n line.decode('utf-8', errors='replace'),\n ))\n\n\nasync def parser_process(*, loop, parser_modulename, sockfile, proc_ident, stdlog):\n proc = None\n try:\n proc = await asyncio.create_subprocess_exec(\n sys_executable, '-m', 'scutigera.parser',\n '--module', parser_modulename,\n '--sock', sockfile,\n '--id', str(proc_ident),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n loop=loop\n )\n t = asyncio.ensure_future(std_reader(proc.stdout, stdlog, proc_ident, 1), loop=loop)\n t._log_destroy_pending = False\n t = asyncio.ensure_future(std_reader(proc.stderr, stdlog, proc_ident, 2), loop=loop)\n t._log_destroy_pending = False\n await proc.wait()\n finally:\n if proc is not None and proc.returncode is None:\n try:\n proc.terminate()\n except ProcessLookupError:\n pass\n\n\nasync def recv_message(stream):\n msglen = await stream.readexactly(4)\n msglen = int.from_bytes(msglen, byteorder='big', signed=False)\n msg = await stream.readexactly(msglen)\n return pickle.loads(msg)\n\n\nasync def send_message(stream, msg):\n req = pickle.dumps(msg)\n reqlen = len(req).to_bytes(length=4, byteorder='big')\n stream.write(reqlen)\n stream.write(req)\n await stream.drain()\n\n\nclass Worker:\n MAX_WORKER_CONN_TIME = 20\n DISCONNECTED_PROCESS_TIMEOUT = 20\n MAX_TASK_CRASHES = 3\n\n states = ['spawning', 'ready', 'processing', 'dying', 'dead']\n\n def __init__(self, *, loop, **process_kw):\n self.loop = loop\n self.process_kw = process_kw\n\n self.machine = Machine(model=self, states=self.states, initial='spawning')\n at = self.machine.add_transition\n at('_on_connected', 'spawning', 'ready')\n 
at('_on_task_sent', 'ready', 'processing')\n        at('_on_result_received', 'processing', 'ready')\n        at('_on_bad_data_received', 'processing', 'dying', after='_cb_kill_request')\n        at('_on_disconnected', ['ready', 'processing'], 'dying', after='_cb_delayed_kill_request',\n           ignore_invalid_triggers=True)\n        at('_on_process_finish', 'spawning', 'dead', after='_cb_unexp_death')\n        # ready included here because socket interaction isn't synchronized with process lifecycle\n        at('_on_process_finish', ['ready', 'processing', 'dying'], 'dead')\n        at('_on_readiness_fail', 'spawning', 'dying', after='_cb_kill_request')\n        at('_on_planned_shutdown', 'ready', 'dying', after='_cb_delayed_kill_request')\n        at('kill', ['spawning', 'ready', 'processing'], 'dying', after='_cb_kill_request')\n\n        self.planned_shutdown = False\n        self.task = asyncio.ensure_future(self._process(), loop=loop)\n        self.task._log_destroy_pending = False\n        self.readiness_task = asyncio.ensure_future(self._readiness(), loop=loop)\n        self.readiness_task._log_destroy_pending = False\n        self.delayed_kill_task = None\n\n    # callbacks\n\n    def _cb_kill_request(self):\n        self.task.cancel()\n\n    def _cb_delayed_kill_request(self):\n        self.delayed_kill_task = asyncio.ensure_future(self._delayed_kill(), loop=self.loop)\n        self.delayed_kill_task._log_destroy_pending = False\n\n    def _cb_unexp_death(self):\n        pass\n        # TODO: report unexpected crashes\n\n    # connection handling coroutines (invoked from outside)\n\n    async def _handle_bad_response(self, tid, parse_task, e):\n        # return back unhandled task\n        # TODO: this logic with task crashes is wrong\n        # crash must happen once and be a final state\n        parse_task[0]['parse_fails'] = parse_task[0].get('parse_fails', 0) + 1\n        if parse_task[0]['parse_fails'] >= self.MAX_TASK_CRASHES:\n            self.request_pool.put_task_result(tid, {\n                'type': 'repeated_crash',\n                'reason': str(e),\n            })\n        else:\n            await self.request_pool.put_intermediate_result(tid, {\n                'type': 'crash',\n                'reason': str(e),\n                'count': parse_task[0]['parse_fails'],\n            })\n            await self.request_pool.put_task_back(tid, parse_task)\n\n    async def _task_session(self, reader, writer):\n        tid, parse_task = await self.request_pool.get_task()\n        try:\n            self._on_task_sent()\n            await send_message(writer, parse_task)\n            while True:\n                msg = await recv_message(reader)\n                if msg['type'] in ('result', 'error'):\n                    # TODO: invert this condition, define explicitly all intermediate states\n                    self.request_pool.put_task_result(tid, msg)\n                    break\n                else:\n                    await self.request_pool.put_intermediate_result(tid, msg)\n            self._on_result_received()\n        except pickle.UnpicklingError as e:\n            self._on_bad_data_received()\n            await self._handle_bad_response(tid, parse_task, e)\n        except EOFError as e:\n            self._on_disconnected()\n            await self._handle_bad_response(tid, parse_task, e)\n\n    async def connection(self, reader, writer):\n        self._on_connected()\n        while True:\n            if self.state != 'ready':\n                writer.write_eof()\n                break\n            if self.planned_shutdown:\n                self._on_planned_shutdown()\n                writer.write_eof()\n                break\n            await self._task_session(reader, writer)\n\n    # process handling coroutines (invoked from this object)\n\n    async def _process(self):\n        await parser_process(loop=self.loop, **self.process_kw)\n        self._on_process_finish()\n\n    async def _readiness(self):\n        await asyncio.sleep(self.MAX_WORKER_CONN_TIME, loop=self.loop)\n        if self.state == 'spawning':\n            self._on_readiness_fail()\n\n    async def _delayed_kill(self):\n        await asyncio.sleep(self.DISCONNECTED_PROCESS_TIMEOUT, loop=self.loop)\n        if self.state == 'dying':\n            self.kill()\n\n\nclass 
WorkerManager:\n    def __init__(self, *, loop, worker_count=0, **worker_kw):\n        self.loop = loop\n        self.worker_count = worker_count\n        self.worker_kw = worker_kw\n\n        self.id_allocator = count()\n        self.workers = {}\n        self.task = asyncio.ensure_future(self._main(), loop=loop)\n        self.task._log_destroy_pending = False\n\n    def set_worker_count(self, n):\n        assert 0 <= n < 100\n        self.worker_count = n\n\n    async def _proto(self, reader, writer):\n        try:\n            procid = await recv_message(reader)\n            assert isinstance(procid, int)\n            assert procid in self.workers\n        except (EOFError, pickle.UnpicklingError, AssertionError):\n            writer.write_eof()\n            return\n        await self.workers[procid].connection(reader, writer)\n\n    def _cleanup_workers(self):\n        keys_for_removal = [k for k, w in self.workers.items() if w.state == 'dead']\n        for k in keys_for_removal:\n            del self.workers[k]\n\n    def _shutdown_some_workers(self):\n        good = []\n        for k, w in self.workers.items():\n            if w.state in ('spawning', 'ready', 'processing') and not w.planned_shutdown:\n                good.append(k)\n        if self.worker_count <= len(good):\n            return\n        for k in sample(good, len(good) - self.worker_count):\n            self.workers[k].planned_shutdown = True\n\n    async def _main(self):\n        async with UnixSocketServer(proto=self._proto, fname=self.sockfname, loop=self.loop):\n            while True:\n                self._cleanup_workers()\n                if len(self.workers) > self.worker_count:\n                    self._shutdown_some_workers()\n                while len(self.workers) < self.worker_count:\n                    self.workers[next(self.id_allocator)] = Worker(loop=self.loop, **self.worker_kw)\n                await asyncio.sleep(5, loop=self.loop)\n","sub_path":"scutigera/parserworker.py","file_name":"parserworker.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"387879745","text":"from django import template\n\nfrom wagtail.core.models import Page\n\nfrom base.models import FooterText, RawHtml\n\nregister = template.Library()\n# https://docs.djangoproject.com/en/1.9/howto/custom-template-tags/\n\n\n@register.inclusion_tag('tags/breadcrumbs.html', takes_context=True)\ndef breadcrumbs(context):\n    self = context.get('self')\n    if self is None or self.depth <= 2:\n        # When on the home page, displaying breadcrumbs is irrelevant.\n        ancestors = ()\n    else:\n        ancestors = Page.objects.ancestor_of(\n            self, inclusive=True).filter(depth__gt=1)\n    return {\n        'ancestors': ancestors,\n        'request': context['request'],\n    }\n\n\n@register.inclusion_tag('tags/footer_text.html', takes_context=True)\ndef get_footer_text(context):\n    footer_text = \"\"\n    if FooterText.objects.first() is not None:\n        footer_text = FooterText.objects.all()\n\n    return {\n        'footer_text': footer_text,\n    }\n\n\n# Advert snippets\n@register.inclusion_tag('tags/rawhtml.html', takes_context=True)\ndef rawhtml(context):\n    return {\n        'raw_html': RawHtml.objects.all(),\n        'request': context['request'],\n    }\n","sub_path":"base/templatetags/snippet_tags.py","file_name":"snippet_tags.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"370230240","text":"from .context import Context\nfrom .utils import *\nfrom torch import nn\nimport typing\n\n\nclass NNSystem(object):\n    @property\n    def parent(self) -> 'NNSystem':\n        return self._parent\n\n    @property\n    def name(self) -> str:\n        return self._sysname\n\n    def __init__(self, name: str, context: Context):\n        self._context = context\n        self._config = context.merge_config({}, prefix=name)\n        self._sysname = 
name\n\n self._children = {}\n self._parent = None\n self._workspace = context.workspace + \"/system\"\n mkdir_if_not_exist(self._workspace)\n self._buildspace = self._workspace + f\"/{'/'.join(self.name.split('.'))}\"\n mkdir_if_not_exist(self._buildspace)\n\n def new_child(self, name, system_type: 'typing.Type[NNSystem]', **kwargs):\n _name = self.name + \".\" + name\n ctx = self._context\n\n def _lazy_build():\n system = system_type(_name, ctx, **kwargs)\n system._parent = self\n return system\n\n self._children[_name] = _lazy_build\n\n def get_child(self, name) -> 'NNSystem':\n _name = self.name + \".\" + name\n assert _name in self._children, f\"Sub-system({_name}) is not found\"\n child = self._children[_name]\n if isinstance(child, NNSystem):\n return child\n else:\n lazy_build = child\n child = lazy_build()\n self._children[_name] = child\n return child\n\n def get_child_nn_module(self, system_name, network_name, with_load_params=False):\n system = self._children[system_name]\n return system.get_nn_module(network_name, with_load_params)\n\n def save_params(self, name, nn_module=None):\n if nn_module is None:\n nn_module = self.get_nn_module(name)\n torch.save(nn_module.state_dict(), f\"{self._buildspace}/{name}.pm\")\n\n def load_params(self, name, nn_module=None, required=True):\n if nn_module is None:\n nn_module = self.get_nn_module(name)\n try:\n nn_module.load_state_dict(torch.load(f\"{self._buildspace}/{name}.pm\"))\n except IOError as e:\n if required:\n raise e\n\n def open_resource(self, name, mode='r'):\n return open(f\"{self._buildspace}/{name}\", mode)\n\n def has_resource(self, name):\n return os.path.isfile(f\"{self._buildspace}/{name}\")\n\n def get_nn_module(self, name, with_load_params=False):\n if hasattr(self, name):\n nn_module = getattr(self, name)\n elif hasattr(self, \"_\" + name):\n nn_module = getattr(self, \"_\" + name)\n else:\n raise ValueError(f\"{self.name}.{name} is not found\")\n if isinstance(nn_module, nn.Module):\n if with_load_params:\n nn_module.load_state_dict(torch.load(f\"{self._buildspace}/{name}.pm\"))\n else:\n return nn_module\n else:\n raise TypeError(f\"{self.name}.{name} is not a pytorh nn.Module\")\n\n def train(self, name=None, **kwargs):\n if not name:\n train_fn_name = f\"_train\"\n else:\n train_fn_name = f\"_train_{name}\"\n if hasattr(self, train_fn_name):\n train_fn = getattr(self, train_fn_name)\n train_fn(**kwargs)\n else:\n raise ValueError(f\"The train-method({train_fn_name}) is not found\")\n","sub_path":"ngs/base/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"566161824","text":"#!/usr/bin/env python3\n\nimport sqlite3\nimport sys\nimport re\n\nfrom collections import Counter\nfrom ast import literal_eval\n\ndef main(args):\n db_path = args[0]\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n cursor.execute(\"SELECT role FROM participant WHERE role NOT NULL\")\n roles = [v[0] for v in cursor.fetchall() if not v[0]==\"test\"]\n count = Counter()\n\n# sub = {\n# 'anim': 'animator',\n# 'art' : 'artist',\n# 'cinematic' : 'cinematics',\n# }\n#\n# for r in roles:\n# #for w in \"artist animator senior manage admin lead\".split():\n# r = r.split('(')[0]\n# for w in r.split():\n# w = sub.get(w,w)\n# if w == \"/\":\n# continue\n# count[w] += 1\n# for c, v in count.most_common():\n# print(\"{}: {}\".format(c,v))\n#\n# roles = sorted([r.split('(')[0].strip() for r in roles])\n# print(', 
'.join(roles))\n#\n#\n# cursor.execute(\"SELECT * FROM participant WHERE role NOT NULL\")\n# for c in cursor.fetchall():\n# print(c)\n\n cursor.execute(\"\"\"\n SELECT question.title, answer.answer FROM question\n JOIN survey ON survey.id = question.id_survey\n JOIN answer ON id_question = question.id\n JOIN participant ON participant.id = answer.id_participant\n WHERE question.title LIKE '%short description%'\n \"\"\")\n\n\n sub = {\n 'tasks': 'task',\n 'shot-task': 'task',\n 'finding': 'find',\n 'lookup': 'find',\n 'searches': 'find',\n 'search': 'find',\n 'check': 'find',\n 'feedback': 'information',\n 'status': 'information',\n 'update': 'information',\n 'communicate': 'information',\n }\n\n count = Counter()\n set_ans = set()\n ans = cursor.fetchall()\n title = ans[0][0]\n for c in ans:\n if len(re.findall('test', c[1])) >= 3:\n continue\n set_ans.add(c[1])\n# print(title)\n dicts = [literal_eval(string) for string in set_ans]\n empty = 0\n# print()\n for d in dicts:\n for i, ans in d.items():\n for w in ans.split():\n w = w.lower()\n w = sub.get(w,w)\n count[w] += 1\n if not ans.strip():\n empty += 1\n else:\n pass\n# print(ans)\n# print()\n# print(\"{}/{}\".format(empty, 3*len(dicts)))\n\n for w in \"for to a on of and the i have\".split():\n if count.get(w):\n del count[w]\n\n for k,v in count.most_common(20):\n print(\"{}:{}\".format(k,v))\n\n dict_rev = {}\n for k,v in sub.items():\n l = dict_rev.get(v)\n if not l:\n l = dict_rev[v] = set()\n l.add(k)\n\n# for k,v in dict_rev.items():\n# print(\"\\\\begin{{minipage}}[c]{{{:.1f}\\\\textwidth}}\".format(1.0/3.0))\n# print(\"\\\\begin{equation*} \\\\left. \\\\begin{tabular}{c}\")\n# for w in v:\n# print(\" \\\\text{{{}}} \\\\\\\\\".format(w))\n# print(\"\\\\end{{tabular}}\\\\right \\\\}} \\\\text{{{}}} \\\\end{{equation*}}\".format(k))\n# print(\"\\\\end{minipage}\")\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"db_parse.py","file_name":"db_parse.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"87688577","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDatabase helpers\n\"\"\"\n\nimport sqlite3\n\n\ndef create_db_table(engine='sqlite3', db_name='db.sqlite3', table_name='example_table'):\n \"\"\"Create table in db\"\"\"\n\n try:\n if engine == 'sqlite3':\n connection = sqlite3.connect(db_name)\n cursor = connection.cursor()\n\n create_query = \"create table if not exists {table_name} (id integer primary key \\\n autoincrement, url text, title text, headings text, keywords text, \\\n full_content text);\".format(table_name=table_name)\n\n cursor.execute(create_query)\n\n connection.commit()\n connection.close()\n except:\n return False\n\n return True\n\n\ndef store_in_db(engine='sqlite3', db_name='db.sqlite3', table_name='example_table', url=None, data=None):\n \"\"\"\n Stores the data in specified db engine\n \"\"\"\n\n if not (data and url):\n return False\n\n try:\n db_data = [url, data['title_content'], data['headings_content'], data['keywords_content'], data['full_content']]\n\n if engine == 'sqlite3':\n insert_query = \"insert into {table_name} (url, title, headings, keywords, \\\n full_content) values (?, ?, ?, ?, ?);\".format(table_name=table_name)\n\n connection = sqlite3.connect(db_name)\n cursor = connection.cursor()\n\n cursor.execute(insert_query, db_data)\n\n connection.commit()\n connection.close()\n except:\n return False\n\n return True\n\n\ndef fetch_from_db(engine='sqlite3', db_name='db.sqlite3', 
table_name='example_table', url=None):\n \"\"\"Fetches data from db of the specified url\"\"\"\n\n if not url:\n return None\n\n try:\n if engine == 'sqlite3':\n select_query = \"SELECT * from {table_name} where url=?\".format(table_name=table_name)\n connection = sqlite3.connect(db_name)\n cursor = connection.cursor()\n\n cursor.execute(select_query, [url])\n db_data = cursor.fetchone()\n except:\n return None\n\n return db_data\n","sub_path":"online-crawler/db_helpers.py","file_name":"db_helpers.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"94386819","text":"#!/usr/bin/env python\n\n\"\"\"\nDaily Coding Problem #37 for 7 September, 2018\n------------------------------------------\nThis problem was asked by Google.\n\nThe power set of a set is the set of all its subsets. Write a function that, given a set, generates its power set.\n\nFor example, given the set {1, 2, 3}, it should return {{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}.\n\nYou may also use a list or array to represent a set.\n\"\"\"\n\nl = [1,2,3]\n\ndef power_set(l):\n n = len(l)\n power_set = list()\n for i in range(1 << n):\n power_set.append([l[j] for j in range(n) if (i & (1 << j))])\n\n return power_set\n\nprint(power_set(l))","sub_path":"problems/37/thirty_seven.py","file_name":"thirty_seven.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"3433635","text":"import requests\nimport time\nimport json\nfrom alidayu.main import AlibabaAliqinFcSmsNumSendRequest\nfrom pymongo import MongoClient\n\n\ndef get_real_time_data():\n current_time = int(time.time())\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Host': 'www.smzdm.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/53.0.2785.143 Safari/537.36'\n }\n\n url = 'http://www.smzdm.com/json_more?timesort=' + str(current_time)\n response = requests.get(url=url, headers=headers)\n\n result_list = []\n\n for string in json.loads(response.text):\n title = string['article_title']\n price = string['article_price']\n link = string['article_link']\n url = string['article_url']\n\n result = {\n 'title': title,\n 'price': price,\n 'link': link,\n 'url': url\n }\n\n result_list.append(result)\n\n return result_list\n\n\ndef read_config():\n with open('./config/config.json', 'rt', encoding='utf-8') as file:\n return json.load(file)\n\n\ndef save_data(data):\n client = MongoClient()\n db = client.smzdm\n db.smzdm.insert_one(data)\n\n\ndef is_data_existed(data):\n client = MongoClient()\n db = client.smzdm\n url = data['url']\n cursor = db.smzdm.find({\"url\": url})\n return cursor.count() > 0\n\n\ndef send_sms(config, key, price):\n # 正式环境 http://gw.api.taobao.com/router/rest\n # 沙箱环境 http://gw.api.tbsandbox.com/router/rest\n appkey = config['appkey']\n secret = config['secret']\n domain = \"http://gw.api.taobao.com/router/rest\"\n req = AlibabaAliqinFcSmsNumSendRequest(appkey, secret, domain)\n\n req.extend = \"\"\n req.sms_type = \"normal\"\n req.sms_free_sign_name = config['sms_free_sign_name']\n req.sms_param = \"{key:'\" + key + \"',price:'\" + price + \"'}\"\n req.rec_num = config['rec_num']\n req.sms_template_code = config['sms_template_code']\n try:\n resp = req.getResponse()\n print(resp)\n except Exception as 
e:\n print(e)\n\n\ndef we_push(config, title, desp):\n sock = config['sock']\n url = 'https://sc.ftqq.com/%s.send' % sock\n payload = {'text': title, 'desp': desp}\n requests.post(url, data=payload, verify=False)\n\n\ndef main():\n config = read_config()\n real_time_data = get_real_time_data()\n for data in real_time_data:\n for key in config['keys']:\n if data['title'].find(key) != -1:\n if not is_data_existed(data):\n print(data)\n send_sms(config, key, data['price'])\n we_push(config, data['title'], data['url'])\n save_data(data)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"SmzdmSpider/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"606465232","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\n\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\nimport six\nfrom chainer import Chain, ChainList, Variable, optimizers\n\nAtomIdMax = 100\n\n\nclass nfp(object):\n \"\"\"NFP manager\n\n This class has the generator function of NFP and\n updator of NN for learning the generator of NFP.\n\n Args:\n d: Dimension of NFP.\n f: Dimension of the feature for generating NFP.\n R: Radius for generating NFP.\n \"\"\"\n\n def __init__(self, d, f, R):\n self.d = d\n self.f = f\n self.R = R\n g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])\n\n H = ChainList(*[ChainList(*[L.Linear(f, f)\n for i in six.moves.range(R)])\n for j in six.moves.range(5)])\n W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R)])\n self.model = Chain(H=H, W=W, g=g)\n self.optimizer = optimizers.Adam()\n self.optimizer.setup(self.model)\n\n def get_nfp(self, mol, train=True):\n d, f, R = self.d, self.f, self.R\n atoms = mol.GetAtoms()\n n = len(atoms)\n fp = Variable(np.zeros((1, d), dtype='float32'), volatile=not train)\n r = [[Variable(np.zeros((1, f), dtype='float32'), volatile=not train)\n for i in six.moves.range(n)] for j in six.moves.range(R + 1)]\n for atom in atoms:\n a = atom.GetIdx()\n anum = atom.GetAtomicNum()\n r[0][a] += self.model.g[anum](Variable(np.array([[1]],\n dtype='float32'),\n volatile=not train))\n for l in six.moves.range(R):\n v = [Variable(np.zeros([1, f], dtype='float32'),\n volatile=not train)\n for i in six.moves.range(n)]\n for atom in atoms:\n a = atom.GetIdx()\n v[a] += r[l][a]\n for n_atom in atom.GetNeighbors():\n na = n_atom.GetIdx()\n v[a] += r[l][na]\n for atom in atoms:\n a = atom.GetIdx()\n deg = atom.GetDegree()\n deg = min(5, max(1, deg))\n r[l + 1][a] = F.tanh(self.model.H[deg - 1][l](v[a]))\n i = F.softmax(self.model.W[l](r[l + 1][a]))\n fp += i\n return fp\n","sub_path":"bremen/fp.py","file_name":"fp.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"465618356","text":"class Animal:\n def __init__(self, animalName):\n self.animalName = animalName\n self.hintData = {\"elephant\": [\"I have exceptional memory\",\n \"I am the largest land-living mammal in the world\",\n \"I have long nose\"],\n \"tiger\": [\"I am the biggest cat\",\n \"I come in black and white or orange and black\",\n \"I am apex predators, primarily preying on ungulates such as deer and bovids\"],\n \"bat\": [\"I use echo-location\",\n \"I can fly\",\n \"I see well in dark\"]\n }\n\n def guess_who_am_i(self):\n print(\"I will give you 3 hints, guess what animal I am\" + \"\\n\")\n guessSuccess = 
False\n for i in range(3):\n print(self.hintData[self.animalName][i])\n guessName = input(\"Who am I?: \")\n if guessName == self.animalName:\n print(\"You got it! I am \" + guessName + \"\\n\\n\")\n guessSuccess = True\n break\n else:\n print(\"Nope, try again!\" + \"\\n\")\n if not guessSuccess:\n print(\"I'm out of hints! The answer is: \" + self.animalName + \"\\n\\n\")\n","sub_path":"Homework-6-Class/Program-1-Animal-Class/Animal.py","file_name":"Animal.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"583357651","text":"from reachy import Reachy, parts\nfrom reachy.trajectory import TrajectoryRecorder\nimport numpy as np\nfrom collections import OrderedDict\nimport traceback\nimport re\n\n\ndef patch_force_gripper(forceGripper):\n def __init__(self, root, io):\n \"\"\"Create a new Force Gripper Hand.\"\"\"\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict(\n {name: dict(conf) for name, conf in self.dxl_motors.items()}\n )\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"\n\n forceGripper.__init__ = __init__\n\n return forceGripper\n\n\nparts.arm.RightForceGripper = patch_force_gripper(parts.arm.RightForceGripper)\n\nio_setting = None\nwhile not io_setting:\n input_answer = input(\"Is this a simulated Reachy? y/n? \")\n if input_answer == \"y\" or input_answer == \"Y\":\n io_setting = \"ws\"\n elif input_answer == \"n\" or input_answer == \"N\":\n io_setting = \"/dev/ttyUSB*\"\n\ntry:\n reachy = Reachy(right_arm=parts.RightArm(io=io_setting, hand=\"force_gripper\"))\nexcept:\n traceback.print_exc()\n exit(\"Exception when initializing Reachy\")\n\nif io_setting == \"ws\":\n input(\"Connect the Unity simulator, then press Enter to continue.\")\n\nfor m in reachy.right_arm.motors:\n print(f\"Motor found: {m.name} - pos:{m.present_position}\")\n m.compliant = True\n\n\nstop_loop = False\nwhile not stop_loop:\n print(\"Ready to record!\")\n\n record_motor_list = []\n\n print(\n \"Type in the motors you want to record by index. Shoulder pitch is 0, gripper is 7.\"\n )\n print(\n \"For example, if you want to record shoulder pitch, shoulder roll and forearm yaw, type '014'.\"\n )\n while not record_motor_list:\n input_motor_nums = input(\"Enter motors now. 
Only enter digits 0-7: \")\n if input_motor_nums and not re.compile(r\"[^0-7]\").search(input_motor_nums):\n if \"0\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.shoulder_pitch)\n if \"1\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.shoulder_roll)\n if \"2\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.arm_yaw)\n if \"3\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.elbow_pitch)\n if \"4\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.hand.forearm_yaw)\n if \"5\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.hand.wrist_pitch)\n if \"6\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.hand.wrist_roll)\n if \"7\" in input_motor_nums:\n record_motor_list.append(reachy.right_arm.hand.gripper)\n\n print(f\"You have selected {[motor.name for motor in record_motor_list]}\")\n input_answer = input(\n \"Enter y to continue, enter anything else to repeat selection: \"\n )\n if input_answer != \"y\" and input_answer != \"Y\":\n record_motor_list = []\n\n recording_type = None\n while not recording_type:\n input_answer = input(\n \"Enter recording type. p for position, or t for trajectory: \"\n )\n if input_answer == \"t\" or input_answer == \"T\":\n recording_type = \"trajectory\"\n elif input_answer == \"p\" or input_answer == \"P\":\n recording_type = \"position\"\n\n recording = None\n\n if recording_type == \"position\":\n input(\"Move the arm into the desired position, then press Enter to capture it:\")\n recording = {motor.name: motor.present_position for motor in record_motor_list}\n print(f\"Recorded position: {recording}\")\n else:\n recorder = TrajectoryRecorder(record_motor_list)\n input(\n \"Move the arm into the start position for the trajectory, then press Enter to begin recording:\"\n )\n recorder.start()\n input(\"To stop recording, press Enter again:\")\n recorder.stop()\n recording = recorder.trajectories\n print(\"Recorded trajectory:\")\n print(recording)\n\n save = None\n while not save:\n input_answer = input(\"Save this? y/n: \")\n if input_answer == \"n\" or input_answer == \"N\":\n save = True\n elif input_answer == \"y\" or input_answer == \"Y\":\n save = True\n filename = input(\"Enter filename to save as: \")\n filename = filename + \".npz\"\n try:\n np.savez(filename, **recording)\n print(f'Saved as {filename}!')\n except:\n traceback.print_exc()\n exit(\"Exception when saving file\")\n \n again = None\n while not again:\n input_answer = input(\"Record another? 
y/n: \")\n if input_answer == \"n\" or input_answer == \"N\":\n again = True\n stop_loop = True\n elif input_answer == \"y\" or input_answer == \"Y\":\n again = True\n","sub_path":"tools/record_trajectories.py","file_name":"record_trajectories.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"254074698","text":"import sys, random, pygame\nimport colors\n\n\nclass Paddle(pygame.Rect):\n\n def __init__(self, velocity, left_key, right_key, *args, **kwargs):\n self.velocity = velocity\n self.left_key = left_key\n self.right_key = right_key\n super().__init__(*args, **kwargs)\n\n def move_paddle(self, board_width):\n keys_pressed = pygame.key.get_pressed()\n\n if keys_pressed[self.left_key]:\n if self.x - self.velocity > 0:\n self.x -= self.velocity\n\n if keys_pressed[self.right_key]:\n if self.x + self.velocity < board_width - self.width:\n self.x += self.velocity\n\n\nclass Ball(pygame.Rect):\n def __init__(self, velocity, *args, **kwargs):\n self.velocity = velocity\n self.angle = 0\n super().__init__(*args, **kwargs)\n\n def move_ball(self):\n self.y += self.velocity\n self.x += self.angle\n\n\nclass Arkanoid:\n HEIGHT = 960\n WIDTH = 1280\n PADDLE_WIDTH = 200\n PADDLE_HEIGHT = 15\n PADDLE_VELOCITY = 12\n BALL_WIDTH = 20\n BALL_VELOCITY = 7\n BALL_ANGLE = 0\n BALL_COLOR = colors.LIGHTYELLOW2\n BRICK_WIDTH = 70\n BRICK_HEIGHT = 30\n BRICK_COLOR = random.choice(colors.colorlist)\n STATE_BALL_IN_PADDLE = 0\n STATE_PLAYING = 1\n STATE_WON = 2\n STATE_GAME_OVER = 3\n COLOUR = (255, 0, 0)\n BACKGROUND_IMAGE = pygame.image.load(\"images/back1.jpeg\")\n PADDLE_IMAGE = pygame.image.load(\"images/paddle.png\")\n\n def __init__(self):\n pygame.init()\n self.lives = 3\n self.score = 0\n self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))\n self.font = pygame.font.Font(None, 30)\n self.clock = pygame.time.Clock()\n self.state = self.STATE_BALL_IN_PADDLE\n\n self.paddles = []\n self.paddles.append(Paddle(self.PADDLE_VELOCITY, pygame.K_LEFT, pygame.K_RIGHT,\n self.WIDTH / 2 - self.PADDLE_WIDTH / 2,\n self.HEIGHT - self.PADDLE_HEIGHT,\n self.PADDLE_WIDTH, self.PADDLE_HEIGHT))\n\n self.balls = []\n self.balls.append(Ball(self.BALL_VELOCITY, self.WIDTH / 2 - self.BALL_WIDTH / 2,\n self.HEIGHT - self.PADDLE_HEIGHT * 2,\n self.BALL_WIDTH, self.BALL_WIDTH))\n\n self.bricks = []\n y_ofs = 30\n for i in range(8):\n x_ofs = 10\n for j in range(15):\n self.bricks.append(pygame.Rect(x_ofs, y_ofs, self.BRICK_WIDTH, self.BRICK_HEIGHT))\n x_ofs += self.BRICK_WIDTH + 15\n y_ofs += self.BRICK_HEIGHT + 10\n\n def get_brick_color(self):\n return random.choice(colors.colorlist)\n\n def check_ball_hits_wall(self):\n for ball in self.balls:\n if ball.y > self.HEIGHT:\n self.lives -= 1\n if self.lives > 0:\n self.state = self.STATE_BALL_IN_PADDLE\n else:\n self.state = self.STATE_GAME_OVER\n break\n\n if ball.x > self.WIDTH - self.BALL_WIDTH or ball.x < 0:\n ball.angle = -ball.angle\n elif ball.y < 0:\n ball.velocity = -ball.velocity\n\n def check_ball_hits_paddle(self):\n for ball in self.balls:\n for paddle in self.paddles:\n if ball.colliderect(paddle):\n ball.velocity = -ball.velocity\n ball.angle = random.randint(-10, 10)\n\n def check_ball_hits_brick(self):\n for ball in self.balls:\n for brick in self.bricks:\n if ball.colliderect(brick):\n self.score += 3\n ball.velocity = -ball.velocity\n self.bricks.remove(brick)\n break\n\n if len(self.bricks) == 0:\n self.state = self.STATE_WON\n\n def 
show_stats(self):\n if self.font:\n font_surface = self.font.render(\"SCORE: \" + str(self.score) + \" LIVES: \"\n + str(self.lives), False, (255, 255, 255))\n self.screen.blit(font_surface, (self.WIDTH / 2 - 100, 5))\n\n def show_message(self, message):\n if self.font:\n size = self.font.size(message)\n font_surface = self.font.render(message, False, self.COLOUR)\n x = (self.WIDTH - size[0]) / 2\n y = (self.HEIGHT - size[1]) / 2\n self.screen.blit(font_surface, (x, y))\n\n def check_input(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE] and self.state == self.STATE_BALL_IN_PADDLE:\n self.state = self.STATE_PLAYING\n\n def game_loop(self):\n while True:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n return\n\n self.screen.blit(self.BACKGROUND_IMAGE, (0, 0))\n self.check_input()\n\n for paddle in self.paddles:\n paddle.move_paddle(self.WIDTH)\n pygame.draw.rect(self.screen, self.COLOUR, paddle)\n\n for ball in self.balls:\n ball.move_ball()\n pygame.draw.rect(self.screen, self.BALL_COLOR, ball)\n\n for brick in self.bricks:\n pygame.draw.rect(self.screen, self.get_brick_color(), brick)\n\n if self.state == self.STATE_PLAYING:\n self.check_ball_hits_brick()\n self.check_ball_hits_paddle()\n self.check_ball_hits_wall()\n elif self.state == self.STATE_BALL_IN_PADDLE:\n self.balls[0].left = self.paddles[0].left + self.paddles[0].width / 2\n self.balls[0].top = self.paddles[0].top - 1.5 * self.balls[0].height\n self.show_message(\"PRESS SPACE TO LAUNCH THE BALL\")\n elif self.state == self.STATE_GAME_OVER:\n self.show_message(\"GAME OVER. PRESS ESC TO EXIT THE GAME\")\n elif self.state == self.STATE_WON:\n self.show_message(\"YOU WON! ESC TO EXIT THE GAME\")\n\n self.show_stats()\n pygame.display.update()\n # pygame.display.flip()\n # self.screen.fill((0, 45, 45))\n self.clock.tick(60)\n\n\nif __name__ == '__main__':\n arkanoid = Arkanoid()\n arkanoid.game_loop()\n","sub_path":"Arkanoid_Game/arkanoid.py","file_name":"arkanoid.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"314590744","text":"import JCampSG\r\nimport string\r\nimport numpy\r\nimport csv\r\nimport time\r\nimport timeit\r\nimport math\r\nimport sys\r\nfrom numpy import genfromtxt\r\nimport os.path\r\n\r\n#This variable determines the largest fragment size that the program can handle\r\nMaximumAtomicUnit = 300\r\n\r\ndef createArray(jcampDict, filename):\r\n DataArray =[]\r\n\r\n counterY=0\r\n counter =0\r\n for number in jcampDict['x']: \r\n counter = counter +1\r\n counterY2=0\r\n while counter != float(number):\r\n \r\n DataArray.append(0)\r\n #fileData.write('%f,'%zero)\r\n #fileData.write('\\n')\r\n counter = counter +1 \r\n\r\n for number2 in jcampDict['y']:\r\n if counterY2 == counterY:\r\n \r\n DataArray.append(number2) \r\n #fileData.write('%f,'%number2)\r\n #fileData.write('\\n')\r\n #counterY2 =counterY2 + 1\r\n break;\r\n else:\r\n counterY2=counterY2 +1 \r\n\r\n counterY= counterY +1\r\n\r\n if len(DataArray) < MaximumAtomicUnit:\r\n for i in range(len(DataArray), MaximumAtomicUnit):\r\n DataArray.append(0)\r\n return DataArray\r\n\r\ndef combineArray(Array1, Array2):\r\n \r\n for i in range(MaximumAtomicUnit):\r\n Array1.append(Array2[i])\r\n \r\n \r\n return Array1\r\n\r\ndef exportToCSV(filename, OverallArray, listOfFiles, MoleculeNames, ENumbers, MWeights, knownMoleculeIonizationTypes, knownIonizationFactorsRelativeToN2, 
SourceOfFragmentationPatterns, SourceOfIonizationData):\r\n\r\n \r\n f5 = open(filename, 'w')\r\n \r\n f5.write('#CommentsLine:')\r\n for i in range(len(MoleculeNames)):\r\n f5.write(',')\r\n f5.write('\\n')\r\n \r\n #write the molecules\r\n f5.write('Molecules')\r\n for i in MoleculeNames:\r\n f5.write(',%s' %i)\r\n f5.write('\\n')\r\n\r\n #write the Electron Numbers\r\n f5.write('Electron Numbers')\r\n for i in ENumbers:\r\n f5.write(',%f'%(int(i)))\r\n f5.write('\\n')\r\n \r\n #write the ionization type\r\n f5.write('knownMoleculesIonizationTypes')\r\n for i in knownMoleculeIonizationTypes:\r\n f5.write(',%s'%i)\r\n f5.write('\\n')\r\n \r\n #write the ionization factor\r\n f5.write('knownIonizationFactorsRelativeToN2')\r\n for i in knownIonizationFactorsRelativeToN2:\r\n f5.write(',%s'%i)\r\n f5.write('\\n')\r\n \r\n #write the header\r\n f5.write('SourceOfFragmentationPatterns')\r\n for i in SourceOfFragmentationPatterns:\r\n f5.write(',%s' %i)\r\n f5.write('\\n')\r\n \r\n #write the ionization data source\r\n f5.write(\"SourceOfIonizationData\")\r\n for i in SourceOfIonizationData:\r\n f5.write(',%s' %i)\r\n f5.write('\\n')\r\n \r\n #write the molecular weights\r\n f5.write('Molecular Mass')\r\n for i in MWeights:\r\n f5.write(',%f' %(float(i)))\r\n f5.write('\\n')\r\n\r\n Array1=OverallArray\r\n printRow= len(Array1)//MaximumAtomicUnit\r\n printArray =[]\r\n zeros = True\r\n\r\n \r\n for i in range(1,MaximumAtomicUnit+1):\r\n print(i)\r\n zeros = True\r\n for k in range(printRow):\r\n if Array1[MaximumAtomicUnit*k +i-1] != 0: #The -1 is for array indexing\r\n zeros =False \r\n if zeros == False:\r\n f5.write('%d'%(i)) \r\n for y in range(printRow): \r\n f5.write(',%d'%(Array1[MaximumAtomicUnit*y +i-1])) #The -1 is for array indexing\r\n f5.write('\\n')\r\n \r\n f5.close()\r\n \r\nSourceOfFragmentationPattern = ''\r\nSourceOfFragmentationPatterns = list()\r\nSourceOfIonizationDatum = ''\r\nSourceOfIonizationData = list()\r\nmoleculeName=''\r\nMoleculeNames=list()\r\nENumber = 0\r\nENumbers =list()\r\nMWeight =0.0 \r\nMWeights=list()\r\nknownMoleculeIonizationType = ''\r\nknownMoleculeIonizationTypes = list()\r\nknownIonizationFactorRelativeToN2 = 0.0\r\nknownIonizationFactorsRelativeToN2 = list()\r\nfilenames=''\r\nlistOfFiles=list()\r\n\r\n\r\nfileYorN=''\r\n\r\nprint(\"would you like to load molecular information from a csv file? Enter 'yes' or 'no'. If not, then you will enter files manually.\")\r\nfileYorN=input()\r\n\r\n\r\nif (fileYorN =='no'):\r\n print(\"Welcome! Enter the name of the molecule, its mass, its ionization factor relative to nitrogen (put unknown if you don't know), its ionization type (put unknown if you don't know), its number of electrons (or the number -1 if you don't need that), and the associated JDX file in order to generate a csv spectrum file\")\r\n print(\"If a molecule name has a comma in it (e.g. 1,3-pentadiene) or any other input has a comma in it, we recommend using an _ (e.g. 
1_3-pentadiene) since this information is stored in a comma separated value file.\")\r\n print(\"Enter the molecule's Name: \")\r\n moleculeName = input()\r\n\r\n\r\n\r\n while moleculeName != 'EXIT':\r\n MoleculeNames.append(moleculeName)\r\n print(\" enter the electron Number: \")\r\n ENumber = input()\r\n ENumbers.append(ENumber)\r\n print(\" enter the molecule's ionization type (Enter unknown if unknown): \")\r\n knownMoleculeIonizationType = input()\r\n knownMoleculeIonizationTypes.append(knownMoleculeIonizationType)\r\n print(\" enter the molecule's ionization factor relative to N2 (Enter a unknown if unknown): \")\r\n knownIonizationFactorRelativeToN2 = input()\r\n knownIonizationFactorsRelativeToN2.append(knownIonizationFactorRelativeToN2)\r\n print(\" enter the source of the fragmention pattern: \")\r\n SourceOfFragmentationPattern = input()\r\n SourceOfFragmentationPatterns.append(SourceOfFragmentationPattern)\r\n print(\" enter the source of the ionization data: \")\r\n SourceOfIonizationDatum = input()\r\n SourceOfIonizationData.append(SourceOfIonizationDatum)\r\n print(\" enter the Molecular Weight:\")\r\n MWeight= input()\r\n MWeights.append(MWeight) \r\n print(\"enter the file name(EX: oxygen.jdx):\")\r\n print(\"If the file is in a separate directory, \\ninclude the path(EX: JDXFiles\\oxygen.jdx):\")\r\n filename=input()\r\n listOfFiles.append(filename)\r\n print(\"Enter the name of the next molecule or type EXIT to finish entering molecules\")\r\n moleculeName=input()\r\n\r\nelif(fileYorN=='yes'):\r\n fileInputName=''\r\n print(\"enter the file input name please:\")\r\n fileInputName=input()\r\n #input_file ='attempt.csv'\r\n list_holder=[]\r\n spamReader = csv.reader(open('%s' %fileInputName), delimiter=',')\r\n for row in spamReader:\r\n list_holder.append(row)\r\n \r\n \r\n#The user is provided with the option to direct the functions output as they would like. \r\nprint(\"Would you like to specify an output location? If yes, type the path to the location. 
For default, hit enter.\")\r\noutputDirectory = input()\r\n#If the user selects default, then the output is piped to \"OutputFiles\"\r\nif outputDirectory == \"\":\r\n outputDirectory = \"OutputFiles\"\r\n\r\n\r\n#mkaing the directory for exported files, if it isn't already there\r\nif not os.path.exists(outputDirectory):\r\n os.makedirs(outputDirectory)\r\n\r\n#only if files exist to draw from\r\nif fileYorN == 'yes':\r\n#Checking if a directory exists to be drawn from\r\n if os.path.isdir(\"JDXFiles\"):\r\n #This for loop draws from a directory of the user's choice (hard-coded)\r\n for i in range(1, len(list_holder)):\r\n \r\n# The below line has been added to allow the program to draw files from \r\n # outside it's own directory.\r\n list_holder[i][3] = \"JDXFiles\\\\\" + list_holder[i][3]\r\n \r\n MoleculeNames.append(list_holder[i][0])\r\n ENumbers.append(list_holder[i][1])\r\n MWeights.append(list_holder[i][2])\r\n listOfFiles.append(list_holder[i][3])\r\n knownMoleculeIonizationTypes.append(list_holder[i][4])\r\n knownIonizationFactorsRelativeToN2.append(list_holder[i][5])\r\n SourceOfFragmentationPatterns.append(list_holder[i][6])\r\n SourceOfIonizationData.append(list_holder[i][7])\r\n \r\n#Otherwise, assume that the files are in the directory of the JDXConv-UI\r\n else:\r\n \r\n#This for loop draws files from the current directory\r\n for i in range(1, len(list_holder)):\r\n \r\n MoleculeNames.append(list_holder[i][0])\r\n ENumbers.append(list_holder[i][1])\r\n MWeights.append(list_holder[i][2])\r\n listOfFiles.append(list_holder[i][3])\r\n knownMoleculeIonizationTypes.append(list_holder[i][4])\r\n knownIonizationFactorsRelativeToN2.append(list_holder[i][5])\r\n SourceOfFragmentationPatterns.append(list_holder[i][6])\r\n SourceOfIonizationData.append(list_holder[i][7])\r\n\r\nOverallArray=[]\r\nholderArray=[]\r\nfor i in listOfFiles:\r\n jcampDict=JCampSG.JCAMP_reader(i)\r\n holderArray=createArray(jcampDict, i)\r\n OverallArray=combineArray(OverallArray, holderArray)\r\n\r\nexportToCSV(\"%s\\\\ConvertedSpectra.csv\" %outputDirectory, OverallArray, listOfFiles, MoleculeNames, ENumbers, MWeights, knownMoleculeIonizationTypes, knownIonizationFactorsRelativeToN2, SourceOfFragmentationPatterns, SourceOfIonizationData)\r\n","sub_path":"JDXConverter.py","file_name":"JDXConverter.py","file_ext":"py","file_size_in_byte":9132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"127264134","text":"#!/usr/bin/env python\n#coding: utf8 \n\n__description__ = \\\n\"\"\"\nConverting *.ndb to *.spw format\n\"\"\"\n\n__author__ = \"Vinícius Contessoto / Matheus Mello\"\n__date__ = \"Nov/2019\"\n\n################################################################\n# \n# Trajectories file *.ndb to Nucleome Data Bank format .spw\n#\n# usage:\n# ./ndb2spw.py -f file.ndb -n name_SPW_file\n#\n################################################################\n\nimport re\nimport time\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Converting *.ndb to *.spw format')\nparser.add_argument('-f', metavar='input-file-Grofile-frames',help='ndb file with frames',type=argparse.FileType('rt'))\nparser.add_argument('-n', action='store', default='chromatin', dest='arg_name', help='Name of output file')\nparser.add_argument('-t', action='store', dest='header_name', help='Value of name field at spw header')\nparser.add_argument('-c', action='store', dest='arg_chro', help='Chromosome number')\nparser.add_argument('-g', 
action='store', dest='genome', help='Value of name field at spw header', type=str)\n\ntry:\n arguments = parser.parse_args()\n print('################################################')\n print('Chosen file: {:}'.format(arguments.f.name))\n\nexcept IOError as msg:\n parser.error(str(msg)) \n\nMain_chrom = ['ChrA','ChrB','ChrU'] # Type A B and Unknow\nChrom_types = ['ZA','OA','FB','SB','TB','LB','UN']\nChrom_types_NDB = ['A1','A2','B1','B2','B3','B4','UN']\nRes_types_PDB = ['ASP', 'GLU', 'ARG', 'LYS', 'HIS', 'HIS', 'GLY']\n\n\n##################################################################################################\n\nb_time = time.time()\n\n# pdb formats 1 2 3\n# 123456789012345678901234567890\nmodel = \"trace {0:4d}\"\natom = \"ATOM {0:5d} {1:^4s}{2:1s}{3:3s} {4:1s}{5:4d}{6:1s} {7:8.3f}{8:8.3f}{9:8.3f}{10:6.2f}{11:6.2f} {12:>2s}{13:2s}\"\nter = \"TER {0:5d} {1:3s} {2:1s}{3:4d}{4:1s}\"\n\nfile_ndb = arguments.f\nname = arguments.arg_name\n\nhname = arguments.header_name\nChro = arguments.arg_chro\ngen = arguments.genome\n\nspwf = open(name + '.spw', \"w+\")\n\nprint('Converting file...')\n\nloop = 0\n\nspwf.write('##format=sw1 ')\n\ntry:\n spwf.write('name={:} '.format(hname))\nexcept:\n spwf.write('name={:} '.format(name))\n\nfirst = True\nasmbly = False\n\nfor line in file_ndb:\n \n entry = line[0:6]\n\n info = line.split()\n\n if 'ASMBLY' in entry and gen is None:\n spwf.write('genome=' + line[6:].replace(' ','').replace('\\n', ''))\n asmbly = True\n\n if 'MODEL' in entry:\n if first:\n\n if not asmbly:\n if gen is None:\n print('Assembly information not available')\n else:\n spwf.write('genome=' + gen)\n\n spwf.write('\\nchromosome\tstart\tend\tx\ty\tz\\n')\n first = False\n spwf.write('trace {:}\\n'.format(int(line[6:].replace(' ',''))-1))\n \n elif 'CHROM' in entry:\n\n temp = re.findall(r'\\d+', info[3])\n chro = [ int(x) for x in temp ][0] # Getting chro number from 1st spw field\n\n try:\n spwf.write('chr{0:} {1:} {2:} {3:} {4:} {5:}\\n'.format(Chro, info[8], info[9], info[5], info[6], info[7]))\n except:\n spwf.write('chr{0:} {1:} {2:} {3:} {4:} {5:}\\n'.format(chro, info[8], info[9], info[5], info[6], info[7]))\n \n\n # [ Loops file ]\n\n elif 'LOOPS' in entry:\n if loop == 0:\n loops = open(name + '.loops', 'w+')\n \n loops.write('{0:d} {1:d}\\n'.format(int(info[1]), int(info[2])))\n loop += 1\n \nspwf.close()\n\nprint('Finished!')\n\nif not loop == 0:\n loops.close\n print('Generated files: {:} and {:}'.format(name + '.spw', name + '.loops'))\nelse:\n print('Generated files: {:}'.format(name + '.spw'))\n\ne_time = time.time()\nelapsed = e_time - b_time\nprint('Ran in %.3f sec' % elapsed)\n","sub_path":"ndb2spw.py","file_name":"ndb2spw.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"496335325","text":"import numpy as np\r\nfrom sklearn import preprocessing\r\nfrom tensorflow.keras.models import load_model\r\nfrom sklearn import metrics\r\nimport math\r\nimport pandas as pd\r\nall_vars = ['PM2.5_Bias', 'PM10_Bias', 'NO2_Bias', 'SO2_Bias', 'O3_Bias', 'CO_Bias', 'PM2.5_Obs', 'PM10_Obs', 'NO2_Obs', 'SO2_Obs', 'O3_Obs', 'CO_Obs', 'PM2.5_Sim','PM10_Sim','NO2_Sim','SO2_Sim','O3_Sim','CO_Sim', 'RH_Bias', 'TEM_Bias', 'WSPD_Bias', 'WDIR_Bias', 'PRE_Bias', 'RH_Obs', 'TEM_Obs', 'WSPD_Obs', 'WDIR_Obs', 'PRE_Obs', 'PBLH_Sim', 'SOLRAD_Sim','RH_Sim','TEM_Sim','WSPD_Sim','WDIR_Sim','PRE_Sim', 'WIN_N_Obs','WIN_N_Sim', 'WIN_E_Obs','WIN_E_Sim', 'WIN_N_Bias', 'WIN_E_Bias', 
'PM2.5_Bias_ystd']\r\nvar_dict = {'PM2.5_Bias':0, 'NO2_Bias':2, 'SO2_Bias':3, 'O3_Bias':4, 'PM2.5_Obs':6, 'NO2_Obs':8, 'SO2_Obs':9, 'O3_Obs':10, 'PM2.5_Sim':12, 'RH_Bias':18, 'TEM_Bias':19, 'WSPD_Bias':20, 'WDIR_Bias':21, 'PRE_Bias':22, 'RH_Obs':23, 'TEM_Obs':24, 'WSPD_Obs':25, 'WDIR_Obs':26, 'PRE_Obs':27, 'PBLH_Sim':28, 'SOLRAD_Sim':29, 'WIN_N_Obs':35, 'WIN_E_Obs':37, 'WIN_N_Bias':39, 'WIN_E_Bias':40, 'PM2.5_Bias_ystd':41}\r\n# var_sele = ['PM2.5_Sim','PM2.5_Bias_ystd','NO2_Bias','SO2_Bias','O3_Bias','NO2_Obs','SO2_Obs','O3_Obs','RH_Bias','TEM_Bias','WDIR_Bias','WSPD_Bias','PRE_Bias','RH_Obs','TEM_Obs','WSPD_Obs','PRE_Obs','PBLH_Sim','SOLRAD_Sim']\r\nvar_sele = ['PM2.5_Bias_ystd','PM2.5_Sim','NO2_Bias','RH_Bias','O3_Bias','SO2_Bias','WSPD_Bias','NO2_Obs','O3_Obs']\r\n\r\ndata = np.load(\"D:/project/data/BTH/dataset_abs_corr2.npy\") #(44, 363, 42)\r\np_loc = [(129,99),(130,99),(139,100),(141,100),(135,101),(141,101),(138,102),(139,102),(141,102),(130,103),(141,103),(145,103),(146,103),(136,105),(141,105),(142,105),(127,106),(134,106),(143,106),(144,106),(127,107),(138,109),(133,111),(139,111),(138,112),(139,112),(140,112),(138,113),(140,113),(137,114),(135,115),(136,115),(141,115),(142,115),(135,116),(136,116),(137,116),(146,116),(135,117),(136,117),(131,119),(140,120),(144,125),(143,126)] #44个\r\nregion_num = np.array([2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,0,0,0,2,2,0,0,0,2,0,0,2,2,2,2]) #北京0,天津1,河北2\r\n# a = data[np.where(region_num == 0),:,:] #(1, 8, 363, 42)\r\n#取平均\r\nbj_data = np.mean(data[np.where(region_num == 0),:,:],axis=(0,1)) #(363, 42)\r\ntj_data = np.mean(data[np.where(region_num == 1),:,:],axis=(0,1))\r\nhb_data = np.mean(data[np.where(region_num == 2),:,:],axis=(0,1))\r\n\r\n\r\nbj_data_pd = pd.DataFrame(bj_data,index=np.arange(1,364),columns=all_vars)#(363, 42)\r\ntj_data_pd = pd.DataFrame(tj_data,index=np.arange(1,364),columns=all_vars)\r\nhb_data_pd = pd.DataFrame(hb_data,index=np.arange(1,364),columns=all_vars)\r\n\r\nwriter = pd.ExcelWriter(\"D:/project/data/BTH/lessvar/expr1/decriptive_analysis_temp.xlsx\")\r\nbj_data_pd.to_excel(writer, 'bj', float_format='%.2f')\t#保留小数点后2位\r\ntj_data_pd.to_excel(writer, 'tj', float_format='%.2f')\r\nhb_data_pd.to_excel(writer, 'hb', float_format='%.2f')\r\n\r\n\r\n#计算scaler1, scaler2\r\ndata = data.reshape((-1,42)) #(15972, 42)\r\nX = np.zeros((len(data),len(var_sele))) #(15972, 19)\r\nY = data[:,0].reshape((len(data),1)) #(15972, 1)\r\ni = 0\r\nfor var in var_sele:\r\n X[:,i] = data[:,var_dict.get(var)]\r\n i += 1\r\nscaler1 = preprocessing.StandardScaler().fit(X)\r\nscaler2 = preprocessing.StandardScaler().fit(Y)\r\nprint(scaler1,scaler2)\r\n\r\ndef get_xy_dataset(input_dataset):\r\n global scaler1, scaler2 \r\n Y = input_dataset[:,0] #'PM2.5_Bias'\r\n X = np.zeros((len(input_dataset),len(var_sele)))\r\n i = 0\r\n for var in var_sele:\r\n X[:,i] = input_dataset[:,var_dict.get(var)]\r\n i += 1\r\n X = scaler1.transform(X)\r\n Y = Y.reshape((Y.shape[0],1))\r\n Y = scaler2.transform(Y)\r\n return X, Y\r\n\r\ndatasetX_bj, datasetY_bj = get_xy_dataset(bj_data)\r\ndatasetX_tj, datasetY_tj = get_xy_dataset(tj_data)\r\ndatasetX_hb, datasetY_hb = get_xy_dataset(hb_data)\r\nprint(datasetX_bj.shape) #(363,17)\r\n\r\n##计算修正后的PM2.5:bias=sim-obs,PM25_revised=sim-bias,PM25_Bias_revised=revised-obs\r\nmodel = load_model(\"D:/project/data/BTH/lessvar/expr1/DNN_model1.h5\")\r\ny_pred_bj = model.predict(datasetX_bj)\r\ny_pred_bj = scaler2.inverse_transform(y_pred_bj).reshape(len(datasetX_bj),) 
#(363,)\r\nprint(y_pred_bj.shape)\r\nPM25_revised_bj = bj_data[:,var_dict.get('PM2.5_Sim')] - y_pred_bj\r\nPM25_Bias_revised_bj = PM25_revised_bj - bj_data[:,var_dict.get('PM2.5_Obs')]\r\n\r\ny_pred_tj = model.predict(datasetX_tj)\r\ny_pred_tj = scaler2.inverse_transform(y_pred_tj).reshape(len(datasetX_tj),) #(363,)\r\nprint(y_pred_tj.shape)\r\nPM25_revised_tj = tj_data[:,var_dict.get('PM2.5_Sim')] - y_pred_tj\r\nPM25_Bias_revised_tj = PM25_revised_tj - tj_data[:,var_dict.get('PM2.5_Obs')]\r\n\r\ny_pred_hb = model.predict(datasetX_hb)\r\ny_pred_hb = scaler2.inverse_transform(y_pred_hb).reshape(len(datasetX_hb),) #(363,)\r\nprint(y_pred_hb.shape)\r\nPM25_revised_hb = hb_data[:,var_dict.get('PM2.5_Sim')] - y_pred_hb\r\nPM25_Bias_revised_hb = PM25_revised_hb - hb_data[:,var_dict.get('PM2.5_Obs')]\r\n\r\n\r\n#bj-PM25 sheet:第1列PM2.5_Obs,第2列PM2.5_Sim,第3列PM2.5_Bias,第4列PM2.5_Bias_Predict,第5列PM2.5_revised,第6列PM2.5_Bias_revised\r\nbj_pm = pd.DataFrame({'PM2.5_Obs':bj_data[:,6],'PM2.5_Sim':bj_data[:,12],'PM2.5_Bias':bj_data[:,0],'PM2.5_Bias_Predict':y_pred_bj,'PM2.5_revised':PM25_revised_bj,'PM2.5_Bias_revised':PM25_Bias_revised_bj},index=np.arange(1,364))\r\ntj_pm = pd.DataFrame({'PM2.5_Obs':tj_data[:,6],'PM2.5_Sim':tj_data[:,12],'PM2.5_Bias':tj_data[:,0],'PM2.5_Bias_Predict':y_pred_tj,'PM2.5_revised':PM25_revised_tj,'PM2.5_Bias_revised':PM25_Bias_revised_tj},index=np.arange(1,364))\r\nhb_pm = pd.DataFrame({'PM2.5_Obs':hb_data[:,6],'PM2.5_Sim':hb_data[:,12],'PM2.5_Bias':hb_data[:,0],'PM2.5_Bias_Predict':y_pred_hb,'PM2.5_revised':PM25_revised_hb,'PM2.5_Bias_revised':PM25_Bias_revised_hb},index=np.arange(1,364))\r\nbj_pm.to_excel(writer, 'bj_PM25', float_format='%.2f')\t#保留小数点后2位\r\ntj_pm.to_excel(writer, 'tj_PM25', float_format='%.2f')\t#保留小数点后2位\r\nhb_pm.to_excel(writer, 'hb_PM25', float_format='%.2f')\t#保留小数点后2位\r\nwriter.close()","sub_path":"BTH/lessvar/expr1-old/descriptive and revised.py","file_name":"descriptive and revised.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"165652704","text":"import os\nimport time\nimport pickle\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nfrom src import DataProcessing\nfrom src.CorrelationMatrix import CorrMatrix\nfrom src.FeatureImportance import featuring_importance\nfrom src.DeployOut import get_csv_output\n\ndef suicide():\n start = time.time()\n\n # setting Path to the current working directory\n path = os.getcwd() + \"/models/\"\n\n # processing the data input by user\n X_test = DataProcessing.process()\n\n\n # creating a dictionary where binary blob of every model will be stored to their corresponding names\n model_dictionary = {}\n # model_name = ['knn', 'log', 'dt', 'rf', 'boost', 'bag', 'stack', 'rand_log', 'rand_knn', 'rand_dt','rand_rf', 'rand_ada','rand_bag','rand_stack']\n\n # Creating a list of all the files in models directory\n dir_list = os.listdir(path)\n\n # iterating through the dir_list which contains filenames of all the saved models\n for dir in dir_list:\n if dir == \"dnn_pkl\" or dir == \"test.py\":\n continue\n # storing the binary blob and the model name to the model_dictionary\n with open(path+dir, 'rb') as f:\n model_name = dir.partition(\"p\")[0]\n model_dictionary[model_name] = pickle.load(f)\n\n # defining the data directory, it will contain datafiles input by users\n data_dir = os.getcwd() + \"/data/\"\n\n # getting X_test as the input by user (y_test is temporary only for 
testing of accuracy scores)\n # X_test = pd.read_csv(data_dir+\"xtest\", index_col=0)\n # y_test = pd.read_csv(data_dir+\"ytest\", index_col=0)\n\n # creating a list to store all the prediciton values made by our models\n y_pred_class = []\n \n # iterating through model_dictionary which contains all our saved models\n for key in model_dictionary:\n # appending the prediciton values to y_pred_class list\n y_pred_class.append(model_dictionary[key].predict(X_test))\n print(key)\n\n # converting y_pred_class to a dataFrame to join it for Correlation Matrix\n y_pred_class_df = pd.DataFrame(y_pred_class).transpose()\n y_pred_class_df.columns = [key for key in model_dictionary]\n print(y_pred_class_df)\n\n # creating a list to store accuracy socres of our predictions (only for testing)\n # acc_score = []\n # iterating through y_pred_class and checking it with y_test to acertain the accuracy score\n # and append it to the acc_score list\n # for j in y_pred_class[0]: \n # acc_score.append(accuracy_score(y_test, y_pred_class[j]))\n # print(acc_score)\n\n # creating a correlation matrix between features \n combined_data = pd.concat([X_test, y_pred_class_df], axis=1, join='inner')\n CorrMatrix(combined_data)\n\n # FeatureImportance Graph\n featuring_importance(X_test, y_pred_class_df)\n\n # saving output in csv format\n for key in model_dictionary:\n get_csv_output(key, combined_data)\n\n end = time.time()\n print(\"Time Taken: \", end-start)\nsuicide()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"342302153","text":"q=0 # Initially we assigned 0 to \"q\", to use this variable for the summation purpose below.\n # The \"q\" value should be declared before using it(mandatory). And this value can be changed later.\n\nn=int(input(\"Enter Number: \")) # asking user for input\nwhile n>0: # Until \"n\" is greater than 0, execute the loop. This means that until all the digits of \"n\" got extracted.\n\n r=n%10 # Here, we are extracting each digit from \"n\" starting from one's place to ten's and hundred's... so on.\n\n q=q+r # Each extracted number is being added to \"q\".\n\n n=n//10 # \"n\" value is being changed in every iteration. 
Dividing with 10 gives exact digits in that number, reducing one digit in every iteration from one's place.\n\nprint(\"Sum of digits is: \"+str(q))\n","sub_path":"Sum of digits of a number.py","file_name":"Sum of digits of a number.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"57469100","text":"def handValue(hand,gotMorto):\r\n \"\"\"\r\n This function will register and count what a player (or team) has on their hands that will impact the score negatively.\r\n - hand: is expected to be a single list containing all the individual cards player(s) had on their hands;\r\n - gotMorto: boolean expression indicating whether the player or team have advanced to the second phase by picking up\r\n the second available pile denominated morto.\r\n \r\n This function returns the total sum of the cards on a players hand plus any boosters.\r\n \r\n \"\"\"\r\n \r\n # establishing counters and sum variables.\r\n # tranca, as a counter, is being declared for future player leaderboard and statistics\r\n totalSubtract = 0\r\n tranca = 0\r\n trancaSum = 0\r\n regularSubtract = 0\r\n \r\n # analyzes whether got morto will have an actual impact on the final scoring\r\n if gotMorto == True:\r\n mortoImpact = 0\r\n else:\r\n mortoImpact = -100\r\n \r\n # loop to analyze a player's hand\r\n for card in hand:\r\n if cardDict[card]['Tranca'] == True:\r\n tranca += 1\r\n else:\r\n regularSubtract -= cardDict[card]['Value']\r\n \r\n # generating total sums\r\n trancaSum = tranca * -100\r\n totalSubtract = trancaSum + regularSubtract + mortoImpact\r\n \r\n # returning results\r\n print(\"\")\r\n print(\"Negative factors: \")\r\n print(\"\")\r\n print(\"Hand value: \" + str(regularSubtract))\r\n print(\"Trancas: \" + str(tranca) + \" (\"+ str(trancaSum) + \")\")\r\n if mortoImpact == -100:\r\n print(\"Morto: \" + str(mortoImpact))\r\n print(\"\")\r\n print(\"Total negative impact: \" + str(totalSubtract))\r\n print(\"\")\r\n print(\"--------------------------------\")\r\n \r\n return totalSubtract\r\n\r\ndef tableValue(sequences,finalMove):\r\n \"\"\"\r\n \r\n This function registers all values that count positively towards a player or teams score.\r\n - sequences: receives a list of lists that contain each sequence a player has laid out in front of them\r\n - finalMove: boolean expression that indicates whether the player or team has made the final move of the game\r\n thus being awarded an extra 100 points.\r\n \r\n \"\"\"\r\n \r\n # establishing sum variables for displaying scoring boosters\r\n dirtyCanastraBooster = 0\r\n cleanCanastraBooster = 0\r\n dollarBooster = 0\r\n finalMoveBooster = 0\r\n totalTable = 0\r\n jokerCanastra = 0\r\n \r\n # establishing counters for effect multipliers\r\n dirtyCanastra = 0\r\n cleanCanastra = 0\r\n dollar = 0\r\n joker = 0\r\n \r\n # checking all sequences built for value and boosters\r\n for sequence in sequences:\r\n \r\n # summing the individual card values outside the boosters established above\r\n for card in sequence:\r\n totalTable += cardDict[card]['Value']\r\n # Checking whether the current card is a dollar (red 3)\r\n if cardDict[card]['Dollar'] == True:\r\n dollar += 1\r\n # Checking whether the current card is a joker (2)\r\n if cardDict[card]['Joker'] == True:\r\n joker += 1\r\n \r\n # Checking whether the sequence is a canastra\r\n if len(sequence) >= 7:\r\n # Checking whether the sequence is composed by jokers (2) only\r\n # if so, the booster is assigned and a 
dirty canastra is removed from the counter\r\n if joker == len(sequence):\r\n jokerCanastra = 1000\r\n # Checking whether there are any jokers at all to assign to dirty or clean canastra counters\r\n elif joker > 0:\r\n dirtyCanastra += 1\r\n else:\r\n cleanCanastra += 1\r\n \r\n # removing a canastra counter if there is a joker canastra in the game\r\n if jokerCanastra == 1000:\r\n dirtyCanastra -= 1\r\n \r\n # accounting for the boosters\r\n dirtyCanastraBooster = 100 * dirtyCanastra\r\n cleanCanastraBooster = 200 * cleanCanastra\r\n dollarBooster = 100 * dollar\r\n totalCanastra = dirtyCanastra + cleanCanastra\r\n \r\n # checking whether the dollar will have negative effect\r\n if dollar > 0 and totalCanastra == 0:\r\n dollarBooster = dollarBooster * -1\r\n \r\n # checking whether the player has the final move booster\r\n if finalMove == True:\r\n finalMoveBooster = 100\r\n \r\n # returning the results\r\n print(\"Positive factors: \")\r\n print(\"\")\r\n print(\"Total dirty canastras: \" + str(dirtyCanastra) + \" | Total value: \" + str(dirtyCanastraBooster))\r\n print(\"Total clean canastras: \" + str(cleanCanastra) + \" | Total value: \" + str(cleanCanastraBooster))\r\n print(\"Total dollars: \" + str(dollar) + \" | Total value: \" + str(dollarBooster))\r\n print(\"Table value: \" + str(totalTable))\r\n if finalMove == True:\r\n print(\"Final move booster: \" + str(finalMoveBooster))\r\n if jokerCanastra > 0:\r\n print(\"Joker canastra booster: \" + str(jokerCanastra))\r\n print(\"\")\r\n finalPositiveScoring = totalTable + dirtyCanastraBooster + cleanCanastraBooster + dollarBooster + finalMoveBooster + jokerCanastra\r\n print(\"Total positive scoring: \" + str(finalPositiveScoring))\r\n print(\"\")\r\n print(\"--------------------------------\")\r\n \r\n return finalPositiveScoring\r\n \r\ndef scoreRound(finalHand,gotMorto,finalTable,finalMove):\r\n \"\"\"\r\n This function runs the final score of a player or team's round.\r\n - finalHand: is expected to be a single list containing all the individual cards player(s) had on their hands;\r\n - gotMorto: boolean expression indicating whether the player or team have advanced to the second phase by picking up\r\n the second available pile denominated morto.\r\n - finalTable: receives a list of lists that contain each sequence a player has laid out in front of them\r\n - finalMove: boolean expression that indicates whether the player or team has made the final move of the game\r\n thus being awarded an extra 100 points.\r\n \"\"\"\r\n \r\n print(\"\")\r\n # calling both functions established above\r\n finalScoring = tableValue(finalTable,finalMove) + handValue(finalHand,gotMorto)\r\n #returning the value\r\n print(\"Final round scoring: \" + str(finalScoring))\r\n \r\n# establishing a list of jokers\r\njokers = ['two_hearts','two_clubs','two_spades','two_diamonds']\r\n\r\n# establishing lists of cards by value standards\r\nfifteenPoints = ['ace']\r\ntenPoints = ['eight','nine','ten','jack','queen','king']\r\nfivePoints = ['four','five','six','seven']\r\n\r\n# establishing the regular cards and suits to build a dictionary\r\nallCards = ['two','three','four','five','six','seven','eight','nine','ten',\r\n 'jack','queen','king','ace']\r\nsuits = ['clubs','spades','hearts','diamonds']\r\n\r\n# establishing card ordering for future validation of whether a given sequence is valid\r\n\r\ncardOrder = {\r\n 'four': 1,\r\n 'five': 2,\r\n 'six': 3,\r\n 'seven': 4,\r\n 'eight': 5,\r\n 'nine': 6,\r\n 'ten': 7,\r\n 'jack': 8,\r\n 'queen': 9,\r\n 'king': 
10,\r\n 'ace': 11\r\n}\r\n\r\n# establishing a dictionary of all cards with their name, suits, value, order and boolean checks\r\ncard = \"\"\r\ncardDict = {card:{}}\r\n\r\n# updating dictionary for future reference\r\nfor suit in suits:\r\n for card in allCards:\r\n \r\n mask = card + \"_\" + suit\r\n # Analyzing cards of value three, which can have multiple effects\r\n if card == 'three':\r\n if suit == 'clubs' or suit == 'spades':\r\n cardDict.update({mask:{\r\n 'Name': card,\r\n 'Suit': suit,\r\n 'Value': -100,\r\n 'Tranca': True,\r\n 'Dollar': False,\r\n 'Joker': False,\r\n 'Order': None\r\n }})\r\n else:\r\n cardDict.update({mask:{\r\n 'Name': card,\r\n 'Suit': suit,\r\n 'Value': 5,\r\n 'Tranca': False,\r\n 'Dollar': True,\r\n 'Joker': False,\r\n 'Order': None\r\n }})\r\n \r\n # Analyzing jokers\r\n if card == 'two':\r\n cardDict.update({mask:{\r\n 'Name': card,\r\n 'Suit': suit,\r\n 'Value': 10,\r\n 'Tranca': False,\r\n 'Dollar': False,\r\n 'Joker': True,\r\n 'Order': None\r\n }})\r\n \r\n # Analyzing cards of value five\r\n if card in fivePoints:\r\n cardDict.update({mask:{\r\n 'Name': card,\r\n 'Suit': suit,\r\n 'Value': 5,\r\n 'Tranca': False,\r\n 'Dollar': False,\r\n 'Joker': False,\r\n 'Order': cardOrder[card]\r\n }})\r\n \r\n # Analyzing cards of value ten\r\n if card in tenPoints:\r\n cardDict.update({mask:{\r\n 'Name': card,\r\n 'Suit': suit,\r\n 'Value': 10,\r\n 'Tranca': False,\r\n 'Dollar': False,\r\n 'Joker': False,\r\n 'Order': cardOrder[card]\r\n }})\r\n \r\n # analyzing ace, which is valued at 15\r\n if card == 'ace':\r\n cardDict.update({mask:{\r\n 'Name': card,\r\n 'Suit': suit,\r\n 'Value': 15,\r\n 'Tranca': False,\r\n 'Dollar': False,\r\n 'Joker': False,\r\n 'Order': 11\r\n }})","sub_path":"contador_tranca.py","file_name":"contador_tranca.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"88656548","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport re\nimport labsql\nfrom sklearn.cluster import KMeans\nimport collections\n\nreBODY = r'([\\s\\S]*?)<\\/body>'\nreCOMM = r''\nreTRIM = r'<{0}.*?>([\\s\\S]*?)<\\/{0}>'\nreTAG = r'<[\\s\\S]*?>|[ \\t\\r\\f\\v]'\n\nreIMG = re.compile(r'')\n\n\n# 如果需要提取正文区域出现的图片,只需要在第一步去除tag时保留标签的内容:\ndef deal_images(body):\n return reIMG.sub(r'{\\1}', body)\n\n\n# 去除所有tag,包括样式、Js脚本内容等,但保留原有的换行符\\n:\ndef deal_tags(body):\n body = re.sub(reCOMM, \"\", body)\n body = re.sub(reTRIM.format(\"script\"), \"\", re.sub(reTRIM.format(\"style\"), \"\", body))\n # self.body = re.sub(r\"[\\n]+\",\"\\n\", re.sub(reTAG, \"\", self.body))\n body = re.sub(reTAG, \"\", body)\n body = re.sub(r'[\\u3000\\u2003\\xa0]', \"\", body)\n return body\n\n\ndef special_fun(sentence_list):\n read_list = []\n corpus_dict = {}\n corpus_range = []\n # read each lines\n for id, sentence in enumerate(sentence_list):\n # use regex to find chinese in sentence\n ident_word = re.search(\"[\\u4e00-\\u9fa5]\", sentence)\n # judge the line whether is empty or not chinese\n if sentence is '' or ident_word is None: continue\n # when the line is contain chinese, capture after 5 lines\n # 五句頭\n head = id\n # 五句尾\n bottom = id + 5\n total_length = 0\n # 候選句子\n candidate_sentence = []\n # 如果外部for的id已在readied_list,跳過\n if id in read_list: continue\n # head到bottom的id加到read_list\n for read_id in range(head, bottom): read_list.append(read_id)\n # 讀取head到bottom的句子\n for capture in sentence_list[head:bottom]:\n # 只保留句子中的中文,。、\n words = re.sub('[^\\u4e00-\\u9fa5,。、]', '', capture)\n # 
句子長度加到total_length\n total_length += len(words)\n # 句子加到candidate_sentence\n candidate_sentence.append(words)\n # 假如total_length大於閥值則將head, total_length & candidate_sentence加到corpus_dict\n if total_length >= 110:\n # corpus_range加入第一個candidate_sentence成功的head\n if not corpus_range: corpus_range.append(head)\n corpus_dict[head] = candidate_sentence\n\n # corpus_range.append(bottom)\n # print(corpus_range)\n keys_list = list(corpus_dict.keys())\n output = {}\n if len(keys_list) <= 3:\n output = corpus_dict\n elif keys_list[1] - keys_list[0] >= 100:\n output = {keys_list[0]: corpus_dict[keys_list[0]]}\n else:\n X = np.array(keys_list).reshape(len(keys_list), 1)\n kmeans = KMeans(n_clusters=3, random_state=0).fit(X)\n label_list = list(kmeans.labels_)\n most = collections.Counter(label_list).most_common()\n # 次數\n top = most[0][1]\n # 最多次數的群\n cluster = most[0][0]\n # 如果次數跟其他兩群不同,就對應群在label中的index再對應t的位置\n if top is not most[1][1] and top is not most[2][1]:\n for i, item in enumerate(corpus_dict):\n if label_list[i] == cluster:\n output[keys_list[i]] = corpus_dict[keys_list[i]]\n # self.main_content_range[ID] = corpus_range\n content = output.values()\n out_list = []\n for out_data in content:\n out_list += list(filter(None, out_data))\n out1 = ', '.join(out_list)\n if not out1: return 'continue'\n return out1\n\n\nclass Extractor:\n def __init__(self, block_size=3, image=False):\n # where id = 9 or id = 88 or id = 125 or id = 175 or id = 332 or id = 635 or id = 724\n self.conn = labsql.LabSQL('172.168.1.36', 'sohu', 'sa', 'scucc')\n self.data = self.conn.fetch(\n \"select * from dis where id = 9 or id = 88 or id = 125 or id = 175 or id = 332 or id = 635 or id = 724\") # where id between '110' and '120'\n self.blockSize = block_size\n self.saveImage = image\n\n # 将网页内容按行分割,定义行块 blocki 为第 [i,i+blockSize] 行文本之和并给出行块长度基于行号的分布函数:\n def deal_blocks(self, body):\n # 把文章split\n sentence_list = body.split(\"\\n\")\n\n # print(sentence_list)\n self.textLens = [len(text) for text in sentence_list]\n self.cblocks = [0] * (len(sentence_list) - self.blockSize - 1)\n lines = len(sentence_list)\n for i in range(self.blockSize):\n self.cblocks = list(map(lambda x, y: x + y, self.textLens[i: lines - 1 - self.blockSize + i], self.cblocks))\n print(lines)\n\n if not self.cblocks: return special_fun(sentence_list)\n\n total_length = len(sentence_list)\n head_limit_edge = round(total_length * 0.3)\n tail_limit_edge = round(total_length * 0.7)\n # 後選前二長\n top2_maxTextLen = list(reversed(sorted(self.textLens)))[:3]\n # 移除極端值\n # 最後選擇字元最多的\n candidate_top = [candidate for candidate in top2_maxTextLen if candidate <= 1000]\n candidate_index = [self.textLens.index(index) for index in candidate_top]\n # print(candidate_index)\n \"\"\"\n method 1\n \"\"\"\n if len(candidate_index) > 1:\n for check in candidate_index:\n if head_limit_edge <= check <= tail_limit_edge:\n maxTextLen = check\n if abs(check - candidate_index[-1]) <= 10:\n maxTextLen = candidate_index[-1]\n else:\n if not sentence_list[candidate_index[0]:candidate_index[-1]]: return special_fun(sentence_list)\n return sentence_list[candidate_index[0]:candidate_index[-1]], [min(candidate_index),\n max(candidate_index)]\n\n self.start = self.end = maxTextLen\n while self.start > 0 and self.cblocks[self.start] > min(self.textLens):\n self.start -= 1\n while self.end < lines - self.blockSize and self.cblocks[self.end] > min(self.textLens):\n self.end += 1\n if not \"\".join(sentence_list[self.start:self.end]): return special_fun(sentence_list)\n return 
\"\".join(sentence_list[self.start:self.end]), [self.start, self.end]\n else:\n if not sentence_list[0]: return special_fun(sentence_list)\n return sentence_list[0], [0, 0]\n\n # 正文出现在最长的行块,截取两边至行块长度为 0 的范围:\n def get_context(self):\n for c, sql_fetch_data in enumerate(self.data):\n ID, corpus, url = sql_fetch_data\n body = re.findall(reBODY, corpus)\n if not body: continue\n body = body[0]\n if self.saveImage:\n body = deal_images(body)\n body = deal_tags(body)\n # print(body)\n # output, range_list = self.deal_blocks(body)\n output = self.deal_blocks(body)\n\n if output == 'continue': continue\n print(output)\n # print(range_list)\n\n\nif __name__ == '__main__':\n ext = Extractor()\n ext.get_context()\n # print(out)\n","sub_path":"ML/content_extractor.py","file_name":"content_extractor.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"352443197","text":"import os\nimport dbus\nimport dbus.service\nimport dbus.mainloop.glib\n\ntry:\n from gi.repository import GObject\nexcept ImportError:\n import gobject as GObject\n\ndbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\nbus = dbus.SystemBus()\n\n\nclass Profile(dbus.service.Object):\n fd = -1\n\n def __init__(self, bus, path, read_cb):\n self.read_io_cb = read_cb\n dbus.service.Object.__init__(self, bus, path)\n\n @dbus.service.method(\"org.bluez.Profile1\", in_signature=\"\", out_signature=\"\")\n def Release(self):\n print(\"Release\")\n mainloop.quit()\n\n @dbus.service.method(\"org.bluez.Profile1\", in_signature=\"oha{sv}\", out_signature=\"\")\n def NewConnection(self, path, fd, properties):\n self.fd = fd.take()\n print(\"NewConnection(%s, %d)\" % (path, self.fd))\n io_id = GObject.io_add_watch(self.fd, GObject.PRIORITY_DEFAULT, GObject.IO_IN | GObject.IO_PRI, self.io_cb)\n\n @dbus.service.method(\"org.bluez.Profile1\", in_signature=\"o\", out_signature=\"\")\n def RequestDisconnection(self, path):\n print(\"RequestDisconnection(%s)\" % (path))\n\n if self.fd > 0:\n os.close(self.fd)\n self.fd = -1\n\n def io_cb(self, fd, conditions):\n data = os.read(fd, 1024)\n self.read_io_cb(\"{0}\".format(data.decode(\"ascii\")))\n return True\n\n def write_io(self, value):\n try:\n os.write(self.fd, value.encode(\"utf8\"))\n except ConnectionResetError:\n self.fd = -1\n\n\nclass SPP:\n def __init__(self, read_cb):\n self.profile = None\n manager = dbus.Interface(bus.get_object(\"org.bluez\", \"/org/bluez\"), \"org.bluez.ProfileManager1\")\n\n self.mainloop = GObject.MainLoop()\n adapter_props = dbus.Interface(\n bus.get_object(\"org.bluez\", \"/org/bluez/hci0\"), \"org.freedesktop.DBus.Properties\"\n )\n\n adapter_props.Set(\"org.bluez.Adapter1\", \"Powered\", dbus.Boolean(1))\n profile_path = \"/foo/bar/profile\"\n server_uuid = \"00001101-0000-1000-8000-00805f9b34fb\"\n opts = {\"AutoConnect\": True, \"Role\": \"server\", \"Channel\": dbus.UInt16(1), \"Name\": \"SerialPort\"}\n\n print(\"Starting Serial Port Profile...\")\n\n if read_cb is None:\n self.profile = Profile(bus, profile_path, self.read_cb)\n else:\n self.profile = Profile(bus, profile_path, read_cb)\n\n manager.RegisterProfile(profile_path, server_uuid, opts)\n\n def read_cb(self, value):\n print(value)\n\n def write_spp(self, value):\n self.profile.write_io(value)\n\n def fd_available(self):\n if self.profile.fd > 0:\n return True\n else:\n return False\n\n def start(self):\n 
self.mainloop.run()\n\n","sub_path":"dashio/spp_server.py","file_name":"spp_server.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"343743631","text":"from __future__ import division\nimport matplotlib.pyplot as pl\nfrom matplotlib.patches import Arc,Arrow,Circle\nimport matplotlib.image as mpimg\nimport math as math\nimport json\nimport sys\nimport argparse\n\ndef plot_network(data,output_file,arg_figsize):\n pl.clf()\n fig, ax = pl.subplots(nrows=1, ncols=1, sharex=True, sharey=False)\n fig_size = (10,5)\n line_width = 1\n tail_width = 0\n head_width = 5\n line_color = 'k'\n light_color = 'w'\n text_color = 'k'\n ext = [-200,200,-110,110]\n if 'Plot' in data:\n ext = data['Plot']['extent']\n if 'bg_image' in data['Plot']:\n if data['Plot']['bg_image'] != None:\n img = mpimg.imread(data['Plot']['bg_image'])\n bg_alpha = 1.0\n if 'bg_alpha' in data['Plot']:\n if data['Plot']['bg_alpha'] != None:\n bg_alpha = data['Plot']['bg_alpha']\n pl.imshow(img,extent=ext,alpha=bg_alpha)\n\n fig_size = tuple(data['Plot']['fig_size'])\n if 'line_width' in data['Plot']:\n line_width = data['Plot']['line_width']\n head_width = data['Plot']['head_width']\n tail_width = data['Plot']['tail_width']\n line_color = data['Plot']['line_color']\n light_color = data['Plot']['light_color']\n text_color = data['Plot']['text_color']\n\n if arg_figsize != None:\n fig.set_size_inches(arg_figsize)\n else:\n fig.set_size_inches(fig_size)\n r=15 # radius of intersection nodes\n d=5 # distance to space two edges that share a pair of nodes\n\n nodes = []\n edges = {}\n\n\n x_min=data['Nodes'][0]['p'][0]\n y_min=data['Nodes'][0]['p'][1]\n x_max=x_min\n y_max=y_min\n\n for i,n in enumerate(data['Nodes']):\n nodes.append({'n':n,'e':0,'l':None})\n x=n['p'][0]\n y=n['p'][1]\n x_min=min(x_min,x)\n y_min=min(x_min,y)\n x_max=max(x_max,x)\n y_max=max(x_max,y)\n\n for i,q in enumerate(data['Queues']):\n n0=q['edge'][0]\n n1=q['edge'][1]\n pair = (n0,n1)\n if n1 1\n n0= data['Nodes'][q['edge'][0]]\n n1= data['Nodes'][q['edge'][1]]\n rx0=n0['p'][0]\n ry0=n0['p'][1]\n rx1=n1['p'][0]\n ry1=n1['p'][1]\n\n rx = rx0-rx1\n ry = ry0-ry1\n lth = math.sqrt(rx*rx+ry*ry)\n rx/=lth\n ry/=lth\n trx0=rx0\n try0=ry0\n if 'light' in n0:\n if pair:\n theta = -math.asin(d/r)\n trx = rx * math.cos(theta) - ry * math.sin(theta);\n ry = rx * math.sin(theta) + ry * math.cos(theta);\n rx=trx\n trx0-=rx * r; try0-=ry * r\n elif pair:\n trx0-=ry * d; try0+=rx * d\n rx = rx1-rx0\n ry = ry1-ry0\n lth = math.sqrt(rx*rx+ry*ry)\n rx/=lth\n ry/=lth\n if 'light' in n1:\n if pair:\n theta = math.asin(d/r)\n trx = rx * math.cos(theta) - ry * math.sin(theta);\n ry = rx * math.sin(theta) + ry * math.cos(theta);\n rx=trx\n rx1-=rx * (r+line_width); ry1-=ry * (r+line_width)\n elif pair:\n rx1+=ry * d; ry1-=rx * d\n rx0=trx0\n ry0=try0\n rx = rx1-rx0\n ry = ry1-ry0\n lth = math.sqrt(rx*rx+ry*ry)\n tx=rx/lth * r; ty=ry/lth * r\n rx = rx0+(rx1-rx0)/2\n ry = ry0+(ry1-ry0)/2\n ax.text(rx+(ty-7),ry-(tx),r'$q_{%d}$' % i,fontsize=16,color=text_color)\n #plot([rx,rx+ty],[ry,ry-tx])\n arrow = ax.arrow(rx0,ry0,rx1-rx0,ry1-ry0, shape='full', lw=line_width,color=line_color,length_includes_head=True, head_width=head_width, width=tail_width)\n arrow.set_ec('k')\n arrow.set_fc(line_color)\n\n pl.axis('scaled')\n #ax.set_ylim([x_min-10,x_max+10])\n #ax.set_xlim([y_min-10,y_max+10])\n #[-200,200,-110,110]\n #ax.set_ylim([-110,110])\n #ax.set_xlim([-200,200])\n ax.set_ylim(ext[2:4])\n 
ax.set_xlim(ext[0:2])\n pl.axis('off')\n if output_file:\n pl.savefig(output_file, bbox_inches='tight')\n pl.show()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\", help=\"the model file to animate\")\n parser.add_argument(\"-o\", \"--out\", help=\"save the plot as OUT\")\n parser.add_argument(\"--figsize\", help=\"width and height of the plot\", nargs='+',type=int)\n args = parser.parse_args()\n f = open(str(args.file),'r')\n data = json.load(f)\n f.close()\n plot_network(data,args.out,args.figsize)\n","sub_path":"plot_network.py","file_name":"plot_network.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"114972586","text":"import os\nimport pyinotify\n\n# Update user-directory .ssh/authorized_keys from a central directory.\n# Expects keys to be of the form 'plam@django-130', throws out\n# everything after the @, concatenates, and puts the result in\n# /home/plam/.ssh/authorized_keys.\n\n# Uses pyinotify to watch for changes.\n\n# Copyright 2013 Patrick Lam\n#\n# Released under the Expat license:\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n \n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nKEYDIR = os.path.join(os.getenv(\"HOME\"), \"keydir\")\nHOMES = '/home'\nAUTHORIZED_KEYS = '.ssh/authorized_keys'\n\nclass UpdateKeys(pyinotify.ProcessEvent):\n def process_default(self, event):\n userid = event.name.split(\"@\")[0]\n userdir = os.path.join(HOMES, userid)\n if (not os.path.exists(userdir)):\n return\n #print(\"Received event on filename {0}, userid {1}\".format(event.name, userid))\n newKey = '';\n for root, _, files in os.walk(event.path):\n for f in files:\n if (f.startswith(userid)):\n key = os.path.join(root, f)\n f = open(key, 'r')\n newKey = newKey + f.read()\n f.close()\n combinedKeyFile = os.path.join(userdir, AUTHORIZED_KEYS)\n kk = open(combinedKeyFile, 'w')\n kk.write(newKey)\n kk.close()\n\nwm = pyinotify.WatchManager()\nnotifier = pyinotify.Notifier(wm, UpdateKeys())\nwm.add_watch(KEYDIR, pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MODIFY)\nnotifier.loop()\n","sub_path":"watch-and-update.py","file_name":"watch-and-update.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"300919998","text":"from __future__ import annotations\n\n# fmt: off\nimport sys # isort: skip\nfrom pathlib import Path # isort: skip\nROOT = 
Path(__file__).resolve().parent.parent.parent # isort: skip\nsys.path.append(str(ROOT)) # isort: skip\n# fmt: on\n\nimport sys\nfrom typing import Any, Collection, Dict, List, Optional, Sequence, Union\n\nfrom typing_extensions import Literal\n\nfrom src.constants import SKLEARN_SGD_LR_DEFAULT as LR_DEFAULT\nfrom src.constants import SKLEARN_SGD_LR_MAX as LR_MAX\nfrom src.constants import SKLEARN_SGD_LR_MIN as LR_MIN\nfrom src.enumerables import Dataset\nfrom src.hparams.hparams import (\n CategoricalHparam,\n ContinuousHparam,\n FixedHparam,\n Hparam,\n Hparams,\n OrdinalHparam,\n)\n\nSGD_TUNED: Dict[Dataset, Optional[Dict[str, Any]]] = {\n Dataset.Diabetes: None,\n Dataset.Diabetes130: None,\n Dataset.HeartFailure: None,\n Dataset.MimicIV: None,\n Dataset.Parkinsons: None,\n Dataset.SPECT: None,\n Dataset.Transfusion: None,\n Dataset.UTIResistance: None,\n}\n\n\ndef nystroem_hparams(\n gamma: Optional[float] = 0.1,\n n_components: Optional[int] = 100,\n alpha: Optional[float] = 1e-4,\n l1_ratio: Optional[float] = 0.15,\n lr_init: Optional[float] = 1e-3,\n penalty: Literal[\"l1\", \"l2\", \"elasticnet\", None] = \"l2\",\n average: bool = False,\n) -> List[Hparam]:\n # see https://jcheminf.biomedcentral.com/articles/10.1186/s13321-015-0088-0#Sec6\n # for a possible tuning range on C, gamma\n return [\n ContinuousHparam(\"gamma\", gamma, max=1e3, min=1e-10, log_scale=True, default=0.1),\n OrdinalHparam(\"n_components\", n_components, max=1000, min=5, default=100),\n ContinuousHparam(\n \"alpha\", alpha, max=1e-1, min=1e-7, log_scale=True, default=1e-4\n ),\n ContinuousHparam(\n \"l1_ratio\", l1_ratio, max=1.0, min=0.0, log_scale=False, default=0.15\n ),\n ContinuousHparam(\n \"eta0\", lr_init, max=LR_MAX, min=LR_MIN, log_scale=True, default=LR_DEFAULT\n ),\n CategoricalHparam(\n \"penalty\",\n value=penalty,\n categories=[\"l1\", \"l2\", \"elasticnet\", None],\n default=\"l2\",\n ),\n CategoricalHparam(\"average\", average, categories=[True, False], default=False),\n FixedHparam(\"loss\", value=\"hinge\", default=\"hinge\"),\n FixedHparam(\"learning_rate\", value=\"adaptive\", default=\"adaptive\"),\n FixedHparam(\"n_jobs\", value=1, default=1),\n ]\n\n\nclass NystroemHparams(Hparams):\n def __init__(\n self,\n hparams: Union[Collection[Hparam], Sequence[Hparam], None] = None,\n ) -> None:\n if hparams is None:\n hparams = nystroem_hparams()\n super().__init__(hparams)\n\n def tuned_dict(self, dataset: Dataset) -> Dict[str, Any]:\n hps = SGD_TUNED[dataset]\n if hps is None:\n return self.defaults().to_dict()\n return hps\n\n def set_n_jobs(self, n_jobs: int) -> None:\n self.hparams[\"n_jobs\"].value = n_jobs\n\n def ny_dict(self) -> Dict[str, Any]:\n full = self.to_dict()\n d = {\"gamma\": full[\"gamma\"], \"n_components\": full[\"n_components\"]}\n return d\n\n def sgd_dict(self) -> Dict[str, Any]:\n d = self.to_dict()\n d.pop(\"gamma\")\n d.pop(\"n_components\")\n return d\n","sub_path":"src/hparams/nystroem.py","file_name":"nystroem.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"130488518","text":"'''\n- Necessário instalar as libs Pygame e gTTS\n\n- pip install pygame\n- pip install gTTS\n\n'''\n\nimport io\nimport os\nimport pygame\nfrom gtts import gTTS\n\n\ndef falar_texto(texto, opcao):\n\n if opcao == 1:\n with open('audio.mp3', 'wb') as arquivo:\n gTTS(text=texto, lang=\"pt-br\").write_to_fp(arquivo)\n else:\n with io.BytesIO() as arquivo:\n gTTS(text=texto, 
lang=\"pt-br\").write_to_fp(arquivo)\n arquivo.seek(0)\n pygame.mixer.init()\n pygame.mixer.music.load(arquivo)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n continue\n\n\ndef main():\n\n while True:\n os.system(\"cls || clear\")\n\n opcao = int(input('''\n [1] Salvar arquivo de áudio\n [2] Apenas ouvir o áudio \n \n Escolha sua opção: '''))\n\n if opcao == 1 or opcao == 2:\n texto = input(\"\\nDigite o que você deseja ouvir/salvar em áudio: \")\n break\n else:\n continue\n\n falar_texto(texto, opcao)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"texto_para_som.py","file_name":"texto_para_som.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"589073648","text":"#!/usr/bin/env python3\n\"\"\" Calculates the fraction of wobble positions that are conserved across all 4 species starting from\na conserved start codon until the end of the sequence (which is a stop codon in all 4 species). \nConsider only third position wobbles.\n\n\tUsage: \n\t\tpython3 neutral_rate.py \n\n\t\tExample:\n\t\t\tpython3 neutral_rate.py PRE1.aln \n\n\tOutput file name:\n\t\tS_cer_conserved.txt\n\"\"\"\n\n\nimport sys\nimport numpy as np\nfrom scipy.stats import binom\n\n\n# sys.arg is a list containing 2 elements: the script name and command line argument\n# Check that all the command line argument was given. If not, prints an error statement, the documentation and then exits.\nif (len(sys.argv) != 4):\n\tsys.exit(\"ERROR: incorrect number of arguments.\\n\" + __doc__)\n\n\npre1_alignment_file = open(sys.argv[1], 'r')\nmin_bp_conserved = int(sys.argv[2])\nwindow_length = int(sys.argv[3])\n\n\n# Standard table of amino acids - a dictionary\n# Codon choices code to a list amino acids\ncodons_to_aa = {'TTT': 'F', 'TCT': 'S', 'TAT': 'Y', 'TGT': 'C',\n\t\t\t\t'TTC': 'F', 'TCC': 'S', 'TAC': 'Y', 'TGC': 'C',\n\t\t\t\t'TTA': 'L', 'TCA': 'S', 'TAA': '*', 'TGA': '*',\n\t\t\t\t'TTG': 'L', 'TCG': 'S', 'TAG': '*', 'TGG': 'W',\n\t\t\t\t'CTT': 'L', 'CCT': 'P', 'CAT': 'H', 'CGT': 'R',\n\t\t\t\t'CTC': 'L', 'CCC': 'P', 'CAC': 'H', 'CGC': 'R',\n\t\t\t\t'CTA': 'L', 'CCA': 'P', 'CAA': 'Q', 'CGA': 'R',\n\t\t\t\t'CTG': 'L', 'CCG': 'P', 'CAG': 'Q', 'CGG': 'R',\n\t\t\t\t'ATT': 'I', 'ACT': 'T', 'AAT': 'N', 'AGT': 'S',\n\t\t\t\t'ATC': 'I', 'ACC': 'T', 'AAC': 'N', 'AGC': 'S',\n\t\t\t\t'ATA': 'I', 'ACA': 'T', 'AAA': 'K', 'AGA': 'R',\n\t\t\t\t'ATG': 'M', 'ACG': 'T', 'AAG': 'K', 'AGG': 'R',\n\t\t\t\t'GTT': 'V', 'GCT': 'A', 'GAT': 'D', 'GGT': 'G',\n\t\t\t\t'GTC': 'V', 'GCC': 'A', 'GAC': 'D', 'GGC': 'G',\n\t\t\t\t'GTA': 'V', 'GCA': 'A', 'GAA': 'E', 'GGA': 'G',\n\t\t\t\t'GTG': 'V', 'GCG': 'A', 'GAG': 'E', 'GGG': 'G'}\n\n\nskud_seq = ''\nsmik_seq = ''\nscer_seq = ''\nsbay_seq = ''\nmax_seq_length = 0\n\n# Using a for loop to iterate through every line in pre1_alignment_file\nfor line in pre1_alignment_file:\n\n\t# Making each sequence whole after alignment\n\t##############################\n\tif line.startswith('Skud'):\n\t\tskud_seq += line.strip().split()[1]\n\n\tif line.startswith('Smik'):\n\t\tsmik_seq += line.strip().split()[1]\n\n\tif line.startswith('Scer'):\n\t\tscer_seq += line.strip().split()[1]\n\n\tif line.startswith('Sbay'):\n\t\tsbay_seq += line.strip().split()[1]\n\t##############################\n\n# Searching through every reading frame in a 3 bp sliding window to identify a conserved start codon until the end of the sequence (which is a stop codon in all 4 species) taking into account ORF 
parameters\n##############################\nfor reading_frame in range(0,3):\n\n\t#print('Reading Frame: ' + str(reading_frame) )\n\n\tcurrently_in_orf = False\n\n\tfor i in range(reading_frame,len(skud_seq),3):\n\n\t\tif (skud_seq[i:i+3] in codons_to_aa) and (smik_seq[i:i+3] in codons_to_aa) and (scer_seq[i:i+3] in codons_to_aa) and (sbay_seq[i:i+3] in codons_to_aa):\n\n\t\t\tskud_aa = codons_to_aa[skud_seq[i:i+3]]\n\t\t\tsmik_aa = codons_to_aa[smik_seq[i:i+3]]\n\t\t\tscer_aa = codons_to_aa[scer_seq[i:i+3]]\n\t\t\tsbay_aa = codons_to_aa[sbay_seq[i:i+3]]\n\n\t\t\tif 'M' == skud_aa == smik_aa == scer_aa == sbay_aa:\n\n\t\t\t\t#print(str(i) + '\\t' + skud_aa)\n\n\t\t\t\tif currently_in_orf == False:\n\n\t\t\t\t\tcurrently_in_orf = True\n\n\t\t\t\t\tstart_position = i\n\n\t\t\tif '*' == skud_aa == smik_aa == scer_aa == sbay_aa:\n\n\t\t\t\t#print(str(i) + '\\t' + skud_aa)\n\n\t\t\t\tif currently_in_orf == True:\n\n\t\t\t\t\tstop_position = (i + 3)\n\n\t\t\t\t\tif (stop_position - start_position) > max_seq_length:\n\n\t\t\t\t\t\t#print(skud_seq[start_position:stop_position])\n\n\t\t\t\t\t\tmax_seq_length = (stop_position - start_position)\n\n\t\t\t\t\t\torf_start = start_position\n\n\t\t\t\t\t\torf_stop = stop_position\n\n\t\t\t\t\tcurrently_in_orf = False\n##############################\n\n# Identifying the fraction of wobble positions that are conserved within the largest ORF\n##############################\npossible_wobble_sites = 0\nconserved_sites = 0\n\nfor i in range(orf_start, orf_stop, 3):\n\n\tskud_codon = skud_seq[i:i+3]\n\tsmik_codon = smik_seq[i:i+3]\n\tscer_codon = scer_seq[i:i+3]\n\tsbay_codon = sbay_seq[i:i+3]\n\n\tif codons_to_aa[skud_codon] == codons_to_aa[smik_codon] == codons_to_aa[scer_codon] == codons_to_aa[sbay_codon] and codons_to_aa[skud_codon] != 'M' and codons_to_aa[skud_codon] != 'W':\n\n\t\t#print('\\n' + codons_to_aa[skud_codon] + '\\t' + codons_to_aa[smik_codon] + '\\t' + codons_to_aa[scer_codon] + '\\t' + codons_to_aa[sbay_codon])\n\n\t\tif skud_codon[0:2] == smik_codon[0:2] == scer_codon[0:2] == sbay_codon[0:2]:\n\n\t\t\t#print(skud_codon[0:2] + '\\t' + smik_codon[0:2] + '\\t' + scer_codon[0:2] + '\\t' + sbay_codon[0:2])\n\n\t\t\tpossible_wobble_sites += 1\n\n\t\t\tif skud_codon == smik_codon == scer_codon == sbay_codon:\n\n\t\t\t\tconserved_sites += 1\n##############################\n\n#print(max_seq_length)\n#print(max_seq_length/3)\n#print(skud_seq[orf_start:orf_stop])\n\nprint('Number of conserved sites: ' + '\\t' + str(conserved_sites))\nprint('Number of wobble sites: ' + '\\t' + str(possible_wobble_sites))\nprint('Fraction of conserved wobble sites: ' + '\\t' + str(round(conserved_sites/possible_wobble_sites, 2)))\n\n\n\n#for i in range(0,11):\n#\tprint( str(i) + '\\t' + str(binom.pmf(i, 10, 0.45)))\n\n\n\npromoter_position_list = []\n\n# Using an overlapping windows approach to identify promoter sequence regions >= 10 bp within each 10 sub-sequence is more conserved than expected\n##############################\n# Using an overlapping windows approach to identify a list of the beginning promoter prositions \nfor i in range(0, orf_start - window_length):\n\n\tbase_conservation_count = 0\n\n\tfor base in range(i, i + window_length):\n\t\t\n\t\tif skud_seq[base] == smik_seq[base] == scer_seq[base] == sbay_seq[base]:\n\n\t\t\tbase_conservation_count += 1\n\n\tif base_conservation_count >= min_bp_conserved:\n\n\t\tpromoter_position_list.append(i)\n\n\t\t#print(str(i) + '\\t' + skud_seq[i:i+10])\n\n\n\n# Using the promoter position list to determine promoter 
sequence regions >= 10 bp within each 10 sub-sequence is more conserved than expected\noutput_fileobject = open('S_cer_conserved.txt', 'w')\nprint('Start\\tStop\\tScer Sequence', file = output_fileobject)\n\ncurrently_in_promoter = False\ncomplete_promoter = False\n\nfor i in range(len(promoter_position_list)-1):\n\n\tif promoter_position_list[i+1] - promoter_position_list[i] == 1:\n\t\n\t\tif currently_in_promoter == False:\n\n\t\t\tcurrently_in_promoter = True\n\n\t\t\tcomplete_promoter = False\n\n\t\t\tpromoter_start = promoter_position_list[i]\n\n\telse:\n\n\t\tif currently_in_promoter == True:\n\n\t\t\tpromter_stop = promoter_position_list[i]+10\n\n\t\t\tcurrently_in_promoter = False\n\n\t\t\tcomplete_promoter = True\n\n\t\tprint(str(promoter_start) + '\\t' + str(promter_stop) + '\\t' + scer_seq[promoter_start:promter_stop], file = output_fileobject)\n\n\tif complete_promoter == False and i == ( len(promoter_position_list) - 2):\n\n\t\tpromter_stop = promoter_position_list[-1] + 10\n\n\t\tprint(str(promoter_start) + '\\t' + str(promter_stop) + '\\t' + scer_seq[promoter_start:promter_stop], file = output_fileobject)\n\noutput_fileobject.close()\n##############################\n\n\n\n\n\n\n\n\n","sub_path":"assignment13/work/neutral_rate.py","file_name":"neutral_rate.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"203800838","text":"\"\"\"\nWSGI config for webhookinbox project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/\n\"\"\"\n\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"webhookinbox.settings\")\n\nvars = (\n\t'DJANGO_SECRET_KEY',\n\t'DJANGO_DEBUG',\n\t'REDIS_HOST',\n\t'REDIS_PORT',\n\t'REDIS_DB',\n\t'GRIP_URL',\n\t'WHINBOX_API_BASE',\n\t'WHINBOX_REDIS_PREFIX',\n\t'WHINBOX_GRIP_PREFIX',\n\t'WHINBOX_ITEM_MAX',\n\t'WHINBOX_ITEM_BURST_TIME',\n\t'WHINBOX_ITEM_BURST_MAX',\n\t'WHINBOX_ORIG_HEADERS',\n)\n\nfrom django.core.wsgi import get_wsgi_application\nfrom django.conf import settings\n\ndef application(environ, start_response):\n\tfor var in vars:\n\t\tif var in environ:\n\t\t\tos.environ[var] = environ[var]\n\tif not settings.DEBUG:\n\t\tfrom whitenoise.django import DjangoWhiteNoise\n\t\treturn DjangoWhiteNoise(get_wsgi_application())(environ, start_response)\n\telse:\n\t\tfrom dj_static import Cling\n\t\treturn Cling(get_wsgi_application())(environ, start_response)\n","sub_path":"webhookinbox/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"353586915","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import division\nfrom argparse import ArgumentParser\nimport networkx as nx\nimport os\nimport sys\n\ndef get_options():\n parser = ArgumentParser()\n parser.add_argument('-o', '--output', type=str)\n parser.add_argument('ifiles', nargs='*')\n return parser.parse_args()\n\ndef input_streams(ifiles):\n if not ifiles:\n yield (None, sys.stdin)\n else:\n for ifile in ifiles:\n yield (ifile, open(ifile, 'r'))\n\ndef print_header(ofile, ifiles=None):\n if not ifiles:\n print('valid,n,d,diam,aspl', file=ofile)\n else:\n print('name,valid,n,d,diam,aspl', file=ofile)\n\ndef print_row(G, ofile, ifile=None):\n n = G.number_of_nodes()\n d = sum(G.degree().values()) // n if n 
> 0 else 0\n diam = nx.diameter(G)\n aspl = nx.average_shortest_path_length(G)\n is_valid = all([G.degree(v) == d for v in G.nodes()])\n if not ifile:\n print(','.join(map(str, [is_valid, n, d, diam, aspl])), file=ofile)\n else:\n print(','.join(map(str, [ifile, is_valid, n, d, diam, aspl])),\n file=ofile)\n\ndef main():\n opts = get_options()\n ostream = open(opts.output, 'w') if opts.output else sys.stdout\n print_header(ostream, opts.ifiles)\n for ifile, istream in input_streams(opts.ifiles):\n lines = istream.read().splitlines()\n istream.close()\n G = nx.parse_edgelist(lines, nodetype=int)\n print_row(G, ostream, ifile)\n\nif __name__ == '__main__':\n main()\n","sub_path":"general/score-graph.py","file_name":"score-graph.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"590149868","text":"import os\r\n\r\n\r\ngpu_specify = input ('Please choose a device, 0~3 for single GPU, 4 for all GPUs, none for CPU: \\n')\r\nif gpu_specify == '':\r\n gpu_sign = 0\r\nelif gpu_specify == '4':\r\n gpu_sign = 1\r\nelif gpu_specify == '0' or gpu_specify == '1' or gpu_specify == '2' or gpu_specify == '3':\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_specify\r\n gpu_sign = 2\r\nelse:\r\n raise ValueError('incorrect GPU symbol')\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.datasets as dsets\r\nimport torchvision.transforms as transforms\r\nfrom torchsummary import summary\r\nfrom torch.autograd import Variable\r\nfrom torch.optim.sgd import SGD\r\nimport pickle\r\nimport alpha_optimizers\r\nimport os\r\nimport numpy as np\r\nfrom utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig\r\nfrom DNN_models import cifar10_CNN, cifar10_DenseNet, cifar10_ResNet18\r\nimport torch.nn.functional as F\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\n# Hyper Parameters\r\nnum_classes = 10\r\nnum_epochs = 500\r\nbatch_size = 1000\r\n\r\n#dataset_sign = int(input('Please input a dataset sign, 0 for mnist, 1 for cifar10, 2 for imagenet'))\r\nmodel_sign = int(input('please input model sign: \\n 0 for Densenet, 1 for CNN, 2 for ResNet18 \\nmodel_sign:'))\r\n #cifar10 dataset\r\ndataset_path = 'cifar-10-batches-py/'\r\nfor i in range(1,6):\r\n path = dataset_path + 'data_batch_' + str(i)\r\n with open(path, 'rb') as batch:\r\n dict = pickle.load(batch, encoding='bytes')\r\n if i == 1:\r\n images = dict[b'data']\r\n image_labels = dict[b'labels']\r\n else:\r\n images = np.concatenate([images, dict[b'data']], axis=0)\r\n image_labels = np.concatenate([image_labels, dict[b'labels']], axis=0)\r\npath = dataset_path + 'test_batch'\r\nwith open(path, 'rb') as batch:\r\n dict = pickle.load(batch, encoding='bytes')\r\ntest_images = np.array(dict[b'data'])\r\ntest_image_labels = np.array(dict[b'labels'])\r\nimages = np.array(images)\r\nimage_labels = np.array(image_labels)\r\nimages = np.reshape(images, [-1, 3, 32, 32])\r\ntest_images = np.reshape(test_images, [-1, 3, 32, 32])\r\nif model_sign == 2:\r\n images = images.transpose((0, 2, 3, 1))\r\n resized_images = []\r\n for i in range(len(images)):\r\n resized_images.append(np.array(Image.fromarray(images[i]).resize((224, 224), Image.BICUBIC)))\r\n images = np.array(resized_images).transpose((0, 3, 1, 2))\r\n test_images = test_images.transpose((0, 2, 3, 1))\r\n resized_images = []\r\n for i in range(len(test_images)):\r\n 
resized_images.append(np.array(Image.fromarray(test_images[i]).resize((224, 224), Image.BICUBIC)))\r\n test_images = np.array(resized_images).transpose((0, 3, 1, 2))\r\nprint('dataset extract completed,there are ' + str(len(images)) + ' images')\r\n\r\nclass cifar10_dataset(torch.utils.data.Dataset):\r\n def __init__(self):\r\n self.images = images\r\n self.labels = image_labels\r\n super(cifar10_dataset, self).__init__()\r\n def __getitem__(self, index):\r\n data = self.images[index]\r\n label = self.labels[index]\r\n return data, label\r\n def __len__(self):\r\n return len(self.images)\r\n\r\nclass cifar10_test_dataset(torch.utils.data.Dataset):\r\n def __init__(self):\r\n self.images = test_images\r\n self.labels = test_image_labels\r\n super(cifar10_test_dataset, self).__init__()\r\n def __getitem__(self, index):\r\n data = self.images[index]\r\n label = self.labels[index]\r\n return data, label\r\n def __len__(self):\r\n return len(self.images)\r\n\r\ntrain_loader = torch.utils.data.DataLoader(dataset=cifar10_dataset(), batch_size=batch_size, shuffle=True)\r\ntest_loader = torch.utils.data.DataLoader(dataset=cifar10_test_dataset(), batch_size=batch_size, shuffle=True)\r\nBGD_loader = torch.utils.data.DataLoader(dataset=cifar10_dataset(),batch_size=len(images),shuffle=True)\r\n\r\n#testing functon\r\ndef training(model_sign=0, optimizer_sign=0, learning_rate=0.01, momentum=0.9, beta=0.999, alpha=1):\r\n training_data = {'train_loss': [], 'val_loss': [], 'train_acc': [], 'val_acc': []}\r\n if model_sign == 0:\r\n net = cifar10_DenseNet(num_classes)\r\n padding_sign = True\r\n elif model_sign == 1:\r\n net = cifar10_CNN(num_classes)\r\n padding_sign = False\r\n elif model_sign == 2:\r\n net = cifar10_ResNet18(num_classes)\r\n padding_sign = False\r\n else:\r\n raise ValueError('Not correct model sign')\r\n if gpu_sign == 1:\r\n net = torch.nn.DataParallel(net, device_ids=[0, 1, 2, 3])\r\n if gpu_sign != 0:\r\n net.cuda()\r\n net.train()\r\n # Loss and Optimizer\r\n criterion = nn.CrossEntropyLoss()\r\n print('optimizer_sign:' + str(optimizer_sign))\r\n if optimizer_sign == 0:\r\n optimizer = alpha_optimizers.Adamoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001,\r\n momentum=momentum, beta=beta)\r\n elif optimizer_sign == 1:\r\n optimizer = alpha_optimizers.alpha_Adamoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001,\r\n momentum=momentum, beta=beta, alpha=alpha)\r\n elif optimizer_sign == 2:\r\n optimizer = alpha_optimizers.alpha_SGDoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001,\r\n momentum=momentum, alpha=alpha)\r\n elif optimizer_sign == 3:\r\n optimizer = alpha_optimizers.alpha_ascent_Adamoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001,\r\n momentum=momentum, beta=beta)\r\n elif optimizer_sign == 4:\r\n optimizer = alpha_optimizers.double_alpha_Adamoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001,\r\n momentum=momentum, beta=beta, alpha=alpha)\r\n elif optimizer_sign == 5:\r\n optimizer = alpha_optimizers.alpha2ascent_Adamoptimizer(net.parameters(), lr=learning_rate[0],sgd_lr=learning_rate[1], weight_decay=0.0001,\r\n momentum=momentum, beta=beta)\r\n elif optimizer_sign == 6:\r\n optimizer = alpha_optimizers.alpha2_SGDoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001,\r\n momentum=momentum, alpha=alpha)\r\n elif optimizer_sign == 7:\r\n optimizer = alpha_optimizers.SGD_momentumoptimizer(net.parameters(), lr=learning_rate, weight_decay=0.0001, momentum=momentum)\r\n elif 
optimizer_sign == 8:\r\n optimizer = alpha_optimizers.Adam_to_SGDoptimizer(net.parameters(), lr=learning_rate[0], sgd_lr=learning_rate[1], weight_decay=0.0001,\r\n momentum=momentum, beta=beta)\r\n else:\r\n raise ValueError('Not correct algorithm symbol')\r\n\r\n # Train the Model\r\n for epoch in range(num_epochs):\r\n\r\n train_loss_log = AverageMeter()\r\n train_acc_log = AverageMeter()\r\n val_loss_log = AverageMeter()\r\n val_acc_log = AverageMeter()\r\n for i, (images, labels) in enumerate(train_loader):\r\n # Convert torch tensor to Variable\r\n if gpu_sign != 0:\r\n images = images.cuda()\r\n labels = labels.cuda()\r\n images = images.float()\r\n if padding_sign == True:\r\n images = images.view(-1, 3072)\r\n labels = Variable(labels).long()\r\n\r\n # Forward + Backward + Optimize\r\n optimizer.zero_grad() # zero the gradient buffer\r\n outputs = net(images)\r\n train_loss = criterion(outputs, labels)\r\n train_loss.backward()\r\n optimizer.step()\r\n prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))\r\n train_loss_log.update(train_loss.data, images.size(0))\r\n train_acc_log.update(prec1, images.size(0))\r\n\r\n if (i + 1) % 20 == 0:\r\n print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Acc: %.8f'\r\n % (epoch + 1, num_epochs, i + 1, 50000 / batch_size, train_loss_log.avg,\r\n train_acc_log.avg))\r\n training_data['train_loss'].append(train_loss_log.avg.detach().cpu().numpy())\r\n training_data['train_acc'].append(train_acc_log.avg.detach().cpu().numpy())\r\n # Test the Model\r\n net.eval()\r\n correct = 0\r\n loss = 0\r\n total = 0\r\n for images, labels in test_loader:\r\n if gpu_sign != 0:\r\n images = images.cuda()\r\n labels = labels.cuda()\r\n images = images.float()\r\n if padding_sign:\r\n images = images.view(-1, 3072)\r\n labels = Variable(labels).long()\r\n outputs = net(images)\r\n test_loss = criterion(outputs, labels)\r\n val_loss_log.update(test_loss.data, images.size(0))\r\n prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))\r\n val_acc_log.update(prec1, images.size(0))\r\n\r\n #logger.append([learning_rate, train_loss_log.avg, val_loss_log.avg, train_acc_log.avg, val_acc_log.avg])\r\n print('Accuracy of the network on the 10000 test images: %.8f %%' % (val_acc_log.avg))\r\n print('Loss of the network on the 10000 test images: %.8f' % (val_loss_log.avg))\r\n training_data['val_loss'].append(val_loss_log.avg.detach().cpu().numpy())\r\n training_data['val_acc'].append(val_acc_log.avg.detach().cpu().numpy())\r\n #logger.close()\r\n #logger.plot()\r\n training_data['learning_rate'] = learning_rate\r\n return training_data\r\n\r\n\r\n'Algorithms that can be choosed'\r\nalgorithm_labels = ['0.Adam', '1.alpha_adam', '2.alpha_SGD', '3.alpha_ascent_adam', '4.double_alpha_adam',\r\n '5.alpha2ascent_adam', '6.alpha2_SGD', '7.SGD', '8.Adam_to_SGD']\r\n\r\ntask = int(input('please input a task, 0 for algorithm comparing, 1 for learning rate modify, '\r\n '2 for alpha modify \\n'))\r\nif task == 0:\r\n test_algorithms = eval(input('please input testing algorithms, only list consist of int(algorithm sign) supported\\n'))\r\n test_algorithms = [int(i) for i in test_algorithms]\r\n learning_rates = eval(input('please input learning rates, must corresponding to the algorithms \\n'))\r\n if len(test_algorithms) < 1 or len(test_algorithms) != len(learning_rates):\r\n raise ValueError('lr and algorithms are not corresponding')\r\n alphas = eval(input('please input the list of testing alphas correspond to test algorithms \\n'))\r\n if len(test_algorithms) != 
len(alphas):\r\n raise ValueError('alphas and algorithms are not corresponding')\r\n for i in range(len(test_algorithms)):\r\n if test_algorithms[i] == 4:\r\n if not isinstance(alphas[i], list):\r\n raise ValueError('algorithm4 need to input a list')\r\n else:\r\n alphas[i] = float(alphas[i])\r\nelif task == 1:\r\n test_algorithm = int(input('please input a single algorithm symbol \\n'))\r\n learning_rates = eval(input('please input testing learning rates,only list supported \\n'))\r\n if test_algorithm == 4:\r\n alphas = eval(input('please input a single alpha list for algorithm 4\\n'))\r\n else:\r\n alphas = float(input('please input a single alpha \\n'))\r\nelif task == 2:\r\n test_algorithm = int(input('please input a single algorithm symbol \\n'))\r\n learning_rates = eval(input('please input learning rates for the algorithm \\n'))\r\n alphas = eval(input('please input alphas corresponding to learning rates, some algorithms may need to input lists \\n'))\r\n if len(alphas) != len(learning_rates):\r\n raise ValueError('alphas and learning rates are not corresponding')\r\nelse:\r\n raise ValueError('not correct task symbol')\r\nrepeats = int(input('please input how many times to repeat \\n'))\r\n\r\nshow_symbol = eval(input('please choose what to show, 0 for accuracy, 1 for loss, 2 for training_err,'\r\n ' 3 for mean derivatives, support multiple chioce. please input an list \\n'))\r\nfor i in show_symbol:\r\n i = int(i)\r\n if not(i == 0 or i == 1 or i == 2 or i == 3 or i == 4):\r\n raise ValueError('incorrect show symbol')\r\n\r\nshows = ['acc', 'loss', 'training_err']\r\nmodels = ['DenseNet', 'CNN', 'ResNet']\r\ncomparing_datas = [[] for i in show_symbol]\r\ncomparing_data = [[] for i in show_symbol]\r\ntest_algorithm_labels = [[] for i in show_symbol]\r\nif task == 0:\r\n for i in range(len(test_algorithms)):\r\n for j in range(repeats):\r\n output = training(model_sign=model_sign, optimizer_sign=test_algorithms[i],\r\n learning_rate=learning_rates[i],alpha=alphas[i])\r\n for a in range(len(show_symbol)):\r\n if j == 0:\r\n if show_symbol[a] == 0:\r\n comparing_data[a] = np.array(output['train_acc'])\r\n elif show_symbol[a] == 1:\r\n comparing_data[a] = np.array(output['train_loss'])\r\n elif show_symbol[a] == 2:\r\n comparing_data[a] = 100 - np.array(output['train_acc'])\r\n elif show_symbol[a] == 3:\r\n comparing_data[a] = np.array(output['val_acc'])\r\n else:\r\n comparing_data[a] = np.array(output['val_loss'])\r\n else:\r\n if show_symbol[a] == 0:\r\n comparing_data[a] += np.array(output['train_acc'])\r\n elif show_symbol[a] == 1:\r\n comparing_data[a] += np.array(output['train_loss'])\r\n elif show_symbol[a] == 2:\r\n comparing_data[a] += 100 - np.array(output['train_acc'])\r\n elif show_symbol[a] == 3:\r\n comparing_data[a] += np.array(output['val_acc'])\r\n else:\r\n comparing_data[a] += np.array(output['val_loss'])\r\n for a in range(len(show_symbol)):\r\n comparing_datas[a].append(np.array(comparing_data[a]) / repeats)\r\n test_algorithm_labels[a].append(\r\n algorithm_labels[test_algorithms[i]] + ' learning_rate=' + str(learning_rates[i]) + ' alpha=' + str(alphas[i]))\r\nelif task == 1:\r\n for i in range(len(learning_rates)):\r\n for j in range(repeats):\r\n output = training(model_sign=model_sign, optimizer_sign=test_algorithm,\r\n learning_rate=learning_rates[i], alpha=alphas)\r\n for a in range(len(show_symbol)):\r\n if j == 0:\r\n if show_symbol[a] == 0:\r\n comparing_data[a] = np.array(output['train_acc'])\r\n elif show_symbol[a] == 1:\r\n comparing_data[a] = 
np.array(output['train_loss'])\r\n elif show_symbol[a] == 2:\r\n comparing_data[a] = 100 - np.array(output['train_acc'])\r\n elif show_symbol[a] == 3:\r\n comparing_data[a] = np.array(output['val_acc'])\r\n else:\r\n comparing_data[a] = np.array(output['val_loss'])\r\n else:\r\n if show_symbol[a] == 0:\r\n comparing_data[a] += np.array(output['train_acc'])\r\n elif show_symbol[a] == 1:\r\n comparing_data[a] += np.array(output['train_loss'])\r\n elif show_symbol[a] == 2:\r\n comparing_data[a] += 100 - np.array(output['train_acc'])\r\n elif show_symbol[a] == 3:\r\n comparing_data[a] += np.array(output['val_acc'])\r\n else:\r\n comparing_data[a] += np.array(output['val_loss'])\r\n for a in range(len(show_symbol)):\r\n comparing_datas[a].append(np.array(comparing_data[a]) / repeats)\r\n test_algorithm_labels[a].append(\r\n algorithm_labels[test_algorithm] + ' learning_rate=' + str(learning_rates[i]) + 'alpha=' + str(alphas))\r\nelif task == 2:\r\n for i in range(len(learning_rates)):\r\n for j in range(repeats):\r\n output = training(model_sign=model_sign, optimizer_sign=test_algorithm,\r\n learning_rate=learning_rates[i],\r\n alpha=alphas[i])\r\n for a in range(len(show_symbol)):\r\n if j == 0:\r\n if show_symbol[a] == 0:\r\n comparing_data[a] = np.array(output['train_acc'])\r\n elif show_symbol[a] == 1:\r\n comparing_data[a] = np.array(output['train_loss'])\r\n elif show_symbol[a] == 2:\r\n comparing_data[a] = 100 - np.array(output['train_acc'])\r\n elif show_symbol[a] == 3:\r\n comparing_data[a] = np.array(output['val_acc'])\r\n else:\r\n comparing_data[a] = np.array(output['cal_loss'])\r\n else:\r\n if show_symbol[a] == 0:\r\n comparing_data[a] += np.array(output['train_acc'])\r\n elif show_symbol[a] == 1:\r\n comparing_data[a] += np.array(output['train_loss'])\r\n elif show_symbol[a] == 2:\r\n comparing_data[a] += 100 - np.array(output['train_acc'])\r\n elif show_symbol[a] == 3:\r\n comparing_data[a] += np.array(output['val_acc'])\r\n else:\r\n comparing_data[a] += np.array(output['val_loss'])\r\n for a in range(len(show_symbol)):\r\n comparing_datas[a].append(np.array(comparing_data[a]) / repeats)\r\n test_algorithm_labels[a].append(\r\n algorithm_labels[test_algorithm] + ' learning_rate=' + str(learning_rates[i]) + ' alpha=' + str(alphas[i]))\r\nsave_sign = 10\r\nfor a in range(len(show_symbol)):\r\n for i in range(len(comparing_datas[a])):\r\n plt.plot(range(len(comparing_datas[a][i])), comparing_datas[a][i])\r\n plt.legend(test_algorithm_labels[a])\r\n\r\n plt.title(models[model_sign] + ' CIFAR10, ' + shows[show_symbol[a]])\r\n plt.savefig('data/matplotlib/' + str(save_sign))\r\n save_sign += 1\r\n plt.show()\r\n plt.cla()\r\n\r\na = 0\r\n\r\n\r\n\r\n\r\n","sub_path":"CIFAR10_algorithms_comparing.py","file_name":"CIFAR10_algorithms_comparing.py","file_ext":"py","file_size_in_byte":18559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"77616000","text":"\"\"\"Program that stores song information: Song, Album, Artist, and Genre.\n\nUser can:\n View all songs\n Serch for a song\n Add a song\n Update a song\n Delete a song\n Exit the application\n\nNOTE: See wireframe.png for sketch of interface.\n\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter import messagebox\n\nimport backend_06 as backend\n\n#########\n# Create our button handlers (see where we create the buttons and notice how\n# we hook these to the appropriate button using the commmand= option)\ndef view_command():\n list1.delete(0, END)\n for row in backend.view():\n 
list1.insert(END, row)\n\ndef search_command():\n    list1.delete(0, END)\n    for row in backend.search(song_text.get(), artist_text.get(), album_text.get(), year_text.get()):\n        list1.insert(END, row)\n\ndef add_command():\n    backend.insert(song_text.get(), artist_text.get(), album_text.get(), year_text.get())\n    view_command()\n\ndef delete_command():\n    index = list1.curselection()[0]\n    selected_tuple = list1.get(index)\n    backend.delete(selected_tuple[0])\n    list1.delete(index)\n\ndef update_command():\n    backend.update(id=selected_id, song=e1.get(), artist=e2.get(), album=e3.get(), year=e4.get())\n    view_command()\n\ndef get_selected_row(event):\n    index = list1.curselection()[0]\n    selected_tuple = list1.get(index)\n\n    global selected_id  # update command will need this, so we must make it global\n    selected_id = selected_tuple[0]\n\n    e1.delete(0, END)\n    e1.insert(END, selected_tuple[1])\n    e2.delete(0, END)\n    e2.insert(END, selected_tuple[2])\n    e3.delete(0, END)\n    e3.insert(END, selected_tuple[3])\n    e4.delete(0, END)\n    e4.insert(END, selected_tuple[4])\n\ndef exit_command():\n    if messagebox.askokcancel(\"Quit\", \"Do you really wish to quit?\"):\n        window.destroy()\n\nwindow = Tk()  # Tk() creates the main window object\nwindow.wm_title(\"The Ultimate/Elite SongDB that is the Pinnacle/Apex of SongDB Technology\")\n#########\n# Display Titles\nl1 = Label(window, text=\"Song\")\nl1.grid(row=0, column=0)\n\nl2 = Label(window, text=\"Artist\")\nl2.grid(row=0, column=2)\n\nl3 = Label(window, text=\"Album\")\nl3.grid(row=1, column=0)\n\nl4 = Label(window, text=\"Year\")\nl4.grid(row=1, column=2)\n\n#########\n# Display text entry fields\nsong_text = StringVar()\ne1 = Entry(window, textvariable=song_text, width=35)\ne1.grid(row=0, column=1)\n\nartist_text = StringVar()\ne2 = Entry(window, textvariable=artist_text, width=25)\ne2.grid(row=0, column=3)\n\nalbum_text = StringVar()\ne3 = Entry(window, textvariable=album_text, width=35)\ne3.grid(row=1, column=1)\n\nyear_text = StringVar()\ne4 = Entry(window, textvariable=year_text, width=25)\ne4.grid(row=1, column=3)\n\n############################\n# display listbox and attach a Scrollbar\nlist1 = Listbox(window, height=9, width=60)\nlist1.grid(row=2, column=0, rowspan=6, columnspan=2)  # we want to span across multiple rows and columns\n\nlist1.bind(\"<<ListboxSelect>>\", get_selected_row)\n\nsb1 = Scrollbar(window)\nsb1.grid(row=2, column=2, rowspan=6)\n\nlist1.configure(yscrollcommand=sb1.set)\nsb1.configure(command=list1.yview)\n\n# Display Buttons\nb1 = Button(window, text=\"View All songs\", width=25, command=view_command)\nb1.grid(row=2, column=3)\nb2 = Button(window, text=\"Search\", width=25, command=search_command)\nb2.grid(row=3, column=3)\nb3 = Button(window, text=\"Add Song\", width=25, command=add_command)\nb3.grid(row=4, column=3)\nb4 = Button(window, text=\"Update Song\", width=25, command=update_command)\nb4.grid(row=5, column=3)\nb5 = Button(window, text=\"Delete Song\", width=25, command=delete_command)\nb5.grid(row=6, column=3)\n# b6 = Button(window, text=\"Exit\", width=25, command=window.destroy)\nb6 = Button(window, text=\"Exit\", width=25, command=exit_command)\nb6.grid(row=7, column=3)\n\nwindow.mainloop()\n","sub_path":"Class Notes/w08c24/frontend_07.py","file_name":"frontend_07.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"528132448","text":"\"\"\"Cleanup Kubeflow deployments in our ci system.\"\"\"\nimport argparse\nimport datetime\nfrom dateutil import parser as date_parser\nimport 
logging\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport yaml\n\nfrom kubeflow.testing import argo_client\nfrom kubeflow.testing import util\nfrom kubernetes import client as k8s_client\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\n\n# Regexes that select matching deployments\nMATCHING = [re.compile(\"e2e-.*\"), re.compile(\"kfctl.*\"),\n re.compile(\"z-.*\"), re.compile(\".*presubmit.*\")]\n\nMATCHING_FIREWALL_RULES = [re.compile(\"gke-kfctl-.*\"),\n re.compile(\"gke-e2e-.*\"),\n re.compile(\".*presubmit.*\"),\n re.compile(\".*postsubmit.*\")]\n\n# Regexes that select matching disks\nMATCHING_DISK = [re.compile(\".*postsubmit.*\"), re.compile(\".*presubmit.*\")]\n\ndef is_match_disk(name):\n for m in MATCHING_DISK:\n if m.match(name):\n return True\n\n return False\n\ndef is_match(name, patterns=None):\n if not patterns:\n patterns = MATCHING\n for m in patterns:\n if m.match(name):\n return True\n\n return False\n\ndef cleanup_workflows(args):\n # We need to load the kube config so that we can have credentials to\n # talk to the APIServer.\n util.load_kube_config(persist_config=False)\n\n client = k8s_client.ApiClient()\n crd_api = k8s_client.CustomObjectsApi(client)\n workflows = crd_api.list_namespaced_custom_object(\n argo_client.GROUP, argo_client.VERSION, args.namespace, argo_client.PLURAL)\n\n expired = []\n unexpired = []\n\n for w in workflows[\"items\"]:\n is_expired = False\n\n start_time = date_parser.parse(w[\"status\"][\"startedAt\"])\n now = datetime.datetime.now(start_time.tzinfo)\n\n name = w[\"metadata\"][\"name\"]\n age = now - start_time\n if age > datetime.timedelta(hours=args.max_age_hours):\n logging.info(\"Deleting workflow: %s\", name)\n is_expired = True\n if not args.dryrun:\n crd_api.delete_namespaced_custom_object(\n argo_client.GROUP, argo_client.VERSION, args.namespace,\n argo_client.PLURAL, name, k8s_client.V1DeleteOptions())\n break\n\n if is_expired:\n expired.append(name)\n else:\n unexpired.append(name)\n\n logging.info(\"Unexpired workflows:\\n%s\", \"\\n\".join(unexpired))\n logging.info(\"expired workflows:\\n%s\", \"\\n\".join(expired))\n\ndef cleanup_endpoints(args):\n credentials = GoogleCredentials.get_application_default()\n\n services_management = discovery.build('servicemanagement', 'v1', credentials=credentials)\n services = services_management.services()\n rollouts = services.rollouts()\n next_page_token = None\n\n expired = []\n unexpired = []\n unmatched = []\n\n while True:\n results = services.list(producerProjectId=args.project,\n pageToken=next_page_token).execute()\n\n for s in results[\"services\"]:\n name = s[\"serviceName\"]\n if not is_match(name):\n unmatched.append(name)\n continue\n\n all_rollouts = rollouts.list(serviceName=name).execute()\n is_expired = False\n if not all_rollouts.get(\"rollouts\", []):\n logging.info(\"Service %s has no rollouts\", name)\n is_expired = True\n else:\n r = all_rollouts[\"rollouts\"][0]\n create_time = date_parser.parse(r[\"createTime\"])\n\n now = datetime.datetime.now(create_time.tzinfo)\n\n age = now - create_time\n if age > datetime.timedelta(hours=args.max_age_hours):\n is_expired = True\n\n if is_expired:\n logging.info(\"Deleting service: %s\", name)\n is_expired = True\n if not args.dryrun:\n services.delete(serviceName=name).execute()\n expired.append(name)\n else:\n unexpired.append(name)\n\n if not \"nextPageToken\" in results:\n break\n next_page_token = results[\"nextPageToken\"]\n\n\n logging.info(\"Unmatched 
services:\\n%s\", \"\\n\".join(unmatched))\n logging.info(\"Unexpired services:\\n%s\", \"\\n\".join(unexpired))\n logging.info(\"expired services:\\n%s\", \"\\n\".join(expired))\n\ndef cleanup_disks(args):\n credentials = GoogleCredentials.get_application_default()\n\n compute = discovery.build('compute', 'v1', credentials=credentials)\n disks = compute.disks()\n next_page_token = None\n\n expired = []\n unexpired = []\n unmatched = []\n\n for zone in args.zones.split(\",\"):\n while True:\n results = disks.list(project=args.project,\n zone=zone,\n pageToken=next_page_token).execute()\n if not \"items\" in results:\n break\n for d in results[\"items\"]:\n name = d[\"name\"]\n if not is_match_disk(name):\n unmatched.append(name)\n continue\n\n age = getAge(d[\"creationTimestamp\"])\n if age > datetime.timedelta(hours=args.max_age_hours):\n logging.info(\"Deleting disk: %s, age = %r\", name, age)\n if not args.dryrun:\n response = disks.delete(project=args.project, zone=zone, disk=name)\n logging.info(\"respone = %s\", response)\n expired.append(name)\n else:\n unexpired.append(name)\n if not \"nextPageToken\" in results:\n break\n next_page_token = results[\"nextPageToken\"]\n\n logging.info(\"Unmatched disks:\\n%s\", \"\\n\".join(unmatched))\n logging.info(\"Unexpired disks:\\n%s\", \"\\n\".join(unexpired))\n logging.info(\"expired disks:\\n%s\", \"\\n\".join(expired))\n\ndef cleanup_firewall_rules(args):\n credentials = GoogleCredentials.get_application_default()\n\n compute = discovery.build('compute', 'v1', credentials=credentials)\n firewalls = compute.firewalls()\n next_page_token = None\n\n expired = []\n unexpired = []\n unmatched = []\n\n while True:\n results = firewalls.list(project=args.project,\n pageToken=next_page_token).execute()\n if not \"items\" in results:\n break\n for d in results[\"items\"]:\n name = d[\"name\"]\n\n match = False\n if is_match(name, patterns=MATCHING_FIREWALL_RULES):\n match = True\n\n for tag in d.get(\"targetTags\", []):\n if is_match(tag, patterns=MATCHING_FIREWALL_RULES):\n match = True\n break\n\n if not match:\n unmatched.append(name)\n continue\n\n age = getAge(d[\"creationTimestamp\"])\n if age > datetime.timedelta(hours=args.max_age_hours):\n logging.info(\"Deleting firewall: %s, age = %r\", name, age)\n if not args.dryrun:\n response = firewalls.delete(project=args.project,\n firewall=name).execute()\n logging.info(\"respone = %s\", response)\n expired.append(name)\n else:\n unexpired.append(name)\n if not \"nextPageToken\" in results:\n break\n next_page_token = results[\"nextPageToken\"]\n\n logging.info(\"Unmatched firewall rules:\\n%s\", \"\\n\".join(unmatched))\n logging.info(\"Unexpired firewall rules:\\n%s\", \"\\n\".join(unexpired))\n logging.info(\"expired firewall rules:\\n%s\", \"\\n\".join(expired))\n\n\ndef cleanup_service_accounts(args):\n credentials = GoogleCredentials.get_application_default()\n\n iam = discovery.build('iam', 'v1', credentials=credentials)\n projects = iam.projects()\n accounts = []\n next_page_token = None\n while True:\n service_accounts = iam.projects().serviceAccounts().list(\n name='projects/' + args.project, pageToken=next_page_token).execute()\n accounts.extend(service_accounts[\"accounts\"])\n if not \"nextPageToken\" in service_accounts:\n break\n next_page_token = service_accounts[\"nextPageToken\"]\n\n keys_client = projects.serviceAccounts().keys()\n\n unmatched_emails = []\n expired_emails = []\n unexpired_emails = []\n # Service accounts don't specify the creation date time. 
So we\n # use the creation time of the key associated with the account.\n for a in accounts:\n if not is_match(a[\"email\"]):\n logging.info(\"Skipping key %s; it does not match expected names.\",\n a[\"email\"])\n\n unmatched_emails.append(a[\"email\"])\n continue\n\n keys = keys_client.list(name=a[\"name\"]).execute()\n\n is_expired = True\n for k in keys[\"keys\"]:\n valid_time = date_parser.parse(k[\"validAfterTime\"])\n now = datetime.datetime.now(valid_time.tzinfo)\n\n age = now - valid_time\n if age < datetime.timedelta(hours=args.max_age_hours):\n is_expired = False\n break\n if is_expired:\n logging.info(\"Deleting account: %s\", a[\"email\"])\n if not args.dryrun:\n iam.projects().serviceAccounts().delete(name=a[\"name\"]).execute()\n expired_emails.append(a[\"email\"])\n else:\n unexpired_emails.append(a[\"email\"])\n\n logging.info(\"Unmatched emails:\\n%s\", \"\\n\".join(unmatched_emails))\n logging.info(\"Unexpired emails:\\n%s\", \"\\n\".join(unexpired_emails))\n logging.info(\"expired emails:\\n%s\", \"\\n\".join(expired_emails))\n\n\ndef trim_unused_bindings(iamPolicy, accounts):\n keepBindings = []\n for binding in iamPolicy['bindings']:\n members_to_keep = []\n members_to_delete = []\n for member in binding['members']:\n if not member.startswith('serviceAccount:'):\n members_to_keep.append(member)\n else:\n accountEmail = member[15:]\n if (not is_match(accountEmail)) or (accountEmail in accounts):\n members_to_keep.append(member)\n else:\n members_to_delete.append(member)\n if members_to_keep:\n binding['members'] = members_to_keep\n keepBindings.append(binding)\n if members_to_delete:\n logging.info(\"Delete binding for members:\\n%s\", \", \".join(members_to_delete))\n iamPolicy['bindings'] = keepBindings\n\ndef cleanup_service_account_bindings(args):\n credentials = GoogleCredentials.get_application_default()\n iam = discovery.build('iam', 'v1', credentials=credentials)\n accounts = []\n next_page_token = None\n while True:\n service_accounts = iam.projects().serviceAccounts().list(\n name='projects/' + args.project, pageToken=next_page_token).execute()\n for a in service_accounts[\"accounts\"]:\n accounts.append(a[\"email\"])\n if not \"nextPageToken\" in service_accounts:\n break\n next_page_token = service_accounts[\"nextPageToken\"]\n\n resourcemanager = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)\n iamPolicy = resourcemanager.projects().getIamPolicy(resource=args.project).execute()\n trim_unused_bindings(iamPolicy, accounts)\n\n setBody = {'policy': iamPolicy}\n if not args.dryrun:\n resourcemanager.projects().setIamPolicy(resource=args.project, body=setBody).execute()\n\n\ndef getAge(tsInRFC3339):\n # The docs say insert time will be in RFC339 format.\n # But it looks like it also includes a time zone offset in the form\n # -HH:MM; which is slightly different from what datetime strftime %z\n # uses (no colon).\n # https://cloud.google.com/deployment-manager/docs/reference/latest/deployments/insert\n #\n # So we parse out the hours.\n #\n # TODO(jlewi): Can we use date_parser like we do in cleanup_service_accounts\n insert_time_str = tsInRFC3339[:-6]\n tz_offset = tsInRFC3339[-6:]\n hours_offset = int(tz_offset.split(\":\", 1)[0])\n RFC3339 = \"%Y-%m-%dT%H:%M:%S.%f\"\n insert_time = datetime.datetime.strptime(insert_time_str, RFC3339)\n\n # Convert the time to UTC\n insert_time_utc = insert_time + datetime.timedelta(hours=-1 * hours_offset)\n age = datetime.datetime.utcnow()- insert_time_utc\n return age\n\n\ndef 
cleanup_deployments(args): # pylint: disable=too-many-statements,too-many-branches\n if not args.delete_script:\n raise ValueError(\"--delete_script must be specified.\")\n\n credentials = GoogleCredentials.get_application_default()\n dm = discovery.build(\"deploymentmanager\", \"v2\", credentials=credentials)\n\n manifests_client = dm.manifests()\n deployments_client = dm.deployments()\n deployments = deployments_client.list(project=args.project).execute()\n\n for d in deployments.get(\"deployments\", []):\n if not d.get(\"insertTime\", None):\n logging.warning(\"Deployment %s doesn't have a deployment time \"\n \"skipping it\", d[\"name\"])\n continue\n\n name = d[\"name\"]\n\n if not is_match(name):\n logging.info(\"Skipping Deployment %s; it does not match expected names.\",\n name)\n continue\n\n full_insert_time = d.get(\"insertTime\")\n age = getAge(full_insert_time)\n\n if age > datetime.timedelta(hours=args.max_age_hours):\n # Get the zone.\n if \"update\" in d:\n manifest_url = d[\"update\"][\"manifest\"]\n else:\n manifest_url = d[\"manifest\"]\n manifest_name = manifest_url.split(\"/\")[-1]\n manifest = manifests_client.get(\n project=args.project, deployment=name, manifest=manifest_name).execute()\n\n # Create a temporary directory to store the deployment.\n manifest_dir = tempfile.mkdtemp(prefix=\"tmp\" + name)\n logging.info(\"Creating directory %s to store manifests for deployment %s\",\n manifest_dir, name)\n with open(os.path.join(manifest_dir, \"cluster-kubeflow.yaml\"), \"w\") as hf:\n hf.write(manifest[\"config\"][\"content\"])\n\n config = yaml.load(manifest[\"config\"][\"content\"])\n\n if not config:\n logging.warning(\"Skipping deployment %s because it has no config; \"\n \"is it already being deleted?\", name)\n zone = config[\"resources\"][0][\"properties\"][\"zone\"]\n command = [args.delete_script,\n \"--project=\" + args.project, \"--deployment=\" + name,\n \"--zone=\" + zone]\n cwd = None\n # If we download the manifests first delete_deployment will issue\n # an update before the delete which can help do a clean delete.\n # But that has the disadvantage of creating GKE clusters if they have\n # already been deleted; which is slow and wasteful.\n if args.update_first:\n # Download the manifest for this deployment.\n # We want to do an update and then a delete because this is necessary\n # for deleting role bindings.\n command.append(\"cluster-kubeflow.yaml\")\n cwd = manifest_dir\n logging.info(\"Deleting deployment %s; inserted at %s\", name,\n full_insert_time)\n\n # We could potentially run the deletes in parallel but that would lead\n # to very confusing logs.\n if not args.dryrun:\n subprocess.check_call(command, cwd=cwd)\n\n gke = discovery.build(\"container\", \"v1\", credentials=credentials)\n\n # Collect clusters for which deployment might no longer exist.\n clusters_client = gke.projects().zones().clusters()\n\n for zone in args.zones.split(\",\"):\n clusters = clusters_client.list(projectId=args.project, zone=zone).execute()\n\n if not clusters:\n continue\n for c in clusters[\"clusters\"]:\n name = c[\"name\"]\n if not is_match(name):\n logging.info(\"Skipping cluster%s; it does not match expected names.\",\n name)\n continue\n\n full_insert_time = c[\"createTime\"]\n insert_time_str = full_insert_time[:-6]\n tz_offset = full_insert_time[-6:]\n hours_offset = int(tz_offset.split(\":\", 1)[0])\n RFC3339 = \"%Y-%m-%dT%H:%M:%S\"\n insert_time = datetime.datetime.strptime(insert_time_str, RFC3339)\n\n # Convert the time to UTC\n insert_time_utc = 
insert_time + datetime.timedelta(hours=-1 * hours_offset)\n age = datetime.datetime.utcnow()- insert_time_utc\n\n if age > datetime.timedelta(hours=args.max_age_hours):\n logging.info(\"Deleting cluster %s in zone %s\", name, zone)\n\n if not args.dryrun:\n clusters_client.delete(projectId=args.project, zone=zone,\n clusterId=name).execute()\n\ndef cleanup_all(args):\n cleanup_deployments(args)\n cleanup_endpoints(args)\n cleanup_service_accounts(args)\n cleanup_service_account_bindings(args)\n cleanup_workflows(args)\n cleanup_disks(args)\n cleanup_firewall_rules(args)\n\ndef add_workflow_args(parser):\n parser.add_argument(\n \"--namespace\", default=\"kubeflow-test-infra\",\n help=\"Namespace to cleanup.\")\n\ndef add_deployments_args(parser):\n parser.add_argument(\n \"--update_first\", default=False, type=bool,\n help=\"Whether to update the deployment first.\")\n\n parser.add_argument(\n \"--delete_script\", default=\"\", type=str,\n help=(\"The path to the delete_deployment.sh script which is in the \"\n \"Kubeflow repository.\"))\n parser.add_argument(\n \"--zones\", default=\"us-east1-d,us-central1-a\", type=str,\n help=\"Comma separated list of zones to check.\")\n\ndef main():\n logging.basicConfig(level=logging.INFO,\n format=('%(levelname)s|%(asctime)s'\n '|%(pathname)s|%(lineno)d| %(message)s'),\n datefmt='%Y-%m-%dT%H:%M:%S',\n )\n logging.getLogger().setLevel(logging.INFO)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--project\", default=\"kubeflow-ci\", type=str, help=(\"The project.\"))\n\n parser.add_argument(\n \"--max_age_hours\", default=3, type=int, help=(\"The age of deployments to gc.\"))\n\n parser.add_argument('--dryrun', dest='dryrun', action='store_true')\n parser.add_argument('--no-dryrun', dest='dryrun', action='store_false')\n parser.set_defaults(dryrun=False)\n\n subparsers = parser.add_subparsers()\n\n ######################################################\n # Paraser for everything\n parser_all = subparsers.add_parser(\n \"all\", help=\"Cleanup everything\")\n\n add_deployments_args(parser_all)\n add_workflow_args(parser_all)\n\n parser_all.set_defaults(func=cleanup_all)\n\n ######################################################\n # Parser for argo_workflows\n parser_argo = subparsers.add_parser(\n \"workflows\", help=\"Cleanup workflows\")\n\n add_workflow_args(parser_argo)\n parser_argo.set_defaults(func=cleanup_workflows)\n\n ######################################################\n # Parser for endpoints\n parser_endpoints = subparsers.add_parser(\n \"endpoints\", help=\"Cleanup endpoints\")\n\n parser_endpoints.set_defaults(func=cleanup_endpoints)\n\n ######################################################\n # Parser for firewallrules\n parser_firewall = subparsers.add_parser(\n \"firewall\", help=\"Cleanup firewall rules\")\n\n parser_firewall.set_defaults(func=cleanup_firewall_rules)\n\n ######################################################\n # Parser for service accounts\n parser_service_account = subparsers.add_parser(\n \"service_accounts\", help=\"Cleanup service accounts\")\n\n parser_service_account.set_defaults(func=cleanup_service_accounts)\n\n ######################################################\n # Parser for deployments\n parser_deployments = subparsers.add_parser(\n \"deployments\", help=\"Cleanup deployments\")\n\n add_deployments_args(parser_deployments)\n parser_deployments.set_defaults(func=cleanup_deployments)\n args = parser.parse_args()\n\n util.maybe_activate_service_account()\n args.func(args)\n\nif 
__name__ == \"__main__\":\n main()\n","sub_path":"py/kubeflow/testing/cleanup_ci.py","file_name":"cleanup_ci.py","file_ext":"py","file_size_in_byte":18868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"223118536","text":"class Arquivo:\n\tdef __init__(self, *kwargs):\n\t\tself.pastas = [ './data/database/tables/create/',\n\t\t\t\t\t\t'./data/database/querrys/insert/',\n\t\t\t\t\t\t'./data/database/querrys/select/']\n\n\t\tself.arquivos = [self.pastas[0]+'CLIENTE.sql',\n\t\t\t\t\t\t self.pastas[0]+'CARRO.sql',\n\t\t\t\t\t\t self.pastas[0]+'POSTO.sql',\n\t\t\t\t\t\t self.pastas[0]+'BOMBA.sql',\n\t\t\t\t\t\t self.pastas[0]+'ABASTECIMENTO.sql',\n\n\t\t\t\t\t\t self.pastas[1]+'CLIENTE.sql',\n\t\t\t\t\t\t self.pastas[1]+'CARRO.sql',\n\t\t\t\t\t\t self.pastas[1]+'POSTO.sql',\n\t\t\t\t\t\t self.pastas[1]+'BOMBA.sql',\n\t\t\t\t\t\t self.pastas[1]+'ABASTECIMENTO.sql',\n\n\t\t\t\t\t\t self.pastas[2]+'mostrarAQuantidadeDeCombustivelEOTipoQueForamVendidosEmUmPosto.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarAQuantidadeDeVezesQueOsClientesAbasteram.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarAQuantidadeDeVezesQueUmCliente.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarAsVendasPorMes.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarClientesPorOrdemAlfabetica.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarClientesQueAbasteceramComUmTipoDeGasolina.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarClientesQueAbasteceramEmUmaBomba.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarClientesQueAbasteceramEmUmMes.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarClientesQueAbasteceramEmUmOuMaisPostos.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarClientesQueAbasteceramUmValorMaiorQue.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarHistoricoDoClientePorCpf.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarNotaParaOCliente.sql',\n\t\t\t\t\t\t self.pastas[2]+'mostrarQualCombustivelVendeuMais.sql'\n\t\t\t\t\t\t ]\n\n\t\tself.indiceDoArquivo = -1\n\t\n\tdef obterCaminhos(self):\n\t\treturn self.pastas\n\n\tdef obterArquivos(self):\n\t\treturn self.arquivos\n\n\tdef atualizarIndice(self, sentido=None):\n\t\tif self.indiceDoArquivo >= -1:\n\t\t\tif sentido == '+':\n\t\t\t\tself.indiceDoArquivo += 1\n\n\t\tif self.indiceDoArquivo > -1:\n\t\t\tif sentido == '-':\n\t\t\t\tself.indiceDoArquivo -= 1\n\n\tdef validarIndice(self):\n\t\ttry:\n\t\t\tself.arquivos[self.indiceDoArquivo]\n\t\t\treturn True\n\t\texcept:\n\t\t\treturn False\n\n\tdef obterArquivo(self, sentido=None):\n\t\tif self.validarIndice() == False:\n\t\t\tself.atualizarIndice('-')\n\n\t\tself.atualizarIndice(sentido)\n\n\t\tif self.indiceDoArquivo != -1:\n\t\t\tindice = self.indiceDoArquivo\n\t\telse:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn self.arquivos[indice]\n\t\texcept:\n\t\t\tpass\n\n\tdef abrirArquivo(self, arquivo):\n\t\ttry:\n\t\t\tarqv = open(arquivo)\n\t\t\tarq = arqv.read()\n\t\t\tarqv.close()\n\t\t\treturn arq\n\t\texcept:\n\t\t\treturn None\n","sub_path":"src/control/bancodedados.py","file_name":"bancodedados.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"22312669","text":"#!/usr/bin/python\n\n'''\nconst solution = (arr) => {\n if (!arr) return \"\";\n if (arr.length === 0) return \"\";\n const sum = (arr, idx) => {\n if (idx - 1 < arr.length) {\n if (arr[idx - 1] === -1) return 0;\n return arr[idx - 1] + sum(arr, idx * 2) + sum(arr, idx * 2 + 1);\n }\n return 0;\n };\n const left = sum(arr, 2);\n const right = sum(arr, 3);\n return 
(left == right) ? \"\" : (left > right ? \"Left\" : \"Right\");\n};\n'''\n\n\ndef CBT(arr=''):\n result = 0\n if arr == '' or arr == 0:\n print('None')\n else:\n for i in range(0, len(arr)):\n # print(i, arr[i])\n if arr[i-1] == -1:\n return 0\n else:\n result = arr[i-1] + sum(arr, i*2) + sum(arr, i*2+1)\n\nCBT([3, 6, 2, 9, -1, 10])\n\ndef solution(n, a, b):\n count = 0\n if n == 0:\n return \"No stone to start with\"\n while n > max(a, b):\n n = n-a-b\n count += 1\n print(count, n, a, b)\n if count%2==0:\n if n < a:\n return False\n else:\n return True\n else:\n if n > b:\n return True\n else:\n return False\n# print(solution(9088911,65,117))\n# print(solution(7620085,17,110))\n# print(solution(7576302, 48, 99))\n\ndef next_palindrome(num=None):\n next_palin = num\n try:\n if isinstance(num, str)==True:\n print(\"{} is not an integer\".format(num))\n else:\n if num < 10:\n next_palin = 11\n else:\n next_num = int(num) + 1\n while next_num != int(str(next_num)[::-1]):\n next_num += 1\n next_palin = next_num\n print('The {} next palindrome = {}'.format(num, next_palin))\n return next_palin\n except:\n print(\"{} is not an integer\".format(num))\n\nnext_palindrome(0)\nnext_palindrome(10)\nnext_palindrome(11)\nnext_palindrome(101)\nnext_palindrome()\nnext_palindrome(1000)\nnext_palindrome(12321)\nnext_palindrome(48576676)\nnext_palindrome(11.5)\nnext_palindrome(-1)\nnext_palindrome(-1000)\nnext_palindrome(0.3)\nnext_palindrome('Sunday')\n","sub_path":"results/tmp/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"650087459","text":"# -*- coding: UTF-8 -*-\n__author__ = 'lowID'\nfrom datetime import datetime\nfrom math import ceil\nimport time\n\n\nclass REST_API(object):\n @staticmethod\n def gen_show_info(status, content, description):\n if status in xrange(200, 600):\n return {'status': status,\n 'content': content,\n 'description': description\n }\n else:\n raise ValueError(\"Status code must between 200 and 599\")\n\n\nclass TDSUti(object):\n @staticmethod\n def request_form_to_show_dict(request):\n show_dict = dict()\n show_dict['title'] = request.form.get('title')\n show_dict['create_date'] = TDSUti.now_timestamp()\n show_dict['description'] = request.form.get('description', '')\n show_dict['visible'] = int(request.form.get('visible', 1))\n show_dict['image_url'] = request.form.get('image_url', '')\n show_dict['channel'] = request.form.get('channel')\n show_dict['episode'] = request.form.get('episode', '')\n show_dict['block'] = request.form.get('block', '')\n return show_dict\n\n @staticmethod\n def now_timestamp():\n return time.mktime(datetime.now().timetuple())\n\n @staticmethod\n def time_to_str(t):\n return datetime.fromtimestamp(t).strftime(\"%Y-%m-%d %H:%M\")\n\n @staticmethod\n def paging(item_count, displayed, current_page, pre_tail=2):\n pages = int(ceil(float(item_count) / displayed))\n if current_page > pages:\n current_page = 1\n result = dict()\n result['pages'] = pages\n result['current_page'] = current_page\n if pages < pre_tail*2+1:\n result['page_list'] = range(1, pages+1)\n elif current_page <= pre_tail:\n result['page_list'] = range(1, pre_tail*2+2)\n elif pages - current_page <= pre_tail:\n result['page_list'] = range(pages-pre_tail*2, pages+1)\n else:\n result['page_list'] = range(current_page-pre_tail, current_page+pre_tail+1)\n return 
result","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"653810164","text":"\nimport os,sys\nfrom PIL import Image\n\nclass GraphicObject:\n\tdef __init__(self,fname):\n\t\tself.fileName = fname\n\t\tself.image = Image.open(fname)\n\n\tdef width(self):\n\t\treturn self.image.size[0]\n\n\tdef height(self):\n\t\treturn self.image.size[1]\n\n\tdef show(self):\n\t\tself.image.show()\n\n\tdef makeSize(self,xSprites,ySprites):\n\t\tself.xSize = xSprites\n\t\tself.ySize = ySprites\n\t\tself.image = self.image.resize((16 * xSprites,16 * ySprites),Image.LANCZOS)\n\n\tdef toSpriteData(self,offsetX = 0,offsetY = 0):\n\t\tcropImage = self.image.crop((offsetX*16,offsetY*16,offsetX*16+16,offsetY*16+16))\n\t\timageData = list(cropImage.getdata())\n\t\tfor i in range(0,256):\n\t\t\tp = imageData[i]\n\t\t\tif p[3] < 32:\n\t\t\t\timageData[i] = 0xE3\n\t\t\telse:\n\t\t\t\timageData[i] = (p[0] & 0xE0)+((p[1] >> 3) & 0x1C) + ((p[2] >> 6) & 0x03)\n\t\t\t\tif imageData[i] == 0xE3:\n\t\t\t\t\timageData[i] = 0xC3\n\t\treturn imageData[:256]\n\n\tdef generateSimple(self):\n\t\th = open(\"demo.inc\",\"w\")\n\t\th.write(\": define.sprite\\n\")\t\t\n\t\tcode = self.toSpriteData()\n\t\tfor i in code:\n\t\t\th.write(\" {0} $5B p! \".format(i))\n\t\th.write(\"\\n;\\n\")\n\n\n\n\nimg = GraphicObject(\"spcyan.png\")\nimg.makeSize(1,1)\n#print(img.width(),img.height())\n#print(img.toSpriteData())\nimg.generateSimple()\t\n#img.show()\n","sub_path":"documents/experiments/sprites/old/convertsprite.py","file_name":"convertsprite.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"546971747","text":"\"\"\"Fixtures for pywmataio tests.\"\"\"\nimport json\nimport logging\nimport pathlib\nimport re\n\nimport pytest\nfrom aioresponses import CallbackResult, aioresponses\n\nfrom wmataio.bus.const import BusEndpoint\nfrom wmataio.const import (\n ADDITIONAL_PATH_HEADER,\n BASE_WMATA_URL,\n CLASS_HEADER,\n ENUM_HEADER,\n GEOCODE_URL,\n)\nfrom wmataio.rail.const import RailEndpoint\n\n_LOGGER = logging.getLogger(__name__)\n\nWMATA_MODEL_CLASS_MAP = {\n BusEndpoint.__name__: {\"class\": BusEndpoint, \"api_type\": \"bus\"},\n RailEndpoint.__name__: {\"class\": RailEndpoint, \"api_type\": \"rail\"},\n}\n\n\ndef _process_fixture_path_as_result(path: pathlib.Path) -> CallbackResult:\n \"\"\"Process fixture data and return callback result.\"\"\"\n if not path.exists():\n _LOGGER.error(\"Fixture %s does not exist\", path)\n return CallbackResult(status=404)\n\n with open(path, \"r\") as fp:\n data = json.load(fp)\n\n return CallbackResult(status=200, payload=data)\n\n\ndef _wmata_callback(url: str, **kwargs) -> CallbackResult:\n \"\"\"Respond to WMATA API calls.\"\"\"\n _LOGGER.debug(\"Received request for %s\", url)\n params = kwargs.get(\"params\")\n headers = kwargs[\"headers\"]\n class_name = headers[CLASS_HEADER]\n enum_name = headers[ENUM_HEADER]\n additional_path = headers[ADDITIONAL_PATH_HEADER]\n cls_data = WMATA_MODEL_CLASS_MAP[class_name]\n enum_: BusEndpoint | RailEndpoint = cls_data[\"class\"][enum_name]\n api_type = cls_data[\"api_type\"]\n\n base_fixture_name = enum_.name.lower()\n\n additional_path_fixture_name = \"\"\n if additional_path:\n additional_path_fixture_name = additional_path.replace(\"/\", \"_\")\n\n params_fixture_name = \"\"\n if params:\n params_fixture_name = 
\"_\".join(f\"{k}_{v}\" for k, v in params.items())\n\n fixture_name = base_fixture_name\n if additional_path_fixture_name or params_fixture_name:\n fixture_name = \".\".join(\n [base_fixture_name, additional_path_fixture_name, params_fixture_name]\n )\n\n return _process_fixture_path_as_result(\n pathlib.Path(f\"test/fixtures/models/{api_type}/{fixture_name}.json\")\n )\n\n\n@pytest.fixture(name=\"wmata_responses\")\ndef mock_wmata_responses():\n \"\"\"Mock aiohttp response from WMATA API.\"\"\"\n with aioresponses() as m:\n m.get(re.compile(f\"{BASE_WMATA_URL}.*$\"), callback=_wmata_callback, repeat=True)\n yield m\n\n\ndef _geocode_callback(url: str, **kwargs) -> CallbackResult:\n \"\"\"Respond to Geocode API calls.\"\"\"\n _LOGGER.debug(\"Received request for %s\", url)\n address = kwargs[\"params\"][\"address\"]\n return _process_fixture_path_as_result(\n pathlib.Path(f\"test/fixtures/util/geocode/{address}.json\")\n )\n\n\n@pytest.fixture(name=\"geocode_responses\")\ndef mock_geocode_responses():\n \"\"\"Mock aiohttp responses from geocode_service.\"\"\"\n with aioresponses() as m:\n m.get(re.compile(f\"{GEOCODE_URL}.*$\"), callback=_geocode_callback, repeat=True)\n yield m\n","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"119834311","text":"from parsuite.core.argument import Argument\nfrom parsuite import helpers\nfrom parsuite.core.suffix_printer import *\nfrom pathlib import Path\nfrom sys import exit,stderr,stdout\nfrom collections import namedtuple\nfrom re import compile,match\n\nhelp='Parse the Nmap services file and dump the most commonly open ports.'\n\ndefault_services_path = '/usr/share/nmap/nmap-services'\n\nargs = [\n Argument('--input-file','-if', default=default_services_path,\n help='Input file to parse'),\n Argument('--top','-t', default=10, type=int,\n help='The top number of ports to return'),\n Argument('--csv-only','-csv', action='store_true',\n help='Return only the CSV output'),\n Argument('--all-protocols','-ap', action='store_true',\n help='Determine if all protocols should be dumped'),\n Argument('--tcp', action='store_true',\n help='Dump the top tcp services'),\n Argument('--sctp', action='store_true',\n help='Dump the top sctp services'),\n Argument('--udp',action='store_true',\n help='Dump the top udp services'),\n Argument('--minimum-frequency', '-mf', default=0.000001,\n type=float,\n help='Minimum frequency that must be met for a given service')\n]\n\nService = namedtuple(\n 'Service',\n ['name','port','protocol','frequency']\n)\n\nservice_re = compile('^(?P(\\w|\\-|\\.|:)+)\\s+'\\\n '(?P[0-9]{1,5})/'\\\n '(?P(tcp|udp|sctp))\\s+'\\\n '(?P[0-9]\\.[0-9]+)')\n\ndef parse(csv_only=None,\n tcp=None, udp=None, sctp=None, top=None, all_protocols=False,\n minimum_frequency=None, **kwargs):\n\n if not Path(default_services_path).exists() and not input_file:\n esprint('Services file not detected. 
Either nmap isn\\'t installed or you\\'re not using'\\\n ' a real computer (Winders)\\n\\n Exiting like a pretentious boss')\n exit()\n\n esprint(f'Dumping the {top} ports')\n\n # make a list of desired protocols\n protocols = []\n\n if udp: protocols.append('udp')\n if tcp: protocols.append('tcp')\n if sctp: protocols.append('sctp')\n if not protocols or all_protocols: protocols = ['tcp','udp']\n\n services = {}\n for proto in protocols: services[proto] = {\n 'services':{},\n 'frequencies':[],\n 'top_ports':[]\n }\n \n # parse the services\n with open(default_services_path) as service_file:\n\n for line in service_file:\n\n # strip whitespace\n line = line.strip()\n\n # assure content is there for parsing\n if not line or line[0] == '#':\n continue\n\n # create the namedtuple\n groups = match(service_re, line).groupdict()\n groups['frequency'] = float(groups['frequency'])\n if groups['frequency'] < minimum_frequency:\n continue\n groups['port'] = int(groups['port'])\n service = Service(**groups)\n\n if not service.protocol in protocols:\n continue\n\n srvs = services[service.protocol]['services']\n freqs = services[service.protocol]['frequencies']\n\n if service.frequency not in freqs:\n freqs.append(service.frequency)\n\n if not service.frequency in srvs:\n srvs[service.frequency] = [service]\n else:\n srvs[service.frequency].append(service)\n\n # Collecting the top ports per protocol\n for proto in protocols:\n\n srvs = services[proto]\n freqs = sorted(srvs['frequencies'],key=float)[-top:]\n if not csv_only:\n print('{:-<39}'.format(''),file=stderr)\n print('{: >24}'.format(proto.upper()+' Services'),file=stderr)\n print('{:-<39}'.format(''),file=stderr)\n print('{:16}{:15} Service'.format('Freq','Port/Proto'),file=stderr)\n print('{: <16}{: <15}{: >8}'.format('----','----------','-------'),file=stderr)\n for freq in freqs:\n for s in services[proto]['services'][freq]:\n srvs['top_ports'].append(s.port)\n if not csv_only:\n print(f'{s.frequency:0<8}\\t{str(s.port)+\"/\"+s.protocol:8}\\t{s.name}',file=stderr)\n if not csv_only:\n print(file=stderr)\n\n if not csv_only:\n esprint('CSV List(s):\\n')\n for protocol in protocols:\n esprint(f'{protocol}:')\n ports = ','.join(\n [str(p) for p in sorted(services[protocol][\"top_ports\"])]\n )\n print(ports)\n\n return 0\n","sub_path":"parsuite/modules/nmap_top_port_dumper.py","file_name":"nmap_top_port_dumper.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"467871675","text":"#SPDX-License-Identifier: BSD-3-Clause\n#Copyright (c) 2021 NVIDIA CORPORATION. 
All rights reserved.\n\nfrom src.dr_common import *\nfrom src.dr_db import _db, _config_args\nfrom src.dr_ste import dr_parse_ste\nfrom src.dr_hw_resources import dr_parse_fw_stc_action_get_obj_id, dr_parse_fw_stc_get_addr, dr_parse_fw_modify_pattern\nfrom src.dr_visual import interactive_progress_bar\n\n\ndef parse_fw_stc_rd_bin_output(stc_index, load_to_db, file):\n _dests = {}\n _config_args[\"tmp_file\"] = open(_config_args.get(\"tmp_file_path\"), 'rb+')\n bin_file = _config_args.get(\"tmp_file\")\n stc = ''\n\n #There are 68B of prefix data before first STC dump\n data = bin_file.read(68)\n while data:\n #Leading zeros will be ignored\n data = hex(int.from_bytes(data, byteorder='big'))\n data_type = data[2:8]\n if data_type == RESOURCE_DUMP_SEGMENT_TYPE_STC_BIN:\n stc = '0x' + data[32:]\n data = bin_file.read(48)\n continue\n elif data_type[:-1] == RESOURCE_DUMP_SEGMENT_TYPE_ACTION_STC_BIN:\n stc_action = '0x' + data[31:]\n obj = dr_parse_fw_stc_action_get_obj_id(stc_action)\n if obj != None:\n addr = dr_parse_fw_stc_get_addr(stc)\n write_line = '%s,%s,%s,%s\\n' % (MLX5DR_DEBUG_RES_TYPE_ADDRESS, addr, obj.get(\"type\"), obj.get(\"id\"))\n file.write(write_line)\n _dests[addr] = obj\n\n data = bin_file.read(80)\n\n bin_file.close()\n _config_args[\"tmp_file\"] = None\n\n if load_to_db:\n _db._term_dest_db.update(_dests)\n\n\ndef parse_fw_ste_rd_bin_output(fw_ste_index, load_to_db, file):\n min_addr = '0xffffffff'\n max_addr = '0x00000000'\n first_ste = True\n ste_dic = {}\n count = 0\n\n _config_args[\"tmp_file\"] = open(_config_args.get(\"tmp_file_path\"), 'rb+')\n bin_file = _config_args.get(\"tmp_file\")\n\n file.write(MLX5DR_DEBUG_RES_TYPE_FW_STE + ',' + fw_ste_index + '\\n')\n\n #First read DW(4B) each time till reaching first STE\n data = bin_file.read(4)\n while data:\n data = hex(int.from_bytes(data, byteorder='big'))\n if data[2:8] == RESOURCE_DUMP_SEGMENT_TYPE_STE_BIN:\n #Seek to the first STE location in the bin_file\n bin_file.seek(count)\n break\n\n count += 4\n data = bin_file.read(4)\n\n #Each STE dump contain 64B(STE) + 16(STE prefix)\n data = bin_file.read(80)\n\n while data:\n #Leading zeros will be ignored\n data = hex(int.from_bytes(data, byteorder='big'))\n if data[2:8] == RESOURCE_DUMP_SEGMENT_TYPE_STE_BIN:\n ste = '0x' + data[32:]\n hit_add = ste[32 : 41]\n if first_ste:\n ste_addr = '0x' + data[16:24]\n if ste_addr < min_addr:\n min_addr = ste_addr\n first_ste = False\n if int(hit_add, 16) & STE_ALWAYS_HIT_ADDRESS != STE_ALWAYS_HIT_ADDRESS:\n ste_addr = '0x' + data[16:24]\n ste_prefix = MLX5DR_DEBUG_RES_TYPE_STE + ','\n ste_prefix += ste_addr + ','\n ste_prefix += fw_ste_index + ','\n file.write(ste_prefix + ste + '\\n')\n if load_to_db:\n ste = dr_parse_ste([MLX5DR_DEBUG_RES_TYPE_STE, ste_addr, fw_ste_index, ste])\n ste_dic[ste_addr] = ste\n if ste_addr > max_addr:\n max_addr = ste_addr\n\n #Each STE dump contain 64B(STE) + 16(STE prefix)\n data = bin_file.read(80)\n\n bin_file.close()\n _config_args[\"tmp_file\"] = None\n\n if load_to_db:\n _db._fw_ste_db[fw_ste_index] = ste_dic\n _db._stes_range_db[fw_ste_index] = (min_addr, max_addr)\n\n file.write(\"%s,%s,%s,%s\\n\" % (MLX5DR_DEBUG_RES_TYPE_FW_STE_STATS, fw_ste_index, min_addr, max_addr))\n\n\ndef parse_fw_ste_rd_output(data, fw_ste_index, load_to_db, file):\n ste_dic = {}\n min_addr = '0xffffffff'\n max_addr = '0x00000000'\n data_arr = data.split('\\n')\n file.write(MLX5DR_DEBUG_RES_TYPE_FW_STE + ',' + fw_ste_index + '\\n')\n for count in range(0, len(data_arr)):\n if RESOURCE_DUMP_SEGMENT_TYPE_STE in 
data_arr[count][0:10]:\n ste_addr = (data_arr[count][22 : 32]).lower()\n ste = data_arr[count + 1] + data_arr[count + 2] + data_arr[count + 3] + data_arr[count + 4]\n ste = ste.replace(' 0x', '')\n hit_add = ste[32 : 41]\n if int(hit_add, 16) & STE_ALWAYS_HIT_ADDRESS != STE_ALWAYS_HIT_ADDRESS:\n ste_prefix = MLX5DR_DEBUG_RES_TYPE_STE + ','\n ste_prefix += ste_addr + ','\n ste_prefix += fw_ste_index + ','\n file.write(ste_prefix + ste + '\\n')\n if load_to_db:\n ste = dr_parse_ste([MLX5DR_DEBUG_RES_TYPE_STE, ste_addr, fw_ste_index, ste], True)\n ste_dic[ste_addr] = ste\n if ste_addr < min_addr:\n min_addr = ste_addr\n if ste_addr > max_addr:\n max_addr = ste_addr\n\n if load_to_db:\n #Save the STE's to FW STE DB\n _db._fw_ste_db[fw_ste_index] = ste_dic\n #Save the STE's range for this FW STE\n _db._stes_range_db[fw_ste_index] = (min_addr, max_addr)\n\n\ndef dump_hw_resources(load_to_db, dev, dev_name, file):\n total_resources = _config_args.get(\"total_resources\")\n interactive_progress_bar(0, total_resources, DUMPING_HW_RESOURCES)\n i = 0\n for stc_index in _db._stc_indexes_arr:\n output = call_resource_dump(dev, dev_name, \"STC\", stc_index, None, 'all', None)\n parse_fw_stc_rd_bin_output(stc_index, load_to_db, file)\n i += 1\n interactive_progress_bar(i, total_resources, DUMPING_HW_RESOURCES)\n\n #Dump FW STE's\n for fw_ste_index in _db._fw_ste_indexes_arr:\n output = call_resource_dump(dev, dev_name, \"FW_STE\", fw_ste_index, None, 'all', None)\n if _config_args.get(\"resourcedump_mem_mode\"):\n parse_fw_ste_rd_bin_output(fw_ste_index, load_to_db, file)\n else:\n parse_fw_ste_rd_output(output, fw_ste_index, load_to_db, file)\n\n i += 1\n interactive_progress_bar(i, total_resources, DUMPING_HW_RESOURCES)\n\n\ndef dr_hw_data_engine(obj, file):\n if _config_args.get(\"dump_hw_resources\"):\n load_to_db = _config_args.get(\"load_hw_resources\")\n dev = _config_args.get(\"shared_device\")\n if dev == None:\n dev = _config_args.get(\"device\")\n if dev == None:\n print('Unknown MST device')\n exit()\n dev_name = _config_args.get(\"dev_name\")\n _vhca_id = _config_args.get(\"vhca_id\")\n else:\n dev_name = _config_args.get(\"shared_dev_name\")\n _vhca_id = _config_args.get(\"shared_vhca_id\")\n\n file.write(MLX5DR_DEBUG_RES_TYPE_HW_RRESOURCES_DUMP_START + '\\n')\n _config_args[\"hw_resources_dump_started\"] = True\n _config_args[\"_dev\"] = dev\n _config_args[\"_dev_name\"] = dev_name\n _config_args[\"_vhca_id\"] = _vhca_id\n dump_hw_resources(load_to_db, dev, dev_name, file)\n","sub_path":"hws/src/dr_dump_hw.py","file_name":"dr_dump_hw.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"546830891","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\n__author__ = 'maxim'\n\nimport tensorflow as tf\n\nx = tf.Variable(10, name='x')\ny = tf.Variable(20, name='y')\nz = tf.constant(99, name='z') # not present in tensorboard!\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n writer = tf.summary.FileWriter('/tmp/inline', sess.graph)\n for _ in range(100):\n sess.run(tf.add(x, y)) # someone decides to be clever to save one line of code\n writer.close()\n\n # will print 100 add ops\n print(tf.get_default_graph().as_graph_def())\n","sub_path":"my_notes/02_inline_op.py","file_name":"02_inline_op.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"334136416","text":"import pico2d\nimport game_framework\n\nimage = None\n\nPIXEL_PER_METER = (10.0 / 0.3) # 10 pixel 30 cm\nRUN_SPEED_KMPH = 30.0 # Km / Hour\nRUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)\nRUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)\nRUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)\nvelocity = RUN_SPEED_PPS\nx = 100\ny = 150\nisMoveLeft=False\n\ndef draw():\n image.draw(x,y)\n pico2d.draw_rectangle(*get_bb())\n\ndef get_bb():\n return x-90,y-20,x+90,y+20\n\ndef update():\n global x\n global isMoveLeft\n if isMoveLeft:\n x -= velocity*game_framework.frame_time\n if(x<=100):\n isMoveLeft=False\n else:\n x += velocity*game_framework.frame_time\n if(x>=1300):\n isMoveLeft=True","sub_path":"brick.py","file_name":"brick.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"507083379","text":"import tensorflow as tf\n\n\nclass LinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, learning_rate, warmup_steps, name=None):\n super().__init__()\n self.learning_rate = learning_rate\n self.warmup_steps = warmup_steps\n self.name = name\n\n if isinstance(learning_rate, tf.keras.optimizers.schedules.LearningRateSchedule):\n self.lr_is_schedule = True\n self.warmup_rates = tf.linspace(0.0, learning_rate.initial_learning_rate, warmup_steps)\n else:\n self.lr_is_schedule = False\n self.warmup_rates = tf.linspace(0.0, learning_rate, warmup_steps)\n\n @tf.function\n def __call__(self, step):\n if step < self.warmup_steps - 1:\n step = tf.cast(step, tf.int32)\n return self.warmup_rates[step]\n else:\n if self.lr_is_schedule:\n return self.learning_rate(step - self.warmup_steps + 1)\n else:\n return self.learning_rate\n\n def get_config(self):\n return {\n \"learning_rate\": self.learning_rate,\n \"warmup_steps\": self.warmup_steps,\n \"name\": self.name\n }\n\n\ntf.keras.utils.get_custom_objects().update({\n \"LinearWarmup\": LinearWarmup,\n})\n","sub_path":"chambers/schedules.py","file_name":"schedules.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"55829324","text":"\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n\"\"\"\nThis example demonstrates how to create a sphere.\n\"\"\"\n\nimport sys\n\nfrom vispy import scene\nfrom vispy.visuals.transforms import STTransform\n\ncanvas = scene.SceneCanvas(keys='interactive', bgcolor='white',\n size=(800, 600), show=True)\n\nview = canvas.central_widget.add_view()\nview.camera = 'arcball'\n\nspheres = scene.Node(parent=view.scene)\nfor x in range(100):\n scene.visuals.Cube(size=1,parent=spheres,edge_color='#ff00ff', color = '#ff0000')\n'''\nindex = 0\nfor x in range(10):\n for y in range(10):\n for z in range(10):\n if(y%2==0):\n if x%2==0:\n spheres[index]= STTransform(translate=[x, z, y])\n #newlist[index,0] = ((x*10)+z+(y*100));\n else:\n spheres[index]= STTransform(translate=[x, 9-z, y])\n #newlist[index,0] = ((x*10)+9-z+(y*100));\n else:\n\n if x%2==0:\n spheres[index]= STTransform(translate=[9-x, 9-z, y])\n #newlist[index,0] = ((90-(x*10))+9-z+(y*100));\n else:\n spheres[index]= STTransform(translate=[9-x, z, y])\n #newlist[index,0] = ((90-(x*10))+z+(y*100));#sphere1.transform = STTransform(translate=[-2.5, 0, 0])\n index = index+1\n#sphere3.transform = STTransform(translate=[2.5, 0, 0])\n'''\n\n'''\ndef update(ev):\n global pos, color, line\n pos[:, 1] = np.random.normal(size=N)\n color = np.roll(color, 1, axis=0)\n line.set_data(pos=pos, color=color)\n\ntimer = app.Timer()\ntimer.connect(update)\ntimer.start(0)\n'''\nif __name__ == '__main__' and sys.flags.interactive == 0:\n canvas.app.run()\n","sub_path":"l3d-visualizer/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"216126482","text":"import pika\nimport sys\nimport time\nimport json\nimport threading\nimport re # For RegEx : To check IP address validity\nfrom database_management import *\nfrom client_submissions import submission\nfrom init_server import initialize_server\nclass InitializationError(Exception):\n\tpass\n\nclass manage_clients():\n\tchannel = ''\n\tdata_changed_flags = ''\n\ttask_queue = ''\n\tkey = ''\n\tjudge_key = ''\n\tconfig = ''\n\tfile_password = ''\n\talready_read = 0\n\tcodes = ''\n\tlanguages = ''\n\tclient_dict = {}\n\n\tdef prepare(data_changed_flags, task_queue, log_queue):\n\t\tmanage_clients.data_changed_flags = data_changed_flags\n\t\tmanage_clients.task_queue = task_queue\n\t\tmanage_clients.log_queue = log_queue\n\t\tmanage_clients.config = initialize_server.read_config()\n\t\tmanage_clients.codes = manage_clients.config['Problem Codes']\n\t\tmanage_clients.languages = manage_clients.config['Languages']\n\t\tsuperuser_username = manage_clients.config[\"Server Username\"]\n\t\tsuperuser_password = manage_clients.config[\"Server Password\"]\n\t\thost = manage_clients.config[\"Server IP\"]\n\t\tmanage_clients.key = manage_clients.config[\"Client Key\"]\n\t\tmanage_clients.judge_key = manage_clients.config[\"Judge Key\"]\n\t\tmanage_clients.file_password = manage_clients.config[\"File Password\"]\n\t\tprint(' [ START ] Client Manager subprocess started.')\n\t\tmanage_clients.log(' [ START ] Client Manager subprocess started.')\n\t\t\n\t\ttry:\n\t\t\tcreds = pika.PlainCredentials(superuser_username, superuser_password)\n\t\t\tparams = pika.ConnectionParameters(\n\t\t\t\thost = host, \n\t\t\t\tcredentials = creds, \n\t\t\t\theartbeat=0, \n\t\t\t\tblocked_connection_timeout=0\n\t\t\t)\n\t\t\tconnection = 
pika.BlockingConnection(params)\n\n\t\t\tchannel = connection.channel()\n\t\t\tmanage_clients.channel = channel\n\t\t\tchannel.exchange_declare(\n\t\t\t\texchange = 'connection_manager', \n\t\t\t\texchange_type = 'direct', \n\t\t\t\tdurable = True\n\t\t\t)\n\t\t\tchannel.exchange_declare(\n\t\t\t\texchange = 'broadcast_manager', \n\t\t\t\texchange_type = 'fanout', \n\t\t\t\tdurable = True\n\t\t\t)\n \n\t\t\tchannel.queue_declare(queue = 'client_requests', durable = True)\n\t\t\tchannel.queue_declare(queue = 'judge_requests', durable = True)\n\n\t\t\tchannel.queue_bind(exchange = 'connection_manager', queue = 'client_requests')\n\t\t\tchannel.queue_bind(exchange = 'connection_manager', queue = 'judge_requests')\n\n\t\t\t# Initialize run_id counter from database\n\t\texcept Exception as error:\n\t\t\tprint('[ CRITICAL ] Could not connect to RabbitMQ server : ' + str(error))\n\t\t\tmanage_clients.log('[ CRITICAL ] Could not connect to RabbitMQ server : ' + str(error))\n\t\t\t# Wait until GUI is closed!\n\t\t\t# Inform GUI thread\n\t\t\tmanage_clients.data_changed_flags[26] = 1\n\t\t\twhile manage_clients.data_changed_flags[7] !=1:\n\t\t\t\ttime.sleep(0.5)\n\t\t\tsys.exit()\n\n\t\ttry:\n\t\t\t# Initialize Run ID\n\t\t\trun_id = submission.init_run_id()\n\t\t\tif run_id == -1:\n\t\t\t\tprint('[ CLIENT ][ ERROR ] Run ID init failed!')\n\t\t\t\traise InitializationError\n\n\t\t\t# Initialize Client ID\n\t\t\tcid_status = previous_data.get_last_client_id()\n\t\t\tif cid_status == -1:\n\t\t\t\tprint('[ CLIENT ][ ERROR ] Client ID init failed!')\n\t\t\t\traise InitializationError\n\n\t\texcept InitializationError:\n\t\t\tprint('[ CLIENT ][ ERROR ] Process initialization failed! Restart Server.')\n\t\t\tconnection.close()\n\t\t\t# Wait until GUI is closed!\n\t\t\t# Inform GUI thread\n\t\t\tmanage_clients.data_changed_flags[26] = 1\n\t\t\twhile manage_clients.data_changed_flags[7] !=1:\n\t\t\t\ttime.sleep(0.5)\n\t\t\treturn\n\t\texcept:\n\t\t\tprint('[ ERROR ] Could not fetch previous client_id')\n\t\t\tmanage_clients.log('[ ERROR ] Could not fetch previous client_id')\n\t\telse:\n\t\t\t# Start listening to client_requests\n\t\t\tmanage_clients.listen_clients(connection, channel, superuser_username, superuser_password, host)\n\t\t\n\tdef log(message):\n\t\tmanage_clients.log_queue.put(message)\n\n\t# This function continously listens for client messages \n\tdef listen_clients(connection, channel, superuser_username, superuser_password, host):\n\t\ttry:\n\t\t\t# Clients send requests on client_requests\n\t\t\t# As soon as a new message is recieved, it is sent to client_message_handler for further processing\n\t\t\tprint('[ LISTEN ] Started listening on client_requests')\n\t\t\tmanage_clients.log('[ LISTEN ] Started listening on client_requests')\n\t\t\tchannel.basic_consume(\n\t\t\t\tqueue = 'client_requests', \n\t\t\t\ton_message_callback = manage_clients.client_message_handler,\n\t\t\t\texclusive = True, \t\t# Only server can listen to this queue\n\t\t\t\tauto_ack = False\n\t\t\t)\n\t\t\tchannel.start_consuming()\n\t\t# Handle keyboard interrupt ctrl+c and terminate successfully\n\t\texcept (KeyboardInterrupt, SystemExit):\n\t\t\tchannel.stop_consuming()\n\t\t\tprint('[ LISTEN ] STOPPED listening to client channel')\n\t\t\tmanage_clients.log('[ LISTEN ] STOPPED listening to client channel')\n\t\t\t\n\t\texcept (pika.exceptions.ChannelWrongStateError):\n\t\t\tprint('[ ERROR ] : Channel closed by Broker. 
Please restart')\n\t\t\tmanage_clients.log('[ ERROR ] : Channel closed by Broker')\n\n\t\texcept (pika.exceptions.ChannelClosedByBroker):\n\t\t\tprint( \n\t\t\t\t'[ ERROR ] : Could not get a lock on client_requests.' +\n\t\t\t\t' Please check management portal and remove any consumers from the queue'\n\t\t\t)\n\t\t\tmanage_clients.log(\n\t\t\t\t'[ ERROR ] : Could not get a lock on client_requests.' +\n\t\t\t\t'Please check management portal and remove any consumers from the queue'\n\t\t\t)\n\t\texcept Exception as error: \n\t\t\tprint('[ CLIENT PROCESS ][ CRITICAL ]: ' + str(error))\n\t\t\tmanage_clients.log('[ CLIENT PROCESS ][ CRITICAL ]: ' + str(error))\n\n\t\tfinally: \n\t\t\tmanage_clients.data_changed_flags[7] = 1\n\t\t\tconnection.close()\n\t\t\tprint('[ STOP ] Client subprocess terminated successfully!')\n\t\t\tmanage_clients.log('[ STOP ] Client subprocess terminated successfully!')\n\t\t\treturn\n\t\t\n\t# This function works on client messages and passes them on to their respective handler function\n\tdef client_message_handler(ch, method, properties, body):\n\t\tprint('\\n' + '#' * 100)\n\t\tprint('[ ALERT ] Recieved a new client message.')\n\t\tmanage_clients.log('[ ALERT ] Recieved a new client message.')\n\t\t\n\t\ttry:\n\t\t\t# Decode the message sent by client\n\t\t\tclient_message = str(body.decode('utf-8'))\n\t\t\t# JSON Parsing here \n\t\t\tjson_data = json.loads(client_message)\n\t\t\t# Validate Client Key( Make sure client is authentic! )\n\t\t\tclient_key = json_data.get(\"Client Key\")\n\t\t\tclient_code = json_data.get(\"Code\")\n\t\t\tclient_ip = json_data.get(\"IP\")\n\n\t\t\tif client_key == None or client_code == None or client_ip == None:\n\t\t\t\tprint('[ SECURITY ] Client data is not in the correct format.')\n\t\t\t\tmanage_clients.log('[ SECURITY ] Client data is not in the correct format.')\n\t\t\t\tprint('[ SECURITY ] Complete Message: ' + client_message)\n\t\t\t\tmanage_clients.log('[ SECURITY ] Complete Message: ' + client_message)\n\t\t\t\tch.basic_ack(delivery_tag = method.delivery_tag)\n\t\t\t\treturn\n\n\t\t\t# Strip IP address of spaces\n\t\t\tclient_ip = client_ip.replace(' ', '')\n\n\t\t\tif client_key != manage_clients.key and client_key != manage_clients.judge_key :\n\t\t\t\tprint('[ SECURITY ] Client Key did not match. Client ID: ' + str(json_data['ID']))\n\t\t\t\tmanage_clients.log('[ SECURITY ] Client Key did not match. 
Client ID: ' + str(json_data['ID']))\n\t\t\t\tprint('[ SECURITY ] Complete Message: ' + client_message)\n\t\t\t\tmanage_clients.log('[ SECURITY ] Complete Message: ' + client_message)\n\t\t\t\tch.basic_ack(delivery_tag = method.delivery_tag)\n\t\t\t\treturn\n\n\t\t\tif client_code == 'LOGIN':\n\t\t\t\tclient_username = json_data.get(\"Username\", 'NONE')\n\t\t\t\tclient_password = json_data.get(\"Password\", 'NONE')\n\t\t\t\tclient_id = json_data.get(\"ID\", 'NONE')\n\t\t\t\tclient_type = json_data.get(\"Type\", 'NONE')\n\t\t\t\tmanage_clients.client_login_handler(\n\t\t\t\t\tclient_key,\n\t\t\t\t\tclient_username, \n\t\t\t\t\tclient_password, \n\t\t\t\t\tclient_id, \n\t\t\t\t\tclient_type,\n\t\t\t\t\tclient_ip\n\t\t\t\t)\n\n\t\t\telif client_code == 'SUBMT':\n\t\t\t\t# recieved_client_username = json_data[\"Username\"]\n\t\t\t\tlocal_run_id = json_data.get(\"Local Run ID\", 'NONE')\n\t\t\t\tclient_id = json_data.get(\"ID\", 'NONE')\n\t\t\t\tproblem_code = json_data.get(\"PCode\", 'NONE')\n\t\t\t\tlanguage = json_data.get(\"Language\", 'NONE')\n\t\t\t\ttime_stamp = json_data.get(\"Time\", 'NONE')\n\t\t\t\tsource_code = json_data.get(\"Source\", 'NONE')\n\t\t\t\tusername = json_data.get(\"Username\", 'NONE')\n\t\t\t\tmanage_clients.client_submission_handler(\n\t\t\t\t\tclient_id,\n\t\t\t\t\tclient_ip,\n\t\t\t\t\tusername,\n\t\t\t\t\tlocal_run_id, \n\t\t\t\t\tproblem_code, \n\t\t\t\t\tlanguage, \n\t\t\t\t\ttime_stamp, \n\t\t\t\t\tsource_code\n\t\t\t\t)\n\t\t\telif client_code == 'QUERY':\n\t\t\t\tclient_id = json_data.get('ID', \"NONE\")\n\t\t\t\tquery = json_data.get('Query', \"NONE\")\n\t\t\t\tusername = json_data.get('Username', \"NONE\")\n\t\t\t\tquery = query[:100]\n\n\t\t\t\tif client_id == 'NONE' or client_id == \"Nul\":\n\t\t\t\t\tprint('[ REJECT ] Client has not logged in.')\n\t\t\t\t\tmanage_clients.log('[ REJECT ] Client has not logged in.')\n\t\t\t\t\treturn\n\n\t\t\t\tmanage_clients.client_query_handler(\n\t\t\t\t\tclient_id, \n\t\t\t\t\tusername,\n\t\t\t\t\tquery\n\t\t\t\t)\n\n\t\t\telif client_code == 'DSCNT':\n\t\t\t\tclient_username = json_data.get(\"Username\", \"NONE\")\n\t\t\t\tclient_id = json_data.get(\"ID\", \"NONE\")\n\t\t\t\tmanage_clients.client_logout_handler(\n\t\t\t\t\tclient_username, \n\t\t\t\t\tclient_id,\n\t\t\t\t\tclient_ip\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tprint('[ CODE ERROR ] Client sent wrong code. Complete Message: ' + client_message)\n\t\t\t\tmanage_clients.log('[ CODE ERROR ] Client sent wrong code. Complete Message: ' + client_message)\n\t\t\t\t# Raise Security Exception maybe?\n\t\texcept Exception as error:\n\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\t\t# print(exc_type, fname, )\n\t\t\tprint('[ ERROR ] Client sent unparsable data. Line: ' + str(exc_tb.tb_lineno) + ' Complete Message: ' + client_message + \"\\nError: \" + str(error))\n\t\t\tmanage_clients.log('[ ERROR ] Client sent unparsable data. 
Line: ' + str(exc_tb.tb_lineno) + ' Complete Message: ' + client_message + \"\\nError: \" + str(error))\n\t\t# Acknowledge the message\n\t\tch.basic_ack(delivery_tag = method.delivery_tag)\n\t\treturn\n\t\n\t# This function handles all client login requests\n\tdef client_login_handler(\n\t\t\tclient_key, \n\t\t\tclient_username, \n\t\t\tclient_password, \n\t\t\tclient_id, \n\t\t\tclient_type, \n\t\t\tclient_ip = '0.0.0.0'\n\t\t):\n\t\tif (\n\t\t\t\tclient_type == 'CLIENT' and \n\t\t\t\tclient_key != manage_clients.key or \n\t\t\t\tclient_type == 'JUDGE' and \n\t\t\t\tclient_key != manage_clients.judge_key\n\t\t\t):\n\t\t\t# REJECT\n\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'You are using an incompatible client.\\nPlease contact ADMIN. '\n\t\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\tmanage_clients.task_queue.put(message)\n\t\t\treturn\n\t\t\t\n\t\tmessage = ''\n\t\tprint(\n\t\t\t'[ LOGIN REQUEST ] ::: ' + \n\t\t\tstr(client_id) + \n\t\t\t' :::' + \n\t\t\tclient_username + \n\t\t\t'@' + client_password + \n\t\t\t'[ TYPE ] ' + \n\t\t\tclient_type + \n\t\t\t' IP: ' + \n\t\t\tclient_ip\n\t\t)\n\t\tmanage_clients.log(\n\t\t\t'[ LOGIN REQUEST ] ::: ' + \n\t\t\tstr(client_id) + \n\t\t\t' :::' + \n\t\t\tclient_username + \n\t\t\t'@' + \n\t\t\tclient_password + \n\t\t\t'[ TYPE ] ' + \n\t\t\tclient_type + \n\t\t\t' IP: ' + \n\t\t\tclient_ip\n\t\t)\n\n\t\ttry:\n\t\t\t# Declare queue with same name as client_username\n\t\t\tmanage_clients.channel.queue_declare(queue = client_username, durable = True)\n\t\texcept Exception as error:\n\t\t\tprint('[ ERROR ][ CRITICAL ] Could not declare queues: ' + str(error))\n\t\t\tmanage_clients.log('[ ERROR ][ CRITICAL ] Could not declare queues: ' + str(error))\n\t\t\treturn\n\n\t\ttry:\n\t\t\t#Bind the connection_manager exchange to client queue (queue name is same as username)\n\t\t\tmanage_clients.channel.queue_bind(exchange = 'connection_manager', queue = client_username)\n\t\t\tmanage_clients.channel.queue_bind(exchange = 'broadcast_manager', queue = client_username)\n\t\texcept Exception as error:\n\t\t\tprint('[ ERROR ][ CRITICAL ] Could not bind queues: ' + str(error))\n\t\t\tmanage_clients.log('[ ERROR ][ CRITICAL ] Could not bind queues: '+ str(error))\n\t\t\treturn\n\t\t\n\t\tif client_type == 'CLIENT':\n\t\t\t# If client logins have been halted by the ADMIN, Send a rejection message to the client\n\t\t\tif(manage_clients.data_changed_flags[2] == 0):\n\t\t\t\tprint('[ LOGIN ][ REJECT ] Rejected by ADMIN')\n\t\t\t\tmanage_clients.log('[ LOGIN ][ REJECT ] Rejected by ADMIN')\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'Logins are not allowed right now.\\nPlease wait for announcement.'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\t\t\t\n\n\t\t\t# The client listens on its own queue, whose name = client_username (Hard-coded)\n\t\t\t# This queue is declared in the ../Client/client.py file\n\t\t\t# Every response sent to client has 5 initial characters which specify what server is going to talk about.\n\t\t\t# 'VALID' signifies a VALID login.\n\t\t\t# 'INVLD' signifies an INVaLiD login attempt.\n\t\t\t# 'LRJCT' signifies a Login ReJeCTed message.\n\n\t\t\t# Validate the client from the database\n\t\t\tstatus = client_authentication.validate_client(client_username, client_password)\n\t\t\tstored_client_id = 
str(client_authentication.get_client_id(client_username))\n\t\t\tprint('[ LOGIN ] Stored client ID: ', stored_client_id)\n\t\t\tif stored_client_id != client_id and stored_client_id != str(-1):\n\t\t\t\tprint('[ ' + client_username + ' ] Client ID does not match.')\n\t\t\t\tmanage_clients.log('[ ' + client_username + ' ] Client ID does not match.')\n\t\t\t\tstatus = False\n\n\t\t\t# If login is successful:\n\t\t\tif status != True:\n\t\t\t\t# Reply 'Invalid credentials' to client\n\t\t\t\tprint('[ ' + client_username + ' ][ REJECT ] NOT verified.')\n\t\t\t\tmanage_clients.log('[ ' + client_username + ' ][ REJECT ] NOT verified.')\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'INVLD',\n\t\t\t\t\t'Receiver' : client_username\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t# Send response to client\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\t\t\t# Check if client has logged in for the first time or is already connected:\n\t\t\tpreviously_connected_state = client_authentication.check_connected_client(\n\t\t\t\tclient_username, \n\t\t\t\t'connected_clients'\n\t\t\t)\n\t\t\t# If client has NOT logged in for the first time\n\t\t\t# MAYBE A SECURITY EVENT?\n\t\t\t# Raise a confirmation box to ADMIN maybe?\n\t\t\t\n\t\t\tif previously_connected_state == 'Connected':\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ REJECT ] Multiple Logins' )\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ REJECT ] Multiple Logins' )\n\t\t\t\t# Reject client login\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'Maximum Login limit is 1 per user.'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\t\t\t\t# Raise a security event?\n\t\t\telif previously_connected_state == 'Disconnected':\n\t\t\t\tif client_id == 'Null':\n\t\t\t\t\t# If the client has disconnected, it must have remembered its client id\n\t\t\t\t\t# REJECT login\n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t\t'Message' : 'System has detected an unusual Login behavior. 
Please contact ADMIN.'\n\t\t\t\t\t}\n\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\treturn\n\n\t\t\t\tif manage_clients.data_changed_flags[27] == 0: \n\t\t\t\t\tprint('[ LOGIN ][ VALIDATION ] Checking client IP Address...')\n\t\t\t\t\tmanage_clients.log('[ LOGIN ][ VALIDATION ] Checking client IP Address...')\n\t\t\t\t\tstate = client_authentication.check_client_ip(client_id, client_ip)\n\t\t\t\telse:\n\t\t\t\t\tstate = 1\n\n\t\t\t\tif state == 1:\n\t\t\t\t\tprint('[ RE-LOGIN ][ ' + client_username + ' ][ ACCEPT ] Previous Client ID : ' + str(client_id) )\n\t\t\t\t\tmanage_clients.log('[ RE-LOGIN ][ ' + client_username + ' ][ ACCEPT ] Previous Client ID : ' + str(client_id) )\n\t\t\t\t\n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t'Code' : 'VALID',\n\t\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t\t'Client ID' : client_id, \n\t\t\t\t\t\t'Message' : 'Welcome back!.'\n\t\t\t\t\t}\n\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\tmanage_clients.task_queue.put(message) \n\n\t\t\t\t\t# Update client state from Disconnected to Connected \n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t'Code' : 'UpUserStat', \n\t\t\t\t\t\t'Username' : client_username,\n\t\t\t\t\t\t'State' : 'Connected',\n\t\t\t\t\t\t'IP' : client_ip\n\t\t\t\t\t}\n\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\t# #####################################################################\n\t\t\t\t\t# Check if contest has started, also send client the \n\t\t\t\t\t# START signal for contest\n\t\t\t\t\tif manage_clients.data_changed_flags[10] == 1:\n\t\t\t\t\t\t# Update self config\n\t\t\t\t\t\tmanage_clients.config = initialize_server.read_config()\n\t\t\t\t\t\ttotal_time = manage_clients.config['Contest Set Time']\n\t\t\t\t\t\tstart_time = initialize_server.get_start_time()\n\t\t\t\t\t\tend_time = initialize_server.get_end_time()\n\n\t\t\t\t\t\tcurrent_time = time.time()\n\t\t\t\t\t\ttime_difference = total_time - current_time\n\t\t\t\t\t\tremaining_time = time.strftime('%H:%M:%S', time.gmtime(time_difference))\n\n\t\t\t\t\t\tmessage = {\n\t\t\t\t\t\t\t'Code' : 'START', \n\t\t\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t\t\t'Duration' : remaining_time,\n\t\t\t\t\t\t\t'Start Time' : start_time,\n\t\t\t\t\t\t\t'End Time' : end_time,\n\t\t\t\t\t\t\t'Problem Key' : manage_clients.file_password\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\t\tprint('[ LOGIN ][ RESPONSE ] Sent START to ' + client_username)\n\t\t\t\t\t\tmanage_clients.log('[ LOGIN ][ RESPONSE ] Sent START to ' + client_username)\n\t\t\t\t\t\t\n\t\t\t\t\t#######################################################################\n\n\t\t\t\t\treturn\n\t\n\t\t\t\telse:\n\t\t\t\t\tprint('[ RE-LOGIN ][ ' + client_username + ' ][ REJECT ] IP validation failed')\n\t\t\t\t\tmanage_clients.log('[ RE-LOGIN ][ ' + client_username + ' ][ REJECT ] IP validation failed')\n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t\t'Client ID' : client_id, \n\t\t\t\t\t\t'Message' : 'Preliminary Validation Failed. 
Contact site ADMIN for more information.'\n\t\t\t\t\t}\n\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\treturn\n\n\t\t\t# If client has logged in for the first time\n\t\t\telif previously_connected_state == 'New':\n\t\t\t\t# Check for IP address Duplicacy\n\t\t\t\tif manage_clients.data_changed_flags[14] == 1:\n\t\t\t\t\tstatus = client_authentication.check_duplicate_ip(client_ip)\n\t\t\t\t\tif status == 0:\n\t\t\t\t\t\t# Unique IP\n\t\t\t\t\t\tprint('[ LOGIN ] IP duplicacy : None')\n\t\t\t\t\t\tmanage_clients.log('[ LOGIN ] IP duplicacy : None')\n\t\t\t\t\t\tpass\n\t\t\t\t\telif status == 1:\n\t\t\t\t\t\t# Duplicate IP address\n\t\t\t\t\t\tprint('[ LOGIN ][ REJECT] Duplicate IP Address.')\n\t\t\t\t\t\tmanage_clients.log('[ LOGIN ][ REJECT] Duplicate IP Address.')\n\t\t\t\t\t\tmessage = {\n\t\t\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t\t\t'Client ID' : client_id, \n\t\t\t\t\t\t\t'Message' : 'Multiple logins with same PC are not allowed.'\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('[ CLIENT ] Error while checking for client IP address.')\n\t\t\t\t\t\tmanage_clients.log('[ CLIENT ] Error while checking for client IP address.')\n\t\t\t\n\t\t\t\t# Fetch new client ID\n\t\t\t\tclient_id = client_authentication.generate_new_client_id()\n\t\t\t\t# Add client to connected users database\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'AddNewUser', \n\t\t\t\t\t'Username' : client_username,\n\t\t\t\t\t'State' : 'Connected',\n\t\t\t\t\t'IP' : client_ip,\n\t\t\t\t\t'ID' : client_id,\n\t\t\t\t\t'Password' : client_password,\n\t\t\t\t\t'Table' : 'connected_clients'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'AddNewScore', \n\t\t\t\t\t'Username' : client_username,\n\t\t\t\t\t'ID' : client_id,\n\t\t\t\t\t'Score' : 0,\n\t\t\t\t\t'Problems Solved' : 0,\n\t\t\t\t\t'Total Time' : '00:00:00'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ] Assigned : ' + str(client_id) )\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ] Assigned : ' + str(client_id) )\n\n\t\t\t\t# Reply to be sent to client\n\t\t\t\tserver_message = 'BitsOJ V1.0: Validation Successful.'\n\t\t\t\t\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'VALID', \n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Client ID' : client_id, \n\t\t\t\t\t'Message' : server_message\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\tprint('[ LOGIN ][ RESPONSE ] VALID to ' + client_username)\n\t\t\t\tmanage_clients.log('[ LOGIN ][ RESPONSE ] VALID to ' + client_username)\n\n\t\t\t\t# Check if contest has started, also send client the \n\t\t\t\t# START signal for contest\n\t\t\t\tif manage_clients.data_changed_flags[10] == 1:\n\t\t\t\t\t# Update self config\n\t\t\t\t\tmanage_clients.config = initialize_server.read_config()\n\t\t\t\t\ttotal_time = manage_clients.config['Contest Set Time']\n\t\t\t\t\tstart_time = initialize_server.get_start_time()\n\t\t\t\t\tend_time = initialize_server.get_end_time()\n\n\t\t\t\t\tcurrent_time = time.time()\n\t\t\t\t\ttime_difference = total_time - current_time\n\t\t\t\t\tremaining_time = time.strftime('%H:%M:%S', 
time.gmtime(time_difference))\n\n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t'Code' : 'START', \n\t\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t\t'Duration' : remaining_time,\n\t\t\t\t\t\t'Start Time' : start_time,\n\t\t\t\t\t\t'End Time' : end_time,\n\t\t\t\t\t\t'Problem Key' : manage_clients.file_password\n\t\t\t\t\t}\n\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\tprint('[ LOGIN ][ RESPONSE ] Sent START to ' + client_username)\n\t\t\t\t\tmanage_clients.log('[ LOGIN ][ RESPONSE ] Sent START to ' + client_username)\n\n\t\t\t\treturn\n\t\t\t\t\t\n\t\t\telif previously_connected_state == 'Blocked':\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ REJECT ] Blocked LOGIN attempt')\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ REJECT ] Blocked LOGIN attempt')\n\t\t\t\t# Reject client login\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'You are blocked from the contest!\\nPlease contact ADMIN.'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\t\t########################################################################################################################\n\t\t# Judge login is handled as a client to avoid redundancy in code\n\t\telif client_type == 'JUDGE':\n\t\t\tif(manage_clients.data_changed_flags[12] == 0):\n\t\t\t\tprint('[ LOGIN ][ REJECT ] Rejected by ADMIN')\n\t\t\t\tmanage_clients.log('[ LOGIN ][ REJECT ] Rejected by ADMIN')\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'Judge Logins are not allowed right now.'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\t\t\t# IN ALL REGARDS, CLIENT HERE MEANS A JUDGE\n\t\t\tstatus = client_authentication.validate_client(client_username, client_password)\n\n\t\t\t\n\t\t\t# If login is not successful:\n\t\t\tif status != True:\n\t\t\t\tprint('[ LOGIN ] Judge NOT verified.')\n\t\t\t\tmanage_clients.log('[ LOGIN ] Judge NOT verified.')\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'INVLD',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\tprint('[ LOGIN ][ REJECT ] Sent INVLD to ' + client_username)\n\t\t\t\tmanage_clients.log('[ LOGIN ][ REJECT ] Sent INVLD to ' + client_username)\n\t\t\t\treturn\n\n\t\t\t# Check if client has logged in for the first time or is already connected:\n\t\t\tpreviously_connected_state = client_authentication.check_connected_client(client_username, 'connected_judges')\n\n\t\t\tif previously_connected_state == 'Disconnected':\n\t\t\t\tstatus = client_authentication.validate_connected_judge(client_username, client_id, client_ip)\n\t\t\t\tif status == False:\n\t\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ RE-LOGIN ] Rejected : ID/IP mismatch')\n\t\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ RE-LOGIN ] Rejected : ID/IP mismatch')\n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t'Code' : 'LRJCT', \n\t\t\t\t\t\t'Receiver' : client_username, \n\t\t\t\t\t\t'ID' : client_id,\n\t\t\t\t\t\t'Message' : 'Login Rejected : IP mismatch'\n\t\t\t\t\t}\n\t\t\t\t\tmessage = json.dumps(message)\n\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\t\treturn\n\n\t\t\t\t# State = True here, which means a successful LOGIN\n\t\t\t\t# Update state in database ( from BitsOJCore 
)\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'UpJudgeStat', \n\t\t\t\t\t'Username' : client_username,\n\t\t\t\t\t'State' : 'Connected',\n\t\t\t\t\t'IP' : client_ip\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ RE-LOGIN ]')\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ RE-LOGIN ]')\n\t\t\t\t\n\t\t\t\tserver_message = 'Gotta work harder, Judge :)'\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'VALID', \n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'ID' : client_id,\n\t\t\t\t\t'Message' : server_message\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\t\t\telif previously_connected_state == 'Connected':\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ RE-LOGIN ] Rejected')\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ RE-LOGIN ] Rejected')\n\t\t\t\tserver_message = 'Rejected - Multiple logins are not allowed.'\n\t\t\t\t\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT', \n\t\t\t\t\t'Receiver' : client_username, \n\t\t\t\t\t'Message' : server_message\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\t\t\t# If client has logged in for the first time\n\t\t\telif previously_connected_state == 'New':\n\t\t\t\tjudge_session_key = client_authentication.generate_judge_key()\n\t\t\t\t# Add client to connected users database\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'AddNewUser', \n\t\t\t\t\t'Username' : client_username,\n\t\t\t\t\t'State' : 'Connected',\n\t\t\t\t\t'IP' : client_ip,\n\t\t\t\t\t'ID' : judge_session_key,\n\t\t\t\t\t'Password' : client_password,\n\t\t\t\t\t'Table' : 'connected_judges'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ JUDGE ][ VALID ] ID: ' + str(judge_session_key))\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ JUDGE ][ VALID ] ID: ' + str(judge_session_key))\n\n\t\t\t\t# Reply to be sent to judge\n\t\t\t\tserver_message = 'Hello Judge!'\n\t\t\t\t\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'VALID', \n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'ID' : judge_session_key, \n\t\t\t\t\t'Message' : server_message\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\t\t\t\t\n\t\t\telif previously_connected_state == 'Blocked':\n\t\t\t\tprint('[ LOGIN ][ ' + client_username + ' ][ REJECT ] Blocked LOGIN attempt')\n\t\t\t\tmanage_clients.log('[ LOGIN ][ ' + client_username + ' ][ REJECT ] Blocked LOGIN attempt')\n\t\t\t\t# Reject client login\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'LRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'You are blocked from the contest!\\nPlease contact ADMIN.'\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\treturn\n\n\tdef validate_ip(ip):\n\t\t# This function validates wherther an ip address matches coorect pattern or not.\n\t\t# Credits: GeeksForGeeks \n\t\ttry:\n\t\t\tregex = '''^(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\\\.( \n\t\t\t25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\\\.( \n\t\t\t25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\\\.( \n\t\t\t25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)'''\n\t\t\tif re.search(regex, ip):\n\t\t\t\t# valid IP address\n\t\t\t\treturn True\n\t\t\treturn 
False\n\t\texcept:\n\t\t\treturn False\n\t\t\n\tdef client_submission_handler(\n\t\t\tclient_id, \n\t\t\tclient_ip, \n\t\t\tclient_username, \n\t\t\tlocal_run_id, \n\t\t\tproblem_code, \n\t\t\tlanguage, \n\t\t\ttime_stamp, \n\t\t\tsource_code\n\t\t):\n\t\t# This block ensures that the config read by this process is latest\n\t\tif manage_clients.already_read == 0:\n\t\t\tmanage_clients.already_read = 1\n\t\t\tmanage_clients.config = initialize_server.read_config()\n\t\t\tmanage_clients.codes = manage_clients.config['Problem Codes']\n\t\t\tmanage_clients.languages = manage_clients.config['Languages']\n\n\t\ttext = (\n\t\t\t'[ SUBMISSION ] Client ID :' + \n\t\t\tstr(client_id) + \n\t\t\t' IP:' + \n\t\t\tclient_ip + \n\t\t\t' Username: ' + \n\t\t\tclient_username + \n\t\t\t' Problem:' + \n\t\t\tproblem_code + \n\t\t\t' Language :' + \n\t\t\tlanguage + \n\t\t\t' Time stamp :' + \n\t\t\ttime_stamp\n\t\t)\n\n\t\tprint(text)\n\t\tmanage_clients.log(text)\n\t\t\t\n\t\tcontest_start_time = manage_clients.config['Contest Start Time']\n\n\t\t# Validate client submisssion datatypes:\n\t\tflag = 0\n\t\tif len(str(client_id)) > 10 or len(client_username) > 20 or len(str(local_run_id)) > 5 or len(problem_code) > 10 or len(language) > 10:\n\t\t\tprint('[ SUBMISSION ][ EXCESSIVE DATA ] Validation failed!')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ EXCESSIVE DATA ] Validation failed!')\n\t\t\tflag = 1\n\t\tif len(time_stamp) != len('HH:MM:SS'):\n\t\t\tprint('[ SUBMISSION ][ EXCESSIVE DATA ] Validation failed!')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ EXCESSIVE DATA ] Validation failed!')\n\t\t\tflag = 1\n\t\tif initialize_server.convert_to_seconds(time_stamp) == -1:\n\t\t\tprint('[ SUBMISSION ][ Timestamp ] Validation failed!')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ Timestamp ] Validation failed!')\n\t\t\tflag = 1\n\t\tif manage_clients.validate_ip(client_ip) == False:\n\t\t\tprint('[ SUBMISSION ][ IP ] Validation failed!')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ IP ] Validation failed!')\n\t\t\tflag = 1\n\t\tif problem_code not in manage_clients.codes:\n\t\t\tprint('[ SUBMISSION ][ Problem Code ] Validation failed!')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ Problem Code ] Validation failed!')\n\t\t\tflag = 1\n\t\tif language not in manage_clients.languages:\n\t\t\tprint('[ SUBMISSION ][ Language ] Validation failed!')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ Language ] Validation failed!')\n\t\t\tflag = 1\n\t\t\n\n\t\tif flag == 1:\n\t\t\tprint('[ SUBMISSION ][ FAIL ] Preliminary validation Failed.')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ FAIL ] Preliminary validation Failed.')\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'SRJCT',\n\t\t\t\t'Receiver' : client_username,\n\t\t\t\t'Message' : 'Your submission could not be processed! 
Please contact ADMIN with your problem.',\n\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\ttry:\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\texcept Exception as error:\n\t\t\t\tprint('[ ERROR ][ SECURITY ] Could not send message to client: ', str(error))\n\t\t\t\tmanage_clients.log('[ ERROR ][ SECURITY ] Could not send message to client: ' + str(error))\n\t\t\treturn\n\n\t\tstatus = client_authentication.validate_connected_client(client_username, client_id, client_ip)\n\t\tif status == False:\n\t\t\tprint('[ SUBMISSION ][ FAIL ] Client could not be Validated.')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ FAIL ] Client could not be Validated.')\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'SRJCT',\n\t\t\t\t'Receiver' : client_username,\n\t\t\t\t'Message' : 'Your submission could not be processed! Validation failed.',\n\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\ttry:\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\texcept Exception as error:\n\t\t\t\tprint('[ ERROR ][ SECURITY ] Could not send message to client: ', str(error))\n\t\t\t\tmanage_clients.log('[ ERROR ][ SECURITY ] Could not send message to client: ' + str(error))\n\t\t\treturn\n\t\t\n\t\t# Validate Time\n\t\t# If the time sent by client is too far away from current time\n\t\t# then timestamp is considered to be the time server recieves the submission\n\t\tcurrent_time = time.strftime(\"%H:%M:%S\", time.localtime())\t\n\t\t# current_time = initialize_server.get_time_difference(contest_start_time, current_time)\n\t\ttime_difference = initialize_server.get_abs_time_difference(current_time, time_stamp)\n\n\t\t# We don't believe in clients, so timestamp is server time.\n\t\ttime_stamp = initialize_server.get_time_difference(contest_start_time, current_time)\n\t\t\n\t\t################################Preliminary Validation Finished##########################################\n\n\t\t# If contest is not in running state, reject all submissions.\n\t\t# This might reject some submissions when user sends code just before contest ends\n\t\tif manage_clients.data_changed_flags[10] != 1:\n\t\t\tprint('[ SUBMISSION ][ REJECT ] Contest is not running.')\n\t\t\tmanage_clients.log('[ SUBMISSION ][ REJECT ] Contest is not running.')\n\t\t\t# Send SRJCT : SubmissionReject\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'SRJCT',\n\t\t\t\t'Receiver' : client_username,\n\t\t\t\t'Message' : 'Your submission could not be processed! 
Contest status: NOT RUNNING.',\n\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\ttry:\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\texcept Exception as error:\n\t\t\t\tprint('[ ERROR ][ SECURITY ] Client has no username so could not send error code.')\n\t\t\t\tmanage_clients.log('[ ERROR ][ SECURITY ] Client has no username so could not send error code.')\n\t\t\treturn\n\n\t\t# If submissions are not allowed by ADMIN\n\t\tif(manage_clients.data_changed_flags[3] == 0):\n\t\t\tprint('[ SUBMISSION ] Rejected by ADMIN')\n\t\t\tmanage_clients.log('[ SUBMISSION ] Rejected by ADMIN')\n\t\t\t# Send SRJCT : SubmissionReject\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'SRJCT',\n\t\t\t\t'Receiver' : client_username,\n\t\t\t\t'Message' : 'Your submission could not be processed!\\nSubmissions are not allowed right now.',\n\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\ttry:\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\texcept Exception as error:\n\t\t\t\tprint('[ ERROR ][ SECURITY ] Client has no username so could not send error code.' )\n\t\t\t\tmanage_clients.log('[ ERROR ][ SECURITY ] Client has no username so could not send error code.' )\n\t\t\treturn\n\n\n\t\t# Check client status, and accept only if it is CONNECTED and not BLOCKED or NEW\n\t\tstate = client_authentication.check_connected_client(client_username, 'connected_clients')\n\t\tif state != 'Connected':\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'SRJCT',\n\t\t\t\t'Receiver' : client_username,\n\t\t\t\t'Message' : 'Your submission could not be processed. Please Login to send submissions.',\n\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\ttry:\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\texcept Exception as error:\n\t\t\t\tprint('[ ERROR ][ SECURITY ] Client attempted to send submission without being logged in.' )\n\t\t\t\tmanage_clients.log('[ ERROR ][ SECURITY ] Client attempted to send submission without being logged in.' )\n\t\t\treturn\n\n\t\t# Check if client has sent a submission in the previous 'time_minutes_limit' minutes, where 'time_minutes_limit' is set by the ADMIN\n\t\t# Reject the submission if this case is true\n\t\tprev_time = submissions_management.get_last_sub_time(client_id)\n\t\tstart_time = manage_clients.config[\"Contest Start Time\"]\n\n\t\tif prev_time == \"NONE\":\n\t\t\t# This is the first submission of the client\n\t\t\tpass\n\t\telse:\n\t\t\ttime_minutes_limit = manage_clients.data_changed_flags[21]\n\t\t\tdifference = initialize_server.get_time_difference(prev_time, time_stamp)\n\t\t\tdifference_seconds = initialize_server.convert_to_seconds(difference)\n\t\t\tdifference_minutes = int(difference_seconds / 60)\n\t\t\ttime_seconds_limit = time_minutes_limit * 60\n\n\t\t\tif difference_minutes < time_minutes_limit:\n\t\t\t\tprint('[ SUBMISSION ][ REJECT ] Client sent more than allowed submissions in the time frame.')\n\t\t\t\tmanage_clients.log('[ SUBMISSION ][ REJECT ] Client sent more than allowed submissions in the time frame.')\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'SRJCT',\n\t\t\t\t\t'Receiver' : client_username,\n\t\t\t\t\t'Message' : 'Your submission could not be processed. 
Resend after ' + str(time_seconds_limit - difference_seconds) + ' Seconds',\n\t\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\ttry:\n\t\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\texcept Exception as error:\n\t\t\t\t\tprint('[ ERROR ]Could not publish message to client.' )\n\t\t\t\t\tmanage_clients.log('[ ERROR ]Could not publish message to client.' )\n\t\t\t\treturn\n\n\t\t# Preliminary Checks successful : Process Submission now\n\t\ttry:\n\t\t\trun_id, source_file_name = submission.new_submission(client_id, problem_code, language, time_stamp, source_code)\n\t\t\t# Update database by BitsOJCore\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'AddNewSub', \n\t\t\t\t'RunID' : run_id,\n\t\t\t\t'Local ID' : local_run_id,\n\t\t\t\t'Client ID' : client_id,\n\t\t\t\t'Language' : language,\n\t\t\t\t'Source File Name' : source_file_name,\n\t\t\t\t'Problem Code' : problem_code,\n\t\t\t\t'Status' : 'Running',\n\t\t\t\t'Timestamp' : time_stamp\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\tprint('[ CLIENT ] Sent submission request to CORE')\n\t\t\tmanage_clients.log('[ CLIENT ] Sent submission request to CORE')\n\n\t\t\t# Send this run id to the client\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'RESPONSE',\n\t\t\t\t'Receiver' : client_username,\n\t\t\t\t'Run ID' : run_id,\n\t\t\t\t'Local Run ID' : local_run_id\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\tmanage_clients.task_queue.put(message)\n\t\t\t\n\t\t\t# Push the submission in judging queue\n\t\t\tprint('[ JUDGE ] Requesting a new judgement')\n\t\t\tmanage_clients.log('[ JUDGE ] Requesting a new judgement')\n\t\t\tmessage = {\n\t\t\t\t'Code' : 'JUDGE', \n\t\t\t\t'Client ID' : client_id, \n\t\t\t\t'Client Username' : client_username,\n\t\t\t\t'Run ID' : run_id,\n\t\t\t\t'Language' : language,\n\t\t\t\t'PCode' : problem_code,\n\t\t\t\t'Source' : source_code,\n\t\t\t\t'Local Run ID' : local_run_id,\n\t\t\t\t'Time Stamp' : time_stamp\n\t\t\t}\n\t\t\tmessage = json.dumps(message)\n\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\tprint('[ REQUEST ] New judging request sent successfully.')\n\t\t\tmanage_clients.log('[ REQUEST ] New judging request sent successfully.')\n\t\t\t#######################################################################\n\t\t\t\t\n\t\texcept Exception as error:\n\t\t\tprint('[ ERROR ] Client submisssion could not be processed : ' + str(error))\n\t\t\tmanage_clients.log('[ ERROR ] Client submisssion could not be processed : ' + str(error))\n\t\treturn\n\n\tdef client_query_handler(client_id, client_username, query):\n\t\tprint('[ QUERY ] From ' + str(client_id) + ' : ' + query)\n\t\tmanage_clients.log('[ QUERY ] From ' + str(client_id) + ' : ' + query)\n\n\t\tquery_id = submission.generate_query_id() \n\t\tprint('[ QUERY ] Assigned Query ID: ' + str(query_id))\n\t\tmanage_clients.log('[ QUERY ] Assigned Query ID: ' + str(query_id))\n\t\t# Update Database using BitsOJCore\n\t\tmessage = {\n\t\t\t'Code' : 'AddQuery',\n\t\t\t'Query ID' : query_id,\n\t\t\t'Client ID' : client_id,\n\t\t\t'Query' : query\n\t\t}\n\t\tmessage = json.dumps(message)\n\t\tmanage_clients.task_queue.put(message)\n\t\treturn\n\t\t\n\n\t# This function handles client logout requests\n\tdef client_logout_handler(client_username, client_id, client_ip):\n\t\tprint('[ LOGOUT ][ ', client_username, ' ] Initiated')\n\t\t# Get client username from database and validate\n\t\tdatabase_client_username = client_authentication.get_client_username(client_id) 
\n\t\tstatus = client_authentication.check_client_ip(client_id, client_ip)\n\t\t# If IP does not match\n\t\tif status == 0:\n\t\t\tprint('[ LOG OUT ][ ' + client_username + ' ][ REJECT ]')\n\t\t\tmanage_clients.log_queue.put('[ LOG OUT ][ ' + client_username + ' ][ REJECT ]')\n\t\t\treturn\n\n\t\tif database_client_username == client_username:\n\t\t\t# ie, client_username and client_id pair matches, \n\t\t\t# check if client is connected\n\t\t\tpreviously_connected_state = client_authentication.check_connected_client(client_username, 'connected_clients')\n\t\t\tif previously_connected_state == 'Connected':\n\t\t\t\tprint('[ LOG OUT ][ ' + client_username + ' ][ ACCEPT ]')\n\t\t\t\tmanage_clients.log_queue.put('[ LOG OUT ][ ' + client_username + ' ][ ACCEPT ]')\n\t\t\t\t# Disconnect client in database using BitsOJCore\n\t\t\t\tmessage = {\n\t\t\t\t\t'Code' : 'UpUserStat', \n\t\t\t\t\t'Username' : client_username,\n\t\t\t\t\t'State' : 'Disconnected',\n\t\t\t\t\t'IP' : client_ip\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\n\t\t\t\tmessage = {\n\t\t\t\t\t\"Code\" : \"SHUTDOWN\",\n\t\t\t\t\t\"Receiver\" : client_username\n\t\t\t\t}\n\t\t\t\tmessage = json.dumps(message)\n\t\t\t\tmanage_clients.task_queue.put(message)\n\t\t\telse:\n\t\t\t\tprint('[ LOG OUT ][ ' + str(client_id) + ' ][ REJECT ] Client is not connected.')\n\t\t\t\tmanage_clients.log_queue.put('[ LOG OUT ][ ' + str(client_id) + ' ][ REJECT ] Client is not connected.')\n\t\telse:\n\t\t\tprint('[ LOG OUT ][ ' + client_username + ' ][ REJECT ] ClientID does not match.')\n\t\t\tmanage_clients.log_queue.put('[ LOG OUT ][ ' + client_username + ' ][ REJECT ] ClientID does not match.')\n\n\t\treturn\n\n","sub_path":"Server/client_connections.py","file_name":"client_connections.py","file_ext":"py","file_size_in_byte":39135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"64972023","text":"from django.contrib import admin\n\nfrom edc.base.modeladmin.admin import BaseTabularInline, BaseModelAdmin\nfrom edc.export.actions import export_as_csv_action\n\nfrom apps.bcpp_household.models import HouseholdStructure\n\nfrom ..forms import HouseholdMemberForm\nfrom ..models import HouseholdMember\n\n\nclass HouseholdMemberInline(BaseTabularInline):\n model = HouseholdMember\n extra = 3\n\n\nclass HouseholdMemberAdmin(BaseModelAdmin):\n\n form = HouseholdMemberForm\n date_hierarchy = 'modified'\n actions = [export_as_csv_action(\"Export as csv\", fields=['initials', 'gender', 'age_in_years', 'present_today', 'study_resident', 'relation',\n 'eligible_member',\n 'eligible_subject',\n 'member_status'], extra_fields={'plot_identifier': 'household_structure__household__plot__plot_identifier'})]\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"household_structure\":\n kwargs[\"queryset\"] = HouseholdStructure.objects.filter(id__exact=request.GET.get('household_structure', 0))\n\n return super(HouseholdMemberAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n fields = ('household_structure',\n 'first_name',\n 'initials',\n 'gender',\n 'age_in_years',\n 'present_today',\n 'inability_to_participate',\n 'study_resident',\n 'relation')\n\n radio_fields = {\n \"gender\": admin.VERTICAL,\n \"relation\": admin.VERTICAL,\n \"present_today\": admin.VERTICAL,\n \"inability_to_participate\": admin.VERTICAL,\n \"study_resident\": admin.VERTICAL,\n }\n\n list_display = ('first_name', 'initials',\n 
'household_structure',\n 'to_locator',\n 'hiv_history',\n 'relation',\n 'visit_attempts',\n 'member_status',\n 'inability_to_participate',\n 'eligible_member',\n 'eligible_subject',\n 'enrollment_checklist_completed',\n 'enrollment_loss_completed',\n 'reported',\n 'refused',\n 'is_consented',\n 'eligible_htc',\n 'created',\n 'hostname_created')\n\n search_fields = [\n 'first_name',\n 'household_structure__id',\n 'household_structure__household__household_identifier',\n 'household_structure__household__id',\n 'household_structure__household__plot__plot_identifier',\n 'household_structure__household__plot__id',\n 'relation', 'id']\n\n list_filter = ('household_structure__survey__survey_name', 'present_today', 'study_resident', 'member_status','inability_to_participate',\n 'eligible_member', 'eligible_subject', 'enrollment_checklist_completed', 'enrollment_loss_completed', 'reported',\n 'refused', 'is_consented', 'eligible_htc', 'target', 'hiv_history', 'household_structure__household__community',\n 'modified', 'hostname_created', 'user_created', 'visit_attempts')\n\n list_per_page = 25\nadmin.site.register(HouseholdMember, HouseholdMemberAdmin)\n","sub_path":"apps/bcpp_household_member/admin/household_member_admin.py","file_name":"household_member_admin.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"303887728","text":"from django.db.models.base import ModelBase\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils import six\nfrom django.conf.urls import url\n\nfrom .options.generic import ModelOptions\n#from .views.generic.mixins import IssuesManagerRequiredTemplateView\nfrom .views.generic import Home\n\n\nclass AlreadyRegistered(Exception):\n pass\n\n\nclass NotRegistered(Exception):\n pass\n\n\nclass IssuesdbSite(object):\n home_view = Home\n _report_view = None\n _report_cover = None\n _report_pdf = None\n\n def __init__(self, name='admin', app_name='admin'):\n self._registry = {} # model_class class -> admin_class instance\n self.name = name\n self.app_name = app_name\n\n def register(self, model_or_iterable, options_class=None, **options):\n \"\"\"\n Registers the model(s) with the given sitew class.\n\n The model(s) should be Model classes, not instances.\n If an admin class isn't given, it will use ModelAdmin (the default\n admin options). If keyword arguments are given -- e.g., list_display --\n they'll be applied as options to the admin class.\n If a model is already registered, this will raise AlreadyRegistered.\n If a model is abstract, this will raise ImproperlyConfigured.\n \"\"\"\n if not options_class:\n options_class = ModelOptions\n\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model._meta.abstract:\n raise ImproperlyConfigured('The model %s is abstract, so it '\n 'cannot be registered with issuesdb.' 
% model.__name__)\n\n if model in self._registry:\n raise AlreadyRegistered(\n 'The model %s is already registered' % model.__name__,\n )\n\n self._registry[model] = options_class(model, self)\n\n def unregister(self, model_or_iterable):\n \"\"\"\n Unregisters the given model(s).\n If a model isn't already registered, this will raise NotRegistered.\n \"\"\"\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model not in self._registry:\n raise NotRegistered(\n 'The model %s is not registered' % model.__name__,\n )\n del self._registry[model]\n\n def register_report_view(self, report_view):\n self._report_view = report_view\n\n def register_report_cover(self, report_cover):\n self._report_cover = report_cover\n\n def register_report_pdf(self, report_pdf):\n self._report_pdf = report_pdf\n\n def get_urls(self):\n\n # Admin-site-wide views.\n urls = [\n url(r'^$', self.home_view.as_view(), name='issuesdb-home'),\n url(r'^view_report/(?P\\d+)/$',\n self._report_view.as_view(),\n name='issuesdb-view-report'),\n url(r'^view_report_cover/(?P\\d+)/$',\n self._report_cover.as_view(),\n name='issuesdb-view-report-cover'),\n url(r'^download_report_pdf/(?P\\d+)/$',\n self._report_pdf.as_view(),\n name='issuesdb-download-report-pdf'),\n\n ]\n\n # Add in each model's views.\n for model, model_options in six.iteritems(self._registry):\n urls += model_options.get_urls()\n return urls\n\n @property\n def urls(self):\n return self.get_urls()\n\n","sub_path":"issuesdb/sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"620047207","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 16 12:08:00 2020\n\n@author: longbao\n\"\"\"\n\nnumber = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]\n\ndef partition(array, start, end):\n pivot = array[start]\n low = start+1\n high = end\n \n while True:\n # move left\n while low <= high and array[high] >= pivot:\n high = high - 1\n \n # move right\n while low <= high and array[low] <= pivot:\n low = low + 1\n \n \n # we either found a value for both high and low that\n # is out of order or low is higher than high, in which\n # case we exit the loop\n if low <= high:\n array[low], array[high] = array[high], array[low]\n else:\n break\n \n array[start], array[high] = array[high], array[start]\n return high\n\ndef quick_sort(array, start, end):\n if start >= end:\n return\n \n p = partition(array, start, end)\n quick_sort(array, start, p-1)\n quick_sort(array, p+1, end)\n \nprint(number)\nquick_sort(number, 0, len(number) -1)\nprint(number)","sub_path":"DataScienceJob/src/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"134680267","text":"import re\n\ndef validate_email(email):\n pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n if re.match(pattern, email):\n return True\n else:\n return False\n\nr = \"[0-3]?[0-9]-[0-9]{2}-[0-9]{4}$\" #12-01-1998 (1-31)\n\nprint(validate_email('info@greyatom.co.in'))\nprint (validate_email('info@greyatom'))\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"642674389","text":"#trabajo03 variables\r\nmi_nombre_es=\"angel 
robinson\"\r\nnº_dni=74286646\r\nprimer_apellido=\"carrion\"\r\n\r\ndireccion=\"demetrio acosta\"\r\ncuadra_nº=8\r\ndepartamento=\"lambayeque\"\r\n\r\ndonate_de_organos=\"False\"\r\ncodigo_poblacion=45353098\r\nserie=2\r\n\r\nruc=\"1016650121\"\r\ncliente=\"rosa\"\r\nn_de_boleta=\"37038\"\r\n\r\nnombre_de_la_madre=\"carmen\"\r\nimporte=\"S/.25.00\"\r\nnombre_del_producto=\"guarda polvo\"\r\n\r\ndireccion_de_la_tienda=\"vicente de la vega #633\"\r\npropietario=\"villavicencio sanchez\"\r\nn_de_recibo=\"251-38839838\"\r\n\r\nconsultas=225534955\r\nconexion=\"monofasica\"\r\nconsumo=\"18 kwh\"\r\n\r\npotencia=\"1.00 kw\"\r\ninicio_de_contrato=31/10/2008\r\ntermino_de_contrato=30/10/209\r\n\r\nfecha_de_vencimiento=\"11/10/2019\"\r\ndescuento=16.00\r\ntotal_a_pagar=\"S/.15.00\"\r\n\r\nIGV=0.84\r\nalumbrado_publico=\"S/.0.42\"\r\npagina_web=\"www.distriluz.com.pe\"\r\n\r\ndNI_del_cliente=16824742\r\nnombre_de_la_empresa=\"nor oriente\"\r\nnombre_del_usuario=\"franquil\"\r\n\r\nmodulo=\"15\"\r\ndireccion_y_sentido=\"primer cuadrante\"\r\ntema=\"vectores\"\r\n\r\ntemperatura=\"18°c\"\r\nautor=\"a.venero\"\r\ncodigo_de_estudiante=\"14321d\"\r\n\r\nmatricula=\"S/.270\"\r\ntotal=\"S/.380\"\r\npagina_unprg=\"www.unprg.edu.pe\"\r\n\r\n\r\nnº_celular=980083183\r\nimei=12136168910213\r\nlanzamiento=2015\r\n\r\ninscripcion=13/10/2019\r\npromedio_final=14\r\nn_de_cursos=4\r\n\r\nplaca=\"3145-d\"\r\nchasis=13561\r\ncolor_vehiculo=\"rojo\"\r\n\r\nmodelo=\"l455-h\"\r\nlugra_de_compra=\"chiclayo\"\r\npartida=15328\r\n\r\nfilial=\"chiclayo\"\r\nboucher_n=123591\r\nagencia_de_pago=\"0233\"\r\n\r\ntipo=\"nacional\"\r\ndepartamento=\"lambayeque\"\r\nprovincia=\"chiclayo\"\r\n\r\nA_egreso=\"2017\"\r\nescuela=\"ingeneria electronica\"\r\nturno=\"mañana\"\r\n\r\nnº_sis=112\r\nciclo=\"2019II\"\r\nafiliado=\"sis\"\r\n\r\naula=2\r\nlocal=\"cpu chiclayo\"\r\nley_n=33790\r\n\r\nunidad=\"servicios\"\r\ntension=\"736 v\"\r\nhilos=5\r\n\r\ndistrito=\"la victoria\"\r\ndescripcion=\"shampoo herbal\"\r\nprecio=\"S/,10.50\"\r\n\r\nugel=\"ugel chiclayo\"\r\nnivel=\"secundaria\"\r\ncolegio_egresado=\"jorge basadre\"\r\n\r\ngrado=\"5to\"\r\nseccion=\"A\"\r\nsexo=\"masculino\"\r\n\r\nemoji=\"☺\"\r\nnombre_de_la_marca=\"pioneer\"\r\ncorreo_electronico=\"carrioncruz01@gmail.com\"\r\n\r\ncontrasena=\"1956unprg\"\r\ntalla=\"1.61m\"\r\npeso=\"62kg\"\r\n\r\ngravedad_de_la_tierra=\"9.8m/s\"\r\nformula_de_la_energia=\"e=mc\"\r\nautor_de_la_teoria_de_la_relatividad=\"alberth einsten\"\r\n\r\nsistema_operativo=\"android9.0\"\r\nsistema_operativo_de_la_computadora=\"windows10 ultimate\"\r\nnombre_del_videojuego=\"free fire\"\r\n\r\nanimal=\"gato\"\r\ncodigo_postal=25805\r\npi=3.1416\r\n\r\ncancion=\"tremor\"\r\nautor=\"matin garrix\"\r\ngenero=\"electronica\"\r\n\r\nheroe_lambayecano=\"jose abelardo quiñones\"\r\nmrta=\"movimiento revolucionario tupac amaru\"\r\nlider_del_grupo_terrorista_sendero_luminoso=\"abimael Guzman\"\r\n\r\nedad=17\r\npago_de_alquiler=\"S/.180\"\r\nintereses=\"s/.0.00\"\r\n\r\nubicacion_de_peru=\"america del sur\"\r\npais=\"peru\"\r\nn_de_tarjeta=\"5632 8210 0005 5799\"\r\n\r\narea_de_terreno=\"350 M2\"\r\nforma_de_pago=\"efectivo\"\r\nservicios=\"agua y desague\"\r\n\r\nlengua=\"castellano\"\r\nreligion=\"catolico\"\r\nestado_civil=\"soltero\"\r\n\r\nn_hermanos=\"2\"\r\ncolor_de_ojos=\"verdes\"\r\nestudian=\"si\"\r\n\r\ntienda=\"mifarma\"\r\ntipo_de_documeto=\"dni\"\r\nn_de_boleta=\"003562\"\r\n\r\nhora_de_operacion=\"9:45:53\"\r\nagente=\"farmancia x\"\r\nnombre=\"smith sanchez\"\r\n\r\ncategoria_vehicular=\"dos 
b\"\r\nclase_vehicular=\"A\"\r\nn_de_licencia=\"c4568792\"\r\n\r\nfecha_de_expedicion=\"29/03/2002\"\r\nfecha_de_revalidacion=\"05/08/2018\"\r\nfecha_de_nacimiento_del_condcutor=\"12/09/1988\"\r\n\r\ngrupo_sanguinio=\"O+\"\r\ntipo_de_moneda=\"sol\"\r\nocupacion=\"ama de casa\"\r\n\r\npagina_web_del_banco_de_la_nacion=\"www.bn.com.pe\"\r\nnombre_de_la_universidad=\"unprg\"\r\nnombre_del_curso=\"programacion 1\"\r\n\r\noperadora=\"movistar\"\r\ntipo_de_archivo=\"zip\"\r\nplan_contratado=\"20 mbps\"\r\n\r\ntipo_de_red=\"4G\"\r\nVPN=\"ninguna\"\r\ndireccion_ip=\"192.168.1.33\"\r\n\r\ndireccion_mac=\"04:1:67:62:d3:14\"\r\npuerta_de_enlace=\"192.168.1.1\"\r\nvelocidad_d=\"72 mbps\"\r\n\r\nprograma=\"python\"\r\nruta=\"chiclayo lamabayeque\"\r\nnumero_de_telefono=\"224468\"\r\n\r\ncurso_1=\"basica 1\"\r\ncurso_2=\"analisis matematico\"\r\ncurso_3=\"tecnicas de estudio\"\r\n\r\nfecha_y_ubigeo=\"130101\"\r\ndni_de_la_madre=\"16463165\"\r\nvideojuego2=\"gta\"\r\n\r\nnota=10\r\naplicacion=\"spotify\"\r\nnivel_de_bateria=80\r\n\r\nnombre=\"jose\"\r\ncurso=\"programacion\"\r\nturno=\"mañana\"\r\n\r\ngrupo=\"a\"\r\nuniversidad=\"unprg\"\r\nprofesor=\"Fernando\"\r\n\r\napellido_paterno=\"carrion\"\r\napellido_materno=\"cruz\"\r\nsegundo_nombre=\"robinson\"\r\n\r\npeso=\"53\"\r\naltura=\"1.57\"\r\nsexo=\"masculino\"\r\n\r\ncosto=\"2.80\"\r\ndescuento=\"49.9\"\r\ntienda=\"ripley\"\r\n\r\ndias_de_la_semana=\"7\"\r\nestudio=\"Falso\"\r\nes_responsable=\"Falso\"\r\n\r\ntemperatura=\"22\"\r\npais=\"peru\"\r\nlugar=\"costa\"\r\n\r\ndepartamento=\"lambayeque\"\r\ndistrito=\"chiclayo\"\r\nprovincia=\"la victoria\"\r\n\r\ndia_del_mes=\"21\"\r\nnumero_del_mes=\"10\"\r\ndia=\"lunes\"\r\n\r\nnro_de_cliente=\"500\"\r\nnombre_de_la_tienda=\"moxxi\"\r\nson_honestos=\"Verdadero\"\r\n\r\ninicial_del_auto=\"200976.78\"\r\npropietaria_del_auto=\"ana\"\r\nresponsable_con_las_cuotas=\"verdadero\"\r\n\r\nmarca_del_carro=\"toyota\"\r\ncuotas_del_carro=\"25\"\r\ncolor_del_carro=\"rojo\"\r\n\r\ndireccion=\"manuel seoane\"\r\ncuadra=\"3\"\r\ncolor_de_casa=\"azul\"\r\n\r\nnro_de_hermanos=\"3\"\r\nnro_de_tios=\"4\"\r\nnro_de_primos=\"17\"\r\n\r\narchivo=\"programacion\"\r\nnro_de_documentos=\"10\"\r\nnro_de_trabajos=\"03\"\r\n\r\n#variables agrupadas de tres\r\nprint(\"mi nombre es\",mi_nombre_es, \"mi primer apellido es\",primer_apellido, \"me indentifico con numero de dni\",nº_dni)\r\nprint(\"mi direccion actual es\", direccion, \"cuadra numero\",cuadra_nº, \"del departamento de\",departamento)\r\nprint(\"Es\", donate_de_organos,\"que dono organos\" \" y el codigo de poblacion es\", codigo_poblacion,\"serie\", serie)\r\nprint(\"el numero de ruc es\",ruc,\"el cliente es\",cliente,\"su numero de voleta es:\",n_de_boleta)\r\nprint(\"mi madre\",nombre_de_la_madre,\"me dio un importe de\",importe,\"para mi\",nombre_del_producto)\r\nprint(\"direcion de la tienda\",direccion_de_la_tienda,\"su propietario es\",propietario,\",el numero de recibo es\",n_de_recibo)\r\nprint(\"el codigo de consulta es:\",consultas,\"e alli hay wifi de conepcion tipo\",conexion,\"posee una corriente de\",consumo)\r\nprint(\"la potencia contratada es de\",potencia,\"cuyo inicio de contrato es\",inicio_de_contrato,\"y termina el\",termino_de_contrato)\r\nprint(\"la fecha de vencimiento es\",fecha_de_vencimiento,\"cuyo producto tiene un descuento de\",descuento,\"y el total de pago es de\",total_a_pagar)\r\nprint(\"el igv de un\",IGV,\"y el alumbrado publico es de un\",alumbrado_publico,\"para conocer mas puedes ingrear a la pagina web\",pagina_web)\r\nprint(\"el 
numero del dni de dicho cliente es\",dNI_del_cliente,\"este pertence a la empresa\",nombre_de_la_empresa,\"cuyo nombre es\",nombre_del_usuario)\r\nprint(\"el tema de analisis matematico es de\",tema,\"por ejemplo un vector puede estar en el\",direccion_y_sentido,\"con un modulo de\",modulo)\r\nprint(\"en lambayeque estamos a una temperatura de\",temperatura,\"el autor de la obra es\",autor,\"mi codigo de estudiante es\",codigo_de_estudiante)\r\nprint(\"la matricula del ciclo es\",matricula,\"pero el examen es de\",total,\"puedes ingresar a la pagina web\",pagina_unprg)\r\nprint(\"mi numero de telefono es\",nº_celular,\"con un imei\",imei,\"cuyo producto se lanzo en el\",lanzamiento)\r\nprint(\"lafechad de inscipcion fue el\",inscripcion,\"con un promedio final de\",promedio_final,\"el numero de curssos es de\",n_de_cursos)\r\nprint(\"el numero de placa\",placa,\"serie del chasis es\",chasis,\"el color del vehiculo es\",color_vehiculo)\r\nprint(\"el modelo de vehiculo es\",modelo,\"el lugra de compra es de la ciudad de\",lugra_de_compra,\"numero e partida es\",partida)\r\nprint(\"el numero de boucher es\",boucher_n,\"filial\",filial,\"y la agencia de pago es\",agencia_de_pago)\r\nprint(\"tipo\",tipo,\"del departamento de\",departamento,\"provincia\",provincia)\r\nprint(\"el año que egrese fue el\",A_egreso,\"a la escuela prof. de\",escuela,\"turno\",turno)\r\nprint(\"numero de sis es\",nº_sis,\"ciclo\",ciclo,\"afiliado al\",afiliado)\r\nprint(\"mi aula es\",aula,\"el local es\",local,\"y mi nº de ley es\",ley_n)\r\nprint(\"mi unidad es\",unidad,\"la tension es\",tension,\"el numero de hilos es\",hilos)\r\nprint(\"el distrito es\",distrito,\"uno el\",descripcion,\"y cuesta\",precio)\r\nprint(\"la ugel es\",ugel,\"mi nivel de estudios era\",nivel,\"el que colegio que termine mis estudios es\",colegio_egresado)\r\nprint(\"El grado es\",grado,\"la seccion\",seccion,\"mi tipo de sexo es\",sexo)\r\nprint(\"mi emoji favorito es\",emoji,\"en nombre de la marca se llama\",nombre_de_la_marca,\"mi correo personal es\",correo_electronico)\r\nprint(\"mi contraseña es\",contrasena,\"mi talla es\",talla,\"peso\",peso)\r\nprint(\"la gravedad de la tierra es\",gravedad_de_la_tierra,\"la formula de la energia es\",formula_de_la_energia,\"el inventor de la teoria de la relatividad es\",autor_de_la_teoria_de_la_relatividad)\r\nprint(\"el sistema es\",sistema_operativo,\"el sistema operativo de mi compu es\",sistema_operativo_de_la_computadora,\"mi juego favorito es\",nombre_del_videojuego)\r\nprint(\"mi animal favorito es\",animal,\"mi codigo postal es\",codigo_postal,\"el valor de pi es igual\",pi)\r\nprint(\"la cancion que mas escucho es\",cancion,\"el autor es\",autor,\"y es de tipo\",genero)\r\nprint(\"el heroe lambayecano es\",heroe_lambayecano, \"las iniciales de MRTA significa\",mrta,\"El lider del grupo sendero luminoso es\",lider_del_grupo_terrorista_sendero_luminoso)\r\nprint(\"mi edad es\",17,\"el alquiler de mi cuarto mensual es de\",pago_de_alquiler,\"el interes que me cobran es de\",intereses)\r\nprint(\"mi pais se encuentra ubicado es\",ubicacion_de_peru,\"se llama\",pais,\"mi numero de tarjeta es\",n_de_tarjeta)\r\nprint(\"el area del terreno que deseo comprar es de\",area_de_terreno,\"lo pagare en\",forma_de_pago,\"este terreno cuenta con\",servicios)\r\nprint(\"el lenguaje que utilizo para comunicarme es el\",lengua,\"soy de religion\",religion,\"mi estado civil es\",estado_civil)\r\nprint(\"mi numero de hermanos es\",n_hermanos,\"el color de mis ojos son\",color_de_ojos,\"ellos 
estudian:\",estudian)\r\nprint(\"realize mis compras en la farmacia\",tienda,\"con mi documento que es el\",tipo_de_documeto,\"el numero de comprobante\",n_de_boleta)\r\nprint(\"la hora que hice el deposito fue\",hora_de_operacion,\"en el agente de\",agente,\"a nombre de\",nombre)\r\nprint(\"la categoria behicular es\",categoria_vehicular,\"la clase vehicular es\",clase_vehicular,\"el numero de licencia es\",n_de_licencia)\r\nprint(\" mi nombre es\",nombre, \", y estoy en el curso de\", curso , \", en el turno de la \", turno)\r\nprint(\"soy del grupo\", grupo, \", de la universidad\", universidad ,\", con el profesor\", profesor)\r\nprint(\"mi apellido paterno es \", apellido_paterno ,\", y mi apeliido materno es \", apellido_materno ,\", y llevo segundo nombre que es \", segundo_nombre)\r\nprint(\"mi peso es \",peso,\" y mido \",altura,\" y el sexo es \",sexo)\r\nprint(\"el costo de un polo fue de \",costo,\"y tuvo un descuento de \",descuento,\"en la tienda\",tienda)\r\nprint(\"la semana tiene\",dias_de_la_semana,\" dias , en la cual ser responsable es \",es_responsable,\" y ser estudioso es \",estudio)\r\nprint(\"actualmente hay una temperatura de \",temperatura,\" grados en la \",lugar,\" del \",pais)\r\nprint(\"vivo en el departamento \",departamento,\" ,en el distrito de \",distrito,\" ,en la provincia de \",provincia)\r\nprint(\"el dia del mes de hoy dia es \",dia_del_mes,\", del mes \",numero_del_mes,\", del \",dia)\r\nprint(\"en la tienda \",nombre_de_la_tienda,\" tiene una cantidad de \",nro_de_cliente,\" de clientes, y todos ellos son honestos, \",son_honestos)\r\nprint(\"el inical de u auto es de \",inicial_del_auto,\" culla propietaria es \",propietaria_del_auto,\" ,y es responsable con sus cuotas ,\",responsable_con_las_cuotas)\r\nprint(\"un carro de la empresa \",marca_del_carro,\", tiene\",cuotas_del_carro,\" de pago , y es de color \",color_del_carro)\r\nprint(\"vivo en la avenida \",direccion,\" ,en la cuadra \",str(cuadra),\", en una casa de color \",color_de_casa)\r\nprint(\"el total de hermanos somos \",str(nro_de_hermanos),\" hermanos,con \", str(nro_de_tios),\" tios ,y \",nro_de_primos,\" numeros de primos\")\r\nprint(\"el nombre de mi archivo se llama \",archivo,\" ,de lo cual tiene \",nro_de_documentos,\" documentos y \",nro_de_trabajos,\" trabajos\")\r\nprint(\"pertenesco al grupo sangunieo\",grupo_sanguinio,\"tipo de moneda\",tipo_de_moneda,\"ocupacion\",ocupacion)\r\nprint(\"hola,soy de la universidad nacional\",nombre_de_la_universidad,\"asisto a clases de\",nombre_del_curso,\"mas imfo a\",pagina_web_del_banco_de_la_nacion)\r\nprint(\"el tipo de archivo es\",tipo_de_archivo,\",operadora\",operadora,\"cuenta con una plan de\",plan_contratado)\r\nprint(\"tipo de red\",tipo_de_red,\"vpn\",VPN,\"direccion del ip\",direccion_ip)\r\nprint(\"direccion del mac\",direccion_mac,\"y la puerta de enlaces es,\",puerta_de_enlace,\"a una velocidad de\",velocidad_d)\r\nprint(\"este programa es\",programa,\"cuya ruta es\",ruta,\"numero de telefono\",numero_de_telefono)\r\nprint(\"los cursos que llebo son\",curso_1,curso_2,curso_3)\r\nprint(\"fecha de ubigeo\",fecha_y_ubigeo,\"el dni de mi madre es\",dni_de_la_madre,\"mi udeo juego favorito es\",videojuego2)\r\nprint(\"escucho musica en spotyfy\",aplicacion,\"siempre cuando mi movil esta en\",nivel_de_bateria,\"nota\",nota)\r\n","sub_path":"carrion.py","file_name":"carrion.py","file_ext":"py","file_size_in_byte":12640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"652607869","text":"from skimage import io\r\nfrom skimage import color\r\nimport numpy as np\r\n\r\nrgb = io.imread('C:\\\\Users\\\\ZHAO Yuzhi\\\\Desktop\\\\dataset\\\\ILSVRC2012_train_256\\\\n01855032_5783.JPEG')\r\nprint(rgb.shape, rgb.dtype)\r\nnoise = np.random.normal(0, 10, (rgb.shape[0], rgb.shape[1], rgb.shape[2]))\r\nrgb = rgb + noise\r\nrgb = rgb.astype(np.uint8)\r\nprint(noise.shape)\r\nio.imshow(rgb)\r\nio.show()\r\n","sub_path":"testt.py","file_name":"testt.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"161633650","text":"# -*- encoding: utf-8 -*-\n'''\n@File : 204.count-primes.py\n@Time : 2020/01/31 00:23:28\n@Author : jhchen\n@E-mail : jianhuchen@163.com\n@License : Copyright(C), USTC\n@Desc : None\n'''\n#\n# @lc app=leetcode id=204 lang=python\n#\n# [204] Count Primes\n#\n\n# @lc code=start\nclass Solution(object):\n # def countPrimes(self, n):\n # \"\"\"方法一:暴力,超时\n # :type n: int\n # :rtype: int\n # \"\"\"\n # if n == 1 or n == 2:\n # return 0\n # count = 0\n # for i in range(2, n):\n # if self.isPrime(i):\n # count += 1\n # return count\n\n def countPrimes(self, n):\n \"\"\"方法2:\n ref: https://pic.leetcode-cn.com/23d348bef930ca4bb73f749500f664ccffc5e41467aac0ba9787025392ca207b-1.gif\n :type n: int\n :rtype: int\n \"\"\"\n if n < 2:\n return 0\n array = [True] * n\n array[0] = array[1] = False\n for i in range(2, int(n ** 0.5) + 1):\n if array[i] and self.isPrime(i):\n array[i*i:n:i] = [False] * len(array[i*i:n:i])\n return sum(array)\n\n def isPrime(self, n):\n if n <= 1:\n return False\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True\n# @lc code=end\nif __name__ == \"__main__\":\n r = Solution().isPrime(2)\n print(r)\n","sub_path":"LeetCode-Python/204.count-primes.py","file_name":"204.count-primes.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"296940795","text":"#!/usr/bin/python3\n\nyear = input(\"Saisissez une année : \")\nyear = int(year)\n\n# bissextile = False\n#\n# if year % 400 == 0:\n# bissextile = True\n# elif year % 100 == 0:\n# bissextile = False\n# elif year % 4 == 0:\n# bissextile = True\n# else:\n# bissextile = False\n#\n# if bissextile:\n# print(year,\"est bissextile\")\n# else:\n# print(year,\"n'est pas bissextile\")\n\nif year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):\n print(\"L'année\",year,\"est bissextile.\")\nelse:\n print(\"L'année\",year,\"n'est pas bissextile.\")\n","sub_path":"openClassRoom-bissextile.py","file_name":"openClassRoom-bissextile.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"360750667","text":"#!/usr/bin/python2.7\nimport sys\nimport os\nimport time\nimport datetime\nimport argparse\nimport logging\nimport json\nimport socket\nfrom hera_corr_f import HeraCorrelator, SnapFengine, __version__, __package__, helpers\n\nlogger = helpers.add_default_log_handlers(logging.getLogger(__file__))\nFAIL_COUNT_LIMIT = 5\n\nhostname = socket.gethostname()\n\n\ndef get_all_pam_stats(corr):\n \"\"\"\n Get PAM stats.\n returns: Dictionary of dictionaries\n dict[key1][key2][key3] = value\n where key1 is an antenna name, key2 is a polarization, key3 is a stat type (eg. 
\"pam_power\")\n \"\"\"\n rv = {}\n sensors = {}\n sensors[\"pam_east_powers\"] = []\n sensors[\"pam_north_powers\"] = []\n sensors[\"pam_attens\"] = []\n sensors[\"pam_voltages\"] = []\n sensors[\"pam_currents\"] = []\n sensors[\"pam_ids\"] = []\n for i in range(3):\n sensors[\"pam_east_powers\"] += [corr.do_for_all_f(\"power\", block=\"pams\", block_index=i, kwargs={\"name\":\"east\"})]\n sensors[\"pam_north_powers\"] += [corr.do_for_all_f(\"power\", block=\"pams\", block_index=i, kwargs={\"name\":\"north\"})]\n sensors[\"pam_attens\"] += [corr.do_for_all_f(\"get_attenuation\", block=\"pams\", block_index=i)]\n sensors[\"pam_voltages\"] += [corr.do_for_all_f(\"shunt\", block=\"pams\", block_index=i, kwargs={\"name\":\"u\"})]\n sensors[\"pam_currents\"] += [corr.do_for_all_f(\"shunt\", block=\"pams\", block_index=i, kwargs={\"name\":\"i\"})]\n sensors[\"pam_ids\"] += [corr.do_for_all_f(\"id\", block=\"pams\", block_index=i)]\n \n for feng in corr.fengs:\n for antn, antpol in enumerate(feng.ants):\n if antpol is None:\n continue\n try:\n host = feng.host\n except:\n continue\n ant, pol = helpers.hera_antpol_to_ant_pol(antpol)\n if ant not in rv.keys():\n rv[ant] = {\"e\":{}, \"n\":{}}\n\n block_index = antn // 2\n if pol == \"e\":\n try:\n rv[ant][pol][\"pam_power\"] = sensors[\"pam_east_powers\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"pam_power\"] = None\n try:\n rv[ant][pol][\"pam_atten\"] = sensors[\"pam_attens\"][block_index][host][0]\n except KeyError:\n rv[ant][pol][\"pam_atten\"] = None\n elif pol == \"n\":\n try:\n rv[ant][pol][\"pam_power\"] = sensors[\"pam_north_powers\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"pam_power\"] = None\n try:\n rv[ant][pol][\"pam_atten\"] = sensors[\"pam_attens\"][block_index][host][1]\n except KeyError:\n rv[ant][pol][\"pam_atten\"] = None\n \n try:\n rv[ant][pol][\"pam_voltage\"] = sensors[\"pam_voltages\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"pam_voltage\"] = None\n try:\n rv[ant][pol][\"pam_current\"] = sensors[\"pam_currents\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"pam_current\"] = None\n try:\n rv[ant][pol][\"pam_id\"] = sensors[\"pam_ids\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"pam_id\"] = None\n return rv\n\ndef get_all_fem_stats(corr):\n \"\"\"\n Get FEM stats.\n returns: Dictionary of dictionaries\n dict[key1][key2][key3] = value\n where key1 is an antenna name, key2 is a polarization, key3 is a stat type (eg. 
\"fem_power\")\n \"\"\"\n rv = {}\n sensors = {}\n sensors[\"fem_switches\"] = []\n sensors[\"fem_temps\"] = []\n sensors[\"fem_currents\"] = []\n sensors[\"fem_voltages\"] = []\n sensors[\"fem_ids\"] = []\n sensors[\"fem_imu\"] = []\n for i in range(3):\n sensors[\"fem_switches\"] += [corr.do_for_all_f(\"switch\", block=\"fems\", block_index=i)]\n sensors[\"fem_temps\"] += [corr.do_for_all_f(\"temperature\", block=\"fems\", block_index=i)]\n sensors[\"fem_currents\"] += [corr.do_for_all_f(\"shunt\", block=\"fems\", block_index=i, kwargs={\"name\":\"i\"})]\n sensors[\"fem_voltages\"] += [corr.do_for_all_f(\"shunt\", block=\"fems\", block_index=i, kwargs={\"name\":\"u\"})]\n sensors[\"fem_ids\"] += [corr.do_for_all_f(\"id\", block=\"fems\", block_index=i)]\n sensors[\"fem_imu\"] += [corr.do_for_all_f(\"imu\", block=\"fems\", block_index=i)]\n \n for feng in corr.fengs:\n for antn, antpol in enumerate(feng.ants):\n if antpol is None:\n continue\n try:\n host = feng.host\n except:\n continue\n ant, pol = helpers.hera_antpol_to_ant_pol(antpol)\n if ant not in rv.keys():\n rv[ant] = {\"e\":{}, \"n\":{}}\n\n block_index = antn // 2\n try:\n if sensors[\"fem_switches\"][block_index][host] is None:\n switch_state = None\n e_powered = None\n n_powered = None\n else:\n switch_state, e_powered, n_powered = sensors[\"fem_switches\"][block_index][host]\n rv[ant][pol][\"fem_switch\"] = switch_state\n rv[ant][pol][\"fem_e_lna_power\"] = e_powered\n rv[ant][pol][\"fem_n_lna_power\"] = n_powered\n except KeyError:\n rv[ant][pol][\"fem_switch\"] = None\n rv[ant][pol][\"fem_e_lna_power\"] = None\n rv[ant][pol][\"fem_n_lna_power\"] = None\n try:\n rv[ant][pol][\"fem_temp\"] = sensors[\"fem_temps\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"fem_temp\"] = None\n try:\n rv[ant][pol][\"fem_current\"] = sensors[\"fem_currents\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"fem_current\"] = None\n try:\n rv[ant][pol][\"fem_voltage\"] = sensors[\"fem_voltages\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"fem_voltage\"] = None\n try:\n rv[ant][pol][\"fem_id\"] = sensors[\"fem_ids\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"fem_id\"] = None\n try:\n rv[ant][pol][\"fem_imu_theta\"], rv[ant][pol][\"fem_imu_phi\"] = sensors[\"fem_imu\"][block_index][host]\n except KeyError:\n rv[ant][pol][\"fem_imu_theta\"], rv[ant][pol][\"fem_imu_phi\"] = None, None\n return rv\n\n\ndef get_poco_output(feng,redishost):\n \"\"\"\n Get pocket correlator output. 
Antennas and integration time \n polled from redis database.\n Params: feng: SnapFengine object that maps to a SNAP board\n redishost: Redis database to upload the data to.\n Returns: Dict: {'data':shape(fqs, pols), \n 'times':list of unix times}\n \"\"\"\n int_time = int(redishost.hget('poco', 'integration_time'))\n acc_len = int_time * 250e6/ (8192 * feng.corr.spec_per_acc)\n \n if (acc_len != feng.corr.get_acc_len()):\n feng.corr.set_acc_len(acc_len)\n \n antenna_pairs = ([[0,0],[0,1],[0,2],[1,1],[1,2],[2,2]])\n pair = (int(redishost.hget('poco','ant1')), int(redishost.hget('poco','ant2')))\n idx = antenna_pairs.index(pair)\n ant1, ant2 = antenna_pairs[(idx+1)%len(antenna_pairs)]\n redishost.hset('poco','ant1',ant1)\n redishost.hset('poco','ant2',ant2) \n\n ant1 *= 2; ant2 *= 2\n xcorr = []; times = []\n for i in range(4):\n xcorr.append(feng.corr.get_new_corr(ant1+i%2, (ant2+(i//2+i%2)%2)))\n times.append(time.time())\n xcorr = np.asarray(xcorr); times = np.asarray(times)\n redishost.hset('poco', 'corr', xcorr.tostring())\n redishost.hset('poco', 'times', times.tostring())\n\n # to unpack from string: c = struct.unpack('<8192d'); xcorr = (c[::2] + 1j*c[1::2]).reshape(4,1024)\n \n return {'data':xcorr, 'times':times}\n \ndef print_ant_log_messages(corr):\n for ant, antval in corr.ant_to_snap.iteritems():\n for pol, polval in antval.iteritems():\n # Skip if the antenna is associated with a board we can't reach\n if polval['host'] in corr.dead_fengs.keys():\n logger.warning(\"Won't get data from Ant %s, Pol %s because host %s is dead\" % (ant, pol, polval['host']))\n continue\n else:\n if isinstance(polval['host'], SnapFengine):\n host = polval['host'].host # the dictionary contains FEngine instances\n chan = polval['channel']\n logger.debug(\"Expecting data from Ant %s, Pol %s from host %s input %d\" % (ant, pol, host, chan))\n else:\n logger.warning(\"Failed to find F-Engine %s associated with ant/pol %s/%s\" % (polval['host'], ant, pol))\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser(description='Poll running SNAPs for FPGA/PAM/FEM monitoring data',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-r', dest='redishost', type=str, default='redishost',\n help ='Host servicing redis requests')\n parser.add_argument('-d', dest='delay', type=float, default=10.0,\n help ='Seconds between polling loops')\n parser.add_argument('-c', dest='poco',action='store_true',default=False,\n help='Upload pocket correlator output to redis')\n parser.add_argument('-D', dest='retrytime', type=float, default=300.0,\n help ='Seconds between reconnection attempts to dead boards')\n parser.add_argument('-l', dest='loglevel', type=str, default=\"INFO\",\n help ='Log level. 
DEBUG / INFO / WARNING / ERROR')\n parser.add_argument('--noredistapcp', action='store_true',\n help='Don\\'t use the redis-based SNAP comms protocol')\n args = parser.parse_args()\n\n if args.loglevel not in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]:\n logger.error(\"I don't undestand log level %s\" % args.loglevel)\n else:\n for handler in logger.handlers:\n handler.setLevel(getattr(logging, args.loglevel))\n \n\n corr = HeraCorrelator(redishost=args.redishost, use_redis=(not args.noredistapcp), block_monitoring=False)\n upload_time = corr.r.hget('snap_configuration', 'upload_time')\n print_ant_log_messages(corr)\n\n retry_tick = time.time()\n script_redis_key = \"status:script:%s:%s\" % (hostname, __file__)\n locked_out = False\n logger.info('Starting SNAP redis monitor')\n while(True):\n tick = time.time()\n corr.r.set(script_redis_key, \"alive\", ex=max(60, args.delay * 2))\n while corr.r.exists('disable_monitoring'):\n if not locked_out:\n logger.warning('Monitoring locked out. Retrying every 10 seconds')\n locked_out = True\n corr.r.set(script_redis_key, \"locked out\", ex=20)\n time.sleep(10)\n if locked_out:\n logger.warning('Monitoring unlocked')\n locked_out = False\n \n # Check for a new configuration, and if one exists, update the Fengine list\n if corr.r.hget('snap_configuration', 'upload_time') != upload_time:\n upload_time = corr.r.hget('snap_configuration', 'upload_time')\n logger.info('New configuration detected. Reinitializing fengine list')\n corr = HeraCorrelator(redishost=args.redishost, use_redis=(not args.noredistapcp), block_monitoring=False)\n print_ant_log_messages(corr)\n \n # Recompute the hookup every time. It's fast\n corr.compute_hookup()\n\n # load this module's version into redis\n corr.r.hmset('version:%s:%s' % (__package__, os.path.basename(__file__)), {'version':__version__, 'timestamp':datetime.datetime.now().isoformat()})\n\n # Get antenna stats\n input_stats = corr.do_for_all_f(\"get_stats\", block=\"input\", kwargs={\"sum_cores\" : True})\n if corr.r.exists('disable_monitoring'):\n continue\n histograms = []\n eq_coeffs = []\n autocorrs = []\n for i in range(6):\n if corr.r.exists('disable_monitoring'):\n continue\n histograms += [corr.do_for_all_f(\"get_input_histogram\", block=\"input\", args=(i,))]\n eq_coeffs += [corr.do_for_all_f(\"get_coeffs\", block=\"eq\", args=(i,))]\n autocorrs += [corr.do_for_all_f(\"get_new_corr\", block=\"corr\", args=(i,i))]\n # We only detect overflow once per FPGA (not per antenna).\n # Get the overflow flag and reset it\n fft_of = corr.do_for_all_f(\"is_overflowing\", block=\"pfb\")\n corr.do_for_all_f(\"rst_stats\", block=\"pfb\")\n\n # Get FEM/PAM sensor values\n fem_stats = get_all_fem_stats(corr)\n if corr.r.exists('disable_monitoring'):\n continue\n pam_stats = get_all_pam_stats(corr)\n if corr.r.exists('disable_monitoring'):\n continue\n\n # Write spectra to snap-indexed keys. 
This means we'll get spectra even from\n # unconnected (according to the CM database) antennas\n for snap in histograms[0].keys():\n for antn in range(6):\n status_key = 'status:snaprf:%s:%d' % (snap, antn)\n snap_rf_stats = {}\n try:\n hist_bins, hist_vals = histograms[antn][snap]\n snap_rf_stats['histogram'] = json.dumps([hist_bins.tolist(), hist_vals.tolist()])\n except:\n snap_rf_stats['histogram'] = None\n try:\n snap_rf_stats['autocorrelation'] = json.dumps(autocorrs[antn][snap].real.tolist())\n except:\n snap_rf_stats['autocorrelation'] = None\n try:\n coeffs = eq_coeffs[antn][snap]\n snap_rf_stats['eq_coeffs'] = json.dumps(coeffs.tolist())\n except:\n snap_rf_stats['eq_coeffs'] = None\n snap_rf_stats['timestamp'] = datetime.datetime.now().isoformat()\n corr.r.hmset(status_key, snap_rf_stats)\n \n \n for key, val in input_stats.iteritems():\n antpols = corr.fengs_by_name[key].ants\n means, powers, rmss = val\n for antn, antpol in enumerate(antpols):\n # Don't report inputs which aren't connected\n if antpol is None:\n continue\n ant, pol = helpers.hera_antpol_to_ant_pol(antpol)\n status_key = 'status:ant:%s:%s' % (ant, pol)\n mean = means[antn]\n power = powers[antn]\n rms = rmss[antn]\n redis_vals = {'adc_mean':mean, 'adc_power':power, 'adc_rms':rms}\n # Give the antenna hash a key indicating the SNAP and input number it is associated with\n redis_vals['f_host'] = key\n redis_vals['host_ant_id'] = antn\n try:\n hist_bins, hist_vals = histograms[antn][key]\n redis_vals['histogram'] = json.dumps([hist_bins.tolist(), hist_vals.tolist()])\n except:\n redis_vals['histogram'] = None\n try:\n redis_vals['autocorrelation'] = json.dumps(autocorrs[antn][key].real.tolist())\n except:\n redis_vals['autocorrelation'] = None\n try:\n coeffs = eq_coeffs[antn][key]\n redis_vals['eq_coeffs'] = json.dumps(coeffs.tolist())\n except:\n redis_vals['eq_coeffs'] = None\n try:\n redis_vals.update(pam_stats[ant][pol])\n except KeyError:\n # if a SNAP died between getting input stats (which is the dictionary we are looping over)\n # and getting pam/fem stats, the appropriate PAM/FEM keys may not exist\n pass\n try:\n redis_vals.update(fem_stats[ant][pol])\n except KeyError:\n pass\n try:\n redis_vals[\"fft_of\"] = fft_of[key]\n except KeyError:\n pass\n redis_vals['timestamp'] = datetime.datetime.now().isoformat()\n corr.r.hmset(status_key, redis_vals)\n \n # Get FPGA stats\n fpga_stats = corr.do_for_all_f(\"get_fpga_stats\")\n for key, val in fpga_stats.iteritems():\n corr.r.hmset(\"status:snap:%s\" % key, val) \n \n # If the retry period has been exceeded, try to reconnect to dead boards:\n if time.time() > (retry_tick + args.retrytime):\n if len(corr.dead_fengs) > 0:\n logger.debug('Trying to reconnect to dead boards')\n corr.reestablish_dead_connections(programmed_only=True)\n retry_tick = time.time()\n \n # If executing the loop hasn't already taken longer than the loop delay time, add extra wait.\n extra_delay = args.delay - (time.time() - tick)\n if extra_delay > 0:\n logger.debug(\"Sleeping for %.2f seconds\" % extra_delay)\n time.sleep(extra_delay)\n","sub_path":"hera_corr_f/control_software/build/scripts-2.7/hera_snap_redis_monitor.py","file_name":"hera_snap_redis_monitor.py","file_ext":"py","file_size_in_byte":17341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107906672","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 15 10:39:28 2016\n\n@author: sheng\n\"\"\"\n\nimport os, math, json\nfrom collections import 
defaultdict\n\nclass NaiveBayesClassifier:\n \n def __init__(self):\n self.k = 1\n self.spam = defaultdict(lambda:self.k)\n self.ham = defaultdict(lambda:self.k)\n self.d_spam = 0\n self.d_ham = 0\n \n\n def classification_report(self, y_true, y_pred):\n if len(y_pred) != len(y_true):\n print(\"Vector size don't match!\")\n return\n tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0\n for i in range(len(y_true)):\n if y_true[i] == 1 and y_pred[i] == 1:\n tp += 1\n elif y_true[i] == 0 and y_pred[i] == 1:\n fp += 1\n elif y_true[i] == 1 and y_pred[i] == 0:\n fn += 1\n elif y_true[i] == 0 and y_pred[i] == 0:\n tn += 1\n p_spam = tp/(tp+fp)\n p_ham = tn/(tn+fn)\n r_spam = tp/(tp+fn)\n r_ham = tn/(tn+fp)\n s_spam = int(tp+fn)\n s_ham = int(tn+fp)\n f1_spam = 2*(p_spam * r_spam) / (p_spam + r_spam)\n f1_ham = 2*(p_ham * r_ham) / (p_ham + r_ham)\n s_total = s_spam + s_ham\n p_avg = (p_spam * s_spam + p_ham * s_ham) / s_total\n r_avg = (r_spam * s_spam + r_ham * s_ham) / s_total\n f1_avg = (f1_spam * s_spam + f1_ham * s_ham) / s_total\n print(\" Precision Recall F1-Score Support\")\n print(\" Ham {:.2f} {:.2f} {:.2f} {:d}\"\\\n .format(p_ham, r_ham, f1_ham, s_ham))\n print(\" Spam {:.2f} {:.2f} {:.2f} {:d}\"\\\n .format(p_spam, r_spam, f1_spam, s_spam))\n print(\" Avg {:.2f} {:.2f} {:.3f} {:d}\"\\\n .format(p_avg, r_avg, f1_avg, s_ham + s_spam))\n\n \n def partial_print(self, dic, n):\n \"\"\" dic is a dictionary of {word : count}, \n n denotes how many elements to print \"\"\"\n counts = [(v,k) for k, v in dic.items()]\n counts.sort(key=lambda x:x[0], reverse=True)\n print(counts[:n])\n\n\n def normalize(self, dic):\n total_count = sum([dic[k] for k in dic])\n for k in dic:\n dic[k] = math.log(float(dic[k])/total_count)\n \n def save(self, path):\n ss = json.dumps(self, default=lambda x: x.__dict__, \n sort_keys=True, indent=4)\n file = open(path, \"w\")\n file.write(ss)\n file.close()\n \n def load(self, path):\n print(\"loading from: %s\" % path)\n file = open(path, \"r\")\n ss = file.read()\n file.close()\n j = json.loads(ss)\n self.ham = j['ham']\n self.spam = j['spam']\n self.d_ham = j['d_ham']\n self.d_spam = j['d_spam']\n\n \n def fit(self, path, skip=False):\n count = 0\n vocabulary = set()\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n count += 1\n fp = os.path.join(root, name)\n if not fp.endswith(\".txt\"):\n \tcontinue\n if skip == True and count % 10 != 0:\n continue\n if count % 1000 == 0:\n print(count, fp)\n with open(fp, 'r', encoding=\"latin1\") as content_file:\n content = content_file.read() \n tokens = content.split()\n for token in tokens:\n if \"spam\" in fp:\n self.spam[token] += 1\n elif \"ham\" in fp:\n self.ham[token] += 1\n vocabulary.add(token)\n for v in vocabulary:\n if v not in self.spam:\n self.spam[v] = self.k\n if v not in self.ham:\n self.ham[v] = self.k\n self.normalize(self.ham)\n self.normalize(self.spam)\n #self.partial_print(self.ham, 50)\n self.calculate_prior()\n\n\n def calculate_prior(self):\n nspam = float(sum([self.spam[key] for key in self.spam]))\n nham = float(sum([self.ham[key] for key in self.ham]))\n self.d_spam = nspam/(nspam+nham)\n self.d_ham = nham/(nspam+nham)\n # print(self.d_spam, self.d_ham)\n self.d_spam = math.log(self.d_spam)\n self.d_ham = math.log(self.d_ham)\n \n \n def classify(self, path, labeled=False):\n count = 0\n # spam = 1, ham = 0\n y_true = []\n y_pred = []\n output = []\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n count += 1\n fp = os.path.join(root, name)\n if not 
fp.endswith(\".txt\"):\n continue\n if count % 1000 == 0:\n print(count, fp)\n with open(fp, 'r', encoding=\"latin1\") as content_file:\n content = content_file.read() \n tokens = content.split()\n score_spam, score_ham = self.d_spam, self.d_ham\n for token in tokens:\n if token in self.ham:\n score_ham += self.ham[token]\n if token in self.spam:\n score_spam += self.spam[token]\n true_label = 1 if \"spam\" in fp else 0\n pred_label = 1 if score_spam > score_ham else 0\n y_pred.append(pred_label)\n if labeled:\n y_true.append(true_label)\n output_label = \"spam\" if score_spam > score_ham else \"ham\"\n output.append(\"%s %s\" % (output_label, fp))\n nboutput = open('nboutput.txt', 'w')\n for item in output:\n nboutput.write(\"%s\\n\" % item)\n if labeled:\n self.classification_report(y_true, y_pred)\n\n\n","sub_path":"HW1/naive_bayes_classifier.py","file_name":"naive_bayes_classifier.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"149523822","text":"from apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.executors.pool import ThreadPoolExecutor\nimport asyncio\n\n#计时ws的连接时间,30分钟没有websocket的数据传输就自动断开\nclass timer():\n\n executors = {\n 'default': ThreadPoolExecutor(100)\n }\n\n def start(self, action_check, action_set, close,loop):\n scheduler = BackgroundScheduler(executors=timer.executors)\n # 添加定时任务\n self.job = scheduler.add_job(self.action, 'interval', seconds=60,\n args=[action_check, action_set, close,loop])\n scheduler.start()\n\n def action(self, action_check, action_set, close,loop):\n if action_check():\n action_set()\n else:\n #关闭websocket连接\n asyncio.set_event_loop(loop)\n close()\n\n def remove(self):\n #移除计时任务\n self.job.remove()\n\n\n#计时websocket的连接校验,防止连接不发送token验证的恶意链接\nclass start_timer():\n executors = {\n \"default\": ThreadPoolExecutor(100)\n }\n\n def start(self,close,loop):\n scheduler = BackgroundScheduler(executors=start_timer.executors)\n self.job = scheduler.add_job(self.action, 'interval',seconds=5,args=[close,loop])\n scheduler.start()\n\n def action(self,close,loop):\n # 关闭websocket连接\n self.job.remove()\n asyncio.set_event_loop(loop)\n close()\n\n def remove(self):\n #移除计时任务\n self.job.remove()","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"164201129","text":"# -*- coding: utf-8 -*-\n\nimport flask\nfrom flask import Blueprint, Flask, request\nfrom api_v2 import api_v2\nimport MySQLdb\nimport MySQLdb.cursors\nfrom flask import make_response\nfrom functools import wraps\nfrom pprint import pprint\n\napp = Flask(__name__)\n\napp.register_blueprint(api_v2, url_prefix='/api/v2')\n\n@app.before_request\ndef mysql_initial():\n db = MySQLdb.connect(\n charset=app.config[\"DB\"][\"charset\"],\n host=app.config[\"DB\"][\"host\"],\n user=app.config[\"DB\"][\"user\"],\n passwd=app.config[\"DB\"][\"passwd\"],\n db=app.config[\"DB\"][\"db\"],\n cursorclass=MySQLdb.cursors.DictCursor\n )\n db.autocommit(False)\n db_cursor = db.cursor()\n\n flask.g.db_cursor = db_cursor\n flask.g.db = db\n\nif __name__ == '__main__':\n app.config.from_envvar('WATER_CONF_PATH')\n app.debug = app.config[\"DEBUG\"]\n app.run(host=app.config[\"SERVER\"][\"HOST\"], 
port=app.config[\"SERVER\"][\"PORT\"])\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"100123896","text":"from django.shortcuts import render\nfrom django.http import HttpResponse , JsonResponse\nfrom django.utils import timezone\nfrom .models import news\n\nimport datetime\n\nimport os,sys\nSCRIPT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSCRIPT_DIR = os.path.join(SCRIPT_DIR, 'scripts'); \nsys.path.append(SCRIPT_DIR)\nimport prothom_alo,kaler_kantho\n \n\n\ndef home(request):\n\treturn render(request,'home.html')\n\n\ndef scrap(request):\n\tdata = {}\n\n\tif request.POST['portal']=='pa':\n\t\tdata = prothom_alo.news(request.POST['url'] )\n\telif request.POST['portal']=='kk':\n\t\tdata = kaler_kantho.news(request.POST['url'] )\n\n\treturn JsonResponse(data)\n\n\ndef postnews(request): \n\trequest.session.set_expiry(300)\n\tdata = {}\n\n\tif news.objects.filter(url =request.POST['url'] ).exists()==False:\n\t\tnew_news = news(\n\t\t\turl \t\t= request.POST['url'],\n\t\t\tportal_name = request.POST['portal'],\n\t\t\theadline \t= request.POST['headline'], \n\t\t\tbody\t \t= request.POST['body'],\n\t\t\timg\t \t\t= request.POST['image'],\n\t\t\temail\t \t= request.session['email']\n\t\t\t)\n\t\tnew_news.save()\n\n\t\tdata ['message'] = \"Thanks for your contribution!\"\n\t\tdata ['success'] = True\n\n\telse:\n\t\tdata ['message'] = \"This news already in our database. We appreciate your contribution!\"\n\t\tdata ['success'] = False\n\n\treturn JsonResponse(data)\n\n\n\ndef contribution(request):\n\trequest.session.set_expiry(300)\n\tdata = {}\n\ttoday = datetime.date.today()\n\tif 'email' not in request.session:\n\t\trequest.session['email'] = request.POST['email']\n\n\n\tdata[ \"contribution1\" ] = news.objects.filter(email=request.session['email']).count()\n\tdata[ \"contribution2\" ] = news.objects.filter(email=request.session['email'],date__year = today.year,date__month=today.month,date__day=today.day ).count()\n\tdata[ 'email' ] = request.session['email']\n\n\treturn JsonResponse(data)\n","sub_path":"scrap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"27029796","text":"# =================================================================\n#\n# Authors: Richard Law \n#\n# Copyright (c) 2020 Richard Law\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the 'Software'), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# =================================================================\n\nimport os\nimport datetime\nfrom itertools import repeat\nimport logging\nfrom numbers import Number\n\nfrom pyproj import CRS, datadir\nfrom pyproj.transformer import Transformer, AreaOfInterest, TransformerGroup\nfrom pyproj.enums import WktVersion, TransformDirection\nfrom shapely import wkt\nfrom shapely.geometry import Polygon, MultiPoint, MultiLineString, MultiPolygon, GeometryCollection\nfrom shapely.ops import transform\n\nLOGGER = logging.getLogger(__name__)\nCURRENT_YEAR = float(datetime.datetime.now().year)\n\n_USER_PROJ_DATA = os.getenv('PROJ_DIR', None)\nif _USER_PROJ_DATA and datadir.get_data_dir() != _USER_PROJ_DATA:\n datadir.set_data_dir(_USER_PROJ_DATA)\n datadir.append_data_dir(_USER_PROJ_DATA)\n\n#: Process metadata and description\nPROCESS_METADATA = {\n 'version': '1.2.0',\n 'id': 'wkt-reprojector',\n 'title': 'WKT Reprojector',\n 'description': 'An example process that reprojects a geometry from one CRS to another, using PROJ v6. This will take account of possible datum shifts. Because of the use of PROJ v6, late-binding can be used, and 4D coordinates (three spatial components and one temporal component) are supported.',\n 'keywords': ['reprojection', 'PROJ', '2D', '3D', '4D', 'spatiotemporal transformation'],\n 'links': [{\n 'type': 'text/html',\n 'rel': 'related',\n 'title': 'pyproj Documentation',\n 'href': 'https://pyproj4.github.io/pyproj/stable/',\n 'hreflang': 'en'\n }],\n 'inputs': [{\n 'id': 'wkt',\n 'title': 'WKT representation of the input geometry',\n 'abstract': 'Well-known text (WKT) representation of the input geometry. M-values will be ignored, e.g. a POINT ZM will be converted to a POINT Z due to current limitations of Shapely. This also means that POINT M, LINESTRING M, etc. should be used with care since they will be interpreted as POINT Z, LINESTRING Z, etc. If you use M to represent time, use the time parameter to capture the epoch in order to perform 4D transformation.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'string',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': 'MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35), (30 20, 20 15, 20 25, 30 20)))'\n }\n }\n },\n 'minOccurs': 1,\n 'maxOccurs': 1,\n 'metadata': None,\n 'keywords': ['WKT', 'geometry']\n }, {\n 'id': 'time',\n 'title': 'Time (decimal year)',\n 'abstract': 'To support time-dependent datum transformations, given that the M-coordinate of the WKT input will be ignored, the time for the transformation should be specified with this parameter (as a decimal year, e.g. 2020.0). 
This M-value will be assumed for all vertices of the geometry.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'float',\n 'valueDefinition': {\n 'anyvalue': False,\n 'defaultValue': CURRENT_YEAR\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'metadata': None,\n 'keywords': ['time', '4D transformation']\n }, {\n 'id': 'src_crs',\n 'title': 'Source CRS',\n 'abstract': 'The input coordinate reference system (CRS), with a known identifier, or an OGC WKT string.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'string',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': 'EPSG:4326'\n }\n }\n },\n 'minOccurs': 1,\n 'maxOccurs': 1,\n 'keywords': ['CRS', 'PROJ']\n }, {\n 'id': 'dst_crs',\n 'title': 'Destination CRS',\n 'abstract': 'The output coordinate reference system (CRS), with a known identifier, or an OGC WKT string.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'string',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': \"EPSG:3857\"\n }\n }\n },\n 'minOccurs': 1,\n 'maxOccurs': 1,\n 'keywords': ['CRS', 'PROJ']\n }, {\n 'id': 'always_xy',\n 'title': 'Force x,y axis order',\n 'abstract': 'If this is false, axis order may be swapped relative to the input WKT geometry, if the source and destination CRSs are defined as the having the first coordinate component point in a northerly direction. See pyproj documentation for more information.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'boolean',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': False\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['axis order']\n }, {\n 'id': 'errcheck',\n 'title': 'Error checking',\n 'abstract': 'If True, the result is an error if the transformation is invalid. By default this is False and an invalid transformation returns inf for coordinate values yet the result is considered successful.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'boolean',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': True\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['validation']\n }, {\n 'id': 'best_available',\n 'title': 'Require best transformation',\n 'abstract': 'Require the best possible transformation to be applied; if it cannot be applied due to missing grids in this service, the result will be an error rather than an inferior output.',\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['validation'],\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'boolean',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': True\n }\n }\n }\n }, {\n 'id': 'radians',\n 'title': 'Radians input',\n 'abstract': 'If True, will expect input data to be in radians and will return radians if the projection is geographic. Default is False (degrees). 
Ignored for pipeline transformations.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'boolean',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': False\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['radians','degrees','input']\n }, {\n 'id': 'direction',\n 'title': 'Transformation direction',\n 'abstract': 'The direction of the transform.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'enum',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': TransformDirection.FORWARD.value,\n 'possibleValues': list(map(lambda _: _.value, TransformDirection))\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['transformation direction']\n }, {\n 'id': 'rounding_precision',\n 'title': 'Output coordinate precision',\n 'abstract': 'The desired rounding precision of output coordinates, in decimal places. The default is five decimal places. The minimum value is 0. xkcd has a helpful guide.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'integer',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': 5\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['coordinate precision','precision']\n }, {\n 'id': 'minimum_accuracy',\n 'title': 'Acceptable loss of accuracy',\n 'abstract': 'Transformations usually have an expected accuracy, and not all transformations can promise to introduce no new error (before considering floating point or rounding errors). This parameter allows you to control the acceptable level of accuracy lost: if the expected accuracy degradation is more than this value (or if it is unknown), specified in metres, the result will be an error and the transformation will not be performed. If this parameter is absent or null, the transformation will always proceed.',\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'float',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': None\n }\n }\n },\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['accuracy','error']\n },{\n 'id': 'west_lon_degree',\n 'title': 'Area of interest (western extent)',\n 'abstract': 'The west bound in degrees of the area of interest. All four area of interest parameters must be specified for this to take effect.',\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['area of interest','longitude'],\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'float',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': None\n }\n }\n }\n }, {\n 'id': 'south_lat_degree',\n 'title': 'Area of interest (southern extent)',\n 'abstract': 'The south bound in degrees of the area of interest. All four area of interest parameters must be specified for this to take effect.',\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['area of interest','latitude'],\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'float',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': None\n }\n }\n }\n }, {\n 'id': 'east_lon_degree',\n 'title': 'Area of interest (eastern extent)',\n 'abstract': 'The east bound in degrees of the area of interest. All four area of interest parameters must be specified for this to take effect.',\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['area of interest','longitude'],\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'float',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': None\n }\n }\n }\n }, {\n 'id': 'north_lat_degree',\n 'title': 'Area of interest (northern extent)',\n 'abstract': 'The north bound in degrees of the area of interest. 
All four area of interest parameters must be specified for this to take effect.',\n 'minOccurs': 0,\n 'maxOccurs': 1,\n 'keywords': ['area of interest','latitude'],\n 'input': {\n 'literalDataDomain': {\n 'dataType': 'float',\n 'valueDefinition': {\n 'anyValue': False,\n 'defaultValue': None\n }\n }\n }\n }],\n 'outputs': [{\n 'id': 'wkt',\n 'title': 'Reprojected geometry',\n 'description': 'A geometry that has been transformed into the destination CRS',\n 'output': {\n 'literalDataDomain': {\n 'dataType': {\n 'name': 'string'\n },\n 'valueDefinition': {\n 'anyValue': True\n }\n }\n }\n }, {\n # 'id': 'src_crs',\n # 'title': 'Source CRS (PROJJSON)',\n # 'description': 'A PROJJSON representation of the source CRS, useful for verifying that the input CRS was correctly interpreted.',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n # 'id': 'dst_crs',\n # 'title': 'Destination CRS (PROJJSON)',\n # 'description': 'A PROJJSON representation of the destination CRS, useful for verifying that the input CRS was correctly interpreted.',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n # 'id': 'accuracy',\n # 'title': 'Accuracy',\n # 'description': 'Operation accuracy is an optional attribute which indicates the typical error the application of the coordinate operation has introduced into the transformed target CRS coordinates, assuming input of errorless source CRS coordinates. It is an approximate figure for the area of applicability of the coordinate operation as a whole, given in metres.',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'float'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n 'id': 'definition',\n 'title': 'Projection definition',\n 'description': 'Definition of the applied transformation',\n 'output': {\n 'literalDataDomain': {\n 'dataType': {\n 'name': 'string'\n },\n 'valueDefinition': {\n 'anyValue': True\n }\n }\n }\n }, {\n # 'id': 'description',\n # 'title': 'Projection description',\n # 'description': 'Description of the applied transformation',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n # 'id': 'name',\n # 'title': 'Projection name',\n # 'description': 'Name of the applied transformation',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n # 'id': 'remarks',\n # 'title': 'Transformation remarks',\n # 'description': 'Optional remarks about the applied transformation',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n # 'id': 'scope',\n # 'title': 'Transformation scope',\n # 'description': 'Scope of the applied projection',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 'anyValue': True\n # }\n # }\n # }\n }, {\n # 'id': 'transformer_wkt',\n # 'title': 'Transformation WKT string',\n # 'description': 'The transformation represented as a well-known text (WKT2 2019) string',\n # 'output': {\n # 'literalDataDomain': {\n # 'dataType': {\n # 'name': 'string'\n # },\n # 'valueDefinition': {\n # 
'anyValue': True\n # }\n # }\n # }\n }, {\n 'id': 'transformer',\n 'title': 'Transformation ',\n 'description': 'The applied transformation represented as JSON object, including the source CRS, target CRS, transformation description, scope, etc.',\n 'output': {\n 'literalDataDomain': {\n 'dataType': {\n 'name': 'string'\n },\n 'valueDefinition': {\n 'anyValue': True\n }\n }\n }\n }, {\n 'id': 'best_available',\n 'title': 'Application of best available transformation',\n 'description': 'Boolean indicating whether the best available transformation was applied (true) or not (false). In the latter case, this may be because the server was missing an optimal grid to perform the requested transformation. Use the \"best_available\" input parameter to control whether the projection should still proceed if the best available transformation is not available.',\n 'output': {\n 'literalDataDomain': {\n 'dataType': {\n 'name': 'boolean'\n },\n 'valueDefinition': {\n 'anyValue': False\n }\n }\n }\n }],\n 'example': {\n 'inputs': [\n {\n 'id': 'wkt',\n 'value': 'MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35), (30 20, 20 15, 20 25, 30 20)))',\n 'type': 'text/plain'\n }, {\n 'id': 'src_crs',\n 'value': 'EPSG:4326',\n 'type': 'text/plain'\n }, {\n 'id': 'dst_crs',\n 'value': \"EPSG:3857\",\n 'type': 'text/plain'\n }, {\n 'id': 'always_xy',\n 'value': True\n }, {\n 'id': 'best_available',\n 'value': True\n }, {\n 'id': 'west_lon_degree',\n 'value': -180.0\n }, {\n 'id': 'south_lat_degree',\n 'value': -90.0\n }, {\n 'id': 'east_lon_degree',\n 'value': 180.0\n }, {\n 'id': 'north_lat_degree',\n 'value': 90.0\n }, {\n 'id': 'rounding_precision',\n 'value': 5\n }]\n }\n}\n\ntry:\n from pygeoapi.process.base import BaseProcessor, ProcessorExecuteError\n\n class BestTransformationUnavailableError(ProcessorExecuteError):\n \"\"\"\n Raise when the transformation is limited to using the best possible\n transformation, but this is not available due to missing grids\n \"\"\"\n def __init__(self, message, best_transformation, *args):\n self.message = message\n self.best_transformation = best_transformation\n super(BestTransformationUnavailableError, self).__init__(message, best_transformation, *args)\n\n class TransformationUnavailableError(ProcessorExecuteError):\n \"\"\"\n Raise when there is no identified transformation between two CRSs\n \"\"\"\n def __init__(self, input_crs, output_crs, *args):\n self.input_crs = input_crs\n self.output_crs = output_crs\n message = f'There is no transformation between {self.input_crs.name} and {self.output_crs.name}'\n self.message = message\n super(TransformationUnavailableError, self).__init__(message, input_crs, output_crs, *args)\n\n class TooInaccurateError(ProcessorExecuteError):\n \"\"\"\n Raise when the transformation accuracy is too high relative to client\n expectation\n \"\"\"\n def __init__(self, message, accuracy, *args):\n self.message = message\n self.accuracy = best_transformation\n super(TooInaccurateError, self).__init__(message, accuracy, *args)\n\n def geom_transformation(transformer, geom, params):\n LOGGER.debug(geom.type)\n if geom.type == 'GeometryCollection':\n collection = [geom_transformation(transformer, _geom, params) for _geom in geom.geoms]\n geom_output = GeometryCollection(collection)\n elif not geom.type.startswith('Multi'):\n geom_output = singlepart_geom_transformation(transformer, geom, params)\n elif geom.type.startswith('Multi'):\n parts = [geom_transformation(transformer, part, params) for part in 
geom]\n if parts[0].type == 'Point':\n geom_output = MultiPoint(parts)\n elif parts[0].type == 'LineString':\n geom_output = MultiLineString(parts)\n elif parts[0].type == 'Polygon':\n geom_output = MultiPolygon(parts)\n return geom_output\n\n def singlepart_geom_transformation(transformer, geom, params):\n LOGGER.debug('Performing singlepart geometry transformation')\n trans_kwargs = {\n 'radians': params['radians'],\n 'errcheck': params['errcheck'],\n 'direction': params['direction']\n }\n if hasattr(geom, 'type') and geom.type == 'Polygon':\n LOGGER.debug('Polygon type: transforming in rings')\n LOGGER.debug(geom.exterior)\n LOGGER.debug(geom.interiors)\n shell = singlepart_geom_transformation(transformer, geom.exterior, params)\n holes = [singlepart_geom_transformation(transformer, hole, params) for hole in geom.interiors]\n return Polygon(shell, holes=holes)\n print(geom)\n trans_kwargs = {\n 'tt': tuple([float(params.get('time'))]) * len(geom.coords),\n **trans_kwargs\n }\n if not geom.has_z:\n # GEOS does not understand M, but to allow for time-dependent\n # transformations, M must be used to represent time, and then\n # removed from the output - because GEOS re-interprets M as Z\n # see https://github.com/Toblerity/Shapely/issues/882\n geom_slice = slice(2)\n coord_transformer = lambda xx, yy: transformer.transform(xx, yy,\n zz=None,\n **trans_kwargs\n )[geom_slice]\n return transform(coord_transformer, geom)\n # Input is 3-dimensional (X, Y, Z), or\n # Input has M (X, Y, M) - M will be interpreted as Z\n # In the case of ZM: M is silently dropped by GEOS, Z is retained\n # Therefore, POINT M (1 2 3) is interpreted as POINT Z (1 2 3)\n # and POINT ZM (1 2 3 4) is interpreted as POINT Z (1 2 3)\n # and this limitation is regretfully acknowledged\n geom_slice = slice(3)\n coord_transformer = lambda xx, yy, zz: transformer.transform(xx, yy,\n zz=zz,\n **trans_kwargs\n )[geom_slice]\n return transform(coord_transformer, geom)\n\n class WKTReprojectorProcessor(BaseProcessor):\n '''WKT reprojection example'''\n\n def __init__(self, provider_def):\n '''\n Initialize object\n :param provider_def: provider definition\n :returns: pygeoapi.process.reproject-coords.WKTReprojectorProcessor\n '''\n # Filter out empty outputs\n metadata = dict(PROCESS_METADATA)\n metadata['outputs'] = list(filter(bool,metadata['outputs']))\n\n BaseProcessor.__init__(self, provider_def, metadata)\n\n def __repr__(self):\n return ' {}'.format(self.name)\n\n def execute(self, data):\n wkt_input = data.get('wkt', self.get_default('wkt'))\n geom = wkt.loads(wkt_input)\n params = {p: data.get(p, self.get_default(p)) for p in (\n 'always_xy', 'errcheck', 'radians', 'direction', 'src_crs', 'dst_crs',\n 'best_available', 'rounding_precision', 'time', 'minimum_accuracy'\n )}\n aoi_params = ('west_lon_degree','south_lat_degree','east_lon_degree','north_lat_degree')\n if all(map(lambda b: isinstance(data.get(b, None), Number), aoi_params)):\n params['area_of_interest'] = AreaOfInterest(*map(data.get, aoi_params))\n input_crs = CRS.from_user_input(params.get('src_crs').strip())\n output_crs = CRS.from_user_input(params.get('dst_crs').strip())\n\n transformerGroup = TransformerGroup(\n crs_from=input_crs,\n crs_to=output_crs,\n skip_equivalent=True, # Don't perform a transformation between equivalent CRSs\n always_xy=params.get('always_xy'),\n area_of_interest=params.get('area_of_interest', None)\n )\n if not len(transformerGroup.transformers):\n raise TransformationUnavailableError(input_crs, output_crs)\n if 
params.get('best_available') and not transformerGroup.best_available:\n _operation = transformerGroup.unavailable_operations[0]\n raise BestTransformationUnavailableError(f'Transformation {_operation.name} ({_operation.method_code} {_operation.method_auth_name}) is unavailable', transformerGroup.unavailable_operations[0])\n # try:\n # transformerGroup.download_grids(verbose=True)\n # return self.execute(data)\n # except:\n # raise BestTransformationUnavailableError(f'Transformation {transformerGroup.unavailable_operations[0].name} is unavailable', transformerGroup.unavailable_operations[0])\n elif transformerGroup.best_available:\n is_best_available = True\n else:\n is_best_available = False\n minimum_accuracy = params.get('minimum_accuracy')\n transformer = transformerGroup.transformers[0]\n if minimum_accuracy is not None and transformer.accuracy > minimum_accuracy:\n raise TooInaccurateError(f'The transformation would introduce too much inaccuracy in the output ({transformer.accuracy} > {minimum_accuracy})', transformer.accuracy)\n rounding_precision = max(0, int(params.get('rounding_precision')))\n kwargs = {\n 'radians': params['radians'],\n 'errcheck': params['errcheck'],\n 'direction': params['direction']\n }\n LOGGER.debug(geom.type)\n geom_output = geom_transformation(transformer, geom, params)\n wkt_geom = wkt.dumps(geom_output, rounding_precision=rounding_precision).replace('\"','')\n if not geom.has_z and geom_output.has_z:\n wkt_geom = wkt_geom.replace('Z', 'M')\n LOGGER.debug('Replaced false Z with M')\n LOGGER.debug(wkt_geom)\n outputs = {\n 'wkt': wkt_geom,\n # 'src_crs': input_crs.to_json_dict(),#.to_wkt(version=WktVersion.WKT2_2019, pretty=False),\n # 'dst_crs': output_crs.to_json_dict(),#.to_wkt(version=WktVersion.WKT2_2019, pretty=False),\n # 'area_of_use': {\n # 'west': transformer.area_of_use.west,\n # 'south': transformer.area_of_use.south,\n # 'east': transformer.area_of_use.east,\n # 'north': transformer.area_of_use.north,\n # 'name': transformer.area_of_use.name\n # },\n # 'accuracy': float(transformer.accuracy) if transformer.accuracy != -1 else None,\n 'definition': transformer.definition,\n # 'description': transformer.description,\n # 'name': transformer.name,\n # 'remarks': transformer.remarks,\n # 'scope': transformer.scope,\n 'transformer': transformer.to_json_dict(),\n # 'transformer_wkt': transformer.to_wkt(version=WktVersion.WKT2_2019),\n 'best_available': is_best_available,\n # 'is_bound': output_crs.is_bound,\n # 'is_engineering': output_crs.is_engineering,\n # 'is_geocentric': output_crs.is_geocentric,\n # 'is_geographic': output_crs.is_geographic,\n # 'is_projected': output_crs.is_projected,\n # 'is_vertical': output_crs.is_vertical\n }\n outputs = [{'id': k, 'value': v} for k, v in outputs.items()]\n return outputs\n\nexcept (ImportError, RuntimeError):\n pass\n","sub_path":"build/lib/wkt_reprojector_plugin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":30313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"627203063","text":"from django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom fcuser.views import index, logout, RegisterView, LoginView\nfrom product.views import (\n ProductList, ProductRegister, ProductDetail,\n ProductListAPI, ProductDetailAPI\n)\nfrom order.views import OrderList, OrderRegister\nfrom django.views.generic import TemplateView\nfrom django.template.response import TemplateResponse\nimport datetime\nfrom order.models 
import Order\nfrom .functions import get_exchange\n\norigin_index = admin.site.index\n\n\ndef thefast_index(request, extra_context=None):\n # return TemplateResponse(request, 'admin/index.html', extra_context)\n base_date = datetime.datetime.now() - datetime.timedelta(days=7)\n order_data = {}\n for i in range(7):\n target_dttm = base_date + datetime.timedelta(days=i)\n date_key = target_dttm.strftime('%Y-%m-%d')\n target_date = datetime.date(target_dttm.year, target_dttm.month, target_dttm.day)\n order_cnt = Order.objects.filter(register_date__date=target_date).count()\n order_data[date_key] = order_cnt\n extra_context = {\n 'orders': order_data,\n 'exchange': get_exchange()\n }\n return origin_index(request, extra_context)\n\n\nadmin.site.index = thefast_index\n\nurlpatterns = [\n re_path(r'^admin/manual/$', TemplateView.as_view(\n template_name='admin/manual.html',\n extra_context={\n 'title': '메뉴얼',\n 'site_title': \"HwanSeok's BackOffice\",\n 'site_header': \"HwanSeok's BackOffice\"\n }\n )),\n path('admin/', admin.site.urls),\n path('baton/', include('baton.urls')),\n path('', index),\n path('login/', LoginView.as_view()),\n path('logout/', logout),\n path('register/', RegisterView.as_view()),\n path('product/', ProductList.as_view()),\n path('product//', ProductDetail.as_view()),\n path('product/register/', ProductRegister.as_view()),\n path('order/', OrderList.as_view()),\n path('order/register/', OrderRegister.as_view()),\n path('api/product/', ProductListAPI.as_view()),\n path('api/product/', ProductDetailAPI.as_view())\n]\n","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"94408556","text":"name = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\nhandle = open(name)\n\nmail = dict()\naddresses = list()\nfor line in handle:\n line = line.rstrip()\n if not line.startswith('From'):\n continue\n if line.startswith('From:'):\n continue\n words = line.split()\n #print(words)\n mail[words[1]] = mail.get(words[1],0)+1\n print(mail)\n\n\nbigcount = None\nbigword = None\nfor k,v in mail.items():\n if bigcount is None or v>bigcount:\n bigword = k\n bigcount = v\nprint(bigword, bigcount)\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"470381866","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 14 09:33:00 2019\n\n@author: MarcFish\n\"\"\"\n\nimport numpy as np\nimport csv\nfrom datetime import datetime\nfrom scipy.spatial import distance\nfrom scipy import sparse\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import normalize\nfrom gensim.models import Word2Vec\nimport pickle\nimport os\n\n#path = 'E:/project/data/raw/douban/douban_beijing/GERF/'\npath = 'E:/project/data/raw/meetup/GERF/'\n\ndef normalize_matrix(matrix):\n return normalize(matrix, norm='max')\n\nclass GERF:\n \n def __init__(self,d,i,neg_num=400,alpha=0.05,lam=0.02):\n self.neg_num = neg_num\n self.alpha = alpha\n self.lam = lam\n self.event_info = d.event_info\n self.i = i\n self.train,self.test = d.get_train_test()\n self.train_event,self.test_event = d.get_train_test_event()\n self.user_count = d.user_count\n self.event_count = d.event_count\n \n def make_features(self):\n if os.path.isfile(path+'features{}'.format(self.i)):\n self.features = pickle.load(open(path+'features{}'.format(self.i),'rb'))\n 
self.features = self.features[0:4]\n return\n print('time-aware recommendation')\n self.S_t = self.time_aware()\n print('venue-aware recommendation')\n self.S_v = self.venue_aware()\n print('indirect_social_aware recommendation')\n # 因为meetup没有社交关系 所以只计算了indirect这一部分\n self.S_s = self.indirect_social_aware()\n print('geography-aware recommendation')\n self.S_l = self.geo_aware()\n print('content recommendation')\n self.S_c = self.content_aware()\n print('get features')\n self.features = [\n normalize_matrix(self.S_t), # time\n normalize_matrix(self.S_v), # venue\n normalize_matrix(self.S_s), # social\n normalize_matrix(self.S_l), # geo\n normalize_matrix(self.S_c), # content\n ]\n pickle.dump(self.features, open(path+'features{}'.format(self.i),'wb'))\n \n def init(self):\n self.make_train_test()\n self.theta = np.random.uniform(low=0.0,high=0.1,size=(self.user_count,len(self.features)))\n \n def run_test(self,n):\n pre = dict()\n for u,f in self.test_features.items():\n pre.setdefault(u,list())\n for i in f:\n pre[u].append(np.dot(self.theta[u], i))\n rel = dict()\n for u,t in self.test_targets.items():\n rel.setdefault(u,set())\n rel[u] = rel[u].union(set(list(np.nonzero(t)[0])))\n hit = 0\n recall = 0\n precision = n * len(rel)\n for u,p in pre.items():\n rank = np.argsort(pre[u])[-n:]\n for index in rank:\n if index in rel[u]:\n hit += 1\n recall += len(rel[u])\n self.recall = float(hit) / recall\n self.precision = float(hit) / precision\n print(\"{} recall:{}\".format(self.i,self.recall))\n print(\"{} precision:{}\".format(self.i,self.precision))\n# \n def run_train(self,N):\n for i in tqdm(range(N)):\n u,i,j = self.sample()\n self.theta[u] += self.alpha * (\n 1 / (1 + np.exp(self.predict(u,self.train_features[u][i],self.train_features[u][j])))\n * (self.train_features[u][i] - self.train_features[u][j])\n - self.lam * self.theta[u])\n \n def predict(self,u,e1,e2):\n return np.dot(self.theta[u],e1)-np.dot(self.theta[u],e2)\n \n def sample(self):\n u = np.random.choice(list(self.train_targets.keys()))\n nzero = np.nonzero(self.train_targets[u])[0]\n while len(nzero) == 0: # 如果这个群组啥也没看过 换其他群组\n u = np.random.choice(list(self.train_targets.keys()))\n nzero = np.nonzero(self.train_targets[u])[0]\n i = np.random.choice(nzero)\n j = np.random.choice(np.setdiff1d(list(range(len(self.train_targets[u]))), nzero))\n return (u, i, j)\n \n def make_train_test(self):\n self.train_targets,self.train_features = self.make_train()\n self.test_targets,self.test_features = self.make_test()\n \n def make_train(self):\n targets = dict()\n features = dict()\n for u, es in tqdm(self.train.items()):\n targets.setdefault(u,list())\n features.setdefault(u,list())\n for ue in es:\n targets[u].append(1)\n f = np.zeros(shape=(len(self.features,)))\n for i,s_m in enumerate(self.features):\n value = s_m[u,ue]\n if value > 0:\n f[i] = s_m[u,ue]\n features[u].append(f)\n for e in np.random.choice(self.train_event,self.neg_num):\n targets[u].append(0)\n f = np.zeros(shape=(len(self.features,)))\n for i,s_m in enumerate(self.features):\n value = s_m[u,e]\n if value > 0:\n f[i] = s_m[u,e]\n features[u].append(f)\n return targets,features\n \n def make_test(self):\n targets = dict()\n features = dict()\n for u,es in tqdm(self.test.items()):\n targets.setdefault(u,list())\n features.setdefault(u,list())\n for e in self.test_event:\n if e in es:\n targets[u].append(1)\n else:\n targets[u].append(0)\n f = np.zeros(shape=(len(self.features,)))\n for i,s_m in enumerate(self.features):\n value = s_m[u,e]\n if value > 0:\n f[i] 
= value\n features[u].append(f)\n return targets,features\n \n def make_train_test_(self):\n self.make_train_()\n self.make_test_()\n \n def make_train_(self,filename='train.txt'):\n with open(filename,'w',encoding='utf-8') as f:\n for u, es in tqdm(self.train.items()):\n for ue in es:\n f.write('1 qid:%d ' % (u + 1))\n fid = 0\n for s_m in self.features:\n fid += 1\n value = s_m[u, ue]\n if value > 0:\n f.write('%d:%.4f ' % (fid, value))\n f.write('\\n')\n for e in np.random.choice(self.train_event, self.neg_num):\n f.write('0 qid:%d ' % (u + 1))\n fid = 0\n for s_m in self.features:\n fid += 1\n value = s_m[u, e]\n if value > 0:\n f.write('%d:%.4f ' % (fid, value))\n f.write('\\n') \n \n def make_test_(self,filename='test.txt'):\n with open(filename,'w',encoding='utf-8') as f:\n for u,es in tqdm(self.test.items()):\n for e in self.test_event:\n if e in es:\n f.write('1 qid:%d ' % (u + 1))\n else:\n f.write('0 qid:%d ' % (u + 1))\n fid = 1\n for s_m in self.features:\n value = s_m[u, e]\n if value > 0:\n f.write('%d:%.4f ' % (fid, value))\n fid += 1\n f.write('\\n')\n \n def time_aware(self):\n t_u = np.zeros((self.user_count, 7 * 24))\n t_e = np.zeros((self.event_count, 7, 24))\n \n for e,item in self.event_info.items():\n dt = datetime.utcfromtimestamp(item['time'])\n weekday = dt.weekday()\n hour = dt.hour\n t_e[e, weekday, hour] = 1\n t_e = t_e.reshape((self.event_count, 7 * 24))\n \n for u, es in self.train.items():\n t_u[u] = t_e[es, :].mean(0)\n# Compute distance between each pair of the two collections of inputs.\n S_t = 1 - distance.cdist(t_u,t_e,'cosine')\n S_t[np.isnan(S_t)] = np.mean(S_t[~np.isnan(S_t)])\n return S_t\n \n def venue_aware(self):\n s_v = sparse.lil_matrix((self.user_count, self.event_count))\n for u, es in tqdm(self.train.items()):\n for e,item in self.event_info.items():\n for ue in es:\n if item['venue'] == self.event_info[ue]['venue']:\n s_v[u, e] += 1\n return s_v\n\n def indirect_social_aware(self):\n s_s = sparse.lil_matrix((self.user_count, self.event_count))\n for u, es in tqdm(self.train.items()):\n for e,item in self.event_info.items():\n for ue in es:\n if item['group'] == self.event_info[ue]['group']: \n s_s[u, e] += 1\n return s_s\n \n def geo_aware(self):\n s_l = np.zeros((self.user_count, self.event_count))\n for u, es in tqdm(self.train.items()):\n h = len(es)**(-1 / 6)\n for e,item in self.event_info.items():\n for ue in es:\n l = np.array([item['lon']-self.event_info[ue]['lon'],\n item['lat']-self.event_info[ue]['lat']])\n s_l[u, e] += np.exp(-np.dot(l, l) / (2 * h * h))\n s_l[u, e] /= 2 * 3.1415 * len(es) * h * h\n return s_l\n \n def content_aware(self):\n# word2vec\n doc = []\n word = set()\n for e,item in self.event_info.items():\n doc.append(item['content'].split())\n word = word.union(set(item['content'].split()))\n model = Word2Vec(doc, size=100, window=5, min_count=1, workers=4)\n q = np.ndarray(shape=(self.event_count, 100))\n p = np.ndarray(shape=(self.user_count, 100))\n for e,item in tqdm(self.event_info.items()):\n for v in item['content'].split():\n try:\n q[e] += model.wv[v]\n except:\n continue\n q[e] /= len(word)\n for u, es in tqdm(self.train.items()):\n for ue in es:\n p[u] += q[ue]\n p[u] /= len(es)\n s_c = 1 - distance.cdist(p,q,'cosine')\n s_c[np.isnan(s_c)] = np.mean(s_c[~np.isnan(s_c)])\n return s_c\n \nclass data:\n \n def __init__(self, path,i):\n self.path = path\n \n self.info_filename = path+'event_lon_lat_venue_time_group_content.csv'\n self.train_filename = path+'train_event_user_{}.csv'.format(i)\n 
self.test_filename = path+'test_event_user_{}.csv'.format(i)\n\n# event,user,venue,org\n self.event_map = {}\n self.user_map = {}\n self.venue_map = {}\n self.org_map = {}\n self._getmap()\n \n self.event_count = len(self.event_map)\n self.user_count = len(self.user_map)\n self.venue_count = len(self.venue_map)\n self.org_count = len(self.org_map)\n \n# event info key:eventid value:dict:time,venue,org,location,content:value\n self.event_info = self._geteventinfo()\n \n# train and test\n# key:u value:e\n self.train = dict()\n self.test = dict()\n \n self.train_event = set()\n self.test_event = set()\n with open(self.train_filename,'r',encoding='utf-8') as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n e = self.event_map[row[0]]\n u = self.user_map[row[1]]\n self.train.setdefault(u,list())\n self.train[u].append(e)\n self.train_event.add(e)\n with open(self.test_filename,'r',encoding='utf-8') as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n e = self.event_map[row[0]]\n u = self.user_map[row[1]]\n self.test.setdefault(u,list())\n self.test[u].append(e)\n self.test_event.add(e)\n self.train_event = np.array(list(self.train_event))\n self.test_event = np.array(list(self.test_event))\n \n def get_train_test(self):\n return self.train, self.test\n \n def get_train_test_event(self):\n return self.train_event, self.test_event\n \n def _geteventinfo(self):\n event_info = dict()\n with open(self.info_filename,'r',encoding='utf-8') as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n temp = {}\n e = self.event_map[row[0]]\n temp['lon'] = float(row[1])\n temp['lat'] = float(row[2])\n temp['venue'] = self.venue_map[row[3]]\n temp['time'] = int(row[4])\n temp['group'] = self.org_map[row[5]]\n temp['content'] = row[6]\n event_info[e] = temp\n return event_info\n\n def _getmap(self):\n with open(self.info_filename,'r',encoding='utf-8') as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n e = row[0]\n v = row[3]\n g = row[5]\n self.event_map.setdefault(e,len(self.event_map))\n self.venue_map.setdefault(v,len(self.venue_map))\n self.org_map.setdefault(g,len(self.org_map))\n\n with open(self.train_filename,'r',encoding='utf-8') as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n e = row[0]\n u = row[1]\n self.event_map.setdefault(e,len(self.event_map))\n self.user_map.setdefault(u,len(self.user_map))\n\n with open(self.test_filename,'r',encoding='utf-8') as f:\n f_csv = csv.reader(f)\n for row in f_csv:\n e = row[0]\n u = row[1]\n self.event_map.setdefault(e,len(self.event_map))\n self.user_map.setdefault(u,len(self.user_map))\n \nif __name__ == '__main__':\n recall = 0.0\n precision = 0.0\n for i in range(6):\n if os.path.isfile(path+'gerf{}'.format(i)):\n gerf = pickle.load(open(path+'gerf{}'.format(i),'rb'))\n else:\n d = data(path,i)\n gerf = GERF(d,i)\n gerf.make_features()\n gerf.init()\n# gerf.make_train_test_()\n gerf.run_train(5000)\n# d = data(path,i)\n# gerf = GERF(d,i)\n# gerf.make_features()\n# gerf.init()\n# gerf.run_train(2000000)\n gerf.run_test(5)\n recall += gerf.recall\n precision += gerf.precision\n# pickle.dump(gerf, open(path+'gerf{}'.format(i),'wb'))\n print(\"recall:{}\".format(recall/6))\n print(\"precision:{}\".format(precision/6))\n ","sub_path":"GERF/GERF.py","file_name":"GERF.py","file_ext":"py","file_size_in_byte":14860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"19581848","text":"from typing import Union\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import 
LogisticRegression\n\nfrom enities.train_params import TrainingParams\n\nSklearnRegressionModel = Union[RandomForestClassifier, LogisticRegression]\n\n\ndef train_model(\n features: pd.DataFrame, target: pd.Series, train_params: TrainingParams\n) -> SklearnRegressionModel:\n if train_params.model_type == \"RandomForestClassifier\":\n model = RandomForestClassifier(\n n_estimators=100, random_state=train_params.random_state\n )\n elif train_params.model_type == \"LogisticRegression\":\n model = LogisticRegression()\n else:\n raise NotImplementedError()\n model.fit(features, target)\n return model\n","sub_path":"ml_project/src/models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"426714484","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom absl import flags\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_boolean(\"random_flip_left_right\", True,\n \"random flip left and right\")\nflags.DEFINE_boolean(\"random_flip_up_down\", False,\n \"random flip up and down\")\nflags.DEFINE_boolean(\"random_brightness\", False,\n \"randomly adjust brightness\")\nflags.DEFINE_boolean(\"random_contrast\", False,\n \"randomly adjust contrast\")\nflags.DEFINE_boolean(\"random_hue\", False,\n \"randomly adjust hue\")\nflags.DEFINE_boolean(\"random_saturation\", False,\n \"randomly adjust saturation\")\nflags.DEFINE_boolean(\"cutout\", False,\n \"cutout\")\n\nchunk_size = 5000\n\nimage_size = 32\nimage_depth = 3\nnum_classes = 10\n\n\ndef iterator(files, batch_size, training):\n num_chunks = len(files)\n dataset = tf.data.Dataset.from_tensor_slices(files).apply(\n tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, num_chunks))\n\n if training:\n min_queue_examples = int(0.4 * chunk_size * num_chunks)\n dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(\n min_queue_examples + 3 * batch_size))\n else:\n dataset = dataset.repeat()\n\n dataset = dataset.apply(tf.contrib.data.map_and_batch(\n lambda example: parse(example, training), batch_size))\n\n dataset = dataset.prefetch(1)\n\n return dataset.make_one_shot_iterator()\n\n\ndef parse(example, training):\n features = tf.parse_single_example(\n example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64)\n })\n\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image, [image_depth, image_size, image_size])\n\n image = tf.transpose(image, [1, 2, 0])\n image = preprocess(image, training)\n image = tf.transpose(image, [2, 0, 1])\n\n label = tf.cast(features[\"label\"], tf.int32)\n label = tf.one_hot(label, num_classes)\n\n return image, label\n\n\ndef preprocess(image, training):\n if training:\n image = augment(image)\n image = tf.image.per_image_standardization(image)\n if training and FLAGS.cutout:\n center = tf.random_uniform(shape=[2], maxval=image_size, dtype=tf.int32)\n offset_width = tf.maximum(0, center[0] - half_length)\n offset_height = tf.maximum(0, center[1] - half_length)\n target_width = tf.minimum(center[0] + half_length, image_size) - offset_width\n target_height = tf.minimum(center[1] + half_length, image_size) - offset_height\n\n patch = tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)\n patch = 
tf.image.pad_to_bounding_box(patch, offset_height, offset_width, image_size, image_size)\n\n image -= patch\n\n return image\n\n\ndef augment(image):\n image = tf.image.resize_image_with_crop_or_pad(image, image_size+4, image_size+4)\n image = tf.random_crop(image, [image_size, image_size, 3])\n if FLAGS.random_flip_left_right:\n image = tf.image.random_flip_left_right(image)\n if FLAGS.random_flip_up_down:\n image = tf.image.random_flip_up_down(image)\n if FLAGS.random_brightness:\n image = tf.image.random_brightness(image, max_delta=0.3)\n if FLAGS.random_contrast:\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n if FLAGS.random_hue:\n image = tf.image.random_hue(image, max_delta=0.2)\n if FLAGS.random_saturation:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n\n return image\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"637710723","text":"\nfrom datetime import datetime\n# import threading\n# client=pymongo.MongoClient(host='127.0.0.1',port=27017)\n# db=client['scrapy_data']['tianya']\n\nclass TYPipeline:\n\n def process_item(self,item ,spider):\n with open('tianya4.txt','a') as f:\n f.write(str(item))\n f.write('\\n')","sub_path":"web_crawl/tianya/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"472934071","text":"from abc import abstractmethod, ABCMeta\n\nfrom sqlalchemy import (Column, Boolean)\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\n\nfrom ..lib.abc import abstractclassmethod\nfrom ..lib.sqla import (\n Base, TimestampedBase, get_metadata, get_session_maker,\n get_named_object, get_database_id, Tombstone, UPDATE_OP, DELETE_OP)\nfrom ..lib.history_meta import declare_history_mappers\n\n\nclass DeclarativeAbstractMeta(DeclarativeMeta, ABCMeta):\n pass\n\n\nclass DiscussionBoundBase(Base):\n __metaclass__ = DeclarativeAbstractMeta\n __abstract__ = True\n\n @abstractmethod\n def get_discussion_id(self):\n \"Get the ID of an associated discussion object, if any.\"\n return self.discussion_id\n\n def send_to_changes(self, connection=None, operation=UPDATE_OP):\n if not connection:\n # WARNING: invalidate has to be called within an active transaction.\n # This should be the case in general, no need to add a transaction manager.\n connection = self.db().connection()\n if 'cdict' not in connection.info:\n connection.info['cdict'] = {}\n connection.info['cdict'][self.uri()] = (\n self.get_discussion_id(), self)\n\n @abstractclassmethod\n def get_discussion_conditions(cls, discussion_id, alias_maker=None):\n return (cls.discussion_id == discussion_id, )\n\n def unique_query(self, query):\n discussion_id = self.discussion_id\n if not discussion_id and self.discussion:\n discussion_id = self.discussion.id\n if not discussion_id:\n return (query, False)\n return (query.filter_by(discussion_id=discussion_id), False)\n\n def tombstone(self):\n return DiscussionBoundTombstone(self)\n\n\nclass Tombstonable(object):\n # Marker class for objects with the tombstone flag\n is_tombstone = Column(Boolean, server_default='0', default=False)\n\n @classmethod\n def base_conditions(cls, alias=None, alias_maker=None):\n return (cls.tombstone_condition(alias),)\n\n @classmethod\n def tombstone_condition(cls, alias=None):\n cls = alias or cls\n return cls.is_tombstone == 
False\n\n\nclass DiscussionBoundTombstone(Tombstone):\n def __init__(self, ob, **kwargs):\n super(DiscussionBoundTombstone, self).__init__(ob, **kwargs)\n self.discussion_id = ob.get_discussion_id()\n\n def send_to_changes(self, connection, operation=DELETE_OP):\n assert connection\n if 'cdict' not in connection.info:\n connection.info['cdict'] = {}\n connection.info['cdict'][self.uri] = (\n self.discussion_id, self)\n\n\nfrom .auth import (\n AbstractAgentAccount,\n AgentProfile,\n DiscussionPermission,\n EmailAccount,\n IdentityProvider,\n IdentityProviderAccount,\n LocalUserRole,\n PartnerOrganization,\n Permission,\n Role,\n User,\n UserRole,\n UserTemplate,\n Username,\n)\nfrom .action import (\n Action,\n ActionOnPost,\n CollapsePost,\n ExpandPost,\n ViewPost,\n)\nfrom .discussion import Discussion\nfrom .generic import (\n AnnotatorSource,\n Content,\n ContentSource,\n PostSource,\n)\nfrom .post import (\n AssemblPost,\n IdeaProposalPost,\n ImportedPost,\n Post,\n PostWithMetadata,\n SynthesisPost,\n)\nfrom .mail import (\n AbstractFilesystemMailbox,\n AbstractMailbox,\n Email,\n IMAPMailbox,\n MaildirMailbox,\n MailingList,\n)\nfrom .idea import (\n Argument,\n Criterion,\n Idea,\n IdeaLink,\n Issue,\n Position,\n RootIdea,\n)\nfrom .idea_content_link import (\n Extract,\n Idea,\n IdeaContentLink,\n IdeaContentNegativeLink,\n IdeaContentPositiveLink,\n IdeaContentWidgetLink,\n IdeaRelatedPostLink,\n IdeaThreadContextBreakLink,\n TextFragmentIdentifier,\n)\nfrom .idea_graph_view import (\n ExplicitSubGraphView,\n IdeaGraphView,\n SubGraphIdeaAssociation,\n SubGraphIdeaLinkAssociation,\n Synthesis,\n TableOfContents,\n)\nfrom .votes import (\n AbstractIdeaVote,\n BinaryIdeaVote,\n LickertIdeaVote,\n LickertRange,\n)\nfrom .annotation import (\n Webpage,\n)\nfrom .timeline import (\n DiscussionMilestone,\n DiscussionPhase,\n DiscussionSession,\n TimelineEvent,\n)\nfrom .widgets import (\n BaseIdeaWidget,\n BaseIdeaWidgetLink,\n CreativitySessionWidget,\n GeneratedIdeaWidgetLink,\n IdeaCreatingWidget,\n IdeaWidgetLink,\n InspirationWidget,\n MultiCriterionVotingWidget,\n VotableIdeaWidgetLink,\n VotedIdeaWidgetLink,\n VotingCriterionWidgetLink,\n Widget,\n WidgetUserConfig,\n)\n\nfrom .notification import (\n NotificationSubscription,\n NotificationSubscriptionGlobal,\n NotificationSubscriptionOnPost,\n NotificationSubscriptionOnIdea,\n NotificationSubscriptionOnExtract,\n NotificationSubscriptionOnUserAccount,\n NotificationSubscriptionFollowSyntheses,\n NotificationSubscriptionFollowAllMessages,\n NotificationSubscriptionFollowOwnMessageDirectReplies,\n NotificationSubscriptionStatus,\n Notification,\n NotificationCreationOrigin,\n NotificationOnPost,\n NotificationOnPostCreated,\n)\n\ndeclare_history_mappers()","sub_path":"assembl/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"514454198","text":"\"\"\"\nThis script drops tables that were associated with the old Galaxy Cloud functionality.\n\"\"\"\nfrom __future__ import print_function\n\nimport datetime\nimport logging\n\nfrom sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, MetaData, Table, TEXT\n\nfrom galaxy.model.migrate.versions.util import create_table, drop_table\n\nnow = datetime.datetime.utcnow\nlog = logging.getLogger(__name__)\nmetadata = MetaData()\n\nCloudImage_table = Table(\"cloud_image\", metadata,\n Column(\"id\", Integer, primary_key=True),\n 
Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"provider_type\", TEXT),\n Column(\"image_id\", TEXT, nullable=False),\n Column(\"manifest\", TEXT),\n Column(\"state\", TEXT),\n Column(\"architecture\", TEXT),\n Column(\"deleted\", Boolean, default=False))\n\n\"\"\" UserConfiguredInstance (UCI) table \"\"\"\nUCI_table = Table(\"cloud_uci\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"user_id\", Integer, ForeignKey(\"galaxy_user.id\"), index=True, nullable=False),\n Column(\"credentials_id\", Integer, ForeignKey(\"cloud_user_credentials.id\"), index=True),\n Column(\"key_pair_name\", TEXT),\n Column(\"key_pair_material\", TEXT),\n Column(\"name\", TEXT),\n Column(\"state\", TEXT),\n Column(\"error\", TEXT),\n Column(\"total_size\", Integer),\n Column(\"launch_time\", DateTime),\n Column(\"deleted\", Boolean, default=False))\n\nCloudInstance_table = Table(\"cloud_instance\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"launch_time\", DateTime),\n Column(\"stop_time\", DateTime),\n Column(\"user_id\", Integer, ForeignKey(\"galaxy_user.id\"), index=True, nullable=False),\n Column(\"uci_id\", Integer, ForeignKey(\"cloud_uci.id\"), index=True),\n Column(\"type\", TEXT),\n Column(\"reservation_id\", TEXT),\n Column(\"instance_id\", TEXT),\n Column(\"mi_id\", Integer, ForeignKey(\"cloud_image.id\"), index=True),\n Column(\"state\", TEXT),\n Column(\"error\", TEXT),\n Column(\"public_dns\", TEXT),\n Column(\"private_dns\", TEXT),\n Column(\"security_group\", TEXT),\n Column(\"availability_zone\", TEXT))\n\nCloudStore_table = Table(\"cloud_store\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"attach_time\", DateTime),\n Column(\"user_id\", Integer, ForeignKey(\"galaxy_user.id\"), index=True, nullable=False),\n Column(\"uci_id\", Integer, ForeignKey(\"cloud_uci.id\"), index=True, nullable=False),\n Column(\"volume_id\", TEXT),\n Column(\"size\", Integer, nullable=False),\n Column(\"availability_zone\", TEXT),\n Column(\"inst_id\", Integer, ForeignKey(\"cloud_instance.id\")),\n Column(\"status\", TEXT),\n Column(\"device\", TEXT),\n Column(\"space_consumed\", Integer),\n Column(\"error\", TEXT),\n Column(\"deleted\", Boolean, default=False))\n\nCloudSnapshot_table = Table(\"cloud_snapshot\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"user_id\", Integer, ForeignKey(\"galaxy_user.id\"), index=True, nullable=False),\n Column(\"uci_id\", Integer, ForeignKey(\"cloud_uci.id\"), index=True),\n Column(\"store_id\", Integer, ForeignKey(\"cloud_store.id\"), index=True, nullable=False),\n Column(\"snapshot_id\", TEXT),\n Column(\"status\", TEXT),\n Column(\"description\", TEXT),\n Column(\"error\", TEXT),\n Column(\"deleted\", Boolean, default=False))\n\nCloudUserCredentials_table = Table(\"cloud_user_credentials\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"user_id\", 
Integer, ForeignKey(\"galaxy_user.id\"), index=True, nullable=False),\n Column(\"provider_id\", Integer, ForeignKey(\"cloud_provider.id\"), index=True, nullable=False),\n Column(\"name\", TEXT),\n Column(\"access_key\", TEXT),\n Column(\"secret_key\", TEXT),\n Column(\"deleted\", Boolean, default=False))\n\nCloudProvider_table = Table(\"cloud_provider\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"user_id\", Integer, ForeignKey(\"galaxy_user.id\"), index=True, nullable=False),\n Column(\"type\", TEXT, nullable=False),\n Column(\"name\", TEXT),\n Column(\"region_connection\", TEXT),\n Column(\"region_name\", TEXT),\n Column(\"region_endpoint\", TEXT),\n Column(\"is_secure\", Boolean),\n Column(\"host\", TEXT),\n Column(\"port\", Integer),\n Column(\"proxy\", TEXT),\n Column(\"proxy_port\", TEXT),\n Column(\"proxy_user\", TEXT),\n Column(\"proxy_pass\", TEXT),\n Column(\"debug\", Integer),\n Column(\"https_connection_factory\", TEXT),\n Column(\"path\", TEXT),\n Column(\"deleted\", Boolean, default=False))\n\n\ndef upgrade(migrate_engine):\n print(__doc__)\n metadata.bind = migrate_engine\n metadata.reflect()\n\n drop_table(CloudSnapshot_table)\n drop_table(CloudStore_table)\n drop_table(CloudInstance_table)\n drop_table(UCI_table)\n drop_table(CloudImage_table)\n drop_table(CloudUserCredentials_table)\n drop_table(CloudProvider_table)\n\n\ndef downgrade(migrate_engine):\n metadata.bind = migrate_engine\n metadata.reflect()\n\n create_table(CloudProvider_table)\n create_table(CloudUserCredentials_table)\n create_table(CloudImage_table)\n create_table(UCI_table)\n create_table(CloudInstance_table)\n create_table(CloudStore_table)\n create_table(CloudSnapshot_table)\n","sub_path":"lib/galaxy/model/migrate/versions/0050_drop_cloud_tables.py","file_name":"0050_drop_cloud_tables.py","file_ext":"py","file_size_in_byte":7984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"15177759","text":"import os\n\nfrom db_data import DB_HOST, DB_NAME, DB_USERNAME\nfrom dbconnectors import PostgreSqlDb\n\ndb = PostgreSqlDb(username=DB_USERNAME, host=DB_HOST, database=DB_NAME)\n\n\ndef create_tables(project_dir, tables):\n sql_path = os.path.join(project_dir, 'sql', '{}.sql')\n for t in tables:\n query = open(sql_path.format(t), 'r').read()\n db.execute('DROP TABLE IF EXISTS {} CASCADE'.format(t))\n db.execute(query)\n\n\ndef insert_values(df, table_name, stats_dict):\n insert_df = df[list(stats_dict.keys())]\n insert_df.rename(columns=stats_dict, inplace=True)\n\n db.insert_from_frame(insert_df, table_name)\n\n return None\n\n\ndef insert_df(df, table, stats_dict, id_col, id_table_col='id'):\n query = 'select {} from {}'.format(id_table_col, table)\n ids_inserted_df = get_df_from_query(query)\n ids_inserted = []\n if not ids_inserted_df.empty:\n ids_inserted = ids_inserted_df[id_table_col].tolist()\n df[id_col] = df[id_col].astype(int)\n df_to_insert = df[~df[id_col].isin(ids_inserted)]\n if not df_to_insert.empty:\n insert_values(df_to_insert, table, stats_dict)\n\n\ndef get_df_from_query(query):\n df = db.get_pandas_df(query)\n return df\n\n\nif __name__ == '__main__':\n # import pandas as pd\n # d = {'id':7, 'name':['GK Save']}\n # insert_df = pd.DataFrame(d)\n # db.insert_from_frame(insert_df, 'win_types')\n query = 'select * from game_events where game_id = 1234772'\n\n df = get_df_from_query(query)\n\n import pdb; 
pdb.set_trace() # noqa # yapf: disable\n","sub_path":"lib_common/db_handle.py","file_name":"db_handle.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"406818062","text":"\"\"\"\n6.\tIn a one-dimensional array, find the sum of the elements located\nbetween the minimum and maximum elements.\nDo not include the minimum and maximum elements themselves in the sum.\n\"\"\"\n\nimport random\nN = 10\narr = [random.choice([i for i in range(10)]) for j in range(N)]\n# arr = [18, 99, 19, 78, 51, 71, 2, 59, 13, 90]\nmin_num = min(arr)\nmax_num = max(arr)\n\nfor j in range(len(arr)):\n if arr[j] == min_num:\n min_idx = j\n break\nfor j in range(len(arr)):\n if arr[j] == max_num:\n max_idx = j\n break\n\nprint(arr)\nprint(f'Minimum number of the array: {min_num} at position No. {min_idx + 1} of {N}')\nprint(f'Maximum number of the array: {max_num} at position No. {max_idx + 1} of {N}')\n\ntotal = 0\nif min_idx < max_idx:\n while min_idx != max_idx - 1:\n min_idx += 1\n total += arr[min_idx]\nelif min_idx > max_idx:\n while max_idx != min_idx - 1:\n max_idx += 1\n total += arr[max_idx]\nif total == 0:\n print('There are no elements between the numbers, they follow one another!')\nelse:\n print(f'The sum of the numbers between them is {total}')\n\n","sub_path":"Lesson_3/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"155728230","text":"import enum\nimport sys\n\nimport pyalgotrade.fsm as fsm\nimport pyalgotrade.logger\n\nlogger = pyalgotrade.logger.getLogger('strategyfsm')\n\n\nclass SampleStrategyFSMState(enum.Enum):\n\n INIT = 1\n STATE1 = 2\n STATE2 = 3\n ERROR = 99\n\n\nclass SampleStrategyFSM(fsm.StrategyFSM):\n\n def __init__(self, barfeed, states):\n super(SampleStrategyFSM, self).__init__(barfeed, states)\n\n def print_bars(self, bars):\n for i in bars.getInstruments():\n logger.info('{} {} {}'.format(i, bars[i].getDateTime(), bars[i].getClose()))\n\n @fsm.state(SampleStrategyFSMState.INIT, True)\n def state_init(self, bars, states):\n # You are only supposed to save state in the states variable.\n # DO NOT save local variables here; that is not guaranteed to be supported later.\n logger.info('INIT')\n print(states.prev)\n states.prev = 'INIT'\n self.print_bars(bars)\n return SampleStrategyFSMState.STATE1\n \n @fsm.state(SampleStrategyFSMState.STATE1, False)\n def state_state1(self, bars, states):\n logger.info('STATE1')\n print(states.prev)\n states.prev = 'STATE1'\n self.print_bars(bars)\n return SampleStrategyFSMState.STATE2\n\n @fsm.state(SampleStrategyFSMState.STATE2, False)\n def state_state2(self, bars, states):\n logger.info('STATE2')\n print(states.prev)\n states.prev = 'STATE2'\n self.print_bars(bars)\n return SampleStrategyFSMState.ERROR\n \n @fsm.state(SampleStrategyFSMState.ERROR, False)\n def state_error(self, bars, states):\n logger.info('ERROR')\n sys.exit(0)\n","sub_path":"samples/strategy/strategyfsm.py","file_name":"strategyfsm.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"430219096","text":"##### Gensim doc2vec and agglomerative clustering of resulting dense vector representations (rather than bow's)\n\n### Load scraped data\nimport pickle\nimport numpy as np\n\n#filename = '/Users/admin/Documents/Projects/MachineLearning/AI/Semi-supervised Learning/scraping_oil_gas_corpus/scraping_code/d_oil_and_gas_terms.pickle'\n#filename = '/Users/Ash/Projects/MachineLearning/AI/Semi-supervised 
Learning/scraping_oil_gas_corpus/scraping_code/d_oil_and_gas_terms.pickle'\nfilename = '/Users/ash/Documents/Projects/MachineLearning/AI/Semi-supervised Learning/scraping_oil_gas_corpus/scraping_code/d_oil_and_gas_terms.pickle'\nwith open(filename, 'rb') as handle:\n d_data = pickle.load(handle)\n\n# sample\nfor k, v in d_data.items(): print(k, '\\n', v, '\\n'*2)\n\"\"\"\n\nforward multiple-contact test\nA laboratory test to determine the phase envelope between lean gas and oil by equilibrating a gas sample \nseveral times with fresh samples of oil. In a forward-contact test, light and intermediate components are \nstripped from the oil by multiple contacts with the gas. The test also indicates how many contacts are required \nbefore the gas with added components becomes miscible with the oil. The molar ratios at each contact step are \ntypically designed using PVT simulation software that incorporates the fluid composition at each contact.\n\n\nstanding valve\nA downhole valve assembly that is designed to hold pressure from above while allowing fluids to flow from \nbelow. Standing valves generally are run and retrieved on slickline with the valve assembly located in an \nappropriate nipple. Applications for standing valves include testing the tubing string, setting packers, or \nother applications in which it is desirable to maintain fluid in the tubing string.\n\n\nwellbore orientation\nWellbore direction. Wellbore orientation may be described in terms of inclination and azimuth. Inclination \nrefers to the vertical angle measured from the down direction-the down, horizontal and up directions have \ninclinations of 0, 90 and 180, respectively. Azimuth refers to the horizontal angle measured clockwise \nfrom true north-the north, east, south and west directions have azimuths of 0, 90, 180 and 270, respectively.\n\n\"\"\"\n\n\n### Process text\nimport os\nimport random\nimport re\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.stem.snowball import SnowballStemmer\n\n# randomly sample 400 elements of the data dictionary\n# need the same result each time so pickle the cut down dict and load it in each time\nprint(len(d_data))\t# 4931\n#random_terms = '/Users/admin/Documents/Projects/MachineLearning/AI/Semi-supervised Learning/scraping_oil_gas_corpus/nlp_code/random_terms.pickle'\n#random_terms = '/Users/Ash/Projects/MachineLearning/AI/Semi-supervised Learning/scraping_oil_gas_corpus/nlp_code/random_terms.pickle'\nrandom_terms = '/Users/ash/Documents/Projects/MachineLearning/AI/Semi-supervised Learning/scraping_oil_gas_corpus/nlp_code/random_terms.pickle'\n\nif not os.path.isfile(random_terms):\n d_data = dict( (k, d_data[k]) for k in random.sample(list(d_data), 400) )\n with open(random_terms, 'wb') as handle:\n pickle.dump(d_data, handle, protocol=pickle.HIGHEST_PROTOCOL)\nelse:\n with open(random_terms, 'rb') as handle:\n d_data = pickle.load(handle)\n\nprint(len(d_data)) # 400\nprint(list(d_data.keys())[0])\n\n# form keywords list and descriptions list\nkeywords, descriptions = [], []\nfor i in range(len(d_data)):\n keywords.append( list(d_data.keys())[i] )\n descriptions.append( list(d_data.values())[i] )\n\n# perform natural language cleaning\nstopwords = nltk.corpus.stopwords.words('english')\nstemmer = SnowballStemmer(\"english\")\ndef nlp_clean(doc):\n\n # create list of tokens from the document (a token being a individual component of the vocabulary\n # i.e. 
a single word, or single punctuation)\n tokens = [word for sent in nltk.sent_tokenize(doc) for word in nltk.word_tokenize(sent)]\n\n # make all words lower case\n lowers = [ token.lower() for token in tokens ]\n\n # remove stop words\n stopped = [ lower for lower in lowers if lower not in stopwords ]\n\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n filtered_tokens = []\n for token in stopped:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n\n # reduce tokens to base / stemmed form\n stems = [ stemmer.stem(token) for token in filtered_tokens ]\n return stems\n\ndocs = []\nfor doc in descriptions: docs.append( nlp_clean(doc) )\n\n# sample\nprint(descriptions[-1])\nprint(docs[-1])\n\"\"\"\n\nA phenomenon of relative seismic velocities of strata whereby a shallow layer or feature with a high seismic \nvelocity (e.g., a salt layer or salt dome, or a carbonate reef) surrounded by rock with a lower seismic velocity \ncauses what appears to be a structural high beneath it. After such features are correctly converted from time to \ndepth, the apparent structural high is generally reduced in magnitude.\n\n[u'a', u'phenomenon', u'relat', u'seismic', u'veloc', u'strata', u'wherebi', u'shallow', u'layer', u'featur', u'high', \nu'seismic', u'veloc', u'e.g.', u'salt', u'layer', u'salt', u'dome', u'carbon', u'reef', u'surround', u'rock', u'lower', \nu'seismic', u'veloc', u'caus', u'appear', u'structur', u'high', u'beneath', u'after', u'featur', u'correct', u'convert', \nu'time', u'depth', u'appar', u'structur', u'high', u'general', u'reduc', u'magnitud']\n\n\"\"\"\n\n\n\n### Build, train and save the doc2vec model\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\n\n# create tagged docs (https://github.com/RaRe-Technologies/gensim/issues/1542)\n# (tagging the docs allows us to easily extract the labels when doing doc similarity)\ndocs_tagged = [ TaggedDocument(docs[i], [keywords[i]]) for i in range(len(docs)) ]\nprint(docs_tagged[0])\n\n# initialise a model\nmodel = Doc2Vec(\n size=100, \t\t# dimensionality of document vectors\n window=4, \n min_count=5, \n workers=4\n )\n\n# build vocab\nmodel.build_vocab(docs_tagged)\n\n# train\nmodel.train(docs_tagged, total_examples=len(docs_tagged), epochs=100)\n\n\n\n### Inspect some results (https://stackoverflow.com/questions/41709318/what-is-gensims-docvecs)\n\n# first keyword and its 100-D document vector representation\nkeyword1 = keywords[0]\ndocvec = model.docvecs[0]\n\nprint('\\n'*2)\nprint(keyword1)\nprint(docvec)\nprint(len(docvec))\n\n# most similar document vectors (by their keyword)\ndocsim = model.docvecs.most_similar( keyword1 )\nfor i in range(10): print(docsim[i])\n\n\"\"\" Output:\n\nmud-aging cell\n[-0.46930075 0.24111511 -0.65452486 0.07960624 0.27933225 -0.57205486\n 0.01857634 -0.43373516 0.5950049 0.09591938 0.8515341 -0.23467036\n 0.61899596 0.20965149 0.9030415 -0.38237852 -0.4520214 -0.5371389\n -0.02965951 -0.7457512 0.21630289 0.18072322 -0.01356592 0.16475712\n 0.3085342 0.76956207 0.42369726 0.3957191 0.49336338 0.6514096\n 0.7015149 -0.56747806 0.6603277 0.00761382 0.85111606 -0.29173177\n 0.46942535 -0.5020117 -0.17106616 -1.1938404 0.6599053 0.55130905\n -0.80835307 0.40446362 0.03073083 -0.25957283 0.12954792 -0.26075384\n 0.19322304 0.00296548 0.05748585 -0.3986431 -0.05385951 -0.15913898\n -0.95375824 0.50589114 -0.74666363 0.8480258 0.6071295 -0.14590833\n -0.05624911 0.05853309 -0.3353707 1.2660801 0.6817378 -0.27144068\n -0.14211453 0.81194127 0.14978758 
-0.4786596 0.3583522 -0.53172165\n -1.2283134 -0.15744266 0.0312759 -1.4410212 -0.49220684 0.28382796\n -0.7101878 0.21427606 0.48749664 -0.10307872 0.3946607 0.78112704\n 0.38293564 -0.28952685 0.68147844 -0.25098896 0.65532887 0.10993876\n -0.29568055 -0.16812487 0.11657126 0.38111183 0.2104465 -0.22793952\n 0.07921502 -0.26019612 0.2111703 -0.42160577]\n100\n(u'mud cell', 0.9888162016868591)\n(u'flowstream sample', 0.6759142875671387)\n(u'mud in sample', 0.5847841501235962)\n(u'wireline-retrievable safety valve (WRSV)', 0.583116888999939)\n(u'stock tank barrel', 0.5754137635231018)\n(u'wireline retrievable safety valve (WRSV)', 0.5743108987808228)\n(u'surge tank', 0.5686211585998535)\n(u'buoyancy method', 0.5684531331062317)\n(u'bentonite equivalent', 0.5675902962684631)\n(u'standard temperature and pressure', 0.5541241765022278)\n\n\"\"\"\n\n\n\n### Agglomerative clustering of resultant first-order tensor space\n\n# create distance matrix\nfrom sklearn.metrics.pairwise import cosine_similarity\nprint(np.asarray(model.docvecs))\nprint(type(model.docvecs))\n\ndist = cosine_similarity(np.array(model.docvecs, object))\n#dist = cosine_similarity(np.asarray(model.docvecs))\t# note previously I have had dist = 1 - cs...\nquit()\n\n\nprint('Zero''th doc vector:', '\\n', model.docvecs[0], '\\n'*3)\nprint('Zero''th distance vector:', '\\n', dist[0], '\\n'*3)\nprint('N''th doc vector:', '\\n', model.docvecs[-1], '\\n'*3)\nprint('N''th distance vector:', '\\n', dist[-1], '\\n'*3)\n\nprint(len(model.docvecs))\nprint(len(dist))\n\n# although the number of instance in model.docvecs and dist is the same (length 400), \n# the length of each element is different. The length of each element of model.docvecs \n# is 100 (for each dimension), whereas the length for each element in dist is of length \n# 400. 
This is because each element represents the cosine similarity between the instance\n# and each of the other 400 instances (http://blog.christianperone.com/2013/09/machine-learning-cosine-similarity-for-vector-space-models-part-iii/).\n# Each element of dist, is an array representing the Cosine Similarity between the element (document) with all \n# other elements (documents) in the set\n\n# Notice that that is why the first element of the zero'th dist vector is 1 (as this represents the cosine-similarity\n# between itself), and the last element of the last (n'th) dist vector is 1 (as this represents the cosine-similarity\n# between itself).\n\nprint(len(model.docvecs[0]))\nprint(len(dist[0]))\n\n\n\n### Run clustering algorithm to understand hidden structure within the keywords / descriptions\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import ward, dendrogram\n\n# define the linkage_matrix using ward clustering pre-computed distances\nlinkage_matrix = ward(dist)\n\nfig, ax = plt.subplots(figsize=(15, 20)) # set size\nax = dendrogram(\n linkage_matrix,\n labels=keywords,\n leaf_font_size=12.,\n leaf_rotation=45.\n )\n\nplt.tight_layout() # show plot with tight layout\nplt.ylabel('Ward distance')\nplt.show()\n\n\n\n### Extract document classes from dendrogram\n\n# create a truncated dendrogram showing only the last 12 merges\n# this will cut away the noisey micro-clusters and enable us to consider macro-clusters\nfig, ax = plt.subplots(figsize=(15, 20)) # set size\nax = dendrogram(\n linkage_matrix,\n truncate_mode='lastp', # show only the last p merged clusters\n p=12, # last 12 merges\n leaf_font_size=12.,\n show_contracted=True, # to get a distribution impression in truncated branches\n )\n\nplt.xlabel('Number of merges in cluster')\nplt.ylabel('Ward distance')\nplt.show()\n\n# a large jump in distance is typically what we're interested in if we want to argue for\n# a certain number of clusters (https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/)\n\n# by inspecting the cut down dendrogram we can see that a distance cut-off at around \n# ward distance 20 maximises jumps in distance for each cluster tree, generating 6 document classes\nmax_dist = 20\nfig, ax = plt.subplots(figsize=(15, 20)) # set size\nax = dendrogram(\n linkage_matrix,\n truncate_mode='lastp', # show only the last p merged clusters\n p=12, # last 12 merges\n leaf_font_size=12.,\n show_contracted=True, # to get a distribution impression in truncated branches\n )\n\nplt.axhline(y=max_dist, color='r', linestyle='--')\nplt.xlabel('Number of merges in cluster')\nplt.ylabel('Ward distance')\nplt.show()\n\n# knowing max_dist (our number of clusters) we can use the fcluster class to map each observation to a cluster id\nfrom scipy.cluster.hierarchy import fcluster\n\nclusters = fcluster(linkage_matrix, max_dist, criterion='distance')\nprint(clusters)\n\"\"\"\n[2 5 4 2 6 3 1 4 1 2 5 1 1 1 4 5 4 1 2 4 3 2 5 5 3 1 1 5 4 2 1 1 1 2 3 3 1\n 6 3 3 3 2 3 3 2 5 3 6 5 6 2 2 3 3 4 4 1 3 5 1 1 1 4 1 4 1 1 1 6 4 4 3 2 6\n 6 5 6 1 2 6 2 4 4 3 1 5 2 2 1 2 6 5 2 1 6 1 1 2 6 1 1 4 3 3 5 1 5 2 4 2 1\n 4 4 3 2 6 6 5 5 1 6 2 5 4 1 2 1 3 3 2 3 2 4 2 5 4 4 1 6 4 5 5 4 1 3 3 6 6\n 2 4 4 2 6 1 1 2 4 5 2 4 2 4 2 3 2 1 6 2 4 6 1 3 6 5 6 2 3 3 3 1 3 3 3 1 1\n 3 2 1 2 2 6 2 4 2 6 5 1 2 2 2 4 4 6 1 1 1 2 5 1 1 1 1 1 2 1 4 4 2 6 4 4 1\n 1 1 4 5 1 6 2 4 3 3 1 6 1 1 1 2 3 2 5 2 4 1 6 4 6 6 3 3 2 6 6 5 2 4 6 5 2\n 5 1 2 3 4 1 3 6 5 6 3 4 2 1 4 4 4 1 5 6 2 5 3 1 3 1 3 6 2 2 6 1 4 3 1 6 1\n 5 6 6 2 6 3 5 6 
1 3 1 4 1 3 1 1 3 6 6 6 1 1 3 6 6 5 2 3 6 4 2 1 2 4 6 6 1\n 5 1 2 1 1 3 4 6 2 1 3 1 1 1 5 1 2 2 1 3 5 3 4 4 2 5 1 6 2 5 3 2 2 1 1 3 2\n 5 1 3 2 3 1 6 4 4 4 1 5 6 2 1 2 2 3 6 2 6 1 6 3 5 3 1 5 2 3]\n\"\"\"\n# So, using unsupervised learning we have managed to derive class labels for a dataset that is completely beyond the understanding\n# of the programmer. We have managed to craft a semi-supervised dataset, where 400 of the labels are known. Now let's use keras to\n# create a classifier to predict labels for more unseen data.\n\n# First though, let's visualise the clusters in 2D, with the keyword labels attached and try some vector arithmetic in our embedding-space!\n\n\n\n### Use t-SNE to cluster the doc vectors too with their keywords displayed, and the class clustering colour\nfrom sklearn.manifold import TSNE\n\n# as per this SO article (https://stackoverflow.com/questions/36545434/cosine-similarity-tsne-in-sklearn-manifold)\n# we need to change our distance metric slightly (https://en.wikipedia.org/wiki/Cosine_similarity):\ndist_metric = 1.0 - dist\n\ntsne_model = TSNE(metric=\"precomputed\")\nX_reduced = tsne_model.fit_transform( abs(dist_metric) ) # https://github.com/scikit-learn/scikit-learn/issues/5772\nprint(X_reduced[:, 0], X_reduced[:, 1])\n\n#plt.scatter( X_reduced[:, 0], X_reduced[:, 1], s=20*2**4 )\n#plt.show()\n\ncolours = ['#F18F01', '#048BA8', '#2E4057', '#99C24D', '#FF2216', '#D4ADCF'] # have 6 colours here but you can't guarantee the number of clusters we'll generate as there is some randomness in the system\nfor i in range(len(keywords)):\n plt.scatter( X_reduced[i, 0], X_reduced[i, 1], s=20*2**4, color=colours[ clusters[i] - 1 ])\n plt.annotate(\n keywords[i],\n xy=( X_reduced[i, 0], X_reduced[i, 1] ),\n xytext=(5, 2),\n textcoords='offset points',\n ha='right',\n va='bottom'\n )\n\nplt.show()\n\n\n\n### Can we do some vector arithmetic for this example?\n\"\"\"\nThe classic example is:\nKing - Man + Woman = Queen\n\nThis works well, Man and Woman are clearly semantically correlated and are also a subset of Royalty (King and Queen).\nWe should try and find an example that follows a similar principle..\n\nIn word2vec we can do things like:\nmodel.most_similar(positive=['woman', 'king'], negative=['man'], topn=1) # king - man + woman = queen\n[('queen', 0.50882536)]\nmodel.doesnt_match(\"breakfast cereal dinner lunch\";.split())\n'cereal'\nmodel.similarity('woman', 'man')\n0.73723527\n\"\"\"\n\n# As we have limited knowledge of the meaning behind the dataset, try some random combinations of vector\n# additions and see if the results are a) interpretable and b) meaningful\nfor i in range(20):\n rix_1, rix_2 = random.randint(1, 399), random.randint(1, 399)\n vec_add = model.docvecs.most_similar( positive=[keywords[rix_1], keywords[rix_2]], topn=1 )\n\n print(keywords[rix_1])\n print(descriptions[rix_1], '\\n')\n\n print(keywords[rix_2])\n print(descriptions[rix_2], '\\n')\n\n print('Vector addition result:')\n print(vec_add[0][0])\n print([ descriptions[i] for i in range(len(descriptions)) if keywords[i] == vec_add[0][0] ], '\\n'*3)\n\n\"\"\"\nGood results from the addition:\n\nkilogram per cubic meter\nThe SI unit of measurement for density. Mud weights are typically expressed in kg/m3. The conversion factor from lbm/gal \nto kg/m3 is 120. For example, 12 lbm/gal = 1440 kg/m3.\n\n+\n\nzinc carbonate\nA neutral zinc salt, ZnCO3, which can be used as a sulfide scavenger in water-base muds. 
Zinc carbonate is less soluble \nthan zinc basic carbonate and perhaps slower to react with sulfide ions. Treatment level is about 0.1 lbm/bbl per 50 mg/L sulfide \nion (determined by Garrett Gas Train sulfide analysis of the filtrate).\n\n=\n\nequivalent weight\n[u'The molecular weight of an element, molecule or ion divided by its valence (or valence change for a redox reaction). For example, \nthe molecular weight of calcium hydroxide, or \"slaked lime,\" [Ca(OH)2] is 72.10. Because the valency of calcium in this case is 2, \nthe equivalent weight of lime is 36.05. Mud analyses give concentrations in various units: ppm, mg/L, wt.% and epm. Mud engineers should \nrecognize the meaning of epm and equivalent weight of a mud chemical.']\n\nWe can see that by adding a chemical aspect to the kilogram, we have a resultant vector of equivalent weight - which is all about molecular\nweight. This makes a lot of sense.\n\n\"\"\"\n\n\n\n### Creating a semi-supervised classifier\n\n# Done...\n# - try labelspreading algorithm as this data definitely passes the clustering assumption.\n# - train doc2vec on 400 random document samples (toy example so not a huge amount of samples)\n# - using unsupervised learning (analysis of agglomerative clustering) assign class labels to these 400 samples\n\n# To do...\n# - select another 100 random documents from my scraped document dictionary\n# - use trained doc2vec to generate inferred embeddings for these 100 unseen docs using gensim's infer_vector method (https://datascience.stackexchange.com/questions/10612/doc2vecgensim-how-can-i-infer-unseen-sentences-label)\n# - create semi-supervised trained set, where the 100 unseen samples are labelled -1\n# - use scikit-learns label propagation algorithms to classify\n# - plot in 2D (tsne) with colour coded classes, and make the markers for the unseen 100 larger\n# - should hopefully see the classified instances residing in the same regions as their fellow class members\n\n# use our doc2vec model to infer a vector representation for another 100 unseen documents selected at random\n# from the scraped data\nd_unseen = dict( (k, d_data[k]) for k in random.sample(d_data, 100) )\n\nunseen_kws, unseen_descs = [], []\nfor i in range(len(d_unseen)):\n unseen_kws.append( d_unseen.keys()[i] )\n unseen_descs.append( d_unseen.values()[i] )\n\nunseen_docs = []\nfor doc in unseen_descs: unseen_docs.append( nlp_clean(doc) )\n\n# https://stackoverflow.com/questions/44993240/how-to-use-the-infer-vector-in-gensim-doc2vec\nunseen_docs_tagged = [ TaggedDocument(unseen_docs[i], [unseen_kws[i]]) for i in range(len(unseen_docs)) ]\n\n# preallocate array\nunseen_embeddings = np.empty(len(d_unseen) * 100).reshape(100, 100)\n\nfor i in range(len(unseen_embeddings)):\n unseen_embeddings[i] = model.infer_vector(unseen_docs_tagged[i][0])\n\nprint(unseen_embeddings.shape)\n\n\n\n### Create semi-supervised training set\nX = np.append( np.asarray(model.docvecs), unseen_embeddings, axis=0 )\nprint(X)\nprint(X.shape)\n\ny = np.append( clusters, np.array([-1 for _ in range(100)]))\nprint(y)\nprint(y.shape)\n\"\"\"\n[[ 0.07102136 0.05765504 0.11885613 ... 0.05068389 -0.14108109\n -0.00422394]\n [-0.05989182 -0.47393617 -0.2024733 ... -0.10274604 0.47613543\n 0.03276008]\n [-0.07937056 -0.78328234 0.31664217 ... 0.29470178 0.15881643\n -0.52326316]\n ...\n [ 0.01068417 -0.2041029 -0.01168371 ... 0.13904609 -0.08690971\n 0.0231994 ]\n [-0.00247693 -0.02734658 -0.01304855 ... -0.01640731 -0.01997717\n 0.00771488]\n [ 0.0083775 -0.03261823 0.0180897 ... 
-0.02289038 -0.03065437\n -0.00108445]]\n(500, 100)\n[ 4 3 6 6 2 5 1 1 1 2 2 4 2 1 6 2 1 1 1 5 3 1 6 1\n 6 4 3 2 4 5 3 6 2 2 6 4 5 6 6 4 4 4 6 1 4 1 6 2\n 1 5 4 5 6 6 1 5 1 4 5 4 5 4 5 4 6 5 2 2 2 6 2 2\n 2 2 1 2 4 6 4 6 6 2 1 6 4 6 2 4 3 2 2 1 6 6 1 4\n 4 1 6 3 4 4 5 3 2 3 4 5 6 2 6 3 5 1 6 5 6 5 1 3\n 4 1 1 6 3 4 2 4 2 6 6 4 5 6 5 1 5 3 2 4 1 2 2 3\n 3 1 6 3 4 2 6 2 5 6 4 5 6 6 5 1 1 6 2 4 2 5 3 4\n 3 5 6 2 6 6 2 2 1 5 3 4 6 6 6 6 6 3 6 4 4 5 5 1\n 6 6 3 5 2 6 5 2 2 5 1 4 6 3 2 4 4 3 5 2 6 6 2 2\n 5 6 1 3 1 3 4 5 1 3 6 6 2 4 3 6 3 5 5 6 4 4 6 5\n 2 6 2 1 4 2 4 2 1 4 2 3 6 6 6 6 6 1 6 5 6 1 5 4\n 6 4 4 1 3 6 2 4 4 2 6 3 6 4 6 2 6 5 3 6 2 3 6 5\n 6 4 4 5 1 6 4 3 5 2 5 4 5 2 5 4 4 6 6 6 4 3 2 2\n 1 2 3 5 6 2 6 6 4 6 5 1 3 3 1 1 6 6 2 6 3 6 6 3\n 2 3 6 3 5 6 2 3 4 4 6 4 6 6 4 3 2 1 4 1 6 6 3 5\n 2 5 2 2 1 4 3 1 1 5 2 1 3 2 1 5 3 4 6 6 6 1 3 6\n 6 1 2 5 2 4 2 1 6 1 1 1 2 3 1 1 -1 -1 -1 -1 -1 -1 -1 -1\n -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1\n -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1\n -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1\n -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]\n(500,)\n\"\"\"\n\n# shuffle the training data\nX_holding = np.hstack((X, y.reshape(len(y), 1)))\nnp.random.shuffle(X_holding)\nX = X_holding[:, :-1]\ny = X_holding[:, -1]\nprint(X)\nprint(y)\nprint(X.shape)\nprint(y.shape)\n\"\"\"\n[[-5.94303727e-01 4.62160796e-01 2.33045205e-01 ... -3.89433801e-01\n 1.33214861e-01 4.69011277e-01]\n [ 2.88221855e-02 8.65062058e-01 2.26832837e-01 ... 6.85894191e-01\n -2.05603093e-01 -1.58817545e-01]\n [ 2.64949352e-01 -7.23991334e-01 3.40413958e-01 ... 6.79903328e-02\n -1.54964939e-01 -9.83462572e-01]\n ...\n [ 5.62261567e-02 2.17060149e-01 -2.57077515e-01 ... 3.20311934e-01\n -6.19823039e-02 -2.06713021e-01]\n [-4.58756760e-02 -3.28899547e-02 3.11689433e-02 ... -1.64843053e-02\n -3.56855132e-02 7.14016554e-04]\n [-3.44051331e-01 2.93963224e-01 -1.03997543e-01 ... 2.70692371e-02\n 3.51869017e-01 1.77476093e-01]]\n[ 6. 6. 2. 1. -1. 2. 6. 4. 3. 5. 6. 4. 3. 3. 2. -1. 2. 2.\n 2. 5. 6. -1. 1. 1. 1. 4. -1. 6. 4. 6. 2. -1. 3. 6. 6. -1.\n 5. -1. 6. 6. 2. 1. 6. 1. 6. 4. -1. 6. 1. -1. 5. 5. -1. 5.\n 2. -1. -1. -1. 4. -1. 6. 6. 6. -1. 3. 5. 4. 6. 6. 3. -1. 2.\n 2. 2. -1. -1. 3. 5. 2. 6. 3. 2. 4. 3. 4. -1. 4. 1. 6. 5.\n -1. 4. 6. 5. 6. 2. 2. -1. 4. -1. -1. 3. 4. -1. 4. 6. 3. 2.\n 6. -1. -1. 2. -1. 3. 1. -1. 2. 1. 6. 1. 2. 6. 2. 6. 5. 3.\n 5. 5. 6. 3. 2. -1. 2. 5. 5. 5. -1. 6. 5. -1. 2. 1. 6. 2.\n -1. 3. 2. 4. 6. 5. 6. -1. -1. 1. 5. 6. 3. 5. 2. 4. 3. 6.\n 4. 1. 4. 4. 4. 2. 1. 2. 5. 1. 4. 5. 3. 2. -1. 3. -1. 5.\n 3. 6. 5. 4. 2. 5. -1. 6. 2. 1. -1. 4. 3. -1. 4. 1. 6. 6.\n 2. 6. 5. 1. 4. 6. -1. 3. 6. -1. 3. 4. -1. 4. 2. -1. 6. 3.\n -1. 2. 5. 6. 1. 6. 4. 5. -1. 5. 3. 1. 1. -1. -1. 6. 3. 2.\n 5. 1. 2. -1. -1. 3. -1. -1. 2. 5. 6. 4. 5. 4. -1. -1. 1. 4.\n 3. 2. 5. -1. 6. 6. -1. 2. 5. 1. -1. 1. -1. 6. 6. -1. 4. 4.\n 1. 5. 6. -1. -1. 6. -1. 3. 2. -1. 1. 1. 4. -1. 4. 3. -1. 1.\n 2. 5. 6. 2. 5. -1. 4. 5. 1. 5. -1. 2. -1. 3. 2. 5. 3. 6.\n 4. 2. 5. 6. 3. 2. -1. 5. 3. -1. 1. -1. 6. 3. 6. -1. 6. 6.\n 2. 2. 4. 3. 1. 5. 6. -1. 2. 3. -1. 4. -1. 2. 6. 2. 5. 6.\n 4. 6. 6. 4. 2. 3. 1. 6. 6. 1. 3. 1. 1. 1. 6. 6. 5. -1.\n 1. 1. -1. 1. 6. -1. 3. 6. 1. 4. 4. 6. 4. 6. 4. 1. -1. 6.\n 1. 2. 6. 4. -1. 2. 6. 2. -1. -1. 6. 4. -1. 1. 6. 4. 4. 6.\n 6. 6. -1. -1. 1. 3. -1. 6. 2. -1. 1. 4. -1. 6. 1. 4. 3. 3.\n 4. 1. 6. -1. 4. 4. 1. 1. 6. 6. -1. 4. 4. 4. 3. 2. 6. -1.\n 1. 6. 4. 4. 4. 5. 6. -1. -1. 5. 2. 6. 1. 6. 3. 2. 6. 3.\n 3. 1. 
2. 5. 2. -1. -1. 1. 6. 6. -1. 6. 6. 6. 4. 6. -1. 2.\n 3. 2. 5. 4. 4. 6. 4. -1. 4. 2. 6. 1. 1. 2. -1. 5. 2. 4.\n 3. -1. 6. 2. 5. 2. 2. 5. 5. 4. 2. 1. -1. 1.]\n(500, 100)\n(500,)\n\"\"\"\n\nfrom sklearn.semi_supervised import LabelSpreading\n\nlabel_propagation_model = LabelSpreading()\nlabel_propagation_model.fit(X, y)\n\n# make predictions for first twenty samples (some will be known, some unknown)\nfor i in range(20): print('y: ', y[i], '\\t', 'y_hat: ', label_propagation_model.predict(X[i].reshape(1,-1)))\n\"\"\"\ny: 6.0 y_hat: [6.]\ny: 6.0 y_hat: [6.]\ny: 2.0 y_hat: [2.]\ny: 1.0 y_hat: [1.]\ny: -1.0 y_hat: [6.] *\ny: 2.0 y_hat: [2.]\ny: 6.0 y_hat: [6.]\ny: 4.0 y_hat: [4.]\ny: 3.0 y_hat: [3.]\ny: 5.0 y_hat: [5.]\ny: 6.0 y_hat: [6.]\ny: 4.0 y_hat: [4.]\ny: 3.0 y_hat: [3.]\ny: 3.0 y_hat: [3.]\ny: 2.0 y_hat: [2.]\ny: -1.0 y_hat: [4.] *\ny: 2.0 y_hat: [2.]\ny: 2.0 y_hat: [2.]\ny: 2.0 y_hat: [2.]\ny: 5.0 y_hat: [5.]\n\"\"\"\n\n# plot\n\n# - store ixs of -1's in X\n# - store full labels array (using the predict method on the -1's)\n# - reduce the whole input matrix to 2d with tsne and the cosine distance\n# - iterate through the formation of the scatter plot, making the marker a diamond\n# instead of a circle if the iterant is in the stored -1 ixs\n\n# how does it look!?\nunseen_ixs = [ i for i, elt in enumerate(y) if elt == -1.0 ]\nprint(unseen_ixs)\nclass_labels = [ elt if elt != -1 else label_propagation_model.predict(X[i].reshape(1,-1))[0] for i, elt in enumerate(y) ]\n\ndist_metric = 1.0 - cosine_similarity(X)\ntsne_model = TSNE(metric=\"precomputed\")\nX_reduced = tsne_model.fit_transform( abs(dist_metric) ) # https://github.com/scikit-learn/scikit-learn/issues/5772\n\ncolours = ['#F18F01', '#048BA8', '#2E4057', '#99C24D', '#FF2216', '#D4ADCF']\nfor i in range(len(X)):\n if i in unseen_ixs:\n plt.scatter( X_reduced[i, 0], X_reduced[i, 1], color=colours[ int(class_labels[i] - 1) ], s=20*2**4 )\n else:\n plt.scatter( X_reduced[i, 0], X_reduced[i, 1], color=colours[ int(class_labels[i] - 1) ], s=20*2**2 )\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"AI/Semi-supervised Learning/scraping_oil_gas_corpus/nlp_code/doc2vec_agglo_clustering_with_clf.py","file_name":"doc2vec_agglo_clustering_with_clf.py","file_ext":"py","file_size_in_byte":26111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"10658435","text":"import sys\nfrom PyQt5.QtWidgets import QWidget, QApplication\nfrom PyQt5.QtGui import QPainter, QColor, QFont, QPen, QBrush\nfrom PyQt5.QtCore import Qt\n\nclass MockCalibration(QWidget):\n\n def __init__(self):\n # # willie's screen\n # self.screenx = 1920\n # self.screeny = 1080\n\n # # khrisna's screen\n # self.screenx = 3840\n # self.screeny = 2160\n\n # kai's screen\n self.screenx = 2560\n self.screeny = 1440\n\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(0, 0, self.screenx, self.screeny)\n self.setWindowTitle('Mock Calibration Screen')\n self.setStyleSheet(\"background-color: black\")\n self.showFullScreen()\n\n def paintEvent(self, event):\n painter = QPainter()\n painter.begin(self)\n painter.setBrush(QBrush(Qt.blue, Qt.SolidPattern))\n\n\n # painter.drawEllipse(self.screenx/8, self.screeny/6, 50, 50)\n # painter.drawEllipse(self.screenx *1/2, self.screeny/6, 50, 50)\n # painter.drawEllipse(self.screenx *7/8, self.screeny/6, 50, 50)\n #\n #\n # painter.drawEllipse(self.screenx/8, self.screeny *5/6, 50, 50)\n # painter.drawEllipse(self.screenx *1/2, self.screeny *5/6, 50, 50)\n # 
painter.drawEllipse(self.screenx *7/8, self.screeny *5/6, 50, 50)\n\n ## DRAWING A SQUARE\n painter.setPen(QPen(Qt.blue, 50, Qt.SolidLine))\n # length = 300\n # startx = 1097 # between 0 to screenlength - length (2560 - length)\n # starty = 576 # between 0 to screenheight - length (1440 - length)\n #\n # painter.drawLine(startx, starty, startx, starty + length)\n # painter.drawLine(startx + length, starty, startx + length, starty + length)\n # painter.drawLine(startx, starty + length, startx + length, starty + length)\n # painter.drawLine(startx, starty, startx + length, starty)\n\n\n ## CALIBRATION GRID DOTS\n #before: 5 rows, 16 columns\n painter.setPen(QPen(Qt.blue, 8, Qt.SolidLine))\n for row in range(4):\n for col in range(6):\n print('\\ncol:', self.screenx * (col + 1) / 11, 'row', self.screeny * (row + 1) / 5)\n painter.drawEllipse(self.screenx * (col + 1) / 7, self.screeny * (row + 1) / 5, 50, 50)\n\n painter.end()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MockCalibration()\n sys.exit(app.exec_())\n\n\n######################################################################################################################\n# OLD GRAPHICS\n######################################################################################################################\n\n# from graphics import *\n#\n# # creates a 1366 x 768 black window (EASY TO CHANGE) and\n# # draws a 3 x 2 grid of blue dots equally spaced.\n# # clicking the screen will close program.\n# def display_to_screen():\n# # background size and color\n# # 16:9 aspect ratio window size\n# screenx = 1366\n# screeny = 768\n# win = GraphWin(\"Calibration Screen\", screenx, screeny)\n# win.setBackground(\"black\")\n#\n# # Location of the 6 dots. Right now is 2row x 3cols, equally spaced\n# topleft = Point(screenx / 4, screeny / 3)\n# topmid = Point(screenx * 2 / 4, screeny / 3)\n# topright = Point(screenx * 3 / 4, screeny / 3)\n# botleft = Point(screenx / 4, screeny * 2 / 3)\n# botmid = Point(screenx * 2 / 4, screeny * 2 / 3)\n# botright = Point(screenx * 3 / 4, screeny * 2 / 3)\n#\n# # points drawn with 'blue' fill\n# ctopleft = Circle(topleft, 10)\n# ctopleft.draw(win)\n# ctopleft.setFill(\"blue\")\n#\n# ctopmiddle = Circle(topmid, 10)\n# ctopmiddle.draw(win)\n# ctopmiddle.setFill(\"blue\")\n#\n# ctopright = Circle(topright, 10)\n# ctopright.draw(win)\n# ctopright.setFill(\"blue\")\n#\n# cbotleft = Circle(botleft, 10)\n# cbotleft.draw(win)\n# cbotleft.setFill(\"blue\")\n#\n# cbotmid = Circle(botmid, 10)\n# cbotmid.draw(win)\n# cbotmid.setFill(\"blue\")\n#\n# cbotright = Circle(botright, 10)\n# cbotright.draw(win)\n# cbotright.setFill(\"blue\")\n#\n# # click will close\n# win.getMouse()\n# win.close()\n#\n# display_to_screen()\n","sub_path":"Microsoft Demo/display_to_screen.py","file_name":"display_to_screen.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"286942634","text":"#!/usr/bin/env python\n\nfrom canari.maltego.message import Label, Field\nfrom canari.framework import configure\nfrom common.entities import BitcoinTransaction, BitcoinAddress\nfrom common.blockexplorer import bitcoin_address\nfrom canari.maltego.message import MaltegoException\n\n__author__ = 'bostonlink'\n__copyright__ = 'Copyright 2014, Bitcoin-explorer Project'\n__credits__ = []\n\n__license__ = 'GPL'\n__version__ = '0.1'\n__maintainer__ = 'bostonlink'\n__email__ = 'bostonlink@igetshells.io'\n__status__ = 'Development'\n\n__all__ = [\n 
'dotransform'\n]\n\n\n@configure(\n label='To Sent Transactions [Bitcoin-Explorer]',\n description='Returns sent transactions as BitcoinTransaction entities',\n uuids=[ 'bitcoin-explorer.v2.BitcoinAddressToSentTransactionID' ],\n inputs=[ ( 'Bitcoin Explorer', BitcoinAddress) ],\n remote=False,\n debug=False\n)\n\ndef dotransform(request, response, config):\n \n try:\n btc_add = bitcoin_address(request.value)\n \n for trans in btc_add['transactions']:\n\n if 'Sent' in trans['transaction_type']:\n e = BitcoinTransaction(trans['transaction_hash'],\n trans_type = trans['transaction_type'],\n amount = trans['transaction_amount'],\n trans_uri = trans['transaction_uri'],\n address = request.value)\n e += Field(\"date\", trans['date'], displayname='Date')\n e += Label(\"Bitcoin Address\", request.value)\n e += Label(\"Total Amount of Transaction\", trans['transaction_amount'])\n e += Label(\"Transaction Type\", trans['transaction_type'])\n e += Label(\"Transaction Date\", trans['date'])\n e.linklabel = 'Sent'\n \n response += e\n\n else:\n pass\n\n return response\n\n except Exception as e:\n raise MaltegoException('An error occured: %s' % e)\n","sub_path":"src/bitcoin-explorer/transforms/toSentTransactionID.py","file_name":"toSentTransactionID.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"150475822","text":"from django.conf import settings\nfrom django.db import models\nfrom threading import currentThread\nfrom proso.django.request import is_user_id_overridden, is_time_overridden\nfrom django.dispatch import receiver\nfrom django.db.models.signals import pre_save\nfrom proso.django.response import BadRequestException\nimport hashlib\nimport importlib\nimport json\nimport abc\n\n_is_user_overriden_from_url = {}\n_is_time_overriden_from_url = {}\n\n\ndef reset_url_overridden():\n global _is_user_overriden_from_url\n global _is_time_overriden_from_url\n _is_user_overriden_from_url[currentThread()] = False\n _is_time_overriden_from_url[currentThread()] = False\n\n\nclass CommonMiddleware(object):\n def process_request(self, request):\n reset_url_overridden()\n global _is_user_overriden_from_url\n global _is_time_overriden_from_url\n _is_user_overriden_from_url[currentThread()] = is_user_id_overridden(request)\n _is_time_overriden_from_url[currentThread()] = is_time_overridden(request)\n\n\ndef get_content_hash(content):\n return hashlib.sha1(content.encode()).hexdigest()\n\n\ndef get_custom_exports():\n result = {}\n for app in settings.INSTALLED_APPS:\n try:\n app_models = importlib.import_module('%s.models' % app)\n if not hasattr(app_models, 'PROSO_CUSTOM_EXPORT'):\n continue\n result[app] = {'custom_{}_{}'.format(app, name): sql for (name, sql) in app_models.PROSO_CUSTOM_EXPORT.items()}\n except ImportError:\n continue\n return result\n\n\ndef get_tables_allowed_to_export():\n tables = {}\n for app in settings.INSTALLED_APPS:\n try:\n app_models = importlib.import_module('%s.models' % app)\n if not hasattr(app_models, 'PROSO_MODELS_TO_EXPORT'):\n continue\n tables[app] = [(model._meta.pk.column, model._meta.db_table) for model in app_models.PROSO_MODELS_TO_EXPORT]\n except ImportError:\n continue\n return tables\n\n\ndef get_integrity_checks():\n checks = []\n for app in settings.INSTALLED_APPS:\n try:\n app_models = importlib.import_module('%s.models' % app)\n if not hasattr(app_models, 'PROSO_INTEGRITY_CHECKS'):\n continue\n checks += [check_class() for check_class in 
app_models.PROSO_INTEGRITY_CHECKS]\n except ImportError:\n continue\n return checks\n\n\nclass IntegrityCheck:\n\n @abc.abstractmethod\n def check(self):\n \"\"\"\n Perform integrity check\n\n Returns:\n None if everything is OK, message (dict) otherwise\n \"\"\"\n pass\n\n\nclass ConfigManager(models.Manager):\n\n def from_content(self, content, app_name=None, key=None):\n try:\n content = json.dumps(content, sort_keys=True)\n content_hash = get_content_hash(content)\n return self.get(content_hash=content_hash, app_name=app_name, key=key)\n except Config.DoesNotExist:\n config = Config(\n content=content,\n content_hash=content_hash)\n config.save()\n return config\n\n\nclass Config(models.Model):\n\n app_name = models.CharField(max_length=100, null=True, blank=True)\n key = models.CharField(max_length=100, null=True, blank=True)\n content = models.TextField(null=False, blank=False)\n content_hash = models.CharField(max_length=40, null=False, blank=False, db_index=True)\n\n objects = ConfigManager()\n\n def to_json(self, nested=False):\n return {\n 'id': self.id,\n 'object_type': 'config',\n 'content': json.loads(self.content),\n 'key': self.key,\n 'app_name': self.app_name,\n }\n\n\n@receiver(pre_save)\ndef check_user_or_time_overridden(sender, instance, **kwargs):\n instance_class = '{}.{}'.format(instance.__class__.__module__, instance.__class__.__name__)\n if instance_class.endswith('Session') or instance_class.endswith('UserStat'):\n return\n if _is_user_overriden_from_url.get(currentThread(), False):\n raise BadRequestException(\"Nothing ({}) can be saved when the user is overridden from URL.\".format(instance_class))\n if _is_time_overriden_from_url.get(currentThread(), False):\n raise BadRequestException(\"Nothing ({}) can be saved when the time is overridden from URL.\".format(instance_class))\n","sub_path":"proso_common/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"643590943","text":"'''\n\tCSC 111 Fall 2017 Semester Project\n\n\tThis is a \"stub\" version, i.e. an incomplete version in which you can test your new\n\tfunctions.\n'''\norgs = []\n\nfrom ignore import *\nfrom leven import *\naffinity_algo_choice = 'levenshtein'\n\t\ndef help():\n\tprint(\"Commands:\\n---------------------\")\n\tprint(\" stop -- end this program\")\n\tprint(\" help -- get a list of commands\")\n\tprint(\" list -- list all the organizations\")\n\tprint(\" list sorted -- list the organizations' names in sorted order\")\n\tprint(\" find X -- find all organizations with a partial name X\")\n\tprint(\" find activity X -- find all organizations with an activity that partially matches X\")\n\tprint(\" describe X -- list the activities of organization X\")\n\tprint(\" keywords -- show for each organization the keywords found in their activity list\")\n\tprint(\" affinity -- calculate the affinity score for a user with each organization\")\n\treturn\n\ndef dump():\n\tprint(orgs)\n\treturn\n\ndef find():\n '''\n This function checks whether the desired organization is in the organization list\n '''\n desired = input(\"Which organization to find? 
\")\n if desired in orgs:\n print(\"We found your organization: \", orgs[desired])\n else:\n print(\"Organization not found!\")\n\ndef keyword_set():\n '''\n This function gets the keywords specific for each organization with in the list\n '''\n for org in orgs:\n keyword_set = set()\n for activity in org[1]:\n for words in activity.split():\n if words not in ignore_words:\n keyword_set.add(words)\n keyword_list = sorted(list(keyword_set))\n ', '.join(keyword_list)\n print(org[0], ':', sep='')\n print()\n print('keywords =', keyword_list)\n print()\n keywordlist = []\n \n return\ndef get_interests():\n '''\n this function takes in the users interests, and returns them in a list of strings.\n '''\n interests = []\n while True:\n interest = input('Enter an interest for an activity (To stop press enter): ')\n if interest == '':\n break\n else:\n interests.append(interest)\n return interests\n\ndef calc_affinity(org, user_interests):\n '''\n This function takes the the activities with in an organization and counts how many of the user's\n interests match he organization's using levenshtein.\n '''\n count = 0\n for interest in user_interests:\n for activity in org:\n if levenshtein(activity,interest)/len(activity) < 0.25:\n count += 1 \n return count\n\ndef calc_affinity_exact(org, user_interests):\n '''\n This function takes the the activitys with in an organization and counts how many of the user's\n interests exactly match the organization's activity.\n '''\n count = 0\n for interest in user_interests:\n for activity in org:\n if interest == activity:\n count += 1\n return count\ndef calc_affinity_contains(org, user_interests):\n '''\n This function takes the the activitys with in an organization and counts how many of the user's\n interests is a near match to the organization activity.\n '''\n count = 0\n for interest in user_interests:\n for activity in org:\n if interest in activity:\n count += 1\n return count\n \ndef near_enough(user_interests):\n '''\n '''\n global affinity_algo_choice\n \n for i in range(len(user_interests)):\n user_interests[i] = user_interests[i].lower()\n user_interests[i] = user_interests[i].rstrip()\n\n if affinity_algo_choice == 'exact':\n for org in orgs:\n print(org[0] + \":\", str(calc_affinity_exact(org[1], user_interests)))\n if affinity_algo_choice == 'contains':\n for org in orgs:\n print(org[0] + \":\", str(calc_affinity_contains(org[1], user_interests)))\n if affinity_algo_choice == 'levenshtein':\n for org in orgs:\n print(org[0] + \":\", str(calc_affinity(org[1], user_interests)))\ndef main():\n\tprint(\"Not all of these commands work!\")\n\n\tgetDatabase()\n\n\twhile True:\n\t\tcommand = input(\"\\nEnter command: \")\n\t\tif command == 'stop' or command == 'end' or command == 'quit':\n\t\t\tbreak\n\t\telif command.startswith(\"find activity \"):\n\t\t\tfind_activity(command[14:])\n\t\telif command == (\"find \"):\n\t\t\tfind()\n\t\telif command == 'list':\n\t\t\tshow()\n\t\telif command == 'list sorted':\n\t\t\tlistme(\"sorted\")\n\t\telif command.startswith(\"describe \"):\n\t\t\tdescribe(command[9:])\n\t\telif command == 'help' or command == '?':\n\t\t\thelp()\n\t\telif command == 'dump':\n\t\t\tdump()\n\t\telif command == 'keywords':\n\t\t\tkeyword_set()\n\t\telif command == 'affinity':\n\t\t\tuser_interests = get_interests()\n\t\t\tnear_enough(user_interests)\n\t\telse:\n\t\t\tprint(\"Command is illegal! 
Try help\")\n\n#------------------------------------------------------do not change below this line-----------------------------------------------------------\ndef readFile(filename):\n\t'''\n\t\tRead contents of file whose name is filename, and return it.\n\t\tReturn empty string on error.\n\t'''\n\ttry:\n\t\tf = open(filename,'r')\n\t\ttext = f.read()\n\t\tf.close()\n\t\treturn text\n\texcept:\n\t\tprint('Error in readfile: filename='+filename)\n\t\treturn ''\n\ndef getDatabase():\n lines = readFile(\"database.txt\").split(\"\\n\")\n \n global orgs\n orgs = [] # list of all organizations. this is a list of tuples\n current_name = \"\" # the name of the current organization we are reading in\n current_list = [] # the current organization's list of activities\n \n for line in lines:\n \tif line.startswith(\"name:\"):\n \t\tif len(current_name) > 0:\n \t\t\torgs.append([current_name, current_list])\n \t\tcurrent_name = line[5:]\n \t\tcurrent_list = []\n \telif len(line) > 0:\n \t\tcurrent_list.append(line)\n \n orgs.append([current_name, current_list])\n\nmain()\n","sub_path":"CSC 111/Lab 9/project_stub.py","file_name":"project_stub.py","file_ext":"py","file_size_in_byte":6479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"267823697","text":"######################################################################################\n# GAUSSINT.PY\n# Basic Number Theory functions implemented in Python\n# Note: Currently requires Python 3.x (uses floordiv, changes to the \"types\" module…)\n# Author: Robert Campbell, \n# Modified: Hubert Holin, \n# Date: 17 March, 2019\n# Version 1.2\n# License: Simplified BSD (see details at bottom)\n# Requirements:\n#\tRequires at least Python 3.x (runs fine on Python 3.6)\n# Bugs:\n#\tNone currently known.\n######################################################################################\n__version__ = '1.2' # Format specified in Python PEP 396\nVersion = 'GAUSSINT.PY, version ' + __version__ +\\\n\t', 8 June, 2013, by Robert Campbell, '+\\\n\t', modified 17 March 2019 by Hubert Holin, '\n\n\nimport math\t# For tools used in primality testing\n\n\nclass GaussInt:\n\t\"\"\"Gaussian Integer functions.\n\tFunctions implemented are:\n\t Arithmetic functions: +,*,//,%,**(exponentiation)\n\t a.gcd(b) - Compute the greatest common divisor of a and b.\n\t a.xgcd(b) - Extended gcd - return gcd(a,b) and x,y such that gcd(a,b)=xa+yb.\n\t n.isprime() - Is n prime (pseudoprime tests)\n\t n.factor() - Return a factor of n.\n\t n.factors() - Return list of the factors of n.\n\tGaussian Integers can be created by:\n\t n = GaussInt(5,7) # Create (5 + 7i)\n\t n = GaussInt(13) # Create (5 + 0i)\n\t z = complex(2,3); n = GaussInt(z) # Round the complex number to integer form\n\tA list of the functions implemented in GaussInt is printed by the command help(GaussInt).\n\tUsage: from gaussint import * \"\"\"\n\t\n\t\n\tdef __init__(self, a = 0, b = 0):\n\t\t\n\t\tif (type(a) is complex):\n\t\t\t\n\t\t\tif b != 0:\n\t\t\t\t\n\t\t\t\traise TypeError(\"Attempting to ceate a Gauss Integer from a complex \"+\n\t\t\t\t\t\"number and another input ({0:s} and {1:s})!\".format(a, b))\n\t\t\t\n\t\t\tself.r = round(a.real)\n\t\t\tself.i = round(a.imag)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tself.r = int(a)\n\t\t\tself.i = int(b)\n\t\n\t\n\tdef __str__(self): # Overload string conversion used by print\n\t\t\n\t\treturn \"(\" + str(self.r) + ((\" + \"+str(self.i)) if (self.i >= 0) else (\" - \"+str(-self.i))) + \" i)\"\n\t\n\t\n\tdef 
__format__(self, spec):# Overload string conversion used by format\n\t\t\n\t\treturn \"(\" + str(self.r) + ((\" + \"+str(self.i)) if (self.i >= 0) else (\" - \"+str(-self.i))) + \" i)\"\n\t\n\t\n\tdef __repr__(self): # Overload conversion used for output\n\t\t\n\t\treturn \"GaussInt(\" + str(self.r) + \", \" + str(self.i) + \")\"\n\t\n\t\n\tdef __complex__(self): # Allow conversion to complex type\n\t\treturn complex(self.r, self.i)\n\t\n\t\n\tdef __eq__(self,other): # Overload the \"==\" test operator - NOTE: differs from version 1.1\n\t\t\n\t\tif (type(other) is not GaussInt):\n\t\t\t\n\t\t\treturn False\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn (self.r == other.r) and (self.i == other.i)\n\t\n\t\n\tdef __ne__(self,other): # Overload the \"!=\" test operator\n\t\t\n\t\treturn not (self == other)\n\t\n\t\n\tdef neutral_element_for_multiplication():\n\t\t\n\t\treturn __class__(1)\n\t\n\t\n\tdef conjugate(self):\n\t\t\n\t\treturn GaussInt(self.r, -self.i)\n\t\n\t\n\tdef norm(self):\n\t\t\n\t\treturn self.r*self.r + self.i*self.i\n\t\n\t\n\tdef __pos__(self):\t # Overload the \"+\" unary operator \n\t\t\n\t\treturn self\n\t\n\t\n\tdef add(self,summand):\n\t\t\n\t\tsum_r = self.r + summand.r\n\t\tsum_i = self.i + summand.i\n\t\t\n\t\treturn GaussInt(sum_r, sum_i)\n\t\n\t\n\tdef __add__(self,summand): # Overload the \"+\" binary operator\n\t\t\n\t\tif type(summand) is int:\n\t\t\t\n\t\t\treturn GaussInt(self.r+summand, self.i)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn self.add(summand)\n\t\n\t\n\tdef __radd__(self,summand): # Overload the \"+\" binary operator\n\t\t\n\t\tif type(summand) is int:\n\t\t\t\n\t\t\treturn GaussInt(self.r+summand, self.i)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn self.add(summand)\n\t\n\t\n\tdef __iadd__(self,summand): # Overload the \"+=\" operator\n\t\t\n\t\tself = self + summand\n\t\t\n\t\treturn self\n\t\n\t\n\tdef __neg__(self): # Overload the \"-\" unary operator \n\t\t\n\t\treturn GaussInt(-self.r,-self.i)\n\t\n\t\n\tdef __sub__(self,summand): # Overload the \"-\" binary operator\n\t\t\n\t\treturn self.__add__(-summand)\n\t\n\t\n\tdef __rsub__(self,summand): # Overload the \"-\" binary operator\n\t\t\n\t\tif type(summand) is int:\n\t\t\t\n\t\t\treturn GaussInt(summand-self.r, -self.i)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn summand-self\n\t\n\t\n\tdef __isub__(self,summand): # Overload the \"-=\" operator\n\t\t\n\t\tself = self - summand\n\t\t\n\t\treturn self\n\t\n\t\n\tdef mult(self,multip):\n\t\t\n\t\tprod_r = (self.r * multip.r) - (self.i * multip.i)\n\t\tprod_i = (self.i * multip.r) + (self.r * multip.i)\n\t\t\n\t\treturn GaussInt(prod_r, prod_i)\n\t\n\t\n\tdef __mul__(self,multip): # Overload the \"*\" operator\n\t\t\n\t\tif type(multip) is int:\n\t\t\t\n\t\t\treturn GaussInt(self.r*multip, self.i*multip)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn self.mult(multip)\n\t\n\t\n\tdef __rmul__(self,multip): # Overload the \"*\" operator\n\t\t\n\t\tif type(multip) is int:\n\t\t\t\n\t\t\treturn GaussInt(self.r*multip, self.i*multip)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn self.mult(multip)\n\t\n\t\n\tdef __imul__(self,multip): # Overload the \"*=\" operator\n\t\t\n\t\tself = self * multip\n\t\t\n\t\treturn self\n\t\n\t\n\tdef floordiv(self,divisor):\n\t\t\n\t\tif type(divisor) is int:\n\t\t\t\n\t\t\tnumerator = (-self if (divisor < 0) else self)\n\t\t\t\n\t\t\tdenominator = (-divisor if (divisor < 0) else divisor)\n\t\t\t\n\t\t\tif denominator == 0:\n\t\t\t\t\n\t\t\t\traise ZeroDivisionError(\"{0:s} is null!\".format(divisor))\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tnumerator = 
self*divisor.conjugate()\n\t\t\t\n\t\t\tdenominator = divisor.norm()\t# Recall that denominator >= 0\n\t\t\t\n\t\t\tif denominator == 0:\n\t\t\t\t\n\t\t\t\traise ZeroDivisionError(\"{0:s} is null!\".format(divisor))\n\t\t\n\t\tcandidate_r = numerator.r//denominator\n\t\tcandidate_i = numerator.i//denominator\n\t\t\n\t\t# i.e. (candidate_r+1)*denominator-numerator.r < numerator.r-candidate_r*denominator\n\t\tif (2*candidate_r+1)*denominator < 2*numerator.r:\n\t\t\t\n\t\t\tcandidate_r += 1\n\t\t\n\t\t# i.e. (candidate_i+1)*denominator-numerator.i < numerator.i-candidate_i*denominator\n\t\tif (2*candidate_i+1)*denominator < 2*numerator.i:\n\t\t\t\n\t\t\tcandidate_i += 1\n\t\t\n\t\treturn GaussInt(candidate_r,candidate_i)\n\t\n\t\n\tdef __floordiv__(self,divisor): # Overload the \"//\" operator\n\t\t\n\t\treturn self.floordiv(divisor)\n\t\n\t\n\tdef __ifloordiv__(self,divisor): # Overload the \"//=\" operator\n\t\t\n\t\tself = self//divisor\n\t\t\n\t\treturn self\n\t\n\t\n\tdef mod(self,divisor):\n\t\t\n\t\treturn self - divisor * (self//divisor)\n\t\n\t\n\tdef __mod__(self,divisor): # Overload the \"%\" operator\n\t\t\n\t\treturn self.mod(divisor)\n\t\n\t\n\tdef __imod__(self,divisor): # Overload the \"%=\" operator\n\t\t\n\t\tself = self % divisor\n\t\t\n\t\treturn self\n\t\n\t\n\tdef divmod(self,divisor):\n\t\t\n\t\tq = self//divisor\n\t\t\n\t\treturn q, self - divisor * q\n\t\n\t\n\tdef xgcd(self,other):\n\t\t\n\t\tquot = GaussInt()\n\t\t\n\t\ta1 = GaussInt(1,0)\n\t\tb1 = GaussInt(0,0)\n\t\t\n\t\ta2 = GaussInt(0,0)\n\t\tb2 = GaussInt(1,0)\n\t\t\n\t\ta = self\n\t\tb = other\n\t\t\n\t\tif(b.norm() > a.norm()):\t# Need to start with a>b\n\t\t\t\n\t\t\ta,b = b,a\t\t\t\t\t# Swap a and b\n\t\t\ta1,b1,a2,b2 = a2,b2,a1,b1\t# Swap (a1,b1) with (a2,b2)\n\t\t\n\t\twhile (True):\n\t\t\t\n\t\t\tquot = a // b\n\t\t\t\n\t\t\ta %= b\n\t\t\t\n\t\t\ta1 -= quot*a2\n\t\t\tb1 -= quot*b2\n\t\t\t\n\t\t\tif (a == GaussInt(0,0)):\n\t\t\t\t\n\t\t\t\treturn b, a2, b2\n\t\t\t\n\t\t\tquot = b // a\n\t\t\t\n\t\t\tb %= a\n\t\t\t\n\t\t\ta2 -= quot*a1\n\t\t\tb2 -= quot*b1\n\t\t\t\n\t\t\tif (b == GaussInt()):\n\t\t\t\t\n\t\t\t\treturn a, a1, b1\n\t\n\t\n\tdef Bézout(self, other):\n\t\t\n\t\ta = self\n\t\tb = other\n\t\t\n\t\tif a.norm() < b.norm():\n\t\t\t\n\t\t\t(u, v, pgcd) = b.Bézout(a)\n\t\t\t\n\t\t\treturn (v, u, pgcd)\n\t\t\n\t\tif b == 0:\n\t\t\t\n\t\t\treturn (1, 0, a)\n\t\t\n\t\tu_n, u_n_moins_1, v_n, v_n_moins_1 = 0, 1, 1, 0\n\t\t\n\t\twhile b.norm() > 0:\n\t\t\n\t\t\tq,r = a.divmod(b)\n\t\t\t\n\t\t\tu_n_plus_1 = u_n_moins_1 - q*u_n\n\t\t\tv_n_plus_1 = v_n_moins_1 - q*v_n\n\t\t\t\n\t\t\ta, b = b, r\n\t\t\tu_n_moins_1, u_n, v_n_moins_1, v_n = u_n, u_n_plus_1, v_n, v_n_plus_1\n\t\t\n\t\treturn (u_n_moins_1, v_n_moins_1, a)\n\t\n\t\n\tdef gcd(self,other):\n\t\t\n\t\ta = self\n\t\tb = other\n\t\t\n\t\tif a.norm() < b.norm():\n\t\t\t\n\t\t\treturn b.gcd(a)\n\t\n\t\twhile b.norm() > 0:\n\t\t\n\t\t\tq,r = a.divmod(b)\n\t\t\ta,b = b,r\n\t\n\t\treturn a\n\t\n\t\n\tdef powmod(self, a_power, a_modulus):\n\t# We adapt the Binary Exponentiation algorithm with modulo\n\t\t\n\t\tresult = GaussInt(1)\n\t\t\n\t\tauxilliary = GaussInt(self.r, self.i)\n\t\t\n\t\twhile a_power:\n\t\t\t\n\t\t\tif a_power % 2:\t# If power is odd\n\t\t\t\t\n\t\t\t\tresult = (result * auxilliary) % a_modulus\n\t\t\t\n\t\t\t# Divide the power by 2\n\t\t\ta_power >>= 1\n\t\t\t\n\t\t\t# Multiply base to itself\n\t\t\tauxilliary = (auxilliary * auxilliary) % a_modulus\n\t\t\n\t\treturn result\n\t\n\t\n\tdef __pow__(self, a_power): # Overload the \"**\" 
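The floordiv above effectively rounds each quotient coordinate to the nearest integer; that is what keeps the remainder's norm strictly below the divisor's norm, so the gcd/xgcd loops terminate. A minimal standalone check of that property using Python's built-in complex type instead of GaussInt (illustrative helper names, not part of gaussint.py):

def _norm(z):
    # Squared magnitude, the same norm used by GaussInt.norm()
    return z.real * z.real + z.imag * z.imag

def _divmod_gauss(a, b):
    # Round the exact quotient to the nearest Gaussian integer, as floordiv above does
    q = a / b
    q = complex(round(q.real), round(q.imag))
    return q, a - b * q

a, b = complex(27, 23), complex(8, 1)
q, r = _divmod_gauss(a, b)
assert a == b * q + r
assert _norm(r) < _norm(b)   # remainder norm shrinks, so the Euclidean loop must end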
operator\n\t# We adapt the Binary Exponentiation algorithm (without modulo!)\n\t\t\n\t\tresult = GaussInt(1)\n\t\t\n\t\tauxilliary = GaussInt(self.r, self.i)\n\t\t\n\t\twhile a_power:\n\t\t\t\n\t\t\tif a_power % 2:\t# If power is odd\n\t\t\t\t\n\t\t\t\tresult = result * auxilliary\n\t\t\t\n\t\t\t# Divide the power by 2\n\t\t\ta_power >>= 1\n\t\t\t\n\t\t\t# Multiply base to itself\n\t\t\tauxilliary = auxilliary * auxilliary\n\t\t\n\t\treturn result\n\t\n\t\n\tdef isprime(self):\n\t\t\"\"\"n.isprime() - Test whether the GaussInt n is prime using a variety of pseudoprime tests.\"\"\"\n\t\t# Multiply by (1,i,-1,-i) to rotate to first quadrant (similar to abs)\n\t\tif (self.r < 0): self *= (-1)\n\t\tif (self.i < 0): self *= GaussInt(0,1)\n\t\t# Check some small non-primes\n\t\tif (self in [GaussInt(0,0), GaussInt(1,0), GaussInt(0,1)]): return False\n\t\t# Check some small primes\n\t\tif (self in [GaussInt(1,1), GaussInt(2,1), GaussInt(1,2), GaussInt(3,0), GaussInt(0,3), GaussInt(3,2), GaussInt(2,3)]):\n\t\t\treturn True\n\t\treturn self.isprimeF(2) and self.isprimeF(3) and self.isprimeF(5)\n\t\n\t\n\tdef isprimeF(self,base):\n\t\t\"\"\"n.isprimeF(base) - Test whether the GaussInt n is prime using the\n\t\tGaussian Integer analogue of the Fermat pseudoprime test.\"\"\"\n\t\tif type(base) is not GaussInt:\n\t\t base = GaussInt(base) # Coerce if base not GaussInt (works for int or complex)\n\t\treturn base.powmod(self.norm()-1,self) == GaussInt(1,0)\n\t# Note: Possibly more effective would be to use the characterization of primes\n\t# in the Gaussian Integers based on the primality of their norm and reducing mod 4.\n\t# This depends on the characterization of the ideal class group, and only works for\n\t# simple rings of algebraic integers.\n\t\n\t\n\tdef factor(self):\n\t\t\"\"\"n.factor() - Find a prime factor of Gaussian Integer n using a variety of methods.\"\"\"\n\t\tif (self.isprime()): return n\n\t\tfor fact in [GaussInt(1,1), GaussInt(2,1), GaussInt(1,2), \n\t\t\t GaussInt(3,0), GaussInt(3,2), GaussInt(2,3)]:\n\t\t\tif self%fact == 0: return fact\n\t\treturn self.factorPR() # Needs work - no guarantee that a prime factor will be returned\n\t\n\t\n\tdef factors(self):\n\t\t\"\"\"n.factors() - Return a sorted list of the prime factors of Gaussian Integer n.\"\"\"\n\t\tif (self.isprime()):\n\t\t return [self]\n\t\tfact = self.factor()\n\t\tif (fact == 1): return \"Unable to factor \"+str(n)\n\t\tfacts = (self/fact).factors() + fact.factors()\n\t\treturn facts\n\t\n\t\n\tdef factorPR(self):\t# TODO: learn and test\n\t\t\"\"\"n.factorPR() - Find a factor of Gaussian Integer n using the analogue of the Pollard Rho method. \n\t\tNote: This method will occasionally fail.\"\"\"\n\t\tfor slow in [2,3,4,6]:\n\t\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(self.norm()))); fast=slow; i=1\n\t\t\twhile i>> from GaussInt import *\n# >>> a = GaussInt(1,0)\n\n############################################################################\n# License: Freely available for use, abuse and modification\n# (this is the Simplified BSD License, aka FreeBSD license)\n# Copyright 2001-2013 Robert Campbell. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. 
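The comments above mention the norm-based characterization of Gaussian primes; a standalone sketch of that rule (illustrative only, not part of gaussint.py): an element a+bi with both parts nonzero is prime exactly when its norm a^2+b^2 is an ordinary prime, while a purely real or purely imaginary element is prime when its nonzero part is, up to sign, a rational prime congruent to 3 mod 4.

def _is_rational_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def _is_gaussian_prime(a, b):
    if a == 0 or b == 0:
        p = abs(a + b)                 # the single nonzero component (0 if both are zero)
        return _is_rational_prime(p) and p % 4 == 3
    return _is_rational_prime(a * a + b * b)

# _is_gaussian_prime(1, 1) -> True (norm 2), _is_gaussian_prime(3, 0) -> True,
# _is_gaussian_prime(2, 0) -> False since 2 = (1+i)(1-i).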
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in \n# the documentation and/or other materials provided with the distribution.\n############################################################################\n\n","sub_path":"gaussint.py","file_name":"gaussint.py","file_ext":"py","file_size_in_byte":12300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"563638928","text":"from math import exp as exp\nimport sys\nimport unittest\n\neval_score_max = 100\neval_score_min = 0\n\ndef round_tolerance(val, tolerance):\n return int(val / tolerance) * tolerance\n\ndef has_serial_cnt(series, mark, cnt):\n c = 0\n for v in series:\n if v == mark:\n c += 1\n if c == cnt:\n return True\n else:\n c = 0\n return False\n\ndef has_sub_array(array, subarray):\n # perfect place for dynamic programming? longest common substring\n if len(array) * len(subarray) == 0:\n return False\n s = 0\n while s < len(array):\n if len(array) - s < len(subarray):\n return False\n all_match = True\n sc = s\n for p in subarray:\n if p != array[sc]:\n all_match = False\n break\n sc += 1\n if all_match:\n return True\n s += 1\n return False\n\ndef _eval_bingo(idx, series, mark, win_narabi_cnt):\n # assume not already win_narabi_cnt, if adding one makes it, return full score\n series[idx] = mark\n if has_serial_cnt(series, mark, win_narabi_cnt):\n return eval_score_max\n return eval_score_min\n\ndef own_bingo(idx, series, own_mark, opp_mark, empty_mark, win_narabi_cnt):\n return _eval_bingo(idx, series, own_mark, win_narabi_cnt)\n\ndef opp_bingo(idx, series, own_mark, opp_mark, empty_mark, win_narabi_cnt):\n return _eval_bingo(idx, series, opp_mark, win_narabi_cnt)\n\ndef _eval_almost_bingo(idx, series, mark, empty_mark, win_narabi_cnt):\n # if there's any place that bingo - 1 with both sides empty\n series[idx] = mark\n if has_sub_array(series, [empty_mark] + [mark] * (win_narabi_cnt - 1) + [empty_mark]):\n return eval_score_max\n return eval_score_min\n\ndef own_almost_bingo(idx, series, own_mark, opp_mark, empty_mark, win_narabi_cnt):\n return _eval_almost_bingo(idx, series, own_mark, empty_mark, win_narabi_cnt)\n\ndef opp_almost_bingo(idx, series, own_mark, opp_mark, empty_mark, win_narabi_cnt):\n return _eval_almost_bingo(idx, series, opp_mark, empty_mark, win_narabi_cnt)\n\ndef _sigmoid_raw(x, steepness, mid, x_min, x_max):\n base = 1 / (1 + exp((-1 / float(steepness)) * (x - mid)))\n return base * (x_max - x_min) - x_min\n\ndef _sigmoid100(x):\n return _sigmoid_raw(x, 10,\n (eval_score_max - eval_score_min) / 2.0,\n eval_score_min,\n eval_score_max)\n\ndef freedom(idx, series, own_mark, opp_mark, empty_mark, win_narabi_cnt):\n # only count empty, compare againts 3 win_narabi_cnt, as we don't have board size\n # maybe we should just get a dict with everything here\n empty_cnt = len([x for x in series if x == empty_mark])\n return _sigmoid100(100 * empty_cnt / (3.0 * win_narabi_cnt))\n\ndef center(idx, series, own_mark, opp_mark, empty_mark, win_narabi_cnt):\n # closer to center is better\n half = len(series) / float(2)\n return 100 - _sigmoid100(100 * abs(idx - half) / half)\n\n###########################################\nclass test_eval_func(unittest.TestCase):\n def test_has_sub_array(self):\n print(\"======\", sys._getframe().f_code.co_name)\n tests = [\n [[1, 2, 3, 4, 5], [1, 2, 3], True],\n [[1, 2, 3, 4, 5], [1, 2, 4], False],\n [[1, 2, 3, 4, 5], [1, 2, 4], False],\n ]\n for case in 
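For intuition on the logistic scaling used by freedom() above, the same formula worked through standalone for win_narabi_cnt = 5 (illustrative values; assumes eval_score_min = 0 and eval_score_max = 100 as defined earlier, in which case _sigmoid_raw's trailing "- x_min" term vanishes):

from math import exp

def _sigmoid100_demo(x, steepness=10.0, mid=50.0):
    # Same shape as _sigmoid100 above with a 0..100 score range
    return 100.0 / (1.0 + exp(-(x - mid) / steepness))

for empty_cnt in (0, 5, 10, 15):
    raw = 100 * empty_cnt / (3.0 * 5)        # freedom()'s input scaling
    print(empty_cnt, round(_sigmoid100_demo(raw), 1))
# prints roughly 0.7, 15.9, 84.1 and 99.3 - more open cells, higher score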
tests:\n res = has_sub_array(case[0], case[1])\n print(\"testing {}\".format(case))\n self.assertEqual(case[2], res)\n\n def test_has_sub_array(self):\n print(\"======\", sys._getframe().f_code.co_name)\n tests = [\n [[1, 2, 3, 4, 5], [1, 2, 3], True],\n [[1, 2, 3, 4, 5], [1, 2, 4], False],\n [[1, 2, 2, 4, 5], [2, 2, 4], True],\n ]\n for case in tests:\n res = has_sub_array(case[0], case[1])\n print(\"testing {}\".format(case))\n self.assertEqual(case[2], res)\n\n def test_freedom(self):\n print(\"======\", sys._getframe().f_code.co_name)\n empty = '_'\n tests = [\n [[empty] * 5, 0.0],\n ]\n for case in tests:\n res = freedom(0, case[0], 'X', '_', empty, 5)\n print(\"testing {}\".format(case))\n self.assertEqual(case[1], res)\n# eval proximity - score higher when closer to own mark\nif __name__ == '__main__':\n #unittest.main()\n\n print(\"sigmoid\", _sigmoid100(100))\n\n print(\"**** bingo\")\n series = [0, 1, 1, 1, 1, 0, 0]\n res = []\n for i in range(len(series)):\n res.append(_eval_bingo(i, series[:], 1, 5))\n print(series)\n print(res)\n\n print(\"**** bingo almost\")\n series = [0, 1, 1, 0, 1, 0, 0]\n res = []\n for i in range(len(series)):\n res.append(_eval_almost_bingo(i, series[:], 1, 0, 5))\n print(series)\n print(res)\n\n print(\"**** freedom\")\n series = [0, 1, 1, 0, 1, 0, 0, 0]\n res = []\n for i in range(len(series)):\n res.append(freedom(i, series[:], 1, 2, 0, 5))\n print(series)\n print(res)\n","sub_path":"core/eval_func.py","file_name":"eval_func.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"212399937","text":"# coding=utf-8\n\nimport cv2\n\n\nimg = cv2.imread(\"../Data/Image/banboo.png\", 0)\ncv2.imshow(\"Origin\", img)\n\n# blur\nresult = cv2.blur(img, (5, 5))\ncv2.imshow(\"Blur\", result)\n\n# GaussianBlur\n# 1) 加权:高斯滤波就是对整幅图像进行加权平均的过程,权值就是高斯函数。每一个像素点的值,都由其本身和邻域内的其他像素值\n# 经过加权平均后得到。所以和均值滤波一样(可看做是加权相同),是一种平滑模糊滤波,即低通滤波\n# 2) 数学:I(x0,y0) = sum(G(x,y) * I(x,y)) for (x,y) in 邻域of(x0,y0)\n# 由于这个加权式就是卷积 G(x0,y0)*I(x0,y0),因此也叫高斯卷积滤波\n# 3) 参数:src , ksize:邻域范围,sigmaX : X方向的方差,dst:None 生成图片(c++声明带来的,不用关心),sigmaY:Y方向的方差\n# borderType:边缘的插值方法(边缘像素点不够模板)\n# G(x,y) = a * e^-((x-ux)^2/2*sigmaX^2 + (y-uy)^2/2*sigmaY^2) 其中 a是归一化系数,因为权值和要为1\n# 一般二维图片,取ux=uy=0, sigmaX=sigmaY; 即 G(x,y) = a * e^-(x^2 + y^2)/2*sigma^2\n# 注意:第一点就是ksize的宽和高必须是奇数;\n# 第二点就是如果参数sigmaX=sigmaY=0,则实际用的是公式sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8\n# 4) 尺度与sigma:高斯平滑的高斯函数sigma越大,高斯函数越平坦,邻域的像素值获得权重更大\n# (可以认为当sigma->无穷时就成了均值滤波),图像就会变得越平滑,平滑~=模糊,\n# 图像越模糊,模糊会凸显总体特征,而忽略细节特征,也就是凸显大尺度的特征\n# 所以认为sigma越大,图片可见的特征尺度越大(小尺度都被忽略了)\n# 5) 尺度与放缩:将一个物体图像缩小,\nresult = cv2.GaussianBlur(img, (5, 5), 1.5)\ncv2.imshow(\"GaussianBlur\", result)\n\n# 中值滤波 中值滤波器对消除椒盐现象特别有用\nresult = cv2.medianBlur(img, 5) # 中值滤波器使用5×5的范围来进行每个像素单元的计算\ncv2.imshow(\"medianBlur\", result)\n\nresult = cv2.Sobel(result, cv2.CV_16S, 1, 0) # x 方向的一阶差分\nresult = cv2.convertScaleAbs(result) # 转化会uint8,这样才能正常显示\ncv2.imshow(\"Sobel\", result)\n\n# 二阶差分\nresult = cv2.Laplacian(img, cv2.CV_16S) # 使用CV_16S是为了防止越界\nresult = cv2.convertScaleAbs(result) # 转化会uint8,这样才能正常显示\ncv2.imshow(\"Laplacian\", result)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n\n","sub_path":"MLInAction/cv/filterTest.py","file_name":"filterTest.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"256535988","text":"import discord\nimport aioredis\nimport inspect\nimport asyncio\nimport os\nimport 
re\nimport json\nimport math\n\nfrom utils import dump, find, parse_redis_url\nfrom logger import Logger\nfrom time import time\nfrom aiohttp import web\nfrom rpc import RPCServer, rpc, RPCException\nfrom collections import defaultdict\n\nif not discord.opus.is_loaded():\n if platform == 'linux' or platform == 'linux2':\n discord.opus.load_opus('./libopus.so')\n elif platform == 'darwin':\n discord.opus.load_opus('libopus.dylib')\n\nclass GatewayBot(discord.Client, Logger):\n\n players = dict()\n call_next = defaultdict(lambda: True)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fetch_offline_members = True\n\n self.broker_url = kwargs.get('broker_url')\n self.redis_url = kwargs.get('redis_url')\n\n self._redis_connect()\n self._broker_connect()\n\n self.rpc_server = RPCServer(self)\n\n def __str__(self):\n return 'gateway-{}-{}'.format(self.shard_id,\n self.shard_count)\n\n def _redis_connect(self):\n loop = asyncio.get_event_loop()\n loop.create_task(self.__redis_connect())\n\n def _broker_connect(self):\n loop = asyncio.get_event_loop()\n loop.create_task(self.__broker_connect())\n\n async def __redis_connect(self):\n self.redis = await aioredis.create_redis(\n parse_redis_url(self.redis_url),\n encoding='utf8'\n )\n\n async def __broker_connect(self):\n self.broker = await aioredis.create_redis(\n parse_redis_url(self.broker_url),\n encoding='utf8'\n )\n\n async def send(self, queue, data):\n payload = json.dumps(data)\n await self.broker.lpush(queue, payload)\n\n async def send_dispatch_event(self, event_type, guild, before=None,\n after=None):\n e = dict(ts=time(),\n type=event_type,\n producer=str(self),\n guild=dump(guild))\n\n if before:\n if after:\n e['before'] = dump(before)\n e['after'] = dump(after)\n else:\n e['data'] = dump(before)\n\n self.log(\"{event}:{gid} @ {ts}\".format(event=e['type'],\n gid=e['guild']['id'],\n ts=e['ts']))\n\n await self.send('discord.events.{}'.format(e['type']), e)\n\n # Events handling\n async def on_message(self, message):\n # Ignore private messages\n if not message.guild:\n return\n # Ignore WHs \n if message.author.__class__ is not discord.Member:\n return\n\n await self.send_dispatch_event('MESSAGE_CREATE',\n message.guild,\n message)\n\n async def on_message_delete(self, message):\n # Ignore private messages\n if not message.guild:\n return\n # Ignore webhooks \n if message.author.__class__ is not discord.Member:\n return\n\n await self.send_dispatch_event('MESSAGE_DELETE',\n message.guild,\n message)\n\n async def on_message_edit(self, before, after):\n # Ignore private messages\n if not after.guild:\n return\n # Ignore webhooks \n if after.author.__class__ is not discord.Member:\n return\n\n await self.send_dispatch_event('MESSAGE_EDIT',\n after.guild,\n before,\n after)\n\n async def on_ready(self):\n self.log('Connected to {} guilds'.format(len(self.guilds)))\n self.rpc_server.run()\n for guild in list(self.guilds):\n self.loop.create_task(self.on_guild_ready(guild))\n\n async def on_guild_ready(self, guild):\n await self.send_dispatch_event('GUILD_READY',\n guild)\n\n async def on_guild_join(self, guild):\n await self.send_dispatch_event('GUILD_JOIN',\n guild)\n\n async def on_guild_remove(self, guild):\n await self.send_dispatch_event('GUILD_REMOVE',\n guild)\n\n async def on_guild_update(self, before, after):\n await self.send_dispatch_event('GUILD_UPDATE',\n after,\n before,\n after)\n\n async def on_member_join(self, member):\n await self.send_dispatch_event('MEMBER_JOIN',\n member.guild,\n 
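send_dispatch_event above publishes each event by LPUSHing a JSON payload onto a per-type Redis list, so a worker can pull events back off with a blocking BRPOP. A consumer-side sketch in the same aioredis 1.x style this file already uses (hypothetical module, not part of gateway_bot.py; newer redis clients expose a different API):

import asyncio
import json
import aioredis

async def consume_events(redis_url, queue='discord.events.MESSAGE_CREATE'):
    broker = await aioredis.create_redis(redis_url, encoding='utf8')
    while True:
        # brpop blocks until a payload is available and returns (key, value)
        _key, payload = await broker.brpop(queue)
        event = json.loads(payload)
        print(event['type'], event['guild']['id'], event['ts'])

# asyncio.get_event_loop().run_until_complete(consume_events('redis://localhost'))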
member)\n\n async def on_member_remove(self, member):\n await self.send_dispatch_event('MEMBER_REMOVE',\n member.guild,\n member)\n\n # RPCs\n @rpc\n def get_guild(self, guild_id):\n guild = discord.utils.get(self.guilds, id=guild_id)\n if not guild:\n raise RPCException('guild_not_found')\n\n return guild\n\n @rpc\n def get_voice_channel(self, guild, voice_channel_id):\n if type(guild) in (str, int):\n guild = self.get_guild(guild)\n\n voice_channel = discord.utils.get(guild.channels, id=str(voice_channel_id))\n if not voice_channel:\n raise RPCException('voice_channel_not_found')\n\n if voice_channel.type != discord.ChannelType.voice:\n raise RPCException('not_a_voice_channel')\n\n return voice_channel\n\n @rpc\n async def join_voice(self, guild, voice_channel_id):\n if type(guild) in (str, int):\n guild = self.get_guild(guild)\n\n voice_channel = self.get_voice_channel(guild, voice_channel_id)\n\n voice = guild.voice_client\n if voice:\n await voice.move_to(voice_channel)\n else:\n await self.join_voice_channel(voice_channel)\n\n return voice_channel\n\n @rpc\n async def leave(self, guild):\n if type(guild) in (str, int):\n guild = self.get_guild(guild)\n\n voice = guild.voice_client\n if voice:\n await voice.disconnect()\n\n return None\n\n @rpc\n def get_voice_client(self, guild):\n if type(guild) in (str, int):\n guild = self.get_guild(guild)\n\n voice = guild.voice_client\n if not voice:\n raise RPCException('voice_not_connected')\n\n return voice\n\n async def _ytdl_play_song(self, guild, song_url, after=None):\n if type(guild) in (str, int):\n guild = self.get_guild(guild)\n\n voice = self.get_voice_client(guild)\n\n lock = self.play_locs[guild.id]\n try:\n await lock.acquire()\n\n curr_player = self.players.get(guild.id)\n if curr_player:\n self.call_next[guild.id] = False\n curr_player.stop()\n\n player = await voice.create_ytdl_player(url,\n ytdl_options=opts,\n after=after)\n self.players[guild.id] = player\n player.volume = 0.6\n player.start()\n finally:\n lock.release()\n return voice\n\n @rpc\n async def ytdl_play_song(self, guild, song_url):\n return self._ytdl_play_song(guild, song_url)\n\n @rpc\n async def ytdl_play_songs(self, guild, queue_name):\n def n(player):\n if player.error:\n e = player.error\n import traceback\n log('Error from the player')\n log(traceback.format_exception(type(e), e, None))\n if self.call_next.get(guild.id):\n self.loop.create_task(self.ytdl_play_songs(guild, queue_name))\n self.call_next[guild.id] = True\n\n song_url = redis.rpop(queue_name)\n if song_url:\n return await self._ytdl_play_song(guild, song_url, after=n)\n\n return None\n","sub_path":"gateway/gateway_bot.py","file_name":"gateway_bot.py","file_ext":"py","file_size_in_byte":8199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"569070610","text":"'''\n * @desc : tensorflow实现 alexNet\n * @auth : TYF\n * @date : 2019/8/31 - 15:58\n\n AlexNet特点\n ReLU: 解决了Sigmoid在深网络中的梯度弥散问题\n Dropout: 随机忽略隐层中部分节点(类似图片随机像素点置黑相当于新创建了一个更加模糊但不影响识别的样本)\n LRN: 对局部神经元的活动创建竞争机制,影响较大的节点扩大其影响\n CNN: 使用重叠的最大池化,避免平均池化的模糊化效果,设置步长比池化核尺寸小使得池化层的输出之间会有重叠提升特征丰富性\n\n AlexNet一共8层,5个卷积层,3个全连接层,结构如下:\n\n 卷积层 输入 卷积核(Ksize/channels/number/stride) 激活 输出 数据增强 池化核(Psize/stride) 输出\n conv1 224*224*3 11*11/ 3/ 64/ 4*4/ ReLU 55*55*64 LRN 3*3/ 2*2/ 27*27*64\n conv2 27*27*64 5*5/ 64/ 192/ 1*1/ ReLU 27*27*192 LRN 3*3/ 2*2/ 13*13*192\n conv3 13*13*192 3*3/ 192/ 384/ 1*1/ ReLU 13*13*384\n conv4 13*13*384 3*3/ 384/ 256/ 1*1/ ReLU 13*13*256\n conv5 13*13*256 3*3/ 256/ 256/ 1*1/ 
ReLU 13*13*256 3*3/ 2*2/ 6*6*256\n\n 全连接层 输入节点 激活 输出节点\n FC1 9216 ReLU 4096\n FC2 4096 ReLU 4096\n FC3 9216 Softmax 1000\n\n\n\n'''\nimport tensorflow as tf\n\n\n\nbatch_size = 32\nnum_batches = 100\n\n\n#初始化权重\ndef variable_with_weight_loss(shape,stddev,wl):\n var=tf.Variable(tf.truncated_normal(shape,stddev=stddev))\n if wl is not None:\n weight_loss=tf.multiply(tf.nn.l2_loss(var),wl,name='weight_loss')\n tf.add_to_collection('losses',weight_loss)\n return var\n\ndef inference(images):\n parameters = []\n #conv1\n with tf.name_scope('conv1') as scope:\n #卷积核 number=64 channels=3 size=11*11 截断的正态分布初始化卷积核参数\n kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')\n #计算卷积 strides=4*4\n conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')\n #偏差\n biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')\n #卷积结果+偏差\n bias = tf.nn.bias_add(conv, biases)\n #relu激活\n conv1 = tf.nn.relu(bias, name=scope)\n parameters += [kernel, biases]\n #lrn1数据增强\n lrn1 = tf.nn.lrn(conv1, 4, bias = 1.0, alpha = 0.001 / 9, beta = 0.75, name = 'lrn1')\n #池化核 size=3*3 strides=2*2\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')\n print('------')\n print('conv1 input:',images.shape)\n print('conv1 output:',conv1.get_shape().as_list())\n print('conv1 pool output:',pool1.get_shape().as_list())\n #conv2\n with tf.name_scope('conv2') as scope:\n kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(bias, name=scope)\n parameters += [kernel, biases]\n lrn2 = tf.nn.lrn(conv2, 4, bias = 1.0, alpha = 0.001 / 9, beta = 0.75, name = 'lrn2')\n pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')\n print('------')\n print('conv2 input:',pool1.get_shape().as_list())\n print('conv2 output:',conv2.get_shape().as_list())\n print('conv2 pool output:',pool2.get_shape().as_list())\n #conv3\n with tf.name_scope('conv3') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv3 = tf.nn.relu(bias, name=scope)\n parameters += [kernel, biases]\n print('------')\n print('conv3 input:',pool2.get_shape().as_list())\n print('conv3 output:',conv3.get_shape().as_list())\n #conv4\n with tf.name_scope('conv4') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv4 = tf.nn.relu(bias, name=scope)\n parameters += [kernel, biases]\n print('------')\n print('conv4 input:',conv3.get_shape().as_list())\n print('conv4 output:',conv4.get_shape().as_list())\n #conv5\n with tf.name_scope('conv5') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = 
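The spatial sizes in the table above follow TensorFlow's padding arithmetic: 'SAME' convolutions give out = ceil(in / stride), and the 'VALID' max-pools give out = floor((in - k) / stride) + 1. A quick standalone check (illustrative helpers, not part of t_6.py; note the original AlexNet paper quotes 55 for conv1, whereas 'SAME' padding at stride 4 on a 224 input yields 56):

import math

def same_out(n, stride):
    return math.ceil(n / stride)

def valid_out(n, k, stride):
    return (n - k) // stride + 1

c1 = same_out(224, 4)                    # conv1 output: 56
p1 = valid_out(c1, 3, 2)                 # pool1 output: 27
p2 = valid_out(same_out(p1, 1), 3, 2)    # conv2 keeps 27, pool2 gives 13
print(c1, p1, p2)                        # 56 27 13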
tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv5 = tf.nn.relu(bias, name=scope)\n parameters += [kernel, biases]\n pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')\n print('------')\n print('conv5 input:',conv4.get_shape().as_list())\n print('conv5 output:',conv5.get_shape().as_list())\n print('conv5 pool output:',pool5.get_shape().as_list())\n\n #FC1\n with tf.name_scope('FC1') as scope:\n reshape = tf.reshape(pool5, [batch_size, -1])\n dim = reshape.get_shape()[1].value\n weight = variable_with_weight_loss(shape=[dim, 4096], stddev=0.01, wl=0.004)\n biases = tf.Variable(tf.constant(0.0, shape=[4096]), dtype=tf.float32, trainable=True)\n FC1_d = tf.nn.dropout(tf.matmul(reshape, weight) + biases,0.25)\n FC1 = tf.nn.relu(FC1_d)\n print('------')\n print('FC1 input:',reshape.get_shape().as_list())\n print('FC1 output:',FC1.get_shape().as_list())\n #FC2\n with tf.name_scope('FC2') as scope:\n weight = variable_with_weight_loss(shape=[4096, 4096], stddev=0.001, wl=0.004)\n biases = tf.Variable(tf.constant(0.0, shape=[4096]), dtype=tf.float32, trainable=True)\n FC2_d = tf.nn.dropout(tf.matmul(FC1, weight) + biases,0.25);\n FC2 = tf.nn.relu(FC2_d)\n print('------')\n print('FC2 input:',FC1.get_shape().as_list())\n print('FC2 output:',FC2.get_shape().as_list())\n #FC3\n with tf.name_scope('FC3') as scope:\n weight = variable_with_weight_loss(shape=[4096, 1000], stddev=0.001, wl=0.004)\n biases = tf.Variable(tf.constant(0.0, shape=[1000]), dtype=tf.float32, trainable=True)\n FC3 = tf.nn.softmax(tf.matmul(FC2, weight) + biases)\n print('------')\n print('FC3 input:', FC2.get_shape().as_list())\n print('FC3 output:', FC3.get_shape().as_list())\n return FC3, parameters\n\n\n\n\nif __name__ == '__main__':\n\n #创建输入图像的一个batch\n image_size = 224\n images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))\n FC3,parameters = inference(images)\n #初始化网络中的所有变量\n init = tf.global_variables_initializer()\n #执行网络\n sess = tf.Session()\n sess.run(init)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"src/t_6.py","file_name":"t_6.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"101953378","text":"import spacy\nimport pickle\nfrom sklearn.svm import LinearSVC, SVC\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom dataset_analysis import analyse_dataset\nimport csv\nfrom sklearn.metrics import accuracy_score\nfrom features_relations import structural_number_tokens_source, relationship, structural_difference_number_tokens, structural_sentence_distance, structural_target_before_source, lexical_number_common_terms, structural_number_punctuation_marks_source, structural_difference_number_punctuation_marks, lexical_source_contains_modal_verb, indicators_source_contains_discourse_markers\n\nnlp = spacy.load('de_core_news_sm')\n\n# TODO Reihenfolge der pairs relevant?!\n# TODO insert None-label!!! 
as src and target!!!\n\nelements = []\n\nelements.append({\n 'id': 0,\n 'label': 'claim',\n 'start': 0,\n 'length': 20,\n 'text': \"Ich finde das Produkt gut.\",\n 'confidence': 1.\n })\n\n\nelements.append({\n 'id': 1,\n 'label': 'premise',\n 'start': 40,\n 'length': 60,\n 'text': \"Meiner Meinung nach ist es äußerst hilfreich.\",\n 'confidence': 1.\n })\n\n# elements.append({\n # 'id': 2,\n # 'label': 'None',\n # 'start': 97,\n # 'length': 100,\n # 'text': \"Das Wetter ist schön.\",\n # 'confidence': 1.\n # })\n\nelements.append({\n 'id': 2,\n 'label': 'premise',\n 'start': 70,\n 'length': 75,\n 'text': \"Es löst das gegebene Problem.\",\n 'confidence': 1.\n })\n\n# elements.append({\n# 'id': 4,\n# 'label': 'None',\n# 'start': 97,\n# 'length': 100,\n# 'text': \"Das Wetter ist schön.\",\n# 'confidence': 1.\n# })\n\n\ndef predict(elements):\n # load the model from disk\n loaded_model = pickle.load(open(\"linear_SVC_relations.pkl\", 'rb'))\n\n X_unseen_pairs, pairs = encode_features(elements)\n print(\"pairs: \", X_unseen_pairs)\n #for x in X_unseen_pairs:\n # pred = loaded_model.predict(x)\n # print(pred)\n # return pred\n pred = loaded_model.predict(X_unseen_pairs)\n support_relations = []\n\n count = 0\n for p in pred:\n print(p)\n if p == 1 and pairs[count]['srcElem'] != pairs[count]['trgElem']:\n pairs[count]['label'] = 'support'\n count += 1\n print(pred)\n\n for p in pairs:\n if p['label'] == 'support':\n support_relations.append(p)\n\n return support_relations\n\n\ndef encode_features(elements):\n f, pairs = generate_pairs(elements)\n\n # loaded_model = pickle.load(open(\"linear_SVC.pkl\", 'rb'))\n loaded_scaler = pickle.load(open(\"standard_scaler_linear_SVC.pkl\", 'rb'))\n\n df = pd.DataFrame.from_dict(f)\n\n df = pd.get_dummies(df, columns=['label_source', 'label_target']) # 'relationship',\n # print(df['number_tokens_target'])\n scaled_features = df.copy()\n\n col_names = ['number_tokens_source', 'number_tokens_target', 'difference_number_tokens', 'sentence_distance', 'number_of_common_terms', 'number_of_punctuation_marks_source', 'number_of_punctuation_marks_target', 'difference_number_punctuation_marks']\n features = scaled_features[col_names]\n # scaler = StandardScaler().fit(features.values)\n features = loaded_scaler.transform(features.values)\n\n scaled_features[col_names] = features\n #print(scaled_features['number_tokens_target'])\n\n #print(df)\n #print(\"df: \", df.columns.values)\n\n print(\"scaled: \", scaled_features.columns.values)\n print(scaled_features)\n\n scaled_features[\"label_source_None\"] = 0\n scaled_features[\"label_target_None\"] = 0\n\n return scaled_features, pairs\n\n\ndef generate_pairs(elements):\n f = []\n annotations = create_csv(elements)\n counter = 0\n number_of_docs = 1\n #print(number_of_docs)\n pairs = []\n\n while counter < number_of_docs:\n to_compare = []\n for a in annotations:\n # if a['docID'] == counter:\n to_compare.append(a)\n\n #print(to_compare)\n\n n_pair1 = 0\n\n while n_pair1 < len(to_compare)-1:\n n_pair2 = 0 #n_pair1 + 1\n while n_pair2 < len(to_compare):\n pair1 = to_compare[n_pair1]\n pair2 = to_compare[n_pair2]\n pairs.append({\n 'srcElem': annotations[n_pair1]['id'],\n 'trgElem': annotations[n_pair2]['id'],\n 'label': 'non-support',\n 'confidence': 1.\n })\n print(\"1: \", pair1)\n print(\"2: \", pair2)\n\n feat = generate_features(pair1, pair2)\n f.append(feat)\n\n n_pair2 += 1\n\n n_pair1 += 1\n\n counter += 1\n\n #print(f)\n\n return f, pairs\n\n\ndef create_csv(elements):\n global_id = 0\n final_annot = []\n\n for 
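encode_features above reloads the scaler pickled at training time rather than fitting a new one: prediction-time inputs must be standardised with the training mean and variance, not their own. A tiny standalone illustration (made-up numbers, not part of this module):

import numpy as np
from sklearn.preprocessing import StandardScaler

train = np.array([[10.0], [20.0], [30.0]])
scaler = StandardScaler().fit(train)          # mean 20, std ~8.16 learned from training data

print(scaler.transform(np.array([[20.0]])))   # [[0.]]  - centred on the training mean
print(scaler.transform(np.array([[40.0]])))   # [[~2.45]] - z-score w.r.t. training stats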
elem in elements:\n final_annot.append({\n 'globalID': global_id,\n 'text': elem['text'],\n 'label': elem['label'],\n 'id': elem['id']\n })\n\n global_id += 1\n\n return final_annot\n\n\ndef generate_features(pair1, pair2):\n features = {}\n # features[\"relationship\"] = relationship.get_relationship(pair1, pair2) # 'support' 'non-support' categorical\n features[\"label_source\"] = pair1['label'] # claim, premise, None categorical\n features[\"label_target\"] = pair2['label'] # claim, premise, None categorical\n features[\"number_tokens_source\"] = structural_number_tokens_source.get_number_of_tokens(pair1['text']) # int\n features[\"number_tokens_target\"] = structural_number_tokens_source.get_number_of_tokens(pair2['text']) # int\n features[\"difference_number_tokens\"] = structural_difference_number_tokens.get_difference_number_tokens(pair1, pair2) # int\n features[\"sentence_distance\"] = structural_sentence_distance.get_sentence_distance(pair1, pair2) # int\n features[\"target_before_source\"] = structural_target_before_source.is_target_before_source(pair1, pair2) # bool\n features[\"number_of_common_terms\"] = lexical_number_common_terms.get_number_of_common_terms(pair1, pair2) # int\n features[\"number_of_punctuation_marks_source\"] = structural_number_punctuation_marks_source.get_number_of_punctuation_marks(pair1['text']) # int\n features[\"number_of_punctuation_marks_target\"] = structural_number_punctuation_marks_source.get_number_of_punctuation_marks(pair2['text']) # int\n features[\"difference_number_punctuation_marks\"] = structural_difference_number_punctuation_marks.get_difference_number_of_punctuation_marks(pair1, pair2) # int\n features[\"contains_modal_verb_source\"] = lexical_source_contains_modal_verb.contains_modal_verb(pair1['text']) # bool\n features[\"contains_modal_verb_target\"] = lexical_source_contains_modal_verb.contains_modal_verb(pair2['text']) # bool\n features[\"contains_argumentative_discourse_markers_source\"] = indicators_source_contains_discourse_markers.contains_argumentative_markers(pair1['text']) # bool\n features[\"contains_argumentative_discourse_markers_target\"] = indicators_source_contains_discourse_markers.contains_argumentative_markers(pair2['text']) # bool\n return features\n\n\n# encode_features(elements)\n#predict(elements)","sub_path":"features_relations/predict_relations.py","file_name":"predict_relations.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"434096104","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 19 21:57:36 2021\n\n@author: enkh-orgilbatkhuu\n\"\"\"\n\n#avant de commencer il faut installer pygame by pip install pygame\nimport pygame\nfrom game import Game\npygame.init()\n \n#génerer la fênetre de notre jeu\npygame.display.set_caption(\"BRAVE I\")\n#dimention de la fênetre\nscreen = pygame.display.set_mode((1080, 720))\n\n# importer de charger l'arriere plan de notre jeu\nbackground = pygame.image.load(\"assets/bg.jpg\")\n\n#charger notre jeu\ngame = Game()\n\nrunning = True\n\n#boucle tant que cett condition est vraie\nwhile running:\n \n #appliquer l'arriere plan de notre jeu\n screen.blit(background, (0, 0))\n\n #appliquer l'image de mon joueur\n screen.blit(game.player.image, game.player.rect)\n \n #mettre à jour l'ecran\n pygame.display.flip()\n # si le joueur ferme cette fenêtre\n for event in pygame.event.get():\n # que l'evenement est fermeeturee de fenetre\n if event.type == 
pygame.QUIT:\n running = False\n pygame.quit()\n print(\"fermeture du jeu\")\n # detecter si un joueur lache une touche du clavier\n elif event.type == pygame.KEYDOWN:\n # quelle touche a été uttiliséee\n if event.key == pygame.K_RIGHT:\n game.player.move_right()\n elif event.key == pygame.K_LEFT:\n game.player.move_left()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"145414933","text":"import os\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\nimport numpy as np\nimport pandas as pd\n\ndef load_data_and_labels(file_dir):\n EMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin'\n files = sorted(os.listdir(file_dir))\n labels = []\n data = []\n index = 0\n for file in files:\n df = pd.read_csv(os.path.join(file_dir, file), header=None, delimiter=None, encoding=\"iso-8859-1\", error_bad_lines=False)\n for line in list(df[1]):\n data.append(line)\n labels.append(index)\n index += 1\n\n tokenizer = Tokenizer(char_level=False)\n tokenizer.fit_on_texts(data)\n sequences = tokenizer.texts_to_sequences(data)\n\n seq_lens = [len(s) for s in sequences]\n max_sequence_length = max(seq_lens)\n\n data = pad_sequences(sequences, maxlen=max_sequence_length, padding='post')\n\n word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True)\n EMBEDDING_DIM = 300\n nb_words = len(tokenizer.word_index)+1\n print(\"word num:\", nb_words)\n\n embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\n\n for word, i in tokenizer.word_index.items():\n if word in word2vec.vocab:\n embedding_matrix[i] = word2vec.word_vec(word)\n else:\n print(word)\n print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\n return np.array(data), to_categorical(labels), nb_words, embedding_matrix\n\nif __name__ == '__main__':\n x, y, num_words = load_data_and_labels('../data/')\n","sub_path":"Word/data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"398642694","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse, HttpResponseRedirect,JsonResponse\r\nfrom django.urls import reverse\r\nimport joblib\r\nimport pickle\r\nimport numpy as np\r\n\r\ndef sales(request):\r\n voting=joblib.load('voting.sav')\r\n \r\n if request.method == \"GET\":\r\n return render(request,\"sales.html\")\r\n Item_Identifier=request.POST['Item_Identifier']\r\n Item_Weight=float(request.POST['Item_Weight'])\r\n Item_Fat_Content=int(request.POST['Item_Fat_Content'])\r\n Item_Visibility=float(request.POST['Item_Visibility'])\r\n Item_Type=int(request.POST['Item_Type'])\r\n Item_MRP=float(request.POST['Item_MRP'])\r\n new_out=int(request.POST['Outlet_Identifier'])\r\n Outlet_Establishment_Year=int(request.POST['Outlet_Establishment_Year'])\r\n Outlet_Size=int(request.POST['Outlet_Size'])\r\n Outlet_Location_Type=int(request.POST['Outlet_Location_Type'])\r\n Outlet_Type=int(request.POST['Outlet_Type'])\r\n \r\n \r\n iden=Item_Identifier[:2]\r\n if(iden=='DR'):\r\n Item_Cateogory = 0\r\n if(iden=='FD'):\r\n Item_Cateogory = 1\r\n if(iden=='NC'):\r\n Item_Cateogory = 2\r\n new_Item = int(Item_Identifier[-2:])\r\n if Item_MRP<=67.5:\r\n MRP_bins=0\r\n elif 
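load_data_and_labels above returns padded sequences, the vocabulary size and a word2vec-seeded embedding_matrix; a typical consumer feeds that matrix into a frozen Keras Embedding layer. A hedged sketch (model shape and layer sizes are illustrative, not part of data_helper.py; assumes the same Keras generation as the imports above):

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

def build_model(nb_words, embedding_matrix, max_sequence_length, n_classes):
    model = Sequential()
    # Initialise with the pretrained word2vec vectors and keep them frozen
    model.add(Embedding(nb_words, 300,
                        weights=[embedding_matrix],
                        input_length=max_sequence_length,
                        trainable=False))
    model.add(LSTM(128))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model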
(Item_MRP>67.5) & (Item_MRP<=134.5):\r\n MRP_bins=1\r\n elif (Item_MRP>134.5) & (Item_MRP<=201.1):\r\n MRP_bins=2\r\n else:\r\n MRP_bins=3\r\n total=2021-Outlet_Establishment_Year\r\n \r\n list=[Item_Weight,Item_Fat_Content,Item_Visibility,Item_Type, Item_MRP,Outlet_Size,\r\n Outlet_Location_Type,Outlet_Type\t,Item_Cateogory,new_Item,\r\n MRP_bins,new_out,total\t]\r\n print(list)\r\n \r\n b = np.array(list, dtype=float) # convert using numpy\r\n c = [float(i) for i in list] # convert with for loop\r\n prediction=voting.predict([c])\r\n prediction = abs(prediction)\r\n return render(request,\"sales.html\",{'prediction':prediction})\r\n\r\ndef home(request):\r\n return render(request,\"portfolio.html\")\r\ndef predict():\r\n model = pickle.load(open(\"flight_rf.pkl\", \"rb\"))\r\n if request.method == \"POST\":\r\n\r\n # Date_of_Journey\r\n date_dep = request.form[\"Dep_Time\"]\r\n Journey_day = int(pd.to_datetime(date_dep, format=\"%Y-%m-%dT%H:%M\").day)\r\n Journey_month = int(pd.to_datetime(date_dep, format =\"%Y-%m-%dT%H:%M\").month)\r\n # print(\"Journey Date : \",Journey_day, Journey_month)\r\n\r\n # Departure\r\n Dep_hour = int(pd.to_datetime(date_dep, format =\"%Y-%m-%dT%H:%M\").hour)\r\n Dep_min = int(pd.to_datetime(date_dep, format =\"%Y-%m-%dT%H:%M\").minute)\r\n # print(\"Departure : \",Dep_hour, Dep_min)\r\n\r\n # Arrival\r\n date_arr = request.form[\"Arrival_Time\"]\r\n Arrival_hour = int(pd.to_datetime(date_arr, format =\"%Y-%m-%dT%H:%M\").hour)\r\n Arrival_min = int(pd.to_datetime(date_arr, format =\"%Y-%m-%dT%H:%M\").minute)\r\n # print(\"Arrival : \", Arrival_hour, Arrival_min)\r\n\r\n # Duration\r\n dur_hour = abs(Arrival_hour - Dep_hour)\r\n dur_min = abs(Arrival_min - Dep_min)\r\n # print(\"Duration : \", dur_hour, dur_min)\r\n\r\n # Total Stops\r\n Total_stops = int(request.form[\"stops\"])\r\n # print(Total_stops)\r\n\r\n # Airline\r\n # AIR ASIA = 0 (not in column)\r\n airline=request.form['airline']\r\n if(airline=='Jet Airways'):\r\n Jet_Airways = 1\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0 \r\n\r\n elif (airline=='IndiGo'):\r\n Jet_Airways = 0\r\n IndiGo = 1\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0 \r\n\r\n elif (airline=='Air India'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 1\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0 \r\n \r\n elif (airline=='Multiple carriers'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 1\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0 \r\n \r\n elif (airline=='SpiceJet'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 1\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0 \r\n \r\n elif (airline=='Vistara'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 1\r\n GoAir = 0\r\n 
Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline=='GoAir'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 1\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline=='Multiple carriers Premium economy'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 1\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline=='Jet Airways Business'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 1\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n elif (airline=='Vistara Premium economy'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 1\r\n Trujet = 0\r\n \r\n elif (airline=='Trujet'):\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 1\r\n\r\n else:\r\n Jet_Airways = 0\r\n IndiGo = 0\r\n Air_India = 0\r\n Multiple_carriers = 0\r\n SpiceJet = 0\r\n Vistara = 0\r\n GoAir = 0\r\n Multiple_carriers_Premium_economy = 0\r\n Jet_Airways_Business = 0\r\n Vistara_Premium_economy = 0\r\n Trujet = 0\r\n\r\n # print(Jet_Airways,\r\n # IndiGo,\r\n # Air_India,\r\n # Multiple_carriers,\r\n # SpiceJet,\r\n # Vistara,\r\n # GoAir,\r\n # Multiple_carriers_Premium_economy,\r\n # Jet_Airways_Business,\r\n # Vistara_Premium_economy,\r\n # Trujet)\r\n\r\n # Source\r\n # Banglore = 0 (not in column)\r\n Source = request.form[\"Source\"]\r\n if (Source == 'Delhi'):\r\n s_Delhi = 1\r\n s_Kolkata = 0\r\n s_Mumbai = 0\r\n s_Chennai = 0\r\n\r\n elif (Source == 'Kolkata'):\r\n s_Delhi = 0\r\n s_Kolkata = 1\r\n s_Mumbai = 0\r\n s_Chennai = 0\r\n\r\n elif (Source == 'Mumbai'):\r\n s_Delhi = 0\r\n s_Kolkata = 0\r\n s_Mumbai = 1\r\n s_Chennai = 0\r\n\r\n elif (Source == 'Chennai'):\r\n s_Delhi = 0\r\n s_Kolkata = 0\r\n s_Mumbai = 0\r\n s_Chennai = 1\r\n\r\n else:\r\n s_Delhi = 0\r\n s_Kolkata = 0\r\n s_Mumbai = 0\r\n s_Chennai = 0\r\n\r\n # print(s_Delhi,\r\n # s_Kolkata,\r\n # s_Mumbai,\r\n # s_Chennai)\r\n\r\n # Destination\r\n # Banglore = 0 (not in column)\r\n Source = request.form[\"Destination\"]\r\n if (Source == 'Cochin'):\r\n d_Cochin = 1\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n \r\n elif (Source == 'Delhi'):\r\n d_Cochin = 0\r\n d_Delhi = 1\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n\r\n elif (Source == 'New_Delhi'):\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 1\r\n d_Hyderabad = 0\r\n d_Kolkata = 0\r\n\r\n elif (Source == 'Hyderabad'):\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 1\r\n d_Kolkata = 0\r\n\r\n elif (Source == 'Kolkata'):\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata = 1\r\n\r\n else:\r\n d_Cochin = 0\r\n d_Delhi = 0\r\n d_New_Delhi = 0\r\n d_Hyderabad = 0\r\n d_Kolkata 
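The if/elif chains above and below hand-build one-hot vectors for the airline, source and destination categories, with the unlisted baseline category mapping to all zeros. A compact equivalent for one group, shown only as an illustration - the column order passed to the model must stay exactly as trained:

def one_hot(value, categories):
    # Values outside `categories` (the dropped baseline, e.g. 'Air Asia')
    # become an all-zero vector, matching the final else branch above.
    return [1 if value == c else 0 for c in categories]

AIRLINES = ['Air India', 'GoAir', 'IndiGo', 'Jet Airways', 'Jet Airways Business',
            'Multiple carriers', 'Multiple carriers Premium economy',
            'SpiceJet', 'Trujet', 'Vistara', 'Vistara Premium economy']
print(one_hot('IndiGo', AIRLINES))   # [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]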
= 0\r\n\r\n # print(\r\n # d_Cochin,\r\n # d_Delhi,\r\n # d_New_Delhi,\r\n # d_Hyderabad,\r\n # d_Kolkata\r\n # )\r\n \r\n\r\n # ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',\r\n # 'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',\r\n # 'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',\r\n # 'Airline_Jet Airways', 'Airline_Jet Airways Business',\r\n # 'Airline_Multiple carriers',\r\n # 'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',\r\n # 'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',\r\n # 'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',\r\n # 'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',\r\n # 'Destination_Kolkata', 'Destination_New Delhi']\r\n \r\n prediction=model.predict([[\r\n Total_stops,\r\n Journey_day,\r\n Journey_month,\r\n Dep_hour,\r\n Dep_min,\r\n Arrival_hour,\r\n Arrival_min,\r\n dur_hour,\r\n dur_min,\r\n Air_India,\r\n GoAir,\r\n IndiGo,\r\n Jet_Airways,\r\n Jet_Airways_Business,\r\n Multiple_carriers,\r\n Multiple_carriers_Premium_economy,\r\n SpiceJet,\r\n Trujet,\r\n Vistara,\r\n Vistara_Premium_economy,\r\n s_Chennai,\r\n s_Delhi,\r\n s_Kolkata,\r\n s_Mumbai,\r\n d_Cochin,\r\n d_Delhi,\r\n d_Hyderabad,\r\n d_Kolkata,\r\n d_New_Delhi\r\n ]])\r\n\r\n output=round(prediction[0],2)\r\n\r\n return render_template('home.html',prediction_text=\"Your Flight price is Rs. {}\".format(output))\r\n\r\n\r\n return render_template(\"home.html\")\r\n","sub_path":"sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"581415760","text":"# vim:set ff=unix expandtab ts=4 sw=4:\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom pathlib import Path\nfrom sympy import diff, exp, lambdify, flatten, latex, Symbol\nfrom scipy.integrate import odeint, quad \nfrom scipy.interpolate import interp1d\nimport numpy as np\n\nfrom .SmoothModelRun import SmoothModelRun\nfrom .helpers_reservoir import has_pw, numsol_symbolic_system\n\nclass ModelRun:\n def __init__(self, rm, parameter_set={}, start_values=None, times = None):\n self.model = rm\n self.parameter_set = parameter_set\n self.times = times\n self.start_values = start_values\n\n times = set(list(times) + rm.jump_times)\n times = sorted(times)\n\n split_data = []\n start_time = times[0]\n for end_time in rm.jump_times + [times[-1]]:\n ts = times[times.index(start_time):times.index(end_time)+1]\n \n intensity = [0]*self.nr_pools\n if start_time in rm.impulsive_fluxes.keys():\n for pool, intens in rm.impulsive_fluxes[start_time].items():\n intensity[pool] = intens\n \n split_data.append({'times': ts, 'intensity': intensity})\n start_time = ts[-1]\n \n self.split_data = split_data\n \n\n @property\n def nr_pools(self):\n return(self.model.nr_pools)\n\n def solve(self):\n #return cached result if possible\n if hasattr(self,\"_previously_computed__sol\"):\n return(self._previously_computed__sol)\n \n old_values = np.array(self.start_values)\n solution = np.array([[0]*self.nr_pools])\n for data in self.split_data:\n start_values = old_values + np.array(data['intensity'], dtype='float64')\n times = data['times']\n\n smr = SmoothModelRun(self.model.clean_model, self.parameter_set, start_values, times)\n sol = smr.solve()\n solution = np.append(solution[:-1], sol, axis=0)\n old_values = sol[-1]\n \n self._previously_computed__sol = solution\n return solution\n \n# 
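ModelRun.__init__ above splits the time grid at each jump time and adds the impulsive flux to the start values of the following segment; solve() then integrates segment by segment and stitches the results. A standalone sketch of that scheme on a single pool (illustrative rate and impulse, not part of ModelRun.py):

import numpy as np
from scipy.integrate import odeint

def rhs(x, t, k=0.5):
    return -k * x                      # simple first-order decay

times = np.linspace(0, 5, 101)
jump_time, impulse = 2.0, 2.0          # add 2 units to the pool at t = 2

seg1_t = times[times <= jump_time]
seg2_t = times[times >= jump_time]
seg1 = odeint(rhs, 1.0, seg1_t)
seg2 = odeint(rhs, seg1[-1, 0] + impulse, seg2_t)   # post-jump start value

solution = np.concatenate([seg1[:-1], seg2])        # one row per entry of times
print(solution.shape)                               # (101, 1)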
@property\n# def mean_transit_time(self):\n# times=self.times\n# n=self.nr_pools\n# m=self.model\n# soln=self.solve_mean_age_system()\n# sol_x=soln[:,0:n]\n# sol_funcs=self.sol_funcs()\n# tup=tuple(m.state_variables)+(m.time_symbol,)\n# l=len(times)\n# outputs=dict()\n# for key,expr in m.output_fluxes.items():\n# expr_par=expr.subs(self.parameter_set)\n# ol=lambdify(tup,expr_par,modules=\"numpy\")\n# result=np.ndarray((l,))\n# for i in range(l):\n# args=[sol_x[i,j] for j in range(n)]+[times[i]]\n# res=ol(*args)\n# result[i]=res\n# outputs[key]=result\n# \n# transit_times=np.zeros((l,))\n# overall_output=np.zeros((l,))\n# for key,vector in outputs.items():\n# agevec=soln[:,n+key]\n# pool_age=agevec*vector\n# overall_output+=vector\n# transit_times+=pool_age\n# return(transit_times/overall_output) \n#\n def sol_funcs(self):\n sol=self.solve()\n sol_funcs=[]\n for i in range(self.nr_pools):\n sol_inter=interp1d(self.times, sol[:,i])\n sol_funcs.append(sol_inter)\n return(sol_funcs)\n \n# def f_of_t_maker(self,sol_funcs,ol):\n# def ot(t):\n# sv=[sol_funcs[i](t) for i in range(self.nr_pools)]\n# tup=tuple(sv)+(t,)\n# res=ol(*tup)\n# return(res)\n# return(ot)\n#\n# def flux_funcs(self,expr_dict):\n# m=self.model\n# sol_funcs=self.sol_funcs()\n# flux_funcs={}\n# tup=tuple(m.state_variables)+(m.time_symbol,)\n# for key,value in expr_dict.items():\n# o_par=value.subs(self.parameter_set)\n# ol=lambdify(tup,o_par,modules=\"numpy\")\n# flux_funcs[key]=self.f_of_t_maker(sol_funcs,ol)\n#\n# return(flux_funcs)\n#\n# \n# def external_input_flux_funcs(self):\n# return(self.flux_funcs(self.model.input_fluxes))\n#\n# def internal_flux_funcs(self):\n# return(self.flux_funcs(self.model.internal_fluxes))\n#\n#\n# def output_flux_funcs(self):\n# return(self.flux_funcs(self.model.output_fluxes))\n# \n#\n def plot_sols(self, fig, fontsize = 20):\n #fixme:\n # since time units and units are related to those\n # of the other fluxes it would be more consistent\n # to make them a property of SmoothModelRun and use\n # them in the other plots as well\n\n tf = self.times\n sol_funcs = self.sol_funcs()\n n=self.nr_pools\n for i in range(n):\n ax = fig.add_subplot(n,1,i+1)\n ax.plot(tf,sol_funcs[i](tf))\n \n ax.set_xlim(tf[0], tf[-1])\n ax.set_ylim(ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)\n \n time_unit = self.model.time_unit\n xlabel = latex(self.model.time_symbol)\n if time_unit:\n xlabel += \"\\quad(\" + latex(time_unit) + \")\"\n ax.set_xlabel(\"$\" + xlabel + \"$\", fontsize = fontsize)\n \n units = self.model.units\n ylabel = latex(self.model.state_vector[i])\n if units and units[i]:\n ylabel += \"\\quad(\" + latex(units[i]) + \")\"\n ax.set_ylabel(\"$\" + ylabel + \"$\", fontsize=fontsize)\n\n def plot_phase_plane(self, ax, i, j, fontsize = 24):\n #fixme: exact the same code as in SmoothModelRun\n tf = self.times\n sol_funcs = self.sol_funcs()\n ax.plot(sol_funcs[i](tf), sol_funcs[j](tf))\n\n x0 = sol_funcs[i](tf[0])\n y0 = sol_funcs[j](tf[0])\n ax.scatter([x0],[y0], s=60)\n\n x1 = sol_funcs[i](tf[len(tf)//2-1])\n y1 = sol_funcs[j](tf[len(tf)//2-1])\n x2 = sol_funcs[i](tf[len(tf)//2+1])\n y2 = sol_funcs[j](tf[len(tf)//2+1])\n ax.add_patch(mpatches.FancyArrowPatch((x1,y1), (x2,y2), arrowstyle='simple', mutation_scale=20, alpha=1))\n\n ax.set_xlim(ax.get_xlim()[0]*0.9, ax.get_xlim()[1]*1.1)\n ax.set_ylim(ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)\n\n units = self.model.units\n\n xlabel = latex(self.model.state_vector[i])\n if units and units[i]:\n xlabel += \"\\quad(\" + units[i] + \")\"\n ax.set_xlabel(\"$\" + 
xlabel + \"$\", fontsize=fontsize)\n \n ylabel = latex(self.model.state_vector[j])\n if units and units[j]:\n ylabel += \"\\quad(\" + units[j] + \")\"\n ax.set_ylabel(\"$\" + ylabel + \"$\", fontsize=fontsize)\n\n def plot_phase_planes(self, fig, fontsize = 20):\n #fixme: exact the same code as in SmoothModelRun\n n = len(self.model.state_vector)\n planes = [(i,j) for i in range(n) for j in range(i)]\n n = len(planes)\n\n if n <=3:\n rows = 1\n cols = n\n if n == 4 :\n rows = 2\n cols = 2\n if n >= 5:\n rows = n // 3\n if n % 3 != 0:\n rows += 1\n cols = 3 \n\n for k, (i, j) in enumerate(planes):\n ax = fig.add_subplot(rows, cols, k+1)\n self.plot_phase_plane(ax, i, j, fontsize=fontsize)\n \n# def plot_internal_fluxes(self,fig):\n# internal_flux_funcs=self.internal_flux_funcs()\n# n=len(internal_flux_funcs.keys())\n# tf=self.times\n# sol_funcs=self.sol_funcs()\n# n=self.nr_pools\n# i=1\n# for key,value in internal_flux_funcs.items():\n# p=fig.add_subplot(n,1,i)\n# p.plot(tf,internal_flux_funcs[key](tf))\n# i+=1\n#\n# def plot_output_fluxes(self,fig):\n# tf=self.times\n# output_flux_funcs=self.output_flux_funcs()\n# n=len(output_flux_funcs.keys())\n# i=1\n# for key,value in output_flux_funcs.items():\n# p=fig.add_subplot(n,1,i)\n# p.plot(tf,output_flux_funcs[key](tf))\n# i+=1\n# \n# def plot_mean_ages_rasmussen(self,fig):\n# tf=self.times\n# n=self.nr_pools\n# states,rhs=self.model.mean_age_system\n# time_symbol=self.model.time_symbol\n# tf=self.times\n# soln=self.solve_mean_age_system()\n# for i in range(n):\n# ax_a = plt.subplot2grid((n,1),(i,0))\n# ax_a.plot(tf,soln[:,i+n])\n# ax_a.set_xlabel(\"$\"+latex(time_symbol)+ \"$\" )\n# ax_a.set_ylabel(\"$\"+latex(states[i+n])+ \"$\" )\n# \n# def plot_mean_transit_time_rasmussen(self,fig):\n# tr_val=self.mean_transit_time\n# fig=plt.figure()\n# ax_a = plt.subplot2grid((1,1),(0,0))\n# ax_a.plot(tf,tr_val[:])\n# ax_a.set_xlabel(\"$\"+latex(time_symbol)+ \"$\" )\n# ax_a.set_ylabel(\"mean transit time \" )\n#\n# def plot_external_input_fluxes(self,fig):\n# tf=self.times\n# input_flux_funcs=self.external_input_flux_funcs()\n# n=len(input_flux_funcs.keys())\n# i=1\n# for key,value in input_flux_funcs.items():\n# p=fig.add_subplot(n,1,i)\n# p.plot(tf,input_flux_funcs[key](tf))\n# i+=1\n \n\n\n\n \n \n \n \n","sub_path":"bgc_md/ModelRun.py","file_name":"ModelRun.py","file_ext":"py","file_size_in_byte":9209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"418884713","text":"# -*- coding: utf-8 -*-\nfrom Crud.CrudCategoriaAPagar import CrudCatAPagar\n\n\nclass CategoriaAPagar(object):\n\n # Populando combobox forma de pagamento\n def cboxCatAPagar(self, combobox):\n busca = CrudCatAPagar()\n busca.listaCatAPagar()\n combobox.clear()\n\n for i in range(len(busca.descCatAPagar)):\n combobox.addItem(busca.descCatAPagar[i], str(\n str(busca.idCatAPagar[i])))\n","sub_path":"controle_estoque/Funcoes/categoriaAPagar.py","file_name":"categoriaAPagar.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"479899356","text":"from imdb.models import Actor,Movie,Director,Cast,Rating\nfrom django.db.models import Q \nfrom django.db.models import *\ndef populate_database(actors_list, movies_list, directors_list, movie_rating_list):\n for actor in actors_list:\n actor_a=Actor(actor_id=actor['actor_id'],name=actor['name'])\n actor_a.save()\n\n for director in directors_list:\n director_d=Director(name=director)\n 
director_d.save()\n \n for movies in movies_list:\n d1= Director.objects.get(pk=movies['director_name'])\n movie_m=Movie(movie_id=movies['movie_id'],name=movies['name'],release_date=movies['release_date'],box_office_collection_in_crores=movies['box_office_collection_in_crores'],director=d1)\n movie_m.save()\n m1=Movie.objects.get(pk=movies['movie_id'])\n for actor_cast in movies['actors']:\n a1=Actor.objects.get(pk=actor_cast['actor_id'])\n Cast.objects.create(actor=a1,movie=m1,role=actor_cast['role'],is_debut_movie=actor_cast['is_debut_movie'])\n #print(cast_c)\n \n for rating in movie_rating_list:\n m1=Movie.objects.get(pk=rating['movie_id'])\n rating_r=Rating(movie=m1,rating_one_count=rating['rating_one_count'],rating_two_count=rating['rating_two_count'],rating_three_count=rating['rating_three_count'],rating_four_count=rating['rating_four_count'],rating_five_count=rating['rating_five_count'])\n rating_r.save()\n \ndef get_no_of_distinct_movies_actor_acted(actor_id):\n list=Movie.objects.filter(actors__actor_id=actor_id).distinct()\n return len(list)\n \n \ndef get_movies_directed_by_director(director_obj):\n return list(Movie.objects.filter(director=director_obj))\n \ndef get_average_rating_of_movie(movie_obj):\n m1=Movie.objects.get(pk=movie_obj.movie_id)\n try:\n r1=Rating.objects.get(movie_id=m1.movie_id)\n one,two,three,four,five=r1.rating_one_count,r1.rating_two_count,r1.rating_three_count,r1.rating_four_count,r1.rating_five_count\n rate_sum=one*1+two*2+three*3+four*4+five*5\n rate_count=one+two+three+four+five\n if rate_sum:\n average=(rate_sum)/rate_count\n return average\n except Rating.DoesNotExist:return 0\n return 0\n\ndef get_sum_rating_of_movie(movie_obj):\n m1=Movie.objects.get(pk=movie_obj.movie_id)\n try:\n r1=Rating.objects.get(movie_id=m1.movie_id)\n one,two,three,four,five=r1.rating_one_count,r1.rating_two_count,r1.rating_three_count,r1.rating_four_count,r1.rating_five_count\n rate_sum=one+two+three+four+five\n if rate_sum:\n return rate_sum\n except Rating.DoesNotExist:return 0\n return 0\n \ndef delete_movie_rating(movie_obj):\n r1=Rating.objects.get(movie_id=movie_obj.movie_id)\n r1.delete()\n \ndef get_all_actor_objects_acted_in_given_movies(movie_objs):\n return list(Actor.objects.filter(movie__in=movie_objs).distinct())\n \n \n \ndef update_director_for_given_movie(movie_obj, director_obj):\n movie_obj.director=director_obj\n movie_obj.save()\n \ndef get_distinct_movies_acted_by_actor_whose_name_contains_john():\n return list(Movie.objects.filter(actors__name__contains='john').distinct())\n\n\n\ndef remove_all_actors_from_given_movie(movie_obj):\n movie_obj.actors.clear()\n\n \ndef get_all_rating_objects_for_given_movies(movie_objs):\n return list(Rating.objects.filter(movie__in=movie_objs))\n\n\ndef get_movies_by_given_movie_names(movie_names):\n movie_objs=Movie.objects.filter(name__in=movie_names)\n list_of_movies=[]\n for movie_obj in movie_objs:\n cast_list=[]\n cast_obj=Cast.objects.filter(movie=movie_obj)\n for cast in cast_obj:\n cast_list.append({\n \"actor\":{\n \"name\":cast.actor.name,\n \"actor_id\":cast.actor.actor_id,\n },\n \"role\":cast.role,\n \"is_debut_movie\":cast.is_debut_movie\n })\n \n list_of_movies.append({\n \"movie_id\":movie_obj.movie_id,\n \"name\":movie_obj.name,\n \"cast\":cast_list,\n \"box_office_collection_in_crores\":movie_obj.box_office_collection_in_crores,\n \"release_date\":str(movie_obj.release_date),\n \"director_name\":movie_obj.director.name,\n \"average_rating\":get_average_rating_of_movie(movie_obj),\n 
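For clarity on the weighted average computed in get_average_rating_of_movie above, a quick worked example with made-up vote counts:

counts = {1: 10, 2: 20, 3: 30, 4: 25, 5: 15}                  # rating -> votes
total_votes = sum(counts.values())                             # 100
weighted_sum = sum(star * n for star, n in counts.items())     # 315
print(weighted_sum / total_votes)                              # 3.15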
\"total_number_of_ratings\":get_sum_rating_of_movie(movie_obj)\n })\n \n return list_of_movies\n\ndef get_movies_by_given_movie_objs(movie_ids):\n movie_objs=Movie.objects.filter(movie_id__in=movie_ids)\n list_of_movies=[]\n for movie_obj in movie_objs:\n cast_obj=Cast.objects.filter(movie=movie_obj)\n cast_list=[]\n for cast in cast_obj: \n cast_list.append({\n \"actor\":{\n \"name\":cast.actor.name,\n \"actor_id\":cast.actor.actor_id,\n },\n \"role\":cast.role,\n \"is_debut_movie\":cast.is_debut_movie\n })\n \n list_of_movies.append({\n \"movie_id\":movie_obj.movie_id,\n \"name\":movie_obj.name,\n \"cast\":cast_list,\n \"box_office_collection_in_crores\":movie_obj.box_office_collection_in_crores,\n \"release_date\":str(movie_obj.release_date),\n \"director_name\":movie_obj.director.name,\n \"average_rating\":get_average_rating_of_movie(movie_obj),\n \"total_number_of_ratings\":get_sum_rating_of_movie(movie_obj)\n })\n \n return list_of_movies\n \n\ndef get_movies_released_in_summer_in_given_years():\n x=Movie.objects.filter(release_date__month__range=(5,7),release_date__year__range=(2006,2009)).values_list('movie_id',flat=True).distinct()\n return get_movies_by_given_movie_objs(x)\n \ndef get_movie_names_with_actor_name_ending_with_smith():\n x=Movie.objects.filter(actors__name__iendswith='smith').values_list('name',flat=True).distinct()\n return list(x)\n \ndef get_movie_names_with_ratings_in_given_range():\n x=Rating.objects.filter(rating_five_count__range=(1000,3000)).values_list('movie__name',flat=True).distinct()\n return list(x)\n \ndef get_movie_names_with_ratings_above_given_minimum():\n x=Rating.objects.filter(Q(movie__release_date__year__range=(2001,2100)),(Q(rating_five_count__gte=500)|Q(rating_four_count__gte=1000)|Q(rating_three_count__gte=2000)|Q(rating_two_count__gte=4000)|Q(rating_one_count__gte=8000))).values_list('movie__name',flat=True).distinct()\n return list(x)\n \ndef get_movie_directors_in_given_year():\n x=Movie.objects.filter(release_date__year=2000).values_list('director__name',flat=True).distinct()\n return list(x)\n \n \ndef get_actor_names_debuted_in_21st_century():\n x=Movie.objects.filter(cast__is_debut_movie=True,release_date__year__range=(2001,2100)).values_list('actors__name',flat=True).distinct()\n return list(x) \n \ndef get_director_names_containing_big_as_well_as_movie_in_may():\n x=Movie.objects.filter(name__contains='big').filter(release_date__month=5).values_list('director__name',flat=True).distinct()\n return list(x)\n \ndef get_director_names_containing_big_and_movie_in_may():\n x=Movie.objects.filter(name__contains='big',release_date__month=5).values_list('director__name',flat=True).distinct()\n return x\n\ndef reset_ratings_for_movies_in_this_year():\n Rating.objects.filter(movie__release_date__year=2000).update(rating_five_count=0,rating_four_count=0,rating_three_count=0,rating_two_count=0,rating_one_count=0)\n \n \n#task1\ndef get_average_box_office_collections():\n x=Movie.objects.aggregate(avg=Avg('box_office_collection_in_crores'))\n if x['avg']!=None:\n return round(x[\"avg\"],3)\n return 0\n \n#task2\ndef get_movies_with_distinct_actors_count():\n return list(Movie.objects.annotate(actors_count=Count('actors',distinct=True)))\n \n#task3\ndef get_male_and_female_actors_count_for_each_movie():\n f_count=Count('actors',filter=Q(actors__gender='FEMALE'),distinct=True)\n m_count=Count('actors',filter=Q(actors__gender='MALE'),distinct=True)\n x=Movie.objects.annotate(female_actors_count=f_count).annotate(male_actors_count=m_count)\n return 
list(x)\n\n#task4\ndef get_roles_count_for_each_movie():\n return list(Movie.objects.annotate(roles_count=Count('cast__role',distinct=True)))\n \n#task5\ndef get_role_frequency():\n dict={}\n x=Cast.objects.all().values_list('role').annotate(num_actors=Count('actor',distinct=True))\n dict.update(x)\n return dict\n \n#task6\ndef get_role_frequency_in_order():\n x=Cast.objects.all().values_list('role').annotate(num_actors=Count('actor',distinct=True)).order_by('-movie__release_date')\n return list(x)\n \n#task7\ndef get_no_of_movies_and_distinct_roles_for_each_actor():\n a=Actor.objects.annotate(movies_count=Count('movie',distinct=True),roles_count=Count('cast__role',distinct=True)) \n return list(a)\n \n#task8\ndef get_movies_with_atleast_forty_actors():\n m=Movie.objects.annotate(actors_count=Count('actors',distinct=True)).filter(actors_count__gte=40) \n return list(m)\n\n#task9\ndef get_average_no_of_actors_for_all_movies():\n x=Movie.objects.annotate(actors_count=Count('actors',distinct=True)).aggregate(avg=Avg('actors_count'))\n if x['avg']!=None:\n return round(x['avg'],3)\n return 0\n \n#task6 5\ndef get_female_cast_details_from_movies_having_more_than_five_female_cast():\n female_count = Count('actors', filter=Q(actors__gender__iexact='FEMALE'))\n movie_objs=Movie.objects.annotate(count=female_count).prefetch_related('cast_set').filter(count__gte=1)\n return get_movies_by_given_movie_objs(movie_objs)\n\n#task7 5\ndef get_actor_movies_released_in_year_greater_than_or_equal_to_2000():\n a=Actor.objects.prefetch_related('movie_set','movie_set__cast_set','mov').filter(movie__release_date__year__gte=2000,cast__actor__actor_id=actor_id)\n list_of_movies=[]\n for actor in a:\n for movie_obj in a.movie_set.all():\n #cast_obj=Cast.objects.filter(movie=movie_obj) \n cast_list=[]\n for cast in actor.cast_set.all(): \n cast_list.append({\n \"role\":cast.role,\n \"is_debut_movie\":cast.is_debut_movie\n })\n \n list_of_movies.append({\n \"name\":actor.name,\n \"actor_id\":actor.actor_id,\n \"movies\":[{\n \"movie_id\":movie_obj.movie_id,\n \"name\":movie_obj.name,\n \"cast\":cast_list,\n \"box_office_collection_in_crores\":movie_obj.box_office_collection_in_crores,\n \"release_date\":str(movie_obj.release_date),\n \"director_name\":movie_obj.director.name,\n \"average_rating\":get_average_rating_of_movie(movie_obj),\n \"total_number_of_ratings\":get_sum_rating_of_movie(movie_obj)\n \n }]\n })\n \n return list_of_movies","sub_path":"django/django_submissions/django_assignment_004/imdb/.~c9_invoke_HZ1e2V.py","file_name":".~c9_invoke_HZ1e2V.py","file_ext":"py","file_size_in_byte":11137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"209568807","text":"import art\n\nprint(art.logo)\nprint(\"Welcome to the secret auction program\")\n\nfrom replit import clear\n#HINT: You can call clear() to clear the output in the console.\n\n\n\nrepeat =\"yes\"\ncustomer_dict={}\n\nwhile repeat ==\"yes\":\n name_key =input(\"What is your name?: \")\n money_value =int(input(\"What's your bid?:\"))\n\n customer_dict[name_key]= money_value\n\n repeat =input(\"Are there any other biddere?: 'yes' or 'no'\").lower()\n clear()\n\nmaximum= 0\nfor key in customer_dict:\n amount=customer_dict[key]\n if amount > maximum:\n maximum = amount\n new_key=key\n\nprint(f\"The winner is {new_key} with a bid of Rs{amount}\")\n","sub_path":"practise 
programs/9_bid.py","file_name":"9_bid.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"125197817","text":"#!/usr/bin/env python3\n\nimport base64\nimport hashlib\nimport hmac\nimport json\nimport yaml\nimport os\nimport string\nimport time\nfrom datetime import date, datetime, timedelta\nfrom textwrap import dedent\nfrom typing import Optional, Tuple\n\nimport dateutil.parser\nimport gitlab\nimport jwt\nimport requests\nfrom cryptography.hazmat.backends import default_backend\nfrom flask import Flask, request\n\napp = Flask(__name__)\nconfig_file = os.environ.get(\"CONFIG_FILE_PATH\", \"config.yml\")\n\ntry:\n with open(config_file) as f:\n config = yaml.load(f, Loader=yaml.SafeLoader)\nexcept IOError:\n print(f\"Could not open configuration file ({config_file})\")\n exit(1)\n\nGITLAB_SECRET = config[\"gitlab_secret\"]\nGITHUB_APP_SECRET = config[\"github_app_secret\"].encode(\"utf-8\")\nrevng_push_ci_private_key_b64 = config[\"revng_push_ci_private_key_base64\"]\nrevng_push_ci_private_key = base64.b64decode(revng_push_ci_private_key_b64).decode(\"utf-8\")\ngithub_priv_key_b64 = config[\"github_private_key_base64\"]\ngithub_priv_key = default_backend().load_pem_private_key(base64.b64decode(github_priv_key_b64), None)\nADMIN_TOKEN = config[\"gitlab_admin_token\"]\n\nGITHUB_API_URL = \"https://api.github.com\"\n\nallowed_to_push = config[\"allowed_to_push\"]\nGITLAB_URL = config[\"gitlab_url\"]\nPROJECT_ID = config[\"project_id\"]\nBRANCH = config[\"branch\"]\nmapping = config[\"github_to_gitlab_mapping\"]\ndefault_user = config[\"default_user\"]\nci_user = config[\"ci_user\"]\ngithub_app_id = config[\"github_app_id\"]\nci_job_url = config[\"ci_job_url\"]\ngithub_installation_id = config[\"github_installation_id\"]\ndefault_user_target_components = config[\"default_user_target_components\"]\ntarget_components = config[\"target_components\"]\nrevng_orchestra_repo_url = config[\"revng_orchestra_repo_url\"]\n\n_installation_token_info = None\n\n\ndef log_response(body: str, status_code: int = 400) -> Tuple[str, int]:\n print(body, end=\"\")\n return body, status_code\n\n\ndef installation_token() -> str:\n global _installation_token_info\n should_refresh = _installation_token_info is None\n if not should_refresh:\n expires = dateutil.parser.isoparse(_installation_token_info[\"expires_at\"])\n now = datetime.now(expires.tzinfo)\n should_refresh = (expires - timedelta(minutes=10)) < now\n\n if should_refresh:\n token = jwt.encode(\n {\n \"iat\": int(time.time()) + 60,\n \"exp\": int(time.time()) + 10 * 60,\n \"iss\": str(github_app_id)\n },\n github_priv_key,\n \"RS256\"\n )\n r = requests.post(f\"{GITHUB_API_URL}/app/installations/{github_installation_id}/access_tokens\", headers={\n \"Accept\": \"application/vnd.github+json\",\n \"Authorization\": f\"Bearer {token}\",\n })\n if r.status_code not in (200, 201, 202):\n raise RuntimeError(\"Unable to retrieve GitHub installation token\")\n _installation_token_info = r.json()\n return _installation_token_info[\"token\"]\n\n\ndef github_headers() -> dict:\n result = {\n \"Accept\": \"application/vnd.github+json\",\n \"Authorization\": f\"token {installation_token()}\",\n }.copy()\n return result\n\n\nORCHESTRA_CONFIG_REPO_HTTP_URL = config[\"orchestra_config_repo_http_url\"]\nORCHESTRA_CONFIG_REPO_SSH_URL = config[\"orchestra_config_repo_ssh_url\"]\n\npusher_user_options = dedent(\"\"\"\n #@data/values\n ---\n #@overlay/match missing_ok=True\n 
remote_base_urls:\n - public: git@github.com:revng\n - private: git@rev.ng:revng-private\n\n #@overlay/match missing_ok=True\n binary_archives:\n - public: git@rev.ng:revng/binary-archives.git\n - private: git@rev.ng:revng-private/binary-archives.git\n\"\"\")\n\npull_request_user_options = dedent(\"\"\"\n #@data/values\n ---\n #@overlay/match missing_ok=True\n remote_base_urls:\n - source: ${clone_namespace}\n - public: https://github.com/revng\n ${private_sources}\n\n #@overlay/match missing_ok=True\n binary_archives:\n - public: https://rev.ng/gitlab/revng/binary-archives.git\n ${private_bin_archives}\n\"\"\")\n\n\ndef hub_to_lab(username):\n return mapping.get(username, default_user)\n\n\nclass ImpersonationToken:\n def __init__(self, user):\n self.user = user\n\n def __enter__(self):\n self.impersonation_token = self.user.impersonationtokens.create({\n \"name\": \"ci-temporary-token\",\n \"scopes\": [\"api\"],\n \"expires_at\": (date.today() + timedelta(days=2)).strftime(\"%Y-%m-%d\")\n })\n return self.impersonation_token.token\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n impersonation_token_id = self.impersonation_token.id\n self.user.impersonationtokens.delete(impersonation_token_id)\n assert not self.user.impersonationtokens.get(impersonation_token_id).active\n\n\ndef impersonate(admin_gl, username):\n matching = admin_gl.users.list(username=username)\n if len(matching) != 1:\n raise Exception(\"Unexpected number of users matching \" + username)\n user_id = matching[0].id\n\n user = admin_gl.users.get(user_id)\n return ImpersonationToken(user)\n\n\ndef trigger_ci(username, repo_url, base_repo_url, ref, status_update_metadata: Optional[dict] = None):\n # Ignore pushes by the CI itself\n if username == ci_user:\n return\n\n admin_gl = gitlab.Gitlab(GITLAB_URL, private_token=ADMIN_TOKEN)\n admin_gl.auth()\n\n with impersonate(admin_gl, username) as token:\n user_gl = gitlab.Gitlab(GITLAB_URL, private_token=token)\n user_gl.auth()\n project = user_gl.projects.get(PROJECT_ID)\n\n is_anonymous = username == default_user\n\n variables = {\n \"TARGET_COMPONENTS_URL\": repo_url,\n \"TARGET_COMPONENTS\": \" \".join(is_anonymous and default_user_target_components or target_components),\n \"PUSHED_REF\": ref,\n \"ORCHESTRA_CONFIG_REPO_HTTP_URL\": ORCHESTRA_CONFIG_REPO_HTTP_URL,\n \"ORCHESTRA_CONFIG_REPO_SSH_URL\": ORCHESTRA_CONFIG_REPO_SSH_URL,\n }\n\n if base_repo_url == ORCHESTRA_CONFIG_REPO_HTTP_URL and repo_url != base_repo_url:\n variables[\"ORCHESTRA_CONFIG_REPO_HTTP_URL\"] = repo_url\n elif base_repo_url == revng_orchestra_repo_url and repo_url != base_repo_url:\n variables[\"REVNG_ORCHESTRA_URL\"] = repo_url\n\n if status_update_metadata is not None:\n variables[\"REVNG_CI_STATUS_UPDATE_METADATA\"] = json.dumps(status_update_metadata)\n\n if username in allowed_to_push and base_repo_url == repo_url:\n variables[\"BASE_USER_OPTIONS_YML\"] = pusher_user_options\n variables[\"SSH_PRIVATE_KEY\"] = revng_push_ci_private_key\n variables[\"PUSH_CHANGES\"] = \"1\"\n else:\n tpl_params = {\n \"clone_namespace\": \"/\".join(repo_url.split(\"/\")[:-1]),\n \"private_sources\": \"\",\n \"private_bin_archives\": \"\"\n }\n if not is_anonymous:\n # Placeholders are replaced in the shell script since we don't want to intentionally introduce shell\n # injection vulnerabilities.\n tpl_params.update({\n \"private_sources\": \"- private: %PRIVATE_SOURCES_PLACEHOLDER%\",\n \"private_bin_archives\": \"- private: %PRIVATE_BIN_ARCHIVES_PLACEHOLDER%\"\n })\n\n variables[\"BASE_USER_OPTIONS_YML\"] = 
\\\n string.Template(pull_request_user_options).substitute(tpl_params)\n\n if not is_anonymous:\n for variable, value in config.get(\"authenticated_user_variables\", {}).items():\n variables[variable] = value\n\n parameters = {\n \"ref\": BRANCH,\n \"variables\": [{\"key\": key, \"value\": value}\n for key, value\n in variables.items()]\n }\n print(json.dumps(parameters, indent=2))\n project.pipelines.create(parameters)\n\n\nLAB_TO_HUB_STATUS_MAP = {\n \"created\": \"queued\",\n \"waiting_for_resource\": \"queued\",\n \"preparing\": \"queued\",\n \"pending\": \"queued\",\n \"running\": \"in_progress\",\n \"success\": \"completed\",\n \"failed\": \"completed\",\n \"canceled\": \"completed\",\n \"skipped\": \"completed\",\n \"manual\": \"queued\",\n \"scheduled\": \"queued\"\n}\n\nLAB_TO_LAB_STATUS_MAP = {\n \"created\": \"started\",\n \"waiting_for_resource\": None,\n \"preparing\": None,\n \"pending\": None,\n \"running\": None,\n \"success\": \"completed\",\n \"failed\": \"failed\",\n \"canceled\": \"cancelled\",\n \"skipped\": \"skipped\",\n \"manual\": None,\n \"scheduled\": None\n}\n\nLAB_TO_HUB_CONCLUSION_MAP = {\n \"success\": \"success\",\n \"failed\": \"failure\",\n \"skipped\": \"skipped\",\n \"canceled\": \"cancelled\",\n}\n\n\ndef get_pl_metadata(admin_gl, pipeline_id):\n pipeline = admin_gl.projects.get(PROJECT_ID).pipelines.get(pipeline_id)\n variables = pipeline.variables.list()\n\n for var in variables:\n if var.key == \"REVNG_CI_STATUS_UPDATE_METADATA\":\n return json.loads(var.value)\n\n\n@app.route('/ci-hook/gitlab_ci', methods=[\"POST\"])\ndef gitlab_ci_hook():\n headers = dict(request.headers)\n if headers.get(\"X-Gitlab-Token\", \"\") != GITLAB_SECRET:\n return log_response(\"Invalid token\\n\", 403)\n\n event = headers.get(\"X-Gitlab-Event\", \"\")\n if event != \"Job Hook\":\n return log_response(f\"Invalid event: {event}\\n\", 404)\n\n data = request.json\n\n # Fetch the status from the pipeline variables\n admin_gl = gitlab.Gitlab(GITLAB_URL, private_token=ADMIN_TOKEN)\n admin_gl.auth()\n\n metadata = get_pl_metadata(admin_gl, data[\"pipeline_id\"])\n if not metadata:\n return \"No action performed\\n\", 200\n\n if metadata[\"platform\"] == \"github\":\n j = {\n \"details_url\": ci_job_url + str(data[\"build_id\"]),\n \"status\": LAB_TO_HUB_STATUS_MAP[data[\"build_status\"]],\n \"external_id\": str(data[\"build_id\"]),\n \"actions\": []\n }\n if data.get(\"build_started_at\"):\n j[\"started_at\"] = data[\"build_started_at\"].replace(\" UTC\", \"Z\").replace(\" \", \"T\")\n if j[\"status\"] == \"completed\":\n if data.get(\"build_finished_at\"):\n j[\"completed_at\"] = data[\"build_finished_at\"].replace(\" UTC\", \"Z\").replace(\" \", \"T\")\n j[\"conclusion\"] = LAB_TO_HUB_CONCLUSION_MAP[data[\"build_status\"]]\n\n r = requests.patch(\n f\"{GITHUB_API_URL}/repos/{metadata['github_repository_name']}/check-runs/{metadata['github_check_run_id']}\",\n headers=github_headers(),\n json=j\n )\n if r.status_code not in (200, 201, 202):\n return log_response(f\"Github API call failed:\\n{r.content.decode(errors='replace')}\", r.status_code)\n\n elif metadata[\"platform\"] == \"gitlab\":\n sha = metadata['head_sha'][:8]\n messages = {\n \"started\": f\"The CI job for this merge request has been started for commit {sha}.\\n\\n\"\n f\"View the status: {ci_job_url}{data['build_id']}\",\n \"completed\": f\"**Success!** The CI job has passed for commit {sha}\",\n \"failed\": f\"**Error!** The CI job has failed for commit {sha}\",\n \"cancelled\": f\"**Cancelled.** The CI job has 
been cancelled for commit {sha}\",\n \"skipped\": f\"**Skipped.** The CI job was skipped for commit {sha}\"\n }\n message = messages.get(LAB_TO_LAB_STATUS_MAP[data[\"build_status\"]])\n\n if message:\n with impersonate(admin_gl, ci_user) as token:\n user_gl = gitlab.Gitlab(GITLAB_URL, private_token=token)\n user_gl.auth()\n\n mr_notes = user_gl.projects.get(metadata['gitlab_project_id']).mergerequests.get(\n metadata['gitlab_mr_iid']).notes\n mr_notes.create({\n \"body\": message,\n \"merge_request_diff_sha\": metadata['head_sha']\n })\n else:\n return log_response(f\"Invalid platform ID: '{metadata['platform']}'\\n\", 500)\n\n return \"All good\\n\", 200\n\n\n@app.route('/ci-hook/gitlab', methods=[\"POST\"])\ndef gitlab_hook():\n headers = dict(request.headers)\n if headers.get(\"X-Gitlab-Token\", \"\") != GITLAB_SECRET:\n return log_response(\"Invalid token\\n\", 403)\n\n event = headers.get(\"X-Gitlab-Event\", \"\")\n data = request.json\n attributes = data.get(\"object_attributes\", {})\n\n if event == \"Push Hook\":\n trigger_ci(\n data[\"user_username\"],\n data[\"project\"][\"git_http_url\"] + \" \" + data[\"project\"][\"git_ssh_url\"],\n data[\"project\"][\"git_http_url\"] + \" \" + data[\"project\"][\"git_ssh_url\"],\n data[\"ref\"]\n )\n\n elif event == \"Merge Request Hook\" and attributes.get(\"action\", \"\") in (\"open\", \"update\") \\\n and attributes.get(\"state\", \"\") == \"opened\":\n trigger_ci(\n data[\"user\"][\"username\"],\n attributes[\"source\"][\"git_http_url\"] + \" \" + attributes[\"source\"][\"git_ssh_url\"],\n attributes[\"target\"][\"git_http_url\"] + \" \" + attributes[\"target\"][\"git_ssh_url\"],\n f'refs/heads/{attributes[\"source_branch\"]}',\n {\n \"platform\": \"gitlab\",\n \"gitlab_project_id\": data['project']['id'],\n \"gitlab_mr_iid\": attributes['iid'],\n \"gitlab_mr_web_url\": attributes[\"url\"],\n \"head_sha\": attributes['last_commit']['id']\n }\n )\n\n return \"All good\\n\", 200\n\n\n@app.route('/ci-hook/github', methods=[\"POST\"])\ndef github_hook():\n \"\"\"\n GitHub workflow:\n\n 1. User does one of the following actions:\n - Pushes to a managed repo\n - Pushes to a pull-request targeting a managed repo\n - Presses the re-run button on a failed job\n 2. GitHub sends respectively the following webhooks:\n - check_suite, action=requested\n - pull_request, action=opened/synchronize\n - check_suite or check_run, action=rerequested\n 3. We create a new check run with the GitHub API (this implicitly creates a check suite as well)\n 4. 
We start the CI and link it to the check run ID; gitlab_ci_hook() will then update the check run status.\n \"\"\"\n\n headers = dict(request.headers)\n\n signature = 'sha256=' + hmac.new(GITHUB_APP_SECRET, request.data, hashlib.sha256).hexdigest()\n if signature != headers.get(\"X-Hub-Signature-256\"):\n return log_response(\"Invalid signature\\n\", 403)\n\n data = request.json\n\n event = headers.get(\"X-Github-Event\", \"\")\n\n if event == \"pull_request\" and data.get(\"action\", \"\") in ('opened', 'synchronize'):\n pull_request = data[\"pull_request\"]\n\n username = data[\"sender\"][\"login\"]\n branch = pull_request[\"head\"][\"ref\"]\n head_sha = pull_request[\"head\"][\"sha\"]\n clone_url = pull_request[\"head\"][\"repo\"][\"clone_url\"]\n base_url = pull_request[\"base\"][\"repo\"][\"clone_url\"]\n\n elif event in (\"check_suite\", \"check_run\") and data.get(\"action\", \"\") in ('requested', 'rerequested'):\n check_suite = data[\"check_suite\"] if event == \"check_suite\" else data[\"check_run\"][\"check_suite\"]\n\n username = data[\"sender\"][\"login\"]\n branch = check_suite[\"head_branch\"]\n head_sha = check_suite[\"head_sha\"]\n clone_url = data[\"repository\"][\"clone_url\"]\n base_url = clone_url\n\n else:\n return \"Unsupported event\\n\", 202\n\n assert branch is not None\n\n r = requests.post(\n f\"{GITHUB_API_URL}/repos/{data['repository']['full_name']}/check-runs\",\n headers=github_headers(),\n json={\n \"name\": \"rev.ng CI\",\n \"head_sha\": head_sha\n }\n )\n if r.status_code not in (200, 201, 202):\n return log_response(f\"Check run creation failed:\\n{r.content.decode(errors='replace')}\\n\", r.status_code)\n\n check_run = r.json()\n\n trigger_ci(\n hub_to_lab(username),\n clone_url,\n base_url,\n f'refs/heads/{branch}',\n {\n \"platform\": \"github\",\n \"github_repository_name\": data['repository']['full_name'],\n \"github_check_run_id\": check_run['id'],\n \"triggering_user\": username\n }\n )\n\n return \"All good\\n\", 200\n\n","sub_path":".orchestra/ci/ci-hook/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"264004017","text":"#!/usr/bin/env python\n\n##############################################################################\n##\n## This file is part of Sardana\n##\n## http://www.sardana-controls.org/\n##\n## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain\n##\n## Sardana is free software: you can redistribute it and/or modify\n## it under the terms of the GNU Lesser General Public License as published by\n## the Free Software Foundation, either version 3 of the License, or\n## (at your option) any later version.\n##\n## Sardana is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU Lesser General Public License for more details.\n##\n## You should have received a copy of the GNU Lesser General Public License\n## along with Sardana. 
If not, see .\n##\n##############################################################################\n\n\"\"\" \"\"\"\n\n__all__ = [\"ZeroDExpChannel\", \"ZeroDExpChannelClass\"]\n\n__docformat__ = 'restructuredtext'\n\nimport time\n\nfrom PyTango import Except\nfrom PyTango import DevVoid, DevDouble, DevString\nfrom PyTango import DispLevel, DevState, AttrQuality\nfrom PyTango import READ, READ_WRITE, SCALAR, SPECTRUM\n\nfrom taurus.core.util.log import DebugIt\n\nfrom sardana import State, DataFormat, SardanaServer\nfrom sardana.sardanaattribute import SardanaAttribute\nfrom sardana.pool.controller import ZeroDController, Type\nfrom sardana.tango.core.util import to_tango_type_format\n\nfrom sardana.tango.pool.PoolDevice import PoolElementDevice, \\\n PoolElementDeviceClass\n\n\nclass ZeroDExpChannel(PoolElementDevice):\n\n def __init__(self, dclass, name):\n PoolElementDevice.__init__(self, dclass, name)\n\n def init(self, name):\n PoolElementDevice.init(self, name)\n\n def get_zerod(self):\n return self.element\n\n def set_zerod(self, zerod):\n self.element = zerod\n\n zerod = property(get_zerod, set_zerod)\n\n @DebugIt()\n def delete_device(self):\n PoolElementDevice.delete_device(self)\n zerod = self.zerod\n if zerod is not None:\n zerod.remove_listener(self.on_zerod_changed)\n\n @DebugIt()\n def init_device(self):\n PoolElementDevice.init_device(self)\n zerod = self.zerod\n if zerod is None:\n full_name = self.get_full_name()\n name = self.alias or full_name\n self.zerod = zerod = \\\n self.pool.create_element(type=\"ZeroDExpChannel\", name=name,\n full_name=full_name, id=self.Id, axis=self.Axis,\n ctrl_id=self.Ctrl_id)\n zerod.add_listener(self.on_zerod_changed)\n\n ## force a state read to initialize the state attribute\n #state = zerod.state\n self.set_state(DevState.ON)\n\n def on_zerod_changed(self, event_source, event_type, event_value):\n # during server startup and shutdown avoid processing element\n # creation events\n if SardanaServer.server_state != State.Running:\n return\n\n timestamp = time.time()\n name = event_type.name\n quality = AttrQuality.ATTR_VALID\n priority = event_type.priority\n error = None\n attr = self.get_device_attr().get_attr_by_name(name)\n\n if name == \"state\":\n event_value = self.calculate_tango_state(event_value)\n elif name == \"status\":\n event_value = self.calculate_tango_status(event_value)\n else:\n if isinstance(event_value, SardanaAttribute):\n if event_value.error:\n error = Except.to_dev_failed(*event_value.exc_info)\n timestamp = event_value.timestamp\n event_value = event_value.value\n\n if name == \"value\":\n state = self.zerod.get_state()\n if state == State.Moving:\n quality = AttrQuality.ATTR_CHANGING\n self.set_attribute(attr, value=event_value, timestamp=timestamp,\n quality=quality, priority=priority, error=error,\n synch=False)\n\n def always_executed_hook(self):\n #state = to_tango_state(self.zerod.get_state(cache=False))\n pass\n\n def read_attr_hardware(self, data):\n pass\n\n def get_dynamic_attributes(self):\n cache_built = hasattr(self, \"_dynamic_attributes_cache\")\n\n std_attrs, dyn_attrs = \\\n PoolElementDevice.get_dynamic_attributes(self)\n\n if not cache_built:\n # For value attribute, listen to what the controller says for data\n # type (between long and float)\n value = std_attrs.get('value')\n if value is not None:\n attr_name, data_info, attr_info = value\n ttype, _ = to_tango_type_format(attr_info.dtype)\n data_info[0][0] = ttype\n\n # Add manually a 'CurrentValue' with the same time as 'Value'\n attr_name = 
'CurrentValue'\n attr_info = attr_info.copy()\n attr_info.description = attr_name\n std_attrs[attr_name] = [attr_name, data_info, attr_info]\n\n return std_attrs, dyn_attrs\n\n def initialize_dynamic_attributes(self):\n attrs = PoolElementDevice.initialize_dynamic_attributes(self)\n\n detect_evts = \"value\",\n non_detect_evts = ()\n\n for attr_name in detect_evts:\n if attr_name in attrs:\n self.set_change_event(attr_name, True, True)\n for attr_name in non_detect_evts:\n if attr_name in attrs:\n self.set_change_event(attr_name, True, False)\n\n def read_Value(self, attr):\n zerod = self.zerod\n value = zerod.get_accumulated_value()\n quality = None\n if self.get_state() == State.Moving:\n quality = AttrQuality.ATTR_CHANGING\n self.set_attribute(attr, value=value.value, quality=quality, priority=0)\n\n def read_CurrentValue(self, attr):\n zerod = self.zerod\n #use_cache = ct.is_action_running() and not self.Force_HW_Read\n use_cache = self.get_state() == State.Moving and not self.Force_HW_Read\n value = zerod.get_current_value(cache=use_cache, propagate=0)\n if value.error:\n Except.throw_python_exception(*value.exc_info)\n quality = None\n state = zerod.get_state(cache=use_cache, propagate=0)\n if state == State.Moving:\n quality = AttrQuality.ATTR_CHANGING\n self.set_attribute(attr, value=value.value, quality=quality,\n priority=0, timestamp=value.timestamp)\n\n def Start(self):\n self.zerod.start_acquisition()\n\n def read_ValueBuffer(self, attr):\n attr.set_value(self.zerod.get_value_buffer())\n\n def read_TimeBuffer(self, attr):\n attr.set_value(self.zerod.get_time_buffer())\n\n def read_CumulationType(self, attr):\n attr.set_value(self.zerod.get_cumulation_type())\n\n def write_CumulationType(self, attr):\n self.zerod.set_cumulation_type(attr.get_write_value())\n\n def _is_allowed(self, req_type):\n return PoolElementDevice._is_allowed(self, req_type)\n\n is_Value_allowed = _is_allowed\n is_CurrentValue_allowed = _is_allowed\n is_CumulationType_allowed = _is_allowed\n is_ValueBuffer_allowed = _is_allowed\n is_TimeBuffer_allowed = _is_allowed\n\n\n_DFT_VALUE_INFO = ZeroDController.standard_axis_attributes['Value']\n_DFT_VALUE_TYPE, _DFT_VALUE_FORMAT = to_tango_type_format(_DFT_VALUE_INFO[Type], DataFormat.Scalar)\n\n\nclass ZeroDExpChannelClass(PoolElementDeviceClass):\n\n # Class Properties\n class_property_list = {\n }\n\n # Device Properties\n device_property_list = {\n }\n device_property_list.update(PoolElementDeviceClass.device_property_list)\n\n # Command definitions\n cmd_list = {\n 'Start' : [ [DevVoid, \"\"], [DevVoid, \"\"] ],\n }\n cmd_list.update(PoolElementDeviceClass.cmd_list)\n\n # Attribute definitions\n attr_list = {\n 'ValueBuffer' : [ [ DevDouble, SPECTRUM, READ, 16 * 1024 ] ],\n 'TimeBuffer' : [ [ DevDouble, SPECTRUM, READ, 16 * 1024 ] ],\n 'CumulationType' : [ [ DevString, SCALAR, READ_WRITE ],\n { 'Memorized' : \"true\",\n 'label' : \"Cumulation Type\",\n 'Display level' : DispLevel.EXPERT } ],\n }\n attr_list.update(PoolElementDeviceClass.attr_list)\n\n standard_attr_list = {\n 'Value' : [ [ _DFT_VALUE_TYPE, SCALAR, READ, ],\n { 'abs_change' : '1.0', } ],\n }\n standard_attr_list.update(PoolElementDeviceClass.standard_attr_list)\n\n def _get_class_properties(self):\n ret = PoolElementDeviceClass._get_class_properties(self)\n ret['Description'] = \"0D experimental channel device class\"\n ret['InheritedFrom'].insert(0, 'PoolElementDevice')\n return 
ret\n","sub_path":"src/sardana/tango/pool/ZeroDExpChannel.py","file_name":"ZeroDExpChannel.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"558694883","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport sys\nfrom DTNRMLibs.MainUtilities import getVal\nfrom SiteFE.PolicyService.stateMachine import StateMachine\nfrom DTNRMLibs.MainUtilities import getConfig, getStreamLogger\nfrom DTNRMLibs.FECalls import getDBConn\n\nLOGGER = getStreamLogger()\nconfig = getConfig([\"/etc/dtnrm-site-fe.conf\"])\nstateMachine = StateMachine(LOGGER)\n\ndef deleteAll(sitename, deltaUID=None):\n dbI = getDBConn()\n dbobj = getVal(dbI, sitename=sitename)\n for delta in dbobj.get('deltas'):\n if deltaUID:\n if delta['uid'] != deltaUID:\n continue\n print('Cancel %s' % delta['uid'])\n stateMachine._stateChangerDelta(dbobj, 'remove', **delta)\n\nif __name__ == \"__main__\":\n print(len(sys.argv))\n print(sys.argv)\n if len(sys.argv) > 2:\n deleteAll(sys.argv[1], sys.argv[2])\n else:\n deleteAll(sys.argv[1])\n","sub_path":"helpers/cancelalldeltas.py","file_name":"cancelalldeltas.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"45807520","text":"from keras.models import Model\nfrom keras.layers import Input, concatenate, Conv2D, AveragePooling2D, MaxPooling2D, Activation, Conv2DTranspose\nfrom keras.layers.convolutional import UpSampling2D\nfrom keras.layers import BatchNormalization\nfrom layers import identity_block, dense_block\n\n\ndef deep_unet(input_shape, classes):\n \"\"\"\n Implementation of U-Net for satellite segmentation with dense residual skip connections.\n Inputs:\n - Input_shape: input shape. 
Default (None, None, 3) for arbitrary input size and testing\n - classes: number of object classes.\n Outputs:\n - Model: Outputs deep U-net model with skip connections\n \"\"\"\n # from layers import identity_block\n img_input = Input(input_shape)\n kernel = 3\n\n # Encoder:\n # block 1\n first = Conv2D(64, (kernel,kernel), padding='same', kernel_initializer='he_normal')(img_input)\n first = BatchNormalization()(first)\n first = Activation('relu')(first)\n skip_conv_1 = identity_block(first, 64, kernel=kernel)\n skip_conv_1 = identity_block(skip_conv_1, 64, kernel=kernel)\n # down (pool)\n pool_1 = MaxPooling2D()(skip_conv_1)\n # block 2\n second = Conv2D(128, (kernel, kernel), padding='same',kernel_initializer='he_normal')(pool_1)\n second = BatchNormalization()(second)\n second = Activation('relu')(second)\n skip_conv_2 = identity_block(second, 128, kernel=kernel)\n skip_conv_2 = identity_block(skip_conv_2, 128, kernel=kernel)\n # down (pool)\n pool_2 = MaxPooling2D()(skip_conv_2)\n # block 3\n third = Conv2D(256, (kernel,kernel),padding='same',kernel_initializer='he_normal')(pool_2)\n third = BatchNormalization()(third)\n third = Activation('relu')(third)\n skip_conv_3 = identity_block(third, 256, kernel=kernel)\n skip_conv_4 = identity_block(skip_conv_3, 256, kernel=kernel)\n # down (pool)\n pool_3 = MaxPooling2D()(skip_conv_4)\n # block 4\n fourth = Conv2D(512, (kernel,kernel),padding='same',kernel_initializer='he_normal')(pool_3)\n fourth = BatchNormalization()(fourth)\n fourth = Activation('relu')(fourth)\n skip_conv_5 = identity_block(fourth, 512, kernel=kernel)\n skip_conv_6= identity_block(skip_conv_5, 512, kernel=kernel)\n\n pool_4 = MaxPooling2D()(skip_conv_6)\n # block 5 (final)\n fifth = Conv2D(512, (kernel,kernel),padding='same',kernel_initializer='he_normal')(pool_4)\n skip_conv_7 = identity_block(fifth, 512, kernel=kernel)\n skip_conv_8 = identity_block(skip_conv_7, 512, kernel=kernel)\n\n # Decoder\n # Transpose Convolution 1 (up)\n x = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(skip_conv_8)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n # dense block\n x = concatenate([x, skip_conv_6])\n x = Conv2D(512, (kernel, kernel), padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = identity_block(x, 512, kernel=kernel)\n x = identity_block(x, 512, kernel=kernel)\n # Transpose Convolution 2\n x = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n # dense block\n x = concatenate([x, skip_conv_4])\n x = Conv2D(256, (kernel, kernel), padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = identity_block(x, 256, kernel=kernel)\n x = identity_block(x, 256, kernel=kernel)\n # Transpose Convolution 3\n x = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n # dense block\n x = concatenate([x, skip_conv_2])\n x = Conv2D(128, (kernel, kernel), padding='same', kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = identity_block(x, 128, kernel=kernel)\n x = identity_block(x, 128, kernel=kernel)\n # Transpose Convolution 4\n x = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n # dense block\n x = concatenate([x, skip_conv_1])\n x = Conv2D(64, (kernel, kernel), padding='same', 
kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = identity_block(x, 64, kernel=kernel)\n x = identity_block(x, 64, kernel=kernel)\n\n # last conv\n x = Conv2D(classes, (3, 3), activation='softmax', padding='same')(x)\n\n model = Model(img_input, x)\n\n return model\n\ndef dense_unet(input_shape, classes):\n img_input = Input(input_shape)\n kernel = 3\n\n # Encoder:\n # block 1\n first = dense_block(input_tensor=img_input, filters=64, kernel=kernel)\n pool_1 = MaxPooling2D()(first)\n # block 2\n second = dense_block(input_tensor=pool_1, filters=128, kernel=kernel)\n pool_2 = MaxPooling2D()(second)\n # block 3\n third = dense_block(input_tensor=pool_2, filters=256, kernel=kernel)\n pool_3 = MaxPooling2D()(third)\n # block 4\n fourth = dense_block(input_tensor=pool_3, filters=512, kernel=kernel)\n pool_4 = MaxPooling2D()(fourth)\n # block 5 (final)\n fifth = dense_block(input_tensor=pool_4, filters=512, kernel=kernel)\n\n # Decoder\n # Transpose Convolution 1 (up)\n up_1 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(fifth)\n up_1 = BatchNormalization()(up_1)\n up_1 = Activation('relu')(up_1)\n # dense block\n concat_1 = concatenate([up_1, fourth])\n dense_1 = dense_block(input_tensor=concat_1, filters=512, kernel=kernel)\n # Transpose Convolution 2\n up_2 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(dense_1)\n up_2 = BatchNormalization()(up_2)\n up_2 = Activation('relu')(up_2)\n # dense block\n concat_2 = concatenate([up_2, third])\n dense_2 = dense_block(input_tensor=concat_2, filters=256, kernel=kernel)\n # Transpose Convolution 3\n up_3 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(dense_2)\n up_3 = BatchNormalization()(up_3)\n up_3 = Activation('relu')(up_3)\n # dense block\n concat_3 = concatenate([up_3, second])\n dense_3 = dense_block(input_tensor=concat_3, filters=128, kernel=kernel)\n # Transpose Convolution 4\n up_4 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(dense_3)\n up_4 = BatchNormalization()(up_4)\n up_4 = Activation('relu')(up_4)\n # dense block\n concat_4 = concatenate([up_4, first])\n dense_4 = dense_block(input_tensor=concat_4, filters=64, kernel=kernel)\n # last conv\n final = Conv2D(classes, (3, 3), activation='softmax', padding='same')(dense_4)\n\n model = Model(img_input, final)\n return model","sub_path":"Models/dense_unet.py","file_name":"dense_unet.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"462722084","text":"import os\nimport tkinter as tk\nfrom tkinter import ttk\nimport multiprocessing\nimport time\nimport numpy as np\nfrom video_source_calibration import VideoSourceCalibration, VideoSourceCalibrationConfig\nfrom tracking import TrackingScheduler, TrackingCofig\nfrom marker_detection_settings import CUBE_DETECTION, SINGLE_DETECTION, SingleMarkerDetectionSettings, MarkersCubeDetectionSettings, MarkerCubeMapping\nimport video_device_listing\n\n\nclass App():\n\n def __init__(self, start_tracking, stop_tracking, window):\n self.start_tracking_event = start_tracking\n self.stop_tracking_event = stop_tracking\n\n window.title(\"AR Tracking Interface\")\n\n width = 500\n height = 700\n pos_x = (window.winfo_screenwidth()/2) - (width/2)\n pos_y = (window.winfo_screenheight()/2) - (height/2)\n window.geometry('%dx%d+%d+%d' % (width, height, pos_x, pos_y))\n window.resizable(0, 0)\n\n # Create some room around all the internal frames\n window['padx'] 
= 5\n window['pady'] = 5\n\n window.grid_rowconfigure(1, weight=1)\n window.grid_rowconfigure(2, weight=1)\n window.grid_rowconfigure(3, weight=1)\n window.grid_rowconfigure(4, weight=1)\n window.grid_columnconfigure(1, weight=1)\n\n self.video_source_frame = ttk.LabelFrame(\n window, text=\"Video Source\")\n self.video_source_frame.grid(row=1, column=1, pady=5, padx=5)\n self.video_source_frame.grid_columnconfigure(1, weight=1)\n\n self.refresh_video_sources_button = tk.Button(\n self.video_source_frame, text=\"Refresh Devices\")\n self.refresh_video_sources_button['command'] = self.refresh_video_sources\n self.refresh_video_sources_button.grid(row=1, column=1, pady=5)\n\n self.video_source = ttk.Combobox(\n self.video_source_frame, state=\"readonly\", height=4, width=25)\n self.video_source.bind('<>',\n self.video_source_init)\n self.video_source.grid(row=2, column=1, padx=5, pady=5)\n\n self.video_source_calibration_frame = ttk.LabelFrame(\n self.video_source_frame, text=\"Calibration\")\n self.video_source_calibration_frame.grid(\n row=3, column=1, padx=5, pady=5)\n\n self.video_source_calibration_status_frame = tk.Frame(\n self.video_source_calibration_frame)\n self.video_source_calibration_status_frame.grid(\n row=1, column=1, padx=5, pady=5)\n\n self.calibration_status_label = ttk.Label(\n self.video_source_calibration_status_frame, text=\"Status:\")\n self.calibration_status_label.grid(row=1, column=1)\n self.calibration_status = ttk.Label(\n self.video_source_calibration_status_frame)\n self.calibration_status.grid(row=1, column=2)\n\n self.calibration_chessboard_parameters_frame = tk.Frame(\n self.video_source_calibration_frame)\n self.calibration_chessboard_parameters_frame.grid(\n row=2, column=1, padx=5)\n\n self.calibration_config = VideoSourceCalibrationConfig.persisted()\n\n self.chessboard_square_size = tk.DoubleVar()\n self.chessboard_square_size.set(\n self.calibration_config.chessboard_square_size)\n self.chessboard_square_size_label = ttk.Label(\n self.calibration_chessboard_parameters_frame, text=\"Chessboard square size:\")\n self.chessboard_square_size_label.grid(\n row=1, column=1)\n self.chessboard_square_size_entry = ttk.Entry(\n self.calibration_chessboard_parameters_frame, width=5,\n textvariable=self.chessboard_square_size)\n self.chessboard_square_size_entry.grid(row=1, column=2)\n\n self.calibration_buttons_frame = tk.Frame(\n self.video_source_calibration_frame)\n self.calibration_buttons_frame.grid(row=3, column=1, pady=5)\n\n self.calibrate_button = tk.Button(\n self.calibration_buttons_frame, text=\"Calibrate\", command=self.calibrate)\n self.calibrate_button.grid(row=1, column=1, padx=5)\n\n self.calibrate_button = tk.Button(\n self.calibration_buttons_frame, text=\"Reset\", command=self.reset_calibration)\n self.calibrate_button.grid(row=1, column=2, padx=5)\n\n self.configuration_frame = tk.Frame(window)\n self.configuration_frame.grid(\n row=2, column=1)\n\n self.configuration_frame.grid_columnconfigure(1, weight=1)\n self.configuration_frame.grid_columnconfigure(2, weight=1)\n\n self.tracking_config_frame = tk.Frame(\n self.configuration_frame)\n self.tracking_config_frame.grid(\n row=1, column=1, pady=5, padx=5)\n\n self.tracking_config_frame.grid_columnconfigure(1, weight=1)\n self.tracking_config_frame.grid_rowconfigure(1, weight=1)\n self.tracking_config_frame.grid_rowconfigure(2, weight=1)\n self.tracking_config_frame.grid_rowconfigure(3, weight=1)\n\n self.tracking_config = TrackingCofig.persisted()\n\n self.detection_mode_frame = 
tk.LabelFrame(\n self.tracking_config_frame, text=\"Detection Mode\")\n self.detection_mode_frame.grid(row=1, column=1, padx=5, pady=5)\n\n self.single_marker_frame = ttk.LabelFrame(\n self.detection_mode_frame, text=\"Single Marker\")\n self.single_marker_frame.grid(\n row=1, column=1, padx=5, pady=5)\n\n self.single_marker_mode = tk.BooleanVar()\n self.single_marker_mode_checkbox = tk.Checkbutton(\n self.single_marker_frame, variable=self.single_marker_mode,\n command=self.single_marker_settings_selection)\n self.single_marker_mode_checkbox.grid(row=1, column=1, pady=5)\n\n self.single_marker_settings_frame = tk.Frame(\n self.single_marker_frame)\n self.single_marker_settings_frame.grid(\n row=2, column=1, padx=5, pady=5)\n\n self.single_marker_id = tk.IntVar()\n self.single_marker_id_label = ttk.Label(\n self.single_marker_settings_frame, text=\"Marker ID:\")\n self.single_marker_id_label.grid(\n row=1, column=1, sticky=tk.W + tk.N)\n self.single_marker_id_entry = ttk.Entry(\n self.single_marker_settings_frame, textvariable=self.single_marker_id, width=5)\n self.single_marker_id_entry.grid(row=1, column=2, sticky=tk.W)\n\n self.single_marker_length = tk.DoubleVar()\n self.single_marker_length_label = ttk.Label(\n self.single_marker_settings_frame, text=\"Marker length:\")\n self.single_marker_length_label.grid(\n row=2, column=1, sticky=tk.W + tk.N)\n self.single_marker_length_entry = ttk.Entry(\n self.single_marker_settings_frame, textvariable=self.single_marker_length, width=5)\n self.single_marker_length_entry.grid(row=2, column=2, sticky=tk.W)\n\n self.single_marker_buttons_frame = tk.Frame(\n self.single_marker_frame)\n self.single_marker_buttons_frame.grid(\n row=3, column=1, padx=5, pady=5)\n\n self.single_marker_save_button = tk.Button(\n self.single_marker_buttons_frame, text=\"Save\", command=self.single_marker_save)\n self.single_marker_save_button.grid(row=1, column=1)\n\n self.marker_cube_frame = ttk.LabelFrame(\n self.detection_mode_frame, text=\"Marker Cube\")\n self.marker_cube_frame.grid(\n row=1, column=2, padx=5, pady=5)\n\n self.marker_cube_mode = tk.BooleanVar()\n self.marker_cube_mode_checkbox = tk.Checkbutton(\n self.marker_cube_frame, variable=self.marker_cube_mode,\n command=self.marker_cube_settings_selection)\n self.marker_cube_mode_checkbox.grid(row=1, column=1, pady=5)\n\n self.cube_id_frame = tk.Frame(self.marker_cube_frame)\n self.cube_id_frame.grid(row=2, column=1, padx=5, pady=5)\n\n self.cube_id_selection = ttk.Combobox(\n self.cube_id_frame, state=\"normal\", height=4, width=15)\n self.cube_id_selection.bind('<>',\n self.cube_id_selected)\n self.cube_id_selection.grid(row=1, column=1)\n\n self.new_cube_id_button = tk.Button(\n self.cube_id_frame, text=\"New\", command=self.add_cube_id)\n self.new_cube_id_button.grid(row=1, column=2, padx=5)\n\n self.marker_cube_settings_frame = tk.Frame(\n self.marker_cube_frame)\n self.marker_cube_settings_frame.grid(\n row=3, column=1, padx=5, pady=5)\n\n self.cube_up_marker_id = tk.IntVar()\n self.cube_up_marker_id_label = ttk.Label(\n self.marker_cube_settings_frame, text=\"Up Marker ID:\")\n self.cube_up_marker_id_label.grid(\n row=1, column=1, sticky=tk.W + tk.N)\n self.cube_up_marker_id_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_up_marker_id, width=5)\n self.cube_up_marker_id_entry.grid(row=1, column=2, sticky=tk.W)\n\n self.cube_side_marker_ids_label = ttk.Label(\n self.marker_cube_settings_frame, text=\"Side Marker IDS:\")\n self.cube_side_marker_ids_label.grid(\n row=2, 
column=1, sticky=tk.W + tk.N)\n\n self.cube_side_marker_1 = tk.IntVar()\n self.cube_side_marker_1_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_side_marker_1, width=5)\n self.cube_side_marker_1_entry.grid(row=2, column=2, sticky=tk.W)\n self.cube_side_marker_2 = tk.StringVar()\n self.cube_side_marker_2_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_side_marker_2, width=5)\n self.cube_side_marker_2_entry.grid(row=2, column=3, sticky=tk.W)\n self.cube_side_marker_3 = tk.StringVar()\n self.cube_side_marker_3_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_side_marker_3, width=5)\n self.cube_side_marker_3_entry.grid(row=2, column=4, sticky=tk.W)\n self.cube_side_marker_4 = tk.StringVar()\n self.cube_side_marker_4_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_side_marker_4, width=5)\n self.cube_side_marker_4_entry.grid(row=2, column=5, sticky=tk.W)\n\n self.cube_down_marker_id = tk.StringVar()\n self.cube_down_marker_id_label = ttk.Label(\n self.marker_cube_settings_frame, text=\"Down Marker ID:\")\n self.cube_down_marker_id_label.grid(\n row=3, column=1, sticky=tk.W + tk.N)\n self.cube_down_marker_id_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_down_marker_id, width=5)\n self.cube_down_marker_id_entry.grid(row=3, column=2, sticky=tk.W)\n\n self.cube_markers_length = tk.DoubleVar()\n self.cube_markers_length_label = ttk.Label(\n self.marker_cube_settings_frame, text=\"Markers length:\")\n self.cube_markers_length_label.grid(\n row=4, column=1, pady=5)\n self.cube_markers_length_entry = ttk.Entry(\n self.marker_cube_settings_frame, textvariable=self.cube_markers_length, width=5)\n self.cube_markers_length_entry.grid(row=4, column=2, sticky=tk.W)\n\n self.marker_cube_buttons_frame = tk.Frame(\n self.marker_cube_frame)\n self.marker_cube_buttons_frame.grid(\n row=4, column=1, padx=5, pady=5)\n\n self.marker_cube_id_map_button = tk.Button(\n self.marker_cube_buttons_frame, text=\"Map and Save\", command=self.marker_cube_map)\n self.marker_cube_id_map_button.grid(row=1, column=1, padx=5)\n\n self.marker_cube_id_delete_button = tk.Button(\n self.marker_cube_buttons_frame, text=\"Delete\", command=self.marker_cube_delete)\n self.marker_cube_id_delete_button.grid(row=1, column=2, padx=5)\n\n self.single_marker_settings = SingleMarkerDetectionSettings.persisted()\n self.single_marker_settings_set()\n\n self.marker_cube_settings = MarkersCubeDetectionSettings.persisted(\n self.cube_id_selection.current())\n self.marker_cube_settings_set()\n\n if self.tracking_config.marker_detection_settings is None or self.tracking_config.marker_detection_settings.identifier == SINGLE_DETECTION:\n self.single_marker_mode.set(True)\n self.single_marker_settings_selection()\n elif self.tracking_config.marker_detection_settings.identifier == CUBE_DETECTION:\n self.marker_cube_mode.set(True)\n self.marker_cube_settings_selection()\n\n self.translation_offset_frame = ttk.LabelFrame(\n self.tracking_config_frame, text=\"Translation Offset\")\n self.translation_offset_frame.grid(row=2, column=1, pady=5)\n\n self.translation_offset_x = tk.DoubleVar()\n self.translation_offset_x.set(\n self.tracking_config.translation_offset[0][3])\n self.translation_offset_x_label = ttk.Label(\n self.translation_offset_frame, text=\"X\", foreground=\"red\")\n self.translation_offset_x_label.grid(row=1, column=1, pady=5)\n self.translation_offset_x_entry = ttk.Entry(\n 
self.translation_offset_frame, textvariable=self.translation_offset_x, width=5)\n self.translation_offset_x_entry.grid(\n row=1, column=2, sticky=tk.W, padx=5)\n\n self.translation_offset_y = tk.DoubleVar()\n self.translation_offset_y.set(\n self.tracking_config.translation_offset[1][3])\n self.translation_offset_y_label = ttk.Label(\n self.translation_offset_frame, text=\"Y\", foreground=\"green\")\n self.translation_offset_y_label.grid(row=1, column=3, pady=5)\n self.translation_offset_y_entry = ttk.Entry(\n self.translation_offset_frame, textvariable=self.translation_offset_y, width=5)\n self.translation_offset_y_entry.grid(\n row=1, column=4, sticky=tk.W, padx=5)\n\n self.translation_offset_z = tk.DoubleVar()\n self.translation_offset_z.set(\n self.tracking_config.translation_offset[2][3])\n self.translation_offset_z_label = ttk.Label(\n self.translation_offset_frame, text=\"Z\", foreground=\"blue\")\n self.translation_offset_z_label.grid(row=1, column=5, pady=5)\n self.translation_offset_z_entry = ttk.Entry(\n self.translation_offset_frame, textvariable=self.translation_offset_z, width=5)\n self.translation_offset_z_entry.grid(\n row=1, column=6, sticky=tk.W, padx=5)\n\n self.export_coordinates_frame = ttk.LabelFrame(\n self.tracking_config_frame, text=\"Coordinates Publish Server UDP\")\n self.export_coordinates_frame.grid(row=3, column=1, pady=5)\n\n self.export_coordinates_input_frame = tk.Frame(\n self.export_coordinates_frame)\n self.export_coordinates_input_frame.grid(\n row=1, column=1, padx=5, pady=5)\n\n self.server_ip = tk.StringVar()\n self.server_ip.set(self.tracking_config.server_ip)\n self.server_ip_label = ttk.Label(\n self.export_coordinates_input_frame, text=\"IP Address:\")\n self.server_ip_label.grid(row=1, column=1)\n self.server_ip_entry = ttk.Entry(\n self.export_coordinates_input_frame, textvariable=self.server_ip, width=15)\n self.server_ip_entry.grid(row=1, column=2)\n\n self.server_port = tk.StringVar()\n self.server_port.set(self.tracking_config.server_port)\n self.server_port_label = ttk.Label(\n self.export_coordinates_input_frame, text=\"Port:\")\n self.server_port_label.grid(row=1, column=3)\n self.server_port_entry = ttk.Entry(\n self.export_coordinates_input_frame, textvariable=self.server_port, width=7)\n self.server_port_entry.grid(row=1, column=4)\n\n self.show_video = tk.BooleanVar()\n self.show_video.set(self.tracking_config.show_video)\n self.show_video_checkbox = tk.Checkbutton(\n self.tracking_config_frame, text=\"Show video\", variable=self.show_video)\n self.show_video_checkbox.grid(row=4, column=1, pady=5)\n\n self.tracking_button = tk.Button(\n window, text=\"Start Tracking\", command=self.start_tracking)\n self.tracking_button.grid(row=4, column=1, sticky=tk.S)\n\n self.base_video_source_dir = '../assets/camera_calibration_data'\n self.base_cube_dir = '../assets/configs/marker_cubes'\n self.calibration = None\n self.cube_ids = []\n self.cube_ids_init()\n self.video_source_list = []\n self.refresh_video_sources()\n self.video_source_init()\n\n def single_marker_settings_selection(self):\n if self.single_marker_mode.get():\n self.marker_cube_mode.set(False)\n\n for child in self.single_marker_settings_frame.winfo_children():\n child.configure(state=tk.ACTIVE)\n\n for child in self.single_marker_buttons_frame.winfo_children():\n child.configure(state=tk.ACTIVE)\n\n for child in self.marker_cube_settings_frame.winfo_children():\n child.configure(state=tk.DISABLED)\n\n for child in self.cube_id_frame.winfo_children():\n 
child.configure(state=tk.DISABLED)\n\n for child in self.marker_cube_buttons_frame.winfo_children():\n child.configure(state=tk.DISABLED)\n else:\n self.single_marker_mode.set(True)\n\n def single_marker_settings_set(self):\n self.single_marker_length.set(\n self.single_marker_settings.marker_length)\n self.single_marker_id.set(self.single_marker_settings.marker_id)\n\n def single_marker_save(self):\n self.single_marker_settings.marker_length = self.single_marker_length.get()\n self.single_marker_settings.marker_id = self.single_marker_id.get()\n\n self.single_marker_settings.persist()\n\n def marker_cube_settings_selection(self):\n if self.marker_cube_mode.get():\n self.single_marker_mode.set(False)\n for child in self.marker_cube_settings_frame.winfo_children():\n child.configure(state=tk.ACTIVE)\n\n for child in self.cube_id_frame.winfo_children():\n child.configure(state=tk.ACTIVE)\n\n for child in self.marker_cube_buttons_frame.winfo_children():\n child.configure(state=tk.ACTIVE)\n\n for child in self.single_marker_settings_frame.winfo_children():\n child.configure(state=tk.DISABLED)\n\n for child in self.single_marker_buttons_frame.winfo_children():\n child.configure(state=tk.DISABLED)\n else:\n self.marker_cube_mode.set(True)\n\n def cube_ids_init(self):\n for cube_id in os.listdir(self.base_cube_dir):\n self.cube_ids.append(cube_id.split(\".\")[0])\n\n self.cube_id_selection['values'] = self.cube_ids\n\n if len(self.cube_ids) > 0:\n self.cube_id_selection.current(0)\n self.marker_cube_settings = MarkersCubeDetectionSettings.persisted(\n self.cube_id_selection.get())\n self.marker_cube_settings_set()\n else:\n self.cube_id_selection.set(\"\")\n\n def cube_id_selected(self, _=None):\n self.marker_cube_settings = MarkersCubeDetectionSettings.persisted(\n self.cube_id_selection.get())\n self.marker_cube_settings_set()\n self.cube_id_selection['state'] = 'readonly'\n\n def add_cube_id(self):\n if self.cube_ids.__contains__(\"\"):\n self.cube_ids.remove(\"\")\n\n self.cube_ids.append(\"\")\n self.cube_id_selection['values'] = self.cube_ids\n self.cube_id_selection.current(len(self.cube_ids) - 1)\n self.cube_id_selected()\n self.cube_id_selection['state'] = 'normal'\n\n def marker_cube_settings_set(self):\n self.cube_up_marker_id.set(self.marker_cube_settings.up_marker_id)\n self.cube_side_marker_1.set(\n self.marker_cube_settings.side_marker_ids[0])\n self.cube_side_marker_2.set(\n self.marker_cube_settings.side_marker_ids[1])\n self.cube_side_marker_3.set(\n self.marker_cube_settings.side_marker_ids[2])\n self.cube_side_marker_4.set(\n self.marker_cube_settings.side_marker_ids[3])\n self.cube_down_marker_id.set(self.marker_cube_settings.down_marker_id)\n self.cube_markers_length.set(self.marker_cube_settings.markers_length)\n\n def marker_cube_map(self):\n detection = MarkerCubeMapping(self.cube_id_selection.get(), self.get_video_source_dir(), self.video_source.current(),\n self.cube_markers_length.get(), self.cube_up_marker_id.get(),\n [self.cube_side_marker_1.get(), self.cube_side_marker_2.get(\n ), self.cube_side_marker_3.get(), self.cube_side_marker_4.get()],\n self.cube_down_marker_id.get())\n\n detection.map()\n self.marker_cube_settings = MarkersCubeDetectionSettings.persisted(\n self.cube_id_selection.get())\n self.cube_id_selection['state'] = 'readonly'\n\n if not self.cube_ids.__contains__(self.cube_id_selection.get()):\n self.cube_ids.append(self.cube_id_selection.get())\n\n if self.cube_ids.__contains__(\"\"):\n self.cube_ids.remove(\"\")\n\n self.cube_id_selection['values'] 
= self.cube_ids\n\n def marker_cube_delete(self):\n filename = '../assets/configs/marker_cubes/{}.pkl'.format(\n self.cube_id_selection.get())\n if os.path.isfile(filename):\n os.remove(filename)\n\n if self.cube_ids.__contains__(self.cube_id_selection.get()):\n self.cube_ids.remove(self.cube_id_selection.get())\n self.cube_id_selection['values'] = self.cube_ids\n\n if len(self.cube_ids) > 0:\n self.cube_id_selection.current(0)\n self.marker_cube_settings_set()\n else:\n self.cube_id_selection.set(\"\")\n\n self.cube_id_selected()\n\n def refresh_video_sources(self):\n try:\n self.video_source_list = video_device_listing.get_devices()\n self.video_source['values'] = self.video_source_list\n self.video_source.current(0)\n except SystemError:\n pass\n\n def start_tracking(self):\n self.save_tracking_config()\n self.start_tracking_event.set()\n self.single_marker_save()\n\n if not self.tracking_config.show_video:\n self.tracking_button['text'] = \"Stop Tracking\"\n self.tracking_button['command'] = self.stop_tracking\n\n def stop_tracking(self):\n self.stop_tracking_event.set()\n self.tracking_button['text'] = \"Start Tracking\"\n self.tracking_button['command'] = self.start_tracking\n\n def calibrate(self):\n self.save_calibration_config()\n self.calibration.calibrate()\n self.update_calibration_status()\n\n def reset_calibration(self):\n self.calibration.delete_calibration()\n self.update_calibration_status()\n\n def video_source_init(self, _=None):\n self.update_calibration_status()\n\n self.calibration = VideoSourceCalibration(\n self.get_video_source_dir(), self.video_source.current(), self.calibration_config)\n\n def update_calibration_status(self):\n if(self.check_video_source_calibration()):\n self.tracking_button['state'] = tk.ACTIVE\n self.calibration_status['text'] = \"Calibrated!\"\n self.calibration_status['foreground'] = \"green\"\n else:\n self.tracking_button['state'] = tk.DISABLED\n self.calibration_status['text'] = \"Not calibrated!\"\n self.calibration_status['foreground'] = \"red\"\n\n def check_video_source_calibration(self):\n if not os.path.exists(self.get_video_source_dir()):\n return False\n\n cam_mtx_exists = os.path.isfile(\n '{}/cam_mtx.npy'.format(self.get_video_source_dir()))\n dist_exists = os.path.isfile(\n '{}/dist.npy'.format(self.get_video_source_dir()))\n\n return cam_mtx_exists & dist_exists\n\n def get_video_source_dir(self):\n camera_identification = self.video_source.get().replace(\" \", \"_\")\n return '{}/{}'.format(self.base_video_source_dir, camera_identification)\n\n def save_tracking_config(self):\n self.tracking_config.device_number = self.video_source.current()\n self.tracking_config.device_parameters_dir = self.get_video_source_dir()\n self.tracking_config.show_video = self.show_video.get()\n self.tracking_config.server_ip = self.server_ip.get()\n self.tracking_config.server_port = self.server_port.get()\n\n marker_detection_settings = None\n if self.single_marker_mode.get():\n marker_detection_settings = self.single_marker_settings\n elif self.marker_cube_mode.get():\n marker_detection_settings = self.marker_cube_settings\n\n self.tracking_config.marker_detection_settings = marker_detection_settings\n\n offset_matrix = np.zeros(shape=(4, 4))\n offset_matrix[0][0] = 1\n offset_matrix[1][1] = 1\n offset_matrix[2][2] = 1\n offset_matrix[0][3] = self.translation_offset_x.get()\n offset_matrix[1][3] = self.translation_offset_y.get()\n offset_matrix[2][3] = self.translation_offset_z.get()\n offset_matrix[3][3] = 1\n 
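# The matrix built above is a homogeneous transform: an identity rotation block with\n        # the user-configured translation offsets in the last column (equivalent to np.eye(4)\n        # with the three offsets written into rows 0-2 of column 3).\n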
        self.tracking_config.translation_offset = offset_matrix\n\n        self.tracking_config.persist()\n\n    def save_calibration_config(self):\n        self.calibration_config.chessboard_square_size = self.chessboard_square_size.get()\n        self.calibration_config.persist()\n\n\nif __name__ == \"__main__\":\n    multiprocessing.freeze_support()\n\n    start_tracking_event = multiprocessing.Event()\n    stop_tracking_event = multiprocessing.Event()\n\n    tracking_scheduler_process = multiprocessing.Process(\n        target=TrackingScheduler(start_tracking_event, stop_tracking_event).main)\n    tracking_scheduler_process.start()\n\n    tk_root = tk.Tk()\n    App(start_tracking_event, stop_tracking_event, tk_root)\n    tk_root.mainloop()\n\n    stop_tracking_event.set()\n    time.sleep(1)\n    tracking_scheduler_process.terminate()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"539904045","text":"\n# encoding = utf-8\n\nimport os\nimport sys\nimport time\nimport datetime\nimport requests\nimport json\n\n\ndef validate_input(helper, definition):\n    \"\"\"Implement your own validation logic to validate the input stanza configurations\"\"\"\n    # This example accesses the modular input variable\n    #dynatrace_tenant = definition.parameters.get('dynatrace_tenant', None)\n    #dynatrace_api_token = definition.parameters.get('dynatrace_api_token', None)\n    #dynatrace_collection_interval = definition.parameters.get('dynatrace_collection_interval', None)\n    pass\n\n\ndef collect_events(helper, ew):\n    \n    '''\n    Verify SSL Certificate\n    '''\n    \n    ssl_certificate = helper.get_arg('ssl_certificate_verification')\n    \n    if ssl_certificate == True:\n        verify_ssl = True\n    else:\n        verify_ssl = False\n    \n    '''\n    Force HTTPS\n    '''\n    dynatrace_tenant_input = helper.get_arg('dynatrace_tenant')\n    \n    if dynatrace_tenant_input.find('https://') == 0:\n        opt_dynatrace_tenant = dynatrace_tenant_input\n    elif dynatrace_tenant_input.find('http://') == 0:\n        opt_dynatrace_tenant = dynatrace_tenant_input.replace('http://', 'https://')\n    else: \n        opt_dynatrace_tenant = 'https://' + dynatrace_tenant_input\n    '''\n    '''\n    \n    opt_dynatrace_api_token = helper.get_arg('dynatrace_api_token')\n    opt_dynatrace_collection_interval = helper.get_arg('dynatrace_collection_interval')\n\n    headers = {'Authorization': 'Api-Token {}'.format(opt_dynatrace_api_token),\n               'version':'Splunk TA 1.0.3'}\n    api_url = opt_dynatrace_tenant + '/api/v1/problem/feed' + '?relativeTime=' + opt_dynatrace_collection_interval \n    problem_url = opt_dynatrace_tenant + '/api/v1/problem/details/'\n    \n    #helper.log_debug(\"url: \" + url)\n    \n    response = helper.send_http_request(api_url, \"GET\", headers=headers, parameters=None, payload=None, cookies=None, verify=verify_ssl, cert=None, timeout=None, use_proxy=True)\n    try:\n        response.raise_for_status()\n    except:\n        helper.log_error(response.text)\n\n    # check the response status, if the status is not successful, raise requests.HTTPError\n    r_status = response.status_code\n    r_data = response.json()\n    z = json.dumps(r_data)\n    x = json.loads(z)\n
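    # (Note: r_data is already parsed JSON, so the dumps/loads round-trip above is only a no-op normalisation.)\n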
    entityDict = x[\"result\"][\"problems\"]\n    \n    for problems in entityDict:\n        '''\n        # The following code will retrieve the details for each problem ID\n        # (The volume of data exceeds the value provided. )\n    #\n    problem_id = problems['id']\n    api_url = problem_url + problem_id \n    response = helper.send_http_request(api_url, \"GET\", headers=headers, parameters=None, payload=None, cookies=None, verify=None, cert=None, timeout=None, use_proxy=True)\n    try:\n        response.raise_for_status()\n    except:\n        helper.log_error (response.text)\n    \n    problem_details = response.json()\n    \n    problems[\"details\"] = problem_details['result']\n    '''\n    \n        HECEvent = json.dumps(problems, sort_keys=True)\n        event = helper.new_event(data=HECEvent, source=None, index=None, sourcetype=None)\n        ew.write_event(event)\n    \n    # Save the name of the Dynatrace Server that this data came from\n    event = helper.new_event(data='{\"dynatrace_server\":\"' + opt_dynatrace_tenant + '\"}', index=None, source=None, sourcetype=None)\n    ew.write_event(event)\n\n\n","sub_path":"bin/input_module_dynatrace_problem.py","file_name":"input_module_dynatrace_problem.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"536089213","text":"from plugins.categories import IRegularCommand\n\nimport pytz\nfrom datetime import datetime, timedelta\nimport random\n\nzones = pytz.all_timezones[:]\n\nclass BlazeIt (IRegularCommand):\n\t\n\tdef command_blazeit(this, context, user, channel, args):\n\t\trandom.shuffle(zones)\n\t\trealnow = datetime.now(pytz.timezone('Etc/UTC'))\n\t\tbestid = None\n\t\tbestthen = None\n\t\tmin = timedelta.max\n
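\t\t# For each timezone, find its next local 4:20 (morning or afternoon); the shuffle above randomises which zone wins ties.\n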
\t\tfor tzid in zones:\n\t\t\ttz = pytz.timezone(tzid)\n\t\t\tnow = datetime.now(tz)\n\t\t\tthen = tz.localize(datetime(\n\t\t\t\tnow.year,\n\t\t\t\tnow.month,\n\t\t\t\tnow.day,\n\t\t\t\t4, 20\n\t\t\t))\n\t\t\tafternoon = False\n\t\t\twhile not then > now:\n\t\t\t\tdelta = timedelta(1)\n\t\t\t\tif afternoon:\n\t\t\t\t\tthen = tz.localize(datetime(\n\t\t\t\t\t\tthen.year,\n\t\t\t\t\t\tthen.month,\n\t\t\t\t\t\tthen.day,\n\t\t\t\t\t\t4, 20\n\t\t\t\t\t)) + delta\n\t\t\t\telse:\n\t\t\t\t\tthen = tz.localize(datetime(\n\t\t\t\t\t\tthen.year,\n\t\t\t\t\t\tthen.month,\n\t\t\t\t\t\tthen.day,\n\t\t\t\t\t\t16, 20\n\t\t\t\t\t))\n\t\t\t\tafternoon = not afternoon\n\t\t\tif then - realnow <= min:\n\t\t\t\tbestid = tzid\n\t\t\t\tbestthen = then\n\t\t\t\tmin = then - realnow\n\t\twait = (bestthen - realnow).total_seconds()\n\t\t(minutes, seconds) = divmod(wait, 60)\n\t\treturn ('The next 4:20 is in ' + bestid + ' (UTC' +\n\t\t\tbestthen.strftime('%z') +\n\t\t\t'), ' + str(int(minutes)) + ' minutes and ' +\n\t\t\tstr(int(seconds)) + ' seconds from now')\n","sub_path":"plugins/blazeit.py","file_name":"blazeit.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"141749192","text":"'''\n1. Common metacharacters\n.     matches any single character except the newline \"\\n\"\n+     matches the preceding character 1 or more times\n.+    matches several characters other than the newline \"\\n\"\n?     matches the preceding character 0 or 1 times; it can also suppress greedy matching\n^     matches the start of the string\n$     matches the end of the string\n|     matches one of the expressions on either side of it\n()    matches the expression inside the parentheses and also forms a group\n[]    matches any character from the character set\n[^]   matches any character NOT in the character set\n{n}   repeat n times\n{n,}  repeat n or more times\n{n,m} repeat n to m times\n\n2. Predefined character classes\n\\d matches a digit\n\\D matches a non-digit\n\\w matches a letter, digit or underscore\n\\W matches anything except a letter, digit or underscore\n\\s matches any whitespace character\n\\S matches any non-whitespace character\n\\n matches a newline\n\\t matches a tab\n\\A matches only at the start of the string, same as ^\n\\Z matches only at the end of the string, same as $\n\\b matches a word boundary, i.e. the position between a word and whitespace\n'''\n# Regular expression basics\nimport re\n\nstr1 = 'newdream'\nstr2 = '''\nhello123hello\nhello123\nhello12\n12hello\nhello12ho\n''' # starts with a newline\n# Method 1:\npattern_01 = re.compile('n\\w+m')\nresult_01 = re.match(pattern_01,str1)\nprint(result_01.group())\n\npattern_02 = re.compile('hello\\d+h')\nresult_02 = re.findall(pattern_02,str2)\nprint(result_02)\n\n# Method 2:\nresult_03 = re.match('n\\w+m',str1)\nprint(result_03.group())\n\n# Method 3:\nresult_04 = pattern_01.match(str1)\nprint(result_04.group())
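\n\n# Extra example (added illustration): a character class with bounded repetition.\nresult_05 = re.findall('[a-z]+\\d{2,3}', str2)\nprint(result_05)  # ['hello123', 'hello123', 'hello12', 'hello12']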
","sub_path":"samples/re_01/01_re_jichu.py","file_name":"01_re_jichu.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"606564875","text":"'''\r\nCreated on 28.10.2010\r\n\r\n@author: osman\r\n'''\r\n\r\nimport logging\r\n\r\n_logname = \"sample\"\r\n\r\ndef configureLogger(logger, conf):\r\n    import logging.handlers\r\n    levels = {'debug': logging.DEBUG,'info': logging.INFO,\r\n              'warning': logging.WARNING, 'error': logging.ERROR,\r\n              'critical': logging.CRITICAL}\r\n    logger.setLevel(levels[conf[\"level\"]])\r\n    if \"maxBytes\" in conf:\r\n        handler = logging.handlers.RotatingFileHandler(\r\n            conf[\"filename\"], maxBytes = conf[\"maxBytes\"])\r\n    else:\r\n        handler = logging.FileHandler(conf[\"filename\"])\r\n    formatter = logging.Formatter(conf[\"format\"])\r\n    handler.setFormatter(formatter)\r\n    logger.addHandler(handler)\r\n\r\ndef setLogger(conf):\r\n    global _logname\r\n    _logname = conf[\"logname\"]\r\n    configureLogger(logging.getLogger(_logname), conf)\r\n    \r\ndef getLogger(name = \"\"):\r\n    global _logname\r\n    if not name: return logging.getLogger(_logname)\r\n    else: return logging.getLogger(_logname + '.' + name)\r\n","sub_path":"odocu/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"637897005","text":"\"\"\"\nDatabase utility functions.\n\"\"\"\nfrom datetime import datetime, timedelta\nfrom json import dumps, loads\nfrom typing import Optional\nfrom .enums import Change, Action\ntry:\n    from asyncpg import Record, InterfaceError, UniqueViolationError, create_pool\n    from asyncpg.pool import Pool\nexcept ImportError:\n    Record = None\n    Pool = None\n    print('asyncpg not installed, PostgreSQL functions not available.')\n\n\ndef parse_record(record: Record) -> Optional[tuple]:\n    \"\"\"\n    Parse an asyncpg Record object to a tuple of values\n    :param record: the asyncpg Record object\n    :return: the tuple of values if it's not None, else None\n    \"\"\"\n    try:\n        return tuple(record.values())\n    except AttributeError:\n        return None\n\n\nasync def make_tables(pool: Pool, schema: str):\n    \"\"\"\n    Make tables used for caching if they don't exist.\n    :param pool: the connection pool.\n    :param schema: the schema name.\n    \"\"\"\n    await pool.execute('CREATE SCHEMA IF NOT EXISTS {};'.format(schema))\n\n    reacts = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.reacts (\n      id SERIAL,\n      trigger TEXT UNIQUE,\n      reaction TEXT,\n      created_at TIMESTAMP DEFAULT current_timestamp,\n      PRIMARY KEY (id, trigger)\n    );\n    \"\"\".format(schema)\n\n    spam = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.spam (\n      userid BIGINT,\n      logtime TIMESTAMP DEFAULT current_timestamp,\n      PRIMARY KEY (logtime)\n    );\"\"\".format(schema)\n\n    roles = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.roles (\n      serverid BIGINT,\n      userid BIGINT,\n      change SMALLINT,\n      logtime TIMESTAMP,\n      PRIMARY KEY (serverid, userid, change)\n    );\"\"\".format(schema)\n\n    moderation = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.moderation (\n      serverid BIGINT,\n      modid BIGINT,\n      targetid BIGINT,\n      action SMALLINT,\n      logtime TIMESTAMP DEFAULT current_timestamp,\n      PRIMARY KEY (serverid, modid, targetid, action)\n    );\n    \"\"\".format(schema)\n\n    emojis = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.emojis (\n      id BIGINT,\n      name TEXT,\n      message_id BIGINT,\n      channel_id BIGINT,\n      channel_name TEXT,\n      user_id BIGINT,\n      user_name TEXT,\n      reaction BOOLEAN,\n      logtime TIMESTAMP DEFAULT current_timestamp,\n      PRIMARY KEY(id, message_id, user_id, reaction)\n    );\n    \"\"\".format(schema)\n\n    messages = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.messages (\n      serverid BIGINT,\n      messageid BIGINT UNIQUE,\n      authorid BIGINT,\n      authorname TEXT,\n      channelid BIGINT,\n      channelname TEXT,\n      pinned BOOLEAN,\n      content VARCHAR(2000),\n      createdat TIMESTAMP,\n      PRIMARY KEY (serverid, messageid, authorid, channelid)\n    );\n    \"\"\".format(schema)\n\n    servers = \"\"\"\n    CREATE TABLE IF NOT EXISTS {}.servers (\n      serverid BIGINT,\n      assignableroles varchar ARRAY,\n      filterwordswhite varchar ARRAY,\n      filterwordsblack varchar ARRAY,\n      blacklistchannels integer ARRAY,\n      r9kchannels integer ARRAY,\n      addtime TIMESTAMP DEFAULT current_timestamp,\n      PRIMARY KEY (serverid)\n    );\"\"\".format(schema)\n    await pool.execute(reacts)\n    await pool.execute(spam)\n    await pool.execute(roles)\n    await pool.execute(moderation)\n    await pool.execute(emojis)\n    await pool.execute(messages)\n    await pool.execute(servers)\n\n\n
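# Typical usage (illustrative sketch only; the connection parameters below are invented):\n#\n#     controller = await PostgresController.get_instance(\n#         logger=my_logger,\n#         connect_kwargs={'dsn': 'postgres://user:secret@localhost/nanochan'},\n#         schema='nanochan')\n#     await controller.add_message(message)  # message: a discord.py Message object\n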
class PostgresController():\n    \"\"\"\n    We will use the schema 'nanochan' for the db\n    \"\"\"\n    __slots__ = ('pool', 'schema', 'logger')\n\n    def __init__(self, pool: Pool, logger, schema: str = 'nanochan'):\n        self.pool = pool\n        self.schema = schema\n        self.logger = logger\n\n    @classmethod\n    async def get_instance(cls, logger=None, connect_kwargs: dict = None,\n                           pool: Pool = None, schema: str = 'nanochan'):\n        \"\"\"\n        Get a new instance of `PostgresController`\n        This method will create the appropriate tables needed.\n        :param logger: the logger object.\n        :param connect_kwargs:\n            Keyword arguments for the\n            :func:`asyncpg.connection.connect` function.\n        :param pool: an existing connection pool.\n        One of `pool` or `connect_kwargs` must not be None.\n        :param schema: the schema name used. Defaults to `nanochan`\n        :return: a new instance of `PostgresController`\n        \"\"\"\n        assert logger, (\n            'Please provide a logger to the data_controller'\n        )\n        assert connect_kwargs or pool, (\n            'Please either provide a connection pool or '\n            'a dict of connection data for creating a new '\n            'connection pool.'\n        )\n        if not pool:\n            try:\n                pool = await create_pool(**connect_kwargs)\n                logger.info('Connection pool made.')\n            except InterfaceError as e:\n                logger.error(str(e))\n                raise e\n        logger.info('Creating tables...')\n        await make_tables(pool, schema)\n        logger.info('Tables created.')\n        return cls(pool, logger, schema)\n\n    async def insert_rolechange(self, server_id: int, user_id: int,\n                                changetype: Change):\n        \"\"\"\n        Inserts into the roles table a new rolechange\n        :param server_id: the id of the server\n        :param user_id: the id of the user changed\n        :param changetype: The type of change that occurred\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.roles VALUES ($1, $2, $3);\n        \"\"\".format(self.schema)\n\n        await self.pool.execute(sql, server_id, user_id, changetype.value)\n\n    async def insert_modaction(self, server_id: int, mod_id: int,\n                               target_id: int, action_type: Action):\n        \"\"\"\n        Inserts a new moderation action into the moderation table\n        :param server_id: the id of the server\n        :param mod_id: the id of the mod that triggered the action\n        :param target_id: the id of user that action was performed on\n        :param action_type: The type of action that occurred\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.moderation VALUES ($1, $2, $3, $4);\n        \"\"\".format(self.schema)\n\n        await self.pool.execute(\n            sql, server_id, mod_id, target_id, action_type.value)\n\n    async def add_server(self, server_id: int):\n        \"\"\"\n        Inserts into the server table a new server\n        :param server_id: the id of the server added\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.servers VALUES ($1, $2, $3, $4, $5, $6)\n        ON CONFLICT (serverid)\n        DO nothing;\n        \"\"\".format(self.schema)\n\n        await self.pool.execute(sql, server_id, [], [], [], [], [])\n\n    async def add_whitelist_word(self, server_id: int, word: str):\n        \"\"\"\n        Adds a word that is allowed on the whitelist channels\n        :param server_id: the id of the server to add the word to\n        :param word: word to add\n        \"\"\"\n        return\n\n    async def add_message(self, message):\n        \"\"\"\n        Adds a message to the database\n        :param message: the discord message object to add\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.messages VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)\n        ON CONFLICT (messageid)\n        DO nothing;\n        \"\"\".format(self.schema)\n        await self.pool.execute(\n            sql,\n            message.guild.id,\n            message.id,\n            message.author.id,\n            message.author.name,\n            message.channel.id,\n            message.channel.name,\n            message.pinned,\n            message.clean_content,\n            message.created_at\n        )\n\n    async def add_emoji(self, emoji, message_id, user, channel, is_reaction):\n        \"\"\"\n        Adds emoji to emoji tracking table\n        :param emoji: discord emoji to add\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.emojis VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n        \"\"\".format(self.schema)\n        try:\n            await self.pool.execute(\n
                sql,\n                emoji.id,\n                emoji.name,\n                message_id,\n                channel.id,\n                channel.name,\n                user.id,\n                user.name,\n                is_reaction\n            )\n        except UniqueViolationError:\n            pass\n    \n    async def get_emoji_count(self, emoji, days_to_subtract, logger):\n        \"\"\"\n        Returns the number of times the emoji was used within the last `days_to_subtract` days\n        \"\"\"\n        sql = \"\"\"\n        SELECT count(id) FROM {}.emojis\n        WHERE id = $1 AND logtime > $2;\n        \"\"\".format(self.schema)\n\n        date_delta = datetime.utcnow() - timedelta(days=days_to_subtract)\n        try:\n            return await self.pool.fetchval(sql, emoji.id, date_delta)\n        except Exception as e:\n            logger.warning(f'Error retrieving emoji count: {e}')\n            return None\n\n    async def add_blacklist_word(self, server_id: int, word: str):\n        \"\"\"\n        Adds a word that is not allowed on the server\n        :param server_id: the id of the server to add the word to\n        :param word: word to add\n        \"\"\"\n        return\n\n    async def add_message_delete(self, user_id: int):\n        \"\"\"\n        Logs a message deletion into the db\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.spam VALUES ($1);\n        \"\"\".format(self.schema)\n        await self.pool.execute(sql, user_id)\n\n    async def get_message_deleted(self, user_id: int):\n        \"\"\"\n        Returns count of message deletions\n        \"\"\"\n        sql = \"\"\"\n        SELECT COUNT(*) FROM {}.spam\n        WHERE userid = $1;\n        \"\"\".format(self.schema)\n        return await self.pool.fetchval(sql, user_id)\n\n    \n    async def reset_message_deleted(self):\n        \"\"\"\n        Deletes all items from the spam table\n        \"\"\"\n        sql = \"\"\"\n        DELETE FROM {}.spam;\n        \"\"\".format(self.schema)\n        await self.pool.execute(sql)\n\n\n    async def add_whitelist_channel(self, server_id: int, channel_id: int):\n        \"\"\"\n        Adds a channel that will delete all but the messages containing a\n        string in the 'whitelist' column of the server row\n        :param server_id: the id of the server to add the channel to\n        :param channel_id: the id of the channel to whitelist\n        \"\"\"\n        return\n\n    async def add_r9k_channel(self, server_id: int, channel_id: int):\n        \"\"\"\n        this would be a cool thing to have\n        \"\"\"\n        return\n\n    async def get_all_triggers(self):\n        \"\"\"\n        Returns list of triggers\n        \"\"\"\n        sql = \"\"\"\n        SELECT trigger FROM {}.reacts;\n        \"\"\".format(self.schema)\n        trigger_list = []\n        records = await self.pool.fetch(sql)\n        for rec in records:\n            trigger_list.append(rec['trigger'])\n        return trigger_list\n\n    async def rem_reaction(self, trigger):\n        \"\"\"\n        Removes a value from the reacts DB\n        \"\"\"\n        sql = \"\"\"\n        DELETE FROM {}.reacts WHERE trigger = $1;\n        \"\"\".format(self.schema)\n\n        await self.pool.execute(sql, trigger)\n\n    async def add_reaction(self, trigger, reaction):\n        \"\"\"\n        sets or updates a reaction\n        \"\"\"\n        sql = \"\"\"\n        INSERT INTO {}.reacts (trigger, reaction) VALUES ($1, $2)\n        ON CONFLICT (trigger)\n        DO UPDATE SET\n          reaction = $3 WHERE {}.reacts.trigger = $4;\n        \"\"\".format(self.schema, self.schema)\n\n        await self.pool.execute(sql, trigger, reaction, reaction, trigger)\n\n    async def get_reaction(self, trigger):\n        \"\"\"\n        returns a reaction TEXT\n        \"\"\"\n        sql = \"\"\"\n        SELECT reaction FROM {}.reacts\n        WHERE trigger = $1;\n        \"\"\".format(self.schema)\n        return await self.pool.fetchval(sql, trigger)\n","sub_path":"cogs/utils/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"249954356","text":"class SimBox:\n
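\t# Parses what appears to be a LAMMPS-style dump file: \"ITEM: ATOMS\" rows carry the\n\t# x,y,z coordinates, and the three \"ITEM: BOX\" lines carry lo/hi bounds plus the\n\t# triclinic tilt factors xy, xz and yz.\n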
\tdef get_coord(self,fname):\n\t\tf = open(fname, \"r\")\n\t\tcoords = []\n\t\tflag = 0\n\t\tfor line in f:\n\t\t\tif (flag == 0) and (\"ITEM: ATOMS\" in line):\n\t\t\t\tflag = 1\n\t\t\telif flag == 1:\n\t\t\t\ta = line[:-1].split()\n\t\t\t\tx = float(a[2])\n\t\t\t\ty = float(a[3])\n\t\t\t\tz = float(a[4])\n\t\t\t\tcoords.append([x,y,z])\n\t\tf.close()\n\t\treturn coords\n\n\tdef get_box(self,fname):\n\t\tf = open(fname, \"r\")\n\t\tbox = []\n\t\tflag = 0\n\t\tfor line in f:\n\t\t\tif (flag == 0) and (\"ITEM: BOX\" in line):\n\t\t\t\tflag = 1\n\t\t\telif flag == 1:\n\t\t\t\ta = line[:-1].split()\n\t\t\t\tself.xlo = float(a[0])\n\t\t\t\tself.xhi = float(a[1])\n\t\t\t\tself.xy = float(a[2])\n\t\t\t\tflag += 1\n\n\t\t\telif flag == 2:\n\t\t\t\ta = line[:-1].split()\n\t\t\t\tself.ylo = float(a[0])\n\t\t\t\tself.yhi = float(a[1])\n\t\t\t\tself.xz = float(a[2])\n\t\t\t\tflag += 1\n\t\t\t\t\n\t\t\telif flag == 3:\n\t\t\t\ta = line[:-1].split()\n\t\t\t\tself.zlo = float(a[0])\n\t\t\t\tself.zhi = float(a[1])\n\t\t\t\tself.yz = float(a[2])\n\t\t\t\tflag += 1\n\t\tf.close()\t\n\n\n\tdef __init__(self,fname):\n\t\tself.xlo = 0.0\n\t\tself.xhi = 0.0\n\t\tself.ylo = 0.0\n\t\tself.yhi = 0.0\n\t\tself.zlo = 0.0\n\t\tself.zhi = 0.0\n\t\tself.xy = 0.0\n\t\tself.xz = 0.0\n\t\tself.yz = 0.0\n\t\tself.coords = self.get_coord(fname)\n\t\tself.get_box(fname)\n\t\tself.lx = self.xhi-self.xlo\n\t\tself.ly = self.yhi-self.ylo\n\t\tself.lz = self.zhi-self.zlo\n\n\tdef ex_box(self,bx=\"p\",by=\"p\",bz=\"p\"):\n\t\tif bz == \"p\":\n\t\t\tnew_hi_coord = [[old[0] + self.xz, old[1] + self.yz ,old[2] + self.lz] for old in self.coords]\n\t\t\tnew_lo_coord = [[old[0] - self.xz, old[1] - self.yz ,old[2] - self.lz] for old in self.coords]\n\t\t\tself.coords = self.coords + new_hi_coord + new_lo_coord\n","sub_path":"simbox.py","file_name":"simbox.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"93835281","text":"#!/usr/bin/env python3\n\n#-*- coding: UTF-8 -*-\n#------------------------------\n# @Version : 1.0\n# @Author : Zeng**\n# @Time : 22-03-10\n# @Language : Python3.6\n# @Function : mysql_api\n#------------------------------\n\n\"\"\"\nBasics of MySQL database operations in Python.\nA database usually uses the file system as its basic persistent storage; that can be plain\noperating-system files, dedicated OS files, or even raw disk partitions.\n\nFirst, communication between an application and the database requires establishing a connection;\nsecond, once a connection (or a connection pool) is established, a cursor can be created to send\nrequests to the database and receive its replies.\n\nAbout cursors: a cursor lets the user submit database commands and retrieve the resulting rows.\n\nPyMySQL is the library used to connect to a MySQL server from Python 3.x; Python 2 uses mysqldb instead.\n\"\"\"\n\n# The third-party database driver must be installed first: > pip3 install pymysql\n\nimport pymysql\n\n# 1. Establish the database connection\ndb = pymysql.connect(host='192.168.207.129',\n\t\t\t\t\t port=3306,\n\t\t\t\t\t user='admin',\n\t\t\t\t\t passwd='Abc-123.',\n\t\t\t\t\t db='shop',\n\t\t\t\t\t charset='utf8'\n\t\t\t\t\t )\n\n# 2. Create a cursor object\ncursor = db.cursor()\n\n# 3. Use the execute() method to run SQL statements\ns_sql = 'select * from user'\ncursor.execute(s_sql)\n\n# Create a table\ntry:\n\tc_table = \"\"\"create table abc (\n\t\t\t\t first_name char(20) not null,\n\t\t\t\t last_name char(20),\n\t\t\t\t age int,\n\t\t\t\t sex char(1),\n\t\t\t\t income float ) \"\"\"\n\n\tcursor.execute(c_table)\nexcept Exception as e:\n\tprint(\"Create Table Error: \", e, '\\n')\n\n# Verify that the table was created successfully\ns_sql_table = 'show tables'\ncursor.execute(s_sql_table)\n\n# 4. Fetch the data with fetchone() or fetchall()\ndata = cursor.fetchall()\nprint(data)\n\n# 5. Disconnect from the database and release resources\ncursor.close()\ndb.close()\n\n\n\n\n\t\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python_core_3/MySQL、Redis/mysql_api.py","file_name":"mysql_api.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"214116087","text":"# -*- coding: cp1252 
-*-\n\n\"\"\" -*- coding: utf-8 -*- \"\"\"\n#\n# version 0.0.1 - By SlySen\n#\n# pylint...: --max-line-length 120\n# vim......: set expandtab\n# vim......: set tabstop=4\n#\nimport os, time, urllib, urllib2, re, socket, sys, traceback, xbmcplugin, xbmcaddon, xbmcgui, xbmc, simplejson\nif sys.version >= \"2.5\":\n from hashlib import md5 as _hash\nelse:\n from md5 import new as _hash\n\n\nADDON = xbmcaddon.Addon()\nADDON_CACHE_BASEDIR = os.path.join(xbmc.translatePath(ADDON.getAddonInfo('path')), \".cache\")\nADDON_CACHE_TTL = float(ADDON.getSetting('CacheTTL').replace(\"0\", \".5\").replace(\"73\", \"0\"))\nADDON_ICON = ADDON.getAddonInfo('icon')\nADDON_NAME = ADDON.getAddonInfo('name')\nADDON_IMAGES_BASEPATH = ADDON.getAddonInfo('path')+'/resources/media/images/'\nADDON_FANART = ADDON.getAddonInfo('path')+'/fanart.jpg'\nRE_HTML_TAGS = re.compile(r'<[^>]+>')\nRE_AFTER_CR = re.compile(r'\\n.*')\n\nTV5CA_VIDEO_SITE = 'tv5.ca'\nTV5CA_BASE_URL = 'http://'+TV5CA_VIDEO_SITE\n\nif not os.path.exists(ADDON_CACHE_BASEDIR):\n os.makedirs(ADDON_CACHE_BASEDIR)\n\ndef is_cached_content_expired(last_update):\n \"\"\" function docstring \"\"\"\n expired = time.time() >= (last_update + (ADDON_CACHE_TTL * 60**2))\n return expired\n\ndef get_cached_filename(path):\n \"\"\" function docstring \"\"\"\n filename = \"%s\" % _hash(repr(path)).hexdigest()\n return os.path.join(ADDON_CACHE_BASEDIR, filename)\n\ndef get_cached_content(path):\n \"\"\" function docstring \"\"\"\n content = None\n try:\n filename = get_cached_filename(path)\n if os.path.exists(filename) and not is_cached_content_expired(os.path.getmtime(filename)):\n content = open(filename).read()\n else:\n content = get_url_txt(path)\n try:\n file(filename, \"w\").write(content) # cache the requested web content\n except StandardError:\n traceback.print_exc()\n except StandardError:\n return None\n return content\n\n# Merci à l'auteur de cette fonction\ndef unescape_callback(matches):\n \"\"\" function docstring \"\"\"\n html_entities =\\\n {\n 'quot':'\\\"', 'amp':'&', 'apos':'\\'', 'lt':'<',\n 'gt':'>', 'nbsp':' ', 'copy':'©', 'reg':'®',\n 'Agrave':'À', 'Aacute':'Á', 'Acirc':'Â',\n 'Atilde':'Ã', 'Auml':'Ä', 'Aring':'Å',\n 'AElig':'Æ', 'Ccedil':'Ç', 'Egrave':'È',\n 'Eacute':'É', 'Ecirc':'Ê', 'Euml':'Ë',\n 'Igrave':'Ì', 'Iacute':'Í', 'Icirc':'Î',\n 'Iuml':'Ï', 'ETH':'Ð', 'Ntilde':'Ñ',\n 'Ograve':'Ò', 'Oacute':'Ó', 'Ocirc':'Ô',\n 'Otilde':'Õ', 'Ouml':'Ö', 'Oslash':'Ø',\n 'Ugrave':'Ù', 'Uacute':'Ú', 'Ucirc':'Û',\n 'Uuml':'Ü', 'Yacute':'Ý', 'agrave':'à',\n 'aacute':'á', 'acirc':'â', 'atilde':'ã',\n 'auml':'ä', 'aring':'å', 'aelig':'æ',\n 'ccedil':'ç', 'egrave':'è', 'eacute':'é',\n 'ecirc':'ê', 'euml':'ë', 'igrave':'ì',\n 'iacute':'í', 'icirc':'î', 'iuml':'ï',\n 'eth':'ð', 'ntilde':'ñ', 'ograve':'ò',\n 'oacute':'ó', 'ocirc':'ô', 'otilde':'õ',\n 'ouml':'ö', 'oslash':'ø', 'ugrave':'ù',\n 'uacute':'ú', 'ucirc':'û', 'uuml':'ü',\n 'yacute':'ý', 'yuml':'ÿ'\n }\n\n entity = matches.group(0)\n val = matches.group(1)\n\n try:\n if entity[:2] == r'\\u':\n return entity.decode('unicode-escape')\n elif entity[:3] == '&#x':\n return unichr(int(val, 16))\n elif entity[:2] == '&#':\n return unichr(int(val))\n else:\n return html_entities[val].decode('utf-8')\n\n except (ValueError, KeyError):\n pass\n\ndef html_unescape(data):\n \"\"\" function docstring \"\"\"\n data = data.decode('utf-8')\n data = re.sub(r'&#?x?(\\w+);|\\\\\\\\u\\d{4}', unescape_callback, data)\n data = data.encode('utf-8')\n return data\n\ndef rechercher_un_element(argument, rechercher_dans):\n 
\"\"\" function docstring \"\"\"\n reponse = re.compile(argument, re.DOTALL).search(rechercher_dans)\n if reponse:\n return reponse.group(1)\n else:\n return \"\"\n\ndef get_url_txt(the_url):\n \"\"\" function docstring \"\"\"\n check_for_internet_connection()\n req = urllib2.Request(the_url)\n req.add_header(\\\n 'User-Agent',\\\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'\\\n )\n req.add_header('Accept-Charset', 'utf-8')\n response = urllib2.urlopen(req)\n link = response.read()\n link = html_unescape(link)\n response.close()\n return link\n\ndef creer_menu_accueil():\n \"\"\" function docstring \"\"\"\n\n add_dir('A %C3%A0 Z - Toutes les vid%C3%A9os',\\\n TV5CA_BASE_URL+'/videos?options[sort]=title&options[order]=ASC&options[page]=1', \\\n 6, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n add_dir('Cat%C3%A9gories',\\\n TV5CA_BASE_URL+'/videos',\\\n 7, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n add_dir('Titres',\\\n TV5CA_BASE_URL+'/videos',\\\n 8, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n add_dir('-- %C3%80 voir',\\\n TV5CA_BASE_URL,\\\n 5, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n add_dir('-- Populaires',\\\n TV5CA_BASE_URL+'/videos?options[sort]=meta.hits&options[order]=DESC&options[page]=1',\\\n 6, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n add_dir('-- R%C3%A9cents',\\\n TV5CA_BASE_URL+'/videos?options[sort]=publish_start&options[order]=DESC&options[page]=1',\\\n 6, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n add_dir('[COLOR steelblue][I]Param%C3%A8tres de l\\'addiciel...[/I][/COLOR]',\\\n TV5CA_BASE_URL,\\\n 99, ADDON_IMAGES_BASEPATH+'default-folder.png', False\\\n )\n\ndef creer_menu_categories(the_url):\n \"\"\" function docstring \"\"\"\n link = get_cached_content(the_url)\n container = re.split('
', link)\n liste = re.split('(.+?)', item)\n add_dir(\\\n theme_title,\\\n TV5CA_BASE_URL+\\\n '/videos?options[sort]=title&options[order]=ASC¶ms[theme]='+theme+'&options[page]=1',\\\n 6, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n\ndef creer_menu_titres(the_url):\n \"\"\" function docstring \"\"\"\n link = get_cached_content(the_url)\n container = re.split('
', link)\n liste = re.split('(.+?)', item)\n add_dir(\\\n serie_title,\\\n TV5CA_BASE_URL+\\\n '/videos?options[sort]=title&options[order]=ASC¶ms[serie]='+serie+'&options[page]=1',\\\n 6, ADDON_IMAGES_BASEPATH+'default-folder.png', True\\\n )\n\ndef creer_liste_videos_orphelines(the_url, the_carousel_class_name):\n \"\"\" function docstring \"\"\"\n link = get_cached_content(the_url)\n\n currentpage = rechercher_un_element(r'options\\[page\\]=(\\d+)', the_url)\n\n container = re.split('