diff --git "a/4510.jsonl" "b/4510.jsonl" new file mode 100644--- /dev/null +++ "b/4510.jsonl" @@ -0,0 +1,652 @@ +{"seq_id":"20220667853","text":"# import numpy as np\n# import matplotlib.pyplot as plt\nfrom os import listdir \n# from PIL import Image \n# from IPython.display import display\n\nfrom matplotlib.pyplot import figure, imshow, axis\nfrom matplotlib.image import imread\n\ndef showX(X, rows=1):\n assert X.shape[0]%rows == 0\n int_X = ( (X+1)/2*255).clip(0,255).astype('uint8')\n if channel_first:\n int_X = np.moveaxis(int_X.reshape(-1,3,imageSize,imageSize), 1, 3)\n else:\n int_X = int_X.reshape(-1,imageSize,imageSize, 3)\n int_X = int_X.reshape(rows, -1, imageSize, imageSize,3).swapaxes(1,2).reshape(rows*imageSize,-1, 3)\n display(Image.fromarray(int_X))\n\ndef showImages(list_of_files):\n fig = figure()\n number_of_files = len(list_of_files)\n for i in range(number_of_files):\n a=fig.add_subplot(1,number_of_files,i+1)\n image = imread(list_of_files[i])\n imshow(image,cmap='Greys_r')\n axis('off')\n print('image shown') \n\n# w=10\n# h=10\n# fig=plt.figure(figsize=(8, 8))\n# columns = 4\n# rows = 5\n# for i in range(1, columns*rows +1):\n# img = np.random.randint(10, size=(h,w))\n# fig.add_subplot(rows, columns, i)\n# plt.imshow(img)\n# plt.show()\n\n# directory = 'image-data/hangul-images'\n# files = listdir(directory) \n# w = 8 \n# h = 4\n# fig = plt.figure(figsize=(w,h))\n# for i in range(32):\n# \tim_name = directory + '/' + files[i]\n# \tim = Image.open(im_name) \n# \tfig.add_subplot(h, w, i+1)\n# \tplt.imshow(im) \n\n# plt.axis('off')\n# # im.set_cmap('grey')\n# plt.savefig(\"test.png\", bbox_inches='tight')\n\n# plt.show()\n\n# directory = 'image-data/hangul-images'\n# files = listdir(directory) \n# for i in range(10):\n# \tfiles[i] = directory + '/' + files[i] \n\n# for file in files:\n# \tfile = directory + file \n\ndirectory = 'hangul'\nfiles = listdir(directory) \nfor i in range(len(files)):\n\tfiles[i] = directory + '/' + files[i] \nshowImages(files)","repo_name":"briankim13/art_ml_project2","sub_path":"hangul/show_image.py","file_name":"show_image.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"23873120414","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 20 15:52:42 2020\n\n@author: AJITHABH K. S.\nLast modified: 21-07-2022\n\nThis script can be used to plot coherency values\nfor all the time windows for all target frequencies.\n\nThis script enables ploting of the coherency values\nfor Ex and Ey components in an argand diagram. Any impedance\nvalue can be selected for plotting. 
The color of the data points\nindicate the coherency value.\n\nTo plot Ex component, make \ncc = AllcohEx[fnum,:]\n\nSimilarly for Ey, make\ncc = AllcohEy[fnum,:]\n\nChange 'Zxy_single' to 'Zyx_single' in following line\nto choose argand diagram of Zyx component.\nZ_all = bandavg.get('Zxy_single')\n\n\"\"\"\n\nimport matplotlib\n#matplotlib.use('TkAgg')\nfrom matplotlib import pyplot as plt\ncdict = {'red': ((0.0, 0.0, 0.0),\n (0.1, 0.5, 0.5),\n (0.2, 0.0, 0.0),\n (0.4, 0.2, 0.2),\n (0.6, 0.0, 0.0),\n (0.8, 1.0, 1.0),\n (1.0, 1.0, 1.0)),\n 'green':((0.0, 0.0, 0.0),\n (0.1, 0.0, 0.0),\n (0.2, 0.0, 0.0),\n (0.4, 1.0, 1.0),\n (0.6, 1.0, 1.0),\n (0.8, 1.0, 1.0),\n (1.0, 0.0, 0.0)),\n 'blue': ((0.0, 0.0, 0.0),\n (0.1, 0.5, 0.5),\n (0.2, 1.0, 1.0),\n (0.4, 1.0, 1.0),\n (0.6, 0.0, 0.0),\n (0.8, 0.0, 0.0),\n (1.0, 0.0, 0.0))}\nmy_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)\n\"\"\"\nTry 'Zxy_single', 'Zyx_single', 'Zxx_single', 'Zyy_single'\nin the below field as required\n\"\"\"\nZ_all = bandavg.get('Zxy_single')\ncoh_selected_all = np.ones(np.shape(bandavg.get('ExExc')),dtype=float)\nfor fnum in range(np.size(ftlist)):\n Z = Z_all[fnum,:]\n coh_selected = coh_selected_all[fnum,:].reshape(-1,1)\n \"\"\"\n Try: cc = AllcohEx[fnum,:] to plot Ex component\n Try: cc = AllcohEy[fnum,:] to plot Ey component\n \"\"\"\n cc = AllcohEx[fnum,:]\n cc = cc.reshape(-1,1)\n Z = Z.reshape(-1,1)\n ind_coh = np.where(coh_selected==0)[0].reshape(-1,1)\n cc = np.delete(cc,ind_coh).reshape(-1,1)\n Z = np.delete(Z,ind_coh).reshape(-1,1)\n Z_real = np.real(Z)\n Z_imag = np.imag(Z)\n plt.figure(num=fnum)\n sc = plt.scatter(Z_real,Z_imag,c=cc,cmap=my_cmap)\n plt.colorbar(sc)\n plt.clim(0,1) \n # plt.scatter(Z_huber[fnum].real,Z_huber[fnum].imag,c='black',marker=(5, 1))\n plt.title(procinfo.get('selectedsite') + ' - ' + procinfo.get('meas')+\n ' ('+str(procinfo.get('fs'))+' Hz) f='+ str(round(ftlist[fnum][0],2)) +' Hz')\n # plt.savefig('C:/Users/Ajithabh/Desktop/myImagePDF.eps', format='eps', dpi=1200)","repo_name":"ajithabhks/SigMT","sub_path":"plot-coherency.py","file_name":"plot-coherency.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"93"} +{"seq_id":"2663207321","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n@Summary : example1 of observer\n@Author : Rey\n@Time : 2022-05-01 17:28:33\n\"\"\"\n\n\nclass Publisher:\n def __init__(self) -> None:\n self.observers = []\n\n def add(self, observer):\n if observer not in self.observers:\n self.observers.append(observer)\n else:\n print(f'Failed to add: {observer}')\n\n def remove(self, observer):\n try:\n self.observers.remove(observer)\n except ValueError:\n print(f'Failed to remove: {observer}')\n\n def notify(self):\n [o.notify(self) for o in self.observers]\n\n\nclass DefaultFormatter(Publisher):\n def __init__(self, name) -> None:\n super().__init__()\n self.name = name\n self._data = 0\n\n def __str__(self) -> str:\n return f'{type(self).__name__}: \"{self.name}\" has data = {self._data}'\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, new_value):\n try:\n self._data = int(new_value)\n except ValueError as err:\n print(f'Error: {err}')\n else:\n self.notify()\n\n\nclass HexFormatter:\n def notify(self, publisher):\n return f'{type(self).__name__}: \"{publisher.name}\" has now hex data = {hex(publisher.data)}'\n\n\nclass BinaryFormatter:\n def notify(self, publisher):\n return f'{type(self).__name__}: 
\"{publisher.name}\" has now bin data = {bin(publisher.data)}'\n\n\ndef main():\n df = DefaultFormatter('test1')\n print(df)\n\n print()\n hf = HexFormatter()\n df.add(hf)\n df.data = 3\n print(df)\n\n print()\n bf = BinaryFormatter()\n df.add(bf)\n df.data = 21\n print(df)\n\n print()\n df.remove(hf)\n df.data = 40\n print(df)\n\n print()\n df.remove(hf)\n df.add(bf)\n\n df.data = 'hello'\n print(df)\n\n print()\n df.data = 15.8\n print(df)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"czp-first/ToBeBetter","sub_path":"design_pattern/behavioral/observer/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"93"} +{"seq_id":"14424721215","text":"\nimport random\nfrom collections import defaultdict\n\nTHEMSELVES = 1\n\nclass Solution(object):\n\n def __init__(self, graph):\n self.graph = graph\n\n\n def knows(self, a, b):\n if self.graph and self.graph[a][b] == 1:\n return True\n\n return False\n\n\n def findCelebrity(self, n):\n\n # greedy depth first search\n # while I did not talk to everyone do:\n # asks if A knows B\n # if answer is yes: put B on the stack and move to him\n # if answer is no: keep asking until you asked about everyone\n\n\n stack = []\n visited = set()\n\n for people in xrange(n):\n stack.append(people)\n\n\n\n # The definition of a celebrity is that all the other n - 1 people know him/her but he/she does not know any of them.\n know_count = defaultdict(set)\n by_themselves = set()\n\n while len(visited) < n and stack:\n\n a = stack.pop()\n visited.add(a)\n\n kcount = 0\n for b in xrange(n):\n if self.knows(a, b):\n kcount += 1\n if a != b:\n know_count[b].add(a)\n if b not in visited:\n stack.append(b)\n\n if kcount == THEMSELVES:\n by_themselves.add(a)\n\n\n for people in by_themselves:\n if people in know_count and len(know_count[people]) == n - 1:\n return people\n\n\n return -1\n\n\n\n\n\n\n\ngraph = [\n [1,1,0],\n [0,1,0],\n [1,1,1]\n] # 1\n\nprint(Solution(graph).findCelebrity(len(graph)))\n\n\ngraph = [\n [1,0,1],\n [1,1,0],\n [0,1,1]\n] # -1\nprint(Solution(graph).findCelebrity(len(graph)))\n\n\n\n\ngraph = [\n [1, 1, 1, 1, 1, 1],\n [1, 1, 0, 0, 1, 1],\n [0, 0, 0, 1, 0, 0], # knows only himself but not celebrity\n [0, 1, 1, 0, 1, 1],\n [1, 1, 1, 1, 0, 1],\n [0, 1, 1, 0, 1, 1]\n] # -1\nprint(Solution(graph).findCelebrity(len(graph)))\n\n\n","repo_name":"marquesarthur/programming_problems","sub_path":"leetcode/pinterest/find_celebrity.py","file_name":"find_celebrity.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"} +{"seq_id":"12055393502","text":"import rebound\nfrom numpy import *\nfrom tabulate import tabulate\n\nplannet=['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune']\nPt=['Plannet']+plannet\nMercury=['Mercury']\nVenus=['Venus']\nEarth=['Earth']\nMars=['Mars']\nJupiter=['Jupiter']\nSaturn=['Saturn']\nUranus=['Uranus']\nNeptune=['Neptune']\nDummy=[Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune]\n\nfor first in range(len(plannet)):\n\tfor second in range(len(plannet)):\n\t\tif second>first:\n\t\t\tplannets=[]\n\t\t\tsim = rebound.Simulation()\n\t\t\tsim.units=('kg','km','yr')\n\t\t\tdate=\"2000-01-01 00:00\"\n\t\t\tsim.add(particle='Sun', date=date)\n\t\t\tplannets.append(plannet[first])\n\t\t\tplannets.append(plannet[second])\n\t\t\tfor particles in 
plannets:\t\n\t\t\t\tsim.add(particle=particles, date=date)\n\n\t\t\tmonths_array=[]\n\t\t\tdist=[]\n\t\t\tps=sim.particles\n\t\t\ty=arange(0,100,1)\n\t\t\tt=arange(1,13,1)\n\t\t\tfor i in y:\n\t\t\t\tfor j in t:\n\t\t\t\t\tmonths=i*12+j\n\t\t\t\t\tmonths_array.append(months)\n\t\t\t\t\tyear=i+j/12.0\n\t\t\t\t\tsim.integrate(year)\n\t\t\t\t\tdp=(ps[1]-ps[2]) #Calculate difference between particles\n\t\t\t\t\tdistance=(sqrt((dp.x)*(dp.x)+(dp.y)*(dp.y)+(dp.z)*(dp.z)))\n\t\t\t\t\tdist.append(distance)\n\n\t\t\tdist, months_array=zip(*sorted(zip(dist, months_array)))\n\n\t\t\tmonths_stack=['January', 'February', 'March', \"April\", 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\n\n\t\t\tyr=str(2000+(months_array[0]//12))\n\t\t\tmon=months_stack[months_array[0]%12]\n\t\t\tdis=dist[0]\n\t\t\tDummy[first].append('{0} {1}'.format(yr,mon))\n\t#\t\tprint(yr)\n\t\telse:\n\t\t\tDummy[first].append('X')\n\ntable=[[Pt[k], Mercury[k], Venus[k], Earth[k], Mars[k], Jupiter[k], Saturn[k], Uranus[k],Neptune[k]] for k in range(len(Pt))]\nprint(tabulate(table))\n","repo_name":"Soumyadeep142/Rebound","sub_path":"All_Plannets_Closest_Chart.py","file_name":"All_Plannets_Closest_Chart.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"20263654046","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 2 21:23:14 2021\n\n@author: 20181758\n\"\"\"\nimport tensorflow\n#import miscnn\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, concatenate\nfrom tensorflow.keras.layers import Conv3D, MaxPooling3D, Conv3DTranspose\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, SeparableConv2D, DepthwiseConv2D\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.layers import Concatenate, Add, concatenate\nfrom tensorflow.keras.layers import ReLU, ELU\nfrom tensorflow.keras.initializers import glorot_normal, Identity\n#from tensorflow.keras.contrib.layers import repeat\n#from tensorflow.keras.contrib.framework import Arg_Scope\nfrom tensorflow.keras.regularizers import l2\n\n#from miscnn.neural_network.architecture.abstract_architecture import Abstract_Architecture\n\nfc = Dense\nconv = Conv2D\ndeconv = Conv2DTranspose\nrelu = ELU\nmaxpool = MaxPooling2D\ndropout_layer = Dropout\nbatchnorm = BatchNormalization\nwinit = glorot_normal()\n#repeat = repeat\n#arg_scope = Aarg_Scope\nl2_regularizer = l2\n\nimport tensorflow as tf\n#from tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Input, concatenate\ndef Dilationlayer(inputspre,n_filters,filter_size=1,dilation_rate=1,dropout=0.2):\n input_shape=inputspre.shape\n# lay1 = tf.keras.layers.BatchNormalization()(inputspre)\n lay1 = BatchNormalization(momentum=0.99)(inputspre)\n# lay2 = relu(lay1)\n lay3 = tf.keras.layers.Conv2D(n_filters, (filter_size,1), strides=(1,1),dilation_rate=dilation_rate,activation='relu', input_shape=input_shape[1:],padding = 'same')(lay1)\n lay3 = tf.keras.layers.Conv2D(n_filters, (1,filter_size),strides=(1,1),dilation_rate=dilation_rate,input_shape=input_shape[1:],padding = 'same')(lay3)\n lay5 = tf.keras.layers.Dropout(rate=dropout)(lay3)\n# 
lay=tf.keras.layers.concatenate([inputspre,lay5])\n lay = concatenate([inputspre, lay5], axis=-1)\n\n return lay\ndef downsampleBlock(inputdowns, n_filters, dropout=0.2):\n \n# lay1 = tf.keras.layers.BatchNormalization()(inputdowns)\n lay1 = BatchNormalization(momentum=0.99)(inputdowns)\n# lay2 = relu(lay1)\n# lay3 = tf.keras.layers.Conv2D(n_filters, (1,1), strides=(1,1), padding='same', activation=None, \n# dilation_rate=1, use_bias=False, kernel_regularizer=tf.keras.regularizers.L2(0.00004))(lay2) #Do I need to add regularization??\n lay3 = tf.keras.layers.Conv2D(n_filters, (1,1), strides=(1,1), padding='same', activation='relu', \n dilation_rate=1, use_bias=False)(lay1) #Do I need to add regularization??\n\n lay4= tf.keras.layers.Dropout(rate=dropout)(lay3)\n lay5= tf.keras.layers.MaxPooling2D(pool_size=2, strides=2)(lay4)\n \n return lay5\n\n\ndef DenseDilatedBlock(inputdense, n_filters,filter_size, dilation_rate):\n lay1=tf.keras.layers.Conv2D(n_filters,[filter_size,1], strides=(1,1),dilation_rate=dilation_rate[0],padding='same',activation='relu')(inputdense)\n lay1=tf.keras.layers.Conv2D(n_filters,[1,filter_size],strides=(1,1),dilation_rate=dilation_rate[0],padding='same',activation='relu')(lay1)\n# lay1i=tf.keras.layers.concatenate([inputdense,lay1])\n lay1i = concatenate([inputdense, lay1], axis=-1)\n \n lay2=tf.keras.layers.Conv2D(n_filters,[filter_size,1],strides=(1,1), dilation_rate=dilation_rate[1],padding='same',activation='relu')(lay1i)\n lay2=tf.keras.layers.Conv2D(n_filters,[1,filter_size],strides=(1,1), dilation_rate=dilation_rate[1],padding='same',activation='relu')(lay2)\n# lay12=tf.keras.layers.concatenate([lay1i,lay2])\n lay12 = concatenate([lay1i, lay2], axis=-1)\n\n\n lay3=tf.keras.layers.Conv2D(n_filters,[filter_size,1],strides=(1,1), dilation_rate=dilation_rate[2],padding='same',activation='relu')(lay12)\n lay3=tf.keras.layers.Conv2D(n_filters,[1,filter_size],strides=(1,1), dilation_rate=dilation_rate[2],padding='same',activation='relu')(lay3)\n# lay312=tf.keras.layers.concatenate([lay3,lay12])\n lay312 = concatenate([lay3, lay2], axis=-1)\n lay = concatenate([lay312, lay1], axis=-1)\n\n \n# lay4=tf.keras.layers.Conv2D(n_filters,[filter_size,1],strides=(1,1), dilation_rate=dilation_rate[3],padding='same',activation='relu')(lay312)\n# lay4=tf.keras.layers.Conv2D(n_filters,[1,filter_size],strides=(1,1), dilation_rate=dilation_rate[3],padding='same',activation='relu')(lay4)\n# # lay=tf.keras.layers.concatenate([lay312,lay4])\n# lay = concatenate([lay312, lay4], axis=-1)\n\n return lay\ndef splitBlock(input, n_filters,dropout=0.0, dilation_rate=1):\n x1nd= DepthwiseConv2D(3, strides=(1, 1), dilation_rate=1,depth_multiplier=1, activation='relu', padding='same', use_bias=False)(input)\n x1nd = BatchNormalization(momentum=0.99)(x1nd)\n x1d=DepthwiseConv2D(3, strides=(1, 1), dilation_rate=dilation_rate,depth_multiplier=1, activation='relu', padding='same', use_bias=False)(input)\n x1d=BatchNormalization(momentum=0.99)(x1d)\n x1=tf.keras.layers.add([x1nd,x1d])\n x1 = Conv2D(n_filters, 1, strides=1, padding='same', activation='relu', kernel_initializer=winit,\n dilation_rate=1, use_bias=False, kernel_regularizer=l2_regularizer(0.00004))(x1)\n x1=BatchNormalization(momentum=0.99)(x1)\n x2 = Conv2D(n_filters, (1, 3), strides=1, dilation_rate=1,padding='same', activation='relu')(input)\n x2 = Conv2D(n_filters, (3, 1), strides=1, dilation_rate=1,padding='same', activation='relu')(x2)\n x2 = BatchNormalization(momentum=0.99)(x2)\n x2 = Conv2D(n_filters, (1, 3), strides=1, 
dilation_rate=dilation_rate,padding='same', activation='relu')(x2)\n x2 = Conv2D(n_filters, (3, 1), strides=1, dilation_rate=dilation_rate,padding='same', activation='relu')(x2)\n x2 = BatchNormalization(momentum=0.99)(x2)\n x=concatenate([x2, x1], axis=-1)\n if input.shape[3] == x.shape[3]:\n x=tf.keras.layers.add([input, x])\n x=BatchNormalization(momentum=0.99)(x)\n return x \n \ndef LVNET(inputs,k=16): \n fc1 = tf.keras.layers.Conv2D(2*k,(3,3), dilation_rate=1,strides=(1,1),padding='same',activation='relu')(inputs)\n \n fc2x= splitBlock(fc1, n_filters=2*k, dropout=0.0, dilation_rate=2)\n fc2d=downsampleBlock(fc2x,2*k,dropout=0.0)\n fc2 = Dilationlayer(fc2d,2*k, filter_size=3, dilation_rate=1,dropout=0.0) \n fc3 = downsampleBlock(fc2,4*k,dropout=0.0)\n fc3dense = DenseDilatedBlock(fc3,4*k,filter_size=3, dilation_rate=[2,4,8])\n fc3x= splitBlock(fc3dense, n_filters=4*k, dropout=0.0, dilation_rate=3)\n \n fc4x= splitBlock(fc1, n_filters=2*k, dropout=0.0, dilation_rate=2) \n fc4d=downsampleBlock(fc4x,2*k,dropout=0.0)\n fc4 = Dilationlayer(fc4d,2*k, filter_size=3, dilation_rate=1,dropout=0.0) \n fc5 = downsampleBlock(fc4,4*k,dropout=0.0)\n fc6 = DenseDilatedBlock(fc5,4*k,filter_size=3, dilation_rate=[2,4,8])\n fc6x= splitBlock(fc6, n_filters=4*k, dropout=0.0, dilation_rate=3)\n \n fc7c=concatenate([fc3x,fc6x],axis=-1)\n fc7 = downsampleBlock(fc7c,8*k, dropout=0.0)\n \n fc8 = Dilationlayer(fc7,8*k,filter_size=3, dilation_rate=2,dropout=0.0)\n fc8x= splitBlock(fc8, n_filters=8*k, dropout=0.0, dilation_rate=4)\n fc10 =splitBlock(fc8, n_filters=8*k, dropout=0.0, dilation_rate=6)\n fc14 = Dilationlayer(fc10,8*k, filter_size=3, dilation_rate=2,dropout=0.0)\n fc9 = tf.keras.layers.Conv2DTranspose(4*k,(3,3), strides=2,padding='same')(fc14)\n fc9= BatchNormalization(momentum=0.99)(fc9)\n fc11 = tf.keras.layers.Conv2DTranspose(2*k,(3,3), strides=2,padding='same')(fc9)\n fc11= BatchNormalization(momentum=0.99)(fc11)\n fco = tf.keras.layers.Conv2DTranspose(1*k,(3,3), strides=2,padding='same')(fc11)\n fco= BatchNormalization(momentum=0.99)(fco)\n fco = tf.keras.layers.Conv2D(8*k,(3,3), dilation_rate=1,strides=(1,1),padding='same',activation='relu')(fco)\n output= tf.keras.layers.Conv2D(1,(1,1),strides=1,padding='same',activation='sigmoid',)(fco)\n FcdDN = tf.keras.Model(inputs =[inputs] , outputs = [output])\n return FcdDN\n ","repo_name":"navchetan-awasthi/Left_Ventricle_Segmentation","sub_path":"LVNet/utilModels.py","file_name":"utilModels.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"30783808418","text":"#!/usr/bin/env python3\n\"\"\"Annotations\"\"\"\nimport typing\n\n\ndef sum_list(input_list: typing.List[float]) -> float:\n \"\"\"This script defines a function sum_list and\n takes a list as input, it returns the sum of the floats in the list\n\n Keyword arguments:\n input_list -- A list of float values\n Return: returns the sum of all floats in the list\n \"\"\"\n sum: float = 0.0\n for flt in input_list:\n sum += flt\n return sum\n","repo_name":"phurhard/alx-backend-python","sub_path":"0x00-python_variable_annotations/5-sum_list.py","file_name":"5-sum_list.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"45172526880","text":"# This program turns on the LED on pin 4 for 3 seconds and then turn off\n\nimport RPi.GPIO as GPIO\nimport time\n\nledPin = 4 # define the LED 
pin\n\nGPIO.setmode(GPIO.BCM) # set Broadcom's pin numbering scheme\nGPIO.setup(ledPin, GPIO.OUT) # set led pin to OUTPUT mode\nGPIO.output(ledPin, GPIO.HIGH) # pull up the pin\n\ntime.sleep(3) # wait for 3 seconds\n\nGPIO.output(ledPin, GPIO.LOW) # pull down the pin\n\n\n","repo_name":"terryoy/rpi-python-exercises","sub_path":"gpio/gpio.py","file_name":"gpio.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"33311929449","text":"from Clases.Conexion import Conexion\nfrom Clases.Producto import Producto\nclass Pedido:\n __id=None\n __listaProds=None\n __fecPedido= None\n __autPedido= None\n __total= None\n __entregado= None\n __listaPedidos = []\n def __init__(self, codP, fecha, autor):\n self.__fecPedido = fecha\n self.__autPedido = autor\n self.__id = codP\n self.__listaProds = []\n self.__total = 0\n self.__entregado = False\n def getFecPedido(self):\n return self.__fecPedido\n def getAutPedido(self):\n return self.__autPedido\n def getListaProds(self):\n return self.__listaProds\n def getTotal(self):\n return self.__total\n def getEntregado(self):\n return self.__entregado\n def getId(self):\n return self.__id\n \n def agregarProducto(self, prodPedido):\n self.__listaProds.append(prodPedido)\n def calcularTotal(self):\n for prod in self.__listaProds:\n self.__total = self.__total+prod.getPrecio()\n return self.__total\n def imprimirPedido(self):\n print(\"Código: \", self.__id)\n print(\"Fecha: \", self.__fecPedido)\n for producto in self.__listaProds:\n print(\"Cód:\" , producto.getId(), \" | Nombre: \", producto.getNombre(), \" | Precio: \", producto.getPrecio())\n print(\" --- TOTAL --- : \", self.calcularTotal())\n @staticmethod\n def verTodos():\n return Pedido.__listaPedidos\n @staticmethod\n def agregarPedido(pedido, listaProductos):\n con = Conexion()\n con.conectar()\n cur = con.c()\n recs = [pedido]\n sentencia = \"INSERT INTO pedido (fecha, fk_id_autor, entregado) VALUES (%s,%s,%s)\"\n cur.executemany(sentencia, recs)\n con.grabar()\n ultimoId = \"SELECT id FROM pedido ORDER BY id DESC LIMIT 1\"\n cur.execute(ultimoId)\n resultado = cur.fetchall()\n for dato in resultado:\n ultimoId = dato\n for producto in listaProductos:\n datos = [(producto, ultimoId,1)]\n sentencia = \"INSERT INTO productopedido (fk_id_producto, fk_id_pedido, cantidad) VALUES (%s,%s,%s)\"\n cur.executemany(sentencia, datos)\n con.grabar()\n con.desconectar()\n @staticmethod\n def cargarLista():\n con = Conexion()\n con.conectar()\n cur = con.c()\n q = \"SELECT pedido.id, fecha, entregado, fk_id_autor FROM pedido INNER JOIN usuario ON pedido.fk_id_autor = usuario.id\"\n cur.execute(q)\n resultado = cur.fetchall()\n for pedido in resultado:\n p = Pedido(pedido[0], pedido[1], pedido[2])\n qDos = \"SELECT producto.id, producto.nombre, producto.descripcion, producto.precio, producto.stock from productopedido inner join producto on productopedido.fk_id_producto = producto.id \" \\\n \"inner join pedido on pedido.id = productopedido.fk_id_pedido where pedido.id = \"+str(p.getId()) \n cur.execute(qDos)\n productos = cur.fetchall()\n for producto in productos:\n prod = Producto(producto[0], producto[1], producto[2], producto[3], producto[4])\n p.agregarProducto(prod)\n Pedido.__listaPedidos.append(p)\n con.desconectar()\n ###Setters y getters\n","repo_name":"depcsuite/python","sub_path":"Biblioteca/Unidad 
III/Clase3ActividadDos/Clases/Pedido.py","file_name":"Pedido.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"30333760583","text":"from Bio import SeqIO\ninput_filename = \"NC_000913.faa\"\noutput_filename = \"NC_000913_long_only.faa\"\ncount = 0\ntotal = 0\noutput_handle = open(output_filename, \"w\")\nfor record in SeqIO.parse(input_filename, \"fasta\"):\n total = total + 1\n if 100 <= len(record):\n count = count + 1\n SeqIO.write(record, output_handle, \"fasta\")\noutput_handle.close()\nprint(str(count) + \" records selected out of \" + str(total))\n","repo_name":"peterjc/biopython_workshop","sub_path":"writing_sequence_files/length_filter.py","file_name":"length_filter.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"93"} +{"seq_id":"28162788012","text":"from datetime import datetime\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport sys, time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\n\nCONTACTS_PATH = './contacts.txt'\ntargets = []\nmsg = 'Este es un mensaje de prueba'\ndriver = webdriver.Chrome('./chromedriver.exe')\n\ndef send():\n wait = WebDriverWait(driver, 600)\n wait5 = WebDriverWait(driver, 5)\n\n for target in targets:\n x_arg = '//span[contains(@title,' + '\"' + target + '\"' +')]'\n group_title = wait.until(EC.presence_of_element_located((\n By.XPATH, x_arg)))\n group_title.click()\n\n message = driver.find_element_by_class_name('_13mgZ')\n message.send_keys(msg)\n\n sendbutton = driver.find_element_by_class_name('_3M-N-')\n sendbutton.click()\n time.sleep(2)\n\n #driver.close()\n\ndef main():\n try:\n with open(CONTACTS_PATH) as f:\n for contact in f.readlines():\n targets.append(contact.rstrip('\\n'))\n except Exception as e:\n print(e); driver.close(); sys.exit(1)\n\n driver.get(\"https://web.whatsapp.com/\")\n\n sched = BlockingScheduler()\n # Schedule job_function to be called every 30 seconds\n sched.add_job(send, 'interval', seconds=30)\n sched.start()\n\nif __name__=='__main__':\n main()","repo_name":"nosceteipsvm/whatsapp-automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"70438108146","text":"# IMPORT DISCORD.PY. ALLOWS ACCESS TO DISCORD'S API.\r\nimport discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport time\r\n# Import the os module.\r\nimport os\r\nfrom config import token\r\n# GETS THE CLIENT OBJECT FROM DISCORD.PY. 
CLIENT IS SYNONYMOUS WITH BOT.\r\n# bot = discord.Client()\r\nbot = commands.Bot(command_prefix=\"\")\r\n\r\n# EVENT LISTENER FOR WHEN THE BOT HAS SWITCHED FROM OFFLINE TO ONLINE.\r\n@bot.event\r\nasync def on_ready():\r\n # CREATES A COUNTER TO KEEP TRACK OF HOW MANY GUILDS / SERVERS THE BOT IS CONNECTED TO.\r\n guild_count = 0\r\n\r\n # LOOPS THROUGH ALL THE GUILD / SERVERS THAT THE BOT IS ASSOCIATED WITH.\r\n for guild in bot.guilds:\r\n # PRINT THE SERVER'S ID AND NAME.\r\n print(f\"- {guild.id}(name: {guild.name})\")\r\n # INCREMENTS THE GUILD COUNTER\r\n guild_count = guild_count + 1\r\n\r\n # PRINTS HOW MANY GUILDS/SERVERS THE BOT IS IN\r\n print(\"PyBot is in \" + str(guild_count) + \" guilds.\")\r\n\r\n await bot.change_presence(activity=discord.Game(name=\"Python 3 on VSCode\"))\r\n\r\n # Cogs code - start\r\n for cog in os.listdir(r\"cogs\"): # Loop through each file in your \"cogs\" folder.\r\n if cog.endswith(\".py\"):\r\n try:\r\n cog = f\"cogs.{cog.replace('.py', '')}\"\r\n bot.load_extension(cog) # Load the file as an extension.\r\n except Exception as e:\r\n print(f\"{cog} is failed to load:\")\r\n raise e\r\n # Cogs code - end\r\n# @bot.event\r\n# async def on_voice_state_update(member, before, after): \r\n# #if channel.id == \"849184911863578665\": #BoysLockerRoom \r\n# #if not before.channel and after.channel:\r\n# #channel = channel.get_channel(849184911863578665) \r\n# #await channel.send(\"Hello welcome to BoysLockerRoom\")\r\n# if before.channel is None and after.channel is not None:\r\n# if after.channel.id == [849184911863578665]:\r\n# await member.channel.send(\"Welcome to BoysLockerRoom\")\r\n\r\n@bot.event\r\nasync def on_voice_state_update(member, before, after):\r\n nagUndeaf = before.self_deaf and not after.self_deaf\r\n nagUnmute = before.self_mute and not after.self_mute\r\n nagtanggalNgStream = before.self_stream and not after.self_stream\r\n nagtanggalNgVideo = before.self_video and not after.self_video\r\n\r\n # ignore nagUndeaf / nagUnmute\r\n if nagUndeaf or nagUnmute:\r\n return\r\n\r\n # ignore nagDeaf / nagMute\r\n if after.self_deaf or after.self_mute:\r\n return\r\n\r\n # ignore nagstream / nag-tanggal ng stream\r\n if after.self_stream or nagtanggalNgStream:\r\n return\r\n\r\n # ignore nagturn-on ng video cam / nagtanggal ng video cam\r\n if after.self_video or nagtanggalNgVideo:\r\n return\r\n \r\n # ignore yung umalis ng Channel malamang tanga ka ba\r\n if after.channel == None:\r\n return\r\n \r\n # don't greet a bot\r\n if member == bot.user: \r\n return\r\n\r\n # hanapin ang \"private-chat\" Text Channel\r\n channel_found = None\r\n for channel in member.guild.channels:\r\n if \"private-chat\" in channel.name:\r\n channel_found = channel\r\n # make sure na may message send lang kapag pumasok sa BoysLockerRoom \r\n if after.channel.name == \"BoysLockerRoom\":\r\n # yung Channel object ang nakakapag .send(), this case si \"private-chat\"\r\n await channel_found.send(f\"Welcome to {after.channel.name} {member.mention}\")\r\n break\r\n if \"council-chat\" in channel.name:\r\n channel_found = channel\r\n if after.channel.name == \"Extreme High Council\":\r\n await channel_found.send(f\"Welcome to {after.channel.name} {member.mention}\")\r\n break\r\n #if channel.name.__contains__(\"main-chat\"):\r\n # channel_found = channel\r\n # if after.channel.name.__contains__(\"Ranked ng Smurf\"):\r\n # await channel_found.send(f\"Welcome to {after.channel.name} {member.mention} stream mo naman lods sayang boost\")\r\n # break\r\n\r\n#EVENT LISTENER FOR 
WHEN A NEW MESSAGE IS SENT TO A CHANNEL\r\n@bot.event\r\nasync def on_message(message):\r\n if message.author.bot:\r\n return \r\n # changes message content into lowercase\r\n words=str.lower(message.content)\r\n #grab the user who sent the command\r\n \r\n \r\n #voiceChannel = discord.utils.get(message.guild.voice_channels, name = voice_channel)\r\n #voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n \r\n \r\n brooklyn_99_quotes = [\r\n 'I\\'m the human form of the 💯 emoji.',\r\n 'Bingpot!',\r\n 'Cool. Cool cool cool cool cool cool cool', \r\n 'no doubt no doubt no doubt no doubt.'\r\n ]\r\n the_office = [\r\n \"Would I rather be feared or loved? Easy. Both. I want people to be afraid of how much they love me.\",\r\n \"I\\'m not superstitious, but I am a little stitious.\",\r\n \"The worst thing about prison was the dementors.\"\r\n ]\r\n dead = [\r\n \"https://s.yimg.com/uu/api/res/1.2/27yeB91IS2Vg58yvePUJ9A--~B/aD03Njg7dz0xMDI0O2FwcGlkPXl0YWNoeW9u/http://media.zenfs.com/en-SG/homerun/the_hive_asia_947/9bc64f74c9dcd321e2daa890e112348f\",\r\n \"http://assets.rappler.com/C6965FC179534E97B3CC676BBFF1599A/img/7779C656CA9C486EB29364A166236AF8/emmannn.jpg\",\r\n \"https://pbs.twimg.com/media/Dxq12kBVAAEwz85.jpg\",\r\n \"https://ichef.bbci.co.uk/news/976/cpsprodpb/1344F/production/_116572987_dacerafacebook.jpg\",\r\n \"https://www.getrealphilippines.com/wp-content/uploads/2018/05/ninoy_aquino_international_airport-1.jpg\",\r\n \"https://cdn.discordapp.com/attachments/849185224221786132/945202027313635408/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f776174747061642d6d656469612d736572766963652f53746f7279496d6167652f5f7263446536793975585f5f7a773d3d2d3833333930313838322e313565663162313030646362633533643632353031313637373235382e6a7067.png\"\r\n ]\r\n marcos = [\r\n \"https://cdn.discordapp.com/attachments/849185224221786132/945203028590460978/VZSQOHVYTD4J3PZ4SWMXBHMJPE.png\",\r\n \"https://cdn.discordapp.com/attachments/849185224221786132/945202801099800606/marcos.jpg\"\r\n ]\r\n commands = [\r\n \"dead\",\"gelo cruz\",\"the office\",\"dead\",\"cake\",\"depressed\"\r\n ]\r\n \r\n try:\r\n user = message.author\r\n voice_channel = user.voice.channel\r\n \r\n if words == '99!':\r\n response = random.choice(brooklyn_99_quotes)\r\n await message.channel.send(response)\r\n # CHECKS IF THE MESSAGE THAT WAS SENT IS EQUAL TO \"HELLO\".\r\n if words == \"hello\":\r\n # SENDS BACK A MESSAGE TO THE CHANNEL.\r\n await message.channel.send(\"hello ka rin\")\r\n elif words == \"best porn producer in ph\":\r\n await message.channel.send(\"Vivamax films\")\r\n elif \"ayun\" in words:\r\n await message.channel.send(\"Ahyun\")\r\n elif words == \"gelo cruz\":\r\n await message.channel.send(\"Yung matabang tomboy na nangaway ng bata sa McDo sa Pasig\")\r\n time.sleep(1.5)\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/gelocruz.mp3\"))\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n return\r\n elif \"low ranks\" in words:\r\n await message.channel.send(\"Low rank ka naman\" + message.author.mention)\r\n if \"need\" in words:\r\n await message.channel.send(\"Need mo ng susubo ng burat?\")\r\n elif words == \"the office\":\r\n theofficeresponse = random.choice(the_office)\r\n await message.channel.send(theofficeresponse)\r\n elif \"pagsamo\" in words:\r\n await message.channel.send(\"tama na please\")\r\n elif \"kain\" in words:\r\n await 
message.channel.send(\"kain ka burat\")\r\n elif message.content.startswith('wait'):\r\n await message.channel.send('tagal ohh tanginang to') \r\n elif words == \"dead\" or words == \"yataps\":\r\n deadresponse = random.choice(dead)\r\n await message.channel.send(deadresponse)\r\n elif words == \"cake\" or words == \"lodicakes\":\r\n marcoslodicake = random.choice(marcos)\r\n await message.channel.send(marcoslodicake)\r\n elif \"depressed\" in words:\r\n await message.channel.send(\"https://c.tenor.com/rv_QpUUUW0sAAAAC/dwight-the-office.gif\")\r\n elif words == \"command list\" or words == \"pybot commands\":\r\n await message.channel.send(commands)\r\n elif words == 'dogstyle':\r\n await message.channel.send(\"https://media.discordapp.net/attachments/819854712576409600/820110846512463882/unknown.png\")\r\n elif 'wag' in words:\r\n await message.channel.send(\"https://images-ext-2.discordapp.net/external/rMF7uHkVpovUDZTQnHo4XAfudHHZHigLYJPa8ZW3Q98/https/i.pinimg.com/236x/c6/49/eb/c649eb07789ec9794983101fca8b01e7.jpg\")\r\n elif words == 'happy new year':\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/happynewyear.mp3\"))\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n return\r\n elif words == \"bully\":\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/gonnacry.mp3\"))\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n return\r\n elif words == \"gameon\":\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/gaymoan.mp3\"))\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n return\r\n elif words == \"mataba\":\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/fatfuck.mp3\"))\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n return\r\n elif words == \"nigger\":\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/nigger.mp3\"))\r\n #time.sleep(7)\r\n #await voice.disconnect()\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n return\r\n elif words == \"focus\":\r\n await voice_channel.connect()\r\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\r\n voice.play(discord.FFmpegPCMAudio(\"audio/dimakafocus.mp3\"))\r\n while voice.is_playing():\r\n time.sleep(.1)\r\n await voice.disconnect()\r\n #time.sleep(4)\r\n # Sleep while audio is playing.\r\n \r\n return\r\n\r\n \r\n # need to add this await command so Cog Commands can work.\r\n # Without this, Cog Commands gets blocked.\r\n # Source: https://stackoverflow.com/a/53706211/7209628\r\n if message.author.bot:\r\n return \r\n await bot.process_commands(message)\r\n except:\r\n await message.channel.send(message.author.mention + ', Teka error ako help')\r\n\r\n\r\n\r\n# EXECUTES THE BOT WITH THE SPECIFIED TOKEN. 
\r\nbot.run(token)\r\n\r\n","repo_name":"Jow-han/pybot.py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"146836131","text":"from noodles import serial\nfrom noodles.tutorial import add, mul\nfrom noodles.run.xenon import (\n Machine, XenonJobConfig)\nfrom noodles.run.messages import (\n JobMessage)\n\nfrom pathlib import Path\nimport xenon\nimport socket\n\n\ndef test_machine_batch_job(xenon_server, tmpdir):\n m = Machine(scheduler_adaptor='local')\n scheduler = m.scheduler\n tmpdir = Path(str(tmpdir))\n\n stdout_file = Path(tmpdir) / 'hostname.txt'\n job_description = xenon.JobDescription(\n executable='/bin/hostname', stdout=str(stdout_file))\n job = scheduler.submit_batch_job(job_description)\n scheduler.wait_until_done(job)\n\n lines = [line.strip() for line in stdout_file.open()]\n assert lines[0] == socket.gethostname()\n\n scheduler.close()\n\n\ndef test_worker_one_batch_job(xenon_server, tmpdir):\n tmpdir = Path(str(tmpdir))\n infile = tmpdir / 'infile.json'\n outfile = tmpdir / 'outfile.json'\n\n wf = add(1, 1)\n job = next(iter(wf._workflow.nodes.values()))\n job_message = JobMessage(42, job)\n\n registry = serial.base()\n print(registry.to_json(job_message), file=infile.open('w'))\n\n m = Machine(scheduler_adaptor='local')\n scheduler = m.scheduler\n\n job_config = XenonJobConfig()\n executable, arguments = job_config.command_line()\n\n job_description = xenon.JobDescription(\n executable=str(executable), arguments=arguments,\n stdin=str(infile), stdout=str(outfile))\n\n job = scheduler.submit_batch_job(job_description)\n scheduler.wait_until_done(job)\n\n result_json = [line.strip() for line in outfile.open()]\n assert len(result_json) == 1\n\n result = registry.from_json(result_json[0])\n assert result.status == 'done'\n assert result.key == 42\n assert result.msg is None\n assert result.value == 2\n\n scheduler.close()\n\n\ndef test_worker_one_online_job(xenon_server):\n wf = mul(6, 7)\n job = next(iter(wf._workflow.nodes.values()))\n job_message = JobMessage(1234, job)\n registry = serial.base()\n msg = registry.to_json(job_message)\n\n m = Machine(scheduler_adaptor='local')\n scheduler = m.scheduler\n\n job_config = XenonJobConfig()\n executable, arguments = job_config.command_line()\n\n xjob_description = xenon.JobDescription(\n executable=str(executable), arguments=arguments)\n\n xjob, xstdout = scheduler.submit_interactive_job(\n xjob_description, [msg.encode()])\n scheduler.wait_until_done(xjob)\n\n result_json = ''.join(m.stdout.decode() for m in xstdout if m.stdout)\n assert len(result_json) > 0\n\n result = registry.from_json(result_json)\n assert result.status == 'done'\n assert result.key == 1234\n assert result.msg is None\n assert result.value == 42\n\n scheduler.close()\n\n\ndef test_worker_ten_online_jobs(xenon_server):\n registry = serial.base()\n\n def single_job(wf):\n job = next(iter(wf._workflow.nodes.values()))\n job_message = JobMessage(0, job)\n return (registry.to_json(job_message) + '\\n').encode()\n\n m = Machine(scheduler_adaptor='local')\n scheduler = m.scheduler\n\n job_config = XenonJobConfig(verbose=True)\n executable, arguments = job_config.command_line()\n\n xjob_description = xenon.JobDescription(\n executable=str(executable), arguments=arguments)\n\n xjob, xstdout = scheduler.submit_interactive_job(\n xjob_description, [single_job(mul(10, i)) for i in range(10)])\n 
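# Block until the interactive worker has consumed all ten job messages.\n    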
scheduler.wait_until_done(xjob)\n\n result_json = \"\"\n for m in xstdout:\n if m.stdout:\n result_json += m.stdout.decode()\n if m.stderr:\n for l in m.stderr.decode().splitlines():\n print(\"remote:\", l)\n\n results = [registry.from_json(r)\n for r in result_json.splitlines()]\n print(\"results: \", end='')\n for r in results:\n print(r.value, end=' ')\n print()\n\n assert len(results) == 10\n\n for i, result in enumerate(results):\n assert result.status == 'done'\n assert result.key == 0\n assert result.msg is None\n assert result.value == i * 10\n\n scheduler.close()\n","repo_name":"NLeSC/noodles","sub_path":"test/xenon/test_machine.py","file_name":"test_machine.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"93"} +{"seq_id":"25814910212","text":"\"\"\"Views are loaded from this directory.\n\nNew views should be placed under catalog/collection/viewname.sql. No further actions needed.\n\"\"\"\n\n\nimport os\nfrom typing import Optional\n\nGobViewType = dict[str, str]\nGobViewsType = dict[str, GobViewType]\n\n\nclass GOBViews:\n \"\"\"GOB Views.\"\"\"\n\n _data: dict[str, dict[str, GobViewsType]] = {}\n\n def __init__(self):\n if not self._data:\n self._load_views()\n\n def _load_views(self):\n \"\"\"Load views from directory and saves them in self._data.\n\n :return:\n \"\"\"\n catalogs = self._dirs_in_path(os.path.dirname(__file__))\n self._data = {}\n\n for catalog, catalog_path in catalogs:\n self._load_catalog_from_dir(catalog, catalog_path)\n\n def _load_catalog_from_dir(self, catalog_name: str, catalog_path: str):\n \"\"\"Load catalog from catalog_path.\n\n :param catalog_name:\n :param catalog_path:\n :return:\n \"\"\"\n self._data[catalog_name] = {}\n\n for collection, collection_path in self._dirs_in_path(catalog_path):\n self._load_collection_from_dir(catalog_name, collection, collection_path)\n\n def _load_collection_from_dir(self, catalog_name: str, collection_name: str, collection_path: str):\n \"\"\"Load collection from collection_path.\n\n :param catalog_name:\n :param collection_name:\n :param collection_path:\n :return:\n \"\"\"\n self._data[catalog_name][collection_name] = {}\n\n sql_files = self._sql_files_in_dir(collection_path)\n\n for view_filename, file_location in sql_files:\n with open(file_location) as file:\n view_name = \".\".join(view_filename.split(\".\")[:-1])\n self._data[catalog_name][collection_name][view_name] = {\n \"query\": file.read(),\n \"name\": f\"{catalog_name}_{collection_name}_{view_name}\",\n }\n\n def _sql_files_in_dir(self, dir: str):\n \"\"\"Return list of tuples of (filename.sql, path/to/filename.sql) for all sql files in dir.\n\n :param dir:\n :return:\n \"\"\"\n files = []\n for item in os.listdir(dir):\n item_path = os.path.join(dir, item)\n if os.path.isfile(item_path) and item.endswith(\".sql\"):\n files.append((item, item_path))\n return files\n\n def _dirs_in_path(self, path: str):\n \"\"\"Return the directories in path. 
Result is a list of tuples [(dirname, dirpath)].\n\n For example:\n\n _dirs_in_path('/tmp')\n result: [('dirA', '/tmp/dirA'), ('dirB', '/tmp/dirB')]\n\n :param path:\n :return:\n \"\"\"\n dirs = []\n for item in os.listdir(path):\n item_path = os.path.join(path, item)\n if os.path.isdir(item_path) and not item.startswith(\"__\"):\n dirs.append((item, item_path))\n return dirs\n\n def get_catalogs(self) -> list[str]:\n \"\"\"Return catalog list.\"\"\"\n return list(self._data.keys())\n\n def get_entities(self, catalog_name) -> list[str]:\n \"\"\"Return collection list.\"\"\"\n return list(self._data[catalog_name].keys())\n\n def get_views(self, catalog_name, entity_name) -> GobViewsType:\n \"\"\"Return catalog collection views.\"\"\"\n return self._data[catalog_name][entity_name]\n\n def get_view(self, catalog_name, entity_name, view_name) -> Optional[GobViewType]:\n \"\"\"Return catalog collection view.\"\"\"\n try:\n return self._data[catalog_name][entity_name][view_name]\n except KeyError:\n return None\n","repo_name":"Amsterdam/GOB-Core","sub_path":"gobcore/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"72584249586","text":"'''Script is designed to use RegEx to detect Biogrid IDs and UniProtKB IDs in\na psi_mitlab file from the IMEX protein-protein interaction database found at:\nhttp://www.ebi.ac.uk/Tools/webservices/psicquic/view/\nThis program focuses on psi-mitlab files produced from interrogating Biogrid, Innate DB, MINT, and IntAct databases\nwith the psicquic tool.\n'''\nimport numpy as np\nimport pandas as pd\nimport warnings\n\nwarnings.simplefilter(action='ignore', category=FutureWarning) #Silences future warning\n\ndef get_id(df):\n '''Searches the dataframe for Biogrid IDs and UniProtKB IDs. The Innate DB, MINT, and IntAct databases seem to\n typically have UniProtKB IDs in either the 'ID(s) interactor' column or the 'Alias(es) interactor' column.\n Biogrid IDs can often be mapped to a single UniProtKB ID using a dictionary from the Biogrid website. 
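UniProtKB accessions are detected with UniProt's published accession regular expression. 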
Biogrid IDs\n will eventually be converted to UniProtKB IDs using the id_converter script.\n '''\n df['Parsed A ID'] = df['#ID(s) interactor A'].str.extract(r'biogrid:(\\d{6})')\n df['Parsed A ID'] = np.where(\n df['Parsed A ID'].isnull(),\n df['#ID(s) interactor A'].str.extract('([OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})')[\n 0],\n df['Parsed A ID'])\n df['Parsed A ID'] = np.where(df['Parsed A ID'].isnull(),\n df['Alias(es) interactor A'].str.extract(\n '([OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})')[0],\n df['Parsed A ID'])\n df['Parsed B ID'] = df['ID(s) interactor B'].str.extract(r'biogrid:(\\d{6})')\n df['Parsed B ID'] = np.where(df['Parsed B ID'].isnull(),\n df['ID(s) interactor B'].str.extract('([OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})')[\n 0],\n df['Parsed B ID'])\n df['Parsed B ID'] = np.where(df['Parsed B ID'].isnull(),\n df['Alias(es) interactor B'].str.extract(\n '([OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})')[0],\n df['Parsed B ID'])\n return df\n\ndef run(filename):\n '''Runs the script.\n '''\n print('Parsing IDs...')\n df = pd.read_csv(filename, delimiter='\\t')\n parsed_df = get_id(df=df)\n print('IDs parsed')\n return parsed_df\n\nif __name__ == '__main__':\n print(run(filename='clusteredQuery_MST1R.txt'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"brettvanderwerff/PPI-Analysis","sub_path":"id_parser.py","file_name":"id_parser.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"93"} +{"seq_id":"41146869328","text":"import json\r\nimport os\r\nimport random\r\n\r\nfrom torch.utils.data import Dataset\r\nfrom collections import defaultdict\r\n\r\nfrom PIL import Image\r\nfrom PIL import ImageFile\r\nfrom dataset.utils import pre_caption\r\n\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\nImage.MAX_IMAGE_PIXELS = None\r\n\r\n\r\n\r\n\r\nclass re_split_eval_dataset(Dataset):\r\n def __init__(self, ann_file, transform, image_root, config, split, max_words=30):\r\n self.ann_imgs = json.load(open(ann_file, 'r'))['images']\r\n self.transform = transform\r\n self.image_root = image_root\r\n self.max_words = max_words\r\n\r\n # mapping total file to split file\r\n self.img_order2id = {}\r\n self.img_id2order = {}\r\n self.text_order2id = {}\r\n self.text_id2order = {}\r\n self.ann = []\r\n self.text = []\r\n self.image = []\r\n self.txt2img = {}\r\n self.img2txt = {}\r\n\r\n txt_order = 0\r\n img_order = 0\r\n for img in self.ann_imgs:\r\n if split == 'all':\r\n assert img['split'] in ['train', 'val', 'test']\r\n prefix = img['filepath'] if 'filepath' in img else config['image_prefix'][img['split']]\r\n elif img['split'] == split:\r\n prefix = img['filepath'] if 'filepath' in img else config['image_prefix'][split]\r\n else:\r\n continue\r\n\r\n prefix_image_name = os.path.join(prefix, img['filename'])\r\n self.image.append(prefix_image_name)\r\n self.img2txt[img_order] = []\r\n self.img_id2order[img['imgid']] = img_order\r\n self.img_order2id[img_order] = img['imgid']\r\n caps = []\r\n for caption in img['sentences']:\r\n caps.append(caption['raw'])\r\n self.text.append(pre_caption(caption['raw'], self.max_words))\r\n self.img2txt[img_order].append(txt_order)\r\n self.txt2img[txt_order] = img_order\r\n self.text_id2order[caption['sentid']] = txt_order\r\n self.text_order2id[txt_order] = caption['sentid']\r\n txt_order += 1\r\n self.ann.append({'image': img['filename'],\r\n 'caption': 
caps})\r\n img_order += 1\r\n\r\n def __len__(self):\r\n return len(self.image)\r\n\r\n def __getitem__(self, index):\r\n prefix_img_name = self.image[index]\r\n image_path = os.path.join(self.image_root, prefix_img_name)\r\n image = Image.open(image_path).convert('RGB')\r\n image = self.transform(image)\r\n\r\n return image, index\r\n\r\n\r\nclass re_nocaps_eval_dataset(Dataset):\r\n def __init__(self, config, transform, max_words=30):\r\n self.ann = json.load(open(config['split_file'], 'r'))\r\n self.transform = transform\r\n self.image_root = config['image_root']\r\n self.max_words = max_words\r\n\r\n self.img_order2id = {}\r\n self.img_id2order = {}\r\n self.text_order2id = {}\r\n self.text_id2order = {}\r\n\r\n self.text = []\r\n self.image = []\r\n self.txt2img = {}\r\n self.img2txt = defaultdict(list)\r\n\r\n img_order = 0\r\n for image in self.ann['images']:\r\n if image['domain'] in config['data_type']:\r\n self.image.append(image['file_name'])\r\n self.img_id2order[image['id']] = img_order\r\n self.img_order2id[img_order] = image['id']\r\n img_order += 1\r\n\r\n text_order = 0\r\n for text in self.ann['annotations']:\r\n if text['image_id'] in self.img_id2order.keys():\r\n self.text.append(pre_caption(text['caption'], self.max_words))\r\n img_order = self.img_id2order[text['image_id']]\r\n self.img2txt[img_order].append(text_order)\r\n self.txt2img[text_order] = img_order\r\n\r\n self.text_id2order[text['id']] = text_order\r\n self.text_order2id[text_order] = text['id']\r\n text_order += 1\r\n\r\n def __len__(self):\r\n return len(self.image)\r\n\r\n def __getitem__(self, index):\r\n prefix_img_name = self.image[index]\r\n image_path = os.path.join(self.image_root, prefix_img_name)\r\n image = Image.open(image_path).convert('RGB')\r\n image = self.transform(image)\r\n\r\n return image, index\r\n\r\n\r\n","repo_name":"luoyetingqiu/speed-up","sub_path":"dataset/caption_dataset.py","file_name":"caption_dataset.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"8094941404","text":"\n\nimport asyncio\nfrom typing import Any\nfrom django.db.models.query import QuerySet\n\nfrom django.forms.models import BaseModelForm\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import redirect\nfrom django.db.models import Sum\nfrom django.http import JsonResponse \nfrom django.contrib.auth.mixins import (\n LoginRequiredMixin,\n PermissionRequiredMixin\n )\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.contrib import messages\n\n\n\nimport win32print\nimport win32evtlog\n\n\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.generic import DetailView, ListView\nfrom django.views.generic.edit import CreateView, DeleteView\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.urls import reverse, reverse_lazy\n\n\n\nfrom .models import Order, JobOrder, JobType, Print, Expense, Payment\nfrom .forms import OrderForm\n\n\ndef index(request):\n return render(request, 'base.html')\n \n \n \n\ndef printer_jobs(request):\n print(win32print.EnumJobs(win32print.OpenPrinter(win32print.GetDefaultPrinter()), 0, -1, 1 ))\n jobs = []\n\n printer_name = win32print.GetDefaultPrinter()\n handle = win32print.OpenPrinter(printer_name)\n job_info = 
win32print.EnumJobs(handle, 0, -1, 2)\n\n for job in job_info:\n job_id, _, _, status = job\n jobs.append({\n 'job_id': job_id,\n 'status': status,\n })\n \n win32print.ClosePrinter(handle)\n\n return render(request, 'printjobs/print_jobs.html', {'jobs': jobs, 'printer_name': printer_name})\n\n\n\nclass CreateJobView(LoginRequiredMixin, CreateView):\n model = Order\n template_name = 'order/create_order.html'\n form_class = OrderForm\n success_url = reverse_lazy('printjob:order')\n \n \n def get_form(self):\n form = super(CreateJobView, self).get_form()\n return form\n \n def form_valid(self, form):\n form.instance.staff = self.request.user\n form.instance.status = 'New'\n form.save()\n return super(CreateJobView, self).form_valid(form)\n \n \nclass RedirectView(View):\n def get(self, request, *args, **kwargs):\n messages.error(request, \"You don't have permission to access this page.\")\n return redirect('account:login')\n \n \nclass RestrictedPageView(View):\n @method_decorator(permission_required('printjobs.access_place_order_page', raise_exception=True))\n def get(self, request, *args, **kwargs):\n return render(request, 'restricted_page.html')\n \n \nclass RedirectView(View):\n def get(self, request, *args, **kwargs):\n messages.error(request, \"You don't have permission to access this page.\")\n return redirect('login')\n \n \n \nclass OrderDetailView(DetailView):\n model = Order\n template_name = 'order/order_details.html'\n \n def get_queryset(self):\n return super().get_queryset()\n\n \nclass OrderedJobDetailView(DetailView):\n model = JobOrder\n template_name = 'order/orderjob_details.html' \n \n\n \n \nclass OrderListView(ListView):\n model = Order\n template_name = 'order/order_list.html'\n \n\nclass OrderedJobListView(ListView):\n model = JobOrder\n template_name = 'order/joborder_list.html'\n queryset = JobOrder.objects.filter(paid=False)\n\n \n\ndef record_print_job_event(request):\n hand = win32evtlog.OpenEventLog('localhost', 'System')\n flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ\n\n events = win32evtlog.ReadEventLog(hand, flags, 0)\n \n for event in events:\n if event.EventID == 307:\n print(event)\n else:\n print('none')\n # Event ID 307 corresponds to print job events\n # print_data = event.StringInserts[1] # Extract print job information\n # # Process and record print_data in the database\n # print(print_data)\n\n events = win32evtlog.ReadEventLog(hand, flags, 0)\n\n win32evtlog.CloseEventLog(hand)\n \n # context = {\n # 'events':events,\n # 'hand': hand,\n # 'flags':flags\n # }\n # {'context':context}\n \n return render(request, 'printjobs/records.html')\n \n \n# Step 3: Run the Monitoring Script\n# Run the monitoring script as a background service or in a separate process to continuously monitor the Windows Event Log for print events.\n\n# Note: Running scripts that access the Windows Event Log API typically requires administrative privileges. Ensure that the script is executed with the necessary permissions.\n\n# Please note that directly interacting with the Windows Event Log API requires knowledge of the Event Log structure and IDs to filter the relevant print job events. You may need to customize the script based on your specific use case and the information logged in the Windows Event Log for print events.\n\n# Additionally, consider using external tools or third-party solutions that specialize in monitoring print activities on Windows systems to ensure more comprehensive monitoring and easier management. 
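Also note that print job events such as Event ID 307 are written to the Microsoft-Windows-PrintService/Operational log, which is disabled by default, rather than to the 'System' log queried above. 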
These tools often offer more robust features and can simplify the process of capturing print events.\n\ndef check_order(request):\n if request.method == 'GET':\n if 'q' in request.GET:\n query = request.GET['q']\n jobs = Order.objects.filter(id__icontains=query)\n return render(request, 'order/check_job.html', {'jobs':jobs})\n \n return render(request, 'order/check_job.html')\n\n@permission_required('printjobs.can_delete_placed_order')\ndef delete_order(request, id):\n order = get_object_or_404(Order, pk=id)\n order.delete()\n return redirect('printjob:order')\n \n# @permission_required('printjobs.can_confirm_order')\ndef confirm_job_order(request, order_id):\n if request.method == 'POST':\n order = get_object_or_404(Order, pk=order_id)\n order.status = 'confirmed'\n order.save()\n \n job_order = JobOrder(\n order = order,\n job_type = order.job_type,\n quantity = order.quantity,\n customer = order.customer,\n total_amount = order.price,\n staff = request.user\n )\n\n job_order.ordered = True\n job_order.save()\n \n return redirect('printjob:checkout')\n\n\n\ndef process_orders_view(request):\n if request.method == 'POST':\n # Get the all id the selected ordered job \n selected_joborders_ids = request.POST.getlist('selected_orders')\n # Filter them out Fetch order data from database \n selected_orders = JobOrder.objects.filter(id__in=selected_joborders_ids) \n # sum the total_amount \n total_price = sum(joborder.total_amount for joborder in selected_orders)\n \n \n # Process selected_orders as needed (e.g., update the orders, send emails, etc.)\n # return HttpResponse('Selected orders processed successfully.')\n \n return render(request, 'order/payment_order_list.html', {'orders': selected_orders, 'total':total_price})\n\n \n\n\n@csrf_exempt\ndef get_order_job(request): \n if request.method == 'POST': \n job_id = request.POST.get('job_id')\n try:\n order_job = JobOrder.objects.get(pk=job_id) \n # order_job is ordered, allow printing \n return JsonResponse({'job_id': order_job.id,\n 'ordered': order_job.ordered, \n # 'customer':order_job.customer, \n # 'staff':order_job.staff,\n 'created_at': order_job.created_at}) \n \n except JobOrder.DoesNotExist:\n # order_job is not ordered, deny printing \n return JsonResponse({'job_id':job_id, 'ordered': False})\n\n\ndef print(request):\n if request.method == 'POST':\n job_id = request.POST.get('job_id')\n ordered_job = get_object_or_404(JobOrder, pk=job_id)\n job_to_print == Print.objects.create()\n \n \n \ndef yearly_job_order_expenses_profit(request):\n pass\n \n \n \ndef monthly_job_order_expenses_profit(request):\n # get year\n # curent_year = request.GET.get('year', 2023)\n # get month\n # curent_month = request.GET.get('month', 8)\n # calculate the start and end dates of the month\n current_year = 2023\n current_month = 8\n start_date = f'{current_year}-{current_month:02d}-01'\n end_date = f'{current_year}-{current_month:02d}-31'\n # get the total jo order amount\n total_job_order_amount = JobOrder.objects.filter(\n created_at__range=[\n start_date, \n end_date\n ]\n ).aggregate(total_amount=Sum('total_amount'))['total_amount'] or 0\n \n # Get the total expenses for the month\n total_expenses = Expense.objects.filter(\n created__range=[\n start_date,\n end_date]\n ).aggregate(total_amount=Sum('amount'))['total_amount'] or 0\n \n # Calculate the profit for the month\n profit = total_job_order_amount - total_expenses\n \n context = {\n 'year': current_year,\n 'month': current_month,\n 'total_job_order_amount': total_job_order_amount,\n 
'total_expenses': total_expenses,\n 'profit':profit\n }\n \n return render(request, 'printjobs/monthly_report.html', context)\n\n\ndef weekly_job_order_expenses_profit(request):\n pass\n\n\n\n\ndef add_print_job_view(request):\n orders = Order.objects.all()\n return render(request, 'add_print_job.html', {'orders': orders})\n\n\n\ndef add_print_job_to_order(request):\n if request.method == 'POST':\n order_id = request.POST.get('order')\n job_type = request.POST.get('job_name')\n quantity = request.POST.get('quantity')\n \n order = Order.objects.get(pk=order_id)\n print_job = JobOrder(order=order, job_type=job_type, quantity=quantity)\n print_job.save()\n \n return render(request, 'print_job_added.html')\n\n\ndef payment(request):\n return render(request, 'order/payment.html')\n\n\ndef generate_receipt(request):\n return render(request, 'order/print.html')\n \n \n \n \n\n\n","repo_name":"abdultruth/printjobmanager","sub_path":"printjobs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"15288383452","text":"import abc\nimport re\nimport sys\nfrom datetime import datetime\nfrom typing import Literal, Union\nimport eagerbib.config as cfg\n\n\nclass BaseProcessingCommand(abc.ABC):\n \"\"\"Base class for processing commands.\"\"\"\n def __init__(self, current_item: dict[str, str]):\n self.current_item = current_item\n\n @property\n @abc.abstractmethod\n def output(self) -> dict[str, str]:\n pass\n\n\nclass UpdateItemProcessingCommand(BaseProcessingCommand):\n \"\"\"A command to update an item in the bibliography.\n\n Args:\n current_item (dict[str, str]): The current item in the bibliography.\n new_item (dict[str, str]): The new item to replace the current item with.\n \"\"\"\n def __init__(self, current_item: dict[str, str], new_item: dict[str, str],\n method: Union[Literal[\"automated\"], Literal[\"manual\"]]):\n super().__init__(current_item)\n\n # Update id/key of the new item to match the current item.\n new_item[\"ID\"] = current_item[\"ID\"]\n\n current_date = datetime.now().strftime(\"%Y-%m-%d\")\n new_item[\"eagerbib_comment\"] = f\"{method} update on {current_date}\"\n\n self.new_item = new_item\n\n @property\n def output(self) -> dict[str, str]:\n return self.new_item\n\n\nclass KeepItemProcessingCommand(BaseProcessingCommand):\n \"\"\"A command to keep an item in the bibliography.\n\n Args:\n current_item (dict[str, str]): The current item in the bibliography.\n \"\"\"\n def __init__(self, current_item: dict[str, str]):\n super().__init__(current_item)\n\n @property\n def output(self) -> dict[str, str]:\n return self.current_item\n\n\ndef transform_reference_dict_to_lines(item: dict[str, str]) -> list[str]:\n \"\"\"Transform a reference dictionary to a list of lines.\"\"\"\n item_lines = [f\"@{item['ENTRYTYPE']}{{{item['ID']},\"]\n for key, value in item.items():\n if key == \"ENTRYTYPE\" or key == \"ID\":\n continue\n item_lines += [f\" {key} = {{{value}}},\"]\n item_lines += [\"}\"]\n return item_lines\n\n\ndef _normalize_preprints(entries: list[dict[str, str]]):\n \"\"\"Normalizes preprints in the bibliography.\n\n Args:\n entries (list[dict[str, str]]): The bibliography entries that will be updated\n in-place.\n \"\"\"\n print(\"Normalizing preprints:\")\n for i in range(len(entries)):\n entry = entries[i]\n entry_str = \" \".join(transform_reference_dict_to_lines(entry)).lower()\n arxiv_ids = set()\n # Find arXiv IDs in the entry. 
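This pattern was proposed by the rebiber authors.\n # Added illustrative note: for an entry containing 'arxiv.org/abs/2101.12345' the two\n # captured groups are '2101' and '12345'; they are reassembled into the eprint id\n # '2101.12345' below, and its first two digits give the year '2021'.\n 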
for m in re.finditer(\n r\"(arxiv:|arxiv.org\\/abs\\/|arxiv.org\\/pdf\\/)([0-9]{4}).([0-9]{5})\",\n entry_str):\n arxiv_ids.add(f\"{m.group(2)}.{m.group(3)}\")\n if len(arxiv_ids) > 1:\n print(f\"• Cannot normalize {entry['ID']}: conflicting arXiv IDs found.\")\n elif len(arxiv_ids) == 1:\n new_entry = {k: entry[k] for k in [\"ID\", \"ENTRYTYPE\", \"author\", \"title\"]}\n new_entry[\"eprint\"] = arxiv_ids.pop()\n new_entry[\"journal\"] = \"arXiv preprint\"\n new_entry[\"volume\"] = f\"abs/{new_entry['eprint']}\"\n new_entry[\"year\"] = \"20\" + new_entry[\"eprint\"].split(\".\")[0][:2]\n new_entry[\"url\"] = f\"https://arxiv.org/abs/{new_entry['eprint']}\"\n # Update entry by removing old and inserting new entry.\n entries.pop(i)\n entries.insert(i, new_entry)\n\n\ndef _remove_duplicates(entries: list[dict[str, str]]):\n \"\"\"Removes duplicate entries from the bibliography.\n\n Args:\n entries (list[dict[str, str]]): The bibliography entries that will be updated\n in-place.\n \"\"\"\n # Remove duplicate entries based on their ID.\n duplicated_idxs = []\n for i1 in range(len(entries)):\n for i2 in range(i1 + 1, len(entries)):\n if entries[i1][\"ID\"] == entries[i2][\"ID\"]:\n duplicated_idxs.append(i2)\n if len(duplicated_idxs) > 0:\n print(\"Detected duplicate keys:\")\n for i in sorted(duplicated_idxs, reverse=True):\n print(f\"• {entries[i]['ID']}\")\n del entries[i]\n\n # Remove duplicate entries based on their properties.\n duplicated_idx_pairs = []\n for i1 in range(len(entries)):\n l1 = transform_reference_dict_to_lines(entries[i1])\n s1 = \"\\n\".join(l1[1:])\n for i2 in range(i1 + 1, len(entries)):\n l2 = transform_reference_dict_to_lines(entries[i2])\n s2 = \"\\n\".join(l2[1:])\n if s1 == s2:\n duplicated_idx_pairs.append((i1, i2))\n duplicated_idxs = sorted(duplicated_idx_pairs, key=lambda x: x[1], reverse=True)\n if len(duplicated_idxs) > 0:\n print(\"Detected duplicate entries:\")\n for (i1, i2) in duplicated_idxs:\n print(f\"• {entries[i2]['ID']} -> {entries[i1]['ID']}\")\n del entries[i2]\n\n\ndef _remove_fields(entries: list[dict[str, str]], fields: list[str]):\n \"\"\"Removes fields from the bibliography entries.\n\n Args:\n entries (list[dict[str, str]]): The bibliography entries that will be updated\n in-place.\n fields (list[str]): The fields that will be removed.\n \"\"\"\n\n print(\"Removing fields:\")\n for fk in fields:\n print(f\"• {fk}\")\n for entry in entries:\n if fk in entry:\n del entry[fk]\n\n\ndef _apply_abbreviations(entries: list[dict[str, str]],\n abbreviations: list[cfg.NameNormalizationConfig]):\n \"\"\"Applies name_normalizations to the bibliography entries.\n\n Args:\n entries (list[dict[str, str]]): The bibliography entries that will be updated\n in-place.\n abbreviations (list[cfg.NameNormalizationConfig]): The name_normalizations that will be\n used to update the entries.\n \"\"\"\n\n if len(abbreviations) == 0:\n return\n print(\"Normalizing names.\")\n\n for abbreviation in abbreviations:\n for full_name in abbreviation.alternative_names:\n # Check if the regular expression is valid.\n try:\n re.compile(full_name)\n except re.error:\n print(f\"• Invalid regular expression for {abbreviation.name}: {full_name}\")\n sys.exit(-1)\n for entry in entries:\n for field in [\"journal\", \"booktitle\"]:\n if field in entry and re.match(full_name, entry[field]):\n entry[field] = abbreviation.name\n\n\ndef process_commands(commands: list[BaseProcessingCommand],\n 
config: cfg.OutputProcessorConfig) -> list[dict[str, str]]:\n \"\"\"Process the commands and return the output bibliography items.\n\n Args:\n commands (list[BaseProcessingCommand]): The processing commands.\n config: (cfg.OutputProcessorConfig): The output writer configuration.\n\n Returns:\n list[dict[str, str]]: The output bibliography items.\n \"\"\"\n entries = [command.output for command in commands]\n\n # Sort entries.\n if config.sort:\n entries = sorted(entries, key=lambda x: x[\"ID\"])\n\n # Apply name_normalizations.\n if len(config.name_normalizations) > 0:\n _apply_abbreviations(entries, config.name_normalizations)\n\n # Normalize preprints.\n if config.normalize_preprints:\n _normalize_preprints(entries)\n\n # Remove unwanted fields.\n if len(config.remove_fields) > 0:\n _remove_fields(entries, config.remove_fields)\n\n # Remove duplicates.\n if config.deduplicate:\n _remove_duplicates(entries)\n\n return entries\n\n\ndef write_output(output: list[dict[str, str]], output_fn: str) -> None:\n \"\"\"Write the output to a file in BibTeX format.\n\n Args:\n output (list[dict[str, str]]): The output bibliography items to write.\n output_fn (str): The path to the output file.\n \"\"\"\n all_lines = []\n for item in output:\n all_lines += transform_reference_dict_to_lines(item) + [\"\"]\n\n # Remove the last newline.\n if len(all_lines) > 0:\n del all_lines[-1]\n\n with open(output_fn, \"w\") as f:\n f.write(\"\\n\".join(all_lines))\n","repo_name":"zimmerrol/eagerbib","sub_path":"eagerbib/output_processor.py","file_name":"output_processor.py","file_ext":"py","file_size_in_byte":8205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"16382236986","text":"import logging\n\nlogger=logging.getLogger(__name__)\n\nfileloc=logging.FileHandler(\"loggi.log\")\nformat=logging.Formatter(\"%(asctime)s : %(levelname)s : %(message)s : %(name)s\")\nfileloc.setFormatter(format)\n\nlogger.addHandler(fileloc)\nlogger.setLevel(logging.DEBUG)\n\nlogger.debug(\"This is just f\")","repo_name":"rajabhinav02/pythonProject","sub_path":"pytest_demo/log_test.py","file_name":"log_test.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"22172881621","text":"################### PORT 5002 ###################\n\n# Flask\nfrom flask import Flask, request, jsonify\nfrom flask_jwt_extended import JWTManager, jwt_required\nfrom flask_mysqldb import MySQL\nfrom flask_socketio import SocketIO\nfrom flask_cors import CORS\n\n# Misc\nfrom dotenv import load_dotenv\nimport os\nload_dotenv()\n\n# Microservice app\ngps = Flask(__name__)\ncors = CORS(gps, resources={r\"/*\": {\"origins\": \"http://127.0.0.1:4200\"}})\n\ngps.config[\"MYSQL_PORT\"] = int(os.getenv(\"MYSQL_PORT\"))\ngps.config[\"MYSQL_HOST\"] = os.getenv(\"MYSQL_HOST\")\ngps.config[\"MYSQL_USER\"] = os.getenv(\"MYSQL_USER\")\ngps.config[\"MYSQL_PASSWORD\"] = os.getenv(\"MYSQL_PASSWORD\")\ngps.config[\"MYSQL_DB\"] = os.getenv(\"MYSQL_DB\")\ngps.config[\"JWT_SECRET_KEY\"] = os.getenv(\"JWT_SECRET_KEY\")\n\njwt = JWTManager(gps)\nmysql = MySQL(gps)\nsocketio = SocketIO(gps, cors_allowed_origins=\"http://127.0.0.1:4200\")\n\n# WebSocket\n@socketio.on(\"connect\")\ndef handle_connect():\n print(\"Server is listening...\")\n\n@socketio.on(\"disconnect\")\ndef handle_disconnect():\n print(\"Server has disconnected\")\n\n@socketio.on(\"update\")\ndef handle_update():\n try:\n cursor = 
mysql.connection.cursor()\n cursor.execute(\"CALL UpdateMap()\")\n mysql.connection.commit()\n cursor.close()\n\n data = get_truck_locations()\n trucks = [{\n \"id\": truck[0],\n \"name\": truck[1],\n \"latitude\": truck[2],\n \"longitude\": truck[3]\n } for truck in data]\n\n data = get_gps_data()\n air = [{\n \"quality\": record[0],\n \"contaminants\": record[1] == 1,\n \"latitude\": record[2],\n \"longitude\": record[3]\n } for record in data]\n\n socketio.emit(\"updated\", {\"trucks\": trucks, \"air\": air})\n except Exception as e:\n print(f\"Error in handle_update: {str(e)}\")\n\n# Get locations for a map\n@gps.route(\"/get-locations\", methods=[\"GET\"])\n@jwt_required()\ndef get_locations():\n try:\n trucks = get_truck_locations()\n air_data = get_gps_data()\n\n response = {\n \"message\": \"Ubicaciones recuperadas con éxito\",\n \"trucks\": [{\n \"id\": truck[0],\n \"name\": truck[1],\n \"latitude\": truck[2],\n \"longitude\": truck[3]\n } for truck in trucks],\n \"air\": [{\n \"quality\": record[0],\n \"contaminants\": record[1] == 1,\n \"latitude\": record[2],\n \"longitude\": record[3]\n } for record in air_data],\n \"error\": False\n }\n return jsonify(response), 200, {\"Content-Type\": \"application/json\"}\n except Exception as e:\n response = {\n \"message\": f\"Internal Server Error: {str(e)}\",\n \"error\": True\n }\n return jsonify(response), 500, {\"Content-Type\": \"application/json\"}\n\n# Get purchases and their statuses\n@gps.route(\"/get-purchases\", methods=[\"GET\"])\n@jwt_required()\ndef get_purchases():\n try:\n purchases = get_truck_purchases()\n\n if purchases:\n response = {\n \"message\": \"Compras recuperadas con éxito\",\n \"purchases\": [{\n \"truck\": purchase[0],\n \"id\": purchase[1],\n \"status\": purchase[2]\n } for purchase in purchases],\n \"error\": False\n }\n return jsonify(response), 200, {\"Content-Type\": \"application/json\"}\n\n response = {\n \"message\": \"No se encontraron compras\",\n \"error\": True\n }\n return jsonify(response), 404, {\"Content-Type\": \"application/json\"}\n except Exception as e:\n response = {\n \"message\": f\"Internal Server Error: {str(e)}\",\n \"error\": True\n }\n return jsonify(response), 500, {\"Content-Type\": \"application/json\"}\n\n# Retrieve all trucks' locations from the database\ndef get_truck_locations():\n try:\n cursor = mysql.connection.cursor()\n cursor.execute(\"\"\"\n SELECT truck_id, name, latitude, longitude\n FROM trucks\n WHERE latitude IS NOT NULL AND longitude IS NOT NULL\n \"\"\")\n data = cursor.fetchall()\n cursor.close()\n\n return data\n except Exception as e:\n print(f\"Error in get_truck_locations: {str(e)}\")\n return []\n\n# Retrieve recent air-quality readings from the database\ndef get_gps_data():\n try:\n cursor = mysql.connection.cursor()\n cursor.execute(\"\"\"\n SELECT air_quality, contaminants, latitude, longitude\n FROM gps_data\n WHERE date > NOW() - INTERVAL 10 SECOND\n \"\"\")\n data = cursor.fetchall()\n cursor.close()\n\n return data\n except Exception as e:\n print(f\"Error in get_gps_data: {str(e)}\")\n return []\n\n# Retrieve each truck and purchase association\ndef get_truck_purchases():\n try:\n cursor = mysql.connection.cursor()\n cursor.execute(\"\"\"\n SELECT t.name, p.purchase_id, p.status\n FROM trucks t\n JOIN purchase p\n ON t.truck_id = p.truck_id;\n \"\"\")\n data = cursor.fetchall()\n cursor.close()\n\n return data\n except Exception as e:\n print(f\"Error in get_truck_purchases: {str(e)}\")\n return []\n\nif __name__ == \"__main__\":\n 
socketio.run(gps, debug=True, host=\"0.0.0.0\", port=5002)\n","repo_name":"edu-flores/jewelry-database","sub_path":"services/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"27290057786","text":"from urllib.request import urlopen #(library)\nfrom bs4 import BeautifulSoup\nfrom collections import OrderedDict\n#1. Connect to the page (procedural step)\nurl='https://dantri.com.vn'\nconnection = urlopen(url)\n#2. Download the page content (procedural step)\nraw_data=connection.read()\npage_content=raw_data.decode(\"utf8\") #utf8 = unicode - handles accented characters\n\nwith open(\"dantri.html\", \"wb\") as f: #wb = write binary (raw data)\n f.write(raw_data)\n\n#3. Find ROI (region of interest)\nsoup=BeautifulSoup(page_content, \"html.parser\")\nul=soup.find(\"ul\",\"ul1 ulnew\") #href=\"\", id=\"\"\n# print(ul.prettify())\n\n#4. Extract data\nli_list=ul.find_all(\"li\") #li_list is a list of soups\n# li=li_list[0]\n# h4=li.h4\n# print(h4)\n# a=h4.a\nnews_list=[]\nfor li in li_list:\n a=li.h4.a\n title=a.string\n # print(a.string)\n link= url+a[\"href\"]\n news=OrderedDict({\n \"title\":title,\n \"link\":link\n })\n news_list.append(news)\nprint(news_list)\n\n#5. Save data (procedural step)\nimport pyexcel\npyexcel.save_as(records=news_list, dest_file_name=\"dantri.xlsx\")\n","repo_name":"lamnguyen9396/nguyenhonglam-fundamental-c4e23","sub_path":"Labs/Lab2/dantri_scraping.py","file_name":"dantri_scraping.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"15276052498","text":"import copy\nimport sys\nsys.stdin = open('input.txt')\n# 세찬\n\ndef solution(n,map,core):\n stack = [(0,0,0,copy.deepcopy(map))]\n visit = []\n minlen = 9999\n maxcore = 0\n answer = []\n\n\n while stack:\n #idx, sumOfCoreCount, sumOfLine ,map\n idx,coreCount, lineLen, copiedMap = stack.pop()\n answer.append((coreCount,lineLen))\n if idx == len(core):\n continue\n\n currCoreidx, currCorejdx = core[idx]\n checks = 0\n\n for i in range(4):\n newCopyMap = copy.deepcopy(copiedMap)\n\n #right\n if i == 0:\n if 1 not in newCopyMap[currCoreidx][currCorejdx+1:]:\n for j in range(currCorejdx+1,n):\n newCopyMap[currCoreidx][j] = 1\n stack.append((idx+1,coreCount+1,lineLen+n-currCorejdx-1,newCopyMap))\n checks+=1\n #left\n elif i == 1:\n if 1 not in newCopyMap[currCoreidx][:currCorejdx]:\n for j in range(0,currCorejdx):\n newCopyMap[currCoreidx][j] = 1\n stack.append((idx+1,coreCount+1,lineLen+currCorejdx,newCopyMap))\n checks+=1\n #up\n elif i == 2:\n checkAboutBlock = True\n for j in range(currCoreidx):\n if newCopyMap[j][currCorejdx] == 1:\n checkAboutBlock = False\n break\n if checkAboutBlock:\n for j in range(currCoreidx):\n newCopyMap[j][currCorejdx] = 1\n stack.append((idx+1,coreCount+1,lineLen + currCoreidx,newCopyMap))\n checks+=1\n\n #down\n else:\n checkAboutBlock = True\n for j in range(currCoreidx+1,n):\n if newCopyMap[j][currCorejdx] == 1:\n checkAboutBlock = False\n break\n if checkAboutBlock:\n for j in range(currCoreidx+1,n):\n newCopyMap[j][currCorejdx] =1\n stack.append((idx+1,coreCount+1,lineLen+n-currCoreidx-1,newCopyMap))\n checks+=1\n\n if checks == 0:\n stack.append((idx+1,coreCount,lineLen,newCopyMap))\n\n answer.sort(key = lambda x:(-x[0],x[1]))\n print(answer)\n return answer[0][1]\n\ndef main():\n T = int(input())\n for tc in range(1, T + 1):\n N = int(input())\n arr = [list(map(int, input().split())) for 
_ in range(N)]\n core = []\n #find core except line\n for i in range(1,N-1):\n for j in range(1,N-1):\n if arr[i][j] == 1:\n core.append((i,j))\n\n print(f'#{tc}', solution(N,arr,core))\n\n\nmain()","repo_name":"kongji9847/Algorithm","sub_path":"Algorithm_practice/202203/0311/1762_SWEA_프로세서연결/s4.py","file_name":"s4.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"} +{"seq_id":"72493184625","text":"import numpy as np\nimport pandas as pd\nimport keras\nimport h5py\nfrom keras.utils import plot_model\nfrom keras.optimizers import SGD\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.models import load_model\nfrom model import CVNShortSimple, CVNShortSimple_Dense\nfrom model_resnet import ResNet\nfrom model_cnn import ShortCNN\nfrom generator import generator\nfrom args import parse_args\nfrom hdf5tools import produce_labeled_h5s\nfrom eval import plot_confusion_matrix, GetTrainingSet, plot_loss\nfrom dask.distributed import Client, progress\nimport dask.array as da\n\nclass ModelCNN:\n\n def __init__(self, name):\n self.name = name\n self.args = parse_args()\n\n def Train(self):\n args = self.args\n traindir = args.traindir\n validdir = args.validdir\n\n # Preprocess the labels if the -l flag is passed to train.py\n #if args.label:\n # traindir = produce_labeled_h5s(args.traindir, args.reducecosmics)\n # testdir = produce_labeled_h5s(args.testdir, args.reducecosmics)\n\n # The generator objects for the train and test sets\n train_batch = generator(traindir, args.trbatchsize)\n test_batch = generator(validdir, args.tsbatchsize)\n\n # Initialize the model, optimizer, and callbacks then train\n\n if self.name == \"CVN\":\n model = CVNShortSimple()\n elif self.name == \"CVN_Dense\":\n model = CVNShortSimple_Dense() \n elif self.name == \"ResNet\":\n model = ResNet()\n elif self.name == \"ShortCNN\":\n model = ShortCNN()\n\n print(model.summary())\n\n optimizer = SGD(lr=args.baselr, momentum=args.momentum)\n model.compile(optimizer=optimizer,\n loss={'output':'categorical_crossentropy'},\n loss_weights={'output' : 1.0},\n metrics=['accuracy'])\n tensorboard = keras.callbacks.TensorBoard(log_dir='./logs/out_weight_{}'.format(1.0))\n history = model.fit_generator(generator=train_batch, steps_per_epoch=args.ntrsteps,\n epochs=args.nepochs, verbose=1, callbacks=[tensorboard, ModelCheckpoint('Models/model_{0}_weights.h5'.format(self.name), save_best_only=True),\n EarlyStopping(patience=10)],\n validation_data=test_batch, validation_steps=args.ntssteps)\n\n\n\n # update the model with the best weights\n model.load_weights('Models/model_{0}_weights.h5'.format(self.name))\n\n # save the best model\n print(\"... saving model {0} ...\".format(self.name))\n model.save('Models/{0}.h5'.format(self.name))\n\n print(\"... saving model history ...\".format(self.name))\n hist_df = pd.DataFrame(history.history) \n hist_csv_file = 'Models/history_{0}.csv'.format(self.name)\n with open(hist_csv_file, mode='w') as f:\n hist_df.to_csv(f)\n\n\n def Predict(self,path, large = False): \n\n path_model = 'Models/{0}.h5'.format(self.name)\n print(\"... loading model from ... \", path_model)\n model = load_model(path_model)\n if large == False:\n print(\"... getting evaluation dataset from ... \", path)\n X1, X2, y = GetTrainingSet(path)\n print(\"... calculating predictions ... \") \n y_pred = model.predict([X1,X2], verbose=1)\n elif large == True:\n print(\"... getting evaluation dataset from ... 
\", path)\n X1, X2, y = GetTrainingSet(path)\n print(\"... calculating predictions ... \") \n y_pred = model.predict([X1,X2], verbose=1)\n\n\n print(\"... saving predictions ... \")\n hf = h5py.File('Predictions/{m}_{p}.h5'.format(m=self.name,p=str(path[-12:-3])), 'w')\n hf.create_dataset('y_pred', data=y_pred)\n hf.create_dataset('y_true', data=y)\n hf.close()\n\n\n\n\n\n","repo_name":"kubumiro/NOvA_CNN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"32340960573","text":"def on_overlap_tile(sprite, location):\n game.over(True)\nscene.on_overlap_tile(SpriteKind.player,\n assets.tile(\"\"\"\n myTile1\n \"\"\"),\n on_overlap_tile)\n\ndef on_overlap_tile2(sprite2, location2):\n game.over(False)\nscene.on_overlap_tile(SpriteKind.player,\n assets.tile(\"\"\"\n myTile4\n \"\"\"),\n on_overlap_tile2)\n\ndef on_a_pressed():\n if monkey1.is_hitting_tile(CollisionDirection.BOTTOM):\n monkey1.vy = -320\ncontroller.A.on_event(ControllerButtonEvent.PRESSED, on_a_pressed)\n\ndef 初始化变量():\n global 当前关卡, 关卡总量\n 当前关卡 = 1\n 关卡总量 = 3\ndef 更新地图():\n if 当前关卡 == 1:\n tiles.set_current_tilemap(tilemap(\"\"\"\n 级别1\n \"\"\"))\n elif 当前关卡 == 2:\n tiles.set_current_tilemap(tilemap(\"\"\"\n 级别5\n \"\"\"))\n elif 当前关卡 == 3:\n tiles.set_current_tilemap(tilemap(\"\"\"\n 级别6\n \"\"\"))\n关卡总量 = 0\n当前关卡 = 0\nmonkey1: Sprite = None\nscene.set_background_color(9)\nmonkey1 = sprites.create(assets.image(\"\"\"\n monkey1\n\"\"\"), SpriteKind.player)\ncontroller.move_sprite(monkey1, 100, 0)\nscene.camera_follow_sprite(monkey1)\nmonkey1.ay = 980\ntiles.set_current_tilemap(tilemap(\"\"\"\n 级别1\n\"\"\"))","repo_name":"ediema1/ediema1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"71966326066","text":"import sys\n\nfrom . 
import util\n\n\ndef parse_input(lines):\n deck = []\n all_decks = []\n for line in lines:\n if line.startswith(\"Player\"):\n if deck:\n all_decks.append(deck)\n deck = []\n else:\n deck.insert(0, int(line))\n all_decks.append(deck)\n return all_decks\n\n\ndef calculate_score(deck):\n score = 0\n for idx, card in enumerate(deck):\n score += (idx + 1) * card\n return score\n\n\ndef deal_cards(all_decks):\n return [(player_idx, deck.pop()) for player_idx, deck in enumerate(all_decks) if deck]\n\n\ndef give_cards_to_winner(all_decks, played_cards, winning_player_idx=None):\n def sort_key(x, idx=winning_player_idx): return sys.maxsize if x[0] == idx else x[1]\n sorted_cards = sorted(played_cards, key=sort_key, reverse=True)\n if winning_player_idx is None:\n winning_player_idx = sorted_cards[0][0]\n for _, card in sorted_cards:\n all_decks[winning_player_idx].insert(0, card)\n\n\ndef serialize_deck_state(all_decks):\n return '|'.join(','.join(str(card) for card in deck) for deck in all_decks)\n\n\ndef play_game(all_decks, allow_recursion=False, encountered_deck_states=None):\n if encountered_deck_states is None:\n encountered_deck_states = set()\n num_cards = sum(len(deck) for deck in all_decks)\n while max(len(deck) for deck in all_decks) < num_cards:\n state = serialize_deck_state(all_decks)\n if state in encountered_deck_states:\n # player 1 wins\n return 0, calculate_score(all_decks[0])\n encountered_deck_states.add(state)\n played_cards = deal_cards(all_decks)\n should_recurse = allow_recursion\n if allow_recursion:\n for player_idx, card in played_cards:\n if len(all_decks[player_idx]) < card:\n should_recurse = False\n break\n if should_recurse:\n sub_decks = []\n for player_idx, card in played_cards:\n sub_decks.append(all_decks[player_idx][-card:])\n # each sub-game tracks its own history of seen deck states\n winning_player_idx, _ = play_game(sub_decks, allow_recursion=True)\n give_cards_to_winner(all_decks, played_cards, winning_player_idx)\n else:\n give_cards_to_winner(all_decks, played_cards)\n for player_idx, deck in enumerate(all_decks):\n if deck:\n return player_idx, calculate_score(deck)\n\n\ndef get_part1_answer(lines):\n all_decks = parse_input(lines)\n return play_game(all_decks)[1]\n\n\ndef get_part2_answer(lines):\n all_decks = parse_input(lines)\n return play_game(all_decks, allow_recursion=True)[1]\n\n\ndef run():\n lines = util.get_input_file_lines(\"day22.txt\")\n print(f\"The answer to part 1 is {get_part1_answer(lines)}\")\n print(f\"The answer to part 2 is {get_part2_answer(lines)}\")\n","repo_name":"askrepps/advent-of-code-2020","sub_path":"advent2020/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"72987056625","text":"#!/usr/bin/env python\r\n#--*-- coding:utf-8 --*-- \r\nimport portscan as ps\r\nimport fcntl\r\nimport json\r\nimport os \r\nimport sys\r\nimport getopt\r\nimport multiprocessing\r\nfrom threading import Thread, Lock\r\nfrom Queue import Queue\r\nimport datetime\r\nimport time\r\nfrom collections import OrderedDict, defaultdict\r\nimport codecs\r\nimport types\r\nimport socket\r\nfrom itertools import chain\r\n\r\nsub_num = multiprocessing.cpu_count()\r\ndir0 = os.getcwd()\r\nq = Queue()\r\nglock = Lock()\r\nfile = None\r\n\r\ndef mksubfile(lines, sfile, sub):\r\n dfile = sfile + '_' + str(sub)\r\n print( \"make sub file:{}\".format(sub))\r\n with open(dfile, 'w') as fout:\r\n fout.writelines(lines)\r\n sub = sub + 1\r\n return sub\r\n\r\n\r\ndef splitfile(file):\r\n 
print(\"\\n splitting the file now ...\")\r\n\r\n time_start = time.time()\r\n with open(file, 'r') as f0:\r\n for i, line in enumerate(f0):\r\n pass\r\n line_cnt = i + 1\r\n\r\n line_size = line_cnt / sub_num\r\n print(\"\\n Total lines cnt: {}\".format(line_cnt))\r\n\r\n sub = 0\r\n lines = []\r\n with open(file, 'r') as f0:\r\n for eachline in f0: \r\n if len(lines) == line_size:\r\n if sub + 1 < int(sub_num):\r\n sub = mksubfile(lines, file, sub)\r\n lines = []\r\n else:\r\n pass\r\n lines.append(eachline)\r\n\r\n if len(lines) != 0:\r\n mksubfile(lines, file,sub)\r\n time_end = time.time()\r\n cost = time_end - time_start\r\n print(\"\\n Split Done , cost {}\".format(cost))\r\n\r\n return line_cnt\r\n\r\ndef scan(ip, port_max, port_min,num):\r\n dic = OrderedDict()\r\n interval = (port_max - port_min) // num + 1\r\n threads = [ps.scanThread(ip, i * num, (i+1) * num) for i in xrange(interval)]\r\n map(lambda x:x.start(),threads)\r\n map(lambda x:x.join(),threads)\r\n res = [threads[i].getports() for i in xrange(interval)]\r\n results = list(chain(*res))\r\n return results\r\n\r\n\r\n\r\ndef Worker_helper(sfile, sub, port_max, port_min,num):\r\n subfile = sfile + '_' + str(sub)\r\n str0 = dir0 + '/input/' + subfile \r\n dir1= dir0 + '/out1/'\r\n str1 = dir1 + subfile + \"_open\"\r\n log = dir1 + file + '.log'\r\n\r\n ip_ports = {}\r\n list1 =[]\r\n\r\n with open(str0, 'r')as f:\r\n for eachline in f:\r\n ip = eachline.strip()\r\n try:\r\n output = scan(ip, port_max, port_min,num)\r\n except Exception as e:\r\n print(\"error:{} occured at ip:{}\".format(e, ip))\r\n with open(log, 'a') as flog:\r\n fcntl.flock(flog.fileno(), fcntl.LOCK_EX)\r\n flog.write(\"error: {} at ip:{} during scan\\n\".format(e, ip))\r\n else:\r\n ip_ports[ip] = []\r\n ip_ports[ip] = output\r\n list1.append(json.dumps(ip_ports) + '\\n')\r\n ip_ports.clear()\r\n if len(list1) == 200:\r\n with open(str1, 'a') as results1:\r\n results1.writelines(list1)\r\n list1 = []\r\n\r\n #print(\"subfile {}'s unknown domains num:{}\".format(sub,len(list3)))\r\n if len(list1) != 0:\r\n with open(str1, 'a') as results1:\r\n results1.writelines(list1)\r\n\r\n\r\ndef main(argv):\r\n global file\r\n \r\n if not os.path.exists('./out1/'):\r\n os.makedirs('./out1/')\r\n\r\n try:\r\n opts,args = getopt.getopt(argv, \"-h-f:-x-i:-a:-n:\", [\"help\",\"file=\", \"need to extract\", \"port_min=\", \"port_max=\", \"[batch_num=\"])\r\n except getopt.GetoptError:\r\n print(\"test.py -f -x -i -a -n \")\r\n sys.exit()\r\n\r\n for opt_name,opt_value in opts:\r\n if opt_name in ('-h','--help'):\r\n print(\"test.py -f -i -a -n \")\r\n sys.exit()\r\n elif opt_name in ('-f','--file'):\r\n file = opt_value\r\n elif opt_name in ('-i', '--port_min'):\r\n port_min = int(opt_value)\r\n elif opt_name in('-a', '--port_max'):\r\n port_max = int(opt_value)\r\n elif opt_name in('-n', '--batch_num'):\r\n num = int(opt_value)\r\n else:\r\n print(\"test.py -f -i -a -n \") \r\n\r\n \r\n str0 = dir0 + '/input/' + file + '_IPs'\r\n log = dir0 + '/out1/' + file + '.log'\r\n total_ips = splitfile(str0)\r\n\r\n #pool number default equal to cpu kernels and scan\r\n time_start = time.time()\r\n pool = multiprocessing.Pool(sub_num)\r\n for i in xrange(sub_num):\r\n pool.apply_async(Worker_helper, (file + '_IPs', i, port_max, port_min,num)) \r\n pool.close()\r\n pool.join()\r\n time.sleep(5)\r\n\r\n time_end = time.time()\r\n cost1 = time_end - time_start\r\n\r\n print(\"\\n-----combining the results now...-----\")\r\n time_start = time.time()\r\n timestamp = 
datetime.datetime.now().strftime('%m%d')\r\n dir1 = dir0 + '/out1/' \r\n str1 = dir1 + file + '_open'\r\n\r\n with open(str1, 'w') as results1:\r\n for i in xrange(sub_num):\r\n try:\r\n with open(dir1 + file + '_IPs_' + str(i) + '_open','r') as f1:\r\n content1 = f1.read()\r\n results1.write(content1)\r\n except Exception as e:\r\n continue\r\n\r\n time_end = time.time()\r\n cost2 = time_end - time_start\r\n print(\"\\nfile: {}, total IPs:{}\".format(file, total_ips))\r\n print(\"\\nPortScanV1.3: domain file:{},scan cost: {}, combine cost:{}\".format(file,cost1,cost2))\r\n\r\n with open(log, 'a') as flog:\r\n flog.write(\"\\nPortScanV1.3: file:{},IPs:{}, scan cost:{} s, combine cost:{}\".format(file, total_ips, cost1, cost2))\r\n\r\n Clear_helper()\r\n\r\n\r\ndef Clear_helper():\r\n str0 = dir0 + '/input/' + file \r\n str00 = str0 + '_IPs'\r\n dir1 = dir0 + '/out1/'\r\n\r\n # try:\r\n # os.remove(str00)\r\n # except:\r\n # pass\r\n\r\n for i in xrange(sub_num):\r\n subfile = str00 + '_' + str(i)\r\n str1 = dir1 + file + '_IPs_' + str(i) + '_open'\r\n\r\n try:\r\n os.remove(str1)\r\n except:\r\n pass\r\n try:\r\n os.remove(subfile)\r\n except:\r\n pass\r\n\r\n print(\"\\nclear work have done!\")\r\n\r\n\r\n#the input file is whois result files ,extract the domains\r\ndef Extract_domain(file):\r\n str0 = dir0 + '/input/' + file\r\n domains=[]\r\n\r\n with open(str0, 'r') as f0:\r\n for eachline in f0:\r\n record = json.loads(eachline.strip())\r\n for key in record.keys():\r\n domains.append(key + '\\n')\r\n str1 = dir0 + '/input/' + file + '_domains' \r\n with open(str1, 'w') as fout:\r\n fout.writelines(domains)\r\n return file + '_domains' \r\n\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n\r\n\r\n","repo_name":"xn1012/fastscan","sub_path":"fastscan-v1.3.py","file_name":"fastscan-v1.3.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"34504167109","text":"from time import sleep\n# import datetime\nimport picamera\n# from picamera import PiCamera as camera\n\n# event_duration is the time, in seconds, over which filming will occur.\n# In other words, sunrise at 7AM thru sunset at 7PM would be a 12 hour event_duration or 12x3600=43200 seconds.\n# video_length is the length of video desired (in seconds) assuming video frames per second (video_fps) settings.\n# set event_duration OR video_length, not both. 
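If using event_duration, video_length must be set to None.\n# Added worked example: the 12-hour (43200 s) sunrise-to-sunset case above, shooting one photo\n# every 30 s, yields 43200/30 = 1440 frames; at video_fps = 10 that plays back as 1440/10 = 144 s of video.\n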
# wait_time is the amount of time between photos (in seconds) and is used in conjunction with event_duration.\nevent_duration = 60\nwait_time = 1\nvideo_length = None\nvideo_fps = 10\n\n# The remaining code can be left as is unless something different is desired.\nif video_length:\n number_of_pictures = video_length * video_fps\nelse:\n number_of_pictures = event_duration / wait_time\n\nwith picamera.PiCamera() as camera:\n try:\n for picture, filename in enumerate(camera.capture_continuous('/home/pi/Documents/timelapse/pictures/img_{timestamp:%Y%m%d_%H%M%S}.jpg')):\n print('picture number %d of %d' % (picture, number_of_pictures))\n sleep(wait_time)\n if picture >= number_of_pictures:\n break\n finally:\n camera.close()\n","repo_name":"tlsherwood/rpi3-timelapse","sub_path":"timelapse.py","file_name":"timelapse.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"40482770125","text":"from flask import Flask, make_response, render_template, request, redirect, url_for, session, flash, send_file, stream_template\nfrom flask_session import Session\nfrom flask.views import View\nimport requests\nfrom pydantic import BaseModel\nimport datetime\nfrom flask_caching import Cache\nimport configparser\n\nconfig = {\n \"DEBUG\": True, # some Flask specific configs\n \"CACHE_TYPE\": \"SimpleCache\", # Flask-Caching related configs\n \"CACHE_DEFAULT_TIMEOUT\": 300\n}\n\napp = Flask(__name__)\nSESSION_TYPE = 'filesystem'\napp.config.from_object(__name__)\n\napp.config.from_mapping(config)\ncache = Cache(app)\ncache.init_app(app)\nSession(app)\n\n#import config.ini api_url variable from FASTAPI\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\napi_url = config['FASTAPI']['api_url']\n\n\ndef set_cookie(response):\n session['token'] = response.json()['token']\n session['type'] = response.json()['type']\n session['username'] = response.json()['username']\n\ndef validate_token():\n token = get_token()\n response = requests.get(api_url + '/token', headers = {'Authorization': 'Bearer ' + token})\n if response.status_code == 200:\n return True\n else:\n return False\n\ndef get_token():\n token = session.get('token', 'No token')\n return token\n\ndef get_username():\n username = session.get('username', 'anonymous')\n return username\n\ndef get_intervals():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/config/interval/all', headers = {'Authorization': 'Bearer ' + token}, params={'token': token})\n if response.status_code == 200:\n intervals = response.json()[\"intervals\"]\n return intervals\n else:\n return []\n \ndef get_types():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/config/type/all', headers = {'Authorization': 'Bearer ' + token}, params={'token': token})\n if response.status_code == 200:\n types = response.json()[\"types\"]\n return types\n else:\n return []\n\n@cache.cached(timeout=120, key_prefix='all_directories')\ndef get_all_directories():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/directory/all', headers = {'Authorization': 'Bearer ' + token})\n directories_full = response.json().get('directories', 'No directories')\n #get all directory_name from directories\n directories = [directory['directory_name'] for directory in directories_full]\n if response.status_code == 200:\n return 
directories, directories_full\n\n@cache.cached(timeout=120, key_prefix='disk_space')\ndef get_disk_space():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/disk/space', headers = {'Authorization': 'Bearer ' + token})\n chart_data = response.json()\n return chart_data\n\n\ndef get_folder_size(directory):\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/directory/size/', headers = {'Authorization': 'Bearer ' + token}, params={'folder_name': directory})\n folder_size = response.json()\n return folder_size\n\n@cache.cached(timeout=120, key_prefix='get_all_folders_size_cache')\ndef get_all_folders_size():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/directory/size/all', headers = {'Authorization': 'Bearer ' + token})\n all_folders_size = response.json()\n return all_folders_size\n\n\ndef is_admin_from_cache():\n isAdmin = session.get('type', 'No type')\n if isAdmin == 'admin':\n return True\n else:\n return False\n\n@app.template_filter('format_date')\ndef format_date(value, format):\n date = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')\n if format == 'short':\n format = \"%d/%m/%Y\"\n return date.strftime(format)\n elif format == 'long':\n format = \"%d/%m/%Y %H:%M:%S\"\n return date.strftime(format)\n elif format == 'time':\n format = \"%H:%M:%S\"\n return date.strftime(format)\n\ndef get_all_configs():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/config/all', headers = {'Authorization': 'Bearer ' + token}, params={'token': token})\n all_configs = response.json()[\"all_configs\"]\n return all_configs\n\ndef get_all_backups():\n if validate_token() == True:\n token = get_token()\n response = requests.get(api_url + '/backup/all', headers = {'Authorization': 'Bearer ' + token}, params={'token': token})\n backups = response.json()[\"backups\"]\n return backups\n\nclass FlaskCache:\n def __init__(self, cache):\n self.cache = cache\n \n def delete_cache(cache_name):\n cache.delete(cache_name)\n\n@app.route(\"/\")\ndef index():\n if validate_token() == True:\n return redirect(url_for('startpage'))\n else:\n return render_template('index.html', title='Home')\n\n@app.route('/gettoken/')\ndef get():\n return session.get('token', 'No token')\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user_object = {\n 'username': username,\n 'password': password\n }\n response = requests.post(api_url + '/login', json=user_object)\n if response.status_code == 200:\n set_cookie(response)\n return redirect(url_for('startpage'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('login'))\n else:\n return render_template('login.html', title='Login')\n\n@app.route(\"/logout\", methods=['POST'])\ndef logout():\n token = session.get('token', 'No token')\n token_object = {\n 'token': token\n }\n response = requests.post(api_url + '/logout', headers = {'Authorization': 'Bearer ' + token}, json=token_object)\n if response.status_code == 200:\n session.clear()\n return redirect(url_for('index'))\n else:\n raise Exception(response.json()['detail'])\n \n\n@app.route(\"/adduser\", methods=['GET', 'POST'])\ndef adduser():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n email = request.form['email']\n\n user_object = {\n 'username': 
username,\n 'password': password,\n 'email': email\n }\n response = requests.post(api_url + '/user', json=user_object)\n if response.status_code == 200:\n flash('Usuário adicionado com sucesso!')\n return redirect(url_for('login'))\n else:\n flash('Erro ao adicionar usuário!')\n return redirect(url_for('adduser'))\n\n return render_template('adduser.html')\n\n@app.route(\"/startpage\", methods=['GET', 'POST'])\ndef startpage():\n if validate_token() == True:\n flashes = session.get('_flashes', [])\n print(flashes)\n return render_template('start_page.html', title=\"Inicio\")\n else:\n return redirect(url_for('index'))\n\n@app.route(\"/adddirectory\", methods=['POST'])\ndef adddirectory():\n if validate_token() == True:\n directory_name = request.form['folderName']\n directory_path = request.form['folderPath']\n username = get_username()\n token = get_token()\n directory_object = {\n 'directory_name': directory_name,\n 'directory_path': directory_path,\n 'username': username\n }\n response = requests.post(api_url + '/directory', headers={'Authorization': 'Bearer ' + token}, json=directory_object)\n if response.status_code == 200:\n FlaskCache.delete_cache('all_directories')\n flash('Directory added', 'success')\n return redirect(url_for('startpage'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('startpage'))\n\n@app.route(\"/deletedirectory\", methods=['POST'])\ndef deletedirectory():\n if validate_token() == True:\n directory_name = request.form['selected_directory']\n username = get_username()\n token = get_token()\n directory_object = {\n 'directory_name': directory_name\n }\n response = requests.delete(api_url + '/directory', headers={'Authorization': 'Bearer ' + token}, json=directory_object)\n if response.status_code == 200:\n FlaskCache.delete_cache('all_directories')\n return redirect(url_for('startpage'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('startpage'))\n\n@app.route(\"/listdirectoryfiles\", methods=['GET', 'POST'])\ndef listdirectoryfiles():\n if validate_token() == True:\n if request.method == 'GET':\n directory_name = request.args.get('directory_name')\n token = get_token()\n directory_object = {\n 'directory_name': directory_name\n }\n folder_size = get_folder_size(directory_name)\n response = requests.get(api_url + '/directory/file/all', headers={'Authorization': 'Bearer ' + token}, json=directory_object)\n if response.status_code == 200:\n files = response.json().get('files', 'No files')\n return render_template('files.html', files=files, folder_size=folder_size, title=directory_name)\n else:\n flash(\"Pasta não encontrada\", 'error')\n return redirect(url_for('startpage'))\n\n@app.route(\"/downloadfile\", methods=['GET', 'POST'])\ndef downloadfile():\n if validate_token() == True:\n if request.method == 'GET':\n file_path = request.args.get('file_path_download')\n file_name = request.args.get('file_name_download')\n token = get_token()\n response = requests.get(api_url + '/downloadfile', headers={'Authorization': 'Bearer ' + token}, params={'file_path': file_path})\n if response.status_code == 200:\n return send_file(file_path, as_attachment=True, download_name=file_name)\n else:\n flash(response.json(), 'error')\n #keep user in the same page\n return redirect(request.referrer)\n\n@app.route(\"/change_user_password\", methods=['GET', 'POST'])\ndef change_user_password():\n if validate_token() == True:\n if request.method == 'POST':\n username = get_username()\n token = get_token()\n old_password = 
request.form['old_password']\n new_password = request.form['new_password']\n user_object = {\n 'username': username,\n 'old_password': old_password,\n 'new_password': new_password,\n 'token': token\n }\n response = requests.put(api_url + '/user/password', headers={'Authorization': 'Bearer ' + token}, json=user_object)\n if response.status_code == 200:\n flash(response.json()['detail'], 'success')\n return redirect(url_for('startpage'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('startpage'))\n\n@app.route(\"/config\", methods=['GET', 'POST'])\ndef config():\n if validate_token() == True and is_admin_from_cache() == True:\n if request.method == 'GET':\n users = list_users()\n configs = get_all_configs()\n return render_template('config.html', users=users, configs=configs, title=\"Configurações\")\n else:\n return redirect(url_for('startpage'))\n\n@app.route(\"/backup\", methods=['GET', 'POST'])\ndef backup():\n if validate_token() == True and is_admin_from_cache() == True:\n if request.method == 'GET':\n backups = get_all_backups()\n return render_template('backup.html', title=\"Backup\", backups=backups)\n else:\n return redirect(url_for('startpage'))\n\n@app.route(\"/config//\", methods=['POST'])\ndef config_user_authorized(username, authorized):\n if validate_token() == True:\n if request.method == 'POST':\n token = get_token()\n if authorized == 1:\n authorized_status = True\n else:\n authorized_status = False\n user_object = {\n 'username': username,\n 'autorized': authorized_status\n }\n response = requests.put(api_url + '/user/authorized', headers = {'Authorization': 'Bearer ' + token}, params={'token':token} ,json=user_object)\n if response.status_code == 200:\n flash(response.json()['detail'], 'success')\n return redirect(url_for('config'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('config'))\n\n@app.route(\"/config/user\", methods=['POST'])\ndef config_user_update_fields():\n if validate_token() == True and is_admin_from_cache() == True:\n if request.method == 'POST':\n token = get_token()\n old_username = request.form['oldUserName']\n username = request.form['userName']\n email = request.form['userEmail']\n userType = request.form['userType']\n\n #check if username, email or userType is empty\n if username == '' or email == '' or userType == '':\n flash(\"Preencha todos os campos\", 'error')\n return redirect(url_for('config'))\n else:\n response_name = requests.put(api_url + '/user/name', headers={'Authorization': 'Bearer ' + token}, params={'token':token} ,json={'old_username': old_username, 'new_username': username})\n response_email = requests.put(api_url + '/user/email', headers={'Authorization': 'Bearer ' + token}, params={'token':token} ,json={'username':username, 'email': email})\n response_type = requests.put(api_url + '/user/type', headers={'Authorization': 'Bearer ' + token}, params={'token':token} ,json={'username':username, 'type': userType})\n\n if response_name.status_code == 200 or response_email.status_code == 200 or response_type.status_code == 200:\n flash(\"Informações atualizadas com sucesso\", 'success')\n return redirect(url_for('config'))\n else:\n flash(\"Erro ao atualizar informações\", 'error')\n return redirect(url_for('config'))\n\n@app.route(\"/list_users\", methods=['GET', 'POST'])\ndef list_users():\n if validate_token() == True:\n if request.method == 'GET':\n token = get_token()\n response = requests.get(api_url + '/user/all', headers={'Authorization': 'Bearer ' + token}, 
params={'token':token})\n if response.status_code == 200:\n users = response.json().get('users', 'No users')\n return users\n else:\n return ''\n\n@app.route(\"/delete_user/\", methods=['POST'])\ndef delete_user(username):\n if validate_token() == True:\n if request.method == 'POST':\n token = get_token()\n response = requests.delete(api_url + '/user/', headers={'Authorization': 'Bearer ' + token}, params={'token':token}, json={'username': username})\n if response.status_code == 200:\n flash('User deleted', 'success')\n return redirect(url_for('config'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('config'))\n\n\n@app.route(\"/config/config\", methods=['POST'])\ndef config_config_update_fields():\n if validate_token() == True and is_admin_from_cache() == True:\n if request.method == 'POST':\n token = get_token()\n config_name = request.form['configName']\n config_value = request.form['configValue']\n #check if config_name or config_value is empty\n if config_name == '' or config_value == '':\n flash(\"Preencha todos os campos\", 'error')\n return redirect(url_for('config'))\n else:\n response = requests.put(api_url + '/config', headers={'Authorization': 'Bearer ' + token}, params={'token':token} ,json={'config_name': config_name, 'config_value': config_value})\n if response.status_code == 200:\n backups = response.json()\n return backups\n else:\n return ''\n\n@app.route(\"/add_backup\", methods=['GET', 'POST'])\ndef add_backup():\n if validate_token() == True and is_admin_from_cache() == True:\n if request.method == 'POST':\n token = get_token()\n username = get_username()\n backup = {\n 'backup_name': request.form['backupName'],\n 'backup_path': request.form['backupPath'],\n 'time': request.form['backupTime'],\n 'interval': request.form['backupInterval'],\n 'day': '',\n 'connection_string': request.form['backupString'],\n 'backup_type': request.form['backupType'],\n 'backup_user': request.form['backupUser'],\n 'backup_password': request.form['backupPassword'],\n 'username': username\n }\n response = requests.post(api_url + '/backup', headers={'Authorization': 'Bearer ' + token}, params={'token':token} ,json=backup)\n if response.status_code == 200:\n flash('Backup added', 'success')\n return redirect(url_for('backup'))\n else:\n flash(response.json()['detail'], 'error')\n return redirect(url_for('backup'))\n#Jinja2 global functions\napp.jinja_env.globals.update(get_disk_space=get_disk_space, isAdmin=is_admin_from_cache, get_all_directories=get_all_directories, get_all_folders_size=get_all_folders_size, get_intervals=get_intervals, get_types=get_types)","repo_name":"pedroluizmossi1/File-Remote-OS-Downloader","sub_path":"main_flask.py","file_name":"main_flask.py","file_ext":"py","file_size_in_byte":18257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"45662994661","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom tensorflow import keras\r\n\r\nclass myCallback(tf.keras.callbacks.Callback):\r\n def on_epoch_end(self, epoch, logs={}):\r\n if logs.get('loss') < 0.4:\r\n print('\\nLoss is low so cancelling training!')\r\n self.model.stop_training = True\r\ncallbacks = myCallback()\r\nfmnist = tf.keras.datasets.fashion_mnist\r\n\r\n\r\n\r\n(training_images, training_lables),(test_images, test_labels) = fmnist.load_data()\r\ntraining_images = training_images / 255.0\r\ntest_images = test_images / 255.0\r\ntraining_images = training_images.reshape(-1,28,28,1)\r\ntest_images = 
test_images.reshape(-1,28,28,1)\r\nmodel = tf.keras.models.Sequential([tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu', input_shape = (28, 28, 1)),\r\n tf.keras.layers.MaxPooling2D(2, 2),\r\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n tf.keras.layers.Flatten(),\r\n tf.keras.layers.Dense(128, activation='relu'),\r\n tf.keras.layers.Dense(10, activation='softmax')])\r\nprint(model.summary())\r\n\r\nmodel.compile(optimizer ='adam',\r\n loss = 'sparse_categorical_crossentropy',\r\n metrics = ['accuracy'])\r\n\r\nmodel.fit(training_images,training_lables, epochs=15, callbacks=[callbacks])\r\nprint(model.evaluate(test_images, test_labels))\r\n\r\n\r\n\r\n\r\n'''\r\n\r\n#this over here is a simple 1 nueron nueral network to accomplish a simple task of figuring out a pattern\r\n\r\nmodel = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\r\nmodel.compile(optimizer='sgd', loss='mean_squared_error')\r\nxs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)\r\nys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)\r\nmodel.fit(xs, ys, epochs=600)\r\nprint(model.predict([10.0]))\r\n'''\r\n\r\n'''\r\nfmnist = tf.keras.datasets.fashion_mnist\r\n\r\n(training_images, training_lables),(test_images, test_labels) = fmnist.load_data()\r\n\r\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\r\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\r\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\r\nmodel.compile(optimizer = 'adam',\r\n loss = 'sparse_categorical_crossentropy',\r\n metrics = ['accuracy'])\r\n\r\nmodel.fit(training_images,training_lables, epochs=15, callbacks=[callbacks])\r\nprint(model.evaluate(test_images, test_labels))\r\n'''","repo_name":"samahAlbast/Arabic-negotiator-","sub_path":"tensorflowcourse.py","file_name":"tensorflowcourse.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"36345003975","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nxvals = (1, 2, 4, 8, 16, 32)\nyvals1 = (1.918499, 1.580462, 1.813013, 1.808579, 1.729736, 1.729384,)\nyvals2 = (0.618573, 0.545513, 0.574069, 0.459962, 0.567306, 0.519556,)\n\nyprops = dict(rotation=0, y=1.05, horizontalalignment='left')\n\nplt.subplot(111,axisbg='#BBBBBB',alpha=0.1)\nplt.grid(color='white', alpha=0.5, linewidth=2, linestyle='-', axis='y')\n\nfor spine_name in ['top', 'left', 'right']:\n plt.gca().spines[spine_name].set_color('none')\n \nplt.ylabel('Performance [flops/cycle]', **yprops)\nplt.xlabel('N [doubles]')\n\nplt.gca().tick_params(direction='out', length=0, color='k')\n\nplt.plot(xvals, yvals1, 'bo-', linewidth=2)\nplt.plot(xvals, yvals2, 'go-', linewidth=2)\nplt.gca().set_axisbelow(True)\n\nplt.savefig('test.png', dpi=300)\n\nplt.show()\n\n","repo_name":"kaisert/DPHPC-project","sub_path":"utils/pueschelplot.py","file_name":"pueschelplot.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"39073505950","text":"import torch\nimport cv2\nfrom model_tr import Net\n\nPATH = \"model/model.pth\"\nIMAGE_PATH = \"cropped_images/croppedsavedImage_9.png\"\nimg = cv2.imread(IMAGE_PATH)\nprint(img.shape)\nimg = cv2.resize(img, (200, 200))\nfilename = 'savedImage_test.png'\ncv2.imwrite(filename, img)\n\n# Model class must be defined somewhere\nmodel = Net(input_size = (200,200,3), 
output_size = 26)\nmodel.load_state_dict(torch.load(PATH))\n\n\nshape_img = img.shape\nimg = img.reshape(shape_img[2], shape_img[0], shape_img[1])\nimg = torch.FloatTensor(img)\nt = model(img)\nprint(img.shape)\nprint(t.shape)\nmove = torch.argmax(t).item()\nprint(move)","repo_name":"Laveen-exe/Character-RL","sub_path":"Snake-AI/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"14383666592","text":"import numpy as np\nfrom .base import BaseLoss\n\n\nclass MarginLoss(BaseLoss):\n def __init__(self, margin: float = 1.):\n self._margin: float = margin\n\n def __call__(self, *args, **kwargs):\n y_pred, y = np.array(args[0]), np.array(args[1])\n num_classes = np.max(y) + 1\n loss = 0\n\n for i, y_batch in enumerate(y):\n for k, y_true in enumerate(y_batch):\n li = 0\n for j in range(num_classes):\n true_class = y_true[j]\n if true_class != 1:\n pred_class = y_pred[i][k][j]\n li += np.maximum([0.], pred_class - true_class + self._margin)\n loss += li\n\n return loss/(y.shape[0] * y.shape[1])\n\n\nif __name__ == '__main__':\n y = [\n [\n [-1, 1],\n [-1, 1]\n ],\n [\n [-1, 1],\n [1, -1]\n ]\n ]\n\n y_pred = [\n [\n [1, 1],\n [-1, 1]\n ],\n [\n [-1, 1],\n [1, -1]\n ]\n ]\n\n loss = MarginLoss()\n print(loss(y_pred, y))\n","repo_name":"MakhmoodSodikov/somovlearn","sub_path":"loss/margin.py","file_name":"margin.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"93"} +{"seq_id":"34866607031","text":"import importlib\nimport os\nfrom buildfly.utils.string_utils import camelize\n\n\nclass BuildManager(object):\n def __init__(self):\n pass\n\n def build(self, app_dep):\n code_dir = app_dep.get_code_dir()\n install_dir = app_dep.get_install_dir()\n cmds = app_dep.cmds\n if cmds:\n build_type = 'custom'\n else:\n build_type = self.detect_build_type(code_dir)\n build_class = build_type + \"_build\"\n build_module = importlib.import_module(\"buildfly.build.\" + build_class)\n build_obj = getattr(build_module, camelize(build_class))()\n build_obj.build(app_dep, code_dir, install_dir)\n\n def detect_build_type(self, code_dir):\n code_files = os.listdir(code_dir)\n if \"CMakeLists.txt\" in code_files:\n return \"cmake\"\n elif \"configure\" in code_files:\n return \"configure_make\"\n elif \"makefile\" in code_files or \"Makefile\" in code_files:\n return \"makefile\"\n else:\n raise Exception(\"unsupported build type\")\n","repo_name":"all3n/buildfly","sub_path":"buildfly/build/build_manager.py","file_name":"build_manager.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"19913313827","text":"from django import forms\nfrom django.contrib import admin\nfrom django.http.response import HttpResponseRedirect\nfrom django.urls import path, reverse\nfrom django.utils.html import format_html\n\nfrom .models import Device\nfrom permissions.admin import AbilityAdminMixin\n\n\n@admin.register(Device)\nclass DeviceAdmin(AbilityAdminMixin, admin.ModelAdmin):\n list_display = (\n \"id\",\n \"name\",\n \"is_active\",\n \"temp_code\",\n \"temp_code_expires_at\",\n \"request_activation\",\n )\n list_filter = (\"is_active\",)\n search_fields = (\"user__username\", \"name\")\n fields = (\"uuid\", \"name\", \"is_active\")\n readonly_fields = (\"uuid\", \"name\", \"is_active\")\n\n def get_urls(self):\n urls = 
super().get_urls()\n custom_urls = [\n path(\n \"request-activation//\",\n self.admin_site.admin_view(self.request_activation_view),\n name=\"request-activation\",\n )\n ]\n return custom_urls + urls\n\n @admin.display\n def request_activation(self, obj):\n return format_html(\n 'Request activation',\n reverse(\"admin:request-activation\", args=[obj.pk]),\n )\n\n def request_activation_view(self, request, pk):\n device: Device = Device.objects.get(id=pk)\n tempcode = device.request_activation()\n self.message_user(\n request,\n \"Request Succeeded. Please enter the device record's temporary\"\n f\" code ({tempcode}) in the device to complete the activation.\",\n )\n return HttpResponseRedirect(reverse(\"admin:device_device_changelist\"))\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n\nclass InlineDeviceForm(forms.ModelForm):\n def has_changed(self):\n return True\n\n\nclass InlineDevice(AbilityAdminMixin, admin.StackedInline):\n form = InlineDeviceForm\n model = Device\n extra = 0\n fields = (\"uuid\", \"name\", \"is_active\")\n\n def get_formset(self, request, obj=None, **kwargs):\n formset: type[forms.BaseInlineFormSet] = super().get_formset(\n request, obj, **kwargs\n )\n if request.user.is_superuser:\n return formset\n\n formset.form.base_fields[\"uuid\"].disabled = True\n formset.form.base_fields[\"name\"].disabled = True\n formset.form.base_fields[\"is_active\"].disabled = True\n\n return formset\n","repo_name":"Sesota/patient-sensor-system","sub_path":"device/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"70820831986","text":"from Goldeneye.Recognizers.Models.ModelType import ModelType\nfrom Goldeneye.Recognizers.RecognizerCDNNConstants import RecognizersCDNNConstants as constants\nfrom Goldeneye.Recognizers.Models.GNet import GNet\nfrom Goldeneye.Recognizers.Models.GNetV2 import GNetV2\n\n\nclass ModelFactory:\n\n @staticmethod\n def get_model(model_type):\n if model_type == ModelType.GNet:\n model_name = 'CNN-gnet'\n model_path = \"{}{}.h5\".format(constants.MODEL_DIR, model_name)\n return GNet.load_model(model_path)\n\n elif model_type == ModelType.GNetV2:\n model_name = 'CNN-gnet-v2'\n model_path = \"{}{}.h5\".format(constants.MODEL_DIR, model_name)\n return GNetV2.load_model(model_path)\n\n elif model_type == ModelType.GNETV2Ultimate:\n model_name = 'CNN-gnet-deep-v2-ultimate'\n model_path = \"{}{}.h5\".format(constants.MODEL_DIR, model_name)\n return GNetV2.load_model(model_path)\n\n elif model_type == ModelType.GNetOld:\n model_name = 'number_detection_model'\n model_path = \"{}{}.h5\".format(constants.MODEL_DIR, model_name)\n return GNet.load_model(model_path)\n","repo_name":"crzdg/TrainControlSystem","sub_path":"Goldeneye/Recognizers/Models/ModelFactory.py","file_name":"ModelFactory.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"} +{"seq_id":"7668926173","text":"import simulator.preprocessing as preprocessing\nimport random\n\n\ndef select_random_node(directed_edges, conditions):\n channels_with_qualified_number_of_channels = get_channels_with_qualified_number_of_channels(directed_edges,\n conditions['n_channels_lower_bound'],\n conditions['n_channels_upper_bound'])\n\n 
channels_with_qualified_total_capacity = get_channels_with_qualified_total_capacity(directed_edges,\n channels_with_qualified_number_of_channels,\n conditions['total_capacity_lower_bound'],\n conditions['total_capacity_upper_bound'])\n\n random_node_index = random.sample(channels_with_qualified_total_capacity, 15)\n return random_node_index\n\n\ndef get_channels_with_qualified_number_of_channels(directed_edges, n_channels_lower_bound, n_channels_upper_bound):\n candidate_channels = []\n for i in range(len(directed_edges)):\n src, trgs, channel_ids, number_of_channels = preprocessing.select_node(directed_edges, i)\n if n_channels_upper_bound > number_of_channels > n_channels_lower_bound:\n candidate_channels.append(i)\n return candidate_channels\n\n\ndef get_total_capacity(directed_edges, index):\n src = directed_edges.iloc[index]['src']\n trgs = directed_edges.loc[(directed_edges['src'] == src)]['trg']\n c = 0\n for trg in trgs:\n channel = directed_edges.loc[(directed_edges['src'] == src) & (directed_edges['trg'] == trg)]\n c += channel['capacity'].iloc[0]\n return c\n\n\ndef get_channels_with_qualified_total_capacity(directed_edges, channels_with_qualified_number_of_channels, total_capacity_lower_bound, total_capacity_upper_bound):\n candidate_channels = []\n for i in channels_with_qualified_number_of_channels:\n c = get_total_capacity(directed_edges, i)\n if total_capacity_lower_bound < c < total_capacity_upper_bound:\n candidate_channels.append(i)\n return candidate_channels\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Random node selection considering conditions on number of channels & total capacity')\n parser.add_argument('--data_path', default='../data/data.json')\n parser.add_argument('--n_channels_lower_bound', type=int, default=5)\n parser.add_argument('--n_channels_upper_bound', type=int, default=10)\n parser.add_argument('--total_capacity_lower_bound', type=int, default=15e6)\n parser.add_argument('--total_capacity_upper_bound', type=int, default=30e6)\n args = parser.parse_args()\n\n directed_edges_path = args.data_path\n directed_edges = preprocessing.get_directed_edges(directed_edges_path)\n\n conditions = {'n_channels_lower_bound': args.n_channels_lower_bound,\n 'n_channels_upper_bound': args.n_channels_upper_bound,\n 'total_capacity_lower_bound': args.total_capacity_lower_bound,\n 'total_capacity_upper_bound': args.total_capacity_upper_bound}\n\n random_node_index = select_random_node(directed_edges, conditions)\n print(random_node_index)\n\n\n","repo_name":"LightningCrashers/LNFee","sub_path":"scripts/node_selection.py","file_name":"node_selection.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"9099567707","text":"\"\"\"\nCreated on 11/4/2018\n@author: Antonio Alvarez\n\"\"\"\nimport math as m\nimport copy\nimport pandas as pd\nimport numpy as np \n\ndef reorganize(df):\n\n #Reorganize the data frame putting the numerical values first and the categorical later. 
\n flo = []\n integer = []\n cat = []\n for i in range(0,len(df.columns)-1):\n if (df.iloc[:,i].dtype == np.float64):\n flo += [df.iloc[:,i].name]\n elif (df.iloc[:,i].dtype == np.int64):\n integer += [df.iloc[:,i].name]\n else: \n cat += [df.iloc[:,i].name]\n\n mark = len(flo) #where the categorical begins\n header = flo + integer\n mark2 = len(header)\n header += cat + [df.iloc[:,-1].name]\n newdf = df[header]\n\n return mark,mark2,newdf\n\ndef ivdmPrep(index1,df,results):\n\n #Preparations for IVDM distance\n numAttr = len(df.columns)\n numClass = results.unique().size\n s = max(numClass,5) # number of intervals\n width = []\n for i in range(0,index1):\n width += [(df.iloc[:,i].max() - df.iloc[:,i].min())/s]\n\n #discretization\n discTable = copy.deepcopy(df)\n for i in range(0,df.iloc[:,0].count()):\n #print(\"indice \",i)\n for j in range(0,index1):\n if (discTable.iloc[i,j] == df.iloc[:,j].max()):\n discTable.iloc[i,j] = s\n else:\n discTable.iloc[i,j] = m.floor((df.iloc[i,j]-df.iloc[:,j].min())/width[j])+1\n\n \n #Getting the Conditional probabilities\n maxAttr = m.floor(discTable.max().max())+1\n nxac = np.zeros(shape=(maxAttr,numAttr,numClass),dtype=np.int64)\n pxac = np.zeros(shape=(maxAttr,numAttr,numClass),dtype=np.float64)\n nxa = np.zeros(shape=(maxAttr,numAttr),dtype=np.int64)\n\n for i in range(0,df.iloc[:,0].count()):\n #print(\"indice2 \",i)\n for j in range(0,df.iloc[0,:].count()):\n nxac[m.floor(discTable.iloc[i,j]),j,m.floor(results[i])] += 1\n nxa[m.floor(discTable.iloc[i,j]),j] += 1\n\n for i in range(0,maxAttr):\n for j in range(0,numAttr):\n for k in range(0,numClass):\n if (nxa[i,j] == 0):\n pxac[i,j,k] = 0\n else:\n pxac[i,j,k] = nxac[i,j,k]/nxa[i,j]\n\n return pxac,width","repo_name":"ragnell93/Instance-Selection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"30094724422","text":"import copy\nfrom typing import Union, Dict\nfrom copy import deepcopy\n\nimport pytest\n\nfrom iconsdk.wallet.wallet import KeyWallet, public_key_to_address, convert_public_key_format\n\n\nclass TestKeyWallet:\n\n @pytest.mark.parametrize(\n \"compressed,hexadecimal,ret_type,size\",\n (\n (True, True, str, 33),\n (True, False, bytes, 33),\n (False, True, str, 65),\n (False, False, bytes, 65),\n )\n )\n def test_get_public_key(self, compressed: bool, hexadecimal: bool, ret_type: type, size: int):\n wallet: KeyWallet = KeyWallet.create()\n public_key: Union[str, bytes] = wallet.get_public_key(compressed, hexadecimal)\n assert isinstance(public_key, ret_type)\n if hexadecimal:\n print(public_key)\n pub_key: bytes = bytes.fromhex(public_key)\n assert len(pub_key) == size\n else:\n assert len(public_key) == size\n\n def test_get_private_key(self):\n wallet: KeyWallet = KeyWallet.create()\n private_key: str = wallet.get_private_key()\n assert isinstance(private_key, str)\n assert not private_key.startswith(\"0x\")\n\n private_key: bytes = wallet.get_private_key(hexadecimal=False)\n assert isinstance(private_key, bytes)\n assert wallet.private_key == private_key\n\n def test_private_key(self):\n wallet: KeyWallet = KeyWallet.create()\n wallet2: KeyWallet = KeyWallet.load(wallet.private_key)\n assert wallet == wallet2\n\n wallet3 = KeyWallet.create()\n assert wallet != wallet3\n\n def test_public_key(self):\n wallet: KeyWallet = KeyWallet.create()\n public_key: bytes = wallet.public_key\n assert isinstance(public_key, bytes)\n assert 
len(public_key) == 65\n\n def test_to_dict(self):\n password = \"1234\"\n wallet: KeyWallet = KeyWallet.create()\n jso: Dict[str, str] = wallet.to_dict(password)\n assert jso[\"address\"] == wallet.get_address()\n assert jso[\"coinType\"] == \"icx\"\n\n wallet2 = KeyWallet.from_dict(jso, password)\n assert wallet2 == wallet\n\n def test_copy(self):\n wallet = KeyWallet.create()\n wallet2 = copy.deepcopy(wallet)\n assert wallet == wallet2\n\n wallet3 = copy.copy(wallet)\n assert wallet == wallet3\n\n def test_hash(self):\n wallet = KeyWallet.create()\n wallet_dict = {wallet: wallet.public_key}\n assert wallet.public_key == wallet_dict[wallet]\n\n\ndef test_public_key_to_address():\n wallet: KeyWallet = KeyWallet.create()\n address: str = public_key_to_address(wallet.public_key)\n assert address == wallet.get_address()\n\n compressed_public_key: bytes = wallet.get_public_key(compressed=True, hexadecimal=False)\n address2: str = public_key_to_address(compressed_public_key)\n assert address2 == wallet.get_address()\n\n\n@pytest.mark.parametrize(\n \"iformat,oformat,size\",\n (\n (True, True, 33),\n (True, False, 65),\n (False, True, 33),\n (False, False, 65),\n )\n)\ndef test_convert_public_key_format(iformat: bool, oformat: bool, size: int):\n wallet: KeyWallet = KeyWallet.create()\n\n public_key: bytes = wallet.get_public_key(compressed=iformat, hexadecimal=False)\n ret: bytes = convert_public_key_format(public_key, compressed=oformat)\n if iformat == oformat:\n assert ret == public_key\n assert len(ret) == size\n assert public_key_to_address(public_key) == public_key_to_address(ret)\n","repo_name":"icon-project/icon-sdk-python","sub_path":"tests/wallet/test_wallet.py","file_name":"test_wallet.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"93"} +{"seq_id":"24521205972","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport os\n\ndef main():\n \"\"\" ------------------- argparse -------------------- \"\"\"\n parser = argparse.ArgumentParser(description=\"queue of problems\", formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"command\", help=\n \"add -- add a problem to the queue\\n\" +\n \"list -- list all problems in the queue\\n\" +\n \"drop -- drop a problem from the queue\\n\" +\n \"done -- mark as completed and drop\\n\"\n )\n parser.add_argument(\"option\",\n nargs=\"*\", default=None,\n help=\"id to add/remove\"\n )\n\n args = parser.parse_args()\n\n if args.command not in [\"add\", \"list\", \"drop\", \"done\"]:\n print(\"UNKNOWN COMMAND\")\n return\n\n path = os.path.join(os.path.dirname(__file__), \"q.txt\")\n\n q = []\n with open(path, \"r\") as f:\n for line in f:\n q.append(line)\n\n\n if args.command == \"list\":\n sys.stdout.write(\"\".join(q))\n return\n\n if args.option is None:\n print(\"specify problem to add/drop\")\n return\n\n if args.command == \"add\":\n for it in args.option:\n name = it + \"\\n\"\n if name in q[1:]:\n print(\"{} is already in the queue\".format(name.strip()))\n else:\n q.append(name)\n with open(path, \"a\") as f:\n f.write(name)\n sys.stdout.write(\"\".join(q))\n\n else:\n for it in args.option:\n name = it + \"\\n\"\n if name not in q[1:]:\n print(\"{} is not in the queue\".format(name.strip()))\n else:\n q.remove(name)\n if args.command == \"done\":\n num = int(q[0].split()[1])\n q[0] = \"Completed: {}\\n\".format(num+1)\n with open(path, \"w\") as f:\n f.write(\"\".join(q))\n sys.stdout.write(\"\".join(q))\n\nif __name__ 
== \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\")\n sys.exit(0)\n","repo_name":"ehnryx/queue","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"73858314226","text":"def asteroid():\n # Masa je gustoca * volumen\n # volumen je 4/3 * r^3 * pi\n \n \n pi=3.14159265358980 # NE DIRAJ!!!\n gustoca=3 # g/cm^3, mijenjaj po potrebi\n promjer= 1300 # m, mijenjaj po potrebi\n brzina=26.0 # km/s, mijenjaj po potrebi\n \n prava_brzina=brzina*1000 # m/s\n polumjer=promjer/2 # m\n prava_gustoca=gustoca*1000 # kg/m^3\n \n volumen=4/3 * polumjer**3 * pi\n \n \n masa=prava_gustoca*volumen\n energija=1/2*masa*(prava_brzina*prava_brzina)\n rezultat=energija/(4.184*(10**15)) # u megatone TNT-a\n print(rezultat)\n\n\nasteroid()","repo_name":"kvrancic/University-Courses","sub_path":"Introduction To Programming/VSC/asteroid.py","file_name":"asteroid.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"72103119987","text":"import numpy as np\nimport nmrglue as ng\nimport re\n\nVAL_PATTERNS_ = [\n #For parsing hdr file\n #(matching pattern, suitable type conversion function, has unit)\n (re.compile(r'\"(TRUE|FALSE)\"'), bool, False),\n (re.compile(r'\"(.*)\"'), str, False),\n (re.compile(r'(\\d+\\.?\\d*)\\[(.+)\\]'), float, True),\n (re.compile(r'(\\d+)'), int, False),\n (re.compile(r'(\\d+\\.\\d*)'), float, False)\n]\n\n\ndef _read_hdr(hdr_filename):\n\n string_pattern = re.compile(r'\"(.*)\"')\n integer_pattern = re.compile(r'')\n\n\n \n header = {}\n with open(hdr_filename) as fin:\n\n for line in fin:\n\n elems = line.strip().split()\n\n if len(elems) < 2:\n continue\n\n\n val = ''.join(elems[1:])\n\n for pattern, conv, has_unit in VAL_PATTERNS_:\n\n\n match = pattern.match(val)\n\n if match:\n val = conv(match.group(1))\n\n if has_unit:\n unit = match.group(2)\n else:\n unit = None\n\n header[elems[0]] = (val, unit)\n break\n\n\n return header\n\n#Remove SI-prefix with keeping the value, for example, (100.0, 'kHz') -> (100000.0, 'Hz')\ndef _process_si_prefix(val, unit_with_si):\n\n si_factor = {'p':1.0E-12, 'n': 1.0E-9, 'u': 1.0E-6, 'm':1.0E-3,\n 'k': 1.0E+3, 'M': 1.0E+6, 'G':1.0E+12, 'T':1.0E+15}\n si_unit_pattern = re.compile(r'(p|n|u|m|k|M|G|T)(.+)')\n\n if unit_with_si is None:\n return val, None\n \n #\"ppm\" matches the pattern, then escape \n if unit_with_si == 'ppm':\n return val, 'ppm'\n\n match = si_unit_pattern.match(unit_with_si)\n if match:\n return val * si_factor[match.group(1)], match.group(2)\n\n else:\n return val, unit_with_si\n\n\n \ndef _read_complex1d(asc_file_name, size):\n\n data = np.zeros(size, dtype = np.complex64)\n\n with open(asc_file_name) as fin:\n\n idx = 0\n for i, line in enumerate(fin):\n\n\n if i == 0: continue\n\n elems = line.strip().split()\n if len(elems) != 3:\n raise InvalidFileFormatException()\n \n data[i - 1] = float(elems[1]) + 1.0j * float(elems[2])\n \n\n return data\n\ndef _read_real1d(asc_file_name, size):\n\n data = np.zeros(size, dtype = np.float32)\n\n with open(asc_file_name) as fin:\n\n idx = 0\n for i, line in enumerate(fin):\n\n\n if i == 0: continue\n\n elems = line.strip().split()\n if len(elems) != 2:\n raise InvalidFileFormatException()\n \n data[i - 1] = float(elems[1])\n \n\n return data\n\ndef _read_complex2d(asc_file_name, size):\n\n data = np.zeros(size, dtype = 
np.complex64)\n\n with open(asc_file_name) as fin:\n\n x_idx = 0\n y_idx = 0\n for i, line in enumerate(fin):\n\n\n if i == 0: continue\n\n elems = line.strip().split()\n if len(elems) != 4:\n raise InvalidFileFormatException()\n \n data[y_idx, x_idx] = float(elems[2]) + 1.0j * float(elems[3])\n\n x_idx += 1\n if x_idx == size[1]:\n x_idx = 0\n y_idx += 1\n \n\n return data\n\ndef _read_real2d(asc_file_name, size):\n\n data = np.zeros(size, dtype = np.float32)\n\n with open(asc_file_name) as fin:\n\n x_idx = 0\n y_idx = 0\n for i, line in enumerate(fin):\n\n if i == 0: continue\n\n elems = line.strip().split()\n if len(elems) != 3:\n raise InvalidFileFormatException()\n \n data[y_idx, x_idx] = float(elems[2])\n\n x_idx += 1\n if x_idx == size[1]:\n x_idx = 0\n y_idx += 1 \n\n \n\n return data\n\n\n#The following two methods are required for making compatibility to NMRPipe format.\ndef _interleave_2d(data):\n\n il_data = np.zeros(data.shape, dtype = data.dtype)\n il_data[0::2,:] = np.conjugate(data[:data.shape[0]//2,:]) #To invert the data arrangement, the results look OK\n il_data[1::2,:] = -1.0 * np.conjugate(data[data.shape[0]//2:,:]) #To invert the data arrangement, the results look OK\n return il_data\n\ndef _delete_2d_sine_modulation(data):\n #In almost cases, frequency domain data of NMRPipe don't have imaginary (sin-modulation) part.\n #The sine modulation part is removed.\n cos_data = np.zeros((data.shape[0] // 2, data.shape[1]), dtype = data.dtype)\n cos_data = data[0:data.shape[0]//2,:]\n\n return cos_data\n \n\n\n# Read .asc and its related .hdr files made by Delta\n# and output dic and data of nmrglue.pipe\ndef read(asc_filename):\n\n filename_pattern = re.compile(r'(.+)\\.asc')\n\n match = filename_pattern.match(asc_filename)\n if match is None:\n raise FilenameException()\n\n\n header = _read_hdr(match.group(1) + '.hdr')\n\n\n ndim = header['dimensions'][0]\n if ndim > 3:\n raise NotSupportedFileException('1D and 2D files can be only handled.')\n\n\n udic = {'ndim': ndim}\n udic.update({i:{} for i in range(ndim)})\n dim_idx = {'x': ndim - 1, 'y': ndim - 2}\n \n x_freq, _ = _process_si_prefix(*header['x_freq'])\n\n x_idx = dim_idx['x']\n udic[x_idx]['obs'] = x_freq / 1.0E+6\n udic[x_idx]['car'] = x_freq / 1.0E+6 * header['x_offset'][0]\n udic[x_idx]['sw'] = _process_si_prefix(*header['x_sweep'])[0]\n udic[x_idx]['label'] = header['x_domain'][0]\n udic[x_idx]['complex'] = (header['x_format'][0] == 'COMPLEX')\n udic[x_idx]['freq'] = (header['x_start'][1] == 'ppm')\n udic[x_idx]['time'] = not udic[x_idx]['freq']\n udic[x_idx]['size'] = header['x_curr_points'][0]\n udic[x_idx]['encoding'] = 'complex'\n \n\n if ndim == 2:\n y_idx = dim_idx['y']\n y_freq, _ = _process_si_prefix(*header['y_freq'])\n udic[y_idx]['obs'] = y_freq / 1.0E+6\n udic[y_idx]['car'] = y_freq / 1.0E+6 * header['y_offset'][0]\n udic[y_idx]['sw'] = _process_si_prefix(*header['y_sweep'])[0]\n udic[y_idx]['label'] = header.get('y_domain', ('None',))[0]\n udic[y_idx]['complex'] = (header['y_format'][0] == 'COMPLEX')\n udic[y_idx]['freq'] = (header['y_start'][1] == 'ppm')\n udic[y_idx]['time'] = not udic[y_idx]['freq']\n udic[y_idx]['size'] = header['y_curr_points'][0]\n if udic[y_idx]['complex']:\n udic[y_idx]['size'] *= 2\n \n udic[y_idx]['encoding'] = 'states'\n\n if ndim == 1:\n\n if udic[x_idx]['complex']:\n data = _read_complex1d(asc_filename, udic[x_idx]['size'])\n else:\n data = _read_real1d(asc_filename, udic[x_idx]['size'])\n \n elif ndim == 2:\n\n if udic[x_idx]['complex']:\n data = 
_read_complex2d(asc_filename, (udic[y_idx]['size'], udic[x_idx]['size']) )\n else:\n data = _read_real2d(asc_filename, (udic[y_idx]['size'], udic[x_idx]['size']) )\n\n\n if udic[y_idx]['complex'] and udic[y_idx]['time']:\n data = _interleave_2d(data)\n if udic[y_idx]['complex'] and udic[y_idx]['freq']:\n udic[y_idx]['size'] //= 2\n udic[y_idx]['complex'] = False\n data = _delete_2d_sine_modulation(data)\n \n if udic[y_idx]['label'] == 'None':\n udic[y_idx]['obs'] = 1.0\n\n\n pipe_dic = ng.pipe.create_dic(udic)\n\n return pipe_dic, data\n\n\n# get *_list as a python array.\n# SI-prefix will be removed.\n# For example y_list = {0[ms], 1[ms], ..., 1000[ms]} -> [0.0, 0.001, ... ,1.0]\ndef get_array_values(asc_filename, dim = 'y'):\n\n filename_pattern = re.compile(r'(.+)\\.asc')\n\n match = filename_pattern.match(asc_filename)\n if match is None:\n raise FilenameException()\n\n array_data = []\n list_name = f'{dim}_list'\n inlist = False\n with open(match.group(1) + '.hdr') as fin:\n\n for line in fin:\n\n elems = line.strip().split()\n\n if inlist:\n array_data.append(elems[0])\n len_ -= 1\n if len_ == 0:\n break\n \n\n if elems[0] == list_name:\n len_ = int(elems[1])\n inlist = True\n continue\n\n\n \n parsed_array_data = []\n\n for data in array_data:\n\n for pattern, conv, has_unit in VAL_PATTERNS_:\n\n match = pattern.match(data)\n\n if match:\n if has_unit:\n val = _process_si_prefix(conv(match.group(1)), match.group(2))[0]\n else:\n val = conv(match.group(1))\n\n\n parsed_array_data.append(val)\n break\n \n return parsed_array_data\n\nclass FilenameException(Exception):\n\n def __init__(self):\n pass\n\n def __str__(self):\n return 'Filename should be \"*.asc\"'\n\nclass NotSupportedFileException(Exception):\n\n def __init__(self, detail):\n self.detail = detail\n pass\n\n def __str__(self):\n return self.detail\n\nclass InvalidFileFormatException(Exception):\n\n def __init__(self):\n pass\n\n def __str__(self):\n return 'Number of columns of .asc file is invalid.'\n \n\nif __name__ == '__main__':\n import sys\n\n #read asc file and convert to nmrpipe format\n dic, data = read(sys.argv[1])\n \n\n ng.pipe.write('test2.ft2', dic, data, overwrite= True)\n\n arr = get_array_values(sys.argv[1])\n print(arr)\n","repo_name":"makki547/delta_asc_reader","sub_path":"delta_asc_reader/delta_asc_reader.py","file_name":"delta_asc_reader.py","file_ext":"py","file_size_in_byte":9501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"7265592953","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule composé d'une unique classe RejeuClock gérant la simulation, l'horloge et les messages émis/reçu sur le bus.\n\"\"\"\n__author__ = \"Alban\", \"Audrey\", \"Alexandre\"\n\nfrom ivy.std_api import IvyBindMsg\nfrom ivy.std_api import IvySendMsg\nimport time\nimport logging\nimport models as mod\nimport utils\nimport math\nimport control\nimport re\n\nclass RejeuClock(object):\n\n def __init__(self, db_connection, start_time=0):\n \"\"\"\n Initialisation d'un objet RejeuClock\n\n :param db_connection: Connexion à la base de données\n\n :param start_time: Heure de départ à 0 secondes par défaut\n \"\"\"\n self.running = True\n self.paused = True\n self.current_time = start_time\n self.rate = 1.0\n self.db_con = db_connection\n self.session = db_connection.get_session()\n # abonnement aux messages relatifs à l'horloge\n self.__set_subscriptions()\n\n def __set_subscriptions(self):\n \"\"\"\n Abonnements aux messages du bus Ivy afin de traiter les différentes 
actions de l'utilisateur\n\n :return: NONE\n \"\"\"\n IvyBindMsg(lambda *l: self.start(), '^ClockStart')\n IvyBindMsg(lambda *l: self.stop(), '^ClockStop')\n IvyBindMsg(lambda *l: self.set_rate(l[1]), '^SetClock Rate=(\\S+)')\n IvyBindMsg(lambda *l: self.set_init_time(l[1]), '^SetClock Time=(\\S+)')\n IvyBindMsg(lambda *l: self.send_beacons(l[1]), \"^GetAllBeacons MsgName=(\\S+)\")\n IvyBindMsg(lambda *l: self.send_pln(l[1], int(l[2]), l[3]), \"^GetPln MsgName=(\\S+) Flight=(\\S+) From=(\\S+)\")\n IvyBindMsg(lambda *l: self.send_sectors_info(l[1], int(l[2])), \"^GetSectorsInfos MsgName=(\\S+) Flight=(\\S+)\")\n IvyBindMsg(lambda *l: self.set_heading(l[0], int(l[1]), int(l[2])), '^AircraftHeading Flight=(\\S+) To=(\\S+)')\n IvyBindMsg(lambda *l: self.set_heading(l[0], int(l[1]), int(l[2]), side=l[3]), '^AircraftHeading Flight=(\\S+) To=(\\S+) By=(\\S+)')\n IvyBindMsg(lambda *l: self.set_heading(l[0], int(l[1]), int(l[2]), rate=int(l[3])), '^AircraftHeading Flight=(\\S+) To=(\\S+) Rate=(\\S+)')\n IvyBindMsg(lambda *l: self.set_heading(l[0], int(l[1]), int(l[2]), side=l[3], rate=int(l[4])), '^AircraftHeading Flight=(\\S+) To=(\\S+) By=(\\S+) Rate=(\\S+)')\n IvyBindMsg(lambda *l: self.reset_heading(int(l[1])), '^CancelLastOrder Flight=(\\S+)')\n IvyBindMsg(lambda *l: self.send_trajectory(l[1], l[2], l[3]), 'GetTrajectory MsgName=(\\S+) Flight=(\\S+) From=(\\S+)')\n\n def main_loop(self):\n \"\"\"\n Boucle principale permettant l'utilisation de l'objet RejeuClock\n\n :return: NONE\n \"\"\"\n list_flights = self.session.query(mod.Flight)\n (start_time, stop_time) = utils.extract_sim_bounds(list_flights)\n\n msg_rangeupdate = \"RangeUpdateEvent FirstTime=%s LastTime=%s\" % (\n utils.sec_to_str(start_time), utils.sec_to_str(stop_time))\n logging.debug(msg_rangeupdate)\n IvySendMsg(msg_rangeupdate)\n\n #Boucle d'horloge\n while self.running:\n if self.paused:\n # en pause, on ne doit plus faire avancer l'horloge\n # et émettre les messages\n time.sleep(0.1)\n continue\n\n logging.debug(\"Loop running, SimTime=%s\" \\\n % utils.sec_to_str(self.current_time))\n IvySendMsg(\"ClockEvent Time=%s Rate=%d Bs=0\" \\\n % (utils.sec_to_str(self.current_time), self.rate))\n\n # récupérer les plots à envoyer\n list_cones = self.session.query(mod.Cone)\\\n .join(mod.Cone.flight)\\\n .filter(mod.Cone.hour == self.current_time,\n mod.Cone.version == mod.Flight.last_version)\n\n\n # pour chaque plot\n for cone in list_cones:\n g_speed = math.sqrt((cone.vit_x)**2+(cone.vit_y)**2)\n heading = utils.get_heading(cone.vit_x, cone.vit_y)\n msg = \"TrackMovedEvent Flight=%d CallSign=%s Ssr=%d Sector=-- Layers=F X=%f Y=%f Vx=%d Vy=%d Afl=%d Rate=%d Heading=%d GroundSpeed=%d Tendency=%d Time=%s\" %\\\n ( cone.flight.id, cone.flight.callsign, cone.flight.ssr, cone.pos_x/64, cone.pos_y/64, cone.vit_x, cone.vit_y, cone.flight_level, cone.rate, heading,int(g_speed), cone.tendency, utils.sec_to_str(cone.hour) )\n #logging.debug(\"Message envoye : %s\" % msg)\n IvySendMsg(msg)\n\n IvySendMsg(\"EndTransmissionEvent Time=%s\" % (utils.sec_to_str(self.current_time)))\n\n if self.rate>0 :\n self.current_time += 1\n time.sleep(1.0 / self.rate)\n else :\n self.current_time -=1\n time.sleep(-1.0 / self.rate)\n self.session.close()\n\n def stop(self):\n \"\"\"\n Met en pause l'horloge et donc la simulation\n\n :return: NONE\n \"\"\"\n logging.debug(\"Clock Stopped\")\n self.paused = True\n\n def start(self):\n \"\"\"\n Lance la simulation en lançant l'horloge\n\n :return: NONE\n \"\"\"\n logging.debug(\"Clock Started\")\n self.paused = 
False\n\n def close(self):\n \"\"\"\n Arrête l'horloge et ferme le programme\n\n :return: NONE\n \"\"\"\n self.running = False\n\n def set_rate(self, rate_value):\n \"\"\"\n Modifie la vitesse de la simulation\n\n :param rate_value: Vitesse (Float)\n\n :return: NONE\n \"\"\"\n logging.debug(\"SetClock\")\n self.rate = int(rate_value)\n\n def set_init_time(self, init_time):\n \"\"\"\n Modifie l'heure à laquelle on effectue la simulation\n\n :param init_time: Heure d'initialisation\n\n :return: NONE\n \"\"\"\n logging.debug(\"Set Init Time\")\n self.current_time = utils.str_to_sec(init_time)\n\n def send_beacons(self, msg_name):\n \"\"\"\n En réponse à GetAllBeacons, Rejeu envoie l'ensemble des balises enregistrées.\n\n :param msg_name: Identifiant du msg envoyé et émit en réponse\n\n :return: NONE\n \"\"\"\n session = self.db_con.get_session()\n l_beacons = session.query(mod.Beacon)\n count = 0\n msg = \"AllBeacons %s Slice=\" % (msg_name)\n for beacon in l_beacons:\n msg += beacon.display_beacon() + \" \"\n count += 1\n if count == 50:\n IvySendMsg(msg.strip())\n count = 0\n msg = \"AllBeacons %s Slice=\" % (msg_name)\n if count > 0:\n IvySendMsg(msg)\n IvySendMsg(\"AllBeacons %s EndSlice\" % msg_name)\n session.close()\n\n def send_pln(self, msg_name, flight_id, init_order):\n \"\"\"\n En réponse à GetPln, Rejeu renvoie le plan de vol associé au vol dans le message.\n\n :param msg_name: Identifiant du message envoyé et émis en réponse\n\n :param flight_id: Num de vol (Int)\n\n :param init_order: now, un nom de balise ou une heure (Str)\n\n :return: NONE\n \"\"\"\n session = self.db_con.get_session()\n flight = session.query(mod.Flight).filter(mod.Flight.id == flight_id).first()\n if init_order == \"now\":\n starting_time = self.current_time\n starting_beacon = None\n elif init_order == \"origin\":\n starting_beacon = None\n starting_time = None\n elif len(init_order.split(':'))>1:\n starting_beacon = None\n starting_time = utils.str_to_sec(init_order)\n else :\n starting_beacon = init_order\n starting_time = None\n route = utils.extract_route(flight.flight_plan, starting_beacon, starting_time)\n msg_pln = \"Pln %s Flight=%d Time=%s CallSign=%s AircraftType=%s Ssr=%d Speed=%d Rfl=%d Dep=%s Arr=%s Rvsm=%s Tcas=%s Adsb=%s DLink=%s List=%s\" % \\\n (msg_name, flight.id, utils.sec_to_str(self.current_time), flight.callsign, flight.type, flight.ssr, flight.v, flight.fl, flight.dep, flight.arr,\n flight.rvsm, flight.tcas, flight.adsb, flight.dlink, route.strip())\n flight.pln_event = 1\n IvySendMsg(msg_pln)\n session.close()\n\n def send_sectors_info(self, msg_name, flight_id):\n \"\"\"\n Envoie la liste des secteurs\n\n :param msg_name: Identifiant du message envoyé et émis en réponse\n\n :param flight_id: Num de vol (Int)\n\n :return: NONE\n \"\"\"\n msg = \"SectorsInfo %s Flight=%d List=--\" % (msg_name, flight_id)\n IvySendMsg(msg)\n\n def set_heading(self, agent, flight_id, new_heading, side=\"\", rate=3):\n \"\"\"\n Appelle la fonction set_heading de control qui crée la nouvelle route suite à un ordre\n de changement de cap après réception d'un message Ivy Aircraft Heading.\n\n :param flight_id: Num de vol (Int).\n\n :param new_heading: Nouveau cap (Int).\n\n :return: NONE\n \"\"\"\n logging.debug(\"Set Heading\")\n session = self.db_con.get_session()\n control.set_heading(session, flight_id, new_heading, self.current_time, side, rate)\n session.close()\n # send report event\n order = \"AircraftHeading|{0}|{1}\".format(flight_id, new_heading)\n IvySendMsg(\"ReportEvent {0} Result=OK \"\n 
\"Info=NIL Order={1}\".format(agent, order))\n # send trajectory update event\n IvySendMsg(\"TrajectoryUpdateEvent Flight={0}\".format(flight_id))\n\n def reset_heading(self, flight_id):\n \"\"\"\n Appelle la fonction delete_last_version de control qui annule l'ordre de changement de cap\n après réception d'un message Ivy CancelLastOrder\n\n :param flight_id: Num de vol (Int).\n\n :return: NONE\n \"\"\"\n logging.debug(\"Reset Heading\")\n session = self.db_con.get_session()\n control.delete_last_version(session, flight_id)\n session.close()\n # send trajectory update event\n IvySendMsg(\"TrajectoryUpdateEvent Flight={0}\".format(flight_id))\n\n def send_trajectory(self, msg_name, flight_id, from_type):\n \"\"\"\n Envoie la trajectoire du vol considéré sur réception d'un message GetTrajectory\n\n :param msg_name: Identifiant du message (renvoyé lors de la réponse)\n\n :param flight_id: Identifiant du vol\n\n :param from_type: Instant à partir duquel la trajectoire est désirée (\"now\", \"origin\" ou \"HH:MM:SS\")\n\n :return: NONE\n \"\"\"\n session = self.db_con.get_session()\n start = 0\n if from_type == \"now\":\n start = self.current_time\n elif re.match(\"^\\d{2}:\\d{2}:\\d{2}$\", from_type):\n start = utils.str_to_sec(from_type)\n flight = session.query(mod.Flight).filter(mod.Flight.id == flight_id).first()\n logging.debug(\"Vol %d : last_version = %d\"%(flight.id, flight.last_version))\n cones = session.query(mod.Cone)\\\n .join(mod.Cone.flight)\\\n .filter(mod.Cone.flight_id == flight_id,\n mod.Cone.hour > start,\n mod.Cone.version == mod.Flight.last_version)\\\n .order_by(mod.Cone.hour)\\\n .all()\n\n nb_msg = (len(cones)/30) + 1\n for i in range(0, nb_msg):\n first, last = 30*i, min(30*(i+1), len(cones))\n data = \" \".join([c.format() for c in cones[first:last]])\n if data != \"\":\n IvySendMsg(\"Trajectory %s Slice=%s\" % (msg_name, data))\n IvySendMsg(\"Trajectory %s EndSlice\" % msg_name)\n session.close()\n\n\n\n","repo_name":"Albantestenac/RejeuTraffic","sub_path":"pyrejeu/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":11706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"19830238634","text":"\"\"\"\nWrite a simple parser that will parse and run Deadfish.\n\nDeadfish has 4 commands, each 1 character long:\n\ni increments the value (initially 0)\nd decrements the value\ns squares the value\no outputs the value into the return array\nInvalid characters should be ignored.\n\nparse(\"iiisdoso\") ==> [8, 64]\n\"\"\"\n\nimport re\ndef parse(data):\n data = re.sub(\"[^oids]+\", \"\", data)\n arr = list(data)\n val = 0\n new_arr = []\n for x in arr:\n if x == 'i': val += 1\n elif x == 'd': val -= 1\n elif x == 's': val = val ** 2\n else: new_arr.append(val)\n return new_arr\n\nprint(parse(\"ooo\")) # [0,0,0]\nprint(parse(\"ioioio\")) # [1,2,3]\nprint(parse(\"idoiido\")) # [0,1]\nprint(parse(\"isoisoiso\")) # [1,4,25]\nprint(parse(\"codewars\")) # [0]\n","repo_name":"heroisaprinciple/problemSolving","sub_path":"Python/codewars/6kyu/makeTheDeadfishSwim.py","file_name":"makeTheDeadfishSwim.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"69860625588","text":"from bs4 import BeautifulSoup\nimport logging\nimport json\nimport re\nimport random\nfrom .config import *\nfrom aqt.utils import askUser\nimport os\n\nADDON = 
os.path.dirname(os.path.abspath(__file__))\n###############################################################################################################\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\nfile_handler = logging.FileHandler(ADDON + '/logs.log')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\n################################################################################################################\n\ndef get_all_note_ids(models):\n ids = []\n for modelName in models:\n model = mw.col.models.byName(modelName)\n model_id = str(model['id'])\n notes_ids = mw.col.db.all(\"SELECT id from notes where mid = '\" + model_id + \"'\")\n for note_id in notes_ids:\n note_id = str(note_id)[1:-2]\n ids.append(note_id)\n return ids\n\n\n################################################################################################################\n\n\ndef get_bank():\n \"\"\"\n loops over all the notes in the sentences bank and adds them to a dict of dicts and returns that dict\n \"\"\"\n # Get the model id of the sentences notes\n model = mw.col.models.byName(sentenceModelName)[\"id\"]\n notes = mw.col.findNotes(\"mid:\" + str(model))\n\n collected_example_id = 0\n bank = {}\n for note in notes:\n french_sentence_note = mw.col.getNote(note)\n try:\n example = {\n 'fr': BeautifulSoup(french_sentence_note[target_lang_field], 'html.parser').get_text().replace(\"\\'\",\n \"’\"),\n 'en': BeautifulSoup(french_sentence_note[mother_lang_field], 'html.parser').get_text().replace(\"\\'\",\n \"’\"),\n 'audio': BeautifulSoup(french_sentence_note[audio_field], 'html.parser').get_text().replace(\"\\'\",\n \"’\"),\n 'noteId': french_sentence_note.id}\n bank[collected_example_id] = example\n collected_example_id += 1\n except:\n pass\n logger.info('bank retrieved successfully')\n return bank\n\n\n###############################################################################################################\n\n\ndef create_dict(bank):\n \"\"\"\n creates a dictionary of all the words in the examples and which examples they appear on\n \"\"\"\n\n logger.info(\"There are \" + str(len(bank)) + \" sentences\")\n dictionary = {}\n for id in bank:\n example = bank[id]\n example['audio'] = example['audio'].split(',')[0]\n example['audio'] = example['audio'][7:-1]\n words = example['fr'].lower()\n words = words.replace('\\\\xa0', ' ')\n words = re.split('[.?!, «»()\\\"]', words)\n\n for word in words:\n word = word.replace(\" d\\'\", \"\")\n word = word.replace(\" l\\'\", \"\")\n word = word.replace(\" d’\", \"\")\n word = word.replace(\" l’\", \"\")\n if word.startswith(\"d\\'\"):\n word = word.replace(\"d\\'\", \"\")\n elif word.startswith(\"l\\'\"):\n word = word.replace(\"l\\'\", \"\")\n elif word.startswith(\"d’\"):\n word = word.replace(\"d’\", \"\")\n elif word.startswith(\"l’\"):\n word = word.replace(\"l’\", \"\")\n\n if word not in dictionary and len(word) > 1:\n idsList = []\n idsList.append(id)\n dictionary[word] = idsList\n elif word in dictionary:\n dictionary[word].append(id)\n\n from pprint import pformat\n\n logger.info(pformat(dictionary))\n logger.info(\"There are \" + str(len(dictionary)) + \" unique words\")\n logger.info('dicts created successfully')\n return dictionary, bank\n\n\n################################################################################################################\n\n\ndef update_bank_deck():\n \"\"\"\n loops over all the notes in the 
(French (verbs , nouns , adv and adj ))\n and add the examples found in them and not in the french sentence model to\n the french sentence deck\n \"\"\"\n logger.info('updating bank deck .....')\n bank = get_bank()\n bank_keys = []\n for id in bank:\n example = bank[id]\n bank_keys.append(example['fr'])\n\n logger.info(len(bank_keys))\n\n new_examples_list = {}\n\n new_examples = 0\n\n for modelName in mainModelsNames:\n\n model_id = str(mw.col.models.by_name(modelName)['id'])\n french_sentences_ids = mw.col.db.all(\"SELECT id from notes where mid = '\" + model_id + \"'\")\n\n for id in french_sentences_ids:\n id = str(id)[1:-1]\n id = int(id)\n\n french_note = mw.col.getNote(id)\n example = {'fr': BeautifulSoup(french_note[example_field], 'html.parser').get_text().replace(\"\\'\", \"’\"),\n 'en': BeautifulSoup(french_note[translated_example_field], 'html.parser').get_text().replace(\n \"\\'\",\n \"’\"),\n 'audio': BeautifulSoup(french_note[example_audio_field], 'html.parser').get_text().replace(\"\\'\",\n \"’\"),\n 'noteId': french_note.id}\n\n if example['fr'] not in bank_keys and example['fr'] != \"\":\n new_examples_list[new_examples] = example\n new_examples += 1\n\n if new_examples > 0:\n for newExample in new_examples_list:\n example = new_examples_list[newExample]\n did = mw.col.decks.id(bankDeckName)\n mw.col.decks.select(did)\n m = mw.col.models.byName(sentenceModelName)\n deck = mw.col.decks.get(did)\n deck['mid'] = m['id']\n mw.col.decks.save(deck)\n\n new_note = mw.col.newNote()\n mw.col.addNote(new_note)\n new_note[target_lang_field] = example['fr']\n new_note[mother_lang_field] = example['en']\n new_note[audio_field] = example['audio']\n new_note.flush()\n # mw.col.addNote(new_note)\n\n # tooltip(_(\"New Notes added to sentence bank\"), period=1000)\n\n return new_examples, new_examples_list\n else:\n logger.info('No new sentences were found in the main notes')\n\n\n###########################################################\n\n\ndef refresh_one_note(french_note):\n bank_field = french_note['Bank']\n\n if len(bank_field) > 0:\n examples_list = json.loads(bank_field)\n ex_count = len(examples_list)\n if ex_count > 0:\n\n selector = random.randint(0, ex_count - 1)\n # ensure the previous example doesn't appear again\n if examples_list[selector]['fr'] == french_note[example_field] and ex_count > 1:\n if selector >= 0 and selector < ex_count - 3:\n selector += 1\n else:\n selector -= 1\n\n french_note[example_field] = examples_list[selector]['fr']\n french_note[translated_example_field] = examples_list[selector]['en']\n french_note[example_audio_field] = \"[sound:\" + examples_list[selector]['audio'] + \"]\"\n french_note[example_id_field] = str(examples_list[selector]['noteId'])\n french_note.flush()\n else:\n french_note[example_field] = \"\"\n french_note[translated_example_field] = \"\"\n french_note[example_audio_field] = \"\"\n french_note[example_id_field] = \"\"\n french_note.flush()\n\n\n########################################################################################################\n\n\ndef delete_example_from_note(note):\n try:\n old_bank = json.loads(note['Bank'])\n try:\n old_garbage = json.loads(note['garbage'])\n except:\n old_garbage = []\n\n current_example = note[example_field]\n logger.info(old_bank)\n logger.info(len(old_bank))\n if len(old_bank) == 1:\n msg = 'This Note has only this example are you sure you want to delete it ?'\n if not askUser(msg, title=ADDON_NAME):\n return\n\n for example in old_bank:\n if example['fr'] == note[example_field]:\n 
old_bank.remove(example)\n old_garbage.append(example)\n break\n note['Bank'] = json.dumps(old_bank, ensure_ascii=False)\n note['garbage'] = json.dumps(old_garbage, ensure_ascii=False)\n\n\n except:\n pass\n\n note[example_field] = \"\"\n note[translated_example_field] = \"\"\n note[example_audio_field] = \"\"\n note[example_id_field] = \"\"\n note.flush()\n\n########################################################################################################\n","repo_name":"ShoroukAziz/multiple-examples-per-note","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"10100091682","text":"from question_model import Question\nfrom data import question_data\nfrom quiz_brain import QuizBrain\n\nquestion_bank = []\n\n# make a list of question objects using the database\nfor question in question_data:\n question_text = question[\"text\"]\n question_answer = question[\"answer\"]\n new_question = Question(question_text, question_answer)\n question_bank.append(new_question)\n\n# create quizbrain object and feed the question bank into it\nquiz = QuizBrain(question_bank)\nwhile quiz.still_has_questions():\n quiz.next_question()\n\nnum_questions = quiz.num_questions\nscore = quiz.score\nprint(\"You've completed the quiz.\")\nprint(f\"Your score is {score}/{num_questions}.\")\n","repo_name":"srmcnutt/100DaysOfCode","sub_path":"d17-quiz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"24397740349","text":"from fastapi import FastAPI, Request\r\nimport sqlite3\r\nimport uvicorn\r\n\r\ndef execute(query='', commit=False):\r\n try:\r\n con = sqlite3.connect('employee.db')\r\n cursor = con.cursor()\r\n resp = list(cursor.execute(query))\r\n if commit:\r\n con.commit()\r\n con.close()\r\n if resp:\r\n return resp\r\n else:\r\n return ''\r\n except Exception as E:\r\n print(E)\r\n return 'sqlite error'\r\n\r\napp = FastAPI()\r\n\r\n@app.post(\"/add_employee\")\r\nasync def push_record(req: Request):\r\n data = await req.json()\r\n record = (data['id'], data['name'])\r\n return execute('insert into records values(%s, \"%s\")'%record, True)\r\n\r\n@app.get(\"/list_employee\")\r\nasync def list_records():\r\n return execute('select * from records')\r\n\r\n@app.get(\"/fetch_employee\")\r\nasync def fetch_record(Id: int):\r\n return execute('select * from records where emp_id=%s'%(Id))\r\n\r\n\r\nif __name__ == '__main__':\r\n uvicorn.run(app, port=8080, host='0.0.0.0')","repo_name":"hbethapudi/fastAPI","sub_path":"Employee.py","file_name":"Employee.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"5566306504","text":"from collections import OrderedDict\nfrom contextlib import suppress\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict\n\nfrom torch import Tensor\n\nimport lightning.pytorch as pl\nfrom lightning.pytorch.core.optimizer import do_nothing_closure\nfrom lightning.pytorch.loops import _Loop\nfrom lightning.pytorch.loops.optimization.closure import OutputResult\nfrom lightning.pytorch.loops.progress import _Progress, _ReadyCompletedTracker\nfrom lightning.pytorch.trainer import call\nfrom lightning.pytorch.utilities.exceptions import MisconfigurationException\nfrom lightning.pytorch.utilities.types 
import STEP_OUTPUT\n\n\n@dataclass\nclass ManualResult(OutputResult):\n \"\"\"A container to hold the result returned by ``_ManualOptimization``.\n\n It is created from the output of :meth:`~lightning.pytorch.core.LightningModule.training_step`.\n\n Attributes:\n extra: Anything returned by the ``training_step``.\n\n \"\"\"\n\n extra: Dict[str, Any] = field(default_factory=dict)\n\n @classmethod\n def from_training_step_output(cls, training_step_output: STEP_OUTPUT) -> \"ManualResult\":\n extra = {}\n if isinstance(training_step_output, dict):\n extra = training_step_output.copy()\n elif isinstance(training_step_output, Tensor):\n extra = {\"loss\": training_step_output}\n elif training_step_output is not None:\n raise MisconfigurationException(\n \"In manual optimization, `training_step` must either return a Tensor or have no return.\"\n )\n\n if \"loss\" in extra:\n # we detach manually as it's expected that it will have a `grad_fn`\n extra[\"loss\"] = extra[\"loss\"].detach()\n\n return cls(extra=extra)\n\n def asdict(self) -> Dict[str, Any]:\n return self.extra\n\n\n_OUTPUTS_TYPE = Dict[str, Any]\n\n\nclass _ManualOptimization(_Loop):\n \"\"\"A special loop implementing what is known in Lightning as Manual Optimization where the optimization happens\n entirely in the :meth:`~lightning.pytorch.core.LightningModule.training_step` and therefore the user is responsible\n for back-propagating gradients and making calls to the optimizers.\n\n This loop is a trivial case because it performs only a single iteration (calling directly into the module's\n :meth:`~lightning.pytorch.core.LightningModule.training_step`) and passing through the output(s).\n\n \"\"\"\n\n output_result_cls = ManualResult\n\n def __init__(self, trainer: \"pl.Trainer\") -> None:\n super().__init__(trainer)\n # since manual optimization does not track lr scheduler or optimizer frequencies, we use a simpler progress than\n # `_OptimizationProgress`\n self.optim_step_progress = _Progress.from_defaults(_ReadyCompletedTracker)\n\n self._output: _OUTPUTS_TYPE = {}\n\n def run(self, kwargs: OrderedDict) -> _OUTPUTS_TYPE:\n self.on_run_start()\n with suppress(StopIteration): # no loop to break at this level\n self.advance(kwargs)\n self._restarting = False\n return self.on_run_end()\n\n def on_run_start(self) -> None:\n # inject logic around the optimizer step\n for lightning_optimizer in self.trainer.strategy._lightning_optimizers:\n lightning_optimizer._on_before_step = self._on_before_step\n lightning_optimizer._on_after_step = self._on_after_step\n\n def advance(self, kwargs: OrderedDict) -> None:\n \"\"\"Performs the training step for manual optimization.\n\n Args:\n kwargs: The kwargs passed down to the hooks.\n\n \"\"\"\n trainer = self.trainer\n\n # manually capture logged metrics\n training_step_output = call._call_strategy_hook(trainer, \"training_step\", *kwargs.values())\n del kwargs # release the batch from memory\n self.trainer.strategy.post_training_step() # unused hook - call anyway for backward compatibility\n result = self.output_result_cls.from_training_step_output(training_step_output)\n\n self._output = result.asdict()\n\n def on_run_end(self) -> _OUTPUTS_TYPE:\n \"\"\"Returns the result of this loop, i.e., the post-processed outputs from the training step.\"\"\"\n output, self._output = self._output, {} # free memory\n # reset logic around the optimizer step\n for lightning_optimizer in self.trainer.strategy._lightning_optimizers:\n lightning_optimizer._on_before_step = do_nothing_closure\n 
lightning_optimizer._on_after_step = do_nothing_closure\n return output\n\n def _on_before_step(self) -> None:\n self.optim_step_progress.increment_ready()\n self.trainer.profiler.start(\"optimizer_step\")\n\n def _on_after_step(self) -> None:\n self.trainer.profiler.stop(\"optimizer_step\")\n self.optim_step_progress.increment_completed()\n","repo_name":"Lightning-AI/lightning","sub_path":"src/lightning/pytorch/loops/optimization/manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":25184,"dataset":"github-code","pt":"93"} +{"seq_id":"20517797331","text":"\nimport pymongo\nfrom flask import request\nfrom flask import jsonify\nfrom bson.json_util import dumps\nimport flask\n\napp = flask.Flask(__name__)\n#app.config[\"DEBUG\"] = True\nlog = app.logger\n\nclient = pymongo.MongoClient(\"mongodb+srv://user01:bl4ck4dd3r@cluster0-kooqx.mongodb.net/test?retryWrites=true&w=majority\")\ndb = client.sample_training\ncollection = db.zips\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"
<h1>Distant Reading Archive</h1><p>This site is a prototype API for distant reading of science fiction novels.</p>
\"\n\n\n# A route to return all of the available entries in our catalog.\n@app.route('/api/get/city/', methods=['GET'])\ndef get_city():\n name = request.args.get('name')\n result = collection.find({\"city\":name})\n op = []\n for obj in result:\n obj.pop('_id', None)\n op.append(obj) \n \n return jsonify(op)\n\n@app.route('/api/put/insert/', methods=['PUT'])\ndef insert_city_with_check():\n city = request.args.get('city')\n loc_x = request.args.get('loc_x')\n loc_y = request.args.get('loc_y')\n pop = request.args.get('pop')\n state = request.args.get('state')\n Zip = request.args.get('zip')\n\n if city and loc_x and loc_y and pop and state and Zip:\n result = collection.find({\"city\":city})\n op = []\n for obj in result:\n obj.pop('_id', None)\n obj.update({\"ERROR\": \"City already exists\"})\n op.append(obj)\n if op:\n return jsonify(op)\n else:\n test_json = {\"city\":city,\"loc\":{\"x\":loc_x,\"y\":loc_y},\"pop\":pop,\"state\":state,\"zip\":Zip}\n post_id = collection.insert_one(test_json).inserted_id\n return \"
<h1>Inserted Successfully</h1>\"\n    else:\n        return \"
<h1>Please give all keys</h1>\"\n\n@app.route('/api/post/insert/', methods=['POST'])\ndef insert_city_without_check():\n    city = request.args.get('city')\n    loc_x = request.args.get('loc_x')\n    loc_y = request.args.get('loc_y')\n    pop = request.args.get('pop')\n    state = request.args.get('state')\n    Zip = request.args.get('zip')\n\n    if city and loc_x and loc_y and pop and state and Zip:\n        test_json = {\"city\":city,\"loc\":{\"x\":loc_x,\"y\":loc_y},\"pop\":pop,\"state\":state,\"zip\":Zip}\n        post_id = collection.insert_one(test_json).inserted_id\n        return \"
<h1>Inserted Successfully</h1>\"\n\n    else:\n        return \"
Please give all keys \"\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"Acquil/tespy-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"72852987509","text":"import json\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom books.models import Author, Book\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n with open(\"books/static/data/books.json\", \"r\") as json_file:\n data = json.load(json_file)\n\n for book in data:\n\n author, created = Author.objects.get_or_create(name=book[\"author\"])\n\n book, created = Book.objects.get_or_create(\n title=book[\"title\"],\n defaults={\n \"author\": author,\n \"pages\": book[\"pages\"],\n \"country\": book[\"country\"],\n \"language\": book[\"language\"],\n \"link\": book[\"link\"],\n \"year\": book[\"year\"],\n \"image_link\": book[\"imageLink\"],\n },\n )\n\n print(book)","repo_name":"jangeador/django_books","sub_path":"books/management/commands/import_books.py","file_name":"import_books.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"34435776513","text":"import pandas as pd\nimport re\nimport os\nfrom digital_manuscript import BnF\n\nif not os.path.exists('categories'):\n os.mkdir('categories')\n\nmanuscript = BnF()\ndf = pd.read_csv('categories.csv').fillna('')\nidentity_dict = {}\n\nfor i, row in df.iterrows():\n cat_string = ';'.join([x for x in [row.category, row.category1, row.category2] if x])\n identity_dict[row.div_id] = cat_string\n\nfor entry in manuscript.entries:\n cat_string = identity_dict.get(entry.identity, '')\n new_text = entry.versions['tl'].replace('&1 > /dev/null && python evaluate_model.py --load_from={trial_name} --evaluate_for={args.evaluate_for} 2> /dev/null | tail -n1\").read().strip().split()\n# print(output)\n return -float(output[-2])\n\n\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n\n study = optuna.create_study()\n study.optimize(evaluate, n_trials=args.n_trials)","repo_name":"kubic71/mff","sub_path":"2020-zs/rl/06_car_racing/optuna_test.py","file_name":"optuna_test.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39231535415","text":"#lambda arguments: expression - onle line function\nadd10 =lambda x: x+10 # same lambda function in one line\nprint(add10(6))\n\ndef add10_func(x): # Same function\n return x+10\nprint(add10_func(8))\n\nmult= lambda x,y: x*y # simple function used once or to hire other functions arguments\nprint(mult(5,9))\n\n\n#---- sorting\npoints2D = [(1,2),(15,1),(5,-1),(10,4)] \npoints2D_sorted=sorted(points2D, key=lambda x: x[1]) # uses lambda to sort the points in the list\n\ndef sort_by_y(x): # use function and lambda, long way, to sort the points in the list\n return x[1]\npoints2D_sorted2=sorted(points2D, key=sort_by_y)\n\nprint(points2D)\nprint(points2D_sorted)\nprint(points2D_sorted2)\n\npoints2D_sorted_2v=sorted(points2D, key=lambda x: x[0]+x[1])\nprint(points2D_sorted_2v)\n\n\n#map(func, seq) map function transforms each element with a function \na=[1,2,3,4,5] # lambda function sintax\nb=map(lambda x: x*4,a)\nprint(list(b))\n\nc= [x*4 for x in a] #Another sintax without use map and lambda, same result\nprint(c)\n\n#Filter: 
filter(func,seq)\na=[1,2,3,4,5,6]\nb=filter(lambda x: x%2==0,a)\nprint(list(b))\n\nc=[x for x in a if x%2==0]\nprint(c)\n\n#Reduce(func,seq) #It applies in functions,repeteadly in values to return a single value, always two arguments\nfrom functools import reduce\na=[1,2,3,4,5,6]\nproduct_a=reduce(lambda x,y: x*y,a)\nprint(product_a)","repo_name":"monicalimachi/python_learning","sub_path":"intermediate/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72654249269","text":"import graphene\nimport boto3\n\n\nclass Cluster(graphene.ObjectType):\n arn = graphene.String(description='Cluster ARN')\n name = graphene.String(description='Cluster Name')\n status = graphene.String(description='Cluster Status')\n registeredContainerInstancesCount = graphene.Int(description='Container Instances Count')\n runningTasksCount = graphene.Int(description='Running Tasks Count')\n pendingTasksCount = graphene.Int(description='Pending Tasks Count')\n activeServicesCount = graphene.Int(description='Active Services Count')\n\n\nclass Query(graphene.ObjectType):\n\n clusters = graphene.List(Cluster)\n\n def resolve_clusters(self, args, context, info):\n client = boto3.client('ecs')\n response = client.list_clusters()\n response = client.describe_clusters(clusters=response['clusterArns'])\n clusters = []\n for cluster in response['clusters']:\n '''\n {\n 'clusterArn': 'string',\n 'clusterName': 'string',\n 'status': 'string',\n 'registeredContainerInstancesCount': 123,\n 'runningTasksCount': 123,\n 'pendingTasksCount': 123,\n 'activeServicesCount': 123\n }\n '''\n obj = Cluster(arn=cluster['clusterArn'],\n name=cluster['clusterName'],\n status=cluster['status'],\n registeredContainerInstancesCount=cluster['registeredContainerInstancesCount'],\n runningTasksCount=cluster['runningTasksCount'],\n pendingTasksCount=cluster['pendingTasksCount'],\n activeServicesCount=cluster['activeServicesCount'])\n clusters.append(obj)\n return clusters\n\nschema = graphene.Schema(query=Query)\nquery = '''\n query something{\n clusters {\n arn\n name\n status\n activeServicesCount\n }\n }\n'''\n\n\nif __name__ == '__main__':\n result = schema.execute(query)\n print(result.data)","repo_name":"gistable/gistable","sub_path":"dockerized-gists/d59ef30d84bb3a9dd1d578e9fc4343cf/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"1670705087","text":"import cv2\nimport numpy as np\nfrom math import pi, sin, cos\nfrom scipy import signal\nimport toolbox.pixeltools\n\nclass HoughTransform:\n def get_points_and_radius_range(self, img):\n points = PixelTools.get_white_pixels(img)\n center = np.array([img.shape[0]//2, img.shape[1]//2])\n points -= center\n\n radiuses = np.linalg.norm(points, axis = 1)\n radius_max = radiuses.max()\n radius_min = -radius_max\n return points, (radius_min, radius_max)\n\n def get_prevote_projections(self, points, center, n_radius_bins, theta):\n angle_vec = np.array([-sin(theta), cos(theta)])\n projs = np.dot(points, angle_vec.T)\n return projs\n\n def remove_small_contours(self, img, min_contour_size):\n ret, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n for i in range(0, len(contours)):\n if contours[i].shape[0] < min_contour_size:\n cv2.drawContours(img, contours, i, 0)\n return img\n\n def draw_houghs(self, 
image, hough, radius_bounds, hough_points, color = (0,255,0)):\n center = np.array([image.shape[0]//2, image.shape[1]//2])\n max_mag = np.linalg.norm(np.array(image.shape))\n t_bounds = (-max_mag, max_mag)\n for point in hough_points:\n r = (radius_bounds[1]-radius_bounds[0])*(point[0]/hough.shape[0])+radius_bounds[0]\n theta = 2.0 * pi * point[1]/hough.shape[1]\n base_x = r*cos(theta)+center[1]\n base_y = center[0] - r*sin(theta)\n\n p1 = (int(base_x + cos(theta + pi/2.0)*t_bounds[0]), int(base_y - sin(theta + pi/2.0)*t_bounds[0]))\n p2 = (int(base_x + cos(theta + pi/2.0)*t_bounds[1]), int(base_y - sin(theta + pi/2.0)*t_bounds[1]))\n image = cv2.line(image, p1, p2, color)\n return image\n","repo_name":"peterhusisian/CV-Experimenting","sub_path":"line_detect/HoughTransform.py","file_name":"HoughTransform.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70845807031","text":"import os\nimport sys\n\nimport certifi\nfrom babel.messages import frontend as babel\nfrom cx_Freeze import Executable, setup\n\nVERSION = \"0.1\"\n\nWIN_NT = os.name == \"nt\"\n\nSRC_PATH = os.path.dirname(__file__)\nROOT_PATH = os.path.join(SRC_PATH, '..')\nCERT_PATH = certifi.where()\n\nif not CERT_PATH:\n print(\"Couldn't find cacert.pem for SSL requests.\")\n sys.exit()\n\nincludefiles = [\n (os.path.join(SRC_PATH, 'database/schema.sql'), 'database/schema.sql'),\n (os.path.join(SRC_PATH, 'conf/scripts/example'), 'conf/scripts/example'),\n (os.path.join(SRC_PATH, 'conf/scripts/greeter'), 'conf/scripts/greeter'),\n (os.path.join(SRC_PATH, 'conf/marquee/example'), 'conf/marquee/example'),\n (os.path.join(SRC_PATH, '../locale'), 'locale/'),\n (os.path.join(SRC_PATH, 'lua_bridge/init.lua'), 'lua_bridge/init.lua'),\n (CERT_PATH, 'certifi/cacert.pem'),\n]\n\ntarget_name = \"magicked_admin\"\nif WIN_NT:\n target_name += \".exe\"\n\nbuild_exe_options = {\n \"packages\": [\"os\", \"queue\", \"idna\", \"lxml\", \"requests\", \"encodings\"],\n \"excludes\": [\"tkinter\"],\n \"includes\": [],\n \"include_files\": includefiles,\n \"include_msvcr\": True,\n \"optimize\": 2,\n \"build_exe\": os.path.join(ROOT_PATH, 'bin/magicked_admin'),\n \"zip_include_packages\": \"*\",\n \"zip_exclude_packages\": \"\"\n}\nsetup(\n name=\"Magicked Admin\",\n version=VERSION + \".0\",\n description=\"Scripted management, stats, and bot for KF2-Server\",\n options={\"build_exe\": build_exe_options},\n executables=[\n Executable(os.path.join(SRC_PATH, \"magicked_admin.py\"),\n base=None,\n targetName=target_name,\n icon=os.path.join(SRC_PATH, \"icon.ico\")\n )\n ],\n cmdclass={\n 'compile_catalog': babel.compile_catalog,\n 'extract_messages': babel.extract_messages,\n 'init_catalog': babel.init_catalog,\n 'update_catalog': babel.update_catalog\n }\n)\n","repo_name":"th3-z/kf2-magicked-admin","sub_path":"magicked_admin/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"94"} +{"seq_id":"43146965048","text":"from functools import partial\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QColor, QPen, QIcon\nfrom PyQt5.QtWidgets import (\n QWidget,\n QVBoxLayout,\n QHBoxLayout,\n QSpacerItem,\n QSizePolicy,\n QPushButton,\n QMessageBox,\n QAction,\n QMenu,\n)\n\nfrom .simpleEdgeFeatureSelection import SimpleEdgeFeatureSelection\nfrom lazyflow.utility.orderedSignal 
import OrderedSignal\n\nfrom ilastik.utility.gui import threadRouted, silent_qobject\nfrom ilastik.shell.gui.iconMgr import ilastikIcons\nfrom ilastik.applets.layerViewer.layerViewerGui import LayerViewerGui\nfrom ilastik.config import cfg as ilastik_config\n\nfrom volumina.api import createDataSource\nfrom volumina.layer import SegmentationEdgesLayer, LabelableSegmentationEdgesLayer\nfrom volumina.utility import ShortcutManager\n\nfrom lazyflow.request import Request\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass EdgeTrainingMixin:\n    DEFAULT_PEN = QPen(SegmentationEdgesLayer.DEFAULT_PEN)\n    DEFAULT_PEN.setColor(Qt.yellow)\n\n    # signal used to synchronize live update button enable status across lanes\n    labels_updated = OrderedSignal()\n\n    ###########################################\n    ### AppletGuiInterface Concrete Methods ###\n    ###########################################\n\n    def appletDrawer(self):\n        return self._drawer\n\n    def stopAndCleanUp(self):\n        # Unsubscribe to all signals\n        for fn in self.__cleanup_fns:\n            fn()\n\n        # Base class\n        super().stopAndCleanUp()\n\n    ###########################################\n    ###########################################\n    def __init_subclass__(cls, **kwargs):\n        \"\"\"Make sure Mixin can only be used with LayerViewerGui\"\"\"\n        assert issubclass(cls, LayerViewerGui), \"Mixin should only be used with LayerViewerGui\"\n        super().__init_subclass__(**kwargs)\n\n    def __init__(self, parentApplet, topLevelOperatorView, **kwargs):\n        self._currently_updating = False\n        self.__cleanup_fns = []\n        self.parentApplet = parentApplet\n        self.topLevelOperatorView = topLevelOperatorView\n        super().__init__(parentApplet, topLevelOperatorView, **kwargs)\n\n        self._init_edge_label_colortable()\n        self._init_probability_colortable()\n\n        # init features\n        if not self.topLevelOperatorView.FeatureNames.ready():\n            self.topLevelOperatorView.FeatureNames.setValue(self._get_default_feature_selection())\n\n    def _after_init(self):\n        super()._after_init()\n        self.update_probability_edges()\n\n        # Initialize everything with the operator's initial values\n        self.configure_gui_from_operator()\n        # need to call this callback manually, as signals are blocked in\n        # configure_gui_from_operator()\n        self._handle_live_update_clicked(self.live_update_button.isChecked())\n\n    def createDrawerControls(self):\n        op = self.topLevelOperatorView\n\n        def configure_update_handlers(qt_signal, op_slot):\n            qt_signal.connect(self.configure_operator_from_gui)\n            cleanup_fn = op_slot.notifyDirty(self.configure_gui_from_operator, defer=True)\n            self.__cleanup_fns.append(cleanup_fn)\n\n        # Controls\n        feature_selection_button = QPushButton(\n            text=\"Select Features\",\n            icon=QIcon(ilastikIcons.AddSel),\n            toolTip=\"Select edge/superpixel features to use for classification.\",\n            clicked=self._open_feature_selection_dlg,\n        )\n        self.train_from_gt_button = QPushButton(\n            text=\"Auto-label\",\n            icon=QIcon(ilastikIcons.Segment),\n            toolTip=\"Automatically label all edges according to your pre-loaded groundtruth volume.\",\n            clicked=self._handle_label_from_gt_clicked,\n        )\n        self.clear_labels_button = QPushButton(\n            text=\"Clear Labels\",\n            icon=QIcon(ilastikIcons.Clear),\n            toolTip=\"Remove all edge labels. 
(Start over on this image.)\",\n clicked=self._handle_clear_labels_clicked,\n )\n self.live_update_button = QPushButton(\n text=\"Live Predict\",\n checkable=True,\n icon=QIcon(ilastikIcons.Play),\n toolTip=\"Update the edge classifier predictions\",\n clicked=self._handle_live_update_clicked,\n enabled=False,\n )\n\n configure_update_handlers(self.live_update_button.toggled, op.FreezeClassifier)\n configure_update_handlers(self.train_from_gt_button.toggled, op.TrainRandomForest)\n\n cleanup_fn = op.EdgeLabelsDict.notifyDirty(self.any_edge_annotations_available)\n self.__cleanup_fns.append(cleanup_fn)\n\n # call once when instantiating with a saved project to make the live update button available\n # if there are annotations loaded from file.\n self.labels_updated.subscribe(self.enable_live_update_button)\n self.__cleanup_fns.append(partial(self.labels_updated.unsubscribe, self.enable_live_update_button))\n self.any_edge_annotations_available()\n\n # Layout\n label_layout = QHBoxLayout()\n label_layout.addWidget(self.clear_labels_button)\n label_layout.addWidget(self.train_from_gt_button)\n label_layout.setSpacing(1)\n\n layout = QVBoxLayout()\n layout.addWidget(feature_selection_button)\n layout.setSpacing(1)\n layout.addLayout(label_layout)\n layout.addWidget(self.live_update_button)\n layout.addSpacerItem(QSpacerItem(0, 10, QSizePolicy.Minimum, QSizePolicy.Expanding))\n\n # Finally, the whole drawer widget\n drawer = QWidget(parent=self)\n drawer.setLayout(layout)\n\n # Widget Shortcuts\n mgr = ShortcutManager()\n ActionInfo = ShortcutManager.ActionInfo\n shortcut_group = \"Edge Training\"\n mgr.register(\n \"l\",\n ActionInfo(\n shortcut_group,\n \"Live Predict\",\n \"Toggle live edge classifier update mode\",\n self.live_update_button.toggle,\n self.live_update_button,\n self.live_update_button,\n ),\n )\n\n return drawer\n\n def any_edge_annotations_available(self, *args, **kwargs):\n any_have_edges = False\n op = self.topLevelOperatorView\n top_level_edge_labels_dict = op.EdgeLabelsDict.top_level_slot\n assert top_level_edge_labels_dict.level == 1\n for subslot in op.EdgeLabelsDict.top_level_slot:\n any_have_edges = subslot.ready() and bool(subslot.value)\n if any_have_edges:\n break\n\n self.labels_updated(any_have_edges)\n\n @threadRouted\n def enable_live_update_button(self, enable):\n self.live_update_button.setEnabled(enable)\n\n def initAppletDrawerUi(self):\n \"\"\"\n Overridden from base class (LayerViewerGui)\n \"\"\"\n\n op = self.topLevelOperatorView\n cleanup_fn = op.GroundtruthSegmentation.notifyReady(self.configure_gui_from_operator, defer=True)\n self.__cleanup_fns.append(cleanup_fn)\n\n def _get_default_feature_selection(self):\n raw_channels = self.topLevelOperatorView.RawData.meta.channel_names\n raw_is_3d = self.topLevelOperatorView.RawData.meta.getTaggedShape().get(\"z\", 1) > 1\n selected_input_channels = self.topLevelOperatorView.WatershedSelectedInput.meta.channel_names\n return SimpleEdgeFeatureSelection.default_features(raw_channels, selected_input_channels, raw_is_3d)\n\n def _open_feature_selection_dlg(self):\n if not self.topLevelOperatorView.FeatureNames.ready():\n self.topLevelOperatorView.FeatureNames.setValue(self._get_default_feature_selection())\n current_selection = self.topLevelOperatorView.FeatureNames.value\n\n def decodeToStringIfBytes(s):\n if isinstance(s, bytes):\n return s.decode()\n else:\n return s\n\n rag = self.topLevelOperatorView.Rag.value\n feature_names = rag.supported_features()\n channel_names = 
self.topLevelOperatorView.VoxelData.meta.channel_names\n channel_names = [decodeToStringIfBytes(s) for s in channel_names]\n feature_names = [decodeToStringIfBytes(s) for s in feature_names]\n\n initial_selections = {}\n for key, value in current_selection.items():\n initial_selections[decodeToStringIfBytes(key)] = [decodeToStringIfBytes(s) for s in value]\n\n raw_channels = self.topLevelOperatorView.RawData.meta.channel_names\n selected_input_channels = self.topLevelOperatorView.WatershedSelectedInput.meta.channel_names\n probability_channels = [x for x in channel_names if x not in raw_channels + selected_input_channels]\n raw_is_3d = self.topLevelOperatorView.RawData.meta.getTaggedShape().get(\"z\", 1) > 1\n\n dlg = SimpleEdgeFeatureSelection(\n raw_channels,\n selected_input_channels,\n probability_channels,\n initial_selections,\n supported_features=feature_names,\n data_is_3d=raw_is_3d,\n parent=self,\n )\n res = dlg.exec_()\n if res != dlg.Accepted:\n return\n\n self.topLevelOperatorView.FeatureNames.setValue(dlg.selections())\n\n # Configure the handler for updated edge label maps\n def _init_edge_label_colortable(self):\n self.edge_label_colortable = [\n QColor(0, 255, 0, 255), # green\n QColor(255, 0, 0, 255),\n ] # red\n\n self.edge_label_pen_table = [\n self.DEFAULT_PEN,\n ]\n for color in self.edge_label_colortable:\n pen = QPen(SegmentationEdgesLayer.DEFAULT_PEN)\n pen.setColor(color)\n pen.setWidth(5)\n self.edge_label_pen_table.append(pen)\n # When the edge labels are dirty, update the edge label layer pens\n op = self.topLevelOperatorView\n cleanup_fn = op.EdgeLabelsDict.notifyDirty(self.update_labeled_edges, defer=True)\n self.__cleanup_fns.append(cleanup_fn)\n\n @threadRouted\n def update_labeled_edges(self, *args):\n op = self.topLevelOperatorView\n edge_label_layer = self.getLayerByName(\"Edge Labels\")\n if not edge_label_layer:\n return\n\n edge_label_layer.overwrite_edge_labels(op.EdgeLabelsDict.value)\n\n def _handle_edge_label_clicked(self, updated_edge_labels):\n \"\"\"\n The user clicked an edge label.\n Update the operator with the new values.\n \"\"\"\n op = self.topLevelOperatorView\n edge_labels = op.EdgeLabelsDict.value\n\n new_labels = dict(edge_labels)\n new_labels.update(updated_edge_labels)\n for sp_id_pair, new_label in dict(new_labels).items():\n if new_label == 0:\n del new_labels[sp_id_pair]\n\n op.EdgeLabelsDict.setValue(new_labels)\n\n def _handle_label_from_gt_clicked(self):\n def train_from_gt():\n try:\n op = self.topLevelOperatorView\n op.setEdgeLabelsFromGroundtruth(op.current_view_index())\n finally:\n self.parentApplet.busy = False\n self.parentApplet.progressSignal(100)\n self.parentApplet.appletStateUpdateRequested()\n\n self.parentApplet.busy = True\n self.parentApplet.progressSignal(-1)\n self.parentApplet.appletStateUpdateRequested()\n\n Request(train_from_gt).submit()\n\n def _handle_clear_labels_clicked(self):\n response = QMessageBox.warning(\n self,\n \"Clear Labels?\",\n \"This will clear all edge labels in the current image.\\nAre you sure?\",\n buttons=QMessageBox.Ok | QMessageBox.Cancel,\n )\n if response == QMessageBox.Ok:\n op = self.topLevelOperatorView\n op.EdgeLabelsDict.setValue({})\n op.FreezeClassifier.setValue(True)\n\n def _handle_live_update_clicked(self, checked):\n if checked:\n probs_layer = self.getLayerByName(\"Edge Probabilities\")\n if probs_layer:\n probs_layer.visible = True\n\n # Configure the handler for updated probability maps\n # FIXME: Should we make a new Layer subclass that handles this colortable 
mapping for us? Yes.\n\n    def _init_probability_colortable(self):\n        self.probability_colortable = []\n        # TODO: move to volumina.colortables\n        for v in np.linspace(0, 255, num=101, dtype=int):\n            self.probability_colortable.append(QColor(v, 255 - v, 0))\n\n        self.probability_pen_table = []\n        for color in self.probability_colortable:\n            pen = QPen(SegmentationEdgesLayer.DEFAULT_PEN)\n            pen.setColor(color)\n            self.probability_pen_table.append(pen)\n\n        # When the edge probabilities are dirty, update the probability edge layer pens\n        op = self.topLevelOperatorView\n        cleanup_fn = op.EdgeProbabilitiesDict.notifyDirty(self.update_probability_edges, defer=True)\n        self.__cleanup_fns.append(cleanup_fn)\n\n    def update_probability_edges(self, *args):\n        def _impl():\n            op = self.topLevelOperatorView\n            if not self.getLayerByName(\"Edge Probabilities\"):\n                return\n            edge_probs = op.EdgeProbabilitiesDict.value\n            new_pens = {}\n            for id_pair, probability in list(edge_probs.items()):\n                new_pens[id_pair] = self.probability_pen_table[int(probability * 100)]\n            self.apply_new_probability_edges(new_pens)\n\n        # submit the workload in a request and return immediately\n        req = Request(_impl).submit()\n\n        # Now that we've trained the classifier, the workflow may wish to enable downstream applets.\n        self.parentApplet.appletStateUpdateRequested()\n\n    @threadRouted\n    def apply_new_probability_edges(self, new_pens):\n        # This function is threadRouted because you can't\n        # touch the layer colortable outside the main thread.\n        superpixel_edge_layer = self.getLayerByName(\"Edge Probabilities\")\n        if superpixel_edge_layer:\n            superpixel_edge_layer.pen_table.overwrite(new_pens)\n\n    @contextmanager\n    def set_updating(self):\n        assert not self._currently_updating\n        self._currently_updating = True\n        try:\n            yield\n        finally:\n            self._currently_updating = False\n\n    def configure_gui_from_operator(self, *args):\n        if self._currently_updating:\n            return False\n        with self.set_updating():\n            op = self.topLevelOperatorView\n            with silent_qobject(self.train_from_gt_button) as w:\n                w.setEnabled(op.GroundtruthSegmentation.ready())\n            with silent_qobject(self.live_update_button) as w:\n                w.setChecked(not op.FreezeClassifier.value)\n            if op.FreezeClassifier.value:\n                self.live_update_button.setIcon(QIcon(ilastikIcons.Play))\n            else:\n                self.live_update_button.setIcon(QIcon(ilastikIcons.Pause))\n\n    def configure_operator_from_gui(self):\n        if self._currently_updating:\n            return False\n        with self.set_updating():\n            op = self.topLevelOperatorView\n            op.FreezeClassifier.setValue(not self.live_update_button.isChecked())\n\n    def create_prefetch_menu(self, layer_name):\n        def prefetch_layer(axis=\"z\"):\n            layer_index = self.layerstack.findMatchingIndex(lambda l: l.name == layer_name)\n            num_slices = self.editor.dataShape[\"txyzc\".index(axis)]\n            view2d = self.editor.imageViews[\"xyz\".index(axis)]\n            view2d.scene().triggerPrefetch([layer_index], spatial_axis_range=(0, num_slices))\n\n        prefetch_menu = QMenu(\"Prefetch\")\n        prefetch_menu.addAction(QAction(\"All Z-slices\", prefetch_menu, triggered=partial(prefetch_layer, \"z\")))\n        prefetch_menu.addAction(QAction(\"All Y-slices\", prefetch_menu, triggered=partial(prefetch_layer, \"y\")))\n        prefetch_menu.addAction(QAction(\"All X-slices\", prefetch_menu, triggered=partial(prefetch_layer, \"x\")))\n        return prefetch_menu\n\n    def setupLayers(self):\n        layers = []\n        op = self.topLevelOperatorView\n        ActionInfo = ShortcutManager.ActionInfo\n\n        superpixels_ready = op.Superpixels.ready()\n        with_training = op.TrainRandomForest.value\n\n        # 
Superpixels -- Edge Probabilities\n if superpixels_ready and op.EdgeProbabilitiesDict.ready() and with_training:\n layer = SegmentationEdgesLayer(createDataSource(op.Superpixels), isHoverable=True)\n layer.name = \"Edge Probabilities\" # Name is hard-coded in multiple places: grep before changing.\n layer.visible = False\n layer.opacity = 1.0\n self.update_probability_edges() # Initialize\n\n layer.contexts.append(self.create_prefetch_menu(\"Edge Probabilities\"))\n\n layer.shortcutRegistration = (\n \"p\",\n ActionInfo(\n \"Edge Training Layers\",\n \"EdgePredictionsVisibility\",\n \"Show/Hide Edge Predictions\",\n layer.toggleVisible,\n self.viewerControlWidget(),\n layer,\n ),\n )\n\n layers.append(layer)\n del layer\n\n # Superpixels -- Edge Labels\n if superpixels_ready and op.EdgeLabelsDict.ready() and with_training:\n edge_labels = op.EdgeLabelsDict.value\n layer = LabelableSegmentationEdgesLayer(\n createDataSource(op.Superpixels), self.edge_label_pen_table, edge_labels\n )\n layer.name = \"Edge Labels\"\n layer.visible = True\n layer.opacity = 1.0\n\n self.update_labeled_edges() # Initialize\n layer.labelsChanged.connect(self._handle_edge_label_clicked)\n layer.contexts.append(self.create_prefetch_menu(\"Edge Labels\"))\n\n layer.shortcutRegistration = (\n \"0\",\n ActionInfo(\n \"Edge Training Layers\",\n \"LabelVisibility\",\n \"Show/Hide Edge Labels\",\n layer.toggleVisible,\n self.viewerControlWidget(),\n layer,\n ),\n )\n\n layers.append(layer)\n del layer\n\n # Superpixels -- Edges\n if superpixels_ready:\n layer = SegmentationEdgesLayer(\n createDataSource(op.Superpixels), default_pen=self.DEFAULT_PEN, isHoverable=with_training\n )\n layer.name = \"Superpixel Edges\"\n layer.visible = True\n layer.opacity = 1.0\n layers.append(layer)\n del layer\n\n if ilastik_config.getboolean(\"ilastik\", \"debug\"):\n # Naive Segmentation\n if op.NaiveSegmentation.ready():\n layer = self.createStandardLayerFromSlot(op.NaiveSegmentation)\n layer.name = \"Naive Segmentation\"\n layer.visible = False\n layer.opacity = 0.5\n\n layer.shortcutRegistration = (\n \"n\",\n ActionInfo(\n \"Edge Training Layers\",\n \"NaiveSegmentationVisibility\",\n \"Show/Hide Naive Segmentation (shows output if classifier output is respected verbatim)\",\n layer.toggleVisible,\n self.viewerControlWidget(),\n layer,\n ),\n )\n\n layers.append(layer)\n del layer\n\n # Groundtruth\n if op.GroundtruthSegmentation.ready():\n layer = self.createStandardLayerFromSlot(op.GroundtruthSegmentation)\n layer.name = \"Groundtruth\"\n layer.visible = False\n layer.opacity = 0.5\n\n layer.shortcutRegistration = (\n \"g\",\n ActionInfo(\n \"Edge Training Layers\",\n \"GroundtruthVisibility\",\n \"Show/Hide Groundtruth\",\n layer.toggleVisible,\n self.viewerControlWidget(),\n layer,\n ),\n )\n\n layers.append(layer)\n del layer\n\n if ilastik_config.getboolean(\"ilastik\", \"debug\"):\n # Voxel data\n if op.VoxelData.ready():\n layer = self._create_grayscale_layer_from_slot(op.VoxelData, op.VoxelData.meta.getTaggedShape()[\"c\"])\n layer.name = \"Voxel Data\"\n layer.visible = False\n layer.opacity = 1.0\n layers.append(layer)\n del layer\n\n # Raw Data (grayscale)\n if op.RawData.ready():\n layer = self.createStandardLayerFromSlot(op.RawData)\n layer.name = \"Raw Data\"\n layer.visible = True\n layer.opacity = 1.0\n layers.append(layer)\n layer.shortcutRegistration = (\n \"i\",\n ActionInfo(\n \"Edge Training Layers\",\n \"Hide all but Raw\",\n \"Hide all but Raw\",\n partial(self.toggle_show_raw, \"Raw Data\"),\n 
self.viewerControlWidget(),\n                    layer,\n                ),\n            )\n            del layer\n\n        return layers\n\n\nclass EdgeTrainingGui(EdgeTrainingMixin, LayerViewerGui):\n    def appletDrawer(self):\n        return self.__drawer\n\n    def initAppletDrawerUi(self):\n        \"\"\"\n        Overridden from base class (LayerViewerGui)\n        \"\"\"\n        # Save these members for later use\n        self.__drawer = self.createDrawerControls()\n\n        # Initialize everything with the operator's initial values\n        self.configure_gui_from_operator()\n","repo_name":"ilastik/ilastik","sub_path":"ilastik/applets/edgeTraining/edgeTrainingGui.py","file_name":"edgeTrainingGui.py","file_ext":"py","file_size_in_byte":21962,"program_lang":"python","lang":"en","doc_type":"code","stars":312,"dataset":"github-code","pt":"94"}
{"seq_id":"38843672518","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\n\ndef plot(x, _xlabel, y, _ylabel):\n    \n    if len(x) == 1 and len(y) == 1:\n        print(\"Error in plot!! Len == 1\")\n    else:\n        ax = plt.gca()\n        ax.plot(x, y, color=\"blue\", linewidth=1.5)\n        ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\n        ax.set_xlim([np.min(x), np.max(x)])\n        ax.set_ylim([np.min(y), np.max(y)])\n\n        ax.set_xlabel(_xlabel)\n        ax.set_ylabel(_ylabel)\n        ax.set_title(f\"{_xlabel} vs {_ylabel}\")\n\n        ax.grid()\n        plt.show()\n\n","repo_name":"JoaoGustavoRogel/materiais-c210-lab","sub_path":"Redes Neurais/_plot/PlotUtils.py","file_name":"PlotUtils.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"}
{"seq_id":"13416124722","text":"from tkinter import Label, StringVar, W\nfrom tkinter.ttk import Combobox\nfrom gettext import gettext as _  # _() is normally injected by the host app; imported here so the module stands alone\n\n\nclass Plugin:\n    def __init__(self):\n        self.category='process'\n        self.name=_('Filter by Stars')\n        self.need_config=True\n\n\n    def start_config(self,frame,plug):\n        Label(frame,text=_('Approve if')).grid(row=0,column=0,padx=5,pady=5,sticky=W)\n        self.if_var=StringVar()\n        ifs=[_('Rating more than'),_('Rating is'),_('Rating less than')]\n        self.if_var.set(ifs[plug.settings.get('if',1)])\n        self.if_combo=Combobox(frame,textvariable=self.if_var,state=\"readonly\",width=20)\n        self.if_combo.grid(row=0,column=1,columnspan=1,padx=5,pady=5,sticky=W)\n        self.if_combo['values']=ifs\n\n        self.rating_var=StringVar()\n        rts=[_('Not set'),'★','★★','★★★','★★★★','★★★★★']\n        self.rating_var.set(rts[plug.settings.get('rating',1)])\n        self.rat_combo=Combobox(frame,textvariable=self.rating_var,state=\"readonly\",width=15)\n        self.rat_combo.grid(row=0,column=2,columnspan=1,padx=5,pady=5,sticky=W)\n        self.rat_combo['values']=rts\n\n\n\n\n\n\n    def save_config(self):\n        d = {'rating': self.rat_combo.current()}\n        d['if']=self.if_combo.current()\n        return d\n\n\n\n\n\n","repo_name":"kaist/photomachine","sub_path":"plugins/filter_stars/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"18478911799","text":"import datetime\nimport glob\n\ncontent = []\n\n# the original looped over an undefined name `fileName`; a glob over .txt files is assumed here\nfileNames = glob.glob('*.txt')\nfor filename in fileNames:\n    with open(filename,'r') as file:\n        content.append(file.read())\n\n\nwith open(str(datetime.datetime.now())+'.txt','w') as file2:\n    for cont in content:\n        file2.write(cont+'\\n')\n\n","repo_name":"RikLakhe/pythonTutorial","sub_path":"ex77.py","file_name":"ex77.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"18968827964","text":"from problem import Problem\ndef arithmetic_arranger(problems, solve=False):\n    if len(problems) > 5:\n        return 
\"Error: Too many problems.\"\n\n formattedProblems = []\n for problem in problems:\n p = Problem(problem)\n returnString = p.parseProblemString()\n if returnString:\n return returnString\n p.formatProblem(solve)\n formattedProblems.append(p)\n\n numProblems = len(formattedProblems)\n numLines = len(p.formattedLines)\n finalStrings = []\n for i in range(numLines):\n for j in range(numProblems):\n finalStrings.append(formattedProblems[j].formattedLines[i])\n if j != (numProblems - 1):\n finalStrings.append(' ' * 4)\n else:\n if i != (numLines - 1):\n finalStrings.append('\\n')\n\n final = ''.join(finalStrings)\n print(finalStrings)\n print(final)\n return final\n\n return None","repo_name":"mcfarke311/python_arithmetic_formatter","sub_path":"arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"13036565718","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('articles.views',\n url(r'^/?$', 'list_articles', name=\"list_articles\"),\n url(r'^add-article/?$', 'add_article', name=\"add_article\"),\n url(r'^delete-article/?$', 'delete_article', name=\"delete_article\"),\n url(r'^delete-image/?$', 'delete_image', name=\"delete_image\"),\n \n url(r'^categories/?$', 'categories', name=\"categories\"),\n url(r'^add-category/?$', 'add_category', name=\"add_category\"),\n url(r'^delete-category/?$', 'delete_category', name=\"delete_category\"),\n \n url(r'^authors/?$', 'authors', name=\"authors\"),\n url(r'^add-author/?$', 'add_author', name=\"add_author\"),\n url(r'^delete-author/?$', 'delete_author', name=\"delete_author\"),\n \n url(r'^upload-article-image/', 'upload_article_image', name='upload_article_image'),\n \n url(r'^(?P[-\\w]+)(.html)\\/?$', 'article_details'),\n \n)\n","repo_name":"sandeeprao6289/SampleArticleProject","sub_path":"articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"27828006715","text":"import os.path\nimport random\nimport shutil\nimport time\n\nimport prometheus_client as prom\nimport requests\n\n\ndef readNewFile():\n file_time = 0\n file_sum = 0\n file_n = 0\n file_new = open(\"/app/new.log\", \"r\")\n for file_line in file_new:\n file_data = file_line.split(\" \")\n file_time = float(file_data[1])\n\n file_sum = file_sum + file_time\n file_n = file_n + 1\n\n file_new.close()\n if file_n == 0:\n file_avg = 0\n else:\n file_avg = file_sum / file_n\n return file_avg\n\n\ndef checkFirstLine():\n flag = 0\n file_new = open(\"/app/new.log\", \"r\")\n file_old = open(\"/app/old.log\", \"r\")\n for line1 in file_new:\n for line2 in file_old:\n if line1 == line2:\n flag = 1\n break\n else:\n flag = 0\n break\n file_new.close()\n file_old.close()\n return flag\n\n\ndef diffFile():\n old = set((line.strip() for line in open(\"/app/old.log\")))\n new = set((line.strip() for line in open(\"/app/new.log\")))\n\n file_sum = 0\n file_n = 0\n\n for line in new:\n if line not in old:\n file_data = line.split(\" \")\n file_time = float(file_data[1])\n\n file_sum = file_sum + file_time\n file_n = file_n + 1\n\n if file_n == 0:\n file_avg = 0\n else:\n file_avg = file_sum / file_n\n return file_avg\n\n\nif __name__ == \"__main__\":\n\n active_conn = 
prom.Gauge(\n        \"konghq_active\", \"Gauge for the number of active connections\")\n    # accept_conn = prom.Gauge('konghq_accept', 'Counter for the total accepted connections')\n    # handle_conn = prom.Gauge('konghq_handle', 'Counter for the total handled connections')\n    # request_conn = prom.Gauge('konghq_request', 'Counter for the total client requests')\n    accept_conn = prom.Counter(\n        \"konghq_accept\", \"Counter for the total accepted connections\")\n    handle_conn = prom.Counter(\n        \"konghq_handle\", \"Counter for the total handled connections\")\n    request_conn = prom.Counter(\n        \"konghq_request\", \"Counter for the total client requests\")\n    read_conn = prom.Gauge(\n        \"konghq_reading\", \"Gauge for the number of reading connections\")\n    write_conn = prom.Gauge(\n        \"konghq_writing\", \"Gauge for the number of writing connections\")\n    wait_conn = prom.Gauge(\n        \"konghq_waiting\", \"Gauge for the number of waiting connections\")\n\n    rtime_desc = prom.Gauge(\n        \"konghq_rtime\", \"Gauge for the average rtime every 1 second\")\n\n    prom.start_http_server(8888)\n    url = \"http://127.0.0.1:8001/nginx_status\"\n    # url= \"https://schematics.its.ac.id/server_stats\"\n\n    file_flag = 1\n    while True:\n        try:\n            rtime_value = 0\n            if os.path.exists(\"/cuslog/custom_nginx.log\"):\n                if file_flag == 1:\n                    shutil.copyfile(\"/cuslog/custom_nginx.log\", \"/app/new.log\")\n                    rtime_value = readNewFile()\n                    file_flag = 0\n                else:\n                    shutil.copyfile(\"/cuslog/custom_nginx.log\", \"/app/new.log\")\n                    same_line = checkFirstLine()\n                    if same_line == 1:\n                        rtime_value = diffFile()\n                    else:\n                        rtime_value = readNewFile()\n\n                    shutil.move(\"/app/new.log\", \"/app/old.log\")\n\n            r = requests.get(url)\n            data = r.text.split(\" \")\n            active_value = int(data[2])\n            accept_value = int(data[7])\n            handle_value = int(data[8])\n            request_value = int(data[9])\n            read_value = int(data[11])\n            write_value = int(data[13])\n            wait_value = int(data[15])\n\n            active_conn.set(active_value)\n            accept_conn.inc(accept_value)\n            handle_conn.inc(handle_value)\n            request_conn.inc(request_value)\n            # accept_conn.set(accept_value)\n            # handle_conn.set(handle_value)\n            # request_conn.set(request_value)\n            read_conn.set(read_value)\n            write_conn.set(write_value)\n            wait_conn.set(wait_value)\n\n            rtime_desc.set(rtime_value)\n\n        except Exception:  # was a bare except; don't swallow KeyboardInterrupt\n            pass\n\n        time.sleep(1)\n","repo_name":"LERUfic/open-repository","sub_path":"Aguel/self-metric/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
{"seq_id":"12954636335","text":"__author__ = 'Samsung'\nx,y = input().split()\n\nx = float(x)\ny = float(y)\n\nif (x%5==0) and ( x < y ) and ( y - x - 0.5 >= 0 ):\n\tprint('%.2f' % (y - x - 0.5))\nelse:\n\tprint('%.2f' % y)\n","repo_name":"sakshi-chauhan/PythonCodeChef","sub_path":"ATM.py","file_name":"ATM.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
{"seq_id":"30584877426","text":"import glob\nimport os\nimport argparse\nimport shutil\n\nimport cv2\nimport moviepy.editor as mpy\n\nfrom emosaic.utils.gif import create_gif_from_images\nfrom emosaic.utils.misc import ensure_directory\nfrom emosaic.utils.indexing import index_at_multiple_scales\nfrom emosaic.utils.misc import is_running_jupyter\n\nif is_running_jupyter():\n    from tqdm import tqdm_notebook as tqdm\nelse:\n    from tqdm import tqdm\n\n\"\"\"\nExample:\n\n    $ run 
make_gif.py \\\n --target \"media/pics/2018-04-01 12.00.27.jpg\" \\\n --savepath \"media/output/%s-from-%d-to-%d.gif\" \\\n --codebook-dir /pics \\\n --min-scale 1 \\\n --max-scale 18 \\\n --fps 3 \\\n --detect-faces\n\"\"\"\n\nparser = argparse.ArgumentParser()\n\n# required\nparser.add_argument(\"--detect-faces\", dest='detect_faces', action='store_true', default=False, help=\"If we should only include pictures with faces in them\")\nparser.add_argument(\"--codebook-dir\", dest='codebook_dir', type=str, required=True, help=\"Source folder of images\")\nparser.add_argument(\"--target\", dest='target', type=str, required=True, help=\"Video to mosaicify\")\nparser.add_argument(\"--min-scale\", dest='min_scale', type=int, required=True, help=\"Start scale rendering here\")\nparser.add_argument(\"--max-scale\", dest='max_scale', type=int, required=True, help=\"Continue rendering up until this scale\")\nparser.add_argument(\"--savepath\", dest='savepath', type=str, required=True, help=\"Final name for the video, will add scale and base path name (use .gif extension)\")\nparser.add_argument(\"--fps\", dest='fps', type=float, default=3, help=\"Frames per second to render\") \nparser.add_argument(\"--fuzz\", dest='fuzz', type=float, default=5, help=\"Fuzz factor for moviepy blur rendering\") \nparser.add_argument(\"--vectorization-factor\", dest='vectorization_factor', type=float, default=1., \n help=\"Downsize the image by this much before vectorizing\")\n\n# optional / has default\nparser.add_argument(\"--randomness\", dest='randomness', type=float, default=0.0, help=\"Probability to use random tile\")\nparser.add_argument(\"--ascending\", dest='ascending', type=int, default=1, help=\"1 for ascending, 0 for descending order of scales\")\nparser.add_argument(\"--height-aspect\", dest='height_aspect', type=float, default=4.0, help=\"Height aspect\")\nparser.add_argument(\"--width-aspect\", dest='width_aspect', type=float, default=3.0, help=\"Width aspect\")\n\nargs = parser.parse_args()\n\n# index at various scales\nscale2index, scale2mosaic = index_at_multiple_scales(\n args.codebook_dir,\n min_scale=args.min_scale,\n max_scale=args.max_scale,\n height_aspect=args.height_aspect,\n width_aspect=args.width_aspect,\n vectorization_factor=args.vectorization_factor,\n precompute_target=cv2.imread(args.target),\n use_stabilization=True,\n stabilization_threshold=0.85,\n caching=True,\n use_detect_faces=args.detect_faces,\n)\n\n# create a temporary diretory to save images to\ntmp_dir = '/tmp/%s-dir' % args.savepath\nensure_directory(tmp_dir)\n\n# create mosaics at various scales, and save them to the folder above\nimg_paths = []\nscales = range(args.min_scale, args.max_scale + 1, 1)\n\nwith tqdm(desc='Indexing:', total=len(scales)) as pbar:\n for i, scale in enumerate(scales):\n img_savepath = os.path.join(tmp_dir, \"%08d.jpg\" % i)\n mosaic = scale2mosaic[scale]\n cv2.imwrite(img_savepath, mosaic)\n img_paths.append(img_savepath)\n pbar.update(1)\n\n# create the GIF!\nsavepath = args.savepath % (\n os.path.basename(args.target), args.min_scale, args.max_scale)\ncreate_gif_from_images(\n img_paths, savepath, \n fps=args.fps, fuzz=args.fuzz, \n ascending=bool(args.ascending))\n\n# remove temp directory \nshutil.rmtree(tmp_dir)\n","repo_name":"worldveil/photomosaic","sub_path":"make_gif.py","file_name":"make_gif.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":633,"dataset":"github-code","pt":"94"} +{"seq_id":"7310749943","text":"# -*- coding: 
utf8 -*-\n\"\"\" Ansible module\n\nModule for running ansible playbooks\n\nAttributes:\n ansible_arguments (dict): Contains config, flags, and extra_flags options\n ansible_file_location (str): location to playbook\n domain (str): target domain\n ansible_base (str): Base command string\n output_dir (str): location to dump output of playbook\n infile (str): Infile for playbook job\n verbose (bool): More output Yes/No\n final_command (str): Final command to run\n\"\"\"\nimport logging\nimport subprocess\nfrom string import Template\nfrom robot_api.parse import join_abs\n\nLOG = logging.getLogger(__name__)\n\n\nclass Ansible:\n def __init__(self, **kwargs):\n \"\"\"\n Build Ansible object\n\n Args:\n\n **kwargs: {\n \"ansible_arguments\" : {\n \"config\" : \"$config/httpscreenshot_play.yml\",\n \"flags\": \"-e '$extra' -i configs/ansible_inventory\",\n \"extra_flags\":{\n \"1\" : \"variable_host=localhost\",\n \"2\" : \"variable_user=user\", \n \"3\" : \"infile=$infile\",\n \"4\" : \"outfile=$outfile/httpscreenshots.tar\",\n \"5\" : \"outfolder=$outfile/httpscreenshots\"\n }\n },\n \"ansible_file_location\" : \"location\",\n \"verbose\" : True,\n \"domain\" : \"target.domain\"\n }\n\n Returns:\n\n \"\"\"\n self.ansible_base = \"ansible-playbook $flags $config\"\n self.ansible_arguments = kwargs.get('ansible_arguments')\n self.ansible_file = kwargs.get('ansible_file_location', None)\n if not self.ansible_file:\n raise TypeError(\n \"argument ansible_file must be of type string, not 'NoneType'\")\n\n self.domain = kwargs.get('domain', None)\n if not self.domain:\n raise TypeError(\n \"argument domain must be of type string, not 'NoneType'\")\n self.output_dir = kwargs.get('output_dir')\n self.infile = kwargs.get('infile', None)\n if not self.infile:\n self.infile = join_abs(self.output_dir, \"aggregated\", \"aggregated_protocol_hostnames.txt\")\n\n self.verbose = kwargs.get('verbose', False)\n self.final_command = None\n\n def _print(self, msg):\n \"\"\"Utility for logging\n \"\"\"\n if self.verbose:\n print(\"[D] \" + msg)\n LOG.debug(msg)\n\n def build(self):\n \"\"\"Build the final command for the ansible process.\n Uses the arguments provided in the ansible_arguments\n\n Args:\n\n Returns:\n\n \"\"\"\n try:\n system_replacements = {\n \"infile\": self.infile,\n \"outdir\": self.output_dir,\n \"config\": self.ansible_file\n }\n\n extra_flags = self.ansible_arguments.get('extra_flags', None)\n extra_replace_string = \"\"\n\n self._print(f\"building with extra flags {extra_flags}\")\n\n if extra_flags:\n for _, val in extra_flags.items():\n cur_str = Template(val)\n extra_replace_string += cur_str.safe_substitute(\n system_replacements) + \" \"\n\n flags = Template(self.ansible_arguments.get(\"flags\"))\n config = Template(self.ansible_arguments.get(\"config\"))\n\n \"\"\"\n This may seem redundant but this prepends the\n path of the ansible location to the ansible file\n specified in the config.\n All the flags are added to the flags\n argument to be input into the final command\n \"\"\"\n substitutes = {\n \"flags\": flags.safe_substitute(extra=extra_replace_string),\n \"config\": config.safe_substitute(system_replacements)\n }\n\n _temp = Template(self.ansible_base)\n self.final_command = _temp.safe_substitute(substitutes)\n\n self._print(f\"Final ansible command {self.final_command}\")\n except BaseException:\n raise TypeError(\"NoneType object supplied in Dict build\")\n\n def run(self):\n \"\"\"Run the final command built at runtime\n\n Args:\n\n Returns:\n\n \"\"\"\n try:\n 
self.build()\n _ = subprocess.check_output(\n self.final_command,\n shell=True,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n except subprocess.CalledProcessError:\n print(f\"[!] CallProcessError check logs\")\n LOG.exception(\"Called Process Error Ansible\")\n except OSError:\n print(f\"[!] OSError check logs\")\n LOG.exception(\"OSError in Ansible\")\n except subprocess.SubprocessError:\n print(f\"[!] SubprocessError check logs\")\n LOG.exception(\"Subprocess Error in Ansible\")\n except TypeError:\n print(f\"[!] TypeError check logs\")\n LOG.exception(\"Type Error in Ansible\")\n","repo_name":"sandialabs/dr_robot","sub_path":"src/robot_api/api/ansible.py","file_name":"ansible.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"94"} +{"seq_id":"72483685109","text":"#!/usr/bin/env python3\n\"\"\"\nCreated on Fri Mar 27 13:48:27 2020\nUsage:\n pytest -rf tests/test_demographics.py::TestClass::test_demographic_proportions\n from the main folder\n@author: anelnurtay\n\"\"\"\n\nimport numpy as np, pandas as pd\nimport pytest\nimport random as rd\n\nimport sys\nsys.path.append(\"src/COVID19\")\nfrom parameters import ParameterSet\n\nfrom . import constant\nfrom . import utilities as utils\n\ndef pytest_generate_tests(metafunc):\n # called once per each test function\n funcarglist = metafunc.cls.params[metafunc.function.__name__]\n argnames = sorted(funcarglist[0])\n metafunc.parametrize(\n argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]\n )\n\n\nclass TestClass(object):\n params = {\n \"test_demographic_proportions\" : [\n dict( # UK\n n_total = 10000,\n population_0_9 = 10000 * 0.12,\n population_10_19= 10000 * 0.11,\n population_20_29= 10000 * 0.13,\n population_30_39= 10000 * 0.13,\n population_40_49= 10000 * 0.13,\n population_50_59= 10000 * 0.13,\n population_60_69= 10000 * 0.11,\n population_70_79= 10000 * 0.08,\n population_80= 10000 * 0.05\n ),\n dict( # even\n n_total = 50000,\n population_0_9 = 50000 * 0.111,\n population_10_19= 50000 * 0.111,\n population_20_29= 50000 * 0.111,\n population_30_39= 50000 * 0.111,\n population_40_49= 50000 * 0.111,\n population_50_59= 50000 * 0.111,\n population_60_69= 50000 * 0.111,\n population_70_79= 50000 * 0.111,\n population_80= 50000 * 0.111\n ),\n dict( # Japan 2019\n n_total = 100000,\n population_0_9 = 100000 * 0.08,\n population_10_19= 100000 * 0.09,\n population_20_29= 100000 * 0.10,\n population_30_39= 100000 * 0.12,\n population_40_49= 100000 * 0.15,\n population_50_59= 100000 * 0.13,\n population_60_69= 100000 * 0.13,\n population_70_79= 100000 * 0.13,\n population_80= 100000 * 0.09\n ),\n# dict( # Nigeria 2019 FAILS \n# # \"because the reference household panel does not include \n# # sufficient households with large numbers of children\"\n# n_total = 250000,\n# population_0_9 = 250000 * 0.312,\n# population_10_19= 250000 * 0.230,\n# population_20_29= 250000 * 0.160,\n# population_30_39= 250000 * 0.119,\n# population_40_49= 250000 * 0.082,\n# population_50_59= 250000 * 0.052,\n# population_60_69= 250000 * 0.030,\n# population_70_79= 250000 * 0.013,\n# population_80= 250000 * 0.002\n# ),\n dict( # Kazakhstan 2019 \n n_total = 250000,\n population_0_9 = 250000 * 0.21,\n population_10_19= 250000 * 0.14,\n population_20_29= 250000 * 0.15,\n population_30_39= 250000 * 0.16,\n population_40_49= 250000 * 0.12,\n population_50_59= 250000 * 0.11,\n population_60_69= 250000 * 0.07,\n population_70_79= 250000 * 0.03,\n 
population_80= 250000 * 0.02\n ),\n dict( # UK\n n_total = 500000,\n population_0_9 = 500000 * 0.12,\n population_10_19= 500000 * 0.11,\n population_20_29= 500000 * 0.13,\n population_30_39= 500000 * 0.13,\n population_40_49= 500000 * 0.13,\n population_50_59= 500000 * 0.13,\n population_60_69= 500000 * 0.11,\n population_70_79= 500000 * 0.08,\n population_80= 500000 * 0.05\n )\n ],\n \"test_household_size\" : [\n dict(\n n_total = 10000, # default sizes\n household_size_1 = 0.29,\n household_size_2 = 0.34,\n household_size_3 = 0.15,\n household_size_4 = 0.13,\n household_size_5 = 0.04,\n household_size_6 = 0.02\n ),\n dict(\n n_total = 10000, # shift from small to large\n household_size_1 = 0.24,\n household_size_2 = 0.29,\n household_size_3 = 0.10,\n household_size_4 = 0.18,\n household_size_5 = 0.09,\n household_size_6 = 0.07\n ),\n dict(\n n_total =10000, # shift from large to small\n household_size_1 = 0.33,\n household_size_2 = 0.37,\n household_size_3 = 0.18,\n household_size_4 = 0.10,\n household_size_5 = 0.02,\n household_size_6 = 0.01\n ),\n dict(\n n_total = 10000,# shift from medium\n household_size_1 = 0.32,\n household_size_2 = 0.37,\n household_size_3 = 0.08,\n household_size_4 = 0.05,\n household_size_5 = 0.08,\n household_size_6 = 0.06\n )\n ],\n \"test_user_demographics\": [ \n dict( \n test_params = dict(\n n_total = 1e3,\n end_time = 20,\n ),\n n_houses = 1e1\n )\n ],\n }\n \"\"\"\n Test class for checking \n \"\"\"\n def test_demographic_proportions(\n self,\n n_total,\n population_0_9,\n population_10_19,\n population_20_29,\n population_30_39,\n population_40_49,\n population_50_59,\n population_60_69,\n population_70_79,\n population_80\n ):\n \"\"\"\n Test that the proportion of people in different age groups agrees with \n the population\n \"\"\"\n \n error_tolerance = 0.01\n\n params = utils.get_params_swig()\n\n params.set_param(\"n_total\", n_total)\n params.set_param(\"end_time\", 1)\n params.set_param(\"population_0_9\",population_0_9)\n params.set_param(\"population_10_19\",population_10_19)\n params.set_param(\"population_20_29\",population_20_29)\n params.set_param(\"population_30_39\",population_30_39)\n params.set_param(\"population_40_49\",population_40_49)\n params.set_param(\"population_50_59\",population_50_59)\n params.set_param(\"population_60_69\",population_60_69)\n params.set_param(\"population_70_79\",population_70_79)\n params.set_param(\"population_80\",population_80)\n \n population_fraction = [ population_0_9, population_10_19, population_20_29,\n population_30_39, population_40_49, population_50_59,\n population_60_69, population_70_79, population_80 ]\n \n model = utils.get_model_swig( params )\n \n # step through the model and write the relevant files the end\n for _ in range( params.get_param( \"end_time\") ):\n model.one_time_step()\n \n model.write_individual_file()\n\n df_indiv = pd.read_csv( constant.TEST_INDIVIDUAL_FILE )\n\n # population proportion by age\n N_tot = len( df_indiv )\n for idx in range( constant.N_AGE_GROUPS ):\n N = len(df_indiv[(df_indiv['age_group'] == constant.AGES[idx])])\n np.testing.assert_allclose(N, population_fraction[idx], atol=N_tot * error_tolerance)\n\n def test_household_size(self, n_total, household_size_1, household_size_2,\n household_size_3, household_size_4, household_size_5, household_size_6):\n \"\"\"\n Test to check the household size distribution\n \"\"\"\n\n # Set the parameters we want for the simulation.\n params = utils.get_params_swig()\n params.set_param(\"end_time\", 1)\n 
params.set_param(\"n_total\", n_total)\n params.set_param(\"household_size_1\", household_size_1)\n params.set_param(\"household_size_2\", household_size_2)\n params.set_param(\"household_size_3\", household_size_3)\n params.set_param(\"household_size_4\", household_size_4)\n params.set_param(\"household_size_5\", household_size_5)\n params.set_param(\"household_size_6\", household_size_6)\n\n # Calculate the number of people expected to be living in households of\n # each different size, based on the parameter definitions.\n household_size_counts = [household_size_1, household_size_2,\n household_size_3, household_size_4, household_size_5, household_size_6]\n household_size_counts_weighted = np.array(\n [count * (i + 1) for i, count in enumerate(household_size_counts)],\n dtype=float)\n household_size_counts_weighted *= float(n_total) / \\\n sum(household_size_counts_weighted)\n\n # Run the simulation\n model = utils.get_model_swig( params )\n \n # step through the model and write the relevant files the end\n for _ in range( params.get_param( \"end_time\") ):\n model.one_time_step()\n \n model.write_individual_file()\n\n # Find the number of people living in households of each different size\n # in the simulation output.\n df_indiv = pd.read_csv( constant.TEST_INDIVIDUAL_FILE )\n \n df_house = df_indiv.groupby([\"house_no\"]).size().reset_index(name=\"size\")\n df_house = df_house.groupby([\"size\"]).size().reset_index(\n name=\"house_count\")\n df_house[\"people_count\"] = df_house[\"size\"] * df_house[\"house_count\"]\n df_house[\"people_count_expected\"] = household_size_counts_weighted\n\n # Test!\n np.testing.assert_allclose(df_house[\"people_count\"],\n df_house[\"people_count_expected\"], rtol=0.02) \n \n \n def test_user_demographics(self, test_params, n_houses):\n \"\"\"\n Adds in a user defined demographic and household structure\n \"\"\"\n \n n_houses = int(n_houses)\n n_total = int(test_params[\"n_total\"])\n n_age_groups = int(constant.N_AGE_GROUPS)\n \n # build a random household/demographic structure\n IDs = range(n_total)\n houses = [0]*n_total\n ages = [0]*n_total\n for i in range(n_total):\n houses[i] = round( i / 4 )\n# houses[i] = rd.randrange(0,n_houses-1)\n ages[i] = rd.randrange(0,n_age_groups-1) \n houses = sorted( houses )\n df_demo = pd.DataFrame({'ID':np.array(IDs, dtype='int32'),'age_group':np.array(ages, dtype='int32'),'house_no':np.array(houses, dtype='int32')})\n \n # get the intial paramters and add user defined demographics \n params = utils.get_params_swig()\n for param, value in test_params.items():\n params.set_param( param, value ) \n params.set_demographic_household_table(df_demo)\n \n # get the model and run for the required time steps\n model = utils.get_model_swig( params )\n for time in range( test_params[ \"end_time\" ] ):\n model.one_time_step()\n \n # get the individual file and check age and houses\n model.write_individual_file()\n df_indiv = pd.read_csv(constant.TEST_INDIVIDUAL_FILE)\n \n # now check the households ages are correct\n df = pd.merge(df_demo,df_indiv,on = [\"ID\"], how = \"left\")\n n_wrong_age = sum( df[\"age_group_x\"]!=df[\"age_group_y\"])\n n_wrong_house = sum( df[\"house_no_x\"]!=df[\"house_no_y\"])\n np.testing.assert_equal( n_wrong_age, 0, err_msg = \"people in the wrong age group\")\n np.testing.assert_equal( n_wrong_house, 0, err_msg = \"people in the wrong house\")\n \n \n \n \n \n \n 
\n","repo_name":"BDI-pathogens/OpenABM-Covid19","sub_path":"tests/test_demographics.py","file_name":"test_demographics.py","file_ext":"py","file_size_in_byte":12176,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"94"} +{"seq_id":"36748138260","text":"from aitextgen import aitextgen\nimport pandas as pd\n\nclass CardTextGenerator:\n def __init__(self) -> None:\n self.text_gen_ai = aitextgen(model=\"minimaxir/magic-the-gathering\", to_gpu=False)\n\n\n def generate_card(self, prompt:str, craziness:float, num_of_cards:int, batch_size:int):\n cards = self.text_gen_ai.generate(n=num_of_cards,\n batch_size=batch_size,\n schema=True,\n prompt=prompt,\n temperature=craziness,\n return_as_list=True)\n\n cards = pd.DataFrame(list(cards))\n cards['text'] = cards.apply(\n lambda row: row['text'].replace(\"~\", row['name']), axis=1)\n cards = cards[['name', 'manaCost', 'type',\n 'power', 'toughness','loyalty','text']]\n return cards\n","repo_name":"CasualC-er/mtg-ai-draft","sub_path":"01-generating_card_text/generate_card_text.py","file_name":"generate_card_text.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"38484022079","text":"from program import *\nfrom grammar import *\n\n\nfrom arithmeticPrimitives import *\nfrom utilities import *\nfrom listPrimitives import *\n\nfrom recognition import *\n\nimport torch.nn.functional as F\n\ndef extract_scaler(v):\n v = v.view(-1)\n assert v.shape == (1,)\n v = v.data.tolist()[0]\n return v\n\ndef ancestorPrimitive(t):\n return Primitive(\"ancestor\",t,None)\n\nclass InvalidMutation(Exception): pass\n\ndef applyMutation(m,a):\n def curse(e):\n if e.isPrimitive and e.name == \"ancestor\":\n if a is None: raise InvalidMutation()\n return a\n if e.isApplication:\n return Application(curse(e.f),\n curse(e.x))\n if e.isAbstraction:\n return Abstraction(curse(e.body))\n return e\n return curse(m)\n\nclass EvolutionGuide(RecognitionModel):\n def __init__(self, featureExtractor, grammar, hidden=[64], activation=\"relu\", request=None,\n cuda=False, contextual=False):\n super(EvolutionGuide, self).__init__(featureExtractor, grammar,\n hidden=hidden, activation=activation,\n cuda=cuda, contextual=contextual)\n\n assert request is not None\n\n # value and policy\n self.value = nn.Linear(self.outputDimensionality, 1)\n\n self.mutationGrammar = Grammar.uniform(grammar.primitives + [ancestorPrimitive(request)],\n continuationType=grammar.continuationType)\n if contextual:\n self.policy = ContextualGrammarNetwork(self.outputDimensionality, self.mutationGrammar)\n self.mutationGrammar = ContextualGrammar.fromGrammar(self.mutationGrammar)\n else:\n self.policy = GrammarNetwork(self.outputDimensionality, self.mutationGrammar)\n\n if cuda: self.cuda()\n\n def batchedForward(self, goal, currents):\n features = self._MLP(self.featureExtractor.featuresOfTasks([goal]*len(currents), currents))\n B = features.shape[0]\n v = self.value(features)\n return [self.policy(features[b]) for b in range(B) ], [v[b] for b in range(B) ]\n\n def graphForward(self, root):\n \"\"\"Returns a dictionary of {node: (policy, value)}, for each node in the graph\"\"\"\n children = root.reachable()\n children = list(children)\n # Make sure that everything has a task associated with it\n for c in children:\n if c.current is None and c.program is not None:\n c.current = self.featureExtractor.taskOfProgram(c.program, c.goal.request,\n lenient=True)\n assert 
c.current is not None\n\n goal = root.goal\n policies, values = self.batchedForward(goal, [c.current for c in children])\n return {c: (p,v)\n for c,p,v in zip(children, policies, values) }\n\n def valuesAndEdgeCosts(self, root):\n \"\"\"Returns a dictionary of {node: value}, for each node in the graph,\n as well as a dictionary of {edge: -logLikelihood}, for each edge in the graph\"\"\"\n children = root.reachable()\n children = list(children)\n edges = []\n # Make sure that everything has a task associated with it\n for childIndex, c in enumerate(children):\n for e in c.descendents: edges.append((e,childIndex))\n if c.current is None and c.program is not None:\n c.current = self.featureExtractor.taskOfProgram(c.program, c.goal.request,\n lenient=True)\n assert c.current is not None\n\n goal = root.goal\n # features: (# vertices) x (self.outputDimensionality)\n features = self._MLP(self.featureExtractor.featuresOfTasks([goal]*len(children),\n [c.current for c in children]))\n B = features.shape[0]\n V = self.value(features)\n\n summaries = [ e.likelihoodSummary(self.mutationGrammar) for e,_ in edges ]\n xs_indices = torch.tensor(np.array([ childIndex for _,childIndex in edges ]))\n xs = features[xs_indices]\n edgeCosts = -self.policy.batchedLogLikelihoods(xs, summaries)\n \n return ({children[b]: V[b]\n for b in range(B) },\n {edges[e][0]: edgeCosts[e]\n for e in range(len(edges)) })\n\n def batchedLoss(self, root):\n if True:\n with timing(lambda dt: \"calculated %d edge costs (%f s/edge)\"%(len(edgeCost),\n dt/len(edgeCost))):\n values, edgeCost = self.valuesAndEdgeCosts(root)\n else:\n with timing(lambda dt: \"calculated %d edge costs (%f s/edge)\"%(len(edgeCost),\n dt/len(edgeCost))):\n pv = self.graphForward(root)\n values = {ev: value[1] for ev, value in pv.items() }\n edgeCost = {}\n for ev,(policy,_) in pv.items():\n for edge in ev.descendents:\n edgeCost[edge] = -edge.likelihoodSummary(self.mutationGrammar).logLikelihood(policy)[0]\n eprint(edgeCost)\n\n \n\n distance = {}\n def _distance(ev):\n if ev in distance: return distance[ev]\n if ev.isGoal:\n d = 0.\n else:\n alternatives = []\n for edge in ev.descendents:\n alternatives.append(edgeCost[edge] + _distance(edge.child))\n if alternatives == []:\n eprint(ev,\"has no alternatives/descendents\")\n if False:\n alternatives = [ edgeCost + distanceToGo\n for edgeCost, distanceToGo in alternatives ]\n d = torch.stack(alternatives,1).view(-1)\n d = d.squeeze(0).min(0)[0]\n else:\n d = -torch.logsumexp(-torch.stack(alternatives), 0)\n distance[ev] = d\n return d\n with timing(\"_distance\"):\n pl = _distance(root)\n vl = sum( (distance[ev] - values[ev])**2\n for ev in root.reachable())\n vl = vl/len(distance) # MSE\n return pl,vl\n\n def visualize(self, root):\n pv = self.graphForward(root)\n\n actualDistance = {}\n predictedDistance = {}\n edgeCost = {}\n \n def analyze(ev):\n if ev in predictedDistance: return\n predictedDistance[ev] = extract_scaler(pv[ev][1])\n \n if ev.isGoal:\n actualDistance[ev] = 0.\n else:\n alternatives = []\n mg = pv[ev][0]\n for edge in ev.descendents:\n ec = -edge.likelihoodSummary(self.mutationGrammar).logLikelihood(mg).view(-1)\n ec = extract_scaler(ec)\n edgeCost[edge] = ec\n analyze(edge.child)\n alternatives.append(ec + actualDistance[edge.child])\n \n actualDistance[ev] = min(alternatives)\n \n analyze(root)\n\n from graphviz import Digraph\n g = Digraph()\n\n def name(ev):\n return \"%s\\nV*=%f\\nV=%f\"%(ev.program,\n actualDistance[ev],\n predictedDistance[ev])\n\n for ev in actualDistance:\n 
g.node(name(ev))\n for ev in actualDistance:\n if len(ev.descendents) == 0: continue\n \n bestEdge = min(ev.descendents, key=lambda e: edgeCost[e])\n for edge in ev.descendents:\n g.edge(name(ev),\n name(edge.child),\n label=\"%s\\n%f\"%(edge.mutation,\n edgeCost[edge]),\n color=\"red\" if edge == bestEdge else \"black\")\n g.render(\"/tmp/evolutionGraph.pdf\",view=True)\n \n\n \n \n \n \n \n\n def children(self, _=None,\n ancestor=None, timeout=None):\n \"\"\"\n ancestor: EV.\n returns: list of (program built from ancestor, log likelihood of transition)\n \"\"\"\n g = self.policy(self._MLP(self.featureExtractor.featuresOfTask(ancestor.goal, ancestor.current))).untorch()\n message = {\"DSL\": g.json(),\n \"request\": ancestor.goal.request.json(),\n \"extras\": [[]],\n \"timeout\": float(timeout)\n }\n if ancestor.program is not None: message[\"ancestor\"] = str(ancestor.program)\n if self.featureExtractor.__class__.special:\n message[\"special\"] = self.featureExtractor.__class__.special\n\n response = jsonBinaryInvoke(\"./evolution\", message)\n children = []\n for e in response:\n mutation = Program.parse(e['programs'][0])\n if ancestor.program is None: child = mutation\n else: child = applyMutation(mutation, ancestor.program)\n children.append((child, e['ll']))\n return children\n\n def train(self, graphs, _=None,\n lr=0.001, timeout=None, steps=None):\n # Use a single optimizer for both the policy and the value\n # Just add the losses from the policy and the value\n # This is what Alpha go does\n optimizer = torch.optim.Adam(self.parameters(), lr=lr, eps=1e-3, amsgrad=True)\n\n startTime = time.time()\n losses = []\n graphs = list(graphs)\n \n while True:\n random.shuffle(graphs)\n latestLosses = []\n with timing(\"completed a single pass over the data set\"):\n for ev in graphs:\n with timing(\"gradient step with %d vertices\"%len(ev.reachable())):\n self.zero_grad()\n pl, vl = self.batchedLoss(ev)\n (pl + vl).backward()\n optimizer.step()\n\n losses.append((pl.data.tolist(),\n vl.data.tolist()[0]))\n latestLosses.append(pl.data.tolist())\n eprint(losses[-1])\n\n if steps and len(losses) > steps or timeout and time.time() - startTime > timeout:\n return\n eprint(latestLosses)\n eprint(\"Average policy loss over the most recent pass:\\n\\t\",\n sum(latestLosses)/len(latestLosses))\n \n\n \n def search(self, goal, _=None,\n populationSize=None, timeout=None, generations=None,\n fitnessBatch=100):\n assert populationSize is not None\n assert timeout is not None\n assert generations is not None\n\n # Map from parent to fitness\n seed = EV(goal, None)\n seed.logLikelihood = 0\n population = {seed: 1.}\n everyChild = set()\n\n for _ in range(generations):\n z = sum(population.values())\n children = {} # map from EV to log likelihood\n for ancestor, fitness in population.items():\n numberOfChildren = 0\n for child, ll in self.children(ancestor=ancestor,\n timeout=timeout*fitness/z):\n ev = EV(goal, child)\n ev.logLikelihood = ll + ancestor.logLikelihood\n ev.current = self.featureExtractor.taskOfProgram(child, goal.request)\n if ev.current is not None:\n if ev not in children or children[ev] < ev.logLikelihood:\n children[ev] = ev.logLikelihood\n numberOfChildren += 1\n eprint(\"Ancestor\",ancestor.program,\n \"produced\",numberOfChildren,\"children.\")\n \n children = list(children.keys())\n eprint(\"All of the ancestors collectively produced\",len(children),\n \"new children.\")\n\n # Keep only populationSize children\n\n bestChildren = PQ()\n childIndex = 0\n while childIndex < 
len(children):\n childBatch = children[childIndex:childIndex + fitnessBatch]\n childTasks = [ child.current for child in childBatch ]\n childFeatures = self.featureExtractor.featuresOfTasks([goal]*len(childTasks), childTasks)\n ds = self.value(self._MLP(childFeatures))\n ds = ds.data.view(-1).tolist()\n for child, d, batchIndex in zip(childBatch, ds, range(len(childBatch))):\n f = -d + child.logLikelihood\n eprint(\"Child\",child.program,\"has fitness\",f)\n bestChildren.push(-f, (f, childIndex + batchIndex, child))\n if len(bestChildren) > populationSize:\n _1, worstChildIndex, _2 = bestChildren.popMaximum()\n children[worstChildIndex] = None # garbage collect\n childIndex += fitnessBatch\n \n population = {}\n for f,_,child in bestChildren:\n everyChild.add(child)\n population[child] = f\n\n for child, fitness in sorted(population.items(), key=lambda cf: -cf[1]):\n eprint(\"Surviving child\",child.program,\"has fitness\",fitness)\n if goal.logLikelihood(child.program) > -0.01:\n eprint(\"\\t^^^^HIT \",goal,\"\\n\")\n\n return everyChild\n\n def sampleWithPolicy(self, goal):\n currentProgram = None\n while True:\n if currentProgram is None:\n currentTask = None\n else:\n currentTask = self.featureExtractor.taskOfProgram(currentProgram, goal.request)\n if currentTask is None: assert False\n\n mg = self.policy(self._MLP(self.featureExtractor.featuresOfTask(goal, currentTask)))\n mg = mg.untorch()\n request = goal.request\n m = mg.sample(request, maxAttempts=50)\n\n eprint(\"Mutation\",m)\n if m is None: return\n try: currentProgram = applyMutation(m, currentProgram).betaNormalForm()\n except InvalidMutation:\n eprint(\"Invalid mutation - this means it uses the ancestor but it has no ancestor\")\n return \n eprint(\"New current program:\")\n eprint(currentProgram)\n eprint()\n yield currentProgram\n \n \ndef possibleAncestors(request, program):\n from itertools import permutations\n\n program = program.clone()\n context = MutableContext()\n program.annotateTypes(context, [])\n def annotateIndices(p):\n if p.isIndex:\n p.variableTypes = {p.i: p.annotatedType.applyMutable(context)}\n elif p.isPrimitive or p.isInvented:\n p.variableTypes = dict()\n elif p.isAbstraction:\n annotateIndices(p.body)\n p.variableTypes = {(i - 1): t\n for i,t in p.body.variableTypes.items()\n if i > 0}\n elif p.isApplication:\n annotateIndices(p.f)\n annotateIndices(p.x)\n p.variableTypes = {i: p.f.variableTypes.get(i, p.x.variableTypes.get(i, None))\n for i in set(list(p.f.variableTypes.keys()) + list(p.x.variableTypes.keys()))}\n else: assert False\n\n annotateIndices(program)\n\n def renameAncestorVariables(d,a, mapping):\n if a.isIndex:\n if a.i - d >= 0:\n return Index(mapping[a.i - d])\n return a\n if a.isApplication:\n return Application(renameAncestorVariables(d,a.f,mapping),\n renameAncestorVariables(d,a.x,mapping))\n if a.isAbstraction:\n return Abstraction(renameAncestorVariables(d + 1, a.body, mapping))\n if a.isPrimitive or a.isInvented:\n return a\n assert False\n \n desiredNumberOfArguments = len(request.functionArguments())\n def curse(d, p):\n # Returns a set of (mutation, ancestor)\n parses = set()\n\n # Could this be the ancestor?\n freeVariableTypes = p.variableTypes\n tp = p.annotatedType\n if not p.isIndex and \\\n len(freeVariableTypes) + len(tp.functionArguments()) == desiredNumberOfArguments:\n for fv in permutations(freeVariableTypes.items()):\n t = tp\n for _,fvt in reversed(fv): t = arrow(fvt,t)\n if canUnify(t, request):\n # Apply the ancestor\n m = ancestorPrimitive(request)\n for fi,_ 
in fv: m = Application(m,Index(fi))\n # rename variables inside of ancestor\n mapping = {fi: fi_ for fi_,(fi,_) in enumerate(reversed(fv)) }\n a = renameAncestorVariables(0, p, mapping)\n for _ in fv: a = Abstraction(a)\n a = EtaLongVisitor(request).execute(a)\n parses.add((m, a))\n\n if p.isIndex or p.isPrimitive or p.isInvented:\n parses.add((p,None))\n if p.isApplication:\n f = curse(d, p.f)\n x = curse(d, p.x)\n for fp,fa in f:\n for xp,xa in x:\n if fa is not None and \\\n xa is not None and \\\n fa != xa:\n continue\n a = fa or xa\n parses.add((Application(fp,xp), a))\n if p.isAbstraction:\n for b,a in curse(d + 1, p.body):\n parses.add((Abstraction(b), a))\n return parses\n\n return {(EtaLongVisitor(request).execute(m.clone()),\n a.clone())\n for m,a in curse(0, program)\n if a is not None and m != ancestorPrimitive(request) and a != program}\n\nclass EV:\n \"\"\"evolution vertex: a vertex in the graph describing all evolutionary trajectories to a solution\"\"\"\n def __init__(self, goal, program):\n self.program = program\n self.goal = goal\n # outgoing edges\n self.descendents = []\n\n # current: task option\n # where we are currently in the search space\n self.current = None\n\n self.isGoal = False\n\n def __eq__(self,o):\n if self.program is None: return o.program is None\n if o.program is None: return False\n return self.program == o.program\n def __ne__(self,o): return not (self == o)\n def __hash__(self): return hash(self.program)\n\n def reachable(self, visited=None):\n if visited is None: visited = set()\n if self in visited: return visited\n visited.add(self)\n for d in self.descendents: d.child.reachable(visited)\n return visited\n\n def removeLongPaths(self, maxPath):\n assert self.program is None\n\n distanceFromSource = {}\n frontier = {self}\n d = 0\n while len(frontier) > 0:\n for v in frontier:\n if v not in distanceFromSource: distanceFromSource[v] = d\n frontier = {v\n for f in frontier\n for e in f.descendents\n for v in [e.child]\n if v not in frontier and v not in distanceFromSource}\n d += 1\n\n shortestPath = {}\n def curse(v):\n if v in shortestPath:\n assert shortestPath[v] <= maxPath\n return shortestPath[v]\n if v.isGoal:\n shortestPath[v] = 0\n return 0\n\n for d in v.descendents: curse(d.child)\n v.descendents = [d for d in v.descendents\n if shortestPath[d.child] + distanceFromSource[v] + 1 <= maxPath]\n shortestPath[v] = min( shortestPath[c.child] for c in v.descendents ) + 1\n assert shortestPath[v] <= maxPath\n return shortestPath[v]\n curse(self)\n \n class Edge:\n \"\"\"evolutionary edge\"\"\"\n def __init__(self, ancestor, mutation, child, request, ll=None):\n self.ancestor = ancestor\n self.mutation = mutation\n self.child = child\n self.request = request\n self._likelihoodSummary = None\n self.ll = ll\n\n def __eq__(self, o):\n return (self.ancestor, self.mutation, self.child) == \\\n (o.ancestor, o.mutation, o.child)\n def __ne__(self, o): return not (self == o)\n def __hash__(self): return hash((self.ancestor, self.mutation, self.child))\n\n def likelihoodSummary(self, g):\n try:\n if self._likelihoodSummary is None:\n self._likelihoodSummary = g.closedLikelihoodSummary(self.request, self.mutation)\n return self._likelihoodSummary\n except:\n eprint(\"Could not calculate likelihood of mutation\",self.mutation)\n assert False\n \ndef evolutionaryTrajectories(task, seed):\n request = task.request\n\n # map from program to EV\n # Initially we just have no program\n table = {None: EV(task, None)}\n\n def getVertex(p):\n if p in table: return 
table[p]\n v = EV(task,p)\n # Single step mutation that just gets us here in one shot\n table[None].descendents.append(EV.Edge(ancestor=None,\n mutation=p,\n child=v,\n request=request))\n table[p] = v\n for m,a in possibleAncestors(request,p):\n av = getVertex(a)\n av.descendents.append(EV.Edge(ancestor=av,\n mutation=m,\n child=v,\n request=request))\n return v\n\n v = getVertex(seed)\n v.isGoal = True\n\n return table[None] \n\nfrom towerPrimitives import *\nfrom makeTowerTasks import *\nfrom tower import TowerCNN\ng = Grammar.uniform(primitives)\ntasks = makeSupervisedTasks()#[:3]\n\ntrajectories = []\nfor t in tasks:\n p = t.original\n try: g.logLikelihood(t.request, p)\n except: continue\n \n trajectory = evolutionaryTrajectories(t,p)\n trajectory.removeLongPaths(2)\n trajectories.append(trajectory)\n\ntesting, trajectories = testTrainSplit(trajectories, 0.7)\n#trajectories = trajectories[:2]\neprint(\"Training on\",len(trajectories),\"/\",len(tasks),\"tasks\")\n\nrm = EvolutionGuide(TowerCNN([]),g,contextual=True,\n request=t.request,\n cuda=torch.cuda.is_available())\nfor trajectory in []:#testing:\n eprint(\"Testing on\",trajectory.goal)\n rm.search(trajectory.goal,\n populationSize=10,\n timeout=20,\n generations=2)\n\n\nrm.train(trajectories, timeout=7200*2)\n#rm.visualize(trajectories[0])\n\n\nif False:\n solutions = []\n for _ in range(15):\n eprint(\"Sampling a new trajectory from the policy...\")\n thisSequence = []\n for p in rm.sampleWithPolicy(t):\n current = rm.featureExtractor.taskOfProgram(p,None)\n if current is None: break\n\n thisSequence.append(current)\n if len(thisSequence) > 3: break\n if thisSequence:\n solutions.append(thisSequence)\n\n from utilities import *\n from tower_common import fastRendererPlan\n m = montageMatrix([[fastRendererPlan(t.plan,pretty=True,Lego=True)\n for t in ts]\n for ts in solutions])\n from pylab import imshow,show\n imshow(m)\n show()\nelse:\n for trajectory in testing:\n eprint(\"Testing on\",trajectory.goal)\n rm.search(trajectory.goal,\n populationSize=10,\n timeout=20,\n generations=2)\n\n","repo_name":"lcary/ec-backup","sub_path":"evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":24228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24667103516","text":"\"\"\"Module that can be used for Danabot API-Hashing deobfuscation\"\"\"\r\n\r\nimport sys\r\n\r\n\r\ndef get_hash(funcname):\r\n \"\"\"Calculate the hash value for function name. 
Return hash value as integer\"\"\"\r\n    strlen = len(funcname)\r\n    # if the length is even, we encounter a different behaviour\r\n    i = 0\r\n    hashv = 0x0\r\n    while i < strlen:\r\n        if i == (strlen - 1):\r\n            ch1 = funcname[0]\r\n        else:\r\n            ch1 = funcname[strlen - 2 - i]\r\n        # init first character and capitalize it\r\n        ch = funcname[i] # ebp 0x1c and 0x18\r\n        uc_ch = ch.capitalize()\r\n        # Capitalize the second character\r\n        uc_ch1 = ch1.capitalize() # ebp 0x20\r\n        # Calculate all XOR values\r\n        xor_ch = ord(ch) ^ strlen\r\n        xor_uc_ch = ord(uc_ch) ^ strlen\r\n        xor_ch1 = ord(ch1) ^ strlen\r\n        xor_uc_ch1 = ord(uc_ch1) ^ strlen\r\n        # do the multiplication and XOR again with upper case character1\r\n        hashv += ((xor_ch * xor_ch1) * xor_uc_ch)\r\n        hashv = hashv ^ xor_uc_ch1\r\n        i += 1\r\n    return hashv\r\n\r\n\r\ndef main():\r\n    \"\"\"Main.\"\"\"\r\n    hashv = get_hash(sys.argv[1])\r\n    print(f\"[+] Calculated hash = {hex(hashv)}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"hackingump/malwareStuff","sub_path":"DeobfuscatingDanaBot/calc_apihash.py","file_name":"calc_apihash.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"71459740791","text":"import tkinter as tk\nfrom tkinter import ttk\nimport os\n\n# Get the current directory\nseparador = os.path.sep\ndirecion_actual = os.getcwd()\n\nventana = tk.Tk()\nventana.geometry('600x400')\nventana.title('Grid Handling')\nventana.iconbitmap(direcion_actual + separador + 'icono' + separador + 'icono.ico')\n\n# Events\ndef evento_1():\n    boton1.config(text='Button 1 Pressed')\n\ndef evento_2():\n    boton2.config(text='Button 2 Pressed')\n\n# N, S, E, W\n# Define the buttons\nboton1 = ttk.Button(ventana, text='Button 1', command=evento_1)\nboton1.grid(row=0, column=0, sticky=tk.E)\n\nboton2 = ttk.Button(ventana, text='Button 2', command=evento_2)\nboton2.grid(row=1, column=0, sticky='W')\n\n\n# Show the window\nventana.mainloop()","repo_name":"KUSHIRO13/Universidad_Python_con_Frameworks_Django_Flask_etc_71hrs","sub_path":"Tkinters/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70657723510","text":"import configparser\r\nimport os.path\r\nimport argparse\r\nimport pandas as pd\r\nfrom Safety import SafetyStock\r\nfrom collections import defaultdict\r\nimport ipdb\r\n\r\n\r\nclass Initialisation():\r\n    \"\"\"This task verifies file integrity and creates the necessary objects\"\"\"\r\n\r\n    def __init__(self):\r\n        # super(Initialisation, self).__init__()\r\n        self.config_dict = self.read_config()\r\n        self.biobj, self.tune = self.read_args()\r\n        self.decimal = self.config_dict[\"filenames\"][\"decimal\"]\r\n\r\n    def read_config(self):\r\n        '''\r\n        extract the parameters defined in the config file into a dictionary for easier access throughout the code\r\n        '''\r\n        config = self.config_dialog()\r\n        config_dict = {}\r\n        for section in config.sections():\r\n            config_dict[section] = dict(config.items(section))\r\n        for k in config_dict:\r\n            for key, value in config_dict[k].items():\r\n                if value == \"True\":\r\n                    config_dict[k][key] = True\r\n                if value == \"False\":\r\n                    config_dict[k][key] = False\r\n        if self.verify_files(config_dict[\"filenames\"]):\r\n            return config_dict\r\n\r\n    def config_dialog(self, overide=None):\r\n        config = configparser.ConfigParser()\r\n        while True:\r\n            config_file = input(\"Please specify configuration file \")\r\n            if os.path.exists(config_file):\r\n                config.read(config_file)\r\n                break\r\n            else:\r\n                print(\"The configuration file could not be found. Please try again. \")\r\n                continue\r\n        return config\r\n\r\n    def create_files(self):\r\n        self.writer = pd.ExcelWriter(\r\n            self.config_dict[\"filenames\"][\"result_file\"], engine=\"xlsxwriter\")\r\n        if not os.path.exists(\"CSV export files\"):\r\n            os.makedirs(\"CSV export files\")\r\n        if not os.path.exists(\"Saved Models\"):\r\n            os.makedirs(\"Saved Models\")\r\n\r\n    def arg_parse(self):\r\n        '''\r\n        defines the command line flags\r\n        '''\r\n        parser = argparse.ArgumentParser(\r\n            description=\"Launch a bi-objective optimisation\")\r\n        parser.add_argument(\"--biobj\", type=bool)\r\n        parser.add_argument(\"--tune\", type=bool)\r\n        return parser\r\n\r\n    def read_args(self):\r\n        # need test\r\n        args = self.arg_parse().parse_args()\r\n        return args.biobj, args.tune\r\n\r\n    def verify_files(self, files):\r\n        for f in list(files.values())[:4]:\r\n            if os.path.isfile(f) is False:\r\n                print(f\"Please check the file path: {f} could not be found\")\r\n                return False\r\n        return True\r\n\r\n    def create_writer(self):\r\n        writer = pd.ExcelWriter(\r\n            self.config_dict[\"filenames\"][\"result_file\"], engine=\"xlsxwriter\")\r\n        return writer\r\n\r\n    def read_batch_size(self):\r\n        decimal = self.config_dict[\"filenames\"][\"decimal\"]\r\n        m = pd.read_csv(self.config_dict[\"filenames\"][\"batch_size\"],\r\n                        index_col=[0],\r\n                        sep=None,\r\n                        engine=\"python\",\r\n                        decimal=decimal)\r\n        return m\r\n\r\n\r\nclass Subsetting():\r\n    \"\"\"docstring for Subsetting\"\"\"\r\n\r\n    def __init__(self, initial, data):\r\n        self.data = data\r\n        self.initial = initial\r\n        loc_data_path = self.initial.config_dict[\"filenames\"][\"location_data\"]\r\n        self.loc_data = pd.read_csv(loc_data_path,\r\n                                    index_col=[0],\r\n                                    sep=None,\r\n                                    engine=\"python\",\r\n                                    decimal=self.initial.decimal)\r\n        self.define_indices()\r\n\r\n    def compute_ss(self):\r\n        e = self.echelons()\r\n        loc = self.loc_data\r\n        s = SafetyStock(self.data, loc, e[\"cw\"], e[\"rw\"], self.initial.config_dict)\r\n        ss = s.ss_allocation()\r\n        return ss\r\n\r\n    def define_indices(self):\r\n        loc_data_path = self.initial.config_dict[\"filenames\"][\"location_data\"]\r\n        loc_data = pd.read_csv(loc_data_path,\r\n                               index_col=[0],\r\n                               sep=None,\r\n                               engine=\"python\",\r\n                               decimal=self.initial.decimal)\r\n        self.sku_id = self.data[\"SKU_id\"].unique() # Set of all SKUs I\r\n        self.time_id = self.data[\"period\"].unique(\r\n        ).tolist() # Set of all time periods T\r\n        self.time_id.insert(0, \"0\") # create t=0 index\r\n        self.loc_id = self.loc_data.index.tolist() # Set of all network facilities W\r\n        self.factory_id = (self.loc_data[self.loc_data[\"echelon\"] == \"Factory\"]\r\n                           .index.tolist()) # Set of factories F\r\n        self.cw_id = (self.loc_data[self.loc_data[\"echelon\"] == \"Central\"]\r\n                      .index.tolist()) # Set of CW D\r\n        self.rw_id = (self.loc_data[self.loc_data[\"echelon\"] == \"Regional\"]\r\n                      .index.tolist()) # set of RW J\r\n        self.ext_factory_id = list(set(self.data[\"producedBy\"].unique(\r\n        ).tolist()) - set(self.factory_id)) # Set of ext suppliers O\r\n        idx_dict = {\"time_id\": self.time_id,\r\n                    \"sku_id\": self.sku_id,\r\n                    \"loc_id\": self.loc_id,\r\n                    \"fact_id\": self.factory_id,\r\n                    \"cw_id\": self.cw_id,\r\n                    \"rw_id\": self.rw_id,\r\n                    \"ext_fact_id\": self.ext_factory_id}\r\n        return idx_dict\r\n\r\n    def read_loc_data(self):\r\n        loc_data_path = 
self.initial.config_dict[\"filenames\"][\"location_data\"]\r\n loc_data = pd.read_csv(loc_data_path,\r\n index_col=[0],\r\n sep=None,\r\n engine=\"python\",\r\n decimal=self.initial.decimal)\r\n return loc_data\r\n\r\n def echelons(self):\r\n \"\"\"\r\n Define and return a dict of echelon memberships\r\n \"\"\"\r\n f = (self.loc_data[self.loc_data[\"echelon\"] == \"Factory\"]\r\n .index.tolist()) # Set of factories F\r\n c = (self.loc_data[self.loc_data[\"echelon\"] == \"Central\"]\r\n .index.tolist()) # Set of CW D\r\n r = (self.loc_data[self.loc_data[\"echelon\"] == \"Regional\"]\r\n .index.tolist()) # set of RW J\r\n ech = {\"f\": f,\r\n \"cw\": c,\r\n \"rw\": r}\r\n return ech\r\n\r\n def find_last_t(self):\r\n '''\r\n for each SKU find index of last time period where an order occurs\r\n returns a dictionary with last time period for each SKU\r\n {\"SKU1\": \"t1\", \"SKU2\": \"t2\"}\r\n '''\r\n last = self.data.sort_values(by=[\"period\"]).drop_duplicates(subset=[\"SKU_id\"], keep=\"last\")\r\n last_t = dict(zip(last[\"SKU_id\"], last[\"period\"]))\r\n last_t = {k: self.time_id.index(v) + 1\r\n for k, v in last_t.items()}\r\n return last_t\r\n\r\n def inventory_indices(self):\r\n last_t = self.find_last_t()\r\n skus = self.SKU_loc_assignment()\r\n return [(i, w, t) for w in self.loc_id for i in skus.get(w, []) for t in self.time_id[:last_t[i]]]\r\n\r\n def shipment_indices(self):\r\n dep = self.departure_allocation()\r\n origins = self.factory_id + self.cw_id + self.ext_factory_id\r\n skus = self.SKU_loc_assignment()\r\n inv_idx = self.inventory_indices()\r\n return [(o, d, i, t) for i, d, t in inv_idx for o in origins if i in skus.get(o, []) and d in dep.get(o, [])]\r\n\r\n def ftl_indices(self):\r\n ship_idx = self.shipment_indices()\r\n return [(o, d, t) for o,d,i,t in ship_idx]\r\n\r\n def production_indices(self):\r\n intsku_f, _ = self.production()\r\n last_t = self.find_last_t()\r\n return [(i, f, t) for f in self.factory_id for i in intsku_f.get(f, []) for t in self.time_id[:last_t[i] + 1]]\r\n\r\n def ls_indices(self):\r\n demand = self.data.set_index([\"period\",\r\n \"SKU_id\",\r\n \"location\"])[\"pallets\"].to_dict()\r\n return demand.keys()\r\n\r\n def bin_indices(self):\r\n last_t = self.find_last_t()\r\n mbs = self.minimize_bin()\r\n big_M, big_M_alt = self.compute_M(self.data, mbs)\r\n return [(i, t) for i in mbs for t in self.time_id[:last_t[i]]]\r\n\r\n\r\n def sum_indices(self):\r\n ship_idx = self.shipment_indices()\r\n sum_idx = defaultdict(list)\r\n for a in ship_idx: # isolate all SKUs that occur for each origin-destination-period triplet\r\n k = (a[0], a[1], a[3])\r\n sum_idx[k].append(a[2])\r\n return sum_idx \r\n\r\n def minimize_bin(self):\r\n m = self.initial.read_batch_size()\r\n m = m[m[\"min_batch_active\"] > 0 ] # only keep min batch size for active skus\r\n m = m[m.index.isin(self.sku_id)]\r\n mbs = dict(zip(m.index, m[\"min. 
Batch Size (PAL)\"]))\r\n        temp = self.data.groupby([\"SKU_id\", \"period\"]).agg({\"pallets\": \"sum\"})\r\n        temp = temp[temp[\"pallets\"] != 0].groupby([\"SKU_id\"]).agg({\"pallets\": \"min\"})\r\n        temp = dict(zip(temp.index, temp[\"pallets\"]))\r\n        l = []\r\n        for i in list(mbs):\r\n            if temp[i] > mbs[i]:\r\n                l.append(i)\r\n                del mbs[i]\r\n        r = str(len(l) * len(self.time_id))\r\n        if r != \"0\":\r\n            print(r + \" binary variables have been removed\")\r\n        return mbs\r\n\r\n    def compute_M(self, data, mbs):\r\n        '''\r\n        big_M:\r\n        compute big M so as to tighten the bound\r\n        Production will never be > the sum of demand for the entire network and all periods\r\n        big_M_alt:\r\n        if the sum of the demand is smaller than the batch size -> production can only be the min. batch size or 0\r\n        -> this allows us to reduce the solution space\r\n        '''\r\n        # temp = demand.groupby([self.sku_col, self.time_col]).agg({self.qty_col: \"sum\"})\r\n        # temp = temp[temp[self.qty_col] != 0].groupby([self.sku_col]).agg({self.qty_col: \"max\"})\r\n        temp = self.data.groupby(\"SKU_id\").agg({\"pallets\": \"sum\"})\r\n        temp = dict(zip(temp.index, temp[\"pallets\"]))\r\n        big_M = {}\r\n        big_M_alt = {}\r\n        for i in mbs.keys():\r\n            # big_M[i] = max((temp[i] + self.cw_ss.get(i, 0) + self.rw_ss.get(i, 0)), mbs[i])\r\n            if temp[i] > mbs[i]:\r\n                big_M[i] = temp[i]\r\n            else:\r\n                big_M_alt[i] = mbs[i]\r\n        return big_M, big_M_alt\r\n\r\n    def departure_allocation(self):\r\n        ''' dictionary holding for each departure point in the network the set of facilities it is allowed to serve\r\n        example: {'factory1': ['Central Warehouse1', 'Regional Warehouse2']}\r\n        '''\r\n        loc = self.loc_data.fillna(\"\")\r\n        dep = {}\r\n        for f in self.factory_id:\r\n            dep[f] = self.cw_id + loc.index[loc[\"factory_allocation\"].str.contains(f)].tolist() + loc.index[loc[\"factory_to_factory\"].str.contains(f)].tolist()\r\n        for d in self.cw_id:\r\n            dep[d] = loc.index[loc[\"resp. central WH\"].str.contains(d)].tolist()\r\n        for e in self.ext_factory_id:\r\n            dep[e] = self.data[\"location\"][self.data[\"producedBy\"] == e].unique().tolist() + self.cw_id\r\n        return dep\r\n\r\n    # def function(self):\r\n    #     self.int_skus, self.ext_skus = self.int_ext_skus()\r\n    #     self.intsku_fact, self.sku_plan = self.production()\r\n    #     self.intsku_CW, self.extsku_CW = self.cw_int_ext_skus()\r\n\r\n    def int_ext_skus(self):\r\n        int_skus = (self.data[self.data[\"producedBy\"]\r\n                    .isin(self.factory_id)][\"SKU_id\"]\r\n                    .to_list()) # [intsku1, intsku2]\r\n        ext_skus = (self.data[self.data[\"producedBy\"]\r\n                    .isin(self.ext_factory_id)][\"SKU_id\"]\r\n                    .to_list()) # [extsku1, extsku2]\r\n        return int_skus, ext_skus\r\n\r\n    def production(self):\r\n        # assign which factory produces which sku\r\n        k = (self.data[self.data[\"producedBy\"]\r\n             .isin(self.factory_id)]\r\n             .groupby(\"producedBy\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        intsku_fact = dict(zip(k.index, k)) # {\"factory1\": [\"intSKU1\", \"intSKU2\"]}\r\n        # assign which sku can be supplied by which factory, the inverse of the previous dict\r\n        k = (self.data[self.data[\"producedBy\"]\r\n             .isin(self.factory_id)]\r\n             .groupby(\"SKU_id\")[\"producedBy\"]\r\n             .apply(set).apply(list))\r\n        sku_plan = dict(zip(k.index, k)) # {\"sku1\": \"[factory1]\"}\r\n        return intsku_fact, sku_plan\r\n\r\n    def fact_ext_skus(self):\r\n        _, ext = self.int_ext_skus()\r\n        k = (self.data[(self.data[\"location\"].isin(self.factory_id))\r\n             & (self.data[\"SKU_id\"].isin(ext))]\r\n             .groupby(\"location\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        extsku_f = dict(zip(k.index, k))\r\n        return extsku_f\r\n\r\n    def f2f_skus(self):\r\n        f2f_fact_id = list(self.loc_data.loc[~self.loc_data[\"factory_to_factory\"].isna()].index) # extract factories in need of special constraints\r\n        f2f_sku = {}\r\n        for f in f2f_fact_id:\r\n            temp = self.loc_data.loc[f, \"factory_to_factory\"].split(\",\") # extract factories that can supply [\"fact1\", \"fact2\"]\r\n            skus = (self.data[(self.data[\"location\"] == f) & (self.data[\"producedBy\"].isin(temp))]\r\n                    .groupby(\"location\")[\"SKU_id\"]\r\n                    .apply(set).tolist()) # extract SKUs that 1. are at factory f and 2. are produced by a factory in the \"factory_to_factory\" column, {f: [\"sku1\", \"sku2\"]}\r\n\r\n            if len(skus) > 0:\r\n                skus = [s for s in skus[0]]\r\n                f2f_sku[f] = skus # {fact1: [\"sku1\", \"sku2\"]}\r\n        return f2f_sku\r\n\r\n    def cw_int_ext_skus(self):\r\n        k = self.compute_ss()[0]\r\n        k = k.reset_index() # retrieve ss for cws\r\n        int_s, ext_s = self.int_ext_skus()\r\n        k = (k[k[\"SKU_id\"].isin(int_s)]\r\n             .groupby(\"location\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        intsku_CW = dict(zip(k.index, k)) # subset of int SKUs held at central warehouses {\"CW1\": [\"intSKU1\", \"intSKU2\"]}\r\n\r\n        k = self.compute_ss()[0]\r\n        k = k.reset_index()\r\n        k = (k[k[\"SKU_id\"].isin(ext_s)]\r\n             .groupby(\"location\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        extsku_CW = dict(zip(k.index, k)) # subset of ext SKUs held at central warehouses {\"CW1\": [\"extSKU1\", \"extSKU2\"]}\r\n        return intsku_CW, extsku_CW\r\n\r\n    def rw_int_ext_skus(self):\r\n        int_s, ext_s = self.int_ext_skus()\r\n        k = (self.data[(self.data[\"location\"]\r\n             .isin(self.rw_id)) & (self.data[\"SKU_id\"]\r\n             .isin(int_s))]\r\n             .groupby(\"location\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        intsku_RW = dict(zip(k.index, k)) # {\"RW1\": [\"intsku1\", \"intsku2\"]}\r\n\r\n        k = (self.data[(self.data[\"location\"]\r\n             .isin(self.rw_id))\r\n             & (self.data[\"SKU_id\"].isin(ext_s))]\r\n             .groupby(\"location\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        extsku_RW = dict(zip(k.index, k)) # {\"RW1\": [\"extsku1\", \"extsku2\"]}\r\n        return intsku_RW, extsku_RW\r\n        # create this subset for skuloc then finish inventory indices\r\n\r\n    def SKU_loc_assignment(self):\r\n        '''\r\n        Store all SKUs that could potentially be held at each location\r\n        '''\r\n        sku_LOC = {}\r\n        int_f, _ = self.production() # skus produced by each factory\r\n        ext_f, f2f = self.fact_ext_skus(), self.f2f_skus() # external skus at factories, skus produced by other factories\r\n        int_c, ext_c = self.cw_int_ext_skus() # internal + external skus at central w\r\n        int_r, ext_r = self.rw_int_ext_skus() # internal + external skus at regional w\r\n        for l in self.loc_id:\r\n            if l in self.factory_id:\r\n                sku_LOC[l] = int_f.get(l, []) + ext_f.get(l, []) + f2f.get(l, [])\r\n            if l in self.cw_id:\r\n                sku_LOC[l] = int_c.get(l, []) + ext_c.get(l, [])\r\n            if l in self.rw_id:\r\n                sku_LOC[l] = int_r.get(l, []) + ext_r.get(l, [])\r\n        # adding ext suppliers skus\r\n        k = (self.data[self.data[\"producedBy\"]\r\n             .isin(self.ext_factory_id)]\r\n             .groupby(\"producedBy\")[\"SKU_id\"]\r\n             .apply(set).apply(list))\r\n        ext = dict(zip(k.index, k))\r\n        sku_LOC.update(ext)\r\n        return sku_LOC\r\n\r\n    def arrival_allocation(self):\r\n        ''' dictionary holding for each destination in the network the locations that it could be supplied from\r\n        example: {'Reg. Warehouse1': ['factory1', 'factory2']}\r\n        '''\r\n        factory_to_rw = {}\r\n        cw_to_rw = {}\r\n        fact_to_fact = {}\r\n        for w in self.rw_id:\r\n            factory_to_rw[w] = self.loc_data.loc[w, \"factory_allocation\"].split(\",\")\r\n\r\n            try:\r\n                cw_to_rw[w] = self.loc_data.loc[w, \"resp. central WH\"].split(\",\")\r\n            except AttributeError:\r\n                cw_to_rw[w] = []\r\n        for f in self.factory_id:\r\n            try:\r\n                fact_to_fact[f] = self.loc_data.loc[f, \"factory_to_factory\"].split(\",\")\r\n            except AttributeError:\r\n                fact_to_fact[f] = []\r\n\r\n        return factory_to_rw, cw_to_rw, fact_to_fact\r\n\r\n    def supplier(self):\r\n        # assign which supplier supplies which sku\r\n        k = (self.data[self.data[\"producedBy\"]\r\n             .isin(self.ext_factory_id)]\r\n             .groupby(\"SKU_id\")[\"producedBy\"]\r\n             .apply(set).apply(list))\r\n        supplier = dict(zip(k.index, k)) # {\"SKU1\": \"ExtFact1\"}\r\n        return supplier\r\n\r\n    def direct_sh_cons(self):\r\n        direct_sh_extsku = dict(zip(self.loc_data.index, self.loc_data[\"direct shipment ext. SKU\"]))\r\n        return direct_sh_extsku\r\n\r\n    def minimize_constraint(self):\r\n        '''\r\n        return the index of the last time period where a demand occurs for each sku at the 3rd echelon\r\n        in order to reduce the number of constraints being generated\r\n        => no need to define a constraint after the last demand has occurred for that sku\r\n        returns: a dictionary -> {(Location1, SKU1): index(period1)}\r\n        '''\r\n        last = self.data.sort_values(by=[\"period\"]).drop_duplicates(subset=[\"SKU_id\", \"location\"], keep=\"last\")\r\n        last = last[last[\"location\"].isin(self.rw_id)]\r\n        last_t = dict(zip(zip(last[\"location\"], last[\"SKU_id\"]), last[\"period\"]))\r\n        last_t = {k: self.time_id.index(v)\r\n                  for k, v in last_t.items()}\r\n        return last_t\r\n\r\n\r\nclass Preprocess():\r\n    \"\"\"This class is tasked with preparing the data before the optimisation\"\"\"\r\n\r\n    def __init__(self, n_rows=None):\r\n        self.initial = Initialisation()\r\n\r\n        self.n_rows = n_rows\r\n\r\n        self.data = self.clean_data()\r\n\r\n        self.sub = Subsetting(self.initial, self.data)\r\n\r\n    def read_raw_data(self):\r\n        decimal = self.initial.config_dict[\"filenames\"][\"decimal\"]\r\n        d = {\"SKU_id\": \"object\"}\r\n        path = self.initial.config_dict[\"filenames\"][\"orders\"]\r\n        raw_data = pd.read_csv(path,\r\n                               index_col=[0],\r\n                               nrows=self.n_rows,\r\n                               dtype=d,\r\n                               sep=None,\r\n                               engine=\"python\",\r\n                               decimal=decimal)\r\n        return raw_data\r\n\r\n    def read_clusters(self, data):\r\n        try:\r\n            data[\"XYZ_cluster\"] = data[\"XYZ_cluster\"].fillna(\"Z\")\r\n            data = data.loc[~((data[\"ABC_cluster\"] == \"C\") &\r\n                              (data[\"XYZ_cluster\"] == \"Z\"))]\r\n            return data\r\n        except KeyError:\r\n            print(\"No clusters found\")\r\n\r\n    def save_data(self, data):\r\n        writer = self.initial.create_writer()\r\n        data.to_excel(writer, sheet_name=\"Demand\")\r\n\r\n    def clean_data(self):\r\n        raw = self.read_raw_data()\r\n        data = raw[raw[\"location\"].notna()]\r\n        self.data = self.read_clusters(data)\r\n        self.save_data(data)\r\n        return self.data\r\n\r\n    def compute_ss(self):\r\n        e = self.sub.echelons()\r\n        loc = self.sub.read_loc_data()\r\n        s 
= SafetyStock(self.data, loc, e[\"cw\"], e[\"rw\"], self.initial.config_dict)\r\n ss = s.ss_allocation()\r\n return ss\r\n\r\n def holding_costs(self):\r\n hc = self.data.groupby([\"SKU_id\", \"location\"])[\"hold_cost_pallet\"].first()\r\n default_hc = hc.groupby(\"SKU_id\").mean().to_dict()\r\n return hc.to_dict(), default_hc\r\n\r\n def prod_costs(self):\r\n p_c = dict(zip(zip(self.data[\"SKU_id\"], self.data[\"producedBy\"]), self.data[\"prod_costs\"]))\r\n return p_c\r\n\r\n def ftl_matrix(self):\r\n ftl_matrix_path = self.initial.config_dict[\"filenames\"][\"ftl_matrix\"]\r\n decimal = self.initial.config_dict[\"filenames\"][\"decimal\"]\r\n ftl_m = pd.read_csv(ftl_matrix_path,\r\n index_col=[0],\r\n sep=None,\r\n engine=\"python\",\r\n decimal=decimal)\r\n return ftl_m.to_dict()\r\n\r\n def holding_capacity(self):\r\n loc = self.sub.read_loc_data().to_dict()\r\n return loc[\"hold. cap.\"]\r\n\r\n def prod_capacity(self):\r\n loc = self.sub.read_loc_data().to_dict()\r\n return loc[\"prod. cap.\"]\r\n\r\n def lead_time(self):\r\n sku_id = self.sub.define_indices()[\"sku_id\"]\r\n t_id = self.sub.define_indices()[\"time_id\"]\r\n lt_dic = self.data[\"lead_time\"].to_dict()\r\n lt = {(i,t): t_id[max(int(ind - lt_dic.get(i,1)), ind-1)]\r\n for i in sku_id\r\n for ind, t in enumerate(t_id)}\r\n return lt\r\n\r\n def sum_ss(self):\r\n ss_sum = {}\r\n time_skus_dic = self.sub.find_last_t()\r\n cw_ss, rw_ss = self.compute_ss()\r\n # find out for each time period which skus has last demand in time for that time period\r\n d = {n: [k for k in time_skus_dic.keys() if time_skus_dic[k] == n] for n in set(time_skus_dic.values())}\r\n\r\n for w in self.sub.loc_id:\r\n for k, v in d.items():\r\n t = self.sub.time_id[k-1]\r\n ss_sum[(w, t)] = sum(cw_ss.get((w, sku), rw_ss.get((w, sku), 0)) for sku in v)\r\n return ss_sum\r\n\r\n# TODO\r\n# break-up define_indices\r\n \r\n\r\n\r\n \r\n\r\n\r\n","repo_name":"Ervin66/Inventory-Optimization","sub_path":"Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":23491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"3018946538","text":"# Problem 136 A - Presents\n\n# input\nn = int(input())\np_nums = list(map(int, input().split()))\n\n# initialization\nans_nums = [0]*n\n\n# count\nfor i in range(n):\n p = p_nums[i]\n ans_nums[p-1] = i + 1\n\n# output\nprint(\" \".join(list(map(str, ans_nums))))\n","repo_name":"steinstadt/CodeForces","sub_path":"20200528/prob_4.py","file_name":"prob_4.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35004055156","text":"#!/usr/bin/python\n\n### LCD Init Script ###\n\nimport time\nimport Adafruit_CharLCD as LCD\n\n# Raspberry Pi pin configuration:\nlcd_rs = 27 \nlcd_en = 22\nlcd_d4 = 25\nlcd_d5 = 24\nlcd_d6 = 23\nlcd_d7 = 18\nlcd_backlight = 4 # Only used if controlling the backlight as well as the characters\n\n# Define LCD column and row size for 16x2 LCD.\nlcd_columns = 16\nlcd_rows = 2\n\n# Initialize the LCD using the pins above.\nlcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,\n lcd_columns, lcd_rows, lcd_backlight)\n\n### init message ####\n\nlcd.clear()\nlcd.message('PyRadio is On!')\n","repo_name":"dvdokkum/pyradio","sub_path":"lcd_init.py","file_name":"lcd_init.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} 
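A minimal follow-on sketch for the lcd_init.py record above, assuming the `lcd` object it creates and the same 16x2 wiring; the status strings are illustrative only and only calls already used in that script (clear, message, plus time.sleep) appear here:

# Hypothetical usage of the lcd object initialized in lcd_init.py;
# rotates a couple of status messages on the 16x2 display.
for status in ('PyRadio is On!', 'Tuning...'):
    lcd.clear()          # wipe the previous text
    lcd.message(status)  # write the new status line
    time.sleep(2.0)      # time is already imported in lcd_init.py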
+{"seq_id":"830653059","text":"from utapi.common.utrc import UtrcType, UtrcClient, UtrcDecode\nfrom utapi.common.utcc import UtccType, UtccClient, UtccDecode\nfrom utapi.common.socket_serial import SocketSerial\n\nfrom utapi.adra.adra_api_base import AdraApiBase\n\n\nclass AdraApiSerial(AdraApiBase):\n def __init__(self, port, baud, bus_type=0):\n u\"\"\"AdraApiSerial is an interface class that controls the ADRA actuator through a serial port.\n USB-to-RS485 or USB-to-CAN module hardware is required to connect the computer and the actuator.\n\n Args:\n port (string): USB serial port, The default port on Linux is \"/dev/ttyUSB0\" or \"/dev/ttyACM0\"\n baud (int): Baud rate of serial communication\n bus_type (int, optional): 0 indicates the actuator that uses the RS485 port.\n 1 indicates the actuator that uses the CAN port.\n Defaults to 0.\n \"\"\"\n self.DB_FLG = \"[SApiSeri] \"\n self.__is_err = 0\n id = 1\n print(self.DB_FLG + \"SocketSerial, com:%s, baud:%d\" % (port, baud))\n if bus_type == 0:\n self.bus_decode = UtrcDecode(0xAA, id)\n self.socket_fp = SocketSerial(port, baud, self.bus_decode)\n if self.socket_fp.is_error() != 0:\n print(self.DB_FLG + \"Error: SocketSerial, port:%s, baud:%d\" % (port, baud))\n self.__is_err = 1\n return\n\n self.socket_fp.flush()\n self.bus_client = UtrcClient(self.socket_fp)\n\n self.tx_data = UtrcType()\n self.tx_data.state = 0x00\n self.tx_data.slave_id = id\n\n elif bus_type == 1:\n self.bus_decode = UtccDecode(0xAA, id)\n self.socket_fp = SocketSerial(port, baud, self.bus_decode)\n if self.socket_fp.is_error() != 0:\n print(self.DB_FLG + \"Error: SocketSerial, port:%s, baud:%d\" % (port, baud))\n self.__is_err = 1\n return\n\n self.socket_fp.flush()\n self.bus_client = UtccClient(self.socket_fp)\n self.tx_data = UtccType()\n self.tx_data.state = 0x00\n self.tx_data.id = id\n\n self.id = id\n self.virid = id\n\n AdraApiBase.__init__(self, self.socket_fp, self.bus_client, self.tx_data)\n\n def is_error(self):\n return self.__is_err\n\n def into_usb_pm(self):\n u\"\"\"If use the USB of the EtherNet to RS485/CAN module to transmit RS485/CAN data,\n need to use this function to put the EtherNet to RS485/CAN module into USB transmission mode.\n After the EtherNet to RS485/CAN module is powered on, the transmission mode is TCP/UDP by default.\n Therefore, only need to set the transmission mode once you are powered on.\n \"\"\"\n self.socket_fp.write(\"# INTO-USB-PM\\n\".encode('utf-8'))\n","repo_name":"UmbraTek/ut_sdk_python","sub_path":"utapi/adra/adra_api_serial.py","file_name":"adra_api_serial.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"94"} +{"seq_id":"993018713","text":"# -*- coding:utf-8 -*-\n'''\nCreated on Nov 14, 2015\n\n@author: wTayyeb https://github.com/wtayyeb\n@license: MIT\n'''\n\nimport argparse\nimport logging\nimport os\nimport urllib2\nimport warnings\n\nimport caldav\nimport ics\n\n\n__version__ = '0.2.3'\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nclass Ical2CalDav(object):\n def __init__(self, ics_url, caldav_url, username=None, password=None,\n calendar_id=None, purge_calendar=False, delay_purge=True):\n\n self.ics_url = ics_url\n self.caldav_url = caldav_url\n self.username = username\n self.password = password\n self.calendar_id = calendar_id\n self.purge_calendar = purge_calendar\n self.delay_purge = delay_purge\n\n\n def __call__(self):\n vcal = self.get_vcal_content()\n # vcal = 
self.get_test_vcal()\n events = self.split_events(vcal)\n self.upload_to_dav(events)\n\n\n def get_vcal_content(self):\n if os.path.exists(self.ics_url):\n with open(self.ics_url) as f:\n return f.read()\n else:\n return urllib2.urlopen(self.ics_url).read()\n\n\n def split_events(self, vcal):\n c = ics.Calendar(vcal.decode('utf-8'))\n logger.info('Found %s event in %s', len(c.events), self.ics_url)\n return c.events\n\n\n def fix_uid(self, uid):\n ''' overwrite this method if you want to change event.uid '''\n pass\n\n\n def upload_to_dav(self, events):\n logger.debug('try to connect to dav server')\n client = caldav.DAVClient(url=self.caldav_url, username=self.username, password=self.password, ssl_verify_cert=False)\n logger.debug('getting principal')\n principal = client.principal()\n logger.debug('getting principal.calendars')\n calendars = principal.calendars()\n\n #--- select calender to work with\n calendar = None\n for cal in calendars:\n cu = cal.canonical_url\n logger.debug('Found %s', cu)\n if self.calendar_id and cu.endswith('/%s/' % self.calendar_id):\n calendar = cal\n\n if self.calendar_id is None:\n calendar = calendars[0]\n\n if calendar is None:\n if self.calendar_id:\n logger.error('no calendar found with id `%s`', self.calendar_id)\n else:\n logger.error('no calendar found.')\n return\n logger.info('Using %s', calendar)\n\n #--- purge calendar if needed\n purge_uids = []\n l = len(calendar.events())\n if self.purge_calendar and l > 0:\n if not self.delay_purge:\n logger.info('Purging calendar %s before importing (%s events)', calendar, l)\n for ev in calendar.events():\n if self.delay_purge:\n uid = ev.instance.vevent.uid.value\n purge_uids.append(uid)\n logger.debug('will purge if not overwrited %s', uid)\n else:\n logger.debug('purging %s', ev)\n ev.delete()\n\n #--- import event to calendar\n for event in events:\n new_uid = self.fix_uid(event.uid)\n if new_uid:\n event.uid = new_uid\n try:\n purge_uids.remove(event.uid)\n except ValueError:\n pass\n\n ev = ('BEGIN:VCALENDAR\\n'\n 'VERSION:2.0\\n'\n 'CALSCALE:GREGORIAN'\n 'PRODID:-ics2caldav.py\\n'\n '%s\\n'\n 'END:VCALENDAR'\n ) % str(event)\n # print ev, '\\n', '=' * 30\n res = calendar.add_event(ev)\n logger.debug('%s created', res)\n\n #--- now purge them\n for uid in purge_uids:\n ev = calendar.event_by_uid(uid)\n logger.debug('purging %s', ev)\n ev.delete()\n\n logger.info('done.')\n\n\n def get_test_vcal(self):\n return '\\n'.join((\n 'BEGIN:VCALENDAR',\n 'VERSION:2.0',\n 'PRODID:-ics2caldav.py',\n 'BEGIN:VEVENT',\n 'UID:1234567890',\n 'CREATED;VALUE=DATE:20120702Z',\n 'DTSTAMP:20151114T182145Z',\n 'DTSTART:20151114T170000Z',\n 'DTEND:20151114T180000Z',\n 'SUMMARY:First Test',\n 'END:VEVENT',\n 'BEGIN:VEVENT',\n 'UID:1234567891',\n 'DTSTAMP:20151115T182145Z',\n 'DTSTART:20151115T170000Z',\n 'DTEND:20151115T180000Z',\n 'SUMMARY:Second test event',\n 'END:VEVENT',\n 'END:VCALENDAR',\n ))\n\n\n @classmethod\n def parse_args(cls, args=None):\n parser = argparse.ArgumentParser(description='import events from ics to caldav')\n\n parser.add_argument('-i', '--ics', metavar='url', type=str, action='store', required=True, help='url to ics calendar which want to get events from')\n parser.add_argument('-c', '--caldav', metavar='url', type=str, action='store', required=True, help='url to CalDAV server which want to put event to')\n parser.add_argument('-d', '--calid', metavar='calendar_id', type=str, action='store', help='CalDAV calendar id')\n parser.add_argument('-u', '--username', metavar='username', type=str, 
action='store', help='CalDAV username')\n parser.add_argument('-p', '--password', metavar='password', type=str, action='store', help='CalDAV password')\n parser.add_argument('--purge', action='store_true', help='purge calendar')\n parser.add_argument('--delay', action='store_true', help='delay purge calendar')\n\n args = parser.parse_args(args)\n\n return cls(\n ics_url=args.ics,\n caldav_url=args.caldav,\n calendar_id=args.calid,\n username=args.username,\n password=args.password,\n purge_calendar=args.purge,\n delay_purge=args.delay,\n )()\n\n\n @staticmethod\n def config_logger():\n warnings.simplefilter('ignore')\n\n hdlr = logging.StreamHandler()\n hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT))\n logger.addHandler(hdlr)\n logger.setLevel(logging.INFO)\n","repo_name":"wtayyeb/ics2caldav","sub_path":"ics2caldav/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72650728309","text":"#!/usr/bin/env python\nfrom sys import argv\n\nif len(argv) != 2:\n print('usage: %s filename.kv' % argv[0])\n exit(1)\n\n\nfrom kivy.lang import Builder\nfrom kivy.app import App\nfrom kivy.core.window import Window\nfrom kivy.clock import Clock, mainthread\nfrom kivy.uix.label import Label\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nfrom os.path import dirname, basename, join\n\nPATH = dirname(argv[1])\nTARGET = basename(argv[1])\n\n\nclass KvHandler(FileSystemEventHandler):\n def __init__(self, callback, target, **kwargs):\n super(KvHandler, self).__init__(**kwargs)\n self.callback = callback\n self.target = target\n\n def on_any_event(self, event):\n if basename(event.src_path) == self.target:\n self.callback()\n\n\nclass KvViewerApp(App):\n def build(self):\n o = Observer()\n o.schedule(KvHandler(self.update, TARGET), PATH)\n o.start()\n Clock.schedule_once(self.update, 1)\n return super(KvViewerApp, self).build()\n\n @mainthread\n def update(self, *args):\n Builder.unload_file(join(PATH, TARGET))\n Window.remove_widget(Window.children[0])\n try:\n Window.add_widget(Builder.load_file(join(PATH, TARGET)))\n except Exception as e:\n Window.add_widget(Label(text=e.message if e.message else str(e)))\n\n\nif __name__ == '__main__':\n KvViewerApp().run()","repo_name":"gistable/gistable","sub_path":"all-gists/9fc66c08f5d68ad486b4/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"} +{"seq_id":"41361313403","text":"import TransformReads as tr\nimport TransformTemplate as tt\nimport BWTStructure as bwt\nimport Parameters\nimport numpy as np\nimport time\nimport argparse\nimport os\n\nbasePath = os.getcwd() + \"/../inputs/\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--chrnum', type=int, required=True)\nparser.add_argument('--nr', type=int, required=True)\nparser.add_argument('--rl', type=int, required=True)\nparser.add_argument('--nrg', type=int, required=True)\nparser.add_argument('--nrb', type=int, required=True)\nargs = parser.parse_args()\n\n# get script arguments\nchrNum = int(args.chrnum)\nnumReads = int(args.nr)\nreadLength = int(args.rl)\nnumReadGroups = int(args.nrg)\nnumReadBatches = int(args.nrb)\nnumReadsPerBatch = numReads//numReadBatches\n\nstartTime = time.process_time()\n\n# extract total number of template chunks\nchrLengthFile = \"chrLengths.txt\"\nwith 
open(basePath + chrLengthFile, 'r') as f:\n chrLengthsString = f.read().splitlines()\n\nnumTemplateChunks = int(chrLengthsString[int(args.chrnum-1)][:-1])\n\n# create a path for storing results\nresultsPath = basePath + \"../results/chr\" + str(chrNum) + '_' + str(numTemplateChunks) + '_reads_' + str(readLength) + \"_\" + str(numReadGroups) + '_' + str(numReadBatches) + \"/\"\nif not os.path.isdir(resultsPath):\n os.makedirs(resultsPath)\n\n# extract the template\ntemplateFile = basePath + \"chr\" + str(chrNum) + \".fa\"\nwith open(templateFile, \"r\") as f:\n template = f.read().splitlines()\n template = template[1]\n\n# extract reads\nreadFile = basePath + \"reads.txt\"\nwith open(readFile, \"r\") as f:\n reads = f.read().splitlines()\n\n# Create template chunks\nprint(\"\\nStarting to process chromosome \", chrNum, \"...\")\nst = time.process_time()\nscrambledTemplateChunks, scramblePermutation = bwt.ScrambleTemplate(template, numTemplateChunks)\ntemplateChunkTime = time.process_time() - st\nprint(\"\\nTime for creating all template chunks for chromosome \", chrNum, \": \", round(time.process_time() - st, 2), \"\\n\")\n\n# Load primes\nwith open(basePath + 'primes.txt', 'r') as f:\n primesList = list(map(int, f.readlines()))\n\n# Transform template\nfor templateChunk in range(numTemplateChunks):\n localPath = resultsPath + \"template_chunk_\" + str(templateChunk) + \"/\"\n st = time.process_time()\n tt.TransformReferenceTemplate(scrambledTemplateChunks[templateChunk], localPath, templateChunkTime/numTemplateChunks)\n templateTime = time.process_time() - st\n print(\"Template Chunk #\", templateChunk, \" time: \", round(templateTime, 2))\n\n # Create primes\n numPrimes = Parameters.numPrimes\n primes = np.reshape(np.random.choice(primesList, numPrimes*numReadGroups, replace=False), (numReadGroups, numPrimes))\n np.save(localPath + 'primes', primes)\n\n# Transform reads\nprint(\"\\nStarting to process reads...\")\nfor readBatch in range(numReadBatches):\n localPath = resultsPath + \"read_batch_\" + str(readBatch) + \"/\"\n st = time.process_time()\n currReads = reads[readBatch*numReadsPerBatch: (readBatch+1)*numReadsPerBatch]\n tr.TransformReads(currReads, numReadGroups, localPath)\n readsTime = time.process_time() - st\n print('Read Batch #', readBatch, ' time: ', round(readsTime, 2))\n\nprint('\\nTotal preprocessing time: ', round(time.process_time() - startTime, 2))\n","repo_name":"sindhujamohan9/TwoCloudShuffledBWT","sub_path":"codes/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10174604782","text":"import json\nimport os.path\nimport sys\n\nimport pandas as pd\n\nfrom helpers import NpEncoder\n\n\ndef save_as_json(destination, data):\n with open(destination, \"w\", encoding=\"utf-8\") as f:\n output = json.dumps(data, cls=NpEncoder, ensure_ascii=False).replace(\"NaN\", \"null\")\n f.write(output)\n\n\ndef read_json_from_file(path):\n log_status(f\"Reading {path}\")\n if not os.path.isfile(path):\n raise Exception(f\"{path} not found\")\n\n with open(path, encoding=\"utf-8\") as f:\n data = json.load(f)\n\n return data\n\n\ndef get_json_from_csv_file(path, column_list):\n log_status(f\"Reading {path}\")\n if not os.path.isfile(path):\n raise Exception(f\"{path} not found\")\n\n df = pd.read_csv(path, usecols=column_list, keep_default_na=False)\n log_status(\"Converting data to JSON string\")\n json_string = 
df.to_json(orient='records')\n    del df\n    log_status(\"Parsing JSON\")\n    json_parsed = json.loads(json_string)\n\n    return json_parsed\n\n\ndef log_status(message):\n    print(message, file=sys.stderr)\n","repo_name":"mcmatrix/koroonakaart","sub_path":"build/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"24931818347","text":"# Print the most frequently occurring word excluding the banned words; matching is case-insensitive and punctuation is ignored as well.\nimport collections\nimport re\n\n\ndef mostwords(paragraph:str, ban:str) -> str:\n    words = [word for word in re.sub(r'[^\\w]',' ', paragraph) # list comprehension\n             .lower().split()\n             if word not in ban]\n\n    counts = collections.Counter(words)\n    return counts.most_common(1)[0][0] # sorted by count # return the top (1) entry\n\ndef mostwords2(paragraph:str, ban:str) -> str:\n    words = [word for word in re.sub(r'[^\\w]',' ', paragraph) # list comprehension\n             .lower().split()\n             if word not in ban]\n\n    counts = collections.defaultdict(int) # int is the default value\n    for word in words:\n        counts[word] += 1\n\n    return max(counts, key=counts.get) # Python has no argmax, so pass a key and compare by the corresponding value # get fetches the value\n\n\nparagraph = \"Bob hit a ball, the hit BALL flew far after it was hit\"\nban = \"hit\"\n\nmostwords(paragraph, ban)\nmostwords2(paragraph, ban)","repo_name":"nextyourmoney/Algorithm","sub_path":"파이썬 알고리즘 인터뷰/문자/04.가장 흔한 단어.py","file_name":"04.가장 흔한 단어.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72464895028","text":"class Solution:\n    def maximumRemovals(self, s: str, p: str, removable: List[int]) -> int:\n        def check(k):\n            i = j = 0\n            ids = set(removable[:k])\n            while i < m and j < n:\n                if i not in ids and s[i] == p[j]:\n                    j += 1\n                i += 1\n            return j == n\n\n        m, n = len(s), len(p)\n        left, right = 0, len(removable)\n        while left < right:\n            mid = (left + right + 1) >> 1\n            if check(mid):\n                left = mid\n            else:\n                right = mid - 1\n        return left\n","repo_name":"doocs/leetcode","sub_path":"solution/1800-1899/1898.Maximum Number of Removable Characters/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":25791,"dataset":"github-code","pt":"94"} +{"seq_id":"2738976665","text":"\ninpFile = open(\"./in1\")\nlines = inpFile.readlines();\ninpFile.close();\n\nnumbers = []\n\nfor line in lines:\n    num = int(line)\n    numbers.append(num)\n\nnumbers.sort()\n\nlN = 0\n\n# cntD1 = 0\n# cntD3 = 1\n\n# for n in numbers:\n#     if n - lN == 1:\n#         cntD1+=1\n#     elif n - lN == 3:\n#         cntD3+=1\n#     lN = n\n\ntarget = numbers[-1] + 3\n\ndp = list(range(200))\nex = list(range(200))\n\nfor i in range(200):\n    dp[i] = 0\n    ex[i] = 0\n\nfor n in numbers:\n    ex[n] = 1\n\ndp[0] = 1\nex[0] = 1\nex[target] = 1\n\nfor i in range(196):\n    if dp[i] == 0 or ex[i] == 0:\n        continue\n    for j in range(1,4):\n        if ex[i+j] == 1:\n            dp[i+j]+=dp[i]\n\nprint(dp[target])\n","repo_name":"icebox246/aoc2020","sub_path":"10/puz1.py","file_name":"puz1.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"9095174977","text":"from pc_ble_driver_py.ble_driver import (\n    BLEDriver,\n    BLEEnableParams,\n    BLEConfig,\n    BLEConfigConnGatt,\n    BLEConfigConnGap,\n)\nfrom pc_ble_driver_py.ble_adapter import BLEAdapter\nfrom driver_setup import Settings\n\n\ndef setup_adapter(\n    port,\n    auto_flash,\n    baud_rate,\n
retransmission_interval,\n    response_timeout,\n    driver_log_level,\n):\n    settings = Settings.current()\n\n    driver = BLEDriver(\n        serial_port=port,\n        auto_flash=auto_flash,\n        baud_rate=baud_rate,\n        retransmission_interval=retransmission_interval,\n        response_timeout=response_timeout,\n        log_severity_level=driver_log_level,\n    )\n\n    adapter = BLEAdapter(driver)\n    adapter.default_mtu = settings.mtu\n    adapter.open()\n    if settings.nrf_family == \"NRF51\":\n        adapter.driver.ble_enable(\n            BLEEnableParams(\n                vs_uuid_count=1,\n                service_changed=0,\n                periph_conn_count=1,\n                central_conn_count=1,\n                central_sec_count=1,\n            )\n        )\n    elif settings.nrf_family == \"NRF52\":\n        gatt_cfg = BLEConfigConnGatt()\n        gatt_cfg.att_mtu = adapter.default_mtu\n        gatt_cfg.tag = Settings.CFG_TAG\n        adapter.driver.ble_cfg_set(BLEConfig.conn_gatt, gatt_cfg)\n\n        if hasattr(settings, \"event_length\"):\n            gap_cfg = BLEConfigConnGap()\n            gap_cfg.event_length = settings.event_length\n            adapter.driver.ble_cfg_set(BLEConfig.conn_gap, gap_cfg)\n\n        adapter.driver.ble_enable()\n\n    return adapter\n","repo_name":"NordicSemiconductor/pc-ble-driver-py","sub_path":"tests/driver_setup/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"94"} +{"seq_id":"33033926189","text":"\n\n\n\n'''\nExpander code for clipping individual gradients.\n\nThis code is due to Mikko Heikkilä (@mixheikk)\n'''\n\n\n\nimport torch\nfrom torch.autograd import Variable\n\nimport sys\n\n\n# clip and accumulate clipped gradients\n\ndef acc_scaled_grads(model, C, cum_grads, use_cuda=False):\n\n    batch_size = model.batch_proc_size\n\n    g_norm = Variable(torch.zeros(batch_size),requires_grad=False)\n\n    if use_cuda:\n        g_norm = g_norm.cuda()\n\n    for p in filter(lambda p: p.requires_grad, model.parameters() ):\n        if p.grad is not None:\n            g_norm += torch.sum( p.grad.view(batch_size,-1)**2, 1)\n\n    g_norm = torch.sqrt(g_norm)\n\n    # do clipping and accumulate\n    for p, key in zip( filter(lambda p: p.requires_grad, model.parameters()), cum_grads.keys() ):\n        if p is not None:\n            cum_grads[key] += torch.sum( (p.grad/torch.clamp(g_norm.contiguous().view(-1,1,1)/C, min=1)), dim=0 )\n\n\n# add noise and replace model grads with cumulative grads\ndef add_noise_with_cum_grads(model, C, sigma, cum_grads, use_cuda=False):\n\n    batch_proc_size = model.batch_proc_size\n    for p, key in zip( filter(lambda p: p.requires_grad, model.parameters()), cum_grads.keys() ):\n\n        if p.grad is not None:\n\n            # add noise to summed clipped pars\n            if use_cuda:\n                p.grad = ((cum_grads[key].expand(batch_proc_size,-1,-1) + \\\n                    Variable( (sigma*C)*torch.normal(mean=torch.zeros_like(p.grad[0]).data, \\\n                    std=1.0).expand(batch_proc_size,-1,-1) ) )/model.batch_size).cuda()\n            else:\n                p.grad = (cum_grads[key].expand(batch_proc_size,-1,-1) + \\\n                    Variable( (sigma*C)*torch.normal(mean=torch.zeros_like(p.grad[0]).data,std=1.0).expand(batch_proc_size,-1,-1) ) )/model.batch_size\n","repo_name":"DPBayes/ADADP","sub_path":"CIFAR_tests/px_expander.py","file_name":"px_expander.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"94"} +{"seq_id":"42843653970","text":"## Writing my own backtesting program\r\n### - userBank : the money, commission, and slippage used to run the backtest, like a real bank account\r\n### - Boss : adjusts the per-symbol weights the user wants across several tradeManagers, has each tradeManager run its backtest, and computes the overall result. Later on it will also pass a trading method per symbol to each tradeManager, e.g. symbol 1 uses algorithm 1 and symbol 2 uses algorithm 2\r\n### - StockData : manages all of the data needed for the whole backtest\r\n### - Algorithm : once the whole backtest program is finished, it will be reworked so that only this Algorithm class has to be written\r\n\r\nimport mysql.connector\r\nimport os\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\n\r\n# declared to make the parent directory importable\r\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\r\n\r\nfrom lecture_practice.pwd import mysql_credentials, xing_credentials\r\nimport pandas as pd\r\nimport datetime\r\nfrom talib import SMA\r\n\r\nclass userBank:\r\n    def __init__(self, money, commission, slippage):\r\n        self.money = money\r\n        self.commission = commission\r\n        self.slippage = slippage\r\n\r\n\r\nclass stockData:\r\n    def __init__(self, s_date, e_date, shcode):\r\n        self.connection = mysql.connector.connect(user=mysql_credentials[\"user\"], password=mysql_credentials[\"password\"], host=mysql_credentials[\"host\"], database=\"backtest\")\r\n        self.df = pd.read_sql_query(\"select * from sh{}\".format(shcode), self.connection, index_col='date').loc[s_date:e_date]\r\n        #print(self.df)\r\n        self.precalculated_data()\r\n\r\n    def precalculated_data(self):\r\n        self.sma_5 = SMA(self.df[\"close\"], timeperiod=5)\r\n        self.sma_20 = SMA(self.df[\"close\"], timeperiod=20)\r\n\r\n\r\nclass tradeManager:\r\n    def __init__(self, s_date, e_date, shcode, s_money, show_result = False):\r\n        self.stock_data = stockData(s_date, e_date, shcode)\r\n        self.money = s_money\r\n\r\n        self.total_quantity = []\r\n        self.order_tracking = []\r\n        self.asset = []\r\n\r\n        self.run_backtest()\r\n        if show_result == True:\r\n            self.show_result(s_money, shcode)\r\n\r\n    def run_backtest(self):\r\n        integer_index = 0\r\n        for daily_stock_date in self.stock_data.df.itertuples():\r\n            #print(daily_stock_date)\r\n            is_buy_signal = self.buy_signal(daily_stock_date, integer_index)\r\n            is_sell_signal = self.sell_signal(daily_stock_date, integer_index)\r\n\r\n            if (is_buy_signal == True) and (is_sell_signal == True):\r\n                try:\r\n                    quantity = self.total_quantity[-1]\r\n                except IndexError:\r\n                    self.total_quantity.append(0)\r\n                else:\r\n                    self.total_quantity.append(quantity)\r\n\r\n            elif (is_buy_signal == True) and (is_sell_signal == False):\r\n                self.buy(daily_stock_date)\r\n            elif (is_buy_signal == False) and (is_sell_signal == True):\r\n                self.sell(daily_stock_date)\r\n            else:\r\n                try:\r\n                    quantity = self.total_quantity[-1]\r\n                except IndexError:\r\n                    self.total_quantity.append(0)\r\n                else:\r\n                    self.total_quantity.append(quantity)\r\n\r\n            self.asset.append( self.money + self.total_quantity[-1] * daily_stock_date.close)\r\n\r\n            integer_index+=1\r\n\r\n    def buy_signal(self, data, integer_index):\r\n        if integer_index == 0 :\r\n            return False\r\n        else:\r\n            if (self.stock_data.sma_5[integer_index] > self.stock_data.sma_20[integer_index]) and (self.stock_data.sma_5[integer_index-1] < self.stock_data.sma_20[integer_index-1]):\r\n                return True\r\n            else:\r\n                return False\r\n\r\n    def sell_signal(self, data, integer_index):\r\n        if integer_index == 0:\r\n            return False\r\n        else:\r\n            if (self.stock_data.sma_5.iloc[integer_index] < self.stock_data.sma_20[integer_index]) and (self.stock_data.sma_5[integer_index-1] > self.stock_data.sma_20[integer_index-1]):\r\n                return True\r\n            else:\r\n                return False\r\n\r\n    def buy(self, data):\r\n        buy_price = data.close\r\n        quantity = self.money // buy_price\r\n        if quantity != 0:\r\n            # may not be strictly necessary...\r\n            self.money = self.money - ( quantity * buy_price )\r\n            self.total_quantity.append(quantity)\r\n            self.order_tracking.append((data.Index, buy_price, quantity))\r\n        else:\r\n            print(\"**WARN: BUY ZERO@{}\".format(data.Index))\r\n            self.total_quantity.append(0)\r\n\r\n    def sell(self, data):\r\n        sell_price = data.close\r\n\r\n        try:\r\n            quantity = self.total_quantity[-1]\r\n        except IndexError:\r\n            print(\"Sell signal occurred on the first day\")\r\n        else:\r\n            if quantity != 0:\r\n                self.money = self.money + (quantity * sell_price)\r\n                # the whole position was sold, so shares held = 0\r\n                self.total_quantity.append(0)\r\n                self.order_tracking.append((data.Index, sell_price, -quantity))\r\n            else:\r\n                print(\"**WARN: SELL ZERO @{}\".format(data.Index))\r\n                self.total_quantity.append(0)\r\n\r\n    def show_result(self, s_money, shcode):\r\n        print(\"------------------------------------------------------------\")\r\n        print(\"Starting money: {}\".format(s_money))\r\n        print(\"Trade history: \", self.order_tracking)\r\n        print(\"Asset history:\", self.asset)\r\n        print(\"------------------------------------------------------------\")\r\n        print(\"Total Length of Data: \")\r\n        print(len(self.total_quantity))\r\n        print(len(self.stock_data.df))\r\n        print(len(self.asset))\r\n        print(\"------------------------------------------------------------\")\r\n\r\n\r\n        x = self.stock_data.df.index\r\n        y = self.stock_data.df[\"close\"]\r\n\r\n        y_sma_5 = self.stock_data.sma_5\r\n        y_sma_20 = self.stock_data.sma_20\r\n\r\n        #plt.plot(x,y)\r\n        plt.plot(x, y_sma_5, '#fc037b', label = \"MA_5\")\r\n        plt.plot(x,y_sma_20, '#03fc56', label= \"MA_20\")\r\n        for order in self.order_tracking:\r\n            if (order[2] > 0):\r\n                # bought the full position\r\n                #plt.scatter(order[0], order[1], marker='o', color='#03b1fc')\r\n                plt.axvline(order[0] ,color='#03b1fc' )\r\n            elif (order[2] < 0):\r\n                # sold the full position\r\n                #plt.scatter(order[0], order[1], marker='o', color='#fc03db')\r\n                plt.axvline(order[0], color='#fc03db' )\r\n\r\n        plt.legend()\r\n\r\n\r\n        plt.title(shcode)\r\n        plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n    s_date = datetime.date(2015,6,12)\r\n    e_date = datetime.date(2022,3,25)\r\n\r\n    tradeManager(s_date, e_date, \"000020\", 100000000, True)","repo_name":"SungwookLE/Inflearn_pyqt_backtesting","sub_path":"lecture_practice/backtesting_1.py","file_name":"backtesting_1.py","file_ext":"py","file_size_in_byte":7049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"499694287","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# python libraries\nimport sys\nimport logging\nimport threading\n\n# pypi libraries\nfrom twisted.internet import reactor\nfrom twisted.web.server import Site\nfrom twisted.web.static import File\nfrom autobahn.util import newid\nfrom autobahn.twisted.websocket import (\n    WebSocketServerFactory,\n    WebSocketServerProtocol\n)\nfrom autobahn.twisted.resource import (\n    WebSocketResource,\n    HTTPChannelHixie76Aware\n)\n\n# create logger\nlogger = logging.getLogger(__file__)\n\n\nclass MyWebSocketServerProtocol(WebSocketServerProtocol):\n    \"\"\" MyWebSocketServerProtocol class \"\"\"\n    def onConnect(self, request):\n        logger.info('connected')\n        self.session_id = newid()\n\n    def onOpen(self):\n        logger.info('open websocket')\n        self.factory.register_session(self)\n\n    def onClose(self, wasClean, code, reason):\n        logger.info('close websocket. 
code:%d reason:%s', code, reason)\n self.factory.unregister_session(self)\n\n def connectionLost(self, reason):\n logger.debug('connection lost')\n self.factory.unregister_session(self)\n\n def onMessage(self, payload, isBinary):\n logger.debug('receive message')\n self.factory.add_recv_packet(payload, isBinary, self.session_id)\n\n\nclass MyWebSocketServerFactory(WebSocketServerFactory):\n \"\"\" MyWebSocketServerFactory class \"\"\"\n\n def __init__(self, uri, debug):\n WebSocketServerFactory.__init__(self, uri, debug=debug)\n self._session_id_list = []\n self._clients = {}\n\n def add_recv_packet(self, packet, is_binary, session_id):\n \"\"\" add data to data que\n\n :param packet:\n :param is_binary:\n :param session_id:\n \"\"\"\n if session_id in self._session_id_list:\n # check primary session\n pass\n else:\n logger.debug('target session [%s] is not found.', session_id)\n\n def register_session(self, session):\n \"\"\" register new session\n\n :param session_id:\n \"\"\"\n session_id = session.session_id\n logger.info('add session [%s]', session_id)\n\n # add data que for this session\n self._session_id_list.append(session_id)\n self._clients[session_id] = session\n\n def unregister_session(self, session):\n \"\"\" delete session id\n\n :session_id:\n \"\"\"\n session_id = session.session_id\n logger.info('delete session [%s]', session_id)\n self._session_id_list = [id for id in self._session_id_list if id != session_id]\n del self._clients[session_id]\n\n def send_packet(self, packet):\n \"\"\" add send packet by protocol class\n\n :packet:\n \"\"\"\n for client in self._clients.values():\n client.sendMessage(packet, isBinary=True)\n\n\nclass MyWebSocketServer(object):\n \"\"\" MyWebSocketServer class \"\"\"\n def __init__(self, host, port, debug=False):\n \"\"\" init function \"\"\"\n self._thread = None\n self._factory = None\n self._resource = None\n self._host = host\n self._port = port\n self._url = 'ws://{host}:{port}'.format(\n host=host,\n port=port\n )\n self._debug = debug\n\n def start(self):\n \"\"\" start websocket server \"\"\"\n logger.info('start websocket server at %s', self._url)\n self._factory = MyWebSocketServerFactory(\n self._url,\n debug=self._debug\n )\n\n self._factory.protocol = MyWebSocketServerProtocol\n self._factory.setProtocolOptions(allowHixie76=True)\n\n self._resource = WebSocketResource(self._factory)\n\n # we server static files under \"/\" ..\n root = File('.')\n\n # and our WebSocket server under \"/websocket\"\n root.putChild('websocket', self._resource)\n\n # both under one Twisted Web Site\n site = Site(root)\n site.protocol = HTTPChannelHixie76Aware\n reactor.listenTCP(self._port, site)\n self._thread = threading.Thread(target=reactor.run, args=(False,))\n self._thread.start()\n\n def stop(self):\n \"\"\" stop websocket server\n \"\"\"\n reactor.stop()\n self._thread.join(0.1)\n\n def send_packet(self, packet):\n \"\"\" notify connected\n \"\"\"\n self._factory.send_packet(packet)\n\n\ndef main():\n server = MyWebSocketServer(\n host='127.0.0.1',\n port=9000,\n debug=True\n )\n server.start()\n\n try:\n while True:\n time.sleep(1)\n except:\n pass\n finally:\n server.stop()\n return 1\n\nif __name__ == '__main__':\n sys.exit(main())\n\n","repo_name":"chikuta/autobahn_websocketserver_sample","sub_path":"websocket_server_sample.py","file_name":"websocket_server_sample.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} 
+{"seq_id":"73930578229","text":"from django_filters.rest_framework import FilterSet\nfrom .models import sales\n\nclass ProductFilter(FilterSet):\n class Meta:\n model = sales\n fields = {\n 'Name':['exact'],\n 'Rank':['exact','lt'],\n 'Year':['exact'],\n 'Platform':['exact'],\n 'Genre':['exact'],\n\n }","repo_name":"amirali1999/cloud-project","sub_path":"sales/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35253886349","text":"import sys\nimport os\nimport logging\nfrom datetime import datetime\n\nfrom logs.Logger import Logger\n\n\nclass FileLogger(Logger):\n def __init__(self, log_folder, session_name):\n super().__init__()\n self.timeformat = \"%Y-%m-%d_%H-%M-%S\"\n self.session_name = session_name\n self.log_folder = log_folder\n self.file_name = self.generate_file_name()\n self.logger = self.init_logger()\n\n def init_logger(self):\n logging.basicConfig(filename=self.file_name,\n filemode='a',\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt=self.timeformat,\n level=logging.INFO)\n\n logger = logging.getLogger(self.session_name)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n\n return logger\n\n\n def info(self, message):\n self.logger.info(message)\n self.add_to_log_file(message)\n\n def error(self, message):\n self.logger.error(message)\n self.add_to_log_file(message)\n\n def debug(self, message):\n self.logger.debug(message)\n self.add_to_log_file(message)\n\n def generate_file_name(self):\n folder_name = f'{self.log_folder}/{self.session_name}'\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n return f'{folder_name}/{datetime.utcnow().strftime(self.timeformat)}.log'\n","repo_name":"TomasDavidYe/pyserverless","sub_path":"logs/FileLogger.py","file_name":"FileLogger.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"74358776308","text":"from flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import FileField, SubmitField\nfrom werkzeug.utils import secure_filename\nfrom utils import extract_top_hex_colours_from_image\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'topsec'\nBootstrap(app)\n\n\nclass UploadForm(FlaskForm):\n file = FileField('Upload an Image File')\n submit = SubmitField('Submit') \n\n\n@app.route(\"/\", methods=['GET', 'POST']) \ndef index():\n image_name = 'default_image.jpeg'\n form = UploadForm()\n if form.validate_on_submit():\n file = form.file.data\n image_name = secure_filename(file.filename)\n file.save(f\"static/{image_name}\")\n \n file_name = f\"static/{image_name}\" \n hex_codes = extract_top_hex_colours_from_image(file_name)\n return render_template(\"index.html\", form=form, image=image_name, hex_codes = hex_codes) \n \nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"b3mery/Flask-Image-Colour-Palette-Web-App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12795783781","text":"from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom form_utils.forms import BetterForm\n\nfrom tao import datasets\nfrom tao.forms import NO_FILTER\nfrom tao.models import DataSetProperty\nfrom 
tao.xml_util import module_xpath\n\n#### XML version 2 ####\n\ndef to_xml_2(form, root):\n from tao.xml_util import find_or_create, child_element\n\n selected_type, selected_filter = form.cleaned_data['filter'].split('-')\n if selected_filter == NO_FILTER:\n return\n\n filter_parameter = None\n filter_type = ''\n units = ''\n if selected_type == 'D':\n filter_parameter = DataSetProperty.objects.get(pk=selected_filter)\n filter_type = filter_parameter.name\n units = filter_parameter.units\n elif selected_type == 'B':\n selected_filter, selected_extension = selected_filter.split('_')\n filter_parameter = datasets.band_pass_filter(selected_filter)\n filter_type = str(filter_parameter.filter_id) + '_' + selected_extension\n units = 'bpunits'\n\n rf_elem = find_or_create(root, 'record-filter')\n child_element(rf_elem, 'module-version', text=RecordFilterForm.MODULE_VERSION)\n filter_elem = find_or_create(rf_elem, 'filter')\n child_element(filter_elem, 'filter-attribute', filter_type)\n filter_min = form.cleaned_data['min']\n filter_max = form.cleaned_data['max']\n default_filter = form.ui_holder.dataset.default_filter_field\n if default_filter is not None and filter_parameter.id == default_filter.id and filter_min is None and filter_max is None:\n filter_min = form.ui_holder.dataset.default_filter_min\n filter_max = form.ui_holder.dataset.default_filter_max\n child_element(filter_elem, 'filter-min', text=str(filter_min), units=units)\n child_element(filter_elem, 'filter-max', text=str(filter_max), units=units)\n\ndef from_xml_2(cls, ui_holder, xml_root, prefix=None):\n simulation = module_xpath(xml_root, '//light-cone/simulation')\n galaxy_model = module_xpath(xml_root, '//light-cone/galaxy-model')\n data_set = datasets.dataset_find_from_xml(simulation, galaxy_model)\n filter_attribute = module_xpath(xml_root, '//record-filter/filter/filter-attribute')\n filter_min = module_xpath(xml_root, '//record-filter/filter/filter-min')\n filter_max = module_xpath(xml_root, '//record-filter/filter/filter-max')\n filter_units = module_xpath(xml_root, '//record-filter/filter/filter-min', attribute='units')\n if filter_min == 'None': filter_min = None\n if filter_max == 'None': filter_max = None\n data_set_id = 0\n if data_set is not None: data_set_id = data_set.id\n kind, record_id = datasets.filter_find_from_xml(data_set_id, filter_attribute, filter_units)\n if filter_attribute == None:\n kind = 'X'\n record_id = NO_FILTER\n attrs = {prefix+'-filter': kind + '-' + str(record_id),\n prefix+'-min': filter_min,\n prefix+'-max': filter_max,\n }\n return cls(ui_holder, attrs, prefix=prefix)\n\n########################\n\nclass RecordFilterForm(BetterForm):\n EDIT_TEMPLATE = 'mock_galaxy_factory/record_filter.html'\n MODULE_VERSION = 1\n SUMMARY_TEMPLATE = 'mock_galaxy_factory/record_filter_summary.html'\n LABEL = 'Selection'\n TAB_ID = settings.MODULE_INDICES['record_filter']\n\n class Meta:\n fieldsets = [('primary', {\n 'legend': '',\n 'fields': ['filter', 'min', 'max',],\n }),]\n\n def __init__(self, *args, **kwargs):\n self.ui_holder = args[0]\n super(RecordFilterForm, self).__init__(*args[1:], **kwargs)\n is_int = False\n if self.ui_holder.is_bound('light_cone'):\n objs = datasets.filter_choices(\n self.ui_holder.raw_data('light_cone', 'dark_matter_simulation'),\n self.ui_holder.raw_data('light_cone', 'galaxy_model'))\n choices = [('X-' + NO_FILTER, 'No Filter')] + [('D-' + str(x.id), x.label + ' (' + x.units + ')') for x in objs] + \\\n [('B-' + str(x.id) + '_apparent', x.label) for x in 
datasets.band_pass_filters_objects()] + \\\n [('B-' + str(x.id) + '_absolute', x.label) for x in datasets.band_pass_filters_objects()]\n filter_type, record_filter = args[1]['record_filter-filter'].split('-')\n if filter_type == 'D':\n obj = DataSetProperty.objects.get(pk = record_filter)\n is_int = obj.data_type == DataSetProperty.TYPE_INT or obj.data_type == DataSetProperty.TYPE_LONG_LONG\n else:\n choices = [] # [('X-' + NO_FILTER, 'No Filter')]\n if is_int:\n args = {'required': False, 'decimal_places': 0, 'max_digits': 20}\n val_class = forms.DecimalField\n else:\n args = {'required': False}\n val_class = forms.FloatField\n\n self.fields['filter'] = forms.ChoiceField(required=True, choices=choices)\n self.fields['max'] = val_class(**dict(args.items()+{'label':_('Max'), 'widget': forms.TextInput(attrs={'maxlength': '20'})}.items()))\n self.fields['min'] = val_class(**dict(args.items()+{'label':_('Min'), 'widget': forms.TextInput(attrs={'maxlength': '20'})}.items()))\n self.fields['filter'].label = 'Apply global catalogue selection using...'\n\n self.fields['filter'].widget.attrs['data-bind'] = 'options: selections, value: selection, optionsText: function(i) { return i.label }'\n self.fields['min'].widget.attrs['data-bind'] = 'value: selection_min'\n self.fields['max'].widget.attrs['data-bind'] = 'value: selection_max'\n\n\n def check_min_or_max_or_both(self):\n if 'filter' not in self.cleaned_data:\n return\n selected_type, selected_filter = self.cleaned_data['filter'].split('-')\n if selected_filter == NO_FILTER:\n return\n min_field = self.cleaned_data.get('min')\n max_field = self.cleaned_data.get('max')\n if min_field is None and max_field is None:\n msg = _('Either \"min\", \"max\" or both to be provided.')\n self._errors[\"min\"] = self.error_class([msg])\n self._errors[\"max\"] = self.error_class([msg])\n\n def check_min_less_than_max(self):\n min_field = self.cleaned_data.get('min')\n max_field = self.cleaned_data.get('max')\n if min_field is not None and max_field is not None and min_field >= max_field:\n msg = _('The \"min\" field must be less than the \"max\" field.')\n self._errors[\"min\"] = self.error_class([msg])\n del self.cleaned_data[\"min\"]\n\n def clean(self):\n super(RecordFilterForm, self).clean()\n self.check_min_or_max_or_both()\n self.check_min_less_than_max()\n return self.cleaned_data\n\n def to_json_dict(self):\n \"\"\"Answer the json dictionary representation of the receiver.\n i.e. 
something that can easily be passed to json.dumps()\"\"\"\n json_dict = {}\n for fn in self.fields.keys():\n ffn = self.prefix + '-' + fn\n val = self.data.get(ffn)\n json_dict[ffn] = val \n return json_dict\n\n def to_xml(self, parent_xml_element):\n version = 2.0\n to_xml_2(self, parent_xml_element)\n\n @classmethod\n def from_xml(cls, ui_holder, xml_root, prefix=None):\n version = module_xpath(xml_root, '//workflow/schema-version')\n if version == '2.0':\n return from_xml_2(cls, ui_holder, xml_root, prefix=prefix)\n else:\n return cls(ui_holder, {}, prefix=prefix)\n\n","repo_name":"IntersectAustralia/asvo-tao","sub_path":"web/tao/record_filter_form.py","file_name":"record_filter_form.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8073295202","text":"times = input()\n\nfor i in range(int(times)):\n length = input()\n data = input()\n tmp = sorted(data)\n ans = []\n for i in range(len(data)):\n if data[i] != tmp[i]:\n ans.append(str(i+1))\n if len(ans) != 0:\n print(1)\n print(str(len(ans)) + ' ' + ' '.join(ans))\n else:\n print(0)\n","repo_name":"Destiny0504/contest","sub_path":"2021/codeforce/20211112/B-2.py","file_name":"B-2.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"39731520489","text":"import pandas as pd\r\nfrom playwright.sync_api import sync_playwright\r\n\r\n########## ABRE PLAYWRIGHT ##########\r\nwith sync_playwright() as p:\r\n navegador = p.chromium.launch(headless = False) # headless = abre a janela do navegador\r\n pagina = navegador.new_page()\r\n\r\n # acessa a page\r\n pagina.goto(\"https://cos.ufrj.br/index.php/pt-BR/pos-graduacao/disciplinas-3\")\r\n\r\n # COPIA O LINK DO OBJETO\r\n url = pagina.locator('//*[@id=\"adminForm\"]/table/tbody/tr[1]/td[1]/a').get_attribute('href')\r\n \r\n # DEFINE O NOME DO ARQUIVO\r\n nome_arq = pagina.locator('//*[@id=\"adminForm\"]/table/tbody/tr[1]/td[1]/a').text_content().strip().replace(\r\n '/','_') + '.xlsx'\r\n \r\n # ADICIONA A PARTE INICIAL DA URL\r\n url = 'https://cos.ufrj.br' + url\r\n print(nome_arq)\r\n\r\n # fecha o navegador\r\n navegador.close()\r\n########## FIM PLAYWRIGHT ##########\r\n\r\n########## LEITURA DE TABELA HTML ##########\r\nurl = pd.read_html(url)\r\nurl = pd.DataFrame(url[0]) # transforma em um dataframe\r\nurl.to_excel(nome_arq) # salva a o dataframe em um arquivo excel","repo_name":"pclss/Trabalhos-Web-Scraping","sub_path":"disc_pesc_final.py","file_name":"disc_pesc_final.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73300504954","text":"# -*- coding: utf-8 -*-\n\nimport scipy.integrate as integrate\nimport scipy.special as special\nresult = integrate.quad(lambda x: special.jv(2.5,x), 0, 4.5)\nresult\n\n#%% matplotlib绘图\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nY, X = np.mgrid[-3:3:100j, -3:3:100j]\nU = -1 - X**2 + Y\nV = 1 + X - Y**2\nspeed = np.sqrt(U*U + V*V)\n\nfig0, ax0 = plt.subplots()\nstrm = ax0.streamplot(X, Y, U, V, color=U, linewidth=2, cmap=plt.cm.autumn)\nfig0.colorbar(strm.lines)\n\nfig1, (ax1, ax2) = plt.subplots(ncols=2) # 即有两张子图并列排放\nax1.streamplot(X, Y, U, V, density=[0.5, 1])\n\nlw = 5*speed / speed.max()\nax2.streamplot(X, Y, U, V, density=0.6, color='k', linewidth=lw)\n\nplt.show()\n\n#%% matplotlib草稿\n# 绘制马鞍面 z=x^2/9 - y^2/4\nfrom 
mpl_tookkits.mplot3d import Axes3D\nfrom matplotlib import pyplot as plt\n\nX, Y = np.mgrid[-3:3:100j, -3:3:100j]\nZ = X**2/9 - Y**2/4\nfig = plt.figure()\n\nax.plot_surface(X, Y, Z)\n# ax.streamplot(X, Y, Z, color=Z)\n# plt.show()","repo_name":"try-agaaain/science_book","sub_path":"SymPy/scipyRecord.py","file_name":"scipyRecord.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22861408702","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Date : 2020/8/6\n# @Author : Bruce Liu /Lin Luo\n# @Mail : 15869300264@163.com\n\nfrom creational.example.supplier import SupplierBuilder\nfrom creational.example.component import Os, Logo, Mould, Product\n\n\nclass SingletonPipeline(object):\n \"\"\"\n 单例模式流水线基类\n 无论外面刮风下雨,打雷吹逼,我始终如一,一,一\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n # 判断类变量中是否有singleton属性\n if not hasattr(cls, '_instance'):\n # 如果没有singleton属性,则调用super方法,创建一个实例并赋予singleton属性\n cls._instance = super().__new__(cls)\n print('return instance by created')\n return cls._instance\n else:\n print('return instance from class attribute')\n return cls._instance\n\n\nclass Pipeline(SingletonPipeline):\n \"\"\"\n 流水线类,继承了单例的基类\n \"\"\"\n\n def produce_phone(self, supplier: SupplierBuilder.__subclasses__(), os: Os.__subclasses__(),\n logo: Logo.__subclasses__(), mould: Mould.__subclasses__()) -> Product:\n \"\"\"\n 手机制造方法,使用工厂模式,传入对应的类进行查u你更加爱你\n :param supplier: 供应商类\n :param os: 操作系统类\n :param logo: logo类\n :param mould: 模具类\n :return:\n \"\"\"\n print(f'this is pipeline with id: {id(self)}')\n print('begin to produce a phone')\n print(f'stage 1: create the phone mould, model: {mould.__name__}')\n phone = mould()\n print(f'stage 2: order a mother board from supplier: {supplier.__name__}')\n mother_board_supplier = supplier()\n print(f'\\t mother board supplier begin to build up the mother board')\n mother_board_supplier.add_cpu()\n mother_board_supplier.add_memory()\n mother_board_supplier.add_storage()\n print(f'\\t mother board build up complete')\n print(f'\\t get mother board from supplier')\n mother_board = mother_board_supplier.get_mother_board()\n print(f'stage 3: get a copy of os and logo from os:{os.__name__} and logo:{logo.__name__}')\n os_copy = os().clone()\n logo_copy = logo().clone()\n print(f'stage 4: install os into mother_board')\n mother_board.storage.os = os_copy\n print(f'stage 5: add the mother board into mould')\n phone.mother_board = mother_board\n print(f'stage 6: paste logo on the phone')\n phone.logo = logo_copy\n print('the phone had benn produced')\n print('here are the information:')\n phone.information()\n product = Product()\n product.mould = phone\n return product\n","repo_name":"BruceWW/python_design_pattern","sub_path":"creational/example/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"72099374715","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 15 15:16:18 2021\n\n@author: brianroepke\n\"\"\"\n\n\ndef getTitle():\n moviesDict = {\"movies\": {}}\n genre = input('Enter genre : ')\n keys = ['title', 'year', 'rating']\n titles = []\n count = 5\n total = 0\n\n # Request the user enter 5 movie titles\n for i in range(count):\n values = []\n\n ok = False\n while not ok:\n title = input('Enter movie title: ')\n try:\n val = str(title)\n ok = True\n values.append(val)\n except 
ValueError:\n print(\"Invalid type. Please try again\")\n\n ok = False\n while not ok:\n year = input('Enter movie year: ')\n try:\n val = int(year)\n ok = True\n values.append(val)\n except ValueError:\n print(\"Invalid type. Please try again\")\n\n ok = False\n while not ok:\n rating = input(\"Enter movie rating: \")\n try:\n val = float(rating)\n ok = True\n values.append(val)\n except ValueError:\n print(\"Invalid type. Please try again\")\n\n d = {k: v for k, v in zip(keys, values)}\n titles.append(d)\n\n moviesDict['movies'][genre] = titles\n\n return moviesDict\n\n\nmy_movie = getTitle()\nprint(my_movie)\n","repo_name":"broepke/DATA110","sub_path":"Week 3/python_basics_2_broepke.py","file_name":"python_basics_2_broepke.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"74595103675","text":"'''\nThis module includes all the visualizations pertinent to the project\n'''\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom utils import time_series, inflation_adjust, clean_values\n\n\ndef wars_over_time(mega_dataset):\n '''\n plots the number of wars over time\n '''\n def count(i, mega_dataset):\n row = mega_dataset.loc[i, :]\n return len(row[row == 2])\n mega_dataset = mega_dataset.resample(rule='1Y').mean()\n num = pd.Series(mega_dataset.index).apply(lambda i: count(i, mega_dataset))\n num.index = mega_dataset.index\n num.name = 'War Count'\n num = pd.DataFrame(num)\n sns.lineplot(x=num.index, y='War Count', data=num)\n plt.xlabel('Time (Years)')\n plt.ylabel('Number of occuring wars')\n plt.title(('Number of Eventually-Terminated Wars' +\n ' Ongoing each Year Over Time'), y=1.08)\n plt.savefig('./visuals/wars_over_time.png', bbox_inches='tight')\n\n\ndef stock_vs_inflation_adjusted(stocks):\n '''\n plots our weighted average in comparison to the global market indices\n '''\n fig, ax = plt.subplots(1)\n stocks.sort_index(inplace=True)\n stocks = stocks.resample('1M').mean()\n inflation = time_series('inflation_ratios', col='Unnamed: 0', concat=False)\n vals = ['S&P Adjusted', 'EUR Adjusted', 'NIKKEI Adjusted',\n 'SSE Adjusted', 'Average']\n for val in vals:\n adjusted = pd.Series(stocks.index).apply(lambda x: inflation_adjust(x,\n val, stocks, inflation))\n adjusted.index = stocks.index\n stocks[val] = adjusted\n\n post_inflation = stocks[vals[0:-1]]\n post_inflation.plot(ax=ax, alpha=0.3)\n stocks.rename(columns={'Average': 'Weighted Average'}, inplace=True)\n average = stocks['Weighted Average']\n average.plot(ax=ax, legend=True, color='#000000')\n\n plt.title('Weighted Average of Inflation Adjusted Market Indices',\n y=1.08)\n plt.xlabel('Date (1929-2022)')\n plt.xticks(rotation=-45)\n plt.ylabel('Adjusted Market Value of each Index (in Millions)')\n plt.savefig('./visuals/average_index_post_inflation.png',\n bbox_inches='tight')\n return average\n\n\ndef economy_over_time(global_economy):\n '''\n plots the global economy over time\n '''\n fig, ax = plt.subplots(1)\n global_economy.plot(ax=ax)\n plt.title('Global Economic as a Weighted Average of Market Indexes',\n y=1.08)\n plt.xlabel('Date (1929-2022)')\n plt.ylabel('Global Economic Health Index (Millions)')\n plt.savefig('./visuals/global_economy.png', bbox_inches='tight',\n pad_inches=0.3)\n\n\ndef freedom_over_time(freedom):\n '''\n plots the percentages of free, not free, and partially free\n countries over time.\n '''\n def clean_percents(data):\n data = str(data)\n data = data[0:-1]\n return 
float(data)\n freedom['Year(s) Under Review**'] = \\\n freedom['Year(s) Under Review**'] \\\n .apply(clean_values)\n relevant_indexes = [4, 6, 8]\n for value in relevant_indexes:\n freedom[freedom.columns[value]] = \\\n freedom[freedom.columns[value]] \\\n .apply(clean_percents)\n freedom = freedom.sort_values('Year(s) Under Review**')\n relavent = freedom[['Year(s) Under Review**', '% of F Countries',\n '% of PF Countries', '% of NF Countries']]\n relavent = relavent.set_index('Year(s) Under Review**')\n relavent.plot()\n plt.title('World Freedom Percentages of Countries over Time')\n plt.xlabel('Year (1972 to 2021)')\n plt.ylabel('Percentage')\n plt.savefig('./visuals/global_freedom.png', bbox_inches='tight')\n\n\ndef main():\n mega_dataset = pd.read_csv('./datasets/final.csv', parse_dates=True,\n index_col='Date')\n wars_over_time(mega_dataset)\n stocks = time_series('averaged', 'stocks/processed/', concat=False)\n global_economy = stock_vs_inflation_adjusted(stocks)\n economy_over_time(global_economy)\n freedom = pd.read_csv('./datasets/freedom_by_year.csv')\n freedom_over_time(freedom)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"krishna-panchap/163-Final-Project","sub_path":"visuals.py","file_name":"visuals.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42755618486","text":"import QuantLib as ql\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy import stats\nimport pandas as pd\n\n# simulate one Geometric Brownian motion\n# dS_{t} = mu * S_{t} * dt + sigma * S_{t} * dW_{t}\n# time to maturity\nTT = 1\nmu = -0.2\nsigma = 0.05\nr = 0.05\n\n# number of trading days\nNUM_OF_DATE = 252\ndt = 1/NUM_OF_DATE\n\n# gaussian noise\nnoise = np.random.normal(0, 1, NUM_OF_DATE)\nstock = np.zeros((NUM_OF_DATE))\nstock[0] = 100\n\nfor i in range(1, NUM_OF_DATE):\n # stocks in P-dynamics\n # dW_{t} = sqrt(dt) * noise\n # drift mu\n stock[i] = stock[i - 1] + stock[i - 1] * (mu * dt + np.sqrt(dt) * sigma * noise[i])\n\n# plt.plot(np.arange(0, NUM_OF_DATE, 1), stock)\n# plt.show()\n\n# simulate many paths Geometric Brownian Motion\nnum_paths = 10\nstock_paths = np.zeros((num_paths, NUM_OF_DATE))\nstock_paths[:, 0] = 100\n\nfor p in range(num_paths):\n noise = np.random.normal(0, 1, NUM_OF_DATE)\n\n for j in range(1, NUM_OF_DATE):\n # Stocks in Q-dynamics\n # drift r\n stock_paths[p, j] = stock_paths[p, j - 1] + stock_paths[p, j - 1] * (r * dt + sigma * noise[j])\n\n plt.plot(np.arange(0, NUM_OF_DATE, 1), stock_paths[p,:])\n\nplt.show()\n\n# compute call option price\n# strike\nK = 100\n\ncall_payoff = stock_paths[:, NUM_OF_DATE - 1] - K\n# price by the Monte Carlo method\nprice_call_Monte_Carlo = math.exp(-r * TT) / num_paths * np.sum(call_payoff[call_payoff > 0])\nprint(\"Price call Monte Carlo: \", price_call_Monte_Carlo)\n\n# BS formula\nd1 = 1 / (sigma * math.sqrt(TT)) * (math.log(100/K) + (r + sigma ** 2 / 2) * TT)\nd2 = d1 - sigma * math.sqrt(TT)\n\n# price by Black - Scholes formula\nsample = stats.norm(0, 1)\nBS_call = 100 * sample.cdf(d1) - K * math.exp(-r * TT) * sample.cdf(d2)\nprint(\"Example Black Scholes call option pricing: \", BS_call)\n\n# ---------------------------------\n# real data\nspx_2020 = pd.read_csv('SPX2020.csv')\nspx_2020['ClosePrice'].plot(figsize=(10, 6), )\nplt.xlabel('SPX close prices for the year 2020')\nplt.show()\n\n# number of trading days\nnum_trading_days = len(spx_2020['ClosePrice'])\n\n# Estimate historical 
volatility\nspx_prices = spx_2020['ClosePrice']\nr = spx_prices / spx_prices.shift(1)\nr = r[1:]\nr = np.log(r)\nr_average = np.mean(r)\n\nprint(\"Average returns: \", r_average)\n\n# annual historical volatility\nsigma_vol = np.sqrt(num_trading_days) * np.sqrt(1 / (num_trading_days - 1) * np.sum((r - r_average) ** 2))\nprint(\"Volatility: \", sigma_vol)\n\n# option pricing example\nspot = 3700\nstrike = 3700\ndividend_rate = 0.0163\nrisk_free_rate = 0.0025\nvol = 0.3475\nday_count = ql.Actual365Fixed()\ncalendar = ql.UnitedStates()\n\nmaturity_date = ql.Date().todaysDate()\ncalculation_date = maturity_date - ql.Period(1, ql.Years)\nql.Settings.instance().evaluationDate = calculation_date\n\nspot_handle = ql.QuoteHandle(ql.SimpleQuote(spot))\npayoff = ql.PlainVanillaPayoff(ql.Option.Call, strike)\nriskFreeTS = ql.YieldTermStructureHandle(ql.FlatForward(calculation_date, risk_free_rate, day_count))\ndividendTS = ql.YieldTermStructureHandle(ql.FlatForward(calculation_date, dividend_rate, day_count))\nvolTS = ql.BlackVolTermStructureHandle(ql.BlackConstantVol(calculation_date, calendar, vol, day_count))\n\neuropean_exercise = ql.EuropeanExercise(maturity_date)\neuropean_option = ql.EuropeanOption(payoff, european_exercise)\n\nbsm_process = ql.BlackScholesMertonProcess(spot_handle, dividendTS, riskFreeTS, volTS)\n\neuropean_option.setPricingEngine(ql.AnalyticEuropeanEngine(bsm_process))\noption_price = european_option.NPV()\nprint('Option price: ', option_price)\n\n# real data option pricing\noption_jan_2021 = pd.read_csv('SPX2021.csv')\nprint(option_jan_2021.head())","repo_name":"ldtpvince/option_pricing","sub_path":"black_scholes.py","file_name":"black_scholes.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41129588759","text":"\"\"\"Constants for the Abode Security System component.\"\"\"\nimport logging\nfrom datetime import timedelta\nimport voluptuous as vol\n\nfrom homeassistant.helpers import config_validation as cv, entity\nfrom homeassistant.const import (\n ATTR_DATE,\n ATTR_DEVICE_ID,\n ATTR_ENTITY_ID,\n ATTR_TIME,\n CONF_PASSWORD,\n CONF_USERNAME,\n EVENT_HOMEASSISTANT_STOP,\n Platform,\n)\n\nLOGGER = logging.getLogger(__package__)\n\nDOMAIN = \"hass_fwiot\"\nATTRIBUTION = \"provided by iot.frontware.com\"\n\nDEFAULT_CACHEDB = \"fwiot_cache.pickle\"\nCONF_POLLING = \"polling\"\n\nUPDATE_INTERVAL = timedelta(seconds=15)\nPOLLING_TIMEOUT_SEC = 10\n\nDEVICE_FINGER = 'FINGER'\nDEVICE_EMPDETECTOR = 'EMPDETECTOR'\nDEVICE_THERMIDITY = 'THERMIDITY'\n\nDEVICES_READY = [DEVICE_EMPDETECTOR, DEVICE_THERMIDITY]\n\nDEVICES_ICON = {\n DEVICE_FINGER: 'mdi:fingerprint',\n DEVICE_EMPDETECTOR: 'mdi:motion-sensor',\n DEVICE_THERMIDITY: 'mdi:thermometer-lines',\n}\n\n# Keys\nKEY_COORDINATOR = \"coordinator\"\nKEY_DEVICE = \"device\"\n\nFLOWTYPE_IOT = \"IOT device\"\nFLOWTYPE_FINGER = \"Fingerprint\"\n\nMODETYPE_ADD = \"Add new device\"\nMODETYPE_CHANGE = \"Change device settings\"\n\nFIELD_TYPE =\"type\"\nFIELD_API =\"api_key\"\nFIELD_IP =\"finger_print_ip\"\nFIELD_PORT =\"finger_print_port\"\nFIELD_TZ = \"finger_print_tz\"\nFIELD_UPDATE_EVERY = \"update_every\"\nFIELD_MODE =\"mode\"\nFIELD_QUERY =\"query\"\n\nATTR_DEVICE_NAME = \"device_name\"\nATTR_DEVICE_TYPE = \"device_type\"\nATTR_EVENT_CODE = \"event_code\"\nATTR_EVENT_NAME = \"event_name\"\nATTR_EVENT_TYPE = \"event_type\"\nATTR_EVENT_UTC = \"event_utc\"\nATTR_SETTING = \"setting\"\nATTR_USER_NAME = \"user_name\"\nATTR_APP_TYPE = 
\"app_type\"\nATTR_EVENT_BY = \"event_by\"\nATTR_VALUE = \"value\"\n\nSERVICE_SETTINGS = \"change_setting\"\nSERVICE_CAPTURE_IMAGE = \"capture_image\"\nSERVICE_TRIGGER_AUTOMATION = \"trigger_automation\"\n\n\nCHANGE_SETTING_SCHEMA = vol.Schema(\n {vol.Required(ATTR_SETTING): cv.string, vol.Required(ATTR_VALUE): cv.string}\n)\n\nCAPTURE_IMAGE_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})\n\nAUTOMATION_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})","repo_name":"Frontware/hass-iot","sub_path":"custom_components/hass-fwiot/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38225324874","text":"import torch\nfrom torchvision.transforms import functional as F\nfrom torchvision.transforms import transforms as T\nimport random\nimport numpy as np\nfrom typing import Sequence\nfrom my_cosmostat.astro.wl.mass_mapping import massmap2d, shear_data\nfrom astropy.io import fits\ntry:\n import pysparse\nexcept ImportError:\n print(\n \"Warning in transforms.py: do not find pysap bindings ==> use slow python code. \"\n )\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, target=None):\n for t in self.transforms:\n image, target = t(image, target)\n\n return image, target\n\n\nclass ToTensor(object):\n def __call__(self, image, target=None):\n if target:\n return F.to_tensor(image), F.to_tensor(target)\n else:\n return F.to_tensor(image)\n\n\nclass RandomHorizontalFlip(object):\n def __init__(self, prob):\n self.flip_prob = prob\n\n def __call__(self, image, target):\n if random.random() < self.flip_prob:\n image = F.hflip(image)\n target = F.hflip(target)\n return image, target\n\n\nclass RandomVerticalFlip(object):\n def __init__(self, prob):\n self.flip_prob = prob\n\n def __call__(self, image, target):\n if random.random() < self.flip_prob:\n image = F.vflip(image)\n target = F.vflip(target)\n return image, target\n\n\nclass DiscreteRotation(object):\n def __init__(self, angles: Sequence[int]):\n self.angles = angles\n\n def __call__(self, image, target):\n angle = random.choice(self.angles)\n image = F.rotate(image, angle)\n target = F.rotate(target, angle)\n return image, target\n\n\nclass ContinuousRotation(object):\n def __init__(self, degrees):\n self.degrees = degrees\n\n def __call__(self, image, target):\n angle = random.uniform(-self.degrees, self.degrees)\n image = F.rotate(image, angle)\n target = F.rotate(target, angle)\n return image, target\n\n\nclass RandomCrop(object):\n def __init__(self, size: int):\n self.size = size\n\n def __call__(self, image, target):\n crop_params = T.RandomCrop.get_params(image, (self.size, self.size))\n image = F.crop(image, *crop_params)\n target = F.crop(target, *crop_params)\n return image, target\n\n\n# add gaussian noise to shear\nclass AddGaussianNoise(object):\n def __init__(self, n_galaxy, mean=0.):\n \"\"\"\n calculate the gaussian noise standard deviation to be added to shear.\n please refer to https://articles.adsabs.harvard.edu/pdf/2004MNRAS.350..893H Eq.12. 
面积替换为方形的\n noise_std^2 = {sigma_e^2 / 2} / {θ_G^2 * n_galaxy}\n \"\"\"\n self.n_galaxy = n_galaxy\n self.mean = mean\n sigma_e = 0.4 # rms amplitude of the intrinsic ellipticity distribution\n theta_G = 0.205 # pixel side length in arcmin (gaussian smoothing window)\n variance = (sigma_e**2 / 2) / (theta_G**2 * self.n_galaxy)\n self.std = np.sqrt(variance)\n # print('shear noise std =', self.std)\n\n def __call__(self, image):\n # image = image + np.random.normal(loc=self.mean, scale=self.std, size=image.shape)\n image = image + torch.randn(image.size()) * self.std + self.mean\n return image\n # for 50 galaxies per arcmin^2, std = 0.1951; \n # for 20 galaxies per arcmin^2, std = 0.3085\n\n\nclass KS_rec(object):\n \"\"\"\n reconstruct kappa map from shear using Kaiser-Squires deconvolution.\n \"\"\"\n def __init__(self, args):\n self.activate = args.ks\n self.M = massmap2d(name='mass_ks')\n self.psWT_gen1 = pysparse.MRStarlet(bord=1, gen2=False, nb_procs=1, verbose=0)\n self.psWT_gen2 = pysparse.MRStarlet(bord=1, gen2=True, nb_procs=1, verbose=0)\n self.M.init_massmap(nx=1024, ny=1024, pass_class=[self.psWT_gen1, self.psWT_gen2])\n print('KS initialized')\n\n def shear_rec(self, shear1, shear2):\n ks = self.M.g2k(shear1, shear2, pass_class=[self.psWT_gen1, self.psWT_gen2])\n return ks\n\n def __call__(self, image, target):\n if self.activate == 'off':\n return image, target\n \n elif self.activate == 'add':\n # perdict kappa using KS and add it as a 3rd channel to gamma\n # if ks add: image shape = torch.Size([3, 512, 512]); last channel is ks map\n ks_kappa = self.shear_rec(-image[0], image[1]) # negative sign is important\n ks_kappa = torch.FloatTensor(ks_kappa)\n image = torch.concat((image, ks_kappa.unsqueeze(0)), dim=0)\n return image, target\n \n elif self.activate == 'only':\n # perdict kappa using KS and remove shear information\n # if ks only: image shape = torch.Size([1, 512, 512])\n ks_kappa = self.shear_rec(-image[0], image[1]) # negative sign is important\n ks_kappa = torch.FloatTensor(ks_kappa)\n image = ks_kappa.unsqueeze(0)\n # ks_kappa = np.float32(ks_kappa)\n # image = np.expand_dims(ks_kappa, axis=0)\n return image, target\n\n\nclass Wiener(object):\n \"\"\"\n reconstruct kappa map from shear using Wiener filtering.\n \"\"\"\n def __init__(self, args):\n self.activate = args.wiener\n self.p_signal = fits.open('./signal_power_spectrum.fits')[0].data\n if args.n_galaxy == 50:\n self.p_noise = fits.open('./noise_power_spectrum_g50.fits')[0].data\n elif args.n_galaxy == 20:\n self.p_noise = fits.open('./noise_power_spectrum_g20.fits')[0].data\n # Create the cosmostat mass mapping structure and initialize it\n self.M = massmap2d(name='mass_wiener')\n self.psWT_gen1 = pysparse.MRStarlet(bord=1, gen2=False, nb_procs=1, verbose=0)\n self.psWT_gen2 = pysparse.MRStarlet(bord=1, gen2=True, nb_procs=1, verbose=0)\n self.M.init_massmap(nx=512, ny=512, pass_class=[self.psWT_gen1, self.psWT_gen2])\n print('wiener initialized')\n\n def wiener(self, shear1, shear2):\n retr, reti = self.M.wiener(shear1, shear2, \n PowSpecSignal=self.p_signal, \n PowSpecNoise=self.p_noise, \n pass_class=[self.psWT_gen1, self.psWT_gen2])\n return retr, reti\n\n def __call__(self, image, target):\n if self.activate == 'off':\n return image, target\n \n elif self.activate == 'add':\n wf_kappa, _ = self.wiener(-image[0], image[1]) # negative sign is important\n wf_kappa = np.float32(wf_kappa)\n image = np.concatenate([image, np.expand_dims(wf_kappa, axis=0)], axis=0)\n return image, target\n \n elif 
self.activate == 'only':\n wf_kappa, _ = self.wiener(-image[0], image[1]) # negative sign is important\n wf_kappa = np.float32(wf_kappa)\n # image = wf_kappa.unsqueeze(0)\n image = np.expand_dims(wf_kappa, axis=0)\n return image, target\n\n\nclass sparse(object):\n \"\"\"\n reconstruct kappa map from shear using sparse reconstruction.\n \"\"\"\n def __init__(self, args):\n self.activate = args.sparse\n self.M = massmap2d(name='mass_sparse')\n self.psWT_gen1 = pysparse.MRStarlet(bord=1, gen2=False, nb_procs=1, verbose=0)\n self.psWT_gen2 = pysparse.MRStarlet(bord=1, gen2=True, nb_procs=1, verbose=0)\n self.M.init_massmap(nx=512, ny=512, pass_class=[self.psWT_gen1, self.psWT_gen2])\n self.D = shear_data()\n\n sigma_e = 0.4 # rms amplitude of the intrinsic ellipticity distribution\n theta_G = 0.205 # pixel side length in arcmin (gaussian smoothing window)\n variance = (sigma_e**2 / 2) / (theta_G**2 * args.n_galaxy)\n std = np.sqrt(variance)\n\n # Create the covariance matrix, assumed to be diagonal\n CovMat = np.ones((512, 512)) * (std**2)\n self.D.Ncov = CovMat\n\n print('sparse initialized')\n\n def sparse(self, shear1, shear2):\n self.D.g1 = shear1\n self.D.g2 = shear2\n # Do a sparse reconstruction with a 5 sigma detection\n ksr5, ti = self.M.sparse_recons(InshearData=self.D, \n UseNoiseRea=False, \n niter=12, \n Nsigma=5, \n ThresCoarse=False, \n Inpaint=False, \n pass_class=[self.psWT_gen1, self.psWT_gen2])\n return ksr5, ti\n\n def __call__(self, image, target):\n if self.activate == 'off':\n return image, target\n \n elif self.activate == 'add':\n sp_kappa, _ = self.sparse(np.float32(-image[0]), np.float32(image[1])) # negative sign is important\n sp_kappa = np.float32(sp_kappa)\n image = np.concatenate([image, np.expand_dims(sp_kappa, axis=0)], axis=0)\n return image, target\n\n\nclass MCALens(object):\n \"\"\"\n reconstruct kappa map from shear using MCALens reconstruction.\n \"\"\"\n def __init__(self, args):\n self.activate = args.mcalens\n self.p_signal = fits.open('./signal_power_spectrum.fits')[0].data\n self.M = massmap2d(name='mass_mcalens')\n self.psWT_gen1 = pysparse.MRStarlet(bord=1, gen2=False, nb_procs=1, verbose=0)\n self.psWT_gen2 = pysparse.MRStarlet(bord=1, gen2=True, nb_procs=1, verbose=0)\n self.M.init_massmap(nx=512, ny=512, pass_class=[self.psWT_gen1, self.psWT_gen2])\n self.D = shear_data()\n\n sigma_e = 0.4 # rms amplitude of the intrinsic ellipticity distribution\n theta_G = 0.205 # pixel side length in arcmin (gaussian smoothing window)\n variance = (sigma_e**2 / 2) / (theta_G**2 * args.n_galaxy)\n std = np.sqrt(variance)\n\n # Create the covariance matrix, assumed to be diagonal\n CovMat = np.ones((512, 512)) * (std**2)\n self.D.Ncov = CovMat\n\n print('MCALens initialized')\n\n def mcalens(self, shear1, shear2):\n self.D.g1 = shear1\n self.D.g2 = shear2\n # MCAlens reconstruction with a 5 sigma detection\n k1r5, k1i5, k2r5, k2i = self.M.sparse_wiener_filtering(InshearData=self.D, \n PowSpecSignal=self.p_signal,\n niter=12, \n Nsigma=5, \n Inpaint=False, \n Bmode=False, \n ktr=None, \n pass_class=[self.psWT_gen1, self.psWT_gen2])\n return k1r5, k1i5, k2r5, k2i\n\n def __call__(self, image, target):\n if self.activate == 'off':\n return image, target\n \n elif self.activate == 'add':\n mca_kappa, _, _, _ = self.mcalens(np.float32(-image[0]), np.float32(image[1])) # negative sign is important\n # mca_kappa = torch.FloatTensor(mca_kappa)\n mca_kappa = np.float32(mca_kappa)\n # image = torch.concat((image, mca_kappa.unsqueeze(0)), dim=0)\n image = 
np.concatenate([image, np.expand_dims(mca_kappa, axis=0)], axis=0)\n return image, target","repo_name":"WenhanGuo/WL-kappa-map","sub_path":"transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":11559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"4330466769","text":"import os\nimport json\nimport sys \nsys.path.append(\"..\")\nimport cv2\nimport numpy as np\nfrom cv2 import (CAP_PROP_FOURCC, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT,\n CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH,\n CAP_PROP_POS_FRAMES, VideoWriter_fourcc)\n\nfrom const.colors import get_random_colors\nCOLORS = get_random_colors()\n\nfrom tools.visual_utils import (draw_box, draw_text, draw_boxes,\n create_blk_board)\n\nfrom dataset import BoxDataLoader\n\ndef gen_dir(folder):\n if not os.path.exists(folder):\n os.system(f'mkdir -p {folder}')\n\ndef load_gt(gt_file):\n json_data = json.load(open(gt_file)) \n return json_data\n\ndef load_seq_info(seq_info_file):\n info = {}\n with open(seq_info_file, 'r+') as f:\n lines = f.readlines()\n for line in lines:\n if '=' in line:\n splits = line.strip().split('=')\n info[splits[0]] = splits[1]\n\n return info\n\nDATA_DIR = '/media/workspace/yirui/data/tracking_reid/prepared/MOTSynth/'\n\ndef main():\n\n # for seq_id in [0, 1, 3, 4, 5, 6]:\n for seq_id in [1]:\n # seq_id = 13\n img_dir = DATA_DIR + '/MOTSynth_train/frames/{:03d}'.format(seq_id)\n vid_file = DATA_DIR + '/MOTSynth_1/{:03d}.mp4'.format(seq_id)\n gt_file = DATA_DIR + '/annotations/{:03d}.json'.format(seq_id)\n gt_txt = DATA_DIR + '/mot_annotations/{:03d}/gt/gt.txt'.format(seq_id)\n seq_info_file = DATA_DIR + '/mot_annotations/{:03d}/seqinfo.ini'.format(seq_id)\n\n gts = load_gt(gt_file)\n cam_info = gts['images'][0]\n print(cam_info)\n\n in_video = cv2.VideoCapture(vid_file)\n w_img = int(in_video.get(CAP_PROP_FRAME_WIDTH))\n h_img = int(in_video.get(CAP_PROP_FRAME_HEIGHT))\n fps = in_video.get(CAP_PROP_FPS)\n num_frames = int(in_video.get(CAP_PROP_FRAME_COUNT))\n \n down_fact = 4\n w_out, h_out = w_img // down_fact, h_img // down_fact \n res_dir = os.path.abspath('../results/motsynth')\n gen_dir(res_dir)\n out_video = cv2.VideoWriter(\n res_dir + '/{:03d}_cam_motion.mp4'.format(seq_id), cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w_out + h_out * 3 // 2 , h_out)\n )\n \n # load seq info\n seq_info = load_seq_info(seq_info_file)\n\n # load boxes\n boxloader = BoxDataLoader(track_file = gt_txt,\n img_size = (w_img, h_img),\n window = fps * 2,\n stride = fps // 2,\n height_dif_thresh = 3,\n front_ratio_thresh = 0.8,\n fps = fps)\n \n boxes = boxloader.raw_tracks.copy()\n print(boxes.shape)\n\n # stop_frame_id = 10\n for i in range(num_frames):\n ret, frame = in_video.read()\n if not ret or i >= boxes.shape[1]: continue\n # if i >= stop_frame_id: break\n\n board = create_blk_board(h_out * 3 // 2 , h_out)\n img_info = gts['images'][i]\n\n # extrinsic parameters\n draw_text(board, f'pos: {np.round(img_info[\"cam_world_pos\"], 1)}', (5, 30), (0, 255, 0), scale = 0.5, thickness = 1)\n draw_text(board, f'rot: {np.round(img_info[\"cam_world_rot\"], 2)}', (5, 60),(0, 255, 0), scale = 0.5, thickness = 1)\n\n # intrinsic parameters\n draw_text(board, f'FOV:{seq_info[\"FOV\"]}, fx:{seq_info[\"fx\"]}, fy:{seq_info[\"fy\"]}', (5, 90), (0, 0, 255), scale = 0.5, thickness = 1)\n draw_text(board, f'cx:{seq_info[\"cx\"]}, cy:{seq_info[\"cy\"]}', (5, 120), (0, 0, 255), scale = 0.5, thickness = 1)\n\n d_frame = cv2.resize(frame, (w_out, h_out))\n\n frame_boxes = 
boxes[:, i, :]\n box_mask = frame_boxes[:, -1] > 0\n frame_boxes = frame_boxes[box_mask, :] / down_fact\n draw_boxes(d_frame, frame_boxes, (0, 255, 0), thickness = 1)\n\n draw_board = cv2.hconcat([d_frame, board])\n out_video.write(draw_board)\n\n if i == 0: cv2.imwrite(res_dir + '/{:03d}_cam_motion-frame-0.png'.format(seq_id), draw_board)\n\nif __name__ == '__main__':\n main()\n","repo_name":"shenghh2015/mot_calib","sub_path":"src_motsynth/vis_sample.py","file_name":"vis_sample.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33184248789","text":"qual_words = [\"feel\",\"i am\",\"im\",\"makes me\",\"going to\",\"buy\",\"sell\"]\npositive_words = {\"buy\": 1,\"good\": 1,\"undervalued\":3,\"buying\":1,\"good value\":2,\"buying now\":2,\"epic\":1,\"gains\":1,\"buy this now\":3,\"best\":2,\"hot\":1,\"thrilled\":2,\"happy\":1,\"up\":1}\nnegative_words = {\"sell\":1,\"overvalued\":2,\"rip-off\":2,\"terrible\":2,\"bad\":1,\"short\":1,\"put\":1,\"suicide\":3,\"drop\":1,\"worst\":2,\"mistake\":1,\"cooler\":1,\"caution\":1,\"afraid\":1,\"down\":1}\nneg_inflection = [\"dont\",\"negative\",\"not\"]\npos_inflection = [\"do\",\"positive\"]\n\ndef qualify(tweet):\n qualified = False\n for word in qual_words:\n if word in tweet:\n qualified = True\n return qualified\n\ndef analysis(word):\n# print(word)\n score = 0\n if word in positive_words:\n score = score + positive_words[word]\n# print(word,positive_words[word])\n if word in negative_words:\n score = score - negative_words[word]\n# print(word,negative_words[word])\n# print(\"Word Score: \",score)\n return score\n \n\ndef total_score(split_tweet):\n sentiment = 0\n prev_word = \"\"\n for word in split_tweet:\n score = analysis(word)\n# print(score)\n if score != 0:\n if prev_word in pos_inflection:\n score = score + 2\n# print(\"added 2 to score because of pos inflection, new score is: \", score)\n elif prev_word in neg_inflection:\n score = score - 2\n# print(\"took 2 score off due to neg inflection, new score is: \",score)\n sentiment = sentiment + score\n# print(\"current total score: \",sentiment)\n prev_word = word\n print(\"Overall Score: \",sentiment)\n return sentiment\n \ndef tweet_splitter(tweet):\n lower = tweet.lower()\n split = lower.split()\n punctuation = [\",\",\".\",\"'\"]\n split_words = []\n for word in split:\n for char in punctuation:\n if char in word:\n rebuild = \"\"\n for letter in word:\n if letter not in punctuation:\n rebuild = rebuild + letter\n word = rebuild\n split_words.append(word)\n# print(\"appended \", word, \" to list\")\n return split_words\n \n \n\n#tweet1 = \"i AM buying buy buy TLRY it is undervalued, you have to be insane to sell\"\n#tweet2 = \"bu.y buy buy sell\"\n#tweet3 = \"stock is oveRvalued, sell. 
now\"\n#\n#\n#print(tweet_splitter(tweet1))\n#print(tweet_splitter(tweet2))\n#print(tweet_splitter(tweet3))\n\n#split_tweet = [\"dont\",\"buy\",\"this\",\"stock\"]\n#split_tweet2 = [\"this\",\"is\",\"a\",\"positive\",\"buy\"]\n#split_tweet3 = [\"dont\",\"forget\",\"to\",\"buy\",\"good\",\"stocks\"]\n#split_tweet4 = [\"dont\",\"forget\",\"to\",\"buy\",\"good\",\"stocks\",\"not\",\"terrible\",\"ones\"]\n#print(total_score(split_tweet))\n#print(total_score(split_tweet2))\n#print(total_score(split_tweet3))\n#print(total_score(split_tweet4))","repo_name":"scott1928/Stock-Scores","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20740951693","text":"# -*- coding: utf-8 -*-\n\"\"\"\nauthor = 'Pierluigi'\ndate = '2016-05-14'\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\n\nfrom colorama import Fore\n\nfrom pyco.core.global_variables import GlobalVariables\n\n_colors = {\n 'white': Fore.WHITE,\n 'red': Fore.LIGHTRED_EX,\n 'green': Fore.LIGHTGREEN_EX,\n 'blue': Fore.LIGHTBLUE_EX,\n 'yellow': Fore.LIGHTYELLOW_EX,\n 'cyan': Fore.LIGHTCYAN_EX,\n 'magenta': Fore.LIGHTMAGENTA_EX\n}\n\n\ndef _get_pyco_debug_status():\n \"\"\"Return the curret status for the PyCo debug.\"\"\"\n debug = GlobalVariables.get_instance().get('debug')\n if debug is None:\n return False\n return True if debug.strip().lower() == 'true' else False\n\n\ndef disp(text='', color='white', end='\\n', flush=False, debug=False):\n \"\"\"A display function\n\n :param text: the text to be shown\n :param color: color for the text\n :param end: terminator\n :param flush: force flush\n :param debug: if debug is true the text is shown only if config set debug true\n \"\"\"\n d = ''\n show_debug = (debug and _get_pyco_debug_status())\n if show_debug:\n d = 'DEBUG: '\n if not debug or show_debug:\n if sys.stdout != sys.__stdout__:\n print('{0}{1}'.format(d, text), end=end)\n else:\n print('{0}{1}{2}'.format(d, _colors.get(color, 'white'), text), end=end)\n if flush:\n sys.stdout.flush()\n","repo_name":"pierluigi-failla/pyco","sub_path":"pyco/utils/disp.py","file_name":"disp.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39695283497","text":"from flask import Flask, request, render_template\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\n\napp = Flask(__name__)\n\nwith open('models/model.pkl', 'rb') as file:\n model = pickle.load(file)\n\n@app.route(\"/\")\ndef hello_world():\n return \"
<p>Hello, World!</p>
\"\n\n@app.route('/analyze-text', methods=['GET', 'POST'])\ndef analyze_text():\n if request.method == 'POST':\n text = request.form['text']\n # perform analysis on the text here // here i am not using nlp as i haven't learned it yet but will surely learn it\n # Load the saved model from the pickle file\n\n # words = text.split()\n # save words to a text file\n # with open('words.txt', 'a') as f:\n # f.write('\\n'.join(words) + '\\n')\n\n # Preprocess the input text\n input_text = text\n vectorizer = CountVectorizer()\n preprocessed_text = vectorizer.transform([input_text])\n\n # Use the trained model to predict the label\n predicted_label = model.predict(preprocessed_text)[0]\n\n # Print the predicted label\n # print(\"Predicted label:\", predicted_label)\n\n if predicted_label == 1:\n return 'The Text Contains References to self-harm'\n else:\n return 'Text analyzed successfully and it has no self-harm text'\n return render_template('text_input_form.html')\n\nif __name__==\"__main__\":\n app.run(host=\"0.0.0.0\")\n","repo_name":"adarshmarvel22/Self-Harm-Detection-Web-Application","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34997946359","text":"import re\nimport subprocess\nimport os.path\n\nfrom . import app, root_dir\n\n\nmacRE = re.compile(r'BSS (?P[0-9a-fA-F:]+)\\(on .+?\\)( -- associated)?$')\nregexps = [\n re.compile(r'^SSID: (?P.*)$'),\n re.compile(r'^last seen: (?P\\d+) ms ago$'),\n re.compile(r'^signal: (?P[-\\.\\d]+) dBm$'),\n re.compile(r'^DS Parameter set: channel (?P\\d+)$'),\n]\n\ndef scan(interface='wlan0'):\n cmd = [os.path.join(root_dir, '..', 'raspi_scan_wlan'), interface]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n points = proc.stdout.read().decode('utf-8')\n return parse(points)\n\ndef parse(content):\n cells = []\n lines = content.split('\\n')\n for line in lines:\n line = line.strip()\n m = macRE.search(line)\n if m is not None:\n cells.append(m.groupdict())\n continue\n for expression in regexps:\n m = expression.search(line)\n if m is not None:\n cells[-1].update(m.groupdict())\n continue\n return cells\n","repo_name":"hosang/thewolf","sub_path":"thewolf/iwlist.py","file_name":"iwlist.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"96"} +{"seq_id":"1479510615","text":"import zope.interface\nfrom ..interface.CommandExecutorItf import CommandExecutorItf\nfrom src.main.constants import constant\n@zope.interface.implementer(CommandExecutorItf)\nclass AllotCourseCmdExecutor:\n def executeCommand(self, coursesDet, regIDvsCourse, command_attributes)->str: # return error message if any\n courseID = command_attributes[0]\n course = coursesDet.get(courseID, None)\n if (course!=None):\n numOfRegisteredEmployees = len(course._get_registered_employees())\n if (numOfRegisteredEmployees List[List[int]]:\n result = []\n people.sort(key=lambda x:(x[0], -x[1]), reverse=True)\n q = deque(people)\n \n while q:\n person = q.popleft()\n result.insert(person[1], person)\n\n return result\n\n","repo_name":"JwahoonKim/PS","sub_path":"LeetCode/Python/406_Queue Reconstruction by Height.py","file_name":"406_Queue Reconstruction by Height.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"37639373034","text":"import 
math\n\n\ndef calc(N, D, K):\n D = D % N\n g = math.gcd(N, D)\n if g != 1:\n NG = N // g\n k = K - 1\n tmp1 = k // NG\n tmp2 = k % NG\n return tmp1 + (tmp2 * D) % N\n else:\n return (D * (K - 1)) % N\n\n\nT = int(input())\nresult = []\nfor t in range(T):\n N, D, K = [int(l) for l in input().split()]\n result.append(calc(N, D, K))\nfor r in result:\n print(r)\n","repo_name":"Aryudesu/ABC","sub_path":"ABC/290/ABC290D.py","file_name":"ABC290D.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"37935980400","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass VAE(nn.Module):\n def __init__(self, in_dim=784, latent_dim=20):\n super(VAE, self).__init__()\n\n self.latent_dim = latent_dim\n self.in_dim = in_dim\n\n self.hdim = 512\n self.vdim = in_dim\n\n self.mu = nn.Linear(self.hdim, latent_dim)\n self.logvar = nn.Linear(self.hdim,latent_dim)\n\n self.l1e = nn.Sequential(nn.Linear(self.vdim, self.hdim), nn.LayerNorm(self.hdim), nn.ELU())\n self.l2e = nn.Sequential(nn.Linear(self.hdim, self.hdim), nn.LayerNorm(self.hdim), nn.ELU())\n self.l3e = nn.Sequential(nn.Linear(self.hdim, self.hdim), nn.LayerNorm(self.hdim), nn.ELU())\n\n self.l1d = nn.Sequential(nn.Linear(latent_dim, self.hdim), nn.LayerNorm(self.hdim), nn.ELU())\n self.l2d = nn.Sequential(nn.Linear(self.hdim, self.hdim), nn.LayerNorm(self.hdim), nn.ELU())\n self.l3d = nn.Sequential(nn.Linear(self.hdim, self.vdim), nn.LayerNorm(self.vdim), nn.Sigmoid())\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n z = mu + eps * std\n return z\n\n def encode(self, x):\n x = self.l1e(x)\n x = x + self.l2e(x)\n x = x + self.l3e(x)\n\n mu = self.mu(x)\n logvar = self.logvar(x)\n z = self.reparameterize(mu, logvar)\n return z, mu, logvar\n\n def decode(self, z):\n z = self.l1d(z)\n z = z + self.l2d(z)\n x = self.l3d(z)\n return x\n\n def forward(self, x):\n z, mu, logvar = self.encode(x.view(-1, self.in_dim))\n rec_x = self.decode(z)\n return rec_x, mu, logvar\n\n def loss_function(self, rec_x, x, mu, logvar):\n x = (x + 1.)/2. # images are dynamically binarized to [-1,1]\n BCE = F.binary_cross_entropy(rec_x.view(-1, self.in_dim), x.view(-1, self.in_dim), reduction='sum')\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return BCE + KLD\n\n def NLLimportance(self, X, batchsz_importance):\n X = (X + 1.)/2. 
# images are dynamically binarized to [-1,1], so rescale them to [0,1] first\n\n batch_size = X.shape[0]\n ll = torch.Tensor(batch_size).to(X.device)\n\n with torch.no_grad():\n for i in range(batch_size):\n X_repeat = X[i].unsqueeze(0).repeat(batchsz_importance, 1)\n z, mu, logvar = self.encode(X_repeat.view(-1, self.in_dim))\n rec_x = self.decode(z)\n\n # p(x|z)\n logprob_QB = (X_repeat * torch.log(rec_x) + (1 - X_repeat) * torch.log(1 - rec_x)).sum(-1)\n\n # p(z)\n distprior = torch.distributions.normal.Normal(0,1)\n logprior = distprior.log_prob(z).sum(-1)\n\n # q(z|x)\n dist = torch.distributions.normal.Normal(mu, torch.exp(0.5 * logvar))\n logprob_QF = dist.log_prob(z).sum(-1)\n\n ll[i] = torch.logsumexp(logprob_QB + logprior - logprob_QF, dim=0) - np.log(batchsz_importance)\n\n return -ll\n\n def sample(self, n, device):\n z = torch.randn(n, self.latent_dim).to(device)\n x = self.decode(z)\n return x\n\n def recon(self,x):\n z,_,_ = self.encode(x)\n x = self.decode(z)\n x = 2 * torch.bernoulli(x) - 1\n return x\n","repo_name":"GFNOrg/Delta-AI","sub_path":"mnist/algorithms/VAE.py","file_name":"VAE.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"30134450471","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nx = np.linspace(-3, 3, 50)\ny1 = 2 * x + 1\ny2 = x**2\n\nplt.figure()\nplt.plot(x, y2)\nplt.plot(x, y1, linestyle='--', color='red')\n\nplt.xlim((-1, 2))\nplt.ylim((-2, 3))\nplt.xlabel('I am x')\nplt.ylabel('I am y')\n\nnew_ticks = np.linspace(-1, 2, 5)\nprint(new_ticks)\nplt.xticks(new_ticks)\n# set tick labels\nplt.yticks([-2, -1.8, -1, 1.22, 3], [r'$really\\ bad$',\n r'$bad$', r'$normal$', r'$good$', r'$really\\ good$'])\n\n\n\n# get the current axes\nax = plt.gca()\nax.spines['right'].set_color('none')# hide the right spine\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')# use the bottom spine as the x-axis\nax.yaxis.set_ticks_position('left')\nax.spines['bottom'].set_position(('data', 0))# anchor the x-axis at data y=0\nax.spines['left'].set_position(('data', 0))\n\n# add a legend\nl1, = plt.plot(x, y2, label='up')\nl2, = plt.plot(x, y1, color='red', linewidth=1.0, linestyle='--', label='down')\nplt.legend(handles=[l1, l2], labels=['up', 'down'], loc='best')\n\nplt.show()\n","repo_name":"wmt2631/text-file","sub_path":"matplotlib3.12.py","file_name":"matplotlib3.12.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41497365603","text":"import os\nimport re\nimport argparse\nimport glob\nimport imageio\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n '''\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n '''\n return [atoi(c) for c in re.split(r'(\\\\d+)', text)]\n\n\ndef make_progress_video(args):\n\n # filepaths\n file_path = os.path.join(args.images_path, f'{args.images_names}*.png')\n filenames = glob.glob(file_path)\n filenames.sort(key=natural_keys)\n\n images = []\n\n for filename in filenames:\n images.append(imageio.imread(filename))\n if args.print_fns:\n print(filename)\n\n fp = os.path.join(args.results_dir, f'{args.save_name}.{args.ext}')\n imageio.mimsave(fp, images, fps=args.fps)\n return 0\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--images_path', type=str, help='path to folder with images')\n parser.add_argument('--images_names', 
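# filename prefix shared by the frames to stitch (globbed as '{prefix}*.png' above)\n 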
type=str, default='test')\n parser.add_argument('--results_dir', type=str, default='visualization')\n parser.add_argument('--save_name', type=str, default=None)\n parser.add_argument('--ext', type=str, default='mp4', choices=('mp4', 'gif'))\n parser.add_argument('--fps', type=int, default=1)\n parser.add_argument('--print_fns', action='store_true')\n args = parser.parse_args()\n args.dataset_root_path = os.path.split(os.path.normpath(args.images_path))[0]\n\n os.makedirs(args.results_dir, exist_ok=True)\n if not args.save_name:\n os.path.split(os.path.normpath(args.images_path))[0]\n args.save_name = os.path.split(os.path.normpath(args.images_path))[1]\n\n make_progress_video(args)\n\n\nmain()\n","repo_name":"arkel23/ColorIT","sub_path":"tools/postprocess/make_progress_video.py","file_name":"make_progress_video.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16372797883","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 1 10:59:55 2023\n\n@author: richard\n\"\"\"\n\nimport os\nimport re\nimport sys\n\n\nMAXDEPTH = 2\n\nPRIORITIZE = [\"getting-started\"]\n\n\ndef priority_sort(paths):\n paths.sort()\n basenames = [os.path.basename(p) for p in paths]\n for prio in PRIORITIZE[::-1]:\n try:\n i = basenames.index(prio)\n except ValueError:\n pass\n else:\n basenames.insert(0, basenames.pop(i))\n paths.insert(0, paths.pop(i))\n\n\ndef recursive_python_files(path):\n out = {}\n files_and_dirs = os.listdir(path)\n priority_sort(files_and_dirs)\n for part_path in files_and_dirs:\n part = os.path.abspath(os.path.join(path, part_path))\n\n if os.path.isdir(part):\n out[part] = recursive_python_files(part)\n elif part.endswith(\".py\"):\n out[part] = part\n\n return out\n\n\ndef title_to_heading(title):\n pos = title.rfind(\".\")\n if pos == -1:\n ret = title[0].upper() + title[1:]\n else:\n ret = title[pos + 1].upper() + title[pos + 2:]\n ret = re.sub(r\"^(\\d+)-\", r\"\\1. \", ret)\n m = re.match(r\"^\\d+\\. 
\", ret)\n if m:\n pos = len(m.group(0))\n ret = ret[:pos] + ret[pos].upper() + ret[pos + 1:]\n return ret.replace(\"-\", \" \")\n\n\ndef title_from_path(path, origpath, extra=None):\n title = path.removeprefix(origpath).removesuffix(\".py\")\n title = title.replace(\"/\", \".\").replace(\"\\\\\", \".\")\n if len(title):\n title = title.lstrip(\".\")\n return extra + \".\" + title\n else:\n return extra # For empty, i.e., main folder\n\n\ndef rstfn_from_title(title, outpath):\n return os.path.join(outpath, title + \".rst\")\n\n\ndef readme_path(path):\n if os.path.isdir(path):\n readme = os.path.join(path, \"README.rst\")\n else:\n d, f = os.path.split(path)\n readme = os.path.join(d, \"README.\" + f.removesuffix(\".py\") + \".rst\")\n\n if os.path.exists(readme):\n print(f\"Found readme: {readme}\")\n return readme\n print(f\"Did not find readme: {readme}\")\n return None\n\n\ndef output_readme(rstfile, path):\n readme = readme_path(path)\n if readme:\n with open(readme, \"r\") as r:\n rstfile.write(r.read())\n rstfile.write(\"\\n\")\n\n\ndef combine_pyfiles(paths, origpath, extra, outpath):\n parentpath = os.path.dirname(paths[0])\n title = title_from_path(parentpath, origpath, extra)\n rstfn = rstfn_from_title(title, outpath)\n print(f\"Creating {rstfn} from {parentpath}\")\n with open(rstfn, \"w\") as rstfile:\n heading = title_to_heading(title)\n rstfile.write(f\"{heading}\\n{'=' * len(heading)}\\n\")\n output_readme(rstfile, parentpath)\n for path in paths:\n title = title_from_path(path, origpath, extra)\n\n if len(paths) > 1:\n heading = title_to_heading(title)\n rstfile.write(f\"{heading}\\n{'-' * len(heading)}\\n\")\n\n output_readme(rstfile, path)\n\n rstfile.write(\".. code-block:: python\\n\")\n rstfile.write(f\" :name: {heading}\\n\")\n rstfile.write(\" :linenos:\\n\\n\")\n with open(path, \"r\") as pyfile:\n for line in pyfile.read().split('\\n'):\n rstfile.write(f\" {line}\\n\")\n\n\ndef print_pyfile(path, origpath, extra, outpath):\n title = title_from_path(path, origpath, extra)\n rstfn = rstfn_from_title(title, outpath)\n\n print(f\"Creating {rstfn} from {path}\")\n with open(rstfn, \"w\") as rstfile:\n heading = title_to_heading(title)\n rstfile.write(f\"{heading}\\n{'=' * len(heading)}\\n\")\n\n output_readme(rstfile, path)\n\n rstfile.write(\".. code-block:: python\\n\")\n rstfile.write(\" :linenos:\\n\\n\")\n with open(path, \"r\") as pyfile:\n for line in pyfile.read().split('\\n'):\n rstfile.write(f\" {line}\\n\")\n\n\ndef print_folder(path, paths, origpath, extra, outpath):\n keys = paths.keys()\n title = title_from_path(path, origpath, extra)\n rstfn = rstfn_from_title(title, outpath)\n\n print(f\"Creating {rstfn} from {path}\")\n with open(rstfn, \"w\") as rstfile:\n heading = title_to_heading(title)\n rstfile.write(f\"{heading}\\n{'=' * len(heading)}\\n\")\n\n output_readme(rstfile, path)\n\n rstfile.write(\".. 
toctree::\\n\")\n for key in keys:\n rstfile.write(f\" {title_from_path(key, origpath, extra)}\\n\")\n\n\ndef folders(paths, origpath, extra, outpath, depth=0):\n keys = paths.keys()\n if depth < MAXDEPTH:\n for key in keys:\n if os.path.isdir(key):\n print(f\"Processing {key}\")\n print(f\"FolderDepth {depth}\")\n folders(paths[key], origpath, extra, outpath, depth+1)\n if depth+1 < MAXDEPTH:\n print_folder(key, paths[key], origpath, extra, outpath)\n else:\n print_pyfile(paths[key], origpath, extra, outpath)\n else:\n combine_files = []\n for key in keys:\n if os.path.isdir(key):\n print(f\"Ignoring folder {key}\")\n else:\n combine_files.append(paths[key])\n print(f\"Combining {combine_files}\")\n combine_pyfiles(combine_files, origpath, extra, outpath)\n\n\nif __name__ == \"__main__\":\n extra, outpat = sys.argv[2], sys.argv[3]\n origpath = os.path.abspath(sys.argv[1])\n print(f\"Scanning and printing {origpath}\")\n paths = recursive_python_files(origpath)\n folders(paths, origpath, extra, outpat)\n print_folder(origpath, paths, origpath, extra, outpat)\n","repo_name":"atmtools/arts","sub_path":"python/doc/gen_examples.py","file_name":"gen_examples.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"96"} +{"seq_id":"7237235032","text":"# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras import layers, activations, optimizers, models, losses\nimport os\nimport matplotlib.pyplot as plt\n\n# Model Parameters\n\n# In[2]:\n\n\nbatch_size = 32\nnum_classes = 10\nepochs = 100\ndata_augmentation = False\nnum_predictions = 20\nsave_dir = os.path.join(os.getcwd(), \"saved_models\")\nmodel_name = 'keras_cifar10_trained_model.h5'\n\n# # Load data into memory\n\n# In[3]:\n\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# Convert class vectors to binary class matrices.\ny_train = tf.keras.utils.to_categorical(y_train, num_classes)\ny_test = tf.keras.utils.to_categorical(y_test, num_classes)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n# # Define Model\n\n# In[4]:\n\n\ncifar_model = models.Sequential()\ncifar_model.add(layers.Conv2D(filters=32, kernel_size=[2, 2], strides=1, activation=activations.relu,\n input_shape=x_train.shape[1:],padding=\"same\"))\ncifar_model.add(layers.Conv2D(filters=32, kernel_size=[2, 2], strides=1, activation=activations.relu))\ncifar_model.add(layers.MaxPool2D(pool_size=(2, 2)))\ncifar_model.add(layers.Dropout(0.25))\n\ncifar_model.add(layers.Conv2D(filters=64, kernel_size=[2, 2], strides=1, activation=activations.relu,padding=\"same\"))\ncifar_model.add(layers.Conv2D(filters=64, kernel_size=[2, 2], strides=1, activation=activations.relu))\ncifar_model.add(layers.MaxPool2D(pool_size=(2, 2)))\ncifar_model.add(layers.Dropout(0.25))\n\ncifar_model.add(layers.Flatten())\ncifar_model.add(layers.Dense(units=512, activation=activations.relu))\ncifar_model.add(layers.Dropout(0.5))\ncifar_model.add(layers.Dense(units=num_classes, activation=activations.softmax))\n\ncifar_model.compile(\n optimizer=optimizers.RMSprop(lr=0.001, decay=1e-6),\n loss=losses.categorical_crossentropy,\n metrics=['accuracy']\n)\n\n# # Run the model\n\n# In[ 
]:\n\n\nif not data_augmentation:\n print('Not using data augmentation.')\n cifar_model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))\n\nelse:\n print(\"Using real time data augmentation\")\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n # Fit the model on the batches generated by datagen.flow().\n cifar_model.fit_generator(datagen.flow(x_train, y_train,\n batch_size=batch_size),\n epochs=epochs,\n validation_data=(x_test, y_test),\n workers=4)\n\n# # Accuracy Score\n\n# In[ ]:\n\n\n# Score trained model.\nscores = cifar_model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n","repo_name":"HardikSuthar3/DeepLearningTutorials","sub_path":"Image/Keras/cifar10_cnn_bak.py","file_name":"cifar10_cnn_bak.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"29205485971","text":"#*************************** Libraries ********************************#\r\nimport numpy as np\r\nimport keras\r\nimport keras.backend as k\r\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.optimizers import Adam\r\nfrom keras.preprocessing import image\r\nimport cv2\r\nimport datetime\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.preprocessing.image import load_img\r\nimport sys\r\nfrom PIL import Image\r\n\r\n\r\n\r\n\r\n\r\n#********************************* Building The MODEL *****************************************#\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Flatten())\r\nmodel.add(Dense(100, activation='relu'))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n\r\n# CUSTOM COMPILE\r\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n\r\n\r\n#************************************** DATA PREPROCESSING **********************************#\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1. / 255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True)\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\r\n\r\ntraining_set = train_datagen.flow_from_directory(\r\n 'train',\r\n target_size=(150, 150),\r\n batch_size=16,\r\n class_mode='binary')\r\n\r\ntest_set = test_datagen.flow_from_directory(\r\n 'test',\r\n target_size=(150, 150),\r\n batch_size=16,\r\n class_mode='binary')\r\n\r\n# TRAIN MODEL\r\nmodel_saved = model.fit_generator(\r\n training_set,\r\n epochs=10,\r\n validation_data=test_set,\r\n\r\n)\r\n\r\n# SAVE MODEL\r\nmodel.save('mymodel.h5', model_saved)\r\n\r\n\r\n\r\ntest_image = image.load_img(r'E:\\Courses\\FaceMaskDetector\\test\\with_mask\\1-with-mask.jpg', target_size=(150, 150, 3))\r\ntest_image = image.img_to_array(test_image)\r\ntest_image = np.expand_dims(test_image, axis=0)\r\nprint(model.predict(test_image))\r\n\r\ntest_image = image.load_img(r'E:\\Courses\\FaceMaskDetector\\test\\without_mask\\11.jpg', target_size=(150, 150, 3))\r\ntest_image = image.img_to_array(test_image)\r\ntest_image = np.expand_dims(test_image, axis=0)\r\nprint(model.predict(test_image))\r\n\r\n# 0 means .......> with mask\r\n# 1 means .......> without mask","repo_name":"Seifeldin-Ahmed/Real-Time-Face-Mask-Detection","sub_path":"facemask.py","file_name":"facemask.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5925531911","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport pandas_datareader as web\nimport datetime as dt\nimport os\nfrom sklearn.preprocessing import MinMaxScaler\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\nkeras = tf.keras\n\n# Constants\nSYMBOL = 'BTC-USD'\nEPOCHS = 200\nBATCH_SIZE = 32\nWINDOW_SIZE = 64\n\n# Helper Methods\ndef seq2seq_window_dataset(series, shuffle_buffer):\n series = tf.expand_dims(series, axis=-1)\n ds = tf.data.Dataset.from_tensor_slices(series)\n ds = ds.window(WINDOW_SIZE + 1, shift=1, drop_remainder=True)\n ds = ds.flat_map(lambda w: w.batch(WINDOW_SIZE + 1))\n ds = ds.shuffle(shuffle_buffer)\n ds = ds.map(lambda w: (w[:-1], w[1:]))\n return ds.batch(BATCH_SIZE).prefetch(1)\n\n# Get Data from Yahoo Finance API\nstart = dt.datetime(1970,1,1)\nend = dt.datetime(2022,1,1)\ndata = web.DataReader(SYMBOL, 'yahoo', start=start, end=end)\nframe = pd.DataFrame(data)\nframe.reset_index(inplace=True,drop=False)\ndate = frame['Date'][0]\n\nscaler = MinMaxScaler(feature_range=(0,1))\nscaled_data = scaler.fit_transform(data['Close'].values.reshape(-1,1))\n\ntime = len(data)\nbuffer_size = int(time)\nsplit_time = int(buffer_size*0.8)\nprint(f\"Days: {buffer_size}\")\nprint(f\"Split: {split_time}\")\ntime = np.arange(buffer_size)\n\n#reshaped_data = data['Close'].values.reshape(-1,1)\nseries=[]\nfor i in scaled_data:\n series.append(i[0])\nseries=np.array(series)\nprint(len(series))\n\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\n\ntrain_set = seq2seq_window_dataset(x_train, buffer_size)\nvalid_set = seq2seq_window_dataset(x_valid, buffer_size-split_time)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.InputLayer(input_shape=[None, 1]))\nfor dilation_rate in (1, 2, 4, 8, 16, 32):\n model.add(\n keras.layers.Conv1D(filters=32,\n kernel_size=2,\n strides=1,\n dilation_rate=dilation_rate,\n padding=\"causal\",\n activation=\"relu\")\n )\nmodel.add(keras.layers.Conv1D(filters=1, kernel_size=1))\n\noptimizer = keras.optimizers.Adam(learning_rate=3e-5)\n\nmodel.compile(loss=keras.losses.Huber(),\n optimizer=optimizer,\n 
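# report mean absolute error alongside the Huber training loss\n              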
metrics=[\"mae\"])\n\nmodel_checkpoint = keras.callbacks.ModelCheckpoint(\n filepath=\"prototype_models/cnn_checkpoint.h5\", save_best_only=True)\nearly_stopping = keras.callbacks.EarlyStopping(patience=20)\n\nmodel.fit(train_set, epochs=EPOCHS, validation_data=valid_set, callbacks=[early_stopping, model_checkpoint])\n\n# Display Forecast\ntest_start = dt.datetime(2022,1,1)\ntest_end = dt.datetime.now()\n\ntest_data=web.DataReader(SYMBOL, 'yahoo', test_start, test_end)\n\ntotal_dataset = pd.concat((data['Close'], test_data['Close']), axis=0)\n\nmodel_inputs = total_dataset[len(total_dataset) - len(test_data) - WINDOW_SIZE:].values\nmodel_inputs = model_inputs.reshape(-1,1)\nmodel_inputs = scaler.transform(model_inputs)\n\nreal_data = [model_inputs[len(model_inputs)+1 - WINDOW_SIZE:len(model_inputs+1),0]]\nreal_data = np.array(real_data)\nreal_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1],1))\n\nprediction = model.predict(real_data)\nprediction = scaler.inverse_transform(prediction[-1])\nprint(f\"{SYMBOL} Prediction: {prediction[-1]}\")","repo_name":"Naszagul/AI","sub_path":"forecasting/cnn_compile_train.py","file_name":"cnn_compile_train.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26328563255","text":"import json\nimport random\nimport sys\nimport warnings\nimport operator\n\nimport pandas as pd\nfrom rdflib import Graph, Literal, Namespace, URIRef\nfrom rdflib.collection import Collection\nfrom rdflib.namespace import FOAF, RDF, RDFS, SKOS, XSD\nfrom rdflib.serializer import Serializer\nfrom rdfpandas.graph import to_dataframe\nfrom SPARQLWrapper import XML, SPARQLWrapper\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef score(meaningful_messages_final):\n len = meaningful_messages_final.shape[0]\n score = random.sample(range(10, 1000), len)\n meaningful_messages_final[\"score\"] = score\n meaningful_messages_final.reset_index(drop=True, inplace=True)\n #meaningful_messages_final.to_csv(\"df_es1.csv\")\n return meaningful_messages_final\n # meaningful_messages_final.to_csv(\"df_es1.csv\")\n\n\n\n\ndef apply_indv_preferences(meaningful_messages_final,indv_preferences_read):\n indv_preferences_df = pd.json_normalize(indv_preferences_read)\n display_preferences_df =indv_preferences_df [['Utilities.Display_Format.short_sentence_with_no_chart', 'Utilities.Display_Format.bar_chart','Utilities.Display_Format.line_graph']]\n message_preferences_df =indv_preferences_df[['Utilities.Message_Format.1','Utilities.Message_Format.2','Utilities.Message_Format.16','Utilities.Message_Format.24','Utilities.Message_Format.18','Utilities.Message_Format.11','Utilities.Message_Format.22','Utilities.Message_Format.14','Utilities.Message_Format.21']]\n \n display_preferences,max_val = displaypreferences(meaningful_messages_final,display_preferences_df)\n messages_preferences= messagepreferences(display_preferences,message_preferences_df)\n \n #display_preferences_df.to_csv('display_preferences.csv')\n #message_preferences_df.to_csv('message_preferences_df.csv')\n #indv_preferences_df.to_csv('individual_preferences.csv')\n #messages_preferences.to_csv('message_preferences_final.csv')\n return messages_preferences ,max_val\n\ndef displaypreferences(meaningful_messages_final,display_preferences_df):\n nochart_pref=display_preferences_df.at[0,'Utilities.Display_Format.short_sentence_with_no_chart']\n bar_pref=display_preferences_df.at[0,'Utilities.Display_Format.bar_chart']\n 
line_pref=display_preferences_df.at[0,'Utilities.Display_Format.line_graph']\n    line_pref = float(line_pref)\n    bar_pref = float(bar_pref)\n    nochart_pref = float(nochart_pref)\n    my_dict = {\"line_pref\":[],\"bar_pref\":[],\"nochart_pref\":[]}\n    # a weight of 0 is remapped to 1 so an unset preference does not zero out the\n    # score; the checks are independent because several weights can be 0 at once\n    if line_pref == 0:\n        line_pref= 1\n    if bar_pref == 0:\n        bar_pref =1\n    if nochart_pref ==0:\n        nochart_pref = 1 \n    display_score =[]\n    #max_pref =[]\n    my_dict[\"line_pref\"].append(line_pref)\n    my_dict[\"bar_pref\"].append(bar_pref)\n    my_dict[\"nochart_pref\"].append(nochart_pref)\n    max_val = max(my_dict.items(), key=operator.itemgetter(1))[0]\n    #max_val=max(max_pref)\n    #print(max_val)\n    \n    #line_pref = int(line_pref)\n    #bar_pref = int(bar_pref)\n    #no_chart_pref = int(no_chart_pref)\n    #print(type(line_pref))\n    for index, row in meaningful_messages_final.iterrows():\n        display_pref = row['psdo:PerformanceSummaryDisplay{Literal}']\n        display_pref = display_pref.replace(\"'\", \"\")\n        x = display_pref.split(\",\")\n        bar='bar'\n        line='line'\n        text='none'\n        if bar in x:\n            row['score'] = row['score']*bar_pref\n        if line in x:\n            row['score'] = row['score']*line_pref\n        if text in x :\n            row['score'] = row['score']*nochart_pref\n        display_score.append(row['score'])\n\n    meaningful_messages_final['display_score'] = display_score\n    \n    return meaningful_messages_final,max_val\n\ndef messagepreferences(display_preferences,message_preferences_df):\n    #message_preferences_df.to_csv('before_select.csv')\n    top_performer_pref=float(message_preferences_df.at[0,'Utilities.Message_Format.1'])\n    nontop_performer_pref=float(message_preferences_df.at[0,'Utilities.Message_Format.2'])\n    performance_dropped_below_peer_pref=float(message_preferences_df.at[0,'Utilities.Message_Format.16'])\n    no_message_pref=float(message_preferences_df.at[0,'Utilities.Message_Format.24'])\n    may_improve_pref=float(message_preferences_df.at[0,'Utilities.Message_Format.18'])\n    approaching_goal_pref=float(message_preferences_df.at[0,'Utilities.Message_Format.11'])\n    performance_improving_pref = float(message_preferences_df.at[0,'Utilities.Message_Format.22'])\n    getting_worse_pref = float(message_preferences_df.at[0,'Utilities.Message_Format.14'])\n    adverse_event_pref = float(message_preferences_df.at[0,'Utilities.Message_Format.21'])\n    message_score = []\n    # as above, remap weights of 0 to 1 independently so no single unset preference\n    # zeroes out the message score\n    if top_performer_pref == 0:\n        top_performer_pref= 1\n    if nontop_performer_pref== 0:\n        nontop_performer_pref =1\n    if performance_dropped_below_peer_pref ==0:\n        performance_dropped_below_peer_pref = 1 \n    if approaching_goal_pref ==0:\n        approaching_goal_pref = 1\n    if getting_worse_pref ==0:\n        getting_worse_pref = 1\n    #print(top_performer_pref,nontop_performer_pref,performance_dropped_below_peer_pref ,approaching_goal_pref,getting_worse_pref )\n    for index, row in display_preferences.iterrows():\n        #message_pref = row['display_score']\n        text=row['psdo:PerformanceSummaryTextualEntity{Literal}']\n        #x = text.split(\" \")\n        #print(x)\n        if text == \"1\" :\n            row['display_score'] = row['display_score']*top_performer_pref\n        if text == \"2\":\n            row['display_score'] = row['display_score']*nontop_performer_pref\n        #if (top and not not1 in x) or (reached and goal in x) or (reached and benchmark in x) or(above and goal in x):\n        \n        if text == \"16\":\n            row['display_score'] = row['display_score']*performance_dropped_below_peer_pref\n        if text == \"24\":\n            row['display_score'] = row['display_score']*no_message_pref\n        if text == \"18\":\n            row['display_score'] = row['display_score']*may_improve_pref\n        if text == \"11\":\n            row['display_score'] = 
row['display_score']*approaching_goal_pref\n if text == \"22\":\n row['display_score'] = row['display_score']*performance_improving_pref\n if text == \"14\":\n row['display_score'] = row['display_score']*getting_worse_pref\n if text == \"21\":\n row['display_score'] = row['display_score']*adverse_event_pref\n message_score.append(row['display_score'])\n display_preferences['message_score'] = message_score\n return display_preferences\n\n\ndef apply_history_message(applied_individual_messages,history,max_val,message_code):\n message_code_df = pd.json_normalize(message_code)\n history_df =pd.json_normalize(history)\n #history_df.to_csv(\"history_df.csv\")\n month1 = history_df[['History.Month1.psdo:PerformanceSummaryDisplay{Literal}','History.Month1.Measure Name','History.Month1.Message Code']].copy()\n month2 = history_df[['History.Month2.psdo:PerformanceSummaryDisplay{Literal}','History.Month2.Measure Name','History.Month2.Message Code']].copy()\n month3 = history_df[['History.Month3.psdo:PerformanceSummaryDisplay{Literal}','History.Month3.Measure Name','History.Month3.Message Code']].copy()\n month4 = history_df[['History.Month4.psdo:PerformanceSummaryDisplay{Literal}','History.Month4.Measure Name','History.Month4.Message Code']].copy()\n month5 = history_df[['History.Month5.psdo:PerformanceSummaryDisplay{Literal}','History.Month5.Measure Name','History.Month5.Message Code']].copy()\n month6 = history_df[['History.Month6.psdo:PerformanceSummaryDisplay{Literal}','History.Month6.Measure Name','History.Month6.Message Code']].copy()\n applied_individual_messages.reset_index()\n for index, row in applied_individual_messages.iterrows():\n disp=row['psdo:PerformanceSummaryDisplay{Literal}'].split(\",\")\n if (month1['History.Month1.psdo:PerformanceSummaryDisplay{Literal}'][0] in disp and month1['History.Month1.Measure Name'][0]== row['Measure Name'] and month1['History.Month1.Message Code'][0]== row['psdo:PerformanceSummaryTextualEntity{Literal}'] ):\n applied_individual_messages = applied_individual_messages.drop(index)\n if (month2['History.Month2.psdo:PerformanceSummaryDisplay{Literal}'][0] in disp and month2['History.Month2.Measure Name'][0]== row['Measure Name'] and month2['History.Month2.Message Code'][0]== row['psdo:PerformanceSummaryTextualEntity{Literal}'] ):\n applied_individual_messages = applied_individual_messages.drop(index)\n if (month3['History.Month3.psdo:PerformanceSummaryDisplay{Literal}'][0] in disp and month3['History.Month3.Measure Name'][0]== row['Measure Name'] and month3['History.Month3.Message Code'][0]== row['psdo:PerformanceSummaryTextualEntity{Literal}'] ):\n applied_individual_messages = applied_individual_messages.drop(index)\n if (month4['History.Month4.psdo:PerformanceSummaryDisplay{Literal}'][0] in disp and month4['History.Month4.Measure Name'][0]== row['Measure Name'] and month4['History.Month4.Message Code'][0]== row['psdo:PerformanceSummaryTextualEntity{Literal}'] ):\n applied_individual_messages = applied_individual_messages.drop(index)\n if (month5['History.Month5.psdo:PerformanceSummaryDisplay{Literal}'][0] in disp and month5['History.Month5.Measure Name'][0]== row['Measure Name'] and month5['History.Month5.Message Code'][0]== row['psdo:PerformanceSummaryTextualEntity{Literal}'] ):\n applied_individual_messages = applied_individual_messages.drop(index)\n if (month6['History.Month6.psdo:PerformanceSummaryDisplay{Literal}'][0] in disp and month6['History.Month6.Measure Name'][0]== row['Measure Name'] and month6['History.Month6.Message Code'][0]== 
row['psdo:PerformanceSummaryTextualEntity{Literal}'] ):\n            applied_individual_messages = applied_individual_messages.drop(index)\n    return applied_individual_messages\n\n\n\n\n\n\n\n\n\n\ndef select(applied_individual_messages,max_val,message_code):\n    # max value of score\n    column = applied_individual_messages[\"message_score\"]\n    message_code_df = pd.json_normalize(message_code)\n    #message_code_df.to_csv(\"message_code.csv\")\n    # max_value = column.max()\n    h = applied_individual_messages[\"message_score\"].idxmax()\n    message_selected_df = applied_individual_messages.iloc[h, :]\n    message_selected_df.at['psdo:PerformanceSummaryDisplay{Literal}']=max_val\n    #message_selected_df.to_csv('message_selected.csv')\n    mes_id=message_selected_df.at['psdo:PerformanceSummaryTextualEntity{Literal}'].split(\".\")\n    #print(mes_id[0])\n    message = \"Message_ids.\"+mes_id[0]\n    message_selected_df = message_selected_df.append(pd.Series(message_selected_df.at['psdo:PerformanceSummaryTextualEntity{Literal}'], index=['Message Code']))\n    message_selected_df.at['psdo:PerformanceSummaryTextualEntity{Literal}']=message_code_df.at[0,message]\n    message_selected_df = message_selected_df.drop(['score','display_score','message_score']);\n    message_selected_df = message_selected_df.T\n    data = message_selected_df.to_json(orient=\"index\", indent=2 )\n    \n    return data.replace(\"\\\\\", \"\")\n    #return column","repo_name":"Display-Lab/esteemer","sub_path":"python/src/esteemer/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30780497016","text":"from sklearn import mixture\r\nimport load_data,os\r\nimport numpy as np\r\nfrom sklearn.metrics import silhouette_samples, silhouette_score\r\nfrom sklearn.feature_selection import RFE\r\nfrom sklearn.svm import SVR\r\nimport util, pandas as pd\r\nimport matplotlib.pyplot as plt # needed by plot_cumsum below\r\nimport seaborn as sns # needed by plot_cumsum below\r\n\r\nSEED = 100\r\nOUTPUT_DIR = 'RFE'\r\nif not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\r\n\r\ndef rfe(dataset, n_components, save_to_file=False):\r\n    if dataset =='creditcard':\r\n        X,y = load_data.load_creditcard_data()\r\n    else:\r\n        X,y = load_data.load_cancer_data()\r\n\r\n    estimator = SVR(kernel=\"linear\")\r\n    model = RFE(estimator,n_features_to_select=n_components, step=1)\r\n    model = model.fit(X, y)\r\n    \r\n    n_samples = X.shape[0]\r\n    \r\n    X_fitted = model.transform(X)\r\n    kurt = pd.DataFrame(X_fitted)\r\n    kurt = kurt.kurt(axis=0)\r\n    kurt= kurt.abs().mean()\r\n    X_inverse = model.inverse_transform(X_fitted)\r\n    reconstruction_error = np.linalg.norm(X - X_inverse)/n_samples\r\n    if save_to_file:\r\n        X_fitted = pd.DataFrame(X_fitted)\r\n        X_fitted['label']= y.values\r\n        X_fitted.to_csv(OUTPUT_DIR+os.sep+dataset+'.csv') \r\n    return kurt, reconstruction_error\r\n\r\ndataset=['creditcard','cancer']\r\nn_init = 10\r\nmax_iter = 300\r\nn_components_list = range(2,11) \r\nindex=1\r\ndf = pd.DataFrame()\r\n\r\nfor data in dataset:\r\n    print(data)\r\n    for n_cluster in n_components_list:\r\n        kurt, error = rfe(data, n_cluster)\r\n        df.loc[index,'data'] = data\r\n        df.loc[index,'n_cluster']= int(n_cluster)\r\n#        df.loc[index, 'variance']= variance[index%len(n_components_list)]\r\n#        df.loc[index, 'cumsum']= cumsum[index%len(n_components_list)]\r\n        df.loc[index, 'reconstruction_error']= error\r\n        df.loc[index, 'kurt']= kurt\r\n        index+=1\r\ndf['n_cluster'] =df['n_cluster'].astype(int) \r\n\r\ndef plot_cumsum(df, title, output_file):\r\n    f, axes = plt.subplots(1, 2, figsize=(10,5))\r\n    \r\n#    df['n_cluster'] = 
df['n_cluster'].astype(int)\r\n \r\n credit_df = df[df['data']=='creditcard']\r\n sns.lineplot(y=\"kurt\", x= \"n_cluster\", data=credit_df,label='kurt',marker='o', ax=axes[0])\r\n sns.lineplot(y=\"reconstruction_error\", x= \"n_cluster\", data=credit_df,label='reconstruction_error',marker='o', ax=axes[0]).set(title='CreditCard',ylabel='score')\r\n# axes[0].set_ylim(0,1)\r\n\r\n cancer_df = df[df['data']=='cancer']\r\n# print(cancer_df)\r\n sns.lineplot(y=\"kurt\", x= \"n_cluster\", data=cancer_df,label='kurt',marker='o', ax=axes[1])\r\n sns.lineplot(y=\"reconstruction_error\", x= \"n_cluster\", data=cancer_df,label='reconstruction_error',marker='o', ax=axes[1]).set(title='Cancer',ylabel='score')\r\n# axes[1].set_ylim(0,1)\r\n \r\n f.savefig(output_file)\r\n \r\nplot_cumsum(df, 'RFE',OUTPUT_DIR +os.sep+'RFE.png')\r\nrfe('creditcard', 9,save_to_file=True)\r\nrfe('cancer', 8 ,save_to_file=True)\r\n","repo_name":"mengjinfeng0331/cs7641-a3","sub_path":"RFE.py","file_name":"RFE.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73148295035","text":"import itertools\nfrom collections import defaultdict\nfrom decimal import Decimal\n\n\ndef angle(c):\n \"\"\"\n Angles are represented as a tuple of (half plane index, tan(|angle|))\n where angle is the shortest clockwise angle from (negative or positive)\n y-axis.\n \"\"\"\n dx, dy = c\n if dx == 0:\n tan_angle = Decimal(0)\n index = 2*int(dy > 0) # Indices 0 and 2 used for dx == 0\n else:\n tan_angle = Decimal(dy) / Decimal(dx)\n index = 1 + 2*int(dx < 0) # Indices 1 and 3 used for dx != 0\n return (index, tan_angle)\n\n\ndef visualize(coords_by_angle_):\n coords = set().union(*coords_by_angle_.values())\n for y in range(y_len):\n for x in range(x_len):\n if x == ox and y == oy:\n print('o', end='')\n elif (x - ox, y - oy) in coords:\n print('#', end='')\n else:\n print('.', end='')\n print()\n\n\ninput_file = \"../input.txt\"\nwith open(input_file, 'r') as f:\n input_text = f.read()\n\n# Test:\n# input_text = \"\"\".#....#####...#..\n# ##...##.#####..##\n# ##...#...#.#####.\n# ..#.....#...###..\n# ..#.#.....#....##\"\"\"\n\n# Test:\n# input_text = (\n# \"\"\".#..##.###...#######\n# ##.############..##.\n# .#.######.########.#\n# .###.#######.####.#.\n# #####.##.#.##.###.##\n# ..#####..#.#########\n# ####################\n# #.####....###.#.#.##\n# ##.#################\n# #####.##.###..####..\n# ..######..##.#######\n# ####.##.####...##..#\n# .#####..#.######.###\n# ##...#.##########...\n# #.##########.#######\n# .####.#.###.###.#.##\n# ....##.##.###..#####\n# .#.#.###########.###\n# #.#.#.#####.####.###\n# ###.##.####.##.#..##\"\"\")\n\ny_len = len(input_text.splitlines())\nx_len = len(input_text.splitlines()[0])\n\nasteroids = [\n (x, y)\n for y, line in enumerate(input_text.splitlines())\n for x, char in enumerate(line)\n if char == '#'\n]\n\ndef shrink(x, y):\n ax, ay = abs(x), abs(y)\n gcd = max(i for i in range(1, max(ax, ay) + 1) if ax % i == ay % i == 0)\n return x / gcd, y / gcd\n\nvisible = [\n (len({\n shrink(ax - ox, ay - oy)\n for (ax, ay) in asteroids\n if ox != ax or oy != ay\n }), ox, oy) for (ox, oy) in asteroids]\n\nprint(\"Max visible (part 1):\", max(visible))\n\n\nox, oy = max(visible)[1:]\ncoords = [(ax - ox, ay - oy) for (ax, ay) in asteroids]\n\n\ncoords_by_angle = defaultdict(list)\nfor c in coords:\n if c != (0, 0):\n coords_by_angle[angle(c)].append(c)\ncoords_by_angle = dict(coords_by_angle)\n\nfor angle in 
coords_by_angle.keys():\n    # Sort coordinates with the same angle by sum of coordinates, i.e. distance\n    # from origin.\n    coords_by_angle[angle].sort(key=lambda c: abs(c[0]) + abs(c[1]))\n\nvaporized = 0\nfor angle in itertools.cycle(sorted(coords_by_angle.keys())):\n    # visualize(coords_by_angle)\n    # input()\n    try:\n        c = coords_by_angle[angle].pop(0)\n    except IndexError:\n        pass\n    else:\n        vaporized += 1\n        if vaporized == 200:\n            print(\"Asteroid number 200: (part 2)\", (c[0] + ox, c[1] + oy))\n            break\n","repo_name":"freidrichen/advent-of-code-2019","sub_path":"day10/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71081139837","text":"#coding=utf-8\n\n'''\nUsing except and raise together:\n\nIn practice, exception handling often needs to be more elaborate -- when an exception occurs, a single method may be unable to handle it completely, and several\nmethods must cooperate to handle it fully. That is, in the method where the exception occurs the program handles it only partially; some of the handling can only be completed in that method's caller, so the exception should be raised again to let the caller catch it too.\n\nTo let several methods cooperate on handling the same exception like this, combine a raise statement inside the except block.\n'''\n\nclass AuctionException(Exception):\n\tpass\n\nclass AuctionTest(object):\n\tdef __init__(self,init_price):\n\t\tself.init_price = init_price\n\n\tdef bid(self,bid_price):\n\t\td = 0.0\n\t\ttry:\n\t\t\td = float(bid_price)\n\t\texcept Exception as e:\n\t\t\t# here we simply print the exception information\n\t\t\tprint(\"Conversion raised an exception:\",e)\n\t\t\t# raise the custom exception again\n\t\t\traise AuctionException(\"The bid price must be a number and must not contain other characters!\")\n\t\tif self.init_price > d:\n\t\t\traise AuctionException(\"The bid price is lower than the starting price, so bidding is not allowed!\")\n\t\tself.init_price = d\n\ndef main():\n\tat = AuctionTest(20.4)\n\ttry:\n\t\tat.bid(\"df\")\n\texcept AuctionException as ae:\n\t\t# catch the exception re-raised from the bid() method and handle it here\n\t\tprint(\"Exception caught in main():\",ae)\nmain()\n\n\n'''\nIn the program above, after the except block catches the exception it prints the exception's string message and then raises an\nAuctionException, notifying the method's caller to handle that AuctionException again. So the main() function -- the caller of bid() -- can\ncatch the AuctionException once more and print its details.\n\nCombining except and raise like this is very common in practice. Real-world exception handling is usually split into two parts:\n1. the application backend needs to log the details of the exception;\n2. some hint based on the exception should also be conveyed to the user;\n3. in that situation the exception must be completed by two methods together, so except and raise have to be combined.\n\nIf the program needs to propagate the original exception's details directly, Python also allows wrapping the original exception with a custom exception, in the following form:\n\t\traise AuctionException(e)\n'''","repo_name":"canglongqixiu/NA_PYTHON","sub_path":"python/training/crazy_python/chapter7/p10.py","file_name":"p10.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42528813871","text":"from collections import OrderedDict\nimport pickle\nimport os\nimport librosa\nimport numpy as np\nimport scipy\nimport scipy.signal as scisignal\nimport tqdm\nimport math\nfrom dtw import dtw\nfrom numpy.linalg import norm\nfrom scipy.spatial.transform import Rotation as R\n\n\n# ===========================================================\n# Frechet Distance\n# ===========================================================\ndef euc_dist(pt1, pt2):\n    return math.sqrt((pt2[0]-pt1[0])*(pt2[0]-pt1[0])+(pt2[1]-pt1[1])*(pt2[1]-pt1[1]))\n    \ndef _c(ca,i,j,P,Q):\n    if ca[i,j] > -1:\n        return ca[i,j]\n    elif i == 0 and j == 0:\n        ca[i,j] = euc_dist(P[0],Q[0])\n    elif i > 0 and j == 0:\n        ca[i,j] = max(_c(ca,i-1,0,P,Q),euc_dist(P[i],Q[0]))\n    elif i == 0 and j > 0:\n        ca[i,j] = max(_c(ca,0,j-1,P,Q),euc_dist(P[0],Q[j]))\n    elif i > 0 and j > 0:\n        ca[i,j] = max(min(_c(ca,i-1,j,P,Q),_c(ca,i-1,j-1,P,Q),_c(ca,i,j-1,P,Q)),euc_dist(P[i],Q[j]))\n    else:\n        ca[i,j] = float(\"inf\")\n    return ca[i,j]\n    \ndef frechet_distance(P,Q):\n    ca = np.ones((len(P),len(Q)))\n    ca = np.multiply(ca,-1)\n    return _c(ca, len(P) - 1, len(Q) - 1, P, Q) # ca is a len(P) x len(Q) memo matrix (e.g. 3 x 4)\n\n\ndef 
pos_frechet(predict_joints, target_joints):\n    pos_f = 0\n    seq_len = predict_joints.shape[0]\n    for i in range(seq_len):\n        pos_f += frechet_distance(predict_joints[i, ], target_joints[i, ])\n    return pos_f\n\n\ndef vel_frechet(predict_joints, target_joints):\n    vel_f = 0\n    velocity_predicted = np.diff(predict_joints, axis=0)\n    velocity_target = np.diff(target_joints, axis=0)\n    print(velocity_predicted.shape, velocity_target.shape)\n    seq_len = velocity_predicted.shape[0]\n    for i in range(seq_len):\n        vel_f += frechet_distance(velocity_predicted[i, ], velocity_target[i, ])\n    return vel_f\n\n\ndef motion_div(predict_joints):\n\n    velocity_predicted = np.diff(predict_joints, axis=0)\n    pos_var = np.var(predict_joints)\n    vel_var = np.var(velocity_predicted)\n    return pos_var, vel_var\n\n\ndef select_aligned(music_beats, motion_beats, tol=6):\n    \"\"\" Select aligned beats between music and motion.\n\n    For each motion beat, we try to find a one-to-one mapping in the music beats.\n    Kwargs:\n        music_beats: onehot vector\n        motion_beats: onehot vector\n        tol: tolerant number of frames [i-tol, i+tol]\n    Returns:\n        music_beats_aligned: aligned idxs list\n        motion_beats_aligned: aligned idxs list\n    \"\"\"\n    music_beat_idxs = np.where(music_beats)[0]\n    motion_beat_idxs = np.where(motion_beats)[0]\n\n    music_beats_aligned = []\n    motion_beats_aligned = []\n    accu_inds = []\n    for motion_beat_idx in motion_beat_idxs:\n        dists = np.abs(music_beat_idxs - motion_beat_idx).astype(np.float32)\n        dists[accu_inds] = np.Inf\n        ind = np.argmin(dists)\n\n        if dists[ind] > tol:\n            continue\n        else:\n            music_beats_aligned.append(music_beat_idxs[ind])\n            motion_beats_aligned.append(motion_beat_idx)\n            accu_inds.append(ind)\n\n    music_beats_aligned = np.array(music_beats_aligned)\n    motion_beats_aligned = np.array(motion_beats_aligned)\n    # print(music_beats_aligned.shape, motion_beats_aligned.shape)\n    return music_beats_aligned, motion_beats_aligned\n\ndef alignment_score(music_beats, motion_beats, sigma=3):\n    \"\"\"Calculate alignment score between music and motion.\"\"\"\n    if motion_beats.sum() == 0:\n        return 0.0\n\n    music_beat_idxs = np.where(music_beats)[0]\n    motion_beat_idxs = np.where(motion_beats)[0]\n\n    score_all = []\n    for motion_beat_idx in motion_beat_idxs:\n        dists = np.abs(music_beat_idxs - motion_beat_idx).astype(np.float32)\n        ind = np.argmin(dists)\n        score = np.exp(- dists[ind]**2 / 2 / sigma**2)\n        score_all.append(score)\n    return sum(score_all) / len(score_all) \n\ndef dtw_motion_music(music_beats, motion_beats):\n    dist, cost, acc_cost, path = dtw(music_beats.T, motion_beats.T, dist=lambda x, y: norm(x - y, ord=1))\n    print('Normalized distance between the two sounds:', dist)\n    return cost.T","repo_name":"GuHuangAI/GCDG","sub_path":"evaluation/evalution_metric.py","file_name":"evalution_metric.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"3719102494","text":"\"\"\"\nJuejin booklets, sorted by purchase count\n\"\"\"\nimport requests\nimport looter as lt\nfrom pprint import pprint\n\ndomain = 'https://juejin.im'\ntotal = []\n\n\ndef crawl(url):\n    items = requests.get(url, headers=lt.DEFAULT_HEADERS).json()['d']\n    for item in items:\n        data = {}\n        data['title'] = item['title']\n        data['desc'] = item['desc']\n        data['author'] = item['userData']['username']\n        data['profile'] = item['profile']\n        data['buyCount'] = item['buyCount']\n        data['price'] = item['price']\n        data['publishDate'] = item['finishedAt']\n        data['url'] = f\"{domain}/book/{item['_id']}\"\n        
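# show each scraped record so crawl progress is visible\n        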
pprint(data)\n total.append(data)\n\n\nif __name__ == '__main__':\n tasklist = [\n f'https://xiaoce-timeline-api-ms.juejin.im/v1/getListByLastTime?uid=5901b4faac502e0063cf9e02&client_id=1555503959385&token=eyJhY2Nlc3NfdG9rZW4iOiJuM0g1REUzUUZ0RjczNnJwIiwicmVmcmVzaF90b2tlbiI6InVJck0zcURsbjlkU2dJRm8iLCJ0b2tlbl90eXBlIjoibWFjIiwiZXhwaXJlX2luIjoyNTkyMDAwfQ%3D%3D&src=web&alias=&pageNum={n}'\n for n in range(1, 4)\n ]\n [crawl(task) for task in tasklist]\n lt.save(total, name='juejin_books.csv', sort_by='buyCount', order='desc')\n","repo_name":"alphardex/looter","sub_path":"examples/juejin_books.py","file_name":"juejin_books.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"96"} +{"seq_id":"34346226565","text":"import sklearn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom itertools import chain, combinations\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.datasets import load_iris\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.datasets import load_boston\nimport matplotlib.pyplot as plt\nimport datetime\nimport pickle\nimport sys\nfrom PIL import Image\nimport io\n\nfrom matplotlib.dates import (YEARLY, DateFormatter,\n rrulewrapper, RRuleLocator, drange)\n\n\n\n# dataCol = ['Name (E)' ,'YY/MM','Dissolved Total N(㎎/L)', 'NH3-N(㎎/L)', 'NO3-N(㎎/L)', 'Dissolved Total P(㎎/L)','Conductivity(µS/㎝)','TSI(Chl-a)', 'Grade.3' ]\n\ndataCol = ['Name (E)' ,'YY/MM','NH3-N(㎎/L)', 'NO3-N(㎎/L)', 'PO4-P(㎎/L)',\n 'T-N(㎎/L)','T-P(㎎/L)', 'Dissolved Total N(㎎/L)','Dissolved Total P(㎎/L)',\n 'Hydrogen ion conc.','DO (㎎/L)', 'TSI(Chl-a)']\n\nMasterDataframe = pd.read_excel('predata.xls')\nMasterDataframe.rename(columns=MasterDataframe.iloc[0])\n#Get all subset of the column\nColumnList = list(MasterDataframe)\n# print(ColumnList)\n\n\n# print(MasterDataframe.head())\n\nMasterDataframe2 = pd.read_excel('Data.xlsx')\nMasterDataframe2.rename(columns=MasterDataframe2.iloc[0])\n#Get all subset of the column\n# ColumnList = list(MasterDataframe)\n\n\ndf1 = MasterDataframe[dataCol]\ndf2 = MasterDataframe2[dataCol]\n\ndf = pd.concat([df1,df2])\n\n# print(df.head())\n# print(df.describe())\n\n# print(df.isnull().sum())\n\n\nfor col in dataCol:\n\ttry:\n\t\t# print(col)\n\t\tmedian = df[col].median()\n\t\t# print(median)\n\t\t# print(\"_____________________+\")\n\t\tdf[col].fillna(median, inplace=True)\n\texcept:\n\t\t# print(col)\n\t\t# print(\"it here\")\n\t\tdf[col].fillna(\"Mesotrophic\", inplace=True)\n\nwindow_len = 5\n\nmyDf = df\n# myDf = myDf.sort_values(by='YY/MM')\n\ndictrictArr = (myDf['Name (E)'].unique())\n# trash = [\"Choicheon Stream\",\"Gamicheon Stream\",'Mangwolcheon Stream','Mokgamcheon Stream','Ahnyangcheon Stream 3-2','Min', 'Mesotrophic', 'Max' ,'Average']\n\n# dictrictArr =[]\n# for x in dictrictPreArr:\n# \tif x not in trash:\n# \t\tdictrictArr.append(x)\n\ntraining_input = np.empty((1,window_len,6))\ntraining_output = np.empty((1,))\ntest_input = np.empty((1,window_len,6))\ntest_output = np.empty((1,))\n\n\n# print(training_input)\n# print(test_input)\n# quit()\nnorm_cols = ['NH3-N(㎎/L)', 'NO3-N(㎎/L)', 'PO4-P(㎎/L)',\n 'T-N(㎎/L)','T-P(㎎/L)', 'Dissolved Total N(㎎/L)','Dissolved Total P(㎎/L)',\n 'Hydrogen ion conc.','DO (㎎/L)', 'TSI(Chl-a)']\n\ndictrictLabel = []\ndictrictValue = []\n\n\nfinal_predict = []\ntrue_label = []\n# quit()\n\ncount = 0\nlenValue = []\nfor dictrict in 
dictrictArr:\n\t# print(dictrict)\n\tcount = count + 1\n\tsmall_data = df[df['Name (E)']==dictrict]\n\tsmall_data = small_data.drop('Name (E)', 1)\n\t# print(len(small_data))s\n\tif (len(small_data)) <= 48:\n\t\tcontinue\n\n\tdictrictLabel.append(dictrict)\n\tdictrictValue.append(np.array(small_data['TSI(Chl-a)'].values).tolist())\n\n\t# continue\n\t# print(small_data)\n\tsplit_date = \"2020/01\"\n\ttraining_set, test_set = small_data[small_data['YY/MM']<\"2019/09\"], small_data[small_data['YY/MM']>=\"2019/09\"]\n\n\n\ttimeframe = small_data['YY/MM'].values\n\n\toutput = small_data['TSI(Chl-a)']\n\n\ttraining_set = training_set.drop('YY/MM', 1)\n\ttest_set = test_set.drop('YY/MM', 1)\n\ttraining_set=training_set.astype('float')\n\t\n\n\n\ttrain_X = np.array(training_set[['NH3-N(㎎/L)', 'NO3-N(㎎/L)', 'Dissolved Total P(㎎/L)']])[:-1]\n\ttrain_y = np.array(training_set['TSI(Chl-a)'])[1:]\n\n\n\ttest_X = np.array(test_set[['NH3-N(㎎/L)', 'NO3-N(㎎/L)', 'Dissolved Total P(㎎/L)']])[:-1]\n\ttest_y = np.array(test_set['TSI(Chl-a)'])\n\n\tfrom statsmodels.tsa.statespace.sarimax import SARIMAX\n\tmodel= SARIMAX(train_y, \n\t exog= train_X, \n\t order=(3,1,1),\n\t enforce_invertibility=False, enforce_stationarity=False)\n\tresults = model.fit()\n\n\n\tpredictions= results.predict(start = len(train_y) -13 , end= len(train_y) +2, exog= test_X[:3])\n\n\t\n\n\treal = output[len(train_y)-13:len(train_y) +2]\n\n\tdates = [i for i in range(0,len(predictions))]\n\n\tmseValue =(np.mean(np.abs(predictions[:len(real)] - real))/np.min(predictions))\n\n\tif mseValue == 0.0:\n\t\tcontinue\n\n\tano = 'Relative MSE: ' +str(mseValue)+'%', \n\tlenValue.append([dictrict,len(small_data),mseValue])\n\n\tfig, ax1 = plt.subplots(1,1,figsize=(20,10))\n\tax1.plot(timeframe[:len(real)],real, label='Actual')\n\tax1.plot(timeframe[:len(predictions)],predictions, label='Predicted')\n\tax1.annotate(ano, \n xy=(0.75, 0.9), xycoords='axes fraction',\n xytext=(0.75, 0.9), textcoords='axes fraction')\n\tax1.set_title(\"Chi-a Prediction at \"+dictrict,fontsize=13)\n\tax1.legend()\n\tfig.autofmt_xdate()\n\tax1.set_ylim(bottom=0)\n\tax1.set_ylim(top=100)\n\t# plt.show()\n\tplt.savefig(\"sarima/\"+ dictrict +'.png', dpi=100)\n\n\tfinal_predict.append(list(predictions[-2:]))\n\ttrue_label.append(list(real[-2:]))\n\t# quit()\n\n\n# totalCount = 0\n# dictrictName = []\n\n# dictrictMSE = []\n\n# print(dictrictName)\nprint(final_predict)\nprint(true_label)\n\ny_pred = []\nprint(\"final predict\")\nfor i in range(0, len(lenValue)):\n\tif final_predict[i][0] > 60 or final_predict[i][1] >60:\n\t\ty_pred.append(1)\n\telse:\n\t\ty_pred.append(0)\n\t\t# grade = \"Eutrophy\"\n\t\t# if finalArr[i][0] > 70 or finalArr[i][1] >70:\n\t\t# \tgrade = \"Hypereutrophy\"\n\t\t# if finalArr[i][0] > 80 or finalArr[i][1] >80:\n\t\t# \tgrade = \"Algae bloom\"\n\t\t# print(\"tram \"+ dictrictName[i]+ \" co kha nang no hoa\")\n\t\t# print(\"Grade:\" + grade) \n\ny_true = []\nprint(\"final predict\")\nfor i in range(0, len(lenValue)):\n\tif true_label[i][0] > 60 or true_label[i][1] >60:\n\t\ty_true.append(1)\n\telse:\n\t\ty_true.append(0)\n\t\t# grade = \"Eutrophy\"\n\t\t# if finalArr2[i][0] > 70 or finalArr2[i][1] >70:\n\t\t# \tgrade = \"Hypereutrophy\"\n\t\t# if finalArr2[i][0] > 80 or finalArr2[i][1] >80:\n\t\t# \tgrade = \"Algae bloom\"\n\t\t# print(\"tram \"+ dictrictName[i]+ \" co kha nang no hoa\")\n\t\t# print(\"Grade:\" + grade) \n\nprint(y_true)\nprint(len(y_true))\nprint(y_pred)\nprint(len(y_pred))\n\nfrom sklearn.metrics import 
classification_report\nprint(classification_report(y_true, y_pred))\n\nprint(final_predict)\ni= 0 \n# per-station names and relative-MSE values, collected for the CSV written below\ndictrictName = []\ndictrictMSE = []\nfor row in lenValue:\n\tif final_predict[i][0] > 60 or final_predict[i][1] >60:\n\t\tgrade = \"Eutrophy\"\n\t\tif final_predict[i][0] > 70 or final_predict[i][1] >70:\n\t\t\tgrade = \"Hypereutrophy\"\n\t\tif final_predict[i][0] > 80 or final_predict[i][1] >80:\n\t\t\tgrade = \"Algae bloom\"\n\t\tprint(\"station \"+ row[0]+ \" is at risk of an algal bloom\")\n\t\tprint(\"Grade:\" + grade) \n\ti = i +1\n\t# totalCount = totalCount + int(row[1])\n\t# print(\"Name: \"+ row[0] + \", Count: \" + str(row[1]) + \", Relative MSE:\" + str(row[2])+\"%\")\n\tdictrictName.append(row[0])\n\tdictrictMSE.append(row[2])\n\n# quit()\ndf = pd.DataFrame(list(zip(dictrictName, dictrictMSE)), \n               columns =['Name', 'val']) \ndf.to_csv(\"sarima.csv\")\n# print(totalCount)\n\n# print(dictrictLabel)\n# print(dictrictValue)\n\n# dictrictValue = np.array(dictrictValue).transpose().tolist()\n\n# # print(len(dictrictValue))\n# # print(len(dictrictLabel))\n\n\n# df = pd.DataFrame(data =dictrictValue).transpose()\n# df.columns = dictrictLabel\n# # print(df)\n\n# correlation = (df.corr())\n\n# # correlation.to_csv(\"output.csv\")\n\n\n# correlation = correlation.values\n\n\n# # print(correlation[0][0])\n\n# for i in range(0,26):\n# \tfor j in range(0,26):\n# \t\tif i==j:\n# \t\t\tcontinue\n# \t\tif correlation[i][j] > 0.7:\n# \t\t\tprint(dictrictLabel[i] + \"<--->\" + dictrictLabel [j] +\" ||| correlation: \"+str(correlation[i][j]))","repo_name":"ttdung997/WaterQualityIndex","sub_path":"sarima.py","file_name":"sarima.py","file_ext":"py","file_size_in_byte":7389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"2231755721","text":"import sys\nfrom PyQt5.QtWidgets import (\n    QMainWindow, QApplication,\n    QLabel, QToolBar, QAction, QStatusBar\n)\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt\n\nclass MainWindow(QMainWindow):\n\n    def __init__(self):\n        super(MainWindow, self).__init__()\n\n        self.setWindowTitle(\"My Awesome App\")\n\n\napp = QApplication(sys.argv)\n\nwindow = MainWindow()\nwindow.show()\n\napp.exec()","repo_name":"suraj-singh12/Qt6","sub_path":"Actions/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"37255905415","text":"from elasticsearch_dsl.query import Q, MultiMatch, SF\nfrom .search_indexes import BooksIndex\nfrom .models import Books\n\ndef get_search_query(phrase):\n    query = Q(\n        'function_score',\n        query=MultiMatch(\n            fields=['title', 'author', 'publisher'],\n            query=phrase\n        ),\n        functions=[\n            SF('field_value_factor', field='number_of_views')\n        ]\n    )\n    return BooksIndex.search().query(query)\ndef search(phrase):\n    return get_search_query(phrase).to_queryset()\n\n","repo_name":"StormStudioAndroid2/practice","sub_path":"litsite/viewdb/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"73776727994","text":"from sc.core.element import Element\nfrom sc.core.keynodes import Keynodes\nfrom sc.core.keywords import Labels, TypeAttrs\nfrom sc.core.transaction.utils import _parse_output_element, _get_label_from_type\n\nimport neo4j\n\n\ndef _const_attr() -> str:\n    return f\"{TypeAttrs.CONST}: 'CONST'\"\n\n\ndef _arc_member_const_pos_perm_attrs() -> str:\n    return (f\"{TypeAttrs.CONST}: 'CONST', 
{TypeAttrs.ARC_PERM}: 'PERM',\"\n f\"{TypeAttrs.ARC_POS}: 'POS', {TypeAttrs.ARC_MEMBER}: true\")\n\n\nclass TransactionNamesWriteResult:\n\n def __init__(self, result_summary: neo4j.ResultSummary):\n self.run_time = result_summary.result_available_after\n self.consume_time = result_summary.result_consumed_after\n\n def __repr__(self) -> str:\n return \"run_time: {} ms, consume_time: {} ms\".format(\n self.run_time,\n self.consume_time\n )\n\n\nclass TransactionNamesWrite:\n\n def __init__(self,\n driver: neo4j.Driver,\n nrel_sys_idtf: str = Keynodes.NREL_SYS_IDTF) -> None:\n self._driver = driver\n self._sys_idtfs = set()\n self._tasks = []\n\n self._nrel_sys_idtf = nrel_sys_idtf\n\n assert isinstance(self._nrel_sys_idtf, str)\n\n def set_system_identifier(self, el: Element, sys_idtf: str):\n \"\"\"\n Adds command to setup system identifier of specified element.\n If element already have system_identifier, then it will be replaces with new one\n\n :param el: Element id to setup system identifier\n :params sys_idtf: Value of system identifier\n \"\"\"\n assert sys_idtf not in self._sys_idtfs\n\n self._sys_idtfs.add(sys_idtf)\n self._tasks.append((el, sys_idtf))\n\n def _is_empty(self) -> bool:\n return len(self._sys_idtfs) == 0\n\n def _make_query(self) -> str:\n query = (f\"MATCH (l:{Labels.SC_LINK} {{content: '{Keynodes.NREL_SYS_IDTF}', {_const_attr()} }})\"\n f\"<-[__idtf_edge:{Labels.SC_ARC} {{ {TypeAttrs.CONST}: 'CONST' }}]\"\n f\"-(__sys_idtf:{Labels.SC_NODE}), \\n\"\n f\"(:{Labels.SC_EDGE_SOCK} {{edge_id: id(__idtf_edge)}})\"\n f\"<-[:{Labels.SC_ARC} {{ {_arc_member_const_pos_perm_attrs()} }}]\"\n f\"-(__sys_idtf)\\n\")\n\n def _subquery_item(task):\n el, idtf = task\n\n return (f\"\\n MATCH (el:{_get_label_from_type(el.type)}) WHERE id(el) = {el.id.identity}\\n\"\n f\" OPTIONAL MATCH (el)\"\n f\"-[edge:{Labels.SC_ARC} {{ {_const_attr()} }}]\"\n f\"->(link: {Labels.SC_LINK} {{ {_const_attr()} }}),\\n\"\n f\" (edge_sock: {Labels.SC_EDGE_SOCK} {{ edge_id: id(edge)}})\"\n f\"<-[edge_rel:{Labels.SC_ARC} {{ {_arc_member_const_pos_perm_attrs()} }}]\"\n f\"-(__sys_idtf)\\n\"\n f\" RETURN el, edge_sock, edge, '{idtf}' as idtf\\n\")\n\n query += (f\"CALL {{\"\n f\"{'UNION'.join(map(lambda t: _subquery_item(t), self._tasks))}\"\n f\"}}\\n\"\n f\"WITH el, edge_sock, edge, idtf, __sys_idtf\\n\"\n f\"DETACH DELETE edge_sock\\nDELETE edge\\n\"\n f\"WITH el, idtf, __sys_idtf\\n\"\n f\"CREATE (el)-[edge:{Labels.SC_ARC} {{ {_const_attr()} }}]->(:{Labels.SC_LINK} {{ content: idtf, type: 'str', is_url: false, {_const_attr()} }})\\n\"\n f\"WITH __sys_idtf, edge\\n\"\n f\"CREATE (: {Labels.SC_EDGE_SOCK} {{edge_id: id(edge)}})<-[:{Labels.SC_ARC} {{ {_arc_member_const_pos_perm_attrs()} }}]-(__sys_idtf)\\n\"\n f\"RETURN edge\")\n\n return query\n\n def run(self) -> TransactionNamesWriteResult:\n assert not self._is_empty()\n\n query = self._make_query()\n # print (query)\n with self._driver.session() as session:\n return session.write_transaction(TransactionNamesWrite._run_impl, query)\n\n @neo4j.unit_of_work(timeout=30)\n def _run_impl(tx: neo4j.Transaction, query):\n try:\n query_res = tx.run(query)\n except neo4j.exceptions.DriverError:\n return None\n\n info = query_res.consume()\n return TransactionNamesWriteResult(result_summary=info)\n\n# ------------------------------\n\n\nclass TransactionNamesReadResult:\n\n def __init__(self, values: dict, result_summary: neo4j.ResultSummary):\n self.values = values\n self.run_time = result_summary.result_available_after\n self.consume_time = 
result_summary.result_consumed_after\n\n def __getitem__(self, idtf):\n return self.values[idtf]\n\n def __len__(self):\n return len(self.values)\n\n def __repr__(self) -> str:\n return \"values_num: {}, run_time: {} ms, consume_time: {} ms\".format(\n len(self.values.keys()),\n self.run_time,\n self.consume_time\n )\n\n\nclass TransactionNamesRead:\n\n def __init__(self,\n driver: neo4j.Driver,\n nrel_sys_idtf: str = Keynodes.NREL_SYS_IDTF) -> None:\n self._driver = driver\n self._sys_idtfs = set()\n\n self._nrel_sys_idtf = nrel_sys_idtf\n\n assert isinstance(self._nrel_sys_idtf, str)\n\n def resolve_by_system_identifier(self, sys_idtf: str) -> str:\n \"\"\"\n Adds command to resolve element by system identifier\n\n :params sys_idtf: Value of system identifier\n :returns Returns alias of result value. IT shoudl be used to get ElementID from result\n \"\"\"\n assert sys_idtf not in self._sys_idtfs\n\n self._sys_idtfs.add(sys_idtf)\n return sys_idtf\n\n def _is_empty(self) -> bool:\n return len(self._sys_idtfs) == 0\n\n def _make_query(self) -> str:\n\n query = (f\"MATCH (l:{Labels.SC_LINK} {{ content: '{Keynodes.NREL_SYS_IDTF}', {_const_attr()} }})\"\n f\"<-[edge:{Labels.SC_ARC} {{ {_const_attr()} }}]\"\n f\"-(__sys_idtf:{Labels.SC_NODE}), \\n\"\n f\"(edge_sock:{Labels.SC_EDGE_SOCK} {{edge_id: id(edge)}})\"\n f\"<-[:{Labels.SC_ARC} {{ {_arc_member_const_pos_perm_attrs()} }}]\"\n f\"-(__sys_idtf)\\n\"\n f\"WITH __sys_idtf\\n\")\n\n with_values = [\"__sys_idtf\"]\n for idtf in self._sys_idtfs:\n if len(with_values) > 1:\n query += \"UNION\\n\"\n\n with_values.append(idtf)\n query += (f\"MATCH (link:{Labels.SC_LINK} {{ content: '{idtf}', {_const_attr()} }}),\"\n f\" ({idtf})-[edge:{Labels.SC_ARC} {{ {_const_attr()} }}] -> (link), \\n\"\n f\"(__sys_idtf)-[:{Labels.SC_ARC} {{ {_arc_member_const_pos_perm_attrs()} }}]->(:{Labels.SC_EDGE_SOCK} {{edge_id: id(edge)}})\\n\"\n f\"RETURN '{idtf}' as idtf, {idtf} as el\\n\")\n\n return query\n\n def run(self) -> TransactionNamesReadResult:\n assert not self._is_empty()\n\n query = self._make_query()\n # print (query)\n with self._driver.session() as session:\n return session.write_transaction(TransactionNamesRead._run_impl, query)\n\n @neo4j.unit_of_work(timeout=10)\n def _run_impl(tx: neo4j.Transaction, query):\n try:\n query_res = tx.run(query)\n except neo4j.exceptions.DriverError:\n return None\n\n values = {}\n for _, record in enumerate(query_res):\n key = record[\"idtf\"]\n value = record[\"el\"]\n\n values[key] = _parse_output_element(value)\n\n info = query_res.consume()\n\n return TransactionNamesReadResult(values, result_summary=info)\n","repo_name":"ostis-dev/neo4j-platform","sub_path":"db/sc/core/transaction/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":7569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"3651833564","text":"def rough_parse_phonebook():\n with open(\"WebCalls/phonebook.txt\", \"r\") as f:\n lines = [line.rstrip('\\n') for line in f]\n content_list = []\n for line in lines:\n if \"###\" in line:\n content_list.append([])\n else:\n content_list[len(content_list)-1].append(line)\n return content_list\n\n\ndef jsonify(rough_parse):\n content_list = []\n for piece in rough_parse:\n piece_dict = {}\n for line in piece:\n line_parts = line.split(\"::\",)\n key = line_parts[0]\n line_parts.pop(0)\n element = line_parts\n piece_dict.update({key: element})\n content_list.append(piece_dict)\n return content_list\n\n\ndef 
parse_section_1(input_dict):\n for key in input_dict:\n new_item = input_dict[key][0]\n input_dict[key] = new_item\n return input_dict\n\n\ndef parse_section_2(input_dict):\n for key in input_dict:\n input_dict[key][1] = int(input_dict[key][1])\n input_dict[key][2] = input_dict[key][2].strip('[').strip(']').split(',',)\n if len(input_dict[key][2][0]) == 0:\n input_dict[key][2] = []\n return input_dict\n\n\ndef parse_section_3(input_dict):\n for key in input_dict:\n input_dict[key] = str(input_dict[key][0])\n return input_dict\n\n\ndef parse_section_4(input_dict):\n for key in input_dict:\n new_input = input_dict[key][1].split(',',)\n i = 0\n for piece in new_input:\n if '[' in piece:\n new_input[i] = piece.strip('[').strip(']').split(';',)\n i += 1\n input_dict[key][1] = new_input\n return input_dict\n\n\ndef parse_section_5(input_dict):\n for key in input_dict:\n new_item = input_dict[key][0]\n new_item = new_item.strip('[').strip(']').split(',',)\n input_dict[key] = new_item\n return input_dict\n\n\ndef parse_section_6(input_dict):\n for key in input_dict:\n input_dict[key] = input_dict[key][0]\n return input_dict\n\n\ndef parse():\n content_list = jsonify(rough_parse_phonebook())\n content_list[0] = parse_section_1(content_list[0])\n content_list[1] = parse_section_2(content_list[1])\n content_list[2] = parse_section_3(content_list[2])\n content_list[3] = parse_section_4(content_list[3])\n content_list[4] = parse_section_5(content_list[4])\n content_list[5] = parse_section_6(content_list[5])\n return content_list\n\n\n# 1 - key words\n# keyword\n# :\n# meaning\n# 2 - api call functions\n# api function name\n# :\n# api function url string\n# number of arguments\n# arguments\n# 3 - arguments\n# argument name\n# :\n# type\n# 4 - api call paths\n# goal\n# :\n# api function to use\n# parse path\n# 5 - result interpretations\n# goal\n# :\n# result\n","repo_name":"AHBruns/Ar-Bot-Rage","sub_path":"WebCalls/spectacles.py","file_name":"spectacles.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"35334602510","text":"# main project : a bot to play cookikilicker\nfrom selenium import webdriver\nimport time\n\n\n\n# create a webdriver\ndriver = webdriver.Chrome()\ndriver.get(\"http://orteil.dashnet.org/experiments/cookie/\")\n\n\n# find Main cookie\nBIG_COOKIE = driver.find_element_by_id(\"cookie\")\nid_list = [\"buyElder Pledge\", \"buyTime machine\", \"buyPortal\", \"buyAlchemy lab\", \"buyShipment\", \"buyMine\", \"buyFactory\", \"buyGrandma\", \"buyCursor\"]\n\n# create a buffer time\none_min = time.time() + 5\ntotal_time = 0\n\n\n# click on cookie and stuff of store\nwhile total_time < 300:\n\n BIG_COOKIE.click()\n\n\n if one_min <= time.time():\n\n for item in id_list:\n\n try:\n i = driver.find_element_by_id(item)\n i.click()\n\n except:\n continue\n\n one_min = time.time() + 5\n total_time += 5\n\n\ncoocki_per_sce = driver.find_element_by_id(\"money\").text\n\nprint(F\"{coocki_per_sce} cookies per sec\")\ndriver.close()","repo_name":"saeedshiranii/100DAY_OF_PYTHON","sub_path":"DAY48/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6972348145","text":"from datetime import timedelta\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import connection\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import 
reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy\n\nfrom judge.contest_format.default import DefaultContestFormat\nfrom judge.contest_format.registry import register_contest_format\nfrom judge.timezone import from_database_time\nfrom judge.utils.timedelta import nice_repr\nfrom django.db.models import Min, OuterRef, Subquery\n\n\n@register_contest_format(\"ioi\")\nclass IOIContestFormat(DefaultContestFormat):\n name = gettext_lazy(\"IOI\")\n config_defaults = {\"cumtime\": False}\n \"\"\"\n cumtime: Specify True if time penalties are to be computed. Defaults to False.\n \"\"\"\n\n @classmethod\n def validate(cls, config):\n if config is None:\n return\n\n if not isinstance(config, dict):\n raise ValidationError(\n \"IOI-styled contest expects no config or dict as config\"\n )\n\n for key, value in config.items():\n if key not in cls.config_defaults:\n raise ValidationError('unknown config key \"%s\"' % key)\n if not isinstance(value, type(cls.config_defaults[key])):\n raise ValidationError('invalid type for config key \"%s\"' % key)\n\n def __init__(self, contest, config):\n self.config = self.config_defaults.copy()\n self.config.update(config or {})\n self.contest = contest\n\n def update_participation(self, participation):\n cumtime = 0\n score = 0\n format_data = {}\n\n queryset = participation.submissions\n if self.contest.freeze_after:\n queryset = queryset.filter(\n submission__date__lt=participation.start + self.contest.freeze_after\n )\n\n queryset = (\n queryset.values(\"problem_id\")\n .filter(\n points=Subquery(\n queryset.filter(problem_id=OuterRef(\"problem_id\"))\n .order_by(\"-points\")\n .values(\"points\")[:1]\n )\n )\n .annotate(time=Min(\"submission__date\"))\n .values_list(\"problem_id\", \"time\", \"points\")\n )\n\n for problem_id, time, points in queryset:\n if self.config[\"cumtime\"]:\n dt = (time - participation.start).total_seconds()\n if points:\n cumtime += dt\n else:\n dt = 0\n\n format_data[str(problem_id)] = {\"points\": points, \"time\": dt}\n score += points\n\n self.handle_frozen_state(participation, format_data)\n participation.cumtime = max(cumtime, 0)\n participation.score = round(score, self.contest.points_precision)\n participation.tiebreaker = 0\n participation.format_data = format_data\n participation.save()\n\n def display_user_problem(self, participation, contest_problem, show_final=False):\n if show_final:\n format_data = (participation.format_data_final or {}).get(\n str(contest_problem.id)\n )\n else:\n format_data = (participation.format_data or {}).get(str(contest_problem.id))\n if format_data:\n return format_html(\n '{points}
{time}
',\n state=(\n (\n \"pretest-\"\n if self.contest.run_pretests_only\n and contest_problem.is_pretested\n else \"\"\n )\n + self.best_solution_state(\n format_data[\"points\"], contest_problem.points\n )\n + (\" frozen\" if format_data.get(\"frozen\") else \"\")\n ),\n url=reverse(\n \"contest_user_submissions_ajax\",\n args=[\n self.contest.key,\n participation.id,\n contest_problem.problem.code,\n ],\n ),\n points=floatformat(\n format_data[\"points\"], -self.contest.points_precision\n ),\n time=nice_repr(timedelta(seconds=format_data[\"time\"]), \"noday\")\n if self.config[\"cumtime\"]\n else \"\",\n )\n else:\n return mark_safe('')\n\n def display_participation_result(self, participation, show_final=False):\n if show_final:\n score = participation.score_final\n cumtime = participation.cumtime_final\n else:\n score = participation.score\n cumtime = participation.cumtime\n return format_html(\n '{points}
{cumtime}
',\n points=floatformat(score, -self.contest.points_precision),\n cumtime=nice_repr(timedelta(seconds=cumtime), \"noday\")\n if self.config[\"cumtime\"]\n else \"\",\n )\n","repo_name":"LQDJudge/online-judge","sub_path":"judge/contest_format/ioi.py","file_name":"ioi.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"96"} +{"seq_id":"25771493799","text":"class Solution:\n def jump(self, nums: List[int]) -> int:\n s=d=0\n sl=len(nums)-1\n if(sl==0):\n return(0)\n for i in range(len(nums)-1,-1,-1):\n if(nums[i]>=d):\n sl=i\n d+=1\n sl+=1\n if(sl==0):\n return(1)\n s+=1\n s+=self.jump(nums[:sl])\n return(s)\n","repo_name":"parthmahe/leetcode","sub_path":"45-jump-game-ii/45-jump-game-ii.py","file_name":"45-jump-game-ii.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36357523431","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom . import api, views\n\nurlpatterns = [\n url('init', api.initialize),\n url('move', api.move),\n url('say', api.say),\n path('rooms', views.allRooms),\n path('rooms//', views.singleRoom),\n]","repo_name":"Polygon-MUD/mud_build","sub_path":"adventure/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23923496122","text":"# -*- coding: utf-8 -*-\r\n# Copyright (c) 2016 - 2017 Wolfgang Langner\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\n\"A standard INI style configuration parser.\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\n\r\n\r\n__version__ = \"1.1\"\r\n_is_release = all(int(v) for v in __version__.split(\".\"))\r\n\r\n__all__ = [\"NoSectionError\", \"DuplicateOptionError\", \"DuplicateSectionError\",\r\n \"NoOptionError\", \"InterpolationError\", \"InterpolationDepthError\",\r\n \"InterpolationMissingOptionError\", \"InterpolationSyntaxError\",\r\n \"ParsingError\", \"MissingSectionHeaderError\",\r\n \"ConfigParser\", \"SafeConfigParser\", \"RawConfigParser\",\r\n \"Interpolation\", \"BasicInterpolation\", \"ExtendedInterpolation\",\r\n \"LegacyInterpolation\", \"SectionProxy\", \"ConverterMapping\",\r\n \"DEFAULTSECT\", \"MAX_INTERPOLATION_DEPTH\",\r\n \"StdConfigParser\"]\r\n\r\n\r\nimport sys\r\n\r\n\r\nPY2 = sys.version_info[0] == 2\r\nPY33 = sys.version_info[:2] == (3, 3)\r\nPY34 = sys.version_info[:2] == (3, 4)\r\n\r\n\r\ndef from_none(exc):\r\n \"\"\"raise from_none(ValueError('a')) == raise ValueError('a') from None\"\"\"\r\n exc.__cause__ = None\r\n exc.__suppress_context__ = True\r\n return exc\r\n\r\n\r\n# whole Python 2 implementation based on the backport of configparser\r\n# lot of stuff copied also from Python standard library implementation\r\nif PY2:\r\n\r\n from collections import MutableMapping\r\n try:\r\n from collections import OrderedDict\r\n except ImportError:\r\n from ordereddict import OrderedDict\r\n\r\n from io import open\r\n try:\r\n from thread import get_ident\r\n except ImportError:\r\n try:\r\n from _thread import get_ident\r\n except ImportError:\r\n from _dummy_thread import get_ident\r\n\r\n\r\n # stuff from backports helpers\r\n\r\n str = type('str') # same as str = unicode because of __future__.unicode_literals\r\n\r\n # constants\r\n DEFAULTSECT = \"DEFAULT\"\r\n\r\n MAX_INTERPOLATION_DEPTH = 10\r\n\r\n # Used in parser getters to indicate the default behaviour when a specific\r\n # option is not found it to raise an exception. Created to enable `None' as\r\n # a valid fallback value.\r\n _UNSET = object()\r\n\r\n\r\n # from reprlib 3.2.1\r\n def recursive_repr(fillvalue='...'):\r\n 'Decorator to make a repr function return fillvalue for a recursive call'\r\n\r\n def decorating_function(user_function):\r\n repr_running = set()\r\n\r\n def wrapper(self):\r\n key = id(self), get_ident()\r\n if key in repr_running:\r\n return fillvalue\r\n repr_running.add(key)\r\n try:\r\n result = user_function(self)\r\n finally:\r\n repr_running.discard(key)\r\n return result\r\n\r\n # Can't use functools.wraps() here because of bootstrap issues\r\n wrapper.__module__ = getattr(user_function, '__module__')\r\n wrapper.__doc__ = getattr(user_function, '__doc__')\r\n wrapper.__name__ = getattr(user_function, '__name__')\r\n wrapper.__annotations__ = getattr(user_function, '__annotations__', {})\r\n return wrapper\r\n\r\n return decorating_function\r\n\r\n # from collections 3.2.1\r\n class ChainMap(MutableMapping):\r\n ''' A ChainMap groups multiple dicts (or other mappings) together\r\n to create a single, updateable view.\r\n\r\n The underlying mappings are stored in a list. 
That list is public and can\r\n accessed or updated using the *maps* attribute. There is no other state.\r\n\r\n Lookups search the underlying mappings successively until a key is found.\r\n In contrast, writes, updates, and deletions only operate on the first\r\n mapping.\r\n\r\n '''\r\n\r\n def __init__(self, *maps):\r\n '''Initialize a ChainMap by setting *maps* to the given mappings.\r\n If no mappings are provided, a single empty dictionary is used.\r\n\r\n '''\r\n self.maps = list(maps) or [{}] # always at least one map\r\n\r\n def __missing__(self, key):\r\n raise KeyError(key)\r\n\r\n def __getitem__(self, key):\r\n for mapping in self.maps:\r\n try:\r\n return mapping[key] # can't use 'key in mapping' with defaultdict\r\n except KeyError:\r\n pass\r\n return self.__missing__(key) # support subclasses that define __missing__\r\n\r\n def get(self, key, default=None):\r\n return self[key] if key in self else default\r\n\r\n def __len__(self):\r\n return len(set().union(*self.maps)) # reuses stored hash values if possible\r\n\r\n def __iter__(self):\r\n return iter(set().union(*self.maps))\r\n\r\n def __contains__(self, key):\r\n return any(key in m for m in self.maps)\r\n\r\n @recursive_repr()\r\n def __repr__(self):\r\n return '{0.__class__.__name__}({1})'.format(\r\n self, ', '.join(map(repr, self.maps)))\r\n\r\n @classmethod\r\n def fromkeys(cls, iterable, *args):\r\n 'Create a ChainMap with a single dict created from the iterable.'\r\n return cls(dict.fromkeys(iterable, *args))\r\n\r\n def copy(self):\r\n 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'\r\n return self.__class__(self.maps[0].copy(), *self.maps[1:])\r\n\r\n __copy__ = copy\r\n\r\n def new_child(self): # like Django's Context.push()\r\n 'New ChainMap with a new dict followed by all previous maps.'\r\n return self.__class__({}, *self.maps)\r\n\r\n @property\r\n def parents(self): # like Django's Context.pop()\r\n 'New ChainMap from maps[1:].'\r\n return self.__class__(*self.maps[1:])\r\n\r\n def __setitem__(self, key, value):\r\n self.maps[0][key] = value\r\n\r\n def __delitem__(self, key):\r\n try:\r\n del self.maps[0][key]\r\n except KeyError:\r\n raise KeyError('Key not found in the first mapping: {!r}'.format(key))\r\n\r\n def popitem(self):\r\n 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'\r\n try:\r\n return self.maps[0].popitem()\r\n except KeyError:\r\n raise KeyError('No keys found in the first mapping.')\r\n\r\n def pop(self, key, *args):\r\n 'Remove *key* from maps[0] and return its value. 
Raise KeyError if *key* not in maps[0].'\r\n try:\r\n return self.maps[0].pop(key, *args)\r\n except KeyError:\r\n raise KeyError('Key not found in the first mapping: {!r}'.format(key))\r\n\r\n def clear(self):\r\n 'Clear maps[0], leaving maps[1:] intact.'\r\n self.maps[0].clear()\r\n\r\nif PY33 or PY34:\r\n\r\n from reprlib import recursive_repr\r\n from collections import ChainMap, MutableMapping, OrderedDict # noqa\r\n from configparser import _UNSET, DEFAULTSECT, MAX_INTERPOLATION_DEPTH\r\n\r\nif PY2 or PY33 or PY34:\r\n\r\n import functools\r\n from io import StringIO\r\n import itertools\r\n import re\r\n import warnings\r\n\r\n\r\n # exception classes\r\n class Error(Exception):\r\n \"\"\"Base class for ConfigParser exceptions.\"\"\"\r\n\r\n def __init__(self, msg=''):\r\n self.message = msg\r\n Exception.__init__(self, msg)\r\n\r\n def __repr__(self):\r\n return self.message\r\n\r\n __str__ = __repr__\r\n\r\n\r\n class NoSectionError(Error):\r\n \"\"\"Raised when no section matches a requested option.\"\"\"\r\n\r\n def __init__(self, section):\r\n Error.__init__(self, 'No section: %r' % (section,))\r\n self.section = section\r\n self.args = (section, )\r\n\r\n\r\n class DuplicateSectionError(Error):\r\n \"\"\"Raised when a section is repeated in an input source.\r\n\r\n Possible repetitions that raise this exception are: multiple creation\r\n using the API or in strict parsers when a section is found more than once\r\n in a single input file, string or dictionary.\r\n \"\"\"\r\n\r\n def __init__(self, section, source=None, lineno=None):\r\n msg = [repr(section), \" already exists\"]\r\n if source is not None:\r\n message = [\"While reading from \", repr(source)]\r\n if lineno is not None:\r\n message.append(\" [line {0:2d}]\".format(lineno))\r\n message.append(\": section \")\r\n message.extend(msg)\r\n msg = message\r\n else:\r\n msg.insert(0, \"Section \")\r\n Error.__init__(self, \"\".join(msg))\r\n self.section = section\r\n self.source = source\r\n self.lineno = lineno\r\n self.args = (section, source, lineno)\r\n\r\n\r\n class DuplicateOptionError(Error):\r\n \"\"\"Raised by strict parsers when an option is repeated in an input source.\r\n\r\n Current implementation raises this exception only when an option is found\r\n more than once in a single file, string or dictionary.\r\n \"\"\"\r\n\r\n def __init__(self, section, option, source=None, lineno=None):\r\n msg = [repr(option), \" in section \", repr(section),\r\n \" already exists\"]\r\n if source is not None:\r\n message = [\"While reading from \", repr(source)]\r\n if lineno is not None:\r\n message.append(\" [line {0:2d}]\".format(lineno))\r\n message.append(\": option \")\r\n message.extend(msg)\r\n msg = message\r\n else:\r\n msg.insert(0, \"Option \")\r\n Error.__init__(self, \"\".join(msg))\r\n self.section = section\r\n self.option = option\r\n self.source = source\r\n self.lineno = lineno\r\n self.args = (section, option, source, lineno)\r\n\r\n\r\n class NoOptionError(Error):\r\n \"\"\"A requested option was not found.\"\"\"\r\n\r\n def __init__(self, option, section):\r\n Error.__init__(self, \"No option %r in section: %r\" %\r\n (option, section))\r\n self.option = option\r\n self.section = section\r\n self.args = (option, section)\r\n\r\n\r\n class InterpolationError(Error):\r\n \"\"\"Base class for interpolation-related exceptions.\"\"\"\r\n\r\n def __init__(self, option, section, msg):\r\n Error.__init__(self, msg)\r\n self.option = option\r\n self.section = section\r\n self.args = (option, section, 
msg)\r\n\r\n\r\n class InterpolationMissingOptionError(InterpolationError):\r\n \"\"\"A string substitution required a setting which was not available.\"\"\"\r\n\r\n def __init__(self, option, section, rawval, reference):\r\n msg = (\"Bad value substitution: option {0!r} in section {1!r} contains \"\r\n \"an interpolation key {2!r} which is not a valid option name. \"\r\n \"Raw value: {3!r}\".format(option, section, reference, rawval))\r\n InterpolationError.__init__(self, option, section, msg)\r\n self.reference = reference\r\n self.args = (option, section, rawval, reference)\r\n\r\n\r\n class InterpolationSyntaxError(InterpolationError):\r\n \"\"\"Raised when the source text contains invalid syntax.\r\n\r\n Current implementation raises this exception when the source text into\r\n which substitutions are made does not conform to the required syntax.\r\n \"\"\"\r\n\r\n\r\n class InterpolationDepthError(InterpolationError):\r\n \"\"\"Raised when substitutions are nested too deeply.\"\"\"\r\n\r\n def __init__(self, option, section, rawval):\r\n msg = (\"Recursion limit exceeded in value substitution: option {0!r} \"\r\n \"in section {1!r} contains an interpolation key which \"\r\n \"cannot be substituted in {2} steps. Raw value: {3!r}\"\r\n \"\".format(option, section, MAX_INTERPOLATION_DEPTH,\r\n rawval))\r\n InterpolationError.__init__(self, option, section, msg)\r\n self.args = (option, section, rawval)\r\n\r\n\r\n class ParsingError(Error):\r\n \"\"\"Raised when a configuration file does not follow legal syntax.\"\"\"\r\n\r\n def __init__(self, source=None, filename=None):\r\n # Exactly one of `source'/`filename' arguments has to be given.\r\n # `filename' kept for compatibility.\r\n if filename and source:\r\n raise ValueError(\"Cannot specify both `filename' and `source'. \"\r\n \"Use `source'.\")\r\n elif not filename and not source:\r\n raise ValueError(\"Required argument `source' not given.\")\r\n elif filename:\r\n source = filename\r\n Error.__init__(self, 'Source contains parsing errors: %r' % source)\r\n self.source = source\r\n self.errors = []\r\n self.args = (source, )\r\n\r\n @property\r\n def filename(self):\r\n \"\"\"Deprecated, use `source'.\"\"\"\r\n warnings.warn(\r\n \"The 'filename' attribute will be removed in future versions. \"\r\n \"Use 'source' instead.\",\r\n DeprecationWarning, stacklevel=2\r\n )\r\n return self.source\r\n\r\n @filename.setter\r\n def filename(self, value):\r\n \"\"\"Deprecated, user `source'.\"\"\"\r\n warnings.warn(\r\n \"The 'filename' attribute will be removed in future versions. 
\"\r\n \"Use 'source' instead.\",\r\n DeprecationWarning, stacklevel=2\r\n )\r\n self.source = value\r\n\r\n def append(self, lineno, line):\r\n self.errors.append((lineno, line))\r\n self.message += '\\n\\t[line %2d]: %s' % (lineno, line)\r\n\r\n\r\n class MissingSectionHeaderError(ParsingError):\r\n \"\"\"Raised when a key-value pair is found before any section header.\"\"\"\r\n\r\n def __init__(self, filename, lineno, line):\r\n Error.__init__(\r\n self,\r\n 'File contains no section headers.\\nfile: %r, line: %d\\n%r' %\r\n (filename, lineno, line))\r\n self.source = filename\r\n self.lineno = lineno\r\n self.line = line\r\n self.args = (filename, lineno, line)\r\n\r\n\r\n\r\n class Interpolation(object):\r\n \"\"\"Dummy interpolation that passes the value through with no changes.\"\"\"\r\n\r\n def before_get(self, parser, section, option, value, defaults):\r\n return value\r\n\r\n def before_set(self, parser, section, option, value):\r\n return value\r\n\r\n def before_read(self, parser, section, option, value):\r\n return value\r\n\r\n def before_write(self, parser, section, option, value):\r\n return value\r\n\r\n\r\n class BasicInterpolation(Interpolation):\r\n \"\"\"Interpolation as implemented in the classic ConfigParser.\r\n\r\n The option values can contain format strings which refer to other values in\r\n the same section, or values in the special default section.\r\n\r\n For example:\r\n\r\n something: %(dir)s/whatever\r\n\r\n would resolve the \"%(dir)s\" to the value of dir. All reference\r\n expansions are done late, on demand. If a user needs to use a bare % in\r\n a configuration file, she can escape it by writing %%. Other % usage\r\n is considered a user error and raises `InterpolationSyntaxError'.\"\"\"\r\n\r\n _KEYCRE = re.compile(r\"%\\(([^)]+)\\)s\")\r\n\r\n def before_get(self, parser, section, option, value, defaults):\r\n L = []\r\n self._interpolate_some(parser, option, L, value, section, defaults, 1)\r\n return ''.join(L)\r\n\r\n def before_set(self, parser, section, option, value):\r\n tmp_value = value.replace('%%', '') # escaped percent signs\r\n tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax\r\n if '%' in tmp_value:\r\n raise ValueError(\"invalid interpolation syntax in %r at \"\r\n \"position %d\" % (value, tmp_value.find('%')))\r\n return value\r\n\r\n def _interpolate_some(self, parser, option, accum, rest, section, map,\r\n depth):\r\n rawval = parser.get(section, option, raw=True, fallback=rest)\r\n if depth > MAX_INTERPOLATION_DEPTH:\r\n raise InterpolationDepthError(option, section, rawval)\r\n while rest:\r\n p = rest.find(\"%\")\r\n if p < 0:\r\n accum.append(rest)\r\n return\r\n if p > 0:\r\n accum.append(rest[:p])\r\n rest = rest[p:]\r\n # p is no longer used\r\n c = rest[1:2]\r\n if c == \"%\":\r\n accum.append(\"%\")\r\n rest = rest[2:]\r\n elif c == \"(\":\r\n m = self._KEYCRE.match(rest)\r\n if m is None:\r\n raise InterpolationSyntaxError(option, section,\r\n \"bad interpolation variable reference %r\" % rest)\r\n var = parser.optionxform(m.group(1))\r\n rest = rest[m.end():]\r\n try:\r\n v = map[var]\r\n except KeyError:\r\n raise from_none(InterpolationMissingOptionError(\r\n option, section, rawval, var))\r\n if \"%\" in v:\r\n self._interpolate_some(parser, option, accum, v,\r\n section, map, depth + 1)\r\n else:\r\n accum.append(v)\r\n else:\r\n raise InterpolationSyntaxError(\r\n option, section,\r\n \"'%%' must be followed by '%%' or '(', \"\r\n \"found: %r\" % (rest,))\r\n\r\n\r\n class 
ExtendedInterpolation(Interpolation):\r\n \"\"\"Advanced variant of interpolation, supports the syntax used by\r\n `zc.buildout'. Enables interpolation between sections.\"\"\"\r\n\r\n _KEYCRE = re.compile(r\"\\$\\{([^}]+)\\}\")\r\n\r\n def before_get(self, parser, section, option, value, defaults):\r\n L = []\r\n self._interpolate_some(parser, option, L, value, section, defaults, 1)\r\n return ''.join(L)\r\n\r\n def before_set(self, parser, section, option, value):\r\n tmp_value = value.replace('$$', '') # escaped dollar signs\r\n tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax\r\n if '$' in tmp_value:\r\n raise ValueError(\"invalid interpolation syntax in %r at \"\r\n \"position %d\" % (value, tmp_value.find('$')))\r\n return value\r\n\r\n def _interpolate_some(self, parser, option, accum, rest, section, map,\r\n depth):\r\n rawval = parser.get(section, option, raw=True, fallback=rest)\r\n if depth > MAX_INTERPOLATION_DEPTH:\r\n raise InterpolationDepthError(option, section, rawval)\r\n while rest:\r\n p = rest.find(\"$\")\r\n if p < 0:\r\n accum.append(rest)\r\n return\r\n if p > 0:\r\n accum.append(rest[:p])\r\n rest = rest[p:]\r\n # p is no longer used\r\n c = rest[1:2]\r\n if c == \"$\":\r\n accum.append(\"$\")\r\n rest = rest[2:]\r\n elif c == \"{\":\r\n m = self._KEYCRE.match(rest)\r\n if m is None:\r\n raise InterpolationSyntaxError(option, section,\r\n \"bad interpolation variable reference %r\" % rest)\r\n path = m.group(1).split(':')\r\n rest = rest[m.end():]\r\n sect = section\r\n opt = option\r\n try:\r\n if len(path) == 1:\r\n opt = parser.optionxform(path[0])\r\n v = map[opt]\r\n elif len(path) == 2:\r\n sect = path[0]\r\n opt = parser.optionxform(path[1])\r\n v = parser.get(sect, opt, raw=True)\r\n else:\r\n raise InterpolationSyntaxError(\r\n option, section,\r\n \"More than one ':' found: %r\" % (rest,))\r\n except (KeyError, NoSectionError, NoOptionError):\r\n raise from_none(InterpolationMissingOptionError(\r\n option, section, rawval, \":\".join(path)))\r\n if \"$\" in v:\r\n self._interpolate_some(parser, opt, accum, v, sect,\r\n dict(parser.items(sect, raw=True)),\r\n depth + 1)\r\n else:\r\n accum.append(v)\r\n else:\r\n raise InterpolationSyntaxError(\r\n option, section,\r\n \"'$' must be followed by '$' or '{', \"\r\n \"found: %r\" % (rest,))\r\n\r\n\r\n class LegacyInterpolation(Interpolation):\r\n \"\"\"Deprecated interpolation used in old versions of ConfigParser.\r\n Use BasicInterpolation or ExtendedInterpolation instead.\"\"\"\r\n\r\n _KEYCRE = re.compile(r\"%\\(([^)]*)\\)s|.\")\r\n\r\n def before_get(self, parser, section, option, value, vars):\r\n rawval = value\r\n depth = MAX_INTERPOLATION_DEPTH\r\n while depth: # Loop through this until it's done\r\n depth -= 1\r\n if value and \"%(\" in value:\r\n replace = functools.partial(self._interpolation_replace,\r\n parser=parser)\r\n value = self._KEYCRE.sub(replace, value)\r\n try:\r\n value = value % vars\r\n except KeyError as e:\r\n raise from_none(InterpolationMissingOptionError(\r\n option, section, rawval, e.args[0]))\r\n else:\r\n break\r\n if value and \"%(\" in value:\r\n raise InterpolationDepthError(option, section, rawval)\r\n return value\r\n\r\n def before_set(self, parser, section, option, value):\r\n return value\r\n\r\n @staticmethod\r\n def _interpolation_replace(match, parser):\r\n s = match.group(1)\r\n if s is None:\r\n return match.group()\r\n else:\r\n return \"%%(%s)s\" % parser.optionxform(s)\r\n\r\n\r\n class RawConfigParser(MutableMapping):\r\n \"\"\"ConfigParser that 
does not do interpolation.\"\"\"\r\n\r\n # Regular expressions for parsing section headers and options\r\n _SECT_TMPL = r\"\"\"\r\n \\[ # [\r\n (?P
[^]]+) # very permissive!\r\n \\] # ]\r\n \"\"\"\r\n _OPT_TMPL = r\"\"\"\r\n (?P